bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
author    David S. Miller <davem@davemloft.net>
          Mon, 4 Oct 2010 18:56:38 +0000 (11:56 -0700)
committer David S. Miller <davem@davemloft.net>
          Mon, 4 Oct 2010 18:56:38 +0000 (11:56 -0700)
Conflicts:
net/ipv4/Kconfig
net/ipv4/tcp_timer.c

13 files changed:
drivers/net/3c59x.c
drivers/net/Kconfig
drivers/net/tulip/de2104x.c
net/8021q/vlan_core.c
net/core/iovec.c
net/ipv4/Kconfig
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_timer.c
net/ipv6/route.c
net/phonet/pep.c
net/sctp/socket.c

diff --combined drivers/net/3c59x.c
index ed964964fe1f55539db8a4c9b0fdb571fab93a8d,179871d9e71f9a1f111645f58c68b7fea55b0861..e1da258bbfb7bfd89c24354e7deb626f4da777f9
@@@ -1742,7 -1742,7 +1742,7 @@@ vortex_open(struct net_device *dev
  
        /* Use the now-standard shared IRQ implementation. */
        if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
 -                              &boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev))) {
 +                              boomerang_interrupt : vortex_interrupt, IRQF_SHARED, dev->name, dev))) {
                pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
                goto err;
        }
@@@ -2942,6 -2942,9 +2942,9 @@@ static void vortex_get_wol(struct net_d
  {
        struct vortex_private *vp = netdev_priv(dev);
  
+       if (!VORTEX_PCI(vp))
+               return;
        wol->supported = WAKE_MAGIC;
  
        wol->wolopts = 0;
  static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
  {
        struct vortex_private *vp = netdev_priv(dev);
+       if (!VORTEX_PCI(vp))
+               return -EOPNOTSUPP;
        if (wol->wolopts & ~WAKE_MAGIC)
                return -EINVAL;
  
@@@ -3201,6 -3208,9 +3208,9 @@@ static void acpi_set_WOL(struct net_dev
                        return;
                }
  
+               if (VORTEX_PCI(vp)->current_state < PCI_D3hot)
+                       return;
                /* Change the power state to D3; RxEnable doesn't take effect. */
                pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
        }
diff --combined drivers/net/Kconfig
index ef683a993dcebefdd0214ed1d6a14e3e12bb31e8,5db667c0b3711f235dfc49c52a4d165b12e6b3fd..13d01f358f341ad60d6a3e2421c42c4b5ab1e0ed
@@@ -2428,7 -2428,7 +2428,7 @@@ config UGETH_TX_ON_DEMAN
  
  config MV643XX_ETH
        tristate "Marvell Discovery (643XX) and Orion ethernet support"
-       depends on MV64X60 || PPC32 || PLAT_ORION
+       depends on (MV64X60 || PPC32 || PLAT_ORION) && INET
        select INET_LRO
        select PHYLIB
        help
@@@ -2515,18 -2515,6 +2515,18 @@@ config S6GMA
  
  source "drivers/net/stmmac/Kconfig"
  
 +config PCH_GBE
 +      tristate "PCH Gigabit Ethernet"
 +      depends on PCI
 +      ---help---
 +        This is a gigabit ethernet driver for Topcliff PCH.
 +        Topcliff PCH is the platform controller hub that is used in Intel's
 +        general embedded platform.
 +        Topcliff PCH has Gigabit Ethernet interface.
 +        Using this interface, it is able to access system devices connected
 +        to Gigabit Ethernet.
 +        This driver enables Gigabit Ethernet function.
 +
  endif # NETDEV_1000
  
  #
@@@ -2815,7 -2803,7 +2815,7 @@@ config NI
  
  config PASEMI_MAC
        tristate "PA Semi 1/10Gbit MAC"
-       depends on PPC_PASEMI && PCI
+       depends on PPC_PASEMI && PCI && INET
        select PHYLIB
        select INET_LRO
        help
@@@ -2881,20 -2869,6 +2881,20 @@@ config QLG
          To compile this driver as a module, choose M here: the module
          will be called qlge.
  
 +config BNA
 +        tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
 +        depends on PCI
 +        ---help---
 +          This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
 +          cards.
 +          To compile this driver as a module, choose M here: the module
 +          will be called bna.
 +
 +          For general information and support, go to the Brocade support
 +          website at:
 +
 +          <http://support.brocade.com>
 +
  source "drivers/net/sfc/Kconfig"
  
  source "drivers/net/benet/Kconfig"
@@@ -3228,17 -3202,6 +3228,17 @@@ config PPPO
          which contains instruction on how to use this driver (under 
          the heading "Kernel mode PPPoE").
  
 +config PPTP
 +      tristate "PPP over IPv4 (PPTP) (EXPERIMENTAL)"
 +      depends on EXPERIMENTAL && PPP && NET_IPGRE_DEMUX
 +      help
 +        Support for PPP over IPv4.(Point-to-Point Tunneling Protocol)
 +
 +        This driver requires pppd plugin to work in client mode or
 +        modified pptpd (poptop) to work in server mode.
 +        See http://accel-pptp.sourceforge.net/ for information how to
 +        utilize this module.
 +
  config PPPOATM
        tristate "PPP over ATM"
        depends on ATM && PPP
diff --combined drivers/net/tulip/de2104x.c
index 8054009c4b22445230019a0a78df8a62fb9ac50d,6888e3d41462081952c7320b501736f03230ba78..28e1ffb13db99df5b0537a7426b2af3c03980feb
@@@ -364,9 -364,9 +364,9 @@@ static u16 t21040_csr15[] = { 0, 0, 0x0
  
  /* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/
  static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
- static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
+ static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
  /* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
- static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
+ static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
  static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
  
  
@@@ -948,9 -948,8 +948,9 @@@ static void de_set_media (struct de_pri
        else
                macmode &= ~FullDuplex;
  
 -      if (netif_msg_link(de)) {
 +      if (netif_msg_link(de))
                dev_info(&de->dev->dev, "set link %s\n", media_name[media]);
 +      if (netif_msg_hw(de)) {
                dev_info(&de->dev->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
                         dr32(MacMode), dr32(SIAStatus),
                         dr32(CSR13), dr32(CSR14), dr32(CSR15));
@@@ -1597,12 -1596,15 +1597,15 @@@ static int __de_set_settings(struct de_
                return 0; /* nothing to change */
  
        de_link_down(de);
+       mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
        de_stop_rxtx(de);
  
        de->media_type = new_media;
        de->media_lock = media_lock;
        de->media_advertise = ecmd->advertising;
        de_set_media(de);
+       if (netif_running(de->dev))
+               de_start_rxtx(de);
  
        return 0;
  }
diff --combined net/8021q/vlan_core.c
index 0eb486d342dcf1aa91baf64af831517f413ae97a,0eb96f7e44befb0155e364749f38eb37af0c2354..b6d55a9304f2bcb39a3bf9c4ee4f586412532f8e
@@@ -24,10 -24,13 +24,13 @@@ int __vlan_hwaccel_rx(struct sk_buff *s
  
        if (vlan_dev)
                skb->dev = vlan_dev;
-       else if (vlan_id)
-               goto drop;
+       else if (vlan_id) {
+               if (!(skb->dev->flags & IFF_PROMISC))
+                       goto drop;
+               skb->pkt_type = PACKET_OTHERHOST;
+       }
  
 -      return (polling ? netif_receive_skb(skb) : netif_rx(skb));
 +      return polling ? netif_receive_skb(skb) : netif_rx(skb);
  
  drop:
        dev_kfree_skb_any(skb);
  }
  EXPORT_SYMBOL(__vlan_hwaccel_rx);
  
 -int vlan_hwaccel_do_receive(struct sk_buff *skb)
 +void vlan_hwaccel_do_receive(struct sk_buff *skb)
  {
        struct net_device *dev = skb->dev;
        struct vlan_rx_stats     *rx_stats;
  
 -      skb->dev = vlan_dev_info(dev)->real_dev;
 +      skb->dev = vlan_dev_real_dev(dev);
        netif_nit_deliver(skb);
  
        skb->dev = dev;
@@@ -69,6 -72,7 +72,6 @@@
                break;
        }
        u64_stats_update_end(&rx_stats->syncp);
 -      return 0;
  }
  
  struct net_device *vlan_dev_real_dev(const struct net_device *dev)
@@@ -101,16 -105,16 +104,19 @@@ vlan_gro_common(struct napi_struct *nap
  
        if (vlan_dev)
                skb->dev = vlan_dev;
-       else if (vlan_id)
-               goto drop;
+       else if (vlan_id) {
+               if (!(skb->dev->flags & IFF_PROMISC))
+                       goto drop;
+               skb->pkt_type = PACKET_OTHERHOST;
+       }
  
        for (p = napi->gro_list; p; p = p->next) {
 -              NAPI_GRO_CB(p)->same_flow =
 -                      p->dev == skb->dev && !compare_ether_header(
 -                              skb_mac_header(p), skb_gro_mac_header(skb));
 +              unsigned long diffs;
 +
 +              diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
 +              diffs |= compare_ether_header(skb_mac_header(p),
 +                                            skb_gro_mac_header(skb));
 +              NAPI_GRO_CB(p)->same_flow = !diffs;
                NAPI_GRO_CB(p)->flush = 0;
        }
  
diff --combined net/core/iovec.c
index f4657c2127b4a9bc998cb0e91bf5405f83d5846e,e6b133b77ccb5615d65bcdac0ecc01b759808c76..72aceb1fe4fae6c072b12e5acc4c97bc4ed73cf7
   *    in any case.
   */
  
- int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
+ long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
  {
-       int size, err, ct;
+       int size, ct;
+       long err;
  
        if (m->msg_namelen) {
                if (mode == VERIFY_READ) {
 -                      err = move_addr_to_kernel(m->msg_name, m->msg_namelen,
 +                      void __user *namep;
 +                      namep = (void __user __force *) m->msg_name;
 +                      err = move_addr_to_kernel(namep, m->msg_namelen,
                                                  address);
                        if (err < 0)
                                return err;
@@@ -54,7 -53,7 +55,7 @@@
        }
  
        size = m->msg_iovlen * sizeof(struct iovec);
 -      if (copy_from_user(iov, m->msg_iov, size))
 +      if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
                return -EFAULT;
  
        m->msg_iov = iov;
diff --combined net/ipv4/Kconfig
index 5462e2d147a690b69e780c5c161427e6a5f745f8,7cd7760144f7dd1998276f4e2976e4a4a1c8135d..e848e6c062cddbc29d8b846227476f778a20e6ab
@@@ -215,15 -215,9 +215,15 @@@ config NET_IPI
          be inserted in and removed from the running kernel whenever you
          want). Most people won't need this and can say N.
  
 +config NET_IPGRE_DEMUX
 +      tristate "IP: GRE demultiplexer"
 +      help
 +       This is helper module to demultiplex GRE packets on GRE version field criteria.
 +       Required by ip_gre and pptp modules.
 +
  config NET_IPGRE
        tristate "IP: GRE tunnels over IP"
-       depends on NET_IPGRE_DEMUX
 -      depends on IPV6 || IPV6=n
++      depends on (IPV6 || IPV6=n) && NET_IPGRE_DEMUX
        help
          Tunneling means encapsulating data of one protocol type within
          another protocol and sending it over a channel that understands the
@@@ -419,7 -413,7 +419,7 @@@ config INET_XFRM_MODE_BEE
          If unsure, say Y.
  
  config INET_LRO
-       bool "Large Receive Offload (ipv4/tcp)"
+       tristate "Large Receive Offload (ipv4/tcp)"
        default y
        ---help---
          Support for Large Receive Offload (ipv4/tcp).
diff --combined net/ipv4/route.c
index c3cb8bd23638490c9cedd7ca027a2f0e80c4082a,ac6559cb54f9f650986e4dd86996349e328cd92a..04e0df82b88cde2573fe30e380a1c5d0ec50f0da
@@@ -1107,7 -1107,6 +1107,7 @@@ restart
                 * on the route gc list.
                 */
  
 +              rt->dst.flags |= DST_NOCACHE;
                if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
                        int err = arp_bind_neighbour(&rt->dst);
                        if (err) {
                        }
  
                        if (net_ratelimit())
-                               printk(KERN_WARNING "Neighbour table overflow.\n");
+                               printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
                        rt_drop(rt);
                        return -ENOBUFS;
                }
@@@ -1269,11 -1268,18 +1269,11 @@@ skip_hashing
  
  void rt_bind_peer(struct rtable *rt, int create)
  {
 -      static DEFINE_SPINLOCK(rt_peer_lock);
        struct inet_peer *peer;
  
        peer = inet_getpeer(rt->rt_dst, create);
  
 -      spin_lock_bh(&rt_peer_lock);
 -      if (rt->peer == NULL) {
 -              rt->peer = peer;
 -              peer = NULL;
 -      }
 -      spin_unlock_bh(&rt_peer_lock);
 -      if (peer)
 +      if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
                inet_putpeer(peer);
  }
  
@@@ -2359,8 -2365,9 +2359,8 @@@ static int __mkroute_output(struct rtab
        struct rtable *rth;
        struct in_device *in_dev;
        u32 tos = RT_FL_TOS(oldflp);
 -      int err = 0;
  
 -      if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
 +      if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags & IFF_LOOPBACK))
                return -EINVAL;
  
        if (fl->fl4_dst == htonl(0xFFFFFFFF))
        if (dev_out->flags & IFF_LOOPBACK)
                flags |= RTCF_LOCAL;
  
 -      /* get work reference to inet device */
 -      in_dev = in_dev_get(dev_out);
 -      if (!in_dev)
 +      rcu_read_lock();
 +      in_dev = __in_dev_get_rcu(dev_out);
 +      if (!in_dev) {
 +              rcu_read_unlock();
                return -EINVAL;
 -
 +      }
        if (res->type == RTN_BROADCAST) {
                flags |= RTCF_BROADCAST | RTCF_LOCAL;
                if (res->fi) {
                        res->fi = NULL;
                }
        } else if (res->type == RTN_MULTICAST) {
 -              flags |= RTCF_MULTICAST|RTCF_LOCAL;
 +              flags |= RTCF_MULTICAST | RTCF_LOCAL;
                if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
                                 oldflp->proto))
                        flags &= ~RTCF_LOCAL;
                /* If multicast route do not exist use
 -                 default one, but do not gateway in this case.
 -                 Yes, it is hack.
 +               * default one, but do not gateway in this case.
 +               * Yes, it is hack.
                 */
                if (res->fi && res->prefixlen < 4) {
                        fib_info_put(res->fi);
  
        rth = dst_alloc(&ipv4_dst_ops);
        if (!rth) {
 -              err = -ENOBUFS;
 -              goto cleanup;
 +              rcu_read_unlock();
 +              return -ENOBUFS;
        }
 +      in_dev_hold(in_dev);
 +      rcu_read_unlock();
 +      rth->idev = in_dev;
  
        atomic_set(&rth->dst.__refcnt, 1);
        rth->dst.flags= DST_HOST;
           cache entry */
        rth->dst.dev    = dev_out;
        dev_hold(dev_out);
 -      rth->idev       = in_dev_get(dev_out);
        rth->rt_gateway = fl->fl4_dst;
        rth->rt_spec_dst= fl->fl4_src;
  
        rt_set_nexthop(rth, res, 0);
  
        rth->rt_flags = flags;
 -
        *result = rth;
 - cleanup:
 -      /* release work reference to inet device */
 -      in_dev_put(in_dev);
 -
 -      return err;
 +      return 0;
  }
  
  static int ip_mkroute_output(struct rtable **rp,
  
  /*
   * Major route resolver routine.
 + * called with rcu_read_lock();
   */
  
  static int ip_route_output_slow(struct net *net, struct rtable **rp,
                            .iif = net->loopback_dev->ifindex,
                            .oif = oldflp->oif };
        struct fib_result res;
 -      unsigned flags = 0;
 +      unsigned int flags = 0;
        struct net_device *dev_out = NULL;
        int free_res = 0;
        int err;
                    (ipv4_is_multicast(oldflp->fl4_dst) ||
                     oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
                        /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
 -                      dev_out = ip_dev_find(net, oldflp->fl4_src);
 +                      dev_out = __ip_dev_find(net, oldflp->fl4_src, false);
                        if (dev_out == NULL)
                                goto out;
  
  
                if (!(oldflp->flags & FLOWI_FLAG_ANYSRC)) {
                        /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
 -                      dev_out = ip_dev_find(net, oldflp->fl4_src);
 -                      if (dev_out == NULL)
 +                      if (!__ip_dev_find(net, oldflp->fl4_src, false))
                                goto out;
 -                      dev_put(dev_out);
 -                      dev_out = NULL;
                }
        }
  
  
        if (oldflp->oif) {
 -              dev_out = dev_get_by_index(net, oldflp->oif);
 +              dev_out = dev_get_by_index_rcu(net, oldflp->oif);
                err = -ENODEV;
                if (dev_out == NULL)
                        goto out;
  
                /* RACE: Check return value of inet_select_addr instead. */
 -              if (__in_dev_get_rtnl(dev_out) == NULL) {
 -                      dev_put(dev_out);
 +              if (rcu_dereference(dev_out->ip_ptr) == NULL)
                        goto out;       /* Wrong error code */
 -              }
  
                if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
                    oldflp->fl4_dst == htonl(0xFFFFFFFF)) {
                fl.fl4_dst = fl.fl4_src;
                if (!fl.fl4_dst)
                        fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
 -              if (dev_out)
 -                      dev_put(dev_out);
                dev_out = net->loopback_dev;
 -              dev_hold(dev_out);
                fl.oif = net->loopback_dev->ifindex;
                res.type = RTN_LOCAL;
                flags |= RTCF_LOCAL;
                        res.type = RTN_UNICAST;
                        goto make_route;
                }
 -              if (dev_out)
 -                      dev_put(dev_out);
                err = -ENETUNREACH;
                goto out;
        }
        if (res.type == RTN_LOCAL) {
                if (!fl.fl4_src)
                        fl.fl4_src = fl.fl4_dst;
 -              if (dev_out)
 -                      dev_put(dev_out);
                dev_out = net->loopback_dev;
 -              dev_hold(dev_out);
                fl.oif = dev_out->ifindex;
                if (res.fi)
                        fib_info_put(res.fi);
        if (!fl.fl4_src)
                fl.fl4_src = FIB_RES_PREFSRC(res);
  
 -      if (dev_out)
 -              dev_put(dev_out);
        dev_out = FIB_RES_DEV(res);
 -      dev_hold(dev_out);
        fl.oif = dev_out->ifindex;
  
  
  make_route:
        err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);
  
 -
        if (free_res)
                fib_res_put(&res);
 -      if (dev_out)
 -              dev_put(dev_out);
  out:  return err;
  }
  
  int __ip_route_output_key(struct net *net, struct rtable **rp,
                          const struct flowi *flp)
  {
 -      unsigned hash;
 +      unsigned int hash;
 +      int res;
        struct rtable *rth;
  
        if (!rt_caching(net))
        rcu_read_unlock_bh();
  
  slow_output:
 -      return ip_route_output_slow(net, rp, flp);
 +      rcu_read_lock();
 +      res = ip_route_output_slow(net, rp, flp);
 +      rcu_read_unlock();
 +      return res;
  }
  EXPORT_SYMBOL_GPL(__ip_route_output_key);
  
@@@ -2775,7 -2798,7 +2775,7 @@@ static int ipv4_dst_blackhole(struct ne
  
        dst_release(&(*rp)->dst);
        *rp = rt;
 -      return (rt ? 0 : -ENOMEM);
 +      return rt ? 0 : -ENOMEM;
  }
  
  int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
diff --combined net/ipv4/tcp.c
index 19192c5fe67a52eed8381d6f36ce690eb0fef751,f115ea68a4efa264c59b20f61222db97a6050a9d..1664a0590bb8f28fe5662e2cfdbc3a6d660d4549
@@@ -943,7 -943,7 +943,7 @@@ int tcp_sendmsg(struct kiocb *iocb, str
        sg = sk->sk_route_caps & NETIF_F_SG;
  
        while (--iovlen >= 0) {
-               int seglen = iov->iov_len;
+               size_t seglen = iov->iov_len;
                unsigned char __user *from = iov->iov_base;
  
                iov++;
@@@ -2392,12 -2392,7 +2392,12 @@@ static int do_tcp_setsockopt(struct soc
                err = tp->af_specific->md5_parse(sk, optval, optlen);
                break;
  #endif
 -
 +      case TCP_USER_TIMEOUT:
 +              /* Cap the max timeout in ms TCP will retry/retrans
 +               * before giving up and aborting (ETIMEDOUT) a connection.
 +               */
 +              icsk->icsk_user_timeout = msecs_to_jiffies(val);
 +              break;
        default:
                err = -ENOPROTOOPT;
                break;
@@@ -2616,10 -2611,6 +2616,10 @@@ static int do_tcp_getsockopt(struct soc
        case TCP_THIN_DUPACK:
                val = tp->thin_dupack;
                break;
 +
 +      case TCP_USER_TIMEOUT:
 +              val = jiffies_to_msecs(icsk->icsk_user_timeout);
 +              break;
        default:
                return -ENOPROTOOPT;
        }
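The two tcp.c hunks above expose the new TCP_USER_TIMEOUT socket option through do_tcp_setsockopt()/do_tcp_getsockopt(): the value caps, in milliseconds, how long transmitted data may remain unacknowledged before the connection is aborted with ETIMEDOUT (0 keeps the default retry behaviour). A minimal userspace sketch, assuming a libc header that already carries the constant (on older headers its value, 18, would have to be supplied by hand, as done here):

	#include <netinet/in.h>		/* IPPROTO_TCP */
	#include <netinet/tcp.h>	/* TCP_USER_TIMEOUT on recent headers */
	#include <sys/socket.h>

	#ifndef TCP_USER_TIMEOUT
	#define TCP_USER_TIMEOUT 18	/* assumed value, matching this patch series */
	#endif

	/* Abort the connection with ETIMEDOUT once transmitted data has been
	 * left unacknowledged for more than 'msec' milliseconds (0 = default). */
	static int set_user_timeout(int fd, unsigned int msec)
	{
		return setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
				  &msec, sizeof(msec));
	}

For example, set_user_timeout(fd, 30000) gives up on a stalled peer after roughly 30 seconds instead of after the retransmission-count based default.
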
diff --combined net/ipv4/tcp_input.c
index eaf20e7e61daacf9b9d06c253451ca4c1485308c,b55f60f6fcbe934c1364ee3aece309dff4d1be4b..f6fdd727a23dcf85cf81a7a0bbfe361d1904e74d
@@@ -182,7 -182,7 +182,7 @@@ static void tcp_incr_quickack(struct so
                icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
  }
  
 -void tcp_enter_quickack_mode(struct sock *sk)
 +static void tcp_enter_quickack_mode(struct sock *sk)
  {
        struct inet_connection_sock *icsk = inet_csk(sk);
        tcp_incr_quickack(sk);
@@@ -805,12 -805,25 +805,12 @@@ void tcp_update_metrics(struct sock *sk
        }
  }
  
 -/* Numbers are taken from RFC3390.
 - *
 - * John Heffner states:
 - *
 - *    The RFC specifies a window of no more than 4380 bytes
 - *    unless 2*MSS > 4380.  Reading the pseudocode in the RFC
 - *    is a bit misleading because they use a clamp at 4380 bytes
 - *    rather than use a multiplier in the relevant range.
 - */
  __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
  {
        __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
  
 -      if (!cwnd) {
 -              if (tp->mss_cache > 1460)
 -                      cwnd = 2;
 -              else
 -                      cwnd = (tp->mss_cache > 1095) ? 3 : 4;
 -      }
 +      if (!cwnd)
 +              cwnd = rfc3390_bytes_to_packets(tp->mss_cache);
        return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
  }
  
@@@ -2301,7 -2314,7 +2301,7 @@@ static inline int tcp_dupack_heuristics
  
  static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
  {
 -      return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto);
 +      return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto;
  }
  
  static inline int tcp_head_timedout(struct sock *sk)
@@@ -2532,7 -2545,8 +2532,8 @@@ static void tcp_mark_head_lost(struct s
                        cnt += tcp_skb_pcount(skb);
  
                if (cnt > packets) {
-                       if (tcp_is_sack(tp) || (oldcnt >= packets))
+                       if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
+                           (oldcnt >= packets))
                                break;
  
                        mss = skb_shinfo(skb)->gso_size;
@@@ -3398,8 -3412,8 +3399,8 @@@ static void tcp_ack_probe(struct sock *
  
  static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag)
  {
 -      return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
 -              inet_csk(sk)->icsk_ca_state != TCP_CA_Open);
 +      return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
 +              inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
  }
  
  static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
@@@ -3416,9 -3430,9 +3417,9 @@@ static inline int tcp_may_update_window
                                        const u32 ack, const u32 ack_seq,
                                        const u32 nwin)
  {
 -      return (after(ack, tp->snd_una) ||
 +      return  after(ack, tp->snd_una) ||
                after(ack_seq, tp->snd_wl1) ||
 -              (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd));
 +              (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd);
  }
  
  /* Update our send window.
diff --combined net/ipv4/tcp_timer.c
index baea4a1290224e86c9f7c3cab646636fe86e35f9,74c54b30600f618522e07581c43130eea031f2db..f3c8c6c019ae9790d0d1c79bb695d1cb5ac8ff25
@@@ -135,13 -135,16 +135,16 @@@ static void tcp_mtu_probing(struct inet
  
  /* This function calculates a "timeout" which is equivalent to the timeout of a
   * TCP connection after "boundary" unsuccessful, exponentially backed-off
-  * retransmissions with an initial RTO of TCP_RTO_MIN.
+  * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if
+  * syn_set flag is set.
   */
  static bool retransmits_timed_out(struct sock *sk,
                                  unsigned int boundary,
-                                 unsigned int timeout)
++                                unsigned int timeout,
+                                 bool syn_set)
  {
 -      unsigned int timeout, linear_backoff_thresh;
 -      unsigned int start_ts;
 +      unsigned int linear_backoff_thresh, start_ts;
+       unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
  
        if (!inet_csk(sk)->icsk_retransmits)
                return false;
        else
                start_ts = tcp_sk(sk)->retrans_stamp;
  
 -      linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
 -
 -      if (boundary <= linear_backoff_thresh)
 -              timeout = ((2 << boundary) - 1) * rto_base;
 -      else
 -              timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
 -                        (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
 +      if (likely(timeout == 0)) {
-               linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);
++              linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
  
-                       timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
 +              if (boundary <= linear_backoff_thresh)
-                       timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
++                      timeout = ((2 << boundary) - 1) * rto_base;
 +              else
++                      timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
 +                              (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
 +      }
        return (tcp_time_stamp - start_ts) >= timeout;
  }
  
@@@ -168,14 -170,15 +171,15 @@@ static int tcp_write_timeout(struct soc
  {
        struct inet_connection_sock *icsk = inet_csk(sk);
        int retry_until;
-       bool do_reset;
+       bool do_reset, syn_set = 0;
  
        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                if (icsk->icsk_retransmits)
                        dst_negative_advice(sk);
                retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
+               syn_set = 1;
        } else {
--              if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0)) {
++              if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
                        /* Black hole detection */
                        tcp_mtu_probing(icsk, sk);
  
  
                        retry_until = tcp_orphan_retries(sk, alive);
                        do_reset = alive ||
--                                 !retransmits_timed_out(sk, retry_until, 0);
++                              !retransmits_timed_out(sk, retry_until, 0, 0);
  
                        if (tcp_out_of_resources(sk, do_reset))
                                return 1;
                }
        }
  
 -      if (retransmits_timed_out(sk, retry_until, syn_set)) {
 +      if (retransmits_timed_out(sk, retry_until,
-           (1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV) ? 0 :
-           icsk->icsk_user_timeout)) {
++                                syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
                /* Has it gone just too far? */
                tcp_write_err(sk);
                return 1;
@@@ -439,7 -440,7 +442,7 @@@ out_reset_timer
                icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
        }
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
--      if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0))
++      if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
                __sk_dst_reset(sk);
  
  out:;
@@@ -559,14 -560,7 +562,14 @@@ static void tcp_keepalive_timer (unsign
        elapsed = keepalive_time_elapsed(tp);
  
        if (elapsed >= keepalive_time_when(tp)) {
 -              if (icsk->icsk_probes_out >= keepalive_probes(tp)) {
 +              /* If the TCP_USER_TIMEOUT option is enabled, use that
 +               * to determine when to timeout instead.
 +               */
 +              if ((icsk->icsk_user_timeout != 0 &&
 +                  elapsed >= icsk->icsk_user_timeout &&
 +                  icsk->icsk_probes_out > 0) ||
 +                  (icsk->icsk_user_timeout == 0 &&
 +                  icsk->icsk_probes_out >= keepalive_probes(tp))) {
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                        tcp_write_err(sk);
                        goto out;
diff --combined net/ipv6/route.c
index 25476e7e708b25d24570711caa8106c141388343,a275c6e1e25c23884d7d1859e46a2ee82c00acef..17e217933885afdae3e5b1b160b0f714953e2f6d
@@@ -217,14 -217,14 +217,14 @@@ static void ip6_dst_ifdown(struct dst_e
  
  static __inline__ int rt6_check_expired(const struct rt6_info *rt)
  {
 -      return (rt->rt6i_flags & RTF_EXPIRES &&
 -              time_after(jiffies, rt->rt6i_expires));
 +      return (rt->rt6i_flags & RTF_EXPIRES) &&
 +              time_after(jiffies, rt->rt6i_expires);
  }
  
  static inline int rt6_need_strict(struct in6_addr *daddr)
  {
 -      return (ipv6_addr_type(daddr) &
 -              (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK));
 +      return ipv6_addr_type(daddr) &
 +              (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
  }
  
  /*
@@@ -440,7 -440,7 +440,7 @@@ static struct rt6_info *rt6_select(stru
                  __func__, match);
  
        net = dev_net(rt0->rt6i_dev);
 -      return (match ? match : net->ipv6.ip6_null_entry);
 +      return match ? match : net->ipv6.ip6_null_entry;
  }
  
  #ifdef CONFIG_IPV6_ROUTE_INFO
@@@ -670,7 -670,7 +670,7 @@@ static struct rt6_info *rt6_alloc_cow(s
  
                        if (net_ratelimit())
                                printk(KERN_WARNING
-                                      "Neighbour table overflow.\n");
+                                      "ipv6: Neighbour table overflow.\n");
                        dst_free(&rt->dst);
                        return NULL;
                }
@@@ -859,7 -859,7 +859,7 @@@ int ip6_dst_blackhole(struct sock *sk, 
  
        dst_release(*dstp);
        *dstp = new;
 -      return (new ? 0 : -ENOMEM);
 +      return new ? 0 : -ENOMEM;
  }
  EXPORT_SYMBOL_GPL(ip6_dst_blackhole);
  
@@@ -1070,7 -1070,7 +1070,7 @@@ static int ip6_dst_gc(struct dst_ops *o
                net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
  out:
        net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
 -      return (atomic_read(&ops->entries) > rt_max_size);
 +      return atomic_read(&ops->entries) > rt_max_size;
  }
  
  /* Clean host part of a prefix. Not necessary in radix tree,
@@@ -1169,8 -1169,6 +1169,8 @@@ int ip6_route_add(struct fib6_config *c
  
        if (addr_type & IPV6_ADDR_MULTICAST)
                rt->dst.input = ip6_mc_input;
 +      else if (cfg->fc_flags & RTF_LOCAL)
 +              rt->dst.input = ip6_input;
        else
                rt->dst.input = ip6_forward;
  
           they would result in kernel looping; promote them to reject routes
         */
        if ((cfg->fc_flags & RTF_REJECT) ||
 -          (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK))) {
 +          (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK)
 +                                            && !(cfg->fc_flags&RTF_LOCAL))) {
                /* hold loopback dev/idev if we haven't done so. */
                if (dev != net->loopback_dev) {
                        if (dev) {
   *    i.e. Path MTU discovery
   */
  
- void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
-                       struct net_device *dev, u32 pmtu)
+ static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr,
+                            struct net *net, u32 pmtu, int ifindex)
  {
        struct rt6_info *rt, *nrt;
-       struct net *net = dev_net(dev);
        int allfrag = 0;
  
-       rt = rt6_lookup(net, daddr, saddr, dev->ifindex, 0);
+       rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
        if (rt == NULL)
                return;
  
        dst_release(&rt->dst);
  }
  
+ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
+                       struct net_device *dev, u32 pmtu)
+ {
+       struct net *net = dev_net(dev);
+       /*
+        * RFC 1981 states that a node "MUST reduce the size of the packets it
+        * is sending along the path" that caused the Packet Too Big message.
+        * Since it's not possible in the general case to determine which
+        * interface was used to send the original packet, we update the MTU
+        * on the interface that will be used to send future packets. We also
+        * update the MTU on the interface that received the Packet Too Big in
+        * case the original packet was forced out that interface with
+        * SO_BINDTODEVICE or similar. This is the next best thing to the
+        * correct behaviour, which would be to update the MTU on all
+        * interfaces.
+        */
+       rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0);
+       rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex);
+ }
  /*
   *    Misc support functions
   */
@@@ -2085,9 -2102,6 +2105,9 @@@ static int rtm_to_fib6_config(struct sk
        if (rtm->rtm_type == RTN_UNREACHABLE)
                cfg->fc_flags |= RTF_REJECT;
  
 +      if (rtm->rtm_type == RTN_LOCAL)
 +              cfg->fc_flags |= RTF_LOCAL;
 +
        cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
        cfg->fc_nlinfo.nlh = nlh;
        cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
@@@ -2208,8 -2222,6 +2228,8 @@@ static int rt6_fill_node(struct net *ne
        NLA_PUT_U32(skb, RTA_TABLE, table);
        if (rt->rt6i_flags&RTF_REJECT)
                rtm->rtm_type = RTN_UNREACHABLE;
 +      else if (rt->rt6i_flags&RTF_LOCAL)
 +              rtm->rtm_type = RTN_LOCAL;
        else if (rt->rt6i_dev && (rt->rt6i_dev->flags&IFF_LOOPBACK))
                rtm->rtm_type = RTN_LOCAL;
        else
diff --combined net/phonet/pep.c
index 552fb665645f005d33b7249bf733fb28a3d7092f,15003021f4f0a8706e540150425b4f995dc582a6..aa3d8700d213a7e8913ebd928f3acab1f90a92c1
@@@ -88,15 -88,6 +88,15 @@@ static int pep_reply(struct sock *sk, s
        const struct pnpipehdr *oph = pnp_hdr(oskb);
        struct pnpipehdr *ph;
        struct sk_buff *skb;
 +#ifdef CONFIG_PHONET_PIPECTRLR
 +      const struct phonethdr *hdr = pn_hdr(oskb);
 +      struct sockaddr_pn spn = {
 +              .spn_family = AF_PHONET,
 +              .spn_resource = 0xD9,
 +              .spn_dev = hdr->pn_sdev,
 +              .spn_obj = hdr->pn_sobj,
 +      };
 +#endif
  
        skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
        if (!skb)
        ph->pipe_handle = oph->pipe_handle;
        ph->error_code = code;
  
 +#ifdef CONFIG_PHONET_PIPECTRLR
 +      return pn_skb_send(sk, skb, &spn);
 +#else
        return pn_skb_send(sk, skb, &pipe_srv);
 +#endif
  }
  
  #define PAD 0x00
 +
 +#ifdef CONFIG_PHONET_PIPECTRLR
 +static u8 pipe_negotiate_fc(u8 *host_fc, u8 *remote_fc, int len)
 +{
 +      int i, j;
 +      u8 base_fc, final_fc;
 +
 +      for (i = 0; i < len; i++) {
 +              base_fc = host_fc[i];
 +              for (j = 0; j < len; j++) {
 +                      if (remote_fc[j] == base_fc) {
 +                              final_fc = base_fc;
 +                              goto done;
 +                      }
 +              }
 +      }
 +      return -EINVAL;
 +
 +done:
 +      return final_fc;
 +
 +}
 +
 +static int pipe_get_flow_info(struct sock *sk, struct sk_buff *skb,
 +              u8 *pref_rx_fc, u8 *req_tx_fc)
 +{
 +      struct pnpipehdr *hdr;
 +      u8 n_sb;
 +
 +      if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
 +              return -EINVAL;
 +
 +      hdr = pnp_hdr(skb);
 +      n_sb = hdr->data[4];
 +
 +      __skb_pull(skb, sizeof(*hdr) + 4);
 +      while (n_sb > 0) {
 +              u8 type, buf[3], len = sizeof(buf);
 +              u8 *data = pep_get_sb(skb, &type, &len, buf);
 +
 +              if (data == NULL)
 +                      return -EINVAL;
 +
 +              switch (type) {
 +              case PN_PIPE_SB_REQUIRED_FC_TX:
 +                      if (len < 3 || (data[2] | data[3] | data[4]) > 3)
 +                              break;
 +                      req_tx_fc[0] = data[2];
 +                      req_tx_fc[1] = data[3];
 +                      req_tx_fc[2] = data[4];
 +                      break;
 +
 +              case PN_PIPE_SB_PREFERRED_FC_RX:
 +                      if (len < 3 || (data[2] | data[3] | data[4]) > 3)
 +                              break;
 +                      pref_rx_fc[0] = data[2];
 +                      pref_rx_fc[1] = data[3];
 +                      pref_rx_fc[2] = data[4];
 +                      break;
 +
 +              }
 +              n_sb--;
 +      }
 +      return 0;
 +}
 +
 +static int pipe_handler_send_req(struct sock *sk, u16 dobj, u8 utid,
 +              u8 msg_id, u8 p_handle, gfp_t priority)
 +{
 +      int len;
 +      struct pnpipehdr *ph;
 +      struct sk_buff *skb;
 +      struct sockaddr_pn spn = {
 +              .spn_family = AF_PHONET,
 +              .spn_resource = 0xD9,
 +              .spn_dev = pn_dev(dobj),
 +              .spn_obj = pn_obj(dobj),
 +      };
 +
 +      static const u8 data[4] = {
 +              PAD, PAD, PAD, PAD,
 +      };
 +
 +      switch (msg_id) {
 +      case PNS_PEP_CONNECT_REQ:
 +              len = sizeof(data);
 +              break;
 +
 +      case PNS_PEP_DISCONNECT_REQ:
 +      case PNS_PEP_ENABLE_REQ:
 +      case PNS_PEP_DISABLE_REQ:
 +              len = 0;
 +              break;
 +
 +      default:
 +              return -EINVAL;
 +      }
 +
 +      skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
 +      if (!skb)
 +              return -ENOMEM;
 +      skb_set_owner_w(skb, sk);
 +
 +      skb_reserve(skb, MAX_PNPIPE_HEADER);
 +      if (len) {
 +              __skb_put(skb, len);
 +              skb_copy_to_linear_data(skb, data, len);
 +      }
 +      __skb_push(skb, sizeof(*ph));
 +      skb_reset_transport_header(skb);
 +      ph = pnp_hdr(skb);
 +      ph->utid = utid;
 +      ph->message_id = msg_id;
 +      ph->pipe_handle = p_handle;
 +      ph->error_code = PN_PIPE_NO_ERROR;
 +
 +      return pn_skb_send(sk, skb, &spn);
 +}
 +
 +static int pipe_handler_send_created_ind(struct sock *sk, u16 dobj,
 +              u8 utid, u8 p_handle, u8 msg_id, u8 tx_fc, u8 rx_fc)
 +{
 +      int err_code;
 +      struct pnpipehdr *ph;
 +      struct sk_buff *skb;
 +      struct sockaddr_pn spn = {
 +              .spn_family = AF_PHONET,
 +              .spn_resource = 0xD9,
 +              .spn_dev = pn_dev(dobj),
 +              .spn_obj = pn_obj(dobj),
 +      };
 +
 +      static u8 data[4] = {
 +              0x03, 0x04,
 +      };
 +      data[2] = tx_fc;
 +      data[3] = rx_fc;
 +
 +      /*
 +       * actually, below is number of sub-blocks and not error code.
 +       * Pipe_created_ind message format does not have any
 +       * error code field. However, the Phonet stack will always send
 +       * an error code as part of pnpipehdr. So, use that err_code to
 +       * specify the number of sub-blocks.
 +       */
 +      err_code = 0x01;
 +
 +      skb = alloc_skb(MAX_PNPIPE_HEADER + sizeof(data), GFP_ATOMIC);
 +      if (!skb)
 +              return -ENOMEM;
 +      skb_set_owner_w(skb, sk);
 +
 +      skb_reserve(skb, MAX_PNPIPE_HEADER);
 +      __skb_put(skb, sizeof(data));
 +      skb_copy_to_linear_data(skb, data, sizeof(data));
 +      __skb_push(skb, sizeof(*ph));
 +      skb_reset_transport_header(skb);
 +      ph = pnp_hdr(skb);
 +      ph->utid = utid;
 +      ph->message_id = msg_id;
 +      ph->pipe_handle = p_handle;
 +      ph->error_code = err_code;
 +
 +      return pn_skb_send(sk, skb, &spn);
 +}
 +
 +static int pipe_handler_send_ind(struct sock *sk, u16 dobj, u8 utid,
 +              u8 p_handle, u8 msg_id)
 +{
 +      int err_code;
 +      struct pnpipehdr *ph;
 +      struct sk_buff *skb;
 +      struct sockaddr_pn spn = {
 +              .spn_family = AF_PHONET,
 +              .spn_resource = 0xD9,
 +              .spn_dev = pn_dev(dobj),
 +              .spn_obj = pn_obj(dobj),
 +      };
 +
 +      /*
 +       * actually, below is a filler.
 +       * Pipe_enabled/disabled_ind message format does not have any
 +       * error code field. However, the Phonet stack will always send
 +       * an error code as part of pnpipehdr. So, use that err_code to
 +       * specify the filler value.
 +       */
 +      err_code = 0x0;
 +
 +      skb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
 +      if (!skb)
 +              return -ENOMEM;
 +      skb_set_owner_w(skb, sk);
 +
 +      skb_reserve(skb, MAX_PNPIPE_HEADER);
 +      __skb_push(skb, sizeof(*ph));
 +      skb_reset_transport_header(skb);
 +      ph = pnp_hdr(skb);
 +      ph->utid = utid;
 +      ph->message_id = msg_id;
 +      ph->pipe_handle = p_handle;
 +      ph->error_code = err_code;
 +
 +      return pn_skb_send(sk, skb, &spn);
 +}
 +
 +static int pipe_handler_enable_pipe(struct sock *sk, int cmd)
 +{
 +      int ret;
 +      struct pep_sock *pn = pep_sk(sk);
 +
 +      switch (cmd) {
 +      case PNPIPE_ENABLE:
 +              ret = pipe_handler_send_req(sk, pn->pn_sk.sobject,
 +                              PNS_PIPE_ENABLE_UTID, PNS_PEP_ENABLE_REQ,
 +                              pn->pipe_handle, GFP_ATOMIC);
 +              break;
 +
 +      case PNPIPE_DISABLE:
 +              ret = pipe_handler_send_req(sk, pn->pn_sk.sobject,
 +                              PNS_PIPE_DISABLE_UTID, PNS_PEP_DISABLE_REQ,
 +                              pn->pipe_handle, GFP_ATOMIC);
 +              break;
 +
 +      default:
 +              ret = -EINVAL;
 +      }
 +
 +      return ret;
 +}
 +
 +static int pipe_handler_create_pipe(struct sock *sk, int pipe_handle, int cmd)
 +{
 +      int ret;
 +      struct pep_sock *pn = pep_sk(sk);
 +
 +      switch (cmd) {
 +      case PNPIPE_CREATE:
 +              ret = pipe_handler_send_req(sk, pn->pn_sk.sobject,
 +                              PNS_PEP_CONNECT_UTID, PNS_PEP_CONNECT_REQ,
 +                              pipe_handle, GFP_ATOMIC);
 +              break;
 +
 +      case PNPIPE_DESTROY:
 +              ret = pipe_handler_send_req(sk, pn->remote_pep,
 +                              PNS_PEP_DISCONNECT_UTID,
 +                              PNS_PEP_DISCONNECT_REQ,
 +                              pn->pipe_handle, GFP_ATOMIC);
 +              break;
 +
 +      default:
 +              ret = -EINVAL;
 +      }
 +
 +      return ret;
 +}
 +#endif
 +
  static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
  {
        static const u8 data[20] = {
@@@ -443,14 -173,6 +443,14 @@@ static int pipe_snd_status(struct sock 
        struct pep_sock *pn = pep_sk(sk);
        struct pnpipehdr *ph;
        struct sk_buff *skb;
 +#ifdef CONFIG_PHONET_PIPECTRLR
 +      struct sockaddr_pn spn = {
 +              .spn_family = AF_PHONET,
 +              .spn_resource = 0xD9,
 +              .spn_dev = pn_dev(pn->remote_pep),
 +              .spn_obj = pn_obj(pn->remote_pep),
 +      };
 +#endif
  
        skb = alloc_skb(MAX_PNPIPE_HEADER + 4, priority);
        if (!skb)
        ph->data[3] = PAD;
        ph->data[4] = status;
  
 +#ifdef CONFIG_PHONET_PIPECTRLR
 +      return pn_skb_send(sk, skb, &spn);
 +#else
        return pn_skb_send(sk, skb, &pipe_srv);
 +#endif
  }
  
  /* Send our RX flow control information to the sender.
@@@ -507,12 -225,13 +507,13 @@@ static void pipe_grant_credits(struct s
  static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
  {
        struct pep_sock *pn = pep_sk(sk);
-       struct pnpipehdr *hdr = pnp_hdr(skb);
+       struct pnpipehdr *hdr;
        int wake = 0;
  
        if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
                return -EINVAL;
  
+       hdr = pnp_hdr(skb);
        if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
                LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n",
                                (unsigned)hdr->data[0]);
@@@ -590,12 -309,6 +591,12 @@@ static int pipe_do_rcv(struct sock *sk
        struct pnpipehdr *hdr = pnp_hdr(skb);
        struct sk_buff_head *queue;
        int err = 0;
 +#ifdef CONFIG_PHONET_PIPECTRLR
 +      struct phonethdr *ph = pn_hdr(skb);
 +      static u8 host_pref_rx_fc[3], host_req_tx_fc[3];
 +      u8 remote_pref_rx_fc[3], remote_req_tx_fc[3];
 +      u8 negotiated_rx_fc, negotiated_tx_fc;
 +#endif
  
        BUG_ON(sk->sk_state == TCP_CLOSE_WAIT);
  
                pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE);
                break;
  
 +#ifdef CONFIG_PHONET_PIPECTRLR
 +      case PNS_PEP_CONNECT_RESP:
 +              if ((ph->pn_sdev == pn_dev(pn->remote_pep)) &&
 +                              (ph->pn_sobj == pn_obj(pn->remote_pep))) {
 +                      pipe_get_flow_info(sk, skb, remote_pref_rx_fc,
 +                                      remote_req_tx_fc);
 +
 +                       negotiated_tx_fc = pipe_negotiate_fc(remote_req_tx_fc,
 +                                       host_pref_rx_fc,
 +                                       sizeof(host_pref_rx_fc));
 +                       negotiated_rx_fc = pipe_negotiate_fc(host_req_tx_fc,
 +                                       remote_pref_rx_fc,
 +                                       sizeof(host_pref_rx_fc));
 +
 +                      pn->pipe_state = PIPE_DISABLED;
 +                      pipe_handler_send_created_ind(sk, pn->remote_pep,
 +                                      PNS_PIPE_CREATED_IND_UTID,
 +                                      pn->pipe_handle, PNS_PIPE_CREATED_IND,
 +                                      negotiated_tx_fc, negotiated_rx_fc);
 +                      pipe_handler_send_created_ind(sk, pn->pn_sk.sobject,
 +                                      PNS_PIPE_CREATED_IND_UTID,
 +                                      pn->pipe_handle, PNS_PIPE_CREATED_IND,
 +                                      negotiated_tx_fc, negotiated_rx_fc);
 +              } else {
 +                      pipe_handler_send_req(sk, pn->remote_pep,
 +                                      PNS_PEP_CONNECT_UTID,
 +                                      PNS_PEP_CONNECT_REQ, pn->pipe_handle,
 +                                      GFP_ATOMIC);
 +                      pipe_get_flow_info(sk, skb, host_pref_rx_fc,
 +                                      host_req_tx_fc);
 +              }
 +              break;
 +#endif
 +
        case PNS_PEP_DISCONNECT_REQ:
                pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
                sk->sk_state = TCP_CLOSE_WAIT;
                        sk->sk_state_change(sk);
                break;
  
 +#ifdef CONFIG_PHONET_PIPECTRLR
 +      case PNS_PEP_DISCONNECT_RESP:
 +              pn->pipe_state = PIPE_IDLE;
 +              pipe_handler_send_req(sk, pn->pn_sk.sobject,
 +                              PNS_PEP_DISCONNECT_UTID,
 +                              PNS_PEP_DISCONNECT_REQ, pn->pipe_handle,
 +                              GFP_KERNEL);
 +              break;
 +#endif
 +
        case PNS_PEP_ENABLE_REQ:
                /* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */
                pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
                break;
  
 +#ifdef CONFIG_PHONET_PIPECTRLR
 +      case PNS_PEP_ENABLE_RESP:
 +              if ((ph->pn_sdev == pn_dev(pn->remote_pep)) &&
 +                              (ph->pn_sobj == pn_obj(pn->remote_pep))) {
 +                      pn->pipe_state = PIPE_ENABLED;
 +                      pipe_handler_send_ind(sk, pn->remote_pep,
 +                                      PNS_PIPE_ENABLED_IND_UTID,
 +                                      pn->pipe_handle, PNS_PIPE_ENABLED_IND);
 +                      pipe_handler_send_ind(sk, pn->pn_sk.sobject,
 +                                      PNS_PIPE_ENABLED_IND_UTID,
 +                                      pn->pipe_handle, PNS_PIPE_ENABLED_IND);
 +              } else
 +                      pipe_handler_send_req(sk, pn->remote_pep,
 +                                      PNS_PIPE_ENABLE_UTID,
 +                                      PNS_PEP_ENABLE_REQ, pn->pipe_handle,
 +                                      GFP_KERNEL);
 +
 +              break;
 +#endif
 +
        case PNS_PEP_RESET_REQ:
                switch (hdr->state_after_reset) {
                case PN_PIPE_DISABLE:
                pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
                break;
  
 +#ifdef CONFIG_PHONET_PIPECTRLR
 +      case PNS_PEP_DISABLE_RESP:
 +              if ((ph->pn_sdev == pn_dev(pn->remote_pep)) &&
 +                              (ph->pn_sobj == pn_obj(pn->remote_pep))) {
 +                      pn->pipe_state = PIPE_DISABLED;
 +                      pipe_handler_send_ind(sk, pn->remote_pep,
 +                                      PNS_PIPE_DISABLED_IND_UTID,
 +                                      pn->pipe_handle,
 +                                      PNS_PIPE_DISABLED_IND);
 +                      pipe_handler_send_ind(sk, pn->pn_sk.sobject,
 +                                      PNS_PIPE_DISABLED_IND_UTID,
 +                                      pn->pipe_handle,
 +                                      PNS_PIPE_DISABLED_IND);
 +              } else
 +                      pipe_handler_send_req(sk, pn->remote_pep,
 +                                      PNS_PIPE_DISABLE_UTID,
 +                                      PNS_PEP_DISABLE_REQ, pn->pipe_handle,
 +                                      GFP_KERNEL);
 +              break;
 +#endif
 +
        case PNS_PEP_CTRL_REQ:
                if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
                        atomic_inc(&sk->sk_drops);
@@@ -892,9 -520,6 +893,9 @@@ static int pep_connreq_rcv(struct sock 
        newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
        newpn->init_enable = enabled;
        newpn->aligned = aligned;
 +#ifdef CONFIG_PHONET_PIPECTRLR
 +      newpn->remote_pep = pn->remote_pep;
 +#endif
  
        BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue));
        skb_queue_head(&newsk->sk_receive_queue, skb);
@@@ -996,28 -621,6 +997,28 @@@ drop
        return err;
  }
  
 +static int pipe_do_remove(struct sock *sk)
 +{
 +      struct pep_sock *pn = pep_sk(sk);
 +      struct pnpipehdr *ph;
 +      struct sk_buff *skb;
 +
 +      skb = alloc_skb(MAX_PNPIPE_HEADER, GFP_KERNEL);
 +      if (!skb)
 +              return -ENOMEM;
 +
 +      skb_reserve(skb, MAX_PNPIPE_HEADER);
 +      __skb_push(skb, sizeof(*ph));
 +      skb_reset_transport_header(skb);
 +      ph = pnp_hdr(skb);
 +      ph->utid = 0;
 +      ph->message_id = PNS_PIPE_REMOVE_REQ;
 +      ph->pipe_handle = pn->pipe_handle;
 +      ph->data[0] = PAD;
 +
 +      return pn_skb_send(sk, skb, &pipe_srv);
 +}
 +
  /* associated socket ceases to exist */
  static void pep_sock_close(struct sock *sk, long timeout)
  {
                sk_for_each_safe(sknode, p, n, &pn->ackq)
                        sk_del_node_init(sknode);
                sk->sk_state = TCP_CLOSE;
 -      }
 +      } else if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
 +              /* Forcefully remove dangling Phonet pipe */
 +              pipe_do_remove(sk);
 +
        ifindex = pn->ifindex;
        pn->ifindex = 0;
        release_sock(sk);
@@@ -1157,10 -757,6 +1158,10 @@@ static int pep_setsockopt(struct sock *
  {
        struct pep_sock *pn = pep_sk(sk);
        int val = 0, err = 0;
 +#ifdef CONFIG_PHONET_PIPECTRLR
 +      int remote_pep;
 +      int pipe_handle;
 +#endif
  
        if (level != SOL_PNPIPE)
                return -ENOPROTOOPT;
  
        lock_sock(sk);
        switch (optname) {
 +#ifdef CONFIG_PHONET_PIPECTRLR
 +      case PNPIPE_CREATE:
 +              if (val) {
 +                      if (pn->pipe_state > PIPE_IDLE) {
 +                              err = -EFAULT;
 +                              break;
 +                      }
 +                      remote_pep = val & 0xFFFF;
 +                      pipe_handle =  (val >> 16) & 0xFF;
 +                      pn->remote_pep = remote_pep;
 +                      err = pipe_handler_create_pipe(sk, pipe_handle,
 +                                      PNPIPE_CREATE);
 +                      break;
 +              }
 +
 +      case PNPIPE_ENABLE:
 +              if (pn->pipe_state != PIPE_DISABLED) {
 +                      err = -EFAULT;
 +                      break;
 +              }
 +              err = pipe_handler_enable_pipe(sk, PNPIPE_ENABLE);
 +              break;
 +
 +      case PNPIPE_DISABLE:
 +              if (pn->pipe_state != PIPE_ENABLED) {
 +                      err = -EFAULT;
 +                      break;
 +              }
 +
 +              err = pipe_handler_enable_pipe(sk, PNPIPE_DISABLE);
 +              break;
 +
 +      case PNPIPE_DESTROY:
 +              if (pn->pipe_state < PIPE_DISABLED) {
 +                      err = -EFAULT;
 +                      break;
 +              }
 +
 +              err = pipe_handler_create_pipe(sk, 0x0, PNPIPE_DESTROY);
 +              break;
 +#endif
 +
        case PNPIPE_ENCAP:
                if (val && val != PNPIPE_ENCAP_IP) {
                        err = -EINVAL;
@@@ -1262,13 -816,6 +1263,13 @@@ static int pep_getsockopt(struct sock *
        case PNPIPE_ENCAP:
                val = pn->ifindex ? PNPIPE_ENCAP_IP : PNPIPE_ENCAP_NONE;
                break;
 +
 +#ifdef CONFIG_PHONET_PIPECTRLR
 +      case PNPIPE_INQ:
 +              val = pn->pipe_state;
 +              break;
 +#endif
 +
        case PNPIPE_IFINDEX:
                val = pn->ifindex;
                break;
@@@ -1288,15 -835,6 +1289,15 @@@ static int pipe_skb_send(struct sock *s
  {
        struct pep_sock *pn = pep_sk(sk);
        struct pnpipehdr *ph;
 +      int err;
 +#ifdef CONFIG_PHONET_PIPECTRLR
 +      struct sockaddr_pn spn = {
 +              .spn_family = AF_PHONET,
 +              .spn_resource = 0xD9,
 +              .spn_dev = pn_dev(pn->remote_pep),
 +              .spn_obj = pn_obj(pn->remote_pep),
 +      };
 +#endif
  
        if (pn_flow_safe(pn->tx_fc) &&
            !atomic_add_unless(&pn->tx_credits, -1, 0)) {
        } else
                ph->message_id = PNS_PIPE_DATA;
        ph->pipe_handle = pn->pipe_handle;
 +#ifdef CONFIG_PHONET_PIPECTRLR
 +      err = pn_skb_send(sk, skb, &spn);
 +#else
 +      err = pn_skb_send(sk, skb, &pipe_srv);
 +#endif
 +
 +      if (err && pn_flow_safe(pn->tx_fc))
 +              atomic_inc(&pn->tx_credits);
 +      return err;
  
 -      return pn_skb_send(sk, skb, &pipe_srv);
  }
  
  static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
        skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
                                        flags & MSG_DONTWAIT, &err);
        if (!skb)
 -              return -ENOBUFS;
 +              return err;
  
        skb_reserve(skb, MAX_PHONET_HEADER + 3);
        err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
diff --combined net/sctp/socket.c
index d4bf2a78cb8a2ce4b8742243511307862f6604f5,fbb70770ad05d05807d25b5527e5616a621f58e8..e34ca9cc11675e249cd703ea58ba4fe2e5feeb82
@@@ -57,8 -57,6 +57,8 @@@
   * be incorporated into the next SCTP release.
   */
  
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
  #include <linux/types.h>
  #include <linux/kernel.h>
  #include <linux/wait.h>
@@@ -918,6 -916,11 +918,11 @@@ SCTP_STATIC int sctp_setsockopt_bindx(s
        /* Walk through the addrs buffer and count the number of addresses. */
        addr_buf = kaddrs;
        while (walk_size < addrs_size) {
+               if (walk_size + sizeof(sa_family_t) > addrs_size) {
+                       kfree(kaddrs);
+                       return -EINVAL;
+               }
                sa_addr = (struct sockaddr *)addr_buf;
                af = sctp_get_af_specific(sa_addr->sa_family);
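
This hunk and the matching one in __sctp_connect() below add the same guard: before sa_family is read from the packed address buffer, check that at least sizeof(sa_family_t) bytes remain, so a short addrs_size from user space cannot push the walk past the end of the copied buffer. A stand-alone, user-space-style sketch of that walk (not the kernel code itself):

#include <sys/socket.h>
#include <netinet/in.h>
#include <stddef.h>
#include <errno.h>

static int walk_addrs(const void *addrs, size_t addrs_size)
{
	const char *addr_buf = addrs;
	size_t walk_size = 0;

	while (walk_size < addrs_size) {
		const struct sockaddr *sa;
		size_t af_len;

		/* Not even the family field fits inside the buffer. */
		if (walk_size + sizeof(sa_family_t) > addrs_size)
			return -EINVAL;

		sa = (const struct sockaddr *)(addr_buf + walk_size);
		switch (sa->sa_family) {
		case AF_INET:
			af_len = sizeof(struct sockaddr_in);
			break;
		case AF_INET6:
			af_len = sizeof(struct sockaddr_in6);
			break;
		default:
			return -EINVAL;
		}

		/* The whole address must fit as well. */
		if (walk_size + af_len > addrs_size)
			return -EINVAL;

		walk_size += af_len;
	}
	return 0;
}

int main(void)
{
	struct sockaddr_in addrs[2] = {
		{ .sin_family = AF_INET },
		{ .sin_family = AF_INET },
	};

	/* The full buffer parses; a truncated length is rejected. */
	return (walk_addrs(addrs, sizeof(addrs)) == 0 &&
		walk_addrs(addrs, sizeof(addrs) - 1) == -EINVAL) ? 0 : 1;
}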
  
@@@ -1004,9 -1007,13 +1009,13 @@@ static int __sctp_connect(struct sock* 
        /* Walk through the addrs buffer and count the number of addresses. */
        addr_buf = kaddrs;
        while (walk_size < addrs_size) {
+               if (walk_size + sizeof(sa_family_t) > addrs_size) {
+                       err = -EINVAL;
+                       goto out_free;
+               }
                sa_addr = (union sctp_addr *)addr_buf;
                af = sctp_get_af_specific(sa_addr->sa.sa_family);
-               port = ntohs(sa_addr->v4.sin_port);
  
                /* If the address family is not supported or if this address
                 * causes the address buffer to overflow return EINVAL.
                        goto out_free;
                }
  
+               port = ntohs(sa_addr->v4.sin_port);
                /* Save current address so we can work with it */
                memcpy(&to, sa_addr, af->sockaddr_len);
  
@@@ -2460,8 -2469,9 +2471,8 @@@ static int sctp_setsockopt_delayed_ack(
                if (params.sack_delay == 0 && params.sack_freq == 0)
                        return 0;
        } else if (optlen == sizeof(struct sctp_assoc_value)) {
 -              printk(KERN_WARNING "SCTP: Use of struct sctp_assoc_value "
 -                     "in delayed_ack socket option deprecated\n");
 -              printk(KERN_WARNING "SCTP: Use struct sctp_sack_info instead\n");
 +              pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n");
 +              pr_warn("Use struct sctp_sack_info instead\n");
                if (copy_from_user(&params, optval, optlen))
                        return -EFAULT;
  
@@@ -2869,8 -2879,10 +2880,8 @@@ static int sctp_setsockopt_maxseg(struc
        int val;
  
        if (optlen == sizeof(int)) {
 -              printk(KERN_WARNING
 -                 "SCTP: Use of int in maxseg socket option deprecated\n");
 -              printk(KERN_WARNING
 -                 "SCTP: Use struct sctp_assoc_value instead\n");
 +              pr_warn("Use of int in maxseg socket option deprecated\n");
 +              pr_warn("Use struct sctp_assoc_value instead\n");
                if (copy_from_user(&val, optval, optlen))
                        return -EFAULT;
                params.assoc_id = 0;
@@@ -3120,8 -3132,10 +3131,8 @@@ static int sctp_setsockopt_maxburst(str
        int assoc_id = 0;
  
        if (optlen == sizeof(int)) {
 -              printk(KERN_WARNING
 -                 "SCTP: Use of int in max_burst socket option deprecated\n");
 -              printk(KERN_WARNING
 -                 "SCTP: Use struct sctp_assoc_value instead\n");
 +              pr_warn("Use of int in max_burst socket option deprecated\n");
 +              pr_warn("Use struct sctp_assoc_value instead\n");
                if (copy_from_user(&val, optval, optlen))
                        return -EFAULT;
        } else if (optlen == sizeof(struct sctp_assoc_value)) {
  /* The SCTP ioctl handler. */
  SCTP_STATIC int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
  {
 -      return -ENOIOCTLCMD;
 +      int rc = -ENOTCONN;
 +
 +      sctp_lock_sock(sk);
 +
 +      /*
 +       * SEQPACKET-style sockets in LISTENING state are valid for
 +       * SCTP, so only discard TCP-style sockets in LISTENING state.
 +       */
 +      if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
 +              goto out;
 +
 +      switch (cmd) {
 +      case SIOCINQ: {
 +              struct sk_buff *skb;
 +              unsigned int amount = 0;
 +
 +              skb = skb_peek(&sk->sk_receive_queue);
 +              if (skb != NULL) {
 +                      /*
 +                       * We will only return the amount of this packet since
 +                       * that is all that will be read.
 +                       */
 +                      amount = skb->len;
 +              }
 +              rc = put_user(amount, (int __user *)arg);
 +              break;
 +      }
 +      default:
 +              rc = -ENOIOCTLCMD;
 +              break;
 +      }
 +out:
 +      sctp_release_sock(sk);
 +      return rc;
  }
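
With the handler above in place, SIOCINQ (an alias for FIONREAD) on an SCTP socket reports the length of the next queued message instead of failing as an unsupported ioctl. A minimal user-space sketch, assuming fd is an open SCTP socket:

#include <sys/ioctl.h>
#include <linux/sockios.h>
#include <stdio.h>

static void print_next_msg_len(int fd)
{
	int pending = 0;

	if (ioctl(fd, SIOCINQ, &pending) < 0) {
		perror("SIOCINQ");
		return;
	}
	/* Length of the first skb on the receive queue; 0 if nothing is queued. */
	printf("next message: %d bytes\n", pending);
}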
  
  /* This is the function which gets called during socket creation to
@@@ -3884,7 -3865,7 +3895,7 @@@ static int sctp_getsockopt_sctp_status(
        }
  
  out:
 -      return (retval);
 +      return retval;
  }
  
  
@@@ -3940,7 -3921,7 +3951,7 @@@ static int sctp_getsockopt_peer_addr_in
        }
  
  out:
 -      return (retval);
 +      return retval;
  }
  
  /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
@@@ -4311,8 -4292,9 +4322,8 @@@ static int sctp_getsockopt_delayed_ack(
                if (copy_from_user(&params, optval, len))
                        return -EFAULT;
        } else if (len == sizeof(struct sctp_assoc_value)) {
 -              printk(KERN_WARNING "SCTP: Use of struct sctp_assoc_value "
 -                     "in delayed_ack socket option deprecated\n");
 -              printk(KERN_WARNING "SCTP: Use struct sctp_sack_info instead\n");
 +              pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n");
 +              pr_warn("Use struct sctp_sack_info instead\n");
                if (copy_from_user(&params, optval, len))
                        return -EFAULT;
        } else
@@@ -4958,8 -4940,10 +4969,8 @@@ static int sctp_getsockopt_maxseg(struc
        struct sctp_association *asoc;
  
        if (len == sizeof(int)) {
 -              printk(KERN_WARNING
 -                 "SCTP: Use of int in maxseg socket option deprecated\n");
 -              printk(KERN_WARNING
 -                 "SCTP: Use struct sctp_assoc_value instead\n");
 +              pr_warn("Use of int in maxseg socket option deprecated\n");
 +              pr_warn("Use struct sctp_assoc_value instead\n");
                params.assoc_id = 0;
        } else if (len >= sizeof(struct sctp_assoc_value)) {
                len = sizeof(struct sctp_assoc_value);
@@@ -5050,8 -5034,10 +5061,8 @@@ static int sctp_getsockopt_maxburst(str
        struct sctp_association *asoc;
  
        if (len == sizeof(int)) {
 -              printk(KERN_WARNING
 -                 "SCTP: Use of int in max_burst socket option deprecated\n");
 -              printk(KERN_WARNING
 -                 "SCTP: Use struct sctp_assoc_value instead\n");
 +              pr_warn("Use of int in max_burst socket option deprecated\n");
 +              pr_warn("Use struct sctp_assoc_value instead\n");
                params.assoc_id = 0;
        } else if (len >= sizeof(struct sctp_assoc_value)) {
                len = sizeof(struct sctp_assoc_value);
@@@ -5594,7 -5580,7 +5605,7 @@@ static int sctp_get_port(struct sock *s
        /* Note: sk->sk_num gets filled in if ephemeral port request. */
        ret = sctp_get_port_local(sk, &addr);
  
 -      return (ret ? 1 : 0);
 +      return ret ? 1 : 0;
  }
  
  /*
@@@ -5611,7 -5597,8 +5622,7 @@@ SCTP_STATIC int sctp_listen_start(struc
                tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
                if (IS_ERR(tfm)) {
                        if (net_ratelimit()) {
 -                              printk(KERN_INFO
 -                                     "SCTP: failed to load transform for %s: %ld\n",
 +                              pr_info("failed to load transform for %s: %ld\n",
                                        sctp_hmac_alg, PTR_ERR(tfm));
                        }
                        return -ENOSYS;
@@@ -5740,12 -5727,13 +5751,12 @@@ unsigned int sctp_poll(struct file *fil
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= POLLERR;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
 -              mask |= POLLRDHUP;
 +              mask |= POLLRDHUP | POLLIN | POLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= POLLHUP;
  
        /* Is it readable?  Reconsider this code with TCP-style support.  */
 -      if (!skb_queue_empty(&sk->sk_receive_queue) ||
 -          (sk->sk_shutdown & RCV_SHUTDOWN))
 +      if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;
  
        /* The association is either gone or not ready.  */