Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
author    David S. Miller <davem@davemloft.net>
Mon, 11 Oct 2010 19:30:34 +0000 (12:30 -0700)
committer David S. Miller <davem@davemloft.net>
Mon, 11 Oct 2010 19:30:34 +0000 (12:30 -0700)
Conflicts:
net/core/ethtool.c

drivers/atm/iphase.c
drivers/net/r8169.c
drivers/net/wimax/i2400m/rx.c
net/core/ethtool.c

diff --combined drivers/atm/iphase.c
index 8b358d7d958fbc17aeb5b9be4b5b7983700732e0,8cb0347dec2848e4d6c33f5276b9ba986a9f3fac..9309d4724e1322effe4010042dc0a3136a432ebb
@@@ -220,7 -220,7 +220,7 @@@ static u16 get_desc (IADEV *dev, struc
    while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
       dev->ffL.tcq_rd += 2;
       if (dev->ffL.tcq_rd > dev->ffL.tcq_ed) 
 -     dev->ffL.tcq_rd = dev->ffL.tcq_st;
 +      dev->ffL.tcq_rd = dev->ffL.tcq_st;
       if (dev->ffL.tcq_rd == dev->host_tcq_wr) 
          return 0xFFFF; 
       desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
@@@ -3156,7 -3156,6 +3156,6 @@@ static int __devinit ia_init_one(struc
  {  
        struct atm_dev *dev;  
        IADEV *iadev;  
-         unsigned long flags;
        int ret;
  
        iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
        ia_dev[iadev_count] = iadev;
        _ia_dev[iadev_count] = dev;
        iadev_count++;
-       spin_lock_init(&iadev->misc_lock);
-       /* First fixes first. I don't want to think about this now. */
-       spin_lock_irqsave(&iadev->misc_lock, flags); 
        if (ia_init(dev) || ia_start(dev)) {  
                IF_INIT(printk("IA register failed!\n");)
                iadev_count--;
                ia_dev[iadev_count] = NULL;
                _ia_dev[iadev_count] = NULL;
-               spin_unlock_irqrestore(&iadev->misc_lock, flags); 
                ret = -EINVAL;
                goto err_out_deregister_dev;
        }
-       spin_unlock_irqrestore(&iadev->misc_lock, flags); 
        IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
  
        iadev->next_board = ia_boards;  
diff --combined drivers/net/r8169.c
index fe3b7622fba05445d265e26ed343e71626e89856,992db2fa136e9c5e6f5130c053393aa662d4e5c9..bc669a40ae963a8498e7e52e6856c199cf7b9a73
@@@ -1076,12 -1076,7 +1076,12 @@@ static int rtl8169_rx_vlan_skb(struct r
        int ret;
  
        if (vlgrp && (opts2 & RxVlanTag)) {
 -              __vlan_hwaccel_rx(skb, vlgrp, swab16(opts2 & 0xffff), polling);
 +              u16 vtag = swab16(opts2 & 0xffff);
 +
 +              if (likely(polling))
 +                      vlan_gro_receive(&tp->napi, vlgrp, vtag, skb);
 +              else
 +                      __vlan_hwaccel_rx(skb, vlgrp, vtag, polling);
                ret = 0;
        } else
                ret = -1;
@@@ -1217,7 -1212,8 +1217,8 @@@ static void rtl8169_update_counters(str
        if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
                return;
  
-       counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr);
+       counters = dma_alloc_coherent(&tp->pci_dev->dev, sizeof(*counters),
+                                     &paddr, GFP_KERNEL);
        if (!counters)
                return;
  
        RTL_W32(CounterAddrLow, 0);
        RTL_W32(CounterAddrHigh, 0);
  
-       pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr);
+       dma_free_coherent(&tp->pci_dev->dev, sizeof(*counters), counters,
+                         paddr);
  }
  
  static void rtl8169_get_ethtool_stats(struct net_device *dev,
@@@ -3191,7 -3188,6 +3193,7 @@@ rtl8169_init_one(struct pci_dev *pdev, 
  #ifdef CONFIG_R8169_VLAN
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
  #endif
 +      dev->features |= NETIF_F_GRO;
  
        tp->intr_mask = 0xffff;
        tp->align = cfg->align;
@@@ -3298,15 -3294,15 +3300,15 @@@ static int rtl8169_open(struct net_devi
  
        /*
         * Rx and Tx desscriptors needs 256 bytes alignment.
-        * pci_alloc_consistent provides more.
+        * dma_alloc_coherent provides more.
         */
-       tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
-                                              &tp->TxPhyAddr);
+       tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
+                                            &tp->TxPhyAddr, GFP_KERNEL);
        if (!tp->TxDescArray)
                goto err_pm_runtime_put;
  
-       tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
-                                              &tp->RxPhyAddr);
+       tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
+                                            &tp->RxPhyAddr, GFP_KERNEL);
        if (!tp->RxDescArray)
                goto err_free_tx_0;
  
  err_release_ring_2:
        rtl8169_rx_clear(tp);
  err_free_rx_1:
-       pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
-                           tp->RxPhyAddr);
+       dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+                         tp->RxPhyAddr);
        tp->RxDescArray = NULL;
  err_free_tx_0:
-       pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
-                           tp->TxPhyAddr);
+       dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
+                         tp->TxPhyAddr);
        tp->TxDescArray = NULL;
  err_pm_runtime_put:
        pm_runtime_put_noidle(&pdev->dev);
@@@ -3981,7 -3977,7 +3983,7 @@@ static void rtl8169_free_rx_skb(struct 
  {
        struct pci_dev *pdev = tp->pci_dev;
  
-       pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
+       dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
                         PCI_DMA_FROMDEVICE);
        dev_kfree_skb(*sk_buff);
        *sk_buff = NULL;
@@@ -4006,7 -4002,7 +4008,7 @@@ static inline void rtl8169_map_to_asic(
  static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
                                            struct net_device *dev,
                                            struct RxDesc *desc, int rx_buf_sz,
-                                           unsigned int align)
+                                           unsigned int align, gfp_t gfp)
  {
        struct sk_buff *skb;
        dma_addr_t mapping;
  
        pad = align ? align : NET_IP_ALIGN;
  
-       skb = netdev_alloc_skb(dev, rx_buf_sz + pad);
+       skb = __netdev_alloc_skb(dev, rx_buf_sz + pad, gfp);
        if (!skb)
                goto err_out;
  
        skb_reserve(skb, align ? ((pad - 1) & (unsigned long)skb->data) : pad);
  
-       mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
+       mapping = dma_map_single(&pdev->dev, skb->data, rx_buf_sz,
                                 PCI_DMA_FROMDEVICE);
  
        rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
@@@ -4045,7 -4041,7 +4047,7 @@@ static void rtl8169_rx_clear(struct rtl
  }
  
  static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
-                          u32 start, u32 end)
+                          u32 start, u32 end, gfp_t gfp)
  {
        u32 cur;
  
  
                skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
                                           tp->RxDescArray + i,
-                                          tp->rx_buf_sz, tp->align);
+                                          tp->rx_buf_sz, tp->align, gfp);
                if (!skb)
                        break;
  
@@@ -4088,7 -4084,7 +4090,7 @@@ static int rtl8169_init_ring(struct net
        memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
        memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
  
-       if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
+       if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC, GFP_KERNEL) != NUM_RX_DESC)
                goto err_out;
  
        rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
@@@ -4105,7 -4101,8 +4107,8 @@@ static void rtl8169_unmap_tx_skb(struc
  {
        unsigned int len = tx_skb->len;
  
-       pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
+       dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), len,
+                        PCI_DMA_TODEVICE);
        desc->opts1 = 0x00;
        desc->opts2 = 0x00;
        desc->addr = 0x00;
@@@ -4249,7 -4246,8 +4252,8 @@@ static int rtl8169_xmit_frags(struct rt
                txd = tp->TxDescArray + entry;
                len = frag->size;
                addr = ((void *) page_address(frag->page)) + frag->page_offset;
-               mapping = pci_map_single(tp->pci_dev, addr, len, PCI_DMA_TODEVICE);
+               mapping = dma_map_single(&tp->pci_dev->dev, addr, len,
+                                        PCI_DMA_TODEVICE);
  
                /* anti gcc 2.95.3 bugware (sic) */
                status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
@@@ -4319,7 -4317,8 +4323,8 @@@ static netdev_tx_t rtl8169_start_xmit(s
                tp->tx_skb[entry].skb = skb;
        }
  
-       mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+       mapping = dma_map_single(&tp->pci_dev->dev, skb->data, len,
+                                PCI_DMA_TODEVICE);
  
        tp->tx_skb[entry].len = len;
        txd->addr = cpu_to_le64(mapping);
@@@ -4456,8 -4455,9 +4461,8 @@@ static inline int rtl8169_fragmented_fr
        return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
  }
  
 -static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc)
 +static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
  {
 -      u32 opts1 = le32_to_cpu(desc->opts1);
        u32 status = opts1 & RxProtoMask;
  
        if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
            ((status == RxProtoIP) && !(opts1 & IPFail)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
 -              skb->ip_summed = CHECKSUM_NONE;
 +              skb_checksum_none_assert(skb);
  }
  
  static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff,
        if (!skb)
                goto out;
  
-       pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size,
-                                   PCI_DMA_FROMDEVICE);
+       dma_sync_single_for_cpu(&tp->pci_dev->dev, addr, pkt_size,
+                               PCI_DMA_FROMDEVICE);
        skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
        *sk_buff = skb;
        done = true;
@@@ -4551,23 -4551,24 +4556,23 @@@ static int rtl8169_rx_interrupt(struct 
                                continue;
                        }
  
 -                      rtl8169_rx_csum(skb, desc);
 -
                        if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) {
-                               pci_dma_sync_single_for_device(pdev, addr,
+                               dma_sync_single_for_device(&pdev->dev, addr,
                                        pkt_size, PCI_DMA_FROMDEVICE);
                                rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
                        } else {
-                               pci_unmap_single(pdev, addr, tp->rx_buf_sz,
+                               dma_unmap_single(&pdev->dev, addr, tp->rx_buf_sz,
                                                 PCI_DMA_FROMDEVICE);
                                tp->Rx_skbuff[entry] = NULL;
                        }
  
 +                      rtl8169_rx_csum(skb, status);
                        skb_put(skb, pkt_size);
                        skb->protocol = eth_type_trans(skb, dev);
  
                        if (rtl8169_rx_vlan_skb(tp, desc, skb, polling) < 0) {
                                if (likely(polling))
 -                                      netif_receive_skb(skb);
 +                                      napi_gro_receive(&tp->napi, skb);
                                else
                                        netif_rx(skb);
                        }
        count = cur_rx - tp->cur_rx;
        tp->cur_rx = cur_rx;
  
-       delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
+       delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, GFP_ATOMIC);
        if (!delta && count)
                netif_info(tp, intr, dev, "no Rx buffer allocated\n");
        tp->dirty_rx += delta;
@@@ -4773,10 -4774,10 +4778,10 @@@ static int rtl8169_close(struct net_dev
  
        free_irq(dev->irq, dev);
  
-       pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
-                           tp->RxPhyAddr);
-       pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
-                           tp->TxPhyAddr);
+       dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
+                         tp->RxPhyAddr);
+       dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
+                         tp->TxPhyAddr);
        tp->TxDescArray = NULL;
        tp->RxDescArray = NULL;
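
The r8169 hunks above convert the driver from the legacy pci_* DMA wrappers (pci_alloc_consistent, pci_map_single, ...) to the generic DMA API operating on &pdev->dev, which also lets callers pass an explicit gfp_t (GFP_KERNEL at open time, GFP_ATOMIC from the rx refill path). A minimal sketch of that conversion pattern follows; the helper names and RING_BYTES constant are illustrative only, not taken from the driver:

  #include <linux/pci.h>
  #include <linux/dma-mapping.h>

  #define RING_BYTES 4096   /* illustrative size, not the r8169 ring size */

  /* Old style (what the diff removes):
   *   ring = pci_alloc_consistent(pdev, RING_BYTES, &ring_dma);
   *   pci_free_consistent(pdev, RING_BYTES, ring, ring_dma);
   */
  static void *ring_alloc(struct pci_dev *pdev, dma_addr_t *ring_dma)
  {
          /* New style: use the generic struct device and pass the gfp explicitly. */
          return dma_alloc_coherent(&pdev->dev, RING_BYTES, ring_dma, GFP_KERNEL);
  }

  static void ring_free(struct pci_dev *pdev, void *ring, dma_addr_t ring_dma)
  {
          dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_dma);
  }
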
  
diff --combined drivers/net/wimax/i2400m/rx.c
index c4876d029201c979dfad9f59d18a2d7be97383c5,1737d1488b35704f3196975a76bcc919e943808b..844133b44af04eb8b36436c8c7147e8c995685a4
@@@ -922,7 -922,7 +922,7 @@@ void i2400m_roq_queue_update_ws(struct 
   * rx_roq_refcount becomes zero. This routine gets executed when
   * rx_roq_refcount becomes zero.
   */
 -void i2400m_rx_roq_destroy(struct kref *ref)
 +static void i2400m_rx_roq_destroy(struct kref *ref)
  {
        unsigned itr;
        struct i2400m *i2400m
@@@ -1244,16 -1244,16 +1244,16 @@@ int i2400m_rx(struct i2400m *i2400m, st
        int i, result;
        struct device *dev = i2400m_dev(i2400m);
        const struct i2400m_msg_hdr *msg_hdr;
-       size_t pl_itr, pl_size, skb_len;
+       size_t pl_itr, pl_size;
        unsigned long flags;
-       unsigned num_pls, single_last;
+       unsigned num_pls, single_last, skb_len;
  
        skb_len = skb->len;
-       d_fnstart(4, dev, "(i2400m %p skb %p [size %zu])\n",
+       d_fnstart(4, dev, "(i2400m %p skb %p [size %u])\n",
                  i2400m, skb, skb_len);
        result = -EIO;
        msg_hdr = (void *) skb->data;
-       result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb->len);
+       result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb_len);
        if (result < 0)
                goto error_msg_hdr_check;
        result = -EIO;
        pl_itr = sizeof(*msg_hdr) +     /* Check payload descriptor(s) */
                num_pls * sizeof(msg_hdr->pld[0]);
        pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN);
-       if (pl_itr > skb->len) {        /* got all the payload descriptors? */
+       if (pl_itr > skb_len) { /* got all the payload descriptors? */
                dev_err(dev, "RX: HW BUG? message too short (%u bytes) for "
                        "%u payload descriptors (%zu each, total %zu)\n",
-                       skb->len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
+                       skb_len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
                goto error_pl_descr_short;
        }
        /* Walk each payload payload--check we really got it */
                /* work around old gcc warnings */
                pl_size = i2400m_pld_size(&msg_hdr->pld[i]);
                result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i],
-                                                 pl_itr, skb->len);
+                                                 pl_itr, skb_len);
                if (result < 0)
                        goto error_pl_descr_check;
                single_last = num_pls == 1 || i == num_pls - 1;
        if (i < i2400m->rx_pl_min)
                i2400m->rx_pl_min = i;
        i2400m->rx_num++;
-       i2400m->rx_size_acc += skb->len;
-       if (skb->len < i2400m->rx_size_min)
-               i2400m->rx_size_min = skb->len;
-       if (skb->len > i2400m->rx_size_max)
-               i2400m->rx_size_max = skb->len;
+       i2400m->rx_size_acc += skb_len;
+       if (skb_len < i2400m->rx_size_min)
+               i2400m->rx_size_min = skb_len;
+       if (skb_len > i2400m->rx_size_max)
+               i2400m->rx_size_max = skb_len;
        spin_unlock_irqrestore(&i2400m->rx_lock, flags);
  error_pl_descr_check:
  error_pl_descr_short:
  error_msg_hdr_check:
-       d_fnend(4, dev, "(i2400m %p skb %p [size %zu]) = %d\n",
+       d_fnend(4, dev, "(i2400m %p skb %p [size %u]) = %d\n",
                i2400m, skb, skb_len, result);
        return result;
  }
diff --combined net/core/ethtool.c
index 7d7e572cedc7edb36b54f28de1c327629746d5b4,8451ab481095fc523c47fa01ccb11392b15dbc30..685c7005e87fe928df83c780f1c7aa8552f5117e
@@@ -19,7 -19,6 +19,7 @@@
  #include <linux/netdevice.h>
  #include <linux/bitops.h>
  #include <linux/uaccess.h>
 +#include <linux/vmalloc.h>
  #include <linux/slab.h>
  
  /*
@@@ -206,24 -205,18 +206,24 @@@ static noinline_for_stack int ethtool_g
        struct ethtool_drvinfo info;
        const struct ethtool_ops *ops = dev->ethtool_ops;
  
 -      if (!ops->get_drvinfo)
 -              return -EOPNOTSUPP;
 -
        memset(&info, 0, sizeof(info));
        info.cmd = ETHTOOL_GDRVINFO;
 -      ops->get_drvinfo(dev, &info);
 +      if (ops && ops->get_drvinfo) {
 +              ops->get_drvinfo(dev, &info);
 +      } else if (dev->dev.parent && dev->dev.parent->driver) {
 +              strlcpy(info.bus_info, dev_name(dev->dev.parent),
 +                      sizeof(info.bus_info));
 +              strlcpy(info.driver, dev->dev.parent->driver->name,
 +                      sizeof(info.driver));
 +      } else {
 +              return -EOPNOTSUPP;
 +      }
  
        /*
         * this method of obtaining string set info is deprecated;
         * Use ETHTOOL_GSSET_INFO instead.
         */
 -      if (ops->get_sset_count) {
 +      if (ops && ops->get_sset_count) {
                int rc;
  
                rc = ops->get_sset_count(dev, ETH_SS_TEST);
                if (rc >= 0)
                        info.n_priv_flags = rc;
        }
 -      if (ops->get_regs_len)
 +      if (ops && ops->get_regs_len)
                info.regdump_len = ops->get_regs_len(dev);
 -      if (ops->get_eeprom_len)
 +      if (ops && ops->get_eeprom_len)
                info.eedump_len = ops->get_eeprom_len(dev);
  
        if (copy_to_user(useraddr, &info, sizeof(info)))
@@@ -355,7 -348,7 +355,7 @@@ static noinline_for_stack int ethtool_g
        if (info.cmd == ETHTOOL_GRXCLSRLALL) {
                if (info.rule_cnt > 0) {
                        if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
-                               rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
+                               rule_buf = kzalloc(info.rule_cnt * sizeof(u32),
                                                   GFP_USER);
                        if (!rule_buf)
                                return -ENOMEM;
@@@ -404,7 -397,7 +404,7 @@@ static noinline_for_stack int ethtool_g
            (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index))
                return -ENOMEM;
        full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size;
-       indir = kmalloc(full_size, GFP_USER);
+       indir = kzalloc(full_size, GFP_USER);
        if (!indir)
                return -ENOMEM;
  
@@@ -486,38 -479,6 +486,38 @@@ static void __rx_ntuple_filter_add(stru
        list->count++;
  }
  
 +/*
 + * ethtool does not (or did not) set masks for flow parameters that are
 + * not specified, so if both value and mask are 0 then this must be
 + * treated as equivalent to a mask with all bits set.  Implement that
 + * here rather than in drivers.
 + */
 +static void rx_ntuple_fix_masks(struct ethtool_rx_ntuple_flow_spec *fs)
 +{
 +      struct ethtool_tcpip4_spec *entry = &fs->h_u.tcp_ip4_spec;
 +      struct ethtool_tcpip4_spec *mask = &fs->m_u.tcp_ip4_spec;
 +
 +      if (fs->flow_type != TCP_V4_FLOW &&
 +          fs->flow_type != UDP_V4_FLOW &&
 +          fs->flow_type != SCTP_V4_FLOW)
 +              return;
 +
 +      if (!(entry->ip4src | mask->ip4src))
 +              mask->ip4src = htonl(0xffffffff);
 +      if (!(entry->ip4dst | mask->ip4dst))
 +              mask->ip4dst = htonl(0xffffffff);
 +      if (!(entry->psrc | mask->psrc))
 +              mask->psrc = htons(0xffff);
 +      if (!(entry->pdst | mask->pdst))
 +              mask->pdst = htons(0xffff);
 +      if (!(entry->tos | mask->tos))
 +              mask->tos = 0xff;
 +      if (!(fs->vlan_tag | fs->vlan_tag_mask))
 +              fs->vlan_tag_mask = 0xffff;
 +      if (!(fs->data | fs->data_mask))
 +              fs->data_mask = 0xffffffffffffffffULL;
 +}
 +
  static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
                                                    void __user *useraddr)
  {
        if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
                return -EFAULT;
  
 +      rx_ntuple_fix_masks(&cmd.fs);
 +
        /*
         * Cache filter in dev struct for GET operation only if
         * the underlying driver doesn't have its own GET operation, and
@@@ -579,7 -538,7 +579,7 @@@ static int ethtool_get_rx_ntuple(struc
  
        gstrings.len = ret;
  
-       data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+       data = kzalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
        if (!data)
                return -ENOMEM;
  
                        break;
                case IP_USER_FLOW:
                        sprintf(p, "\tSrc IP addr: 0x%x\n",
 -                              fsc->fs.h_u.raw_ip4_spec.ip4src);
 +                              fsc->fs.h_u.usr_ip4_spec.ip4src);
                        p += ETH_GSTRING_LEN;
                        num_strings++;
                        sprintf(p, "\tSrc IP mask: 0x%x\n",
 -                              fsc->fs.m_u.raw_ip4_spec.ip4src);
 +                              fsc->fs.m_u.usr_ip4_spec.ip4src);
                        p += ETH_GSTRING_LEN;
                        num_strings++;
                        sprintf(p, "\tDest IP addr: 0x%x\n",
 -                              fsc->fs.h_u.raw_ip4_spec.ip4dst);
 +                              fsc->fs.h_u.usr_ip4_spec.ip4dst);
                        p += ETH_GSTRING_LEN;
                        num_strings++;
                        sprintf(p, "\tDest IP mask: 0x%x\n",
 -                              fsc->fs.m_u.raw_ip4_spec.ip4dst);
 +                              fsc->fs.m_u.usr_ip4_spec.ip4dst);
                        p += ETH_GSTRING_LEN;
                        num_strings++;
                        break;
@@@ -816,7 -775,7 +816,7 @@@ static int ethtool_get_regs(struct net_
        if (regs.len > reglen)
                regs.len = reglen;
  
 -      regbuf = kzalloc(reglen, GFP_USER);
 +      regbuf = vmalloc(reglen);
        if (!regbuf)
                return -ENOMEM;
  
        ret = 0;
  
   out:
 -      kfree(regbuf);
 +      vfree(regbuf);
        return ret;
  }
  
@@@ -1216,11 -1175,8 +1216,11 @@@ static int ethtool_set_gro(struct net_d
                return -EFAULT;
  
        if (edata.data) {
 -              if (!dev->ethtool_ops->get_rx_csum ||
 -                  !dev->ethtool_ops->get_rx_csum(dev))
 +              u32 rxcsum = dev->ethtool_ops->get_rx_csum ?
 +                              dev->ethtool_ops->get_rx_csum(dev) :
 +                              ethtool_op_get_rx_csum(dev);
 +
 +              if (!rxcsum)
                        return -EINVAL;
                dev->features |= NETIF_F_GRO;
        } else
@@@ -1446,22 -1402,14 +1446,22 @@@ int dev_ethtool(struct net *net, struc
        if (!dev || !netif_device_present(dev))
                return -ENODEV;
  
 -      if (!dev->ethtool_ops)
 -              return -EOPNOTSUPP;
 -
        if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
                return -EFAULT;
  
 +      if (!dev->ethtool_ops) {
 +              /* ETHTOOL_GDRVINFO does not require any driver support.
 +               * It is also unprivileged and does not change anything,
 +               * so we can take a shortcut to it. */
 +              if (ethcmd == ETHTOOL_GDRVINFO)
 +                      return ethtool_get_drvinfo(dev, useraddr);
 +              else
 +                      return -EOPNOTSUPP;
 +      }
 +
        /* Allow some commands to be done by anyone */
        switch (ethcmd) {
 +      case ETHTOOL_GSET:
        case ETHTOOL_GDRVINFO:
        case ETHTOOL_GMSGLVL:
        case ETHTOOL_GCOALESCE: