From: David S. Miller Date: Thu, 4 Nov 2010 01:52:32 +0000 (-0700) Subject: Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6 X-Git-Url: https://bbs.cooldavid.org/git/?a=commitdiff_plain;h=758cb41106e87d7e26ef3ee78f04360168460b9d;hp=b5f15ac4f89f84853544c934fc7a744289e95e34;p=net-next-2.6.git Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-2.6 --- diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c index 40b914bded8..2e72227bd07 100644 --- a/drivers/isdn/hisax/isar.c +++ b/drivers/isdn/hisax/isar.c @@ -1427,8 +1427,8 @@ modeisar(struct BCState *bcs, int mode, int bc) &bcs->hw.isar.reg->Flags)) bcs->hw.isar.dpath = 1; else { - printk(KERN_WARNING"isar modeisar analog funktions only with DP1\n"); - debugl1(cs, "isar modeisar analog funktions only with DP1"); + printk(KERN_WARNING"isar modeisar analog functions only with DP1\n"); + debugl1(cs, "isar modeisar analog functions only with DP1"); return(1); } break; diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index 3232206406b..7446d8b4282 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c @@ -392,6 +392,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) if (dev) { struct mISDN_devinfo di; + memset(&di, 0, sizeof(di)); di.id = dev->id; di.Dprotocols = dev->Dprotocols; di.Bprotocols = dev->Bprotocols | get_all_Bprotocols(); @@ -672,6 +673,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) if (dev) { struct mISDN_devinfo di; + memset(&di, 0, sizeof(di)); di.id = dev->id; di.Dprotocols = dev->Dprotocols; di.Bprotocols = dev->Bprotocols | get_all_Bprotocols(); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 77c1fab7d77..f24179d98db 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2520,6 +2520,7 @@ source "drivers/net/stmmac/Kconfig" config PCH_GBE tristate "PCH Gigabit Ethernet" depends on PCI + select MII ---help--- This is a gigabit ethernet driver for Topcliff PCH. 
Topcliff PCH is the platform controller hub that is used in Intel's diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c index 3134e532623..8cb27cb7bca 100644 --- a/drivers/net/atarilance.c +++ b/drivers/net/atarilance.c @@ -407,7 +407,7 @@ static noinline int __init addr_accessible(volatile void *regp, int wordflag, int writeflag) { int ret; - long flags; + unsigned long flags; long *vbr, save_berr; local_irq_save(flags); diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 43579b3b24a..53363108994 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -3043,7 +3043,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev, atl1_pcie_patch(adapter); /* assume we have no link for now */ netif_carrier_off(netdev); - netif_stop_queue(netdev); setup_timer(&adapter->phy_config_timer, atl1_phy_config, (unsigned long)adapter); diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index 9eea225deca..863e73a85fb 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h @@ -20,8 +20,8 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.60.00-3" -#define DRV_MODULE_RELDATE "2010/10/19" +#define DRV_MODULE_VERSION "1.60.00-4" +#define DRV_MODULE_RELDATE "2010/11/01" #define BNX2X_BC_VER 0x040200 #define BNX2X_MULTI_QUEUE diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h index 18c8e23a0e8..4cfd4e9b558 100644 --- a/drivers/net/bnx2x/bnx2x_hsi.h +++ b/drivers/net/bnx2x/bnx2x_hsi.h @@ -244,7 +244,14 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ u16 xgxs_config_tx[4]; /* 0x1A0 */ - u32 Reserved1[57]; /* 0x1A8 */ + u32 Reserved1[56]; /* 0x1A8 */ + u32 default_cfg; /* 0x288 */ + /* Enable BAM on KR */ +#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000 +#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20 +#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000 +#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000 + u32 speed_capability_mask2; /* 0x28C */ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0 diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c index 2326774df84..58091961925 100644 --- a/drivers/net/bnx2x/bnx2x_link.c +++ b/drivers/net/bnx2x/bnx2x_link.c @@ -610,7 +610,7 @@ static u8 bnx2x_bmac_enable(struct link_params *params, /* reset and unreset the BigMac */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); - udelay(10); + msleep(1); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); @@ -3525,13 +3525,19 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1); /* Enable CL37 BAM */ - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_8073_BAM, &val); - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_8073_BAM, val | 1); + if (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. 
+ port_hw_config[params->port].default_cfg)) & + PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) { + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8073_BAM, &val); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8073_BAM, val | 1); + DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n"); + } if (params->loopback_mode == LOOPBACK_EXT) { bnx2x_807x_force_10G(bp, phy); DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n"); @@ -5302,7 +5308,7 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; u16 autoneg_val, an_1000_val, an_10_100_val; - bnx2x_wait_reset_complete(bp, phy); + bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, 1 << NIG_LATCH_BC_ENABLE_MI_INT); @@ -5431,6 +5437,7 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy, /* HW reset */ bnx2x_ext_phy_hw_reset(bp, params->port); + bnx2x_wait_reset_complete(bp, phy); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); return bnx2x_848xx_cmn_config_init(phy, params, vars); @@ -5441,7 +5448,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy, struct link_vars *vars) { struct bnx2x *bp = params->bp; - u8 port = params->port, initialize = 1; + u8 port, initialize = 1; u16 val; u16 temp; u32 actual_phy_selection; @@ -5450,11 +5457,16 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy, /* This is just for MDIO_CTL_REG_84823_MEDIA register. */ msleep(1); + if (CHIP_IS_E2(bp)) + port = BP_PATH(bp); + else + port = params->port; bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); - msleep(200); /* 100 is not enough */ - + bnx2x_wait_reset_complete(bp, phy); + /* Wait for GPHY to come out of reset */ + msleep(50); /* BCM84823 requires that XGXS links up first @ 10G for normal behavior */ temp = vars->line_speed; @@ -5625,7 +5637,11 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy, struct link_params *params) { struct bnx2x *bp = params->bp; - u8 port = params->port; + u8 port; + if (CHIP_IS_E2(bp)) + port = BP_PATH(bp); + else + port = params->port; bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, MISC_REGISTERS_GPIO_OUTPUT_LOW, port); @@ -6928,7 +6944,7 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars, u8 reset_ext_phy) { struct bnx2x *bp = params->bp; - u8 phy_index, port = params->port; + u8 phy_index, port = params->port, clear_latch_ind = 0; DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port); /* disable attentions */ vars->link_status = 0; @@ -6966,9 +6982,18 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars, params->phy[phy_index].link_reset( ¶ms->phy[phy_index], params); + if (params->phy[phy_index].flags & + FLAGS_REARM_LATCH_SIGNAL) + clear_latch_ind = 1; } } + if (clear_latch_ind) { + /* Clear latching indication */ + bnx2x_rearm_latch_signal(bp, port, 0); + bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4, + 1 << NIG_LATCH_BC_ENABLE_MI_INT); + } if (params->phy[INT_PHY].link_reset) params->phy[INT_PHY].link_reset( ¶ms->phy[INT_PHY], params); @@ -6999,6 +7024,7 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, s8 port; s8 port_of_path = 0; + bnx2x_ext_phy_hw_reset(bp, 0); /* PART1 - Reset both phys */ for (port = PORT_MAX - 1; port >= PORT_0; port--) { u32 shmem_base, shmem2_base; @@ -7021,7 +7047,8 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, return -EINVAL; } /* disable attentions */ - bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, + bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + + port_of_path*4, (NIG_MASK_XGXS0_LINK_STATUS | 
NIG_MASK_XGXS0_LINK10G | NIG_MASK_SERDES0_LINK_STATUS | @@ -7132,7 +7159,7 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT))); REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val); - bnx2x_ext_phy_hw_reset(bp, 1); + bnx2x_ext_phy_hw_reset(bp, 0); msleep(5); for (port = 0; port < PORT_MAX; port++) { u32 shmem_base, shmem2_base; diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index 8427533fe31..8b4cea57a6c 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c @@ -33,6 +33,9 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Daniel Martensson"); MODULE_DESCRIPTION("CAIF SPI driver"); +/* Returns the number of padding bytes for alignment. */ +#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1))))) + static int spi_loop; module_param(spi_loop, bool, S_IRUGO); MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode."); @@ -41,7 +44,10 @@ MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode."); module_param(spi_frm_align, int, S_IRUGO); MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment."); -/* SPI padding options. */ +/* + * SPI padding options. + * Warning: must be a base of 2 (& operation used) and can not be zero ! + */ module_param(spi_up_head_align, int, S_IRUGO); MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment."); @@ -240,15 +246,13 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf, static const struct file_operations dbgfs_state_fops = { .open = dbgfs_open, .read = dbgfs_state, - .owner = THIS_MODULE, - .llseek = default_llseek, + .owner = THIS_MODULE }; static const struct file_operations dbgfs_frame_fops = { .open = dbgfs_open, .read = dbgfs_frame, - .owner = THIS_MODULE, - .llseek = default_llseek, + .owner = THIS_MODULE }; static inline void dev_debugfs_add(struct cfspi *cfspi) @@ -337,6 +341,9 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len) u8 *dst = buf; caif_assert(buf); + if (cfspi->slave && !cfspi->slave_talked) + cfspi->slave_talked = true; + do { struct sk_buff *skb; struct caif_payload_info *info; @@ -357,8 +364,8 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len) * Compute head offset i.e. number of bytes to add to * get the start of the payload aligned. */ - if (spi_up_head_align) { - spad = 1 + ((info->hdr_len + 1) & spi_up_head_align); + if (spi_up_head_align > 1) { + spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align); *dst = (u8)(spad - 1); dst += spad; } @@ -373,7 +380,7 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len) * Compute tail offset i.e. number of bytes to add to * get the complete CAIF frame aligned. */ - epad = (skb->len + spad) & spi_up_tail_align; + epad = PAD_POW2((skb->len + spad), spi_up_tail_align); dst += epad; dev_kfree_skb(skb); @@ -417,14 +424,14 @@ int cfspi_xmitlen(struct cfspi *cfspi) * Compute head offset i.e. number of bytes to add to * get the start of the payload aligned. */ - if (spi_up_head_align) - spad = 1 + ((info->hdr_len + 1) & spi_up_head_align); + if (spi_up_head_align > 1) + spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align); /* * Compute tail offset i.e. number of bytes to add to * get the complete CAIF frame aligned. */ - epad = (skb->len + spad) & spi_up_tail_align; + epad = PAD_POW2((skb->len + spad), spi_up_tail_align); if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) { skb_queue_tail(&cfspi->chead, skb); @@ -433,6 +440,7 @@ int cfspi_xmitlen(struct cfspi *cfspi) } else { /* Put back packet. 
*/ skb_queue_head(&cfspi->qhead, skb); + break; } } while (pkts <= CAIF_MAX_SPI_PKTS); @@ -453,6 +461,15 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc) { struct cfspi *cfspi = (struct cfspi *)ifc->priv; + /* + * The slave device is the master on the link. Interrupts before the + * slave has transmitted are considered spurious. + */ + if (cfspi->slave && !cfspi->slave_talked) { + printk(KERN_WARNING "CFSPI: Spurious SS interrupt.\n"); + return; + } + if (!in_interrupt()) spin_lock(&cfspi->lock); if (assert) { @@ -465,7 +482,8 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc) spin_unlock(&cfspi->lock); /* Wake up the xfer thread. */ - wake_up_interruptible(&cfspi->wait); + if (assert) + wake_up_interruptible(&cfspi->wait); } static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc) @@ -523,7 +541,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len) * Compute head offset i.e. number of bytes added to * get the start of the payload aligned. */ - if (spi_down_head_align) { + if (spi_down_head_align > 1) { spad = 1 + *src; src += spad; } @@ -564,7 +582,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len) * Compute tail offset i.e. number of bytes added to * get the complete CAIF frame aligned. */ - epad = (pkt_len + spad) & spi_down_tail_align; + epad = PAD_POW2((pkt_len + spad), spi_down_tail_align); src += epad; } while ((src - buf) < len); @@ -625,11 +643,20 @@ int cfspi_spi_probe(struct platform_device *pdev) cfspi->ndev = ndev; cfspi->pdev = pdev; - /* Set flow info */ + /* Set flow info. */ cfspi->flow_off_sent = 0; cfspi->qd_low_mark = LOW_WATER_MARK; cfspi->qd_high_mark = HIGH_WATER_MARK; + /* Set slave info. */ + if (!strncmp(cfspi_spi_driver.driver.name, "cfspi_sspi", 10)) { + cfspi->slave = true; + cfspi->slave_talked = false; + } else { + cfspi->slave = false; + cfspi->slave_talked = false; + } + /* Assign the SPI device. */ cfspi->dev = dev; /* Assign the device ifc to this SPI interface. */ diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c index 2111dbfea6f..1b9943a4eda 100644 --- a/drivers/net/caif/caif_spi_slave.c +++ b/drivers/net/caif/caif_spi_slave.c @@ -36,10 +36,15 @@ static inline int forward_to_spi_cmd(struct cfspi *cfspi) #endif int spi_frm_align = 2; -int spi_up_head_align = 1; -int spi_up_tail_align; -int spi_down_head_align = 3; -int spi_down_tail_align = 1; + +/* + * SPI padding options. + * Warning: must be a base of 2 (& operation used) and can not be zero ! + */ +int spi_up_head_align = 1 << 1; +int spi_up_tail_align = 1 << 0; +int spi_down_head_align = 1 << 2; +int spi_down_tail_align = 1 << 1; #ifdef CONFIG_DEBUG_FS static inline void debugfs_store_prev(struct cfspi *cfspi) diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index cee98fa668b..7ef83d06f7e 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -1,7 +1,7 @@ /* * at91_can.c - CAN network driver for AT91 SoC CAN controller * - * (C) 2007 by Hans J. Koch + * (C) 2007 by Hans J. 
Koch * (C) 2008, 2009, 2010 by Marc Kleine-Budde * * This software may be distributed under the terms of the GNU General diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index 55ec324caaf..672718261c6 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c @@ -213,12 +213,12 @@ static DEFINE_PCI_DEVICE_TABLE(pch_pci_tbl) = { }; MODULE_DEVICE_TABLE(pci, pch_pci_tbl); -static inline void pch_can_bit_set(u32 *addr, u32 mask) +static inline void pch_can_bit_set(void __iomem *addr, u32 mask) { iowrite32(ioread32(addr) | mask, addr); } -static inline void pch_can_bit_clear(u32 *addr, u32 mask) +static inline void pch_can_bit_clear(void __iomem *addr, u32 mask) { iowrite32(ioread32(addr) & ~mask, addr); } @@ -1437,7 +1437,7 @@ probe_exit_endev: return rc; } -static struct pci_driver pch_can_pcidev = { +static struct pci_driver pch_can_pci_driver = { .name = "pch_can", .id_table = pch_pci_tbl, .probe = pch_can_probe, @@ -1448,13 +1448,13 @@ static struct pci_driver pch_can_pcidev = { static int __init pch_can_pci_init(void) { - return pci_register_driver(&pch_can_pcidev); + return pci_register_driver(&pch_can_pci_driver); } module_init(pch_can_pci_init); static void __exit pch_can_pci_exit(void) { - pci_unregister_driver(&pch_can_pcidev); + pci_unregister_driver(&pch_can_pci_driver); } module_exit(pch_can_pci_exit); diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 4e3c12371aa..046d846c652 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -3301,7 +3301,6 @@ static int __devinit init_one(struct pci_dev *pdev, pi->rx_offload = T3_RX_CSUM | T3_LRO; pi->port_id = i; netif_carrier_off(netdev); - netif_tx_stop_all_queues(netdev); netdev->irq = pdev->irq; netdev->mem_start = mmio_start; netdev->mem_end = mmio_start + mmio_len - 1; diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c index 5d72bda5438..f9f6645b2e6 100644 --- a/drivers/net/cxgb3/sge.c +++ b/drivers/net/cxgb3/sge.c @@ -296,8 +296,10 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q, if (d->skb) { /* an SGL is present */ if (need_unmap) unmap_skb(d->skb, q, cidx, pdev); - if (d->eop) + if (d->eop) { kfree_skb(d->skb); + d->skb = NULL; + } } ++d; if (++cidx == q->size) { diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c index f17703f410b..f50bc98310f 100644 --- a/drivers/net/cxgb4/cxgb4_main.c +++ b/drivers/net/cxgb4/cxgb4_main.c @@ -3736,7 +3736,6 @@ static int __devinit init_one(struct pci_dev *pdev, __set_bit(i, &adapter->registered_device_map); adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i; - netif_tx_stop_all_queues(adapter->port[i]); } } if (!adapter->registered_device_map) { diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c index 555ecc5a2e9..6de5e2e448a 100644 --- a/drivers/net/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/cxgb4vf/cxgb4vf_main.c @@ -2600,7 +2600,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, pi->xact_addr_filt = -1; pi->rx_offload = RX_CSO; netif_carrier_off(netdev); - netif_tx_stop_all_queues(netdev); netdev->irq = pdev->irq; netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index ca663f19d7d..7236f1a53ba 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c @@ -52,6 +52,10 @@ (ID_LED_DEF1_DEF2)) #define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +#define E1000_BASE1000T_STATUS 10 +#define 
E1000_IDLE_ERROR_COUNT_MASK 0xFF +#define E1000_RECEIVE_ERROR_COUNTER 21 +#define E1000_RECEIVE_ERROR_MAX 0xFFFF #define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */ @@ -1242,6 +1246,39 @@ static s32 e1000_led_on_82574(struct e1000_hw *hw) return 0; } +/** + * e1000_check_phy_82574 - check 82574 phy hung state + * @hw: pointer to the HW structure + * + * Returns whether phy is hung or not + **/ +bool e1000_check_phy_82574(struct e1000_hw *hw) +{ + u16 status_1kbt = 0; + u16 receive_errors = 0; + bool phy_hung = false; + s32 ret_val = 0; + + /* + * Read PHY Receive Error counter first; if it is max - all F's - then + * read the Base1000T status register. If both are max then PHY is hung. + */ + ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors); + + if (ret_val) + goto out; + if (receive_errors == E1000_RECEIVE_ERROR_MAX) { + ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt); + if (ret_val) + goto out; + if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) == + E1000_IDLE_ERROR_COUNT_MASK) + phy_hung = true; + } +out: + return phy_hung; +} + /** * e1000_setup_link_82571 - Setup flow control and link settings * @hw: pointer to the HW structure @@ -1859,6 +1896,7 @@ struct e1000_info e1000_82574_info = { | FLAG_HAS_SMART_POWER_DOWN | FLAG_HAS_AMT | FLAG_HAS_CTRLEXT_ON_LOAD, + .flags2 = FLAG2_CHECK_PHY_HANG, .pba = 36, .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_82571, diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index cee882dd67b..fdc67fead4e 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h @@ -397,6 +397,7 @@ struct e1000_adapter { struct work_struct print_hang_task; bool idle_check; + int phy_hang_count; }; struct e1000_info { @@ -454,6 +455,7 @@ struct e1000_info { #define FLAG2_HAS_EEE (1 << 5) #define FLAG2_DMA_BURST (1 << 6) #define FLAG2_DISABLE_AIM (1 << 8) +#define FLAG2_CHECK_PHY_HANG (1 << 9) #define E1000_RX_DESC_PS(R, i) \ (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) @@ -631,6 +633,7 @@ extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw); extern s32 e1000_check_polarity_ife(struct e1000_hw *hw); extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); extern s32 e1000_check_polarity_igp(struct e1000_hw *hw); +extern bool e1000_check_phy_82574(struct e1000_hw *hw); static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) { diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index ec8cf3f5142..c4ca1629f53 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -4098,6 +4098,25 @@ static void e1000e_enable_receives(struct e1000_adapter *adapter) } } +static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* + * With 82574 controllers, PHY needs to be checked periodically + * for hung state and reset, if two calls return true + */ + if (e1000_check_phy_82574(hw)) + adapter->phy_hang_count++; + else + adapter->phy_hang_count = 0; + + if (adapter->phy_hang_count > 1) { + adapter->phy_hang_count = 0; + schedule_work(&adapter->reset_task); + } +} + /** * e1000_watchdog - Timer Call-back * @data: pointer to adapter cast into an unsigned long @@ -4333,6 +4352,9 @@ link_up: if (e1000e_get_laa_state_82571(hw)) e1000e_rar_set(hw, adapter->hw.mac.addr, 0); + if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) + e1000e_check_82574_phy_workaround(adapter); + /* Reset the timer */ if (!test_bit(__E1000_DOWN, &adapter->state)) 
mod_timer(&adapter->watchdog_timer, @@ -4860,8 +4882,11 @@ static void e1000_reset_task(struct work_struct *work) struct e1000_adapter *adapter; adapter = container_of(work, struct e1000_adapter, reset_task); - e1000e_dump(adapter); - e_err("Reset adapter\n"); + if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && + (adapter->flags & FLAG_RX_RESTART_NOW))) { + e1000e_dump(adapter); + e_err("Reset adapter\n"); + } e1000e_reinit_locked(adapter); } diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index 385dc3204cb..06bb9b79945 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c @@ -2871,7 +2871,6 @@ static int __devinit emac_probe(struct platform_device *ofdev, SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops); netif_carrier_off(ndev); - netif_stop_queue(ndev); err = register_netdev(ndev); if (err) { diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c index ebfaa68ee63..28af019c97b 100644 --- a/drivers/net/igbvf/netdev.c +++ b/drivers/net/igbvf/netdev.c @@ -2783,15 +2783,15 @@ static int __devinit igbvf_probe(struct pci_dev *pdev, /* reset the hardware with the new settings */ igbvf_reset(adapter); - /* tell the stack to leave us alone until igbvf_open() is called */ - netif_carrier_off(netdev); - netif_stop_queue(netdev); - strcpy(netdev->name, "eth%d"); err = register_netdev(netdev); if (err) goto err_hw_init; + /* tell the stack to leave us alone until igbvf_open() is called */ + netif_carrier_off(netdev); + netif_stop_queue(netdev); + igbvf_print_device_info(adapter); igbvf_initialize_last_counter_stats(adapter); diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 666207a9c03..caa8192fff2 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -533,6 +533,7 @@ ixgb_remove(struct pci_dev *pdev) pci_release_regions(pdev); free_netdev(netdev); + pci_disable_device(pdev); } /** diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c index 8bb9ddb6dff..0d44c6470ca 100644 --- a/drivers/net/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ixgbe/ixgbe_dcb.c @@ -43,9 +43,12 @@ * ixgbe_dcb_check_config(). */ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config, - u8 direction) + int max_frame, u8 direction) { struct tc_bw_alloc *p; + int min_credit; + int min_multiplier; + int min_percent = 100; s32 ret_val = 0; /* Initialization values default for Tx settings */ u32 credit_refill = 0; @@ -59,6 +62,31 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config, goto out; } + min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / + DCB_CREDIT_QUANTUM; + + /* Find smallest link percentage */ + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + p = &dcb_config->tc_config[i].path[direction]; + bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; + link_percentage = p->bwg_percent; + + link_percentage = (link_percentage * bw_percent) / 100; + + if (link_percentage && link_percentage < min_percent) + min_percent = link_percentage; + } + + /* + * The ratio between traffic classes will control the bandwidth + * percentages seen on the wire. To calculate this ratio we use + * a multiplier. It is required that the refill credits must be + * larger than the max frame size so here we find the smallest + * multiplier that will allow all bandwidth percentages to be + * greater than the max frame size. 
+ */ + min_multiplier = (min_credit / min_percent) + 1; + /* Find out the link percentage for each TC first */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { p = &dcb_config->tc_config[i].path[direction]; @@ -73,8 +101,9 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config, /* Save link_percentage for reference */ p->link_percent = (u8)link_percentage; - /* Calculate credit refill and save it */ - credit_refill = link_percentage * MINIMUM_CREDIT_REFILL; + /* Calculate credit refill ratio using multiplier */ + credit_refill = min(link_percentage * min_multiplier, + MAX_CREDIT_REFILL); p->data_credits_refill = (u16)credit_refill; /* Calculate maximum credit for the TC */ @@ -85,8 +114,8 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config, * of a TC is too small, the maximum credit may not be * enough to send out a jumbo frame in data plane arbitration. */ - if (credit_max && (credit_max < MINIMUM_CREDIT_FOR_JUMBO)) - credit_max = MINIMUM_CREDIT_FOR_JUMBO; + if (credit_max && (credit_max < min_credit)) + credit_max = min_credit; if (direction == DCB_TX_CONFIG) { /* diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h index eb1059f09da..0208a87b129 100644 --- a/drivers/net/ixgbe/ixgbe_dcb.h +++ b/drivers/net/ixgbe/ixgbe_dcb.h @@ -150,15 +150,14 @@ struct ixgbe_dcb_config { /* DCB driver APIs */ /* DCB credits calculation */ -s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, u8); +s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, int, u8); /* DCB hw initialization */ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); /* DCB definitions for credit calculation */ +#define DCB_CREDIT_QUANTUM 64 /* DCB Quantum */ #define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */ -#define MINIMUM_CREDIT_REFILL 5 /* 5*64B = 320B */ -#define MINIMUM_CREDIT_FOR_JUMBO 145 /* 145= UpperBound((9*1024+54)/64B) for 9KB jumbo frame */ #define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */ #define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */ #define MAX_CREDIT 4095 /* Maximum credit supported: 256KB * 1204 / 64B */ diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c index 67c219f86c3..05f22471507 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c @@ -397,6 +397,11 @@ static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw) reg &= ~IXGBE_RTTDCS_ARBDIS; IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + /* Enable Security TX Buffer IFG for DCB */ + reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); + reg |= IXGBE_SECTX_DCB; + IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); + return 0; } diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h index 18d7fbf6c29..3841649fb95 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82599.h +++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h @@ -95,6 +95,9 @@ #define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */ +/* SECTXMINIFG DCB */ +#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer IFG */ + /* DCB hardware-specific driver APIs */ diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index f85631263af..2bd3eb4ee5a 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -3347,6 +3347,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; + int max_frame = adapter->netdev->mtu 
+ ETH_HLEN + ETH_FCS_LEN; u32 txdctl; int i, j; @@ -3359,8 +3360,15 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) if (hw->mac.type == ixgbe_mac_82598EB) netif_set_gso_max_size(adapter->netdev, 32768); - ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG); - ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG); +#ifdef CONFIG_FCOE + if (adapter->netdev->features & NETIF_F_FCOE_MTU) + max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif + + ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame, + DCB_TX_CONFIG); + ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame, + DCB_RX_CONFIG); /* reconfigure the hardware */ ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg); diff --git a/drivers/net/jme.c b/drivers/net/jme.c index d85edf3119c..c57d9a43cec 100644 --- a/drivers/net/jme.c +++ b/drivers/net/jme.c @@ -2955,11 +2955,7 @@ jme_init_one(struct pci_dev *pdev, * Tell stack that we are not ready to work until open() */ netif_carrier_off(netdev); - netif_stop_queue(netdev); - /* - * Register netdev - */ rc = register_netdev(netdev); if (rc) { pr_err("Cannot register net device\n"); diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c index 316bb70775b..e7030ceb178 100644 --- a/drivers/net/lib8390.c +++ b/drivers/net/lib8390.c @@ -1077,7 +1077,6 @@ static void __NS8390_init(struct net_device *dev, int startp) ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG); ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); - netif_start_queue(dev); ei_local->tx1 = ei_local->tx2 = 0; ei_local->txing = 0; diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c index 12612127a08..f7d06cbc70a 100644 --- a/drivers/net/netxen/netxen_nic_ctx.c +++ b/drivers/net/netxen/netxen_nic_ctx.c @@ -254,19 +254,6 @@ out_free_rq: return err; } -static void -nx_fw_cmd_reset_ctx(struct netxen_adapter *adapter) -{ - - netxen_issue_cmd(adapter, adapter->ahw.pci_func, NXHAL_VERSION, - adapter->ahw.pci_func, NX_DESTROY_CTX_RESET, 0, - NX_CDRP_CMD_DESTROY_RX_CTX); - - netxen_issue_cmd(adapter, adapter->ahw.pci_func, NXHAL_VERSION, - adapter->ahw.pci_func, NX_DESTROY_CTX_RESET, 0, - NX_CDRP_CMD_DESTROY_TX_CTX); -} - static void nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter) { @@ -698,8 +685,6 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter) if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) { if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state)) goto done; - if (reset_devices) - nx_fw_cmd_reset_ctx(adapter); err = nx_fw_cmd_create_rx_ctx(adapter); if (err) goto err_out_free; diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 50820beac3a..e1d30d7f207 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -41,9 +41,6 @@ MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID); -MODULE_FIRMWARE(NX_P2_MN_ROMIMAGE_NAME); -MODULE_FIRMWARE(NX_P3_CT_ROMIMAGE_NAME); -MODULE_FIRMWARE(NX_P3_MN_ROMIMAGE_NAME); MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME); char netxen_nic_driver_name[] = "netxen_nic"; @@ -1240,7 +1237,6 @@ netxen_setup_netdev(struct netxen_adapter *adapter, dev_warn(&pdev->dev, "failed to read mac addr\n"); netif_carrier_off(netdev); - netif_stop_queue(netdev); err = register_netdev(netdev); if (err) { @@ -1356,6 +1352,13 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) break; } + if 
(reset_devices) { + if (adapter->portnum == 0) { + NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0); + adapter->need_fw_reset = 1; + } + } + err = netxen_start_firmware(adapter); if (err) goto err_out_decr_ref; diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index 03096c80103..d05c44692f0 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c @@ -1536,6 +1536,7 @@ static struct pcmcia_device_id pcnet_ids[] = { PCMCIA_DEVICE_PROD_ID12("COMPU-SHACK", "FASTline PCMCIA 10/100 Fast-Ethernet", 0xfa2e424d, 0x3953d9b9), PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722), PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2), + PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a), PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd), PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d), PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d), diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index e2afdce0a43..f0bd1a1aba3 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -74,8 +74,8 @@ #define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4) #define MII_88E1121_PHY_MSCR_DELAY_MASK (~(0x3 << 4)) -#define MII_88EC048_PHY_MSCR1_REG 16 -#define MII_88EC048_PHY_MSCR1_PAD_ODD BIT(6) +#define MII_88E1318S_PHY_MSCR1_REG 16 +#define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6) #define MII_88E1121_PHY_LED_CTRL 16 #define MII_88E1121_PHY_LED_PAGE 3 @@ -240,7 +240,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev) return err; } -static int m88ec048_config_aneg(struct phy_device *phydev) +static int m88e1318_config_aneg(struct phy_device *phydev) { int err, oldpage, mscr; @@ -251,10 +251,10 @@ static int m88ec048_config_aneg(struct phy_device *phydev) if (err < 0) return err; - mscr = phy_read(phydev, MII_88EC048_PHY_MSCR1_REG); - mscr |= MII_88EC048_PHY_MSCR1_PAD_ODD; + mscr = phy_read(phydev, MII_88E1318S_PHY_MSCR1_REG); + mscr |= MII_88E1318S_PHY_MSCR1_PAD_ODD; - err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr); + err = phy_write(phydev, MII_88E1318S_PHY_MSCR1_REG, mscr); if (err < 0) return err; @@ -659,12 +659,12 @@ static struct phy_driver marvell_drivers[] = { .driver = { .owner = THIS_MODULE }, }, { - .phy_id = MARVELL_PHY_ID_88EC048, + .phy_id = MARVELL_PHY_ID_88E1318S, .phy_id_mask = MARVELL_PHY_ID_MASK, - .name = "Marvell 88EC048", + .name = "Marvell 88E1318S", .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, - .config_aneg = &m88ec048_config_aneg, + .config_aneg = &m88e1318_config_aneg, .read_status = &marvell_read_status, .ack_interrupt = &marvell_ack_interrupt, .config_intr = &marvell_config_intr, diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c index 7a298cdf9ab..a3dcd04be22 100644 --- a/drivers/net/qlcnic/qlcnic_main.c +++ b/drivers/net/qlcnic/qlcnic_main.c @@ -1450,7 +1450,6 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, netdev->irq = adapter->msix_entries[0].vector; netif_carrier_off(netdev); - netif_stop_queue(netdev); err = register_netdev(netdev); if (err) { diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h index 016360c65ce..8a795856984 100644 --- a/drivers/net/smsc911x.h +++ b/drivers/net/smsc911x.h @@ -22,7 +22,7 @@ #define __SMSC911X_H__ #define TX_FIFO_LOW_THRESHOLD ((u32)1600) -#define SMSC911X_EEPROM_SIZE ((u32)7) +#define SMSC911X_EEPROM_SIZE ((u32)128) #define USE_DEBUG 0 
/* This is the maximum number of packets to be received every diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c index 823b9e6431d..06bc6034ce8 100644 --- a/drivers/net/stmmac/stmmac_main.c +++ b/drivers/net/stmmac/stmmac_main.c @@ -337,33 +337,19 @@ static int stmmac_init_phy(struct net_device *dev) return 0; } -static inline void stmmac_mac_enable_rx(void __iomem *ioaddr) +static inline void stmmac_enable_mac(void __iomem *ioaddr) { u32 value = readl(ioaddr + MAC_CTRL_REG); - value |= MAC_RNABLE_RX; - /* Set the RE (receive enable bit into the MAC CTRL register). */ - writel(value, ioaddr + MAC_CTRL_REG); -} -static inline void stmmac_mac_enable_tx(void __iomem *ioaddr) -{ - u32 value = readl(ioaddr + MAC_CTRL_REG); - value |= MAC_ENABLE_TX; - /* Set the TE (transmit enable bit into the MAC CTRL register). */ + value |= MAC_RNABLE_RX | MAC_ENABLE_TX; writel(value, ioaddr + MAC_CTRL_REG); } -static inline void stmmac_mac_disable_rx(void __iomem *ioaddr) +static inline void stmmac_disable_mac(void __iomem *ioaddr) { u32 value = readl(ioaddr + MAC_CTRL_REG); - value &= ~MAC_RNABLE_RX; - writel(value, ioaddr + MAC_CTRL_REG); -} -static inline void stmmac_mac_disable_tx(void __iomem *ioaddr) -{ - u32 value = readl(ioaddr + MAC_CTRL_REG); - value &= ~MAC_ENABLE_TX; + value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX); writel(value, ioaddr + MAC_CTRL_REG); } @@ -857,8 +843,7 @@ static int stmmac_open(struct net_device *dev) writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK); /* Enable the MAC Rx/Tx */ - stmmac_mac_enable_rx(priv->ioaddr); - stmmac_mac_enable_tx(priv->ioaddr); + stmmac_enable_mac(priv->ioaddr); /* Set the HW DMA mode and the COE */ stmmac_dma_operation_mode(priv); @@ -928,9 +913,8 @@ static int stmmac_release(struct net_device *dev) /* Release and free the Rx/Tx resources */ free_dma_desc_resources(priv); - /* Disable the MAC core */ - stmmac_mac_disable_tx(priv->ioaddr); - stmmac_mac_disable_rx(priv->ioaddr); + /* Disable the MAC Rx/Tx */ + stmmac_disable_mac(priv->ioaddr); netif_carrier_off(dev); @@ -1787,8 +1771,7 @@ static int stmmac_dvr_remove(struct platform_device *pdev) priv->hw->dma->stop_rx(priv->ioaddr); priv->hw->dma->stop_tx(priv->ioaddr); - stmmac_mac_disable_rx(priv->ioaddr); - stmmac_mac_disable_tx(priv->ioaddr); + stmmac_disable_mac(priv->ioaddr); netif_carrier_off(ndev); @@ -1839,13 +1822,11 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state) dis_ic); priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size); - stmmac_mac_disable_tx(priv->ioaddr); - /* Enable Power down mode by programming the PMT regs */ if (device_can_wakeup(priv->device)) priv->hw->mac->pmt(priv->ioaddr, priv->wolopts); else - stmmac_mac_disable_rx(priv->ioaddr); + stmmac_disable_mac(priv->ioaddr); } else { priv->shutdown = 1; /* Although this can appear slightly redundant it actually @@ -1886,8 +1867,7 @@ static int stmmac_resume(struct platform_device *pdev) netif_device_attach(dev); /* Enable the MAC and DMA */ - stmmac_mac_enable_rx(priv->ioaddr); - stmmac_mac_enable_tx(priv->ioaddr); + stmmac_enable_mac(priv->ioaddr); priv->hw->dma->start_tx(priv->ioaddr); priv->hw->dma->start_rx(priv->ioaddr); diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index 28e1ffb13db..c78a50586c1 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c @@ -2021,7 +2021,6 @@ static int __devinit de_init_one (struct pci_dev *pdev, de->media_timer.data = (unsigned long) de; netif_carrier_off(dev); - 
netif_stop_queue(dev); /* wake up device, assign resources */ rc = pci_enable_device(pdev); diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index ca7fc9df1cc..c04d49e31f8 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -45,6 +45,7 @@ #include #include #include +#include #define DRIVER_VERSION "22-Aug-2005" @@ -1273,6 +1274,16 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) struct usb_device *xdev; int status; const char *name; + struct usb_driver *driver = to_usb_driver(udev->dev.driver); + + /* usbnet already took usb runtime pm, so have to enable the feature + * for usb interface, otherwise usb_autopm_get_interface may return + * failure if USB_SUSPEND(RUNTIME_PM) is enabled. + */ + if (!driver->supports_autosuspend) { + driver->supports_autosuspend = 1; + pm_runtime_enable(&udev->dev); + } name = udev->dev.driver->name; info = (struct driver_info *) prod->driver_info; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index e3658e10db3..21314e06e6d 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -873,7 +873,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + skb_shinfo(skb)->nr_frags + 1; - ctx.ipv4 = (skb->protocol == __constant_ntohs(ETH_P_IP)); + ctx.ipv4 = (skb->protocol == cpu_to_be16(ETH_P_IP)); ctx.mss = skb_shinfo(skb)->gso_size; if (ctx.mss) { diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 8a2f4712284..edf228843af 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -330,14 +330,14 @@ struct vmxnet3_adapter { }; #define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \ - writel(cpu_to_le32(val), (adapter)->hw_addr0 + (reg)) + writel((val), (adapter)->hw_addr0 + (reg)) #define VMXNET3_READ_BAR0_REG(adapter, reg) \ - le32_to_cpu(readl((adapter)->hw_addr0 + (reg))) + readl((adapter)->hw_addr0 + (reg)) #define VMXNET3_WRITE_BAR1_REG(adapter, reg, val) \ - writel(cpu_to_le32(val), (adapter)->hw_addr1 + (reg)) + writel((val), (adapter)->hw_addr1 + (reg)) #define VMXNET3_READ_BAR1_REG(adapter, reg) \ - le32_to_cpu(readl((adapter)->hw_addr1 + (reg))) + readl((adapter)->hw_addr1 + (reg)) #define VMXNET3_WAKE_QUEUE_THRESHOLD(tq) (5) #define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \ diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c index cd0b14a0a93..fbe8aca975d 100644 --- a/drivers/net/wireless/ath/ath5k/attach.c +++ b/drivers/net/wireless/ath/ath5k/attach.c @@ -139,12 +139,12 @@ int ath5k_hw_attach(struct ath5k_softc *sc) /* Fill the ath5k_hw struct with the needed functions */ ret = ath5k_hw_init_desc_functions(ah); if (ret) - goto err_free; + goto err; /* Bring device out of sleep and reset its units */ ret = ath5k_hw_nic_wakeup(ah, 0, true); if (ret) - goto err_free; + goto err; /* Get MAC, PHY and RADIO revisions */ ah->ah_mac_srev = srev; @@ -234,7 +234,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc) } else { ATH5K_ERR(sc, "Couldn't identify radio revision.\n"); ret = -ENODEV; - goto err_free; + goto err; } } @@ -244,7 +244,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc) (srev < AR5K_SREV_AR2425)) { ATH5K_ERR(sc, "Device not yet supported.\n"); ret = -ENODEV; - goto err_free; + goto err; } /* @@ -252,7 +252,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc) */ ret = ath5k_hw_post(ah); if (ret) - goto err_free; + goto err; /* Enable pci core retry 
fix on Hainan (5213A) and later chips */ if (srev >= AR5K_SREV_AR5213A) @@ -265,7 +265,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc) ret = ath5k_eeprom_init(ah); if (ret) { ATH5K_ERR(sc, "unable to init EEPROM\n"); - goto err_free; + goto err; } ee = &ah->ah_capabilities.cap_eeprom; @@ -307,7 +307,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc) if (ret) { ATH5K_ERR(sc, "unable to get device capabilities: 0x%04x\n", sc->pdev->device); - goto err_free; + goto err; } /* Crypto settings */ @@ -341,8 +341,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc) ath5k_hw_set_ledstate(ah, AR5K_LED_INIT); return 0; -err_free: - kfree(ah); +err: return ret; } diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 973c919fdd2..9b8e7e3fceb 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -310,7 +310,7 @@ struct ath_rx { u8 rxotherant; u32 *rxlink; unsigned int rxfilter; - spinlock_t rxflushlock; + spinlock_t pcu_lock; spinlock_t rxbuflock; struct list_head rxbuf; struct ath_descdma rxdma; diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 728d904c74d..6576f683dba 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -801,10 +801,16 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev) } kfree(buf); - if ((hif_dev->device_id == 0x7010) || (hif_dev->device_id == 0x7015)) + switch (hif_dev->device_id) { + case 0x7010: + case 0x7015: + case 0x9018: firm_offset = AR7010_FIRMWARE_TEXT; - else + break; + default: firm_offset = AR9271_FIRMWARE_TEXT; + break; + } /* * Issue FW download complete command to firmware. diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index c6ec800d7a6..b52f1cf8a60 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -241,6 +241,9 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, */ ath9k_hw_set_interrupts(ah, 0); ath_drain_all_txq(sc, false); + + spin_lock_bh(&sc->rx.pcu_lock); + stopped = ath_stoprecv(sc); /* XXX: do not flush receive queue here. 
We don't want @@ -268,6 +271,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, "reset status %d\n", channel->center_freq, r); spin_unlock_bh(&sc->sc_resetlock); + spin_unlock_bh(&sc->rx.pcu_lock); goto ps_restore; } spin_unlock_bh(&sc->sc_resetlock); @@ -276,9 +280,12 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, ath_print(common, ATH_DBG_FATAL, "Unable to restart recv logic\n"); r = -EIO; + spin_unlock_bh(&sc->rx.pcu_lock); goto ps_restore; } + spin_unlock_bh(&sc->rx.pcu_lock); + ath_update_txpow(sc); ath9k_hw_set_interrupts(ah, ah->imask); @@ -613,7 +620,7 @@ void ath9k_tasklet(unsigned long data) rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN); if (status & rxmask) { - spin_lock_bh(&sc->rx.rxflushlock); + spin_lock_bh(&sc->rx.pcu_lock); /* Check for high priority Rx first */ if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) && @@ -621,7 +628,7 @@ void ath9k_tasklet(unsigned long data) ath_rx_tasklet(sc, 0, true); ath_rx_tasklet(sc, 0, false); - spin_unlock_bh(&sc->rx.rxflushlock); + spin_unlock_bh(&sc->rx.pcu_lock); } if (status & ATH9K_INT_TX) { @@ -876,6 +883,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw) if (!ah->curchan) ah->curchan = ath_get_curchannel(sc, sc->hw); + spin_lock_bh(&sc->rx.pcu_lock); spin_lock_bh(&sc->sc_resetlock); r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); if (r) { @@ -890,8 +898,10 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw) if (ath_startrecv(sc) != 0) { ath_print(common, ATH_DBG_FATAL, "Unable to restart recv logic\n"); + spin_unlock_bh(&sc->rx.pcu_lock); return; } + spin_unlock_bh(&sc->rx.pcu_lock); if (sc->sc_flags & SC_OP_BEACONS) ath_beacon_config(sc, NULL); /* restart beacons */ @@ -930,6 +940,9 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw) ath9k_hw_set_interrupts(ah, 0); ath_drain_all_txq(sc, false); /* clear pending tx frames */ + + spin_lock_bh(&sc->rx.pcu_lock); + ath_stoprecv(sc); /* turn off frame recv */ ath_flushrecv(sc); /* flush recv queue */ @@ -947,6 +960,9 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw) spin_unlock_bh(&sc->sc_resetlock); ath9k_hw_phy_disable(ah); + + spin_unlock_bh(&sc->rx.pcu_lock); + ath9k_hw_configpcipowersave(ah, 1, 1); ath9k_ps_restore(sc); ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP); @@ -966,6 +982,9 @@ int ath_reset(struct ath_softc *sc, bool retry_tx) ath9k_hw_set_interrupts(ah, 0); ath_drain_all_txq(sc, retry_tx); + + spin_lock_bh(&sc->rx.pcu_lock); + ath_stoprecv(sc); ath_flushrecv(sc); @@ -980,6 +999,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx) ath_print(common, ATH_DBG_FATAL, "Unable to start recv logic\n"); + spin_unlock_bh(&sc->rx.pcu_lock); + /* * We may be doing a reset in response to a request * that changes the channel so update any state that @@ -1142,6 +1163,7 @@ static int ath9k_start(struct ieee80211_hw *hw) * be followed by initialization of the appropriate bits * and then setup of the interrupt mask. 
*/ + spin_lock_bh(&sc->rx.pcu_lock); spin_lock_bh(&sc->sc_resetlock); r = ath9k_hw_reset(ah, init_channel, ah->caldata, false); if (r) { @@ -1150,6 +1172,7 @@ static int ath9k_start(struct ieee80211_hw *hw) "(freq %u MHz)\n", r, curchan->center_freq); spin_unlock_bh(&sc->sc_resetlock); + spin_unlock_bh(&sc->rx.pcu_lock); goto mutex_unlock; } spin_unlock_bh(&sc->sc_resetlock); @@ -1171,8 +1194,10 @@ static int ath9k_start(struct ieee80211_hw *hw) ath_print(common, ATH_DBG_FATAL, "Unable to start recv logic\n"); r = -EIO; + spin_unlock_bh(&sc->rx.pcu_lock); goto mutex_unlock; } + spin_unlock_bh(&sc->rx.pcu_lock); /* Setup our intr mask. */ ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL | @@ -1371,12 +1396,14 @@ static void ath9k_stop(struct ieee80211_hw *hw) * before setting the invalid flag. */ ath9k_hw_set_interrupts(ah, 0); + spin_lock_bh(&sc->rx.pcu_lock); if (!(sc->sc_flags & SC_OP_INVALID)) { ath_drain_all_txq(sc, false); ath_stoprecv(sc); ath9k_hw_phy_disable(ah); } else sc->rx.rxlink = NULL; + spin_unlock_bh(&sc->rx.pcu_lock); /* disable HAL and put h/w to sleep */ ath9k_hw_disable(ah); diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c index 0cee90cf8dc..89978d71617 100644 --- a/drivers/net/wireless/ath/ath9k/rc.c +++ b/drivers/net/wireless/ath/ath9k/rc.c @@ -527,7 +527,7 @@ static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv, for (i = 0; i < rateset->rs_nrates; i++) { for (j = 0; j < rate_table->rate_cnt; j++) { u32 phy = rate_table->info[j].phy; - u16 rate_flags = rate_table->info[i].rate_flags; + u16 rate_flags = rate_table->info[j].rate_flags; u8 rate = rateset->rs_rates[i]; u8 dot11rate = rate_table->info[j].dot11rate; diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index fe73fc50082..fddb0129bb5 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -297,19 +297,17 @@ static void ath_edma_start_recv(struct ath_softc *sc) ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP, sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize); - spin_unlock_bh(&sc->rx.rxbuflock); - ath_opmode_init(sc); ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL)); + + spin_unlock_bh(&sc->rx.rxbuflock); } static void ath_edma_stop_recv(struct ath_softc *sc) { - spin_lock_bh(&sc->rx.rxbuflock); ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); - spin_unlock_bh(&sc->rx.rxbuflock); } int ath_rx_init(struct ath_softc *sc, int nbufs) @@ -319,7 +317,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs) struct ath_buf *bf; int error = 0; - spin_lock_init(&sc->rx.rxflushlock); + spin_lock_init(&sc->rx.pcu_lock); sc->sc_flags &= ~SC_OP_RXFLUSH; spin_lock_init(&sc->rx.rxbuflock); @@ -506,10 +504,11 @@ int ath_startrecv(struct ath_softc *sc) ath9k_hw_rxena(ah); start_recv: - spin_unlock_bh(&sc->rx.rxbuflock); ath_opmode_init(sc); ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL)); + spin_unlock_bh(&sc->rx.rxbuflock); + return 0; } @@ -518,6 +517,7 @@ bool ath_stoprecv(struct ath_softc *sc) struct ath_hw *ah = sc->sc_ah; bool stopped; + spin_lock_bh(&sc->rx.rxbuflock); ath9k_hw_stoppcurecv(ah); ath9k_hw_setrxfilter(ah, 0); stopped = ath9k_hw_stopdmarecv(ah); @@ -526,19 +526,18 @@ bool ath_stoprecv(struct ath_softc *sc) ath_edma_stop_recv(sc); else sc->rx.rxlink = NULL; + spin_unlock_bh(&sc->rx.rxbuflock); return stopped; } void ath_flushrecv(struct ath_softc *sc) { - spin_lock_bh(&sc->rx.rxflushlock); sc->sc_flags |= 
SC_OP_RXFLUSH; if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ath_rx_tasklet(sc, 1, true); ath_rx_tasklet(sc, 1, false); sc->sc_flags &= ~SC_OP_RXFLUSH; - spin_unlock_bh(&sc->rx.rxflushlock); } static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb) diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 30ef2dfc1ed..f2ade2402ce 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1089,15 +1089,6 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx) txq->axq_tx_inprogress = false; spin_unlock_bh(&txq->axq_lock); - /* flush any pending frames if aggregation is enabled */ - if (sc->sc_flags & SC_OP_TXAGGR) { - if (!retry_tx) { - spin_lock_bh(&txq->axq_lock); - ath_txq_drain_pending_buffers(sc, txq); - spin_unlock_bh(&txq->axq_lock); - } - } - if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { spin_lock_bh(&txq->axq_lock); while (!list_empty(&txq->txq_fifo_pending)) { @@ -1118,6 +1109,15 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx) } spin_unlock_bh(&txq->axq_lock); } + + /* flush any pending frames if aggregation is enabled */ + if (sc->sc_flags & SC_OP_TXAGGR) { + if (!retry_tx) { + spin_lock_bh(&txq->axq_lock); + ath_txq_drain_pending_buffers(sc, txq); + spin_unlock_bh(&txq->axq_lock); + } + } } void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c index 45933cf8e8c..9a55338d957 100644 --- a/drivers/net/wireless/b43/sdio.c +++ b/drivers/net/wireless/b43/sdio.c @@ -175,7 +175,9 @@ static void b43_sdio_remove(struct sdio_func *func) struct b43_sdio *sdio = sdio_get_drvdata(func); ssb_bus_unregister(&sdio->ssb); + sdio_claim_host(func); sdio_disable_func(func); + sdio_release_host(func); kfree(sdio); sdio_set_drvdata(func, NULL); } diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c index 296fd00a512..e5685dc317a 100644 --- a/drivers/net/wireless/libertas/if_sdio.c +++ b/drivers/net/wireless/libertas/if_sdio.c @@ -684,18 +684,40 @@ static int if_sdio_prog_firmware(struct if_sdio_card *card) lbs_deb_enter(LBS_DEB_SDIO); + /* + * Disable interrupts + */ + sdio_claim_host(card->func); + sdio_writeb(card->func, 0x00, IF_SDIO_H_INT_MASK, &ret); + sdio_release_host(card->func); + sdio_claim_host(card->func); scratch = if_sdio_read_scratch(card, &ret); sdio_release_host(card->func); + lbs_deb_sdio("firmware status = %#x\n", scratch); + lbs_deb_sdio("scratch ret = %d\n", ret); + if (ret) goto out; - lbs_deb_sdio("firmware status = %#x\n", scratch); + /* + * The manual clearly describes that FEDC is the right code to use + * to detect firmware presence, but for SD8686 it is not that simple. + * Scratch is also used to store the RX packet length, so we lose + * the FEDC value early on. So we use a non-zero check in order + * to validate firmware presence. + * Additionally, the SD8686 in the Gumstix always has the high scratch + * bit set, even when the firmware is not loaded. So we have to + * exclude that from the test. 
+ */ if (scratch == IF_SDIO_FIRMWARE_OK) { lbs_deb_sdio("firmware already loaded\n"); goto success; + } else if ((card->model == MODEL_8686) && (scratch & 0x7fff)) { + lbs_deb_sdio("firmware may be running\n"); + goto success; } ret = lbs_get_firmware(&card->func->dev, lbs_helper_name, lbs_fw_name, @@ -709,10 +731,14 @@ static int if_sdio_prog_firmware(struct if_sdio_card *card) if (ret) goto out; + lbs_deb_sdio("Helper firmware loaded\n"); + ret = if_sdio_prog_real(card, mainfw); if (ret) goto out; + lbs_deb_sdio("Firmware loaded\n"); + success: sdio_claim_host(card->func); sdio_set_block_size(card->func, IF_SDIO_BLOCK_SIZE); @@ -1042,8 +1068,6 @@ static int if_sdio_probe(struct sdio_func *func, priv->exit_deep_sleep = if_sdio_exit_deep_sleep; priv->reset_deep_sleep_wakeup = if_sdio_reset_deep_sleep_wakeup; - priv->fw_ready = 1; - sdio_claim_host(func); /* @@ -1064,6 +1088,8 @@ static int if_sdio_probe(struct sdio_func *func, if (ret) goto reclaim; + priv->fw_ready = 1; + /* * FUNC_INIT is required for SD8688 WLAN/BT multiple functions */ diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c index 6bb876d6525..fbe86ca9580 100644 --- a/drivers/usb/gadget/u_ether.c +++ b/drivers/usb/gadget/u_ether.c @@ -797,7 +797,6 @@ int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN]) * - iff DATA transfer is active, carrier is "on" * - tx queueing enabled if open *and* carrier is "on" */ - netif_stop_queue(net); netif_carrier_off(net); dev->gadget = g; diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 7187bd8a75f..749f01ccd26 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h @@ -462,7 +462,8 @@ struct dccp_ackvec; * @dccps_hc_rx_insert_options - receiver wants to add options when acking * @dccps_hc_tx_insert_options - sender wants to add options when sending * @dccps_server_timewait - server holds timewait state on close (RFC 4340, 8.3) - * @dccps_xmit_timer - timer for when CCID is not ready to send + * @dccps_xmitlet - tasklet scheduled by the TX CCID to dequeue data packets + * @dccps_xmit_timer - used by the TX CCID to delay sending (rate-based pacing) * @dccps_syn_rtt - RTT sample from Request/Response exchange (in usecs) */ struct dccp_sock { @@ -502,6 +503,7 @@ struct dccp_sock { __u8 dccps_hc_rx_insert_options:1; __u8 dccps_hc_tx_insert_options:1; __u8 dccps_server_timewait:1; + struct tasklet_struct dccps_xmitlet; struct timer_list dccps_xmit_timer; }; diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index d0f08018335..1ff81b51b65 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -12,7 +12,7 @@ #define MARVELL_PHY_ID_88E1121R 0x01410cb0 #define MARVELL_PHY_ID_88E1145 0x01410cd0 #define MARVELL_PHY_ID_88E1240 0x01410e30 -#define MARVELL_PHY_ID_88EC048 0x01410e90 +#define MARVELL_PHY_ID_88E1318S 0x01410e90 /* struct phy_device dev_flags definitions */ #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 diff --git a/include/linux/socket.h b/include/linux/socket.h index 5146b50202c..86b652fabf6 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -322,7 +322,7 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata, int offset, unsigned int len, __wsum *csump); -extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode); +extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode); extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len); extern int memcpy_toiovecend(const struct 
iovec *v, unsigned char *kdata, int offset, int len); diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h index 6da573c75d5..8eff83b9536 100644 --- a/include/net/caif/caif_dev.h +++ b/include/net/caif/caif_dev.h @@ -28,7 +28,7 @@ struct caif_param { * @sockaddr: Socket address to connect. * @priority: Priority of the connection. * @link_selector: Link selector (high bandwidth or low latency) - * @link_name: Name of the CAIF Link Layer to use. + * @ifindex: kernel index of the interface. * @param: Connect Request parameters (CAIF_SO_REQ_PARAM). * * This struct is used when connecting a CAIF channel. @@ -39,7 +39,7 @@ struct caif_connect_request { struct sockaddr_caif sockaddr; enum caif_channel_priority priority; enum caif_link_selector link_selector; - char link_name[16]; + int ifindex; struct caif_param param; }; diff --git a/include/net/caif/caif_spi.h b/include/net/caif/caif_spi.h index ce4570dff02..87c3d11b8e5 100644 --- a/include/net/caif/caif_spi.h +++ b/include/net/caif/caif_spi.h @@ -121,6 +121,8 @@ struct cfspi { wait_queue_head_t wait; spinlock_t lock; bool flow_stop; + bool slave; + bool slave_talked; #ifdef CONFIG_DEBUG_FS enum cfspi_state dbg_state; u16 pcmd; diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h index bd646faffa4..f688478bfb8 100644 --- a/include/net/caif/cfcnfg.h +++ b/include/net/caif/cfcnfg.h @@ -139,10 +139,10 @@ struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg, enum cfcnfg_phy_preference phy_pref); /** - * cfcnfg_get_named() - Get the Physical Identifier of CAIF Link Layer + * cfcnfg_get_id_from_ifi() - Get the physical identifier matching an ifindex, + * i.e. map a kernel interface index to the CAIF physical id. * @cnfg: Configuration object - * @name: Name of the Physical Layer (Caif Link Layer) + * @ifi: interface index (as set with SO_BINDTODEVICE).
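+ * Returns the matching physical id on success, or -ENODEV if no CAIF
+ * link layer is registered for the given interface index.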
*/ -int cfcnfg_get_named(struct cfcnfg *cnfg, char *name); - +int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi); #endif /* CFCNFG_H_ */ diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index ba3666d3176..07bdb5e9e8a 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -158,6 +158,8 @@ extern int fib_table_flush(struct fib_table *table); extern void fib_table_select_default(struct fib_table *table, const struct flowi *flp, struct fib_result *res); +extern void fib_free_table(struct fib_table *tb); + #ifndef CONFIG_IP_MULTIPLE_TABLES diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c index 76ae68303d3..d522d8c1703 100644 --- a/net/caif/caif_config_util.c +++ b/net/caif/caif_config_util.c @@ -16,11 +16,18 @@ int connect_req_to_link_param(struct cfcnfg *cnfg, { struct dev_info *dev_info; enum cfcnfg_phy_preference pref; + int res; + memset(l, 0, sizeof(*l)); - l->priority = s->priority; + /* In caif protocol low value is high priority */ + l->priority = CAIF_PRIO_MAX - s->priority + 1; - if (s->link_name[0] != '\0') - l->phyid = cfcnfg_get_named(cnfg, s->link_name); + if (s->ifindex != 0) { + res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex); + if (res < 0) + return res; + l->phyid = res; + } else { switch (s->link_selector) { case CAIF_LINK_HIGH_BANDW: diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index b99369a055d..a42a408306e 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c @@ -307,6 +307,8 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what, case NETDEV_UNREGISTER: caifd = caif_get(dev); + if (caifd == NULL) + break; netdev_info(dev, "unregister\n"); atomic_set(&caifd->state, what); caif_device_destroy(dev); diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 2eca2dd0000..1bf0cf50379 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -716,8 +716,7 @@ static int setsockopt(struct socket *sock, { struct sock *sk = sock->sk; struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); - int prio, linksel; - struct ifreq ifreq; + int linksel; if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED) return -ENOPROTOOPT; @@ -735,33 +734,6 @@ static int setsockopt(struct socket *sock, release_sock(&cf_sk->sk); return 0; - case SO_PRIORITY: - if (lvl != SOL_SOCKET) - goto bad_sol; - if (ol < sizeof(int)) - return -EINVAL; - if (copy_from_user(&prio, ov, sizeof(int))) - return -EINVAL; - lock_sock(&(cf_sk->sk)); - cf_sk->conn_req.priority = prio; - release_sock(&cf_sk->sk); - return 0; - - case SO_BINDTODEVICE: - if (lvl != SOL_SOCKET) - goto bad_sol; - if (ol < sizeof(struct ifreq)) - return -EINVAL; - if (copy_from_user(&ifreq, ov, sizeof(ifreq))) - return -EFAULT; - lock_sock(&(cf_sk->sk)); - strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name, - sizeof(cf_sk->conn_req.link_name)); - cf_sk->conn_req.link_name - [sizeof(cf_sk->conn_req.link_name)-1] = 0; - release_sock(&cf_sk->sk); - return 0; - case CAIFSO_REQ_PARAM: if (lvl != SOL_CAIF) goto bad_sol; @@ -880,6 +852,18 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr, sock->state = SS_CONNECTING; sk->sk_state = CAIF_CONNECTING; + /* Check priority value coming from socket */ + /* if priority value is out of range it will be adjusted */ + if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX) + cf_sk->conn_req.priority = CAIF_PRIO_MAX; + else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN) + cf_sk->conn_req.priority = CAIF_PRIO_MIN; + else + cf_sk->conn_req.priority = cf_sk->sk.sk_priority; + + /*ifindex = id of the interface.*/
+ cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if; + dbfs_atomic_inc(&cnt.num_connect_req); cf_sk->layer.receive = caif_sktrecv_cb; err = caif_connect_client(&cf_sk->conn_req, @@ -905,6 +889,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr, cf_sk->maxframe = mtu - (headroom + tailroom); if (cf_sk->maxframe < 1) { pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu); + err = -ENODEV; goto out; } @@ -1142,7 +1127,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol, set_rx_flow_on(cf_sk); /* Set default options on configuration */ - cf_sk->conn_req.priority = CAIF_PRIO_NORMAL; + cf_sk->sk.sk_priority= CAIF_PRIO_NORMAL; cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY; cf_sk->conn_req.protocol = protocol; /* Increase the number of sockets created. */ diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c index 41adafd1891..21ede141018 100644 --- a/net/caif/cfcnfg.c +++ b/net/caif/cfcnfg.c @@ -173,18 +173,15 @@ static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg, return NULL; } -int cfcnfg_get_named(struct cfcnfg *cnfg, char *name) + +int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi) { int i; - - /* Try to match with specified name */ - for (i = 0; i < MAX_PHY_LAYERS; i++) { - if (cnfg->phy_layers[i].frm_layer != NULL - && strcmp(cnfg->phy_layers[i].phy_layer->name, - name) == 0) - return cnfg->phy_layers[i].frm_layer->id; - } - return 0; + for (i = 0; i < MAX_PHY_LAYERS; i++) + if (cnfg->phy_layers[i].frm_layer != NULL && + cnfg->phy_layers[i].ifindex == ifi) + return i; + return -ENODEV; } int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer) diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c index 08f267a109a..3cd8f978e30 100644 --- a/net/caif/cfctrl.c +++ b/net/caif/cfctrl.c @@ -361,11 +361,10 @@ void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer) struct cfctrl_request_info *p, *tmp; struct cfctrl *ctrl = container_obj(layr); spin_lock(&ctrl->info_list_lock); - pr_warn("enter\n"); list_for_each_entry_safe(p, tmp, &ctrl->list, list) { if (p->client_layer == adap_layer) { - pr_warn("cancel req :%d\n", p->sequence_no); + pr_debug("cancel req :%d\n", p->sequence_no); list_del(&p->list); kfree(p); } diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c index 496fda9ac66..11a2af4c162 100644 --- a/net/caif/cfdbgl.c +++ b/net/caif/cfdbgl.c @@ -12,6 +12,8 @@ #include #include +#define container_obj(layr) ((struct cfsrvl *) layr) + static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt); static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt); @@ -38,5 +40,17 @@ static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt) static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt) { + struct cfsrvl *service = container_obj(layr); + struct caif_payload_info *info; + int ret; + + if (!cfsrvl_ready(service, &ret)) + return ret; + + /* Add info for MUX-layer to route the packet out */ + info = cfpkt_info(pkt); + info->channel_id = service->layer.id; + info->dev_info = &service->dev_info; + return layr->dn->transmit(layr->dn, pkt); } diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c index bde8481e8d2..e2fb5fa7579 100644 --- a/net/caif/cfrfml.c +++ b/net/caif/cfrfml.c @@ -193,7 +193,7 @@ out: static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt) { - caif_assert(cfpkt_getlen(pkt) >= rfml->fragment_size); + caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size); /* Add info for MUX-layer to route the packet out. 
*/ cfpkt_info(pkt)->channel_id = rfml->serv.layer.id; diff --git a/net/compat.c b/net/compat.c index 63d260e8147..3649d589536 100644 --- a/net/compat.c +++ b/net/compat.c @@ -41,10 +41,12 @@ static inline int iov_from_user_compat_to_kern(struct iovec *kiov, compat_size_t len; if (get_user(len, &uiov32->iov_len) || - get_user(buf, &uiov32->iov_base)) { - tot_len = -EFAULT; - break; - } + get_user(buf, &uiov32->iov_base)) + return -EFAULT; + + if (len > INT_MAX - tot_len) + len = INT_MAX - tot_len; + tot_len += len; kiov->iov_base = compat_ptr(buf); kiov->iov_len = (__kernel_size_t) len; diff --git a/net/core/dev.c b/net/core/dev.c index 35dfb831848..0dd54a69dac 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2131,7 +2131,7 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev, } else { struct sock *sk = skb->sk; queue_index = sk_tx_queue_get(sk); - if (queue_index < 0) { + if (queue_index < 0 || queue_index >= dev->real_num_tx_queues) { queue_index = 0; if (dev->real_num_tx_queues > 1) diff --git a/net/core/iovec.c b/net/core/iovec.c index 72aceb1fe4f..c40f27e7d20 100644 --- a/net/core/iovec.c +++ b/net/core/iovec.c @@ -35,10 +35,9 @@ * in any case. */ -long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode) +int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode) { - int size, ct; - long err; + int size, ct, err; if (m->msg_namelen) { if (mode == VERIFY_READ) { @@ -62,14 +61,13 @@ long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, err = 0; for (ct = 0; ct < m->msg_iovlen; ct++) { - err += iov[ct].iov_len; - /* - * Goal is not to verify user data, but to prevent returning - * negative value, which is interpreted as errno. - * Overflow is still possible, but it is harmless. - */ - if (err < 0) - return -EMSGSIZE; + size_t len = iov[ct].iov_len; + + if (len > INT_MAX - err) { + len = INT_MAX - err; + iov[ct].iov_len = len; + } + err += len; } return err; diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 679b797d06b..fbce4b05a53 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -887,10 +887,11 @@ static ssize_t pktgen_if_write(struct file *file, i += len; if (debug) { - char tb[count + 1]; - if (copy_from_user(tb, user_buffer, count)) + size_t copy = min(count, 1023); + char tb[copy + 1]; + if (copy_from_user(tb, user_buffer, copy)) return -EFAULT; - tb[count] = 0; + tb[copy] = 0; printk(KERN_DEBUG "pktgen: %s,%lu buffer -:%s:-\n", name, (unsigned long)count, tb); } diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h index 117fb093dca..75c3582a767 100644 --- a/net/dccp/ccid.h +++ b/net/dccp/ccid.h @@ -134,13 +134,41 @@ static inline int ccid_get_current_tx_ccid(struct dccp_sock *dp) extern void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk); extern void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk); +/* + * Congestion control of queued data packets via CCID decision. + * + * The TX CCID performs its congestion-control by indicating whether and when a + * queued packet may be sent, using the return code of ccid_hc_tx_send_packet(). + * The following modes are supported via the symbolic constants below: + * - timer-based pacing (CCID returns a delay value in milliseconds); + * - autonomous dequeueing (CCID internally schedules dccps_xmitlet). 
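+ *
+ * Editor's sketch (not part of the original patch) of how a caller is
+ * expected to use this convention; it mirrors the dccp_write_xmit()
+ * rewrite later in this series:
+ *
+ *	rc = ccid_hc_tx_send_packet(ccid, sk, skb);
+ *	switch (ccid_packet_dequeue_eval(rc)) {
+ *	case CCID_PACKET_SEND_AT_ONCE:        transmit skb now
+ *	case CCID_PACKET_DELAY:               rearm dccps_xmit_timer for rc msecs
+ *	case CCID_PACKET_WILL_DEQUEUE_LATER:  wait for dccps_xmitlet
+ *	case CCID_PACKET_ERR:                 drop the packet
+ *	}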
+ */ + +enum ccid_dequeueing_decision { + CCID_PACKET_SEND_AT_ONCE = 0x00000, /* "green light": no delay */ + CCID_PACKET_DELAY_MAX = 0x0FFFF, /* maximum delay in msecs */ + CCID_PACKET_DELAY = 0x10000, /* CCID msec-delay mode */ + CCID_PACKET_WILL_DEQUEUE_LATER = 0x20000, /* CCID autonomous mode */ + CCID_PACKET_ERR = 0xF0000, /* error condition */ +}; + +static inline int ccid_packet_dequeue_eval(const int return_code) +{ + if (return_code < 0) + return CCID_PACKET_ERR; + if (return_code == 0) + return CCID_PACKET_SEND_AT_ONCE; + if (return_code <= CCID_PACKET_DELAY_MAX) + return CCID_PACKET_DELAY; + return return_code; +} + static inline int ccid_hc_tx_send_packet(struct ccid *ccid, struct sock *sk, struct sk_buff *skb) { - int rc = 0; if (ccid->ccid_ops->ccid_hc_tx_send_packet != NULL) - rc = ccid->ccid_ops->ccid_hc_tx_send_packet(sk, skb); - return rc; + return ccid->ccid_ops->ccid_hc_tx_send_packet(sk, skb); + return CCID_PACKET_SEND_AT_ONCE; } static inline void ccid_hc_tx_packet_sent(struct ccid *ccid, struct sock *sk, diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c index d850e291f87..6576eae9e77 100644 --- a/net/dccp/ccids/ccid2.c +++ b/net/dccp/ccids/ccid2.c @@ -78,12 +78,9 @@ static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc) static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) { - struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); - - if (hc->tx_pipe < hc->tx_cwnd) - return 0; - - return 1; /* XXX CCID should dequeue when ready instead of polling */ + if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk))) + return CCID_PACKET_WILL_DEQUEUE_LATER; + return CCID_PACKET_SEND_AT_ONCE; } static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val) @@ -115,6 +112,7 @@ static void ccid2_hc_tx_rto_expire(unsigned long data) { struct sock *sk = (struct sock *)data; struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); + const bool sender_was_blocked = ccid2_cwnd_network_limited(hc); bh_lock_sock(sk); if (sock_owned_by_user(sk)) { @@ -129,8 +127,6 @@ static void ccid2_hc_tx_rto_expire(unsigned long data) if (hc->tx_rto > DCCP_RTO_MAX) hc->tx_rto = DCCP_RTO_MAX; - sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); - /* adjust pipe, cwnd etc */ hc->tx_ssthresh = hc->tx_cwnd / 2; if (hc->tx_ssthresh < 2) @@ -146,6 +142,12 @@ static void ccid2_hc_tx_rto_expire(unsigned long data) hc->tx_rpseq = 0; hc->tx_rpdupack = -1; ccid2_change_l_ack_ratio(sk, 1); + + /* if we were blocked before, we may now send cwnd=1 packet */ + if (sender_was_blocked) + tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); + /* restart backed-off timer */ + sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); out: bh_unlock_sock(sk); sock_put(sk); @@ -434,6 +436,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) { struct dccp_sock *dp = dccp_sk(sk); struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk); + const bool sender_was_blocked = ccid2_cwnd_network_limited(hc); u64 ackno, seqno; struct ccid2_seq *seqp; unsigned char *vector; @@ -631,6 +634,10 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) sk_stop_timer(sk, &hc->tx_rtotimer); else sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto); + + /* check if incoming Acks allow pending packets to be sent */ + if (sender_was_blocked && !ccid2_cwnd_network_limited(hc)) + tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet); } static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk) diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h index 
9731c2dc148..25cb6b216ed 100644 --- a/net/dccp/ccids/ccid2.h +++ b/net/dccp/ccids/ccid2.h @@ -81,6 +81,11 @@ struct ccid2_hc_tx_sock { u64 tx_high_ack; }; +static inline bool ccid2_cwnd_network_limited(struct ccid2_hc_tx_sock *hc) +{ + return hc->tx_pipe >= hc->tx_cwnd; +} + struct ccid2_hc_rx_sock { int rx_data; }; diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index 3060a60ed5a..3d604e1349c 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c @@ -268,11 +268,11 @@ out: sock_put(sk); } -/* - * returns - * > 0: delay (in msecs) that should pass before actually sending - * = 0: can send immediately - * < 0: error condition; do not send packet +/** + * ccid3_hc_tx_send_packet - Delay-based dequeueing of TX packets + * @skb: next packet candidate to send on @sk + * This function uses the convention of ccid_packet_dequeue_eval() and + * returns a millisecond-delay value between 0 and t_mbi = 64000 msec. */ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) { @@ -348,7 +348,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) /* set the nominal send time for the next following packet */ hc->tx_t_nom = ktime_add_us(hc->tx_t_nom, hc->tx_t_ipi); - return 0; + return CCID_PACKET_SEND_AT_ONCE; } static void ccid3_hc_tx_packet_sent(struct sock *sk, unsigned int len) diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 3eb264b6082..a8ed459508b 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -243,8 +243,9 @@ extern void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, extern void dccp_send_sync(struct sock *sk, const u64 seq, const enum dccp_pkt_type pkt_type); -extern void dccp_write_xmit(struct sock *sk, int block); -extern void dccp_write_space(struct sock *sk); +extern void dccp_write_xmit(struct sock *sk); +extern void dccp_write_space(struct sock *sk); +extern void dccp_flush_write_queue(struct sock *sk, long *time_budget); extern void dccp_init_xmit_timers(struct sock *sk); static inline void dccp_clear_xmit_timers(struct sock *sk) diff --git a/net/dccp/output.c b/net/dccp/output.c index a988fe9ffcb..45b91853f5a 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c @@ -209,108 +209,150 @@ void dccp_write_space(struct sock *sk) } /** - * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet + * dccp_wait_for_ccid - Await CCID send permission * @sk: socket to wait for - * @skb: current skb to pass on for waiting - * @delay: sleep timeout in milliseconds (> 0) - * This function is called by default when the socket is closed, and - * when a non-zero linger time is set on the socket. For consistency + * @delay: timeout in jiffies + * This is used by CCIDs which need to delay the send time in process context. */ -static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay) +static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay) { - struct dccp_sock *dp = dccp_sk(sk); DEFINE_WAIT(wait); - unsigned long jiffdelay; - int rc; + long remaining; + + prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); + sk->sk_write_pending++; + release_sock(sk); + + remaining = schedule_timeout(delay); + + lock_sock(sk); + sk->sk_write_pending--; + finish_wait(sk_sleep(sk), &wait); + + if (signal_pending(current) || sk->sk_err) + return -1; + return remaining; +} + +/** + * dccp_xmit_packet - Send data packet under control of CCID + * Transmits next-queued payload and informs CCID to account for the packet. 
+ */ +static void dccp_xmit_packet(struct sock *sk) +{ + int err, len; + struct dccp_sock *dp = dccp_sk(sk); + struct sk_buff *skb = skb_dequeue(&sk->sk_write_queue); - do { - dccp_pr_debug("delayed send by %d msec\n", delay); - jiffdelay = msecs_to_jiffies(delay); + if (unlikely(skb == NULL)) + return; + len = skb->len; - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); + if (sk->sk_state == DCCP_PARTOPEN) { + const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD; + /* + * See 8.1.5 - Handshake Completion. + * + * For robustness we resend Confirm options until the client has + * entered OPEN. During the initial feature negotiation, the MPS + * is smaller than usual, reduced by the Change/Confirm options. + */ + if (!list_empty(&dp->dccps_featneg) && len > cur_mps) { + DCCP_WARN("Payload too large (%d) for featneg.\n", len); + dccp_send_ack(sk); + dccp_feat_list_purge(&dp->dccps_featneg); + } - sk->sk_write_pending++; - release_sock(sk); - schedule_timeout(jiffdelay); - lock_sock(sk); - sk->sk_write_pending--; + inet_csk_schedule_ack(sk); + inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, + inet_csk(sk)->icsk_rto, + DCCP_RTO_MAX); + DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK; + } else if (dccp_ack_pending(sk)) { + DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK; + } else { + DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA; + } + + err = dccp_transmit_skb(sk, skb); + if (err) + dccp_pr_debug("transmit_skb() returned err=%d\n", err); + /* + * Register this one as sent even if an error occurred. To the remote + * end a local packet drop is indistinguishable from network loss, i.e. + * any local drop will eventually be reported via receiver feedback. + */ + ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len); +} - if (sk->sk_err) - goto do_error; - if (signal_pending(current)) - goto do_interrupted; +/** + * dccp_flush_write_queue - Drain queue at end of connection + * Since dccp_sendmsg queues packets without waiting for them to be sent, it may + * happen that the TX queue is not empty at the end of a connection. We give the + * HC-sender CCID a grace period of up to @time_budget jiffies. If this function + * returns with a non-empty write queue, it will be purged later. + */ +void dccp_flush_write_queue(struct sock *sk, long *time_budget) +{ + struct dccp_sock *dp = dccp_sk(sk); + struct sk_buff *skb; + long delay, rc; + while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) { rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); - } while ((delay = rc) > 0); -out: - finish_wait(sk_sleep(sk), &wait); - return rc; - -do_error: - rc = -EPIPE; - goto out; -do_interrupted: - rc = -EINTR; - goto out; + + switch (ccid_packet_dequeue_eval(rc)) { + case CCID_PACKET_WILL_DEQUEUE_LATER: + /* + * If the CCID determines when to send, the next sending + * time is unknown or the CCID may not even send again + * (e.g. remote host crashes or lost Ack packets). 
+ */ + DCCP_WARN("CCID did not manage to send all packets\n"); + return; + case CCID_PACKET_DELAY: + delay = msecs_to_jiffies(rc); + if (delay > *time_budget) + return; + rc = dccp_wait_for_ccid(sk, delay); + if (rc < 0) + return; + *time_budget -= (delay - rc); + /* check again if we can send now */ + break; + case CCID_PACKET_SEND_AT_ONCE: + dccp_xmit_packet(sk); + break; + case CCID_PACKET_ERR: + skb_dequeue(&sk->sk_write_queue); + kfree_skb(skb); + dccp_pr_debug("packet discarded due to err=%ld\n", rc); + } + } } -void dccp_write_xmit(struct sock *sk, int block) +void dccp_write_xmit(struct sock *sk) { struct dccp_sock *dp = dccp_sk(sk); struct sk_buff *skb; while ((skb = skb_peek(&sk->sk_write_queue))) { - int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); - - if (err > 0) { - if (!block) { - sk_reset_timer(sk, &dp->dccps_xmit_timer, - msecs_to_jiffies(err)+jiffies); - break; - } else - err = dccp_wait_for_ccid(sk, skb, err); - if (err && err != -EINTR) - DCCP_BUG("err=%d after dccp_wait_for_ccid", err); - } + int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); - skb_dequeue(&sk->sk_write_queue); - if (err == 0) { - struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); - const int len = skb->len; - - if (sk->sk_state == DCCP_PARTOPEN) { - const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD; - /* - * See 8.1.5 - Handshake Completion. - * - * For robustness we resend Confirm options until the client has - * entered OPEN. During the initial feature negotiation, the MPS - * is smaller than usual, reduced by the Change/Confirm options. - */ - if (!list_empty(&dp->dccps_featneg) && len > cur_mps) { - DCCP_WARN("Payload too large (%d) for featneg.\n", len); - dccp_send_ack(sk); - dccp_feat_list_purge(&dp->dccps_featneg); - } - - inet_csk_schedule_ack(sk); - inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, - inet_csk(sk)->icsk_rto, - DCCP_RTO_MAX); - dcb->dccpd_type = DCCP_PKT_DATAACK; - } else if (dccp_ack_pending(sk)) - dcb->dccpd_type = DCCP_PKT_DATAACK; - else - dcb->dccpd_type = DCCP_PKT_DATA; - - err = dccp_transmit_skb(sk, skb); - ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len); - if (err) - DCCP_BUG("err=%d after ccid_hc_tx_packet_sent", - err); - } else { - dccp_pr_debug("packet discarded due to err=%d\n", err); + switch (ccid_packet_dequeue_eval(rc)) { + case CCID_PACKET_WILL_DEQUEUE_LATER: + return; + case CCID_PACKET_DELAY: + sk_reset_timer(sk, &dp->dccps_xmit_timer, + jiffies + msecs_to_jiffies(rc)); + return; + case CCID_PACKET_SEND_AT_ONCE: + dccp_xmit_packet(sk); + break; + case CCID_PACKET_ERR: + skb_dequeue(&sk->sk_write_queue); kfree_skb(skb); + dccp_pr_debug("packet discarded due to err=%d\n", rc); } } } @@ -622,7 +664,6 @@ void dccp_send_close(struct sock *sk, const int active) DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE; if (active) { - dccp_write_xmit(sk, 1); dccp_skb_entail(sk, skb); dccp_transmit_skb(sk, skb_clone(skb, prio)); /* diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 7e5fc04eb6d..ef343d53fce 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -726,7 +726,13 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, goto out_discard; skb_queue_tail(&sk->sk_write_queue, skb); - dccp_write_xmit(sk,0); + /* + * The xmit_timer is set if the TX CCID is rate-based and will expire + * when congestion control permits to release further packets into the + * network. Window-based CCIDs do not use this timer. 
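+ *
+ * (Editor's note, inferred from the rest of this series: CCID-3/TFRC is
+ *  the rate-based case and arms dccps_xmit_timer via CCID_PACKET_DELAY,
+ *  while the window-based CCID-2 instead schedules dccps_xmitlet from its
+ *  Ack and RTO handlers once the congestion window opens again.)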
+ */ + if (!timer_pending(&dp->dccps_xmit_timer)) + dccp_write_xmit(sk); out_release: release_sock(sk); return rc ? : len; @@ -951,9 +957,22 @@ void dccp_close(struct sock *sk, long timeout) /* Check zero linger _after_ checking for unread data. */ sk->sk_prot->disconnect(sk, 0); } else if (sk->sk_state != DCCP_CLOSED) { + /* + * Normal connection termination. May need to wait if there are + * still packets in the TX queue that are delayed by the CCID. + */ + dccp_flush_write_queue(sk, &timeout); dccp_terminate_connection(sk); } + /* + * Flush write queue. This may be necessary in several cases: + * - we have been closed by the peer but still have application data; + * - abortive termination (unread data or zero linger time), + * - normal termination but queue could not be flushed within time limit + */ + __skb_queue_purge(&sk->sk_write_queue); + sk_stream_wait_close(sk, timeout); adjudge_to_death: diff --git a/net/dccp/timer.c b/net/dccp/timer.c index 1a9aa05d4dc..7587870b704 100644 --- a/net/dccp/timer.c +++ b/net/dccp/timer.c @@ -237,32 +237,35 @@ out: sock_put(sk); } -/* Transmit-delay timer: used by the CCIDs to delay actual send time */ -static void dccp_write_xmit_timer(unsigned long data) +/** + * dccp_write_xmitlet - Workhorse for CCID packet dequeueing interface + * See the comments above %ccid_dequeueing_decision for supported modes. + */ +static void dccp_write_xmitlet(unsigned long data) { struct sock *sk = (struct sock *)data; - struct dccp_sock *dp = dccp_sk(sk); bh_lock_sock(sk); if (sock_owned_by_user(sk)) - sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies+1); + sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1); else - dccp_write_xmit(sk, 0); + dccp_write_xmit(sk); bh_unlock_sock(sk); - sock_put(sk); } -static void dccp_init_write_xmit_timer(struct sock *sk) +static void dccp_write_xmit_timer(unsigned long data) { - struct dccp_sock *dp = dccp_sk(sk); - - setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer, - (unsigned long)sk); + dccp_write_xmitlet(data); + sock_put((struct sock *)data); } void dccp_init_xmit_timers(struct sock *sk) { - dccp_init_write_xmit_timer(sk); + struct dccp_sock *dp = dccp_sk(sk); + + tasklet_init(&dp->dccps_xmitlet, dccp_write_xmitlet, (unsigned long)sk); + setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer, + (unsigned long)sk); inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer, &dccp_keepalive_timer); } diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 36e27c2107d..eb6f69a8f27 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -1052,7 +1052,7 @@ static void ip_fib_net_exit(struct net *net) hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) { hlist_del(node); fib_table_flush(tb); - kfree(tb); + fib_free_table(tb); } } kfree(net->ipv4.fib_table_hash); diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c index b232375a0b7..b3acb0417b2 100644 --- a/net/ipv4/fib_hash.c +++ b/net/ipv4/fib_hash.c @@ -716,6 +716,24 @@ int fib_table_flush(struct fib_table *tb) return found; } +void fib_free_table(struct fib_table *tb) +{ + struct fn_hash *table = (struct fn_hash *) tb->tb_data; + struct fn_zone *fz, *next; + + next = table->fn_zone_list; + while (next != NULL) { + fz = next; + next = fz->fz_next; + + if (fz->fz_hash != fz->fz_embedded_hash) + fz_hash_free(fz->fz_hash, fz->fz_divisor); + + kfree(fz); + } + + kfree(tb); +} static inline int fn_hash_dump_bucket(struct sk_buff *skb, struct netlink_callback *cb, diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c 
index b1445089510..200eb538fbb 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1797,6 +1797,11 @@ int fib_table_flush(struct fib_table *tb) return found; } +void fib_free_table(struct fib_table *tb) +{ + kfree(tb); +} + void fib_table_select_default(struct fib_table *tb, const struct flowi *flp, struct fib_result *res) diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 01087e035b7..70ff77f02ee 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -1325,7 +1325,6 @@ static void ipgre_fb_tunnel_init(struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); struct iphdr *iph = &tunnel->parms.iph; - struct ipgre_net *ign = net_generic(dev_net(dev), ipgre_net_id); tunnel->dev = dev; strcpy(tunnel->parms.name, dev->name); @@ -1336,7 +1335,6 @@ static void ipgre_fb_tunnel_init(struct net_device *dev) tunnel->hlen = sizeof(struct iphdr) + 4; dev_hold(dev); - rcu_assign_pointer(ign->tunnels_wc[0], tunnel); } @@ -1383,10 +1381,12 @@ static int __net_init ipgre_init_net(struct net *net) if ((err = register_netdev(ign->fb_tunnel_dev))) goto err_reg_dev; + rcu_assign_pointer(ign->tunnels_wc[0], + netdev_priv(ign->fb_tunnel_dev)); return 0; err_reg_dev: - free_netdev(ign->fb_tunnel_dev); + ipgre_dev_free(ign->fb_tunnel_dev); err_alloc_dev: return err; } diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index d082eaeefa2..24b3558b8e6 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -126,6 +126,8 @@ static const struct snmp_mib snmp6_udp6_list[] = { SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS), SNMP_MIB_ITEM("Udp6InErrors", UDP_MIB_INERRORS), SNMP_MIB_ITEM("Udp6OutDatagrams", UDP_MIB_OUTDATAGRAMS), + SNMP_MIB_ITEM("Udp6RcvbufErrors", UDP_MIB_RCVBUFERRORS), + SNMP_MIB_ITEM("Udp6SndbufErrors", UDP_MIB_SNDBUFERRORS), SNMP_MIB_SENTINEL }; @@ -134,6 +136,8 @@ static const struct snmp_mib snmp6_udplite6_list[] = { SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS), SNMP_MIB_ITEM("UdpLite6InErrors", UDP_MIB_INERRORS), SNMP_MIB_ITEM("UdpLite6OutDatagrams", UDP_MIB_OUTDATAGRAMS), + SNMP_MIB_ITEM("UdpLite6RcvbufErrors", UDP_MIB_RCVBUFERRORS), + SNMP_MIB_ITEM("UdpLite6SndbufErrors", UDP_MIB_SNDBUFERRORS), SNMP_MIB_SENTINEL }; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 25661f968f3..fc328339be9 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2741,6 +2741,7 @@ static void __net_exit ip6_route_net_exit(struct net *net) kfree(net->ipv6.ip6_prohibit_entry); kfree(net->ipv6.ip6_blk_hole_entry); #endif + dst_entries_destroy(&net->ipv6.ip6_dst_ops); } static struct pernet_operations ip6_route_net_ops = { @@ -2832,5 +2833,6 @@ void ip6_route_cleanup(void) xfrm6_fini(); fib6_gc_cleanup(); unregister_pernet_subsys(&ip6_route_net_ops); + dst_entries_destroy(&ip6_dst_blackhole_ops); kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep); } diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index 104ec3b283d..b8dbae82fab 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -249,7 +249,7 @@ static int l2tp_dfs_seq_open(struct inode *inode, struct file *file) struct seq_file *seq; int rc = -ENOMEM; - pd = kzalloc(GFP_KERNEL, sizeof(*pd)); + pd = kzalloc(sizeof(*pd), GFP_KERNEL); if (pd == NULL) goto out; diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c index 4aa47d074a7..1243d1db5c5 100644 --- a/net/mac80211/debugfs_key.c +++ b/net/mac80211/debugfs_key.c @@ -203,9 +203,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct ieee80211_key *key = file->private_data; - int 
i, res, bufsize = 2 * key->conf.keylen + 2; + int i, bufsize = 2 * key->conf.keylen + 2; char *buf = kmalloc(bufsize, GFP_KERNEL); char *p = buf; + ssize_t res; + + if (!buf) + return -ENOMEM; for (i = 0; i < key->conf.keylen; i++) p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]); diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 6b322fa681f..107a0cbe52a 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -677,10 +677,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) /* * Calculate scan IE length -- we need this to alloc * memory and to subtract from the driver limit. It - * includes the (extended) supported rates and HT + * includes the DS Params, (extended) supported rates, and HT * information -- SSID is the driver's responsibility. */ - local->scan_ies_len = 4 + max_bitrates; /* (ext) supp rates */ + local->scan_ies_len = 4 + max_bitrates /* (ext) supp rates */ + + 3 /* DS Params */; if (supp_ht) local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap); diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index d94a858dc52..00d6ae83830 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c @@ -195,7 +195,7 @@ socket_mt4_v1(const struct sk_buff *skb, struct xt_action_param *par) static int extract_icmp6_fields(const struct sk_buff *skb, unsigned int outside_hdrlen, - u8 *protocol, + int *protocol, struct in6_addr **raddr, struct in6_addr **laddr, __be16 *rport, @@ -252,8 +252,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) struct sock *sk; struct in6_addr *daddr, *saddr; __be16 dport, sport; - int thoff; - u8 tproto; + int thoff, tproto; const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo; tproto = ipv6_find_hdr(skb, &thoff, -1, NULL); @@ -305,7 +304,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) sk = NULL; } - pr_debug("proto %hhu %pI6:%hu -> %pI6:%hu " + pr_debug("proto %hhd %pI6:%hu -> %pI6:%hu " "(orig %pI6:%hu) sock %p\n", tproto, saddr, ntohs(sport), daddr, ntohs(dport), diff --git a/net/rds/loop.c b/net/rds/loop.c index c390156b426..aeec1d483b1 100644 --- a/net/rds/loop.c +++ b/net/rds/loop.c @@ -134,8 +134,12 @@ static int rds_loop_conn_alloc(struct rds_connection *conn, gfp_t gfp) static void rds_loop_conn_free(void *arg) { struct rds_loop_connection *lc = arg; + unsigned long flags; + rdsdebug("lc %p\n", lc); + spin_lock_irqsave(&loop_conns_lock, flags); list_del(&lc->loop_node); + spin_unlock_irqrestore(&loop_conns_lock, flags); kfree(lc); } diff --git a/net/rds/message.c b/net/rds/message.c index a84545dae37..848cff45183 100644 --- a/net/rds/message.c +++ b/net/rds/message.c @@ -224,6 +224,9 @@ struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents) WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs); WARN_ON(!nents); + if (rm->m_used_sgs + nents > rm->m_total_sgs) + return NULL; + sg_ret = &sg_first[rm->m_used_sgs]; sg_init_table(sg_ret, nents); rm->m_used_sgs += nents; @@ -246,6 +249,8 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len); rm->data.op_nents = ceil(total_len, PAGE_SIZE); rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); + if (!rm->data.op_sg) + return ERR_PTR(-ENOMEM); for (i = 0; i < rm->data.op_nents; ++i) { sg_set_page(&rm->data.op_sg[i], diff --git a/net/rds/rdma.c b/net/rds/rdma.c index 1a41debca1c..8920f2a8332 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c @@ -479,13 +479,38 @@ void 
rds_atomic_free_op(struct rm_atomic_op *ao) /* - * Count the number of pages needed to describe an incoming iovec. + * Count the number of pages needed to describe an incoming iovec array. */ -static int rds_rdma_pages(struct rds_rdma_args *args) +static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs) +{ + int tot_pages = 0; + unsigned int nr_pages; + unsigned int i; + + /* figure out the number of pages in the vector */ + for (i = 0; i < nr_iovecs; i++) { + nr_pages = rds_pages_in_vec(&iov[i]); + if (nr_pages == 0) + return -EINVAL; + + tot_pages += nr_pages; + + /* + * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1, + * so tot_pages cannot overflow without first going negative. + */ + if (tot_pages < 0) + return -EINVAL; + } + + return tot_pages; +} + +int rds_rdma_extra_size(struct rds_rdma_args *args) { struct rds_iovec vec; struct rds_iovec __user *local_vec; - unsigned int tot_pages = 0; + int tot_pages = 0; unsigned int nr_pages; unsigned int i; @@ -502,14 +527,16 @@ static int rds_rdma_pages(struct rds_rdma_args *args) return -EINVAL; tot_pages += nr_pages; - } - return tot_pages; -} + /* + * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1, + * so tot_pages cannot overflow without first going negative. + */ + if (tot_pages < 0) + return -EINVAL; + } -int rds_rdma_extra_size(struct rds_rdma_args *args) -{ - return rds_rdma_pages(args) * sizeof(struct scatterlist); + return tot_pages * sizeof(struct scatterlist); } /* @@ -520,13 +547,12 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, struct cmsghdr *cmsg) { struct rds_rdma_args *args; - struct rds_iovec vec; struct rm_rdma_op *op = &rm->rdma; int nr_pages; unsigned int nr_bytes; struct page **pages = NULL; - struct rds_iovec __user *local_vec; - unsigned int nr; + struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack; + int iov_size; unsigned int i, j; int ret = 0; @@ -546,9 +572,26 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, goto out; } - nr_pages = rds_rdma_pages(args); - if (nr_pages < 0) + /* Check whether to allocate the iovec area */ + iov_size = args->nr_local * sizeof(struct rds_iovec); + if (args->nr_local > UIO_FASTIOV) { + iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL); + if (!iovs) { + ret = -ENOMEM; + goto out; + } + } + + if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) { + ret = -EFAULT; + goto out; + } + + nr_pages = rds_rdma_pages(iovs, args->nr_local); + if (nr_pages < 0) { + ret = -EINVAL; goto out; + } pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) { @@ -564,6 +607,10 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, op->op_recverr = rs->rs_recverr; WARN_ON(!nr_pages); op->op_sg = rds_message_alloc_sgs(rm, nr_pages); + if (!op->op_sg) { + ret = -ENOMEM; + goto out; + } if (op->op_notify || op->op_recverr) { /* We allocate an uninitialized notifier here, because @@ -597,50 +644,40 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, (unsigned long long)args->remote_vec.addr, op->op_rkey); - local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr; - for (i = 0; i < args->nr_local; i++) { - if (copy_from_user(&vec, &local_vec[i], - sizeof(struct rds_iovec))) { - ret = -EFAULT; - goto out; - } - - nr = rds_pages_in_vec(&vec); - if (nr == 0) { - ret = -EINVAL; - goto out; - } + struct rds_iovec *iov = &iovs[i]; + /* don't need to check, rds_rdma_pages() verified nr will be 
+nonzero */ + unsigned int nr = rds_pages_in_vec(iov); - rs->rs_user_addr = vec.addr; - rs->rs_user_bytes = vec.bytes; + rs->rs_user_addr = iov->addr; + rs->rs_user_bytes = iov->bytes; /* If it's a WRITE operation, we want to pin the pages for reading. * If it's a READ operation, we need to pin the pages for writing. */ - ret = rds_pin_pages(vec.addr, nr, pages, !op->op_write); + ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write); if (ret < 0) goto out; - rdsdebug("RDS: nr_bytes %u nr %u vec.bytes %llu vec.addr %llx\n", - nr_bytes, nr, vec.bytes, vec.addr); + rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n", + nr_bytes, nr, iov->bytes, iov->addr); - nr_bytes += vec.bytes; + nr_bytes += iov->bytes; for (j = 0; j < nr; j++) { - unsigned int offset = vec.addr & ~PAGE_MASK; + unsigned int offset = iov->addr & ~PAGE_MASK; struct scatterlist *sg; sg = &op->op_sg[op->op_nents + j]; sg_set_page(sg, pages[j], - min_t(unsigned int, vec.bytes, PAGE_SIZE - offset), + min_t(unsigned int, iov->bytes, PAGE_SIZE - offset), offset); - rdsdebug("RDS: sg->offset %x sg->len %x vec.addr %llx vec.bytes %llu\n", - sg->offset, sg->length, vec.addr, vec.bytes); + rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n", + sg->offset, sg->length, iov->addr, iov->bytes); - vec.addr += sg->length; - vec.bytes -= sg->length; + iov->addr += sg->length; + iov->bytes -= sg->length; } op->op_nents += nr; @@ -655,13 +692,14 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, } op->op_bytes = nr_bytes; - ret = 0; out: + if (iovs != iovstack) + sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size); kfree(pages); if (ret) rds_rdma_free_op(op); - - rds_stats_inc(s_send_rdma); + else + rds_stats_inc(s_send_rdma); return ret; } @@ -773,6 +811,10 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm, rm->atomic.op_active = 1; rm->atomic.op_recverr = rs->rs_recverr; rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1); + if (!rm->atomic.op_sg) { + ret = -ENOMEM; + goto err; + } /* verify 8 byte-aligned */ if (args->local_addr & 0x7) { diff --git a/net/rds/send.c b/net/rds/send.c index 0bc9db17a87..35b9c2e9caf 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -973,6 +973,10 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, /* Attach data to the rm */ if (payload_len) { rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE)); + if (!rm->data.op_sg) { + ret = -ENOMEM; + goto out; + } ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len); if (ret) goto out; diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 08a8c6cf2d1..8e0a32001c9 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -221,7 +221,13 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp) static void rds_tcp_conn_free(void *arg) { struct rds_tcp_connection *tc = arg; + unsigned long flags; rdsdebug("freeing tc %p\n", tc); + + spin_lock_irqsave(&rds_tcp_conn_lock, flags); + list_del(&tc->t_tcp_node); + spin_unlock_irqrestore(&rds_tcp_conn_lock, flags); + kmem_cache_free(rds_tcp_conn_slab, tc); } diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index 37dff78e9cb..d49c40fb7e0 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c @@ -34,8 +34,6 @@ struct cgroup_subsys net_cls_subsys = { .populate = cgrp_populate, #ifdef CONFIG_NET_CLS_CGROUP .subsys_id = net_cls_subsys_id, -#else -#define net_cls_subsys_id net_cls_subsys.subsys_id #endif .module = THIS_MODULE, }; diff --git a/net/sched/em_text.c b/net/sched/em_text.c 
index 76325325741..ea8f566e720 100644 --- a/net/sched/em_text.c +++ b/net/sched/em_text.c @@ -103,7 +103,8 @@ retry: static void em_text_destroy(struct tcf_proto *tp, struct tcf_ematch *m) { - textsearch_destroy(EM_TEXT_PRIV(m)->config); + if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config) + textsearch_destroy(EM_TEXT_PRIV(m)->config); } static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m) diff --git a/net/socket.c b/net/socket.c index abf3e256152..2808b4db46e 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1652,6 +1652,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, struct iovec iov; int fput_needed; + if (len > INT_MAX) + len = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; @@ -1709,6 +1711,8 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, int err, err2; int fput_needed; + if (size > INT_MAX) + size = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c index 771bab00754..3a8c4c419cd 100644 --- a/net/x25/x25_facilities.c +++ b/net/x25/x25_facilities.c @@ -134,15 +134,15 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, case X25_FAC_CLASS_D: switch (*p) { case X25_FAC_CALLING_AE: - if (p[1] > X25_MAX_DTE_FACIL_LEN) - break; + if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1) + return 0; dte_facs->calling_len = p[2]; memcpy(dte_facs->calling_ae, &p[3], p[1] - 1); *vc_fac_mask |= X25_MASK_CALLING_AE; break; case X25_FAC_CALLED_AE: - if (p[1] > X25_MAX_DTE_FACIL_LEN) - break; + if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1) + return 0; dte_facs->called_len = p[2]; memcpy(dte_facs->called_ae, &p[3], p[1] - 1); *vc_fac_mask |= X25_MASK_CALLED_AE; diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index 63178961efa..f729f022be6 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c @@ -119,6 +119,8 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp &x25->vc_facil_mask); if (len > 0) skb_pull(skb, len); + else + return -1; /* * Copy any Call User Data. */