The following commit has been merged in the master branch:

commit d98cae64e4a733ff377184d78aa0b1f2b54faede
Merge: 646093a29f85630d8efe2aa38fa585d2c3ea2e46 4067c666f2dccf56f5db5c182713e68c40d46013
Author: David S. Miller <davem@davemloft.net>
Date:   Wed Jun 19 16:49:39 2013 -0700
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/wireless/ath/ath9k/Kconfig
	drivers/net/xen-netback/netback.c
	net/batman-adv/bat_iv_ogm.c
	net/wireless/nl80211.c
The ath9k Kconfig conflict was a change of a Kconfig option name right next to the deletion of another option.
The xen-netback conflict was overlapping changes involving the handling of the notify list in xen_netbk_rx_action().
The batman-adv conflict resolution was provided by Antonio Quartulli; basically, keep everything from both conflict hunks.
The nl80211 conflict is a little more involved. In 'net' we added a dynamic memory allocation to nl80211_dump_wiphy() to fix a race that Linus reported. Meanwhile, in 'net-next' the handlers were converted to use pre- and post-doit handlers which use a flag to determine whether to hold the RTNL mutex around the operation.
However, the dump handlers do not use this logic. Instead they have to do the locking explicitly. There were apparent bugs in the conversion of nl80211_dump_wiphy(), in that we were not dropping the RTNL mutex in all the return paths, and it seems we very much should be doing so. So I fixed that whilst handling the overlapping changes.
To simplify the initial returns, I take the RTNL mutex after we try to allocate 'tb'.
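As a rough illustration (a minimal sketch, not the actual nl80211 code; example_dump(), EXAMPLE_ATTR_MAX and example_dump_locked() are hypothetical names), the resulting locking pattern looks like this:

#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>

#define EXAMPLE_ATTR_MAX 8	/* hypothetical attribute count */

/* Hypothetical helper that does the actual dumping while the RTNL is held. */
static int example_dump_locked(struct sk_buff *skb,
			       struct netlink_callback *cb,
			       struct nlattr **tb)
{
	return 0;
}

/* Allocate 'tb' before taking the RTNL so the early error return needs no
 * unlock, then drop the RTNL before every later return path.
 */
static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr **tb;
	int ret;

	tb = kcalloc(EXAMPLE_ATTR_MAX + 1, sizeof(*tb), GFP_KERNEL);
	if (!tb)
		return -ENOMEM;		/* nothing locked yet */

	rtnl_lock();
	ret = example_dump_locked(skb, cb, tb);
	rtnl_unlock();			/* dropped on every remaining path */

	kfree(tb);
	return ret;
}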
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --combined MAINTAINERS index 0518ec4,5be702c..60d6a33 --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -2299,11 -2299,6 +2299,11 @@@ M: Jaya Kumar <jayakumar.alsa@gmail.com S: Maintained F: sound/pci/cs5535audio/
+CW1200 WLAN driver +M: Solomon Peachy pizza@shaftnet.org +S: Maintained +F: drivers/net/wireless/cw1200/ + CX18 VIDEO4LINUX DRIVER M: Andy Walls awalls@md.metrocast.net L: ivtv-devel@ivtvdriver.org (moderated for non-subscribers) @@@ -2895,8 -2890,8 +2895,8 @@@ F: drivers/media/dvb-frontends/ec100
ECRYPT FILE SYSTEM M: Tyler Hicks tyhicks@canonical.com - M: Dustin Kirkland dustin.kirkland@gazzang.com L: ecryptfs@vger.kernel.org + W: http://ecryptfs.org W: https://launchpad.net/ecryptfs S: Supported F: Documentation/filesystems/ecryptfs.txt @@@ -4453,6 -4448,16 +4453,16 @@@ S: Maintaine F: drivers/scsi/*iscsi* F: include/scsi/*iscsi*
+ ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR + M: Or Gerlitz ogerlitz@mellanox.com + M: Roi Dayan roid@mellanox.com + L: linux-rdma@vger.kernel.org + S: Supported + W: http://www.openfabrics.org + W: www.open-iscsi.org + Q: http://patchwork.kernel.org/project/linux-rdma/list/ + F: drivers/infiniband/ulp/iser + ISDN SUBSYSTEM M: Karsten Keil isdn@linux-pingi.de L: isdn4linux@listserv.isdn4linux.de (subscribers-only) @@@ -5761,7 -5766,7 +5771,7 @@@ M: Matthew Wilcox <willy@linux.intel.co L: linux-nvme@lists.infradead.org T: git git://git.infradead.org/users/willy/linux-nvme.git S: Supported - F: drivers/block/nvme.c + F: drivers/block/nvme* F: include/linux/nvme.h
OMAP SUPPORT @@@ -7619,7 -7624,7 +7629,7 @@@ F: drivers/clk/spear SPI SUBSYSTEM M: Mark Brown broonie@kernel.org M: Grant Likely grant.likely@linaro.org - L: spi-devel-general@lists.sourceforge.net + L: linux-spi@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git Q: http://patchwork.kernel.org/project/spi-devel-general/list/ S: Maintained @@@ -8999,7 -9004,7 +9009,7 @@@ S: Maintaine F: drivers/net/wireless/wl3501*
WM97XX TOUCHSCREEN DRIVERS - M: Mark Brown broonie@opensource.wolfsonmicro.com + M: Mark Brown broonie@kernel.org M: Liam Girdwood lrg@slimlogic.co.uk L: linux-input@vger.kernel.org T: git git://opensource.wolfsonmicro.com/linux-2.6-touch @@@ -9009,7 -9014,6 +9019,6 @@@ F: drivers/input/touchscreen/*wm97 F: include/linux/wm97xx.h
WOLFSON MICROELECTRONICS DRIVERS - M: Mark Brown broonie@opensource.wolfsonmicro.com L: patches@opensource.wolfsonmicro.com T: git git://opensource.wolfsonmicro.com/linux-2.6-asoc T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus diff --combined drivers/net/bonding/bond_main.c index bc1246f,02d9ae7..3b31c19 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@@ -706,6 -706,45 +706,6 @@@ static int bond_set_allmulti(struct bon return err; }
-/* - * Add a Multicast address to slaves - * according to mode - */ -static void bond_mc_add(struct bonding *bond, void *addr) -{ - if (USES_PRIMARY(bond->params.mode)) { - /* write lock already acquired */ - if (bond->curr_active_slave) - dev_mc_add(bond->curr_active_slave->dev, addr); - } else { - struct slave *slave; - int i; - - bond_for_each_slave(bond, slave, i) - dev_mc_add(slave->dev, addr); - } -} - -/* - * Remove a multicast address from slave - * according to mode - */ -static void bond_mc_del(struct bonding *bond, void *addr) -{ - if (USES_PRIMARY(bond->params.mode)) { - /* write lock already acquired */ - if (bond->curr_active_slave) - dev_mc_del(bond->curr_active_slave->dev, addr); - } else { - struct slave *slave; - int i; - bond_for_each_slave(bond, slave, i) { - dev_mc_del(slave->dev, addr); - } - } -} - - static void __bond_resend_igmp_join_requests(struct net_device *dev) { struct in_device *in_dev; @@@ -725,8 -764,8 +725,8 @@@ static void bond_resend_igmp_join_reque struct net_device *bond_dev, *vlan_dev, *upper_dev; struct vlan_entry *vlan;
- rcu_read_lock(); read_lock(&bond->lock); + rcu_read_lock();
bond_dev = bond->dev;
@@@ -748,12 -787,19 +748,19 @@@ if (vlan_dev) __bond_resend_igmp_join_requests(vlan_dev); } + rcu_read_unlock();
- if (--bond->igmp_retrans > 0) + /* We use curr_slave_lock to protect against concurrent access to + * igmp_retrans from multiple running instances of this function and + * bond_change_active_slave + */ + write_lock_bh(&bond->curr_slave_lock); + if (bond->igmp_retrans > 1) { + bond->igmp_retrans--; queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); - + } + write_unlock_bh(&bond->curr_slave_lock); read_unlock(&bond->lock); - rcu_read_unlock(); }
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work) @@@ -764,15 -810,17 +771,15 @@@ bond_resend_igmp_join_requests(bond); }
-/* - * flush all members of flush->mc_list from device dev->mc_list +/* Flush bond's hardware addresses from slave */ -static void bond_mc_list_flush(struct net_device *bond_dev, +static void bond_hw_addr_flush(struct net_device *bond_dev, struct net_device *slave_dev) { struct bonding *bond = netdev_priv(bond_dev); - struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(ha, bond_dev) - dev_mc_del(slave_dev, ha->addr); + dev_uc_unsync(slave_dev, bond_dev); + dev_mc_unsync(slave_dev, bond_dev);
if (bond->params.mode == BOND_MODE_8023AD) { /* del lacpdu mc addr from mc list */ @@@ -784,14 -832,22 +791,14 @@@
/*--------------------------- Active slave change ---------------------------*/
-/* - * Update the mc list and multicast-related flags for the new and - * old active slaves (if any) according to the multicast mode, and - * promiscuous flags unconditionally. +/* Update the hardware address list and promisc/allmulti for the new and + * old active slaves (if any). Modes that are !USES_PRIMARY keep all + * slaves up date at all times; only the USES_PRIMARY modes need to call + * this function to swap these settings during a failover. */ -static void bond_mc_swap(struct bonding *bond, struct slave *new_active, - struct slave *old_active) +static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, + struct slave *old_active) { - struct netdev_hw_addr *ha; - - if (!USES_PRIMARY(bond->params.mode)) - /* nothing to do - mc list is already up-to-date on - * all slaves - */ - return; - if (old_active) { if (bond->dev->flags & IFF_PROMISC) dev_set_promiscuity(old_active->dev, -1); @@@ -799,7 -855,10 +806,7 @@@ if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(old_active->dev, -1);
- netif_addr_lock_bh(bond->dev); - netdev_for_each_mc_addr(ha, bond->dev) - dev_mc_del(old_active->dev, ha->addr); - netif_addr_unlock_bh(bond->dev); + bond_hw_addr_flush(bond->dev, old_active->dev); }
if (new_active) { @@@ -811,8 -870,8 +818,8 @@@ dev_set_allmulti(new_active->dev, 1);
netif_addr_lock_bh(bond->dev); - netdev_for_each_mc_addr(ha, bond->dev) - dev_mc_add(new_active->dev, ha->addr); + dev_uc_sync(new_active->dev, bond->dev); + dev_mc_sync(new_active->dev, bond->dev); netif_addr_unlock_bh(bond->dev); } } @@@ -1031,7 -1090,7 +1038,7 @@@ void bond_change_active_slave(struct bo }
if (USES_PRIMARY(bond->params.mode)) - bond_mc_swap(bond, new_active, old_active); + bond_hw_addr_swap(bond, new_active, old_active);
if (bond_is_lb(bond)) { bond_alb_handle_active_change(bond, new_active); @@@ -1474,6 -1533,7 +1481,6 @@@ int bond_enslave(struct net_device *bon struct bonding *bond = netdev_priv(bond_dev); const struct net_device_ops *slave_ops = slave_dev->netdev_ops; struct slave *new_slave = NULL; - struct netdev_hw_addr *ha; struct sockaddr addr; int link_reporting; int res = 0; @@@ -1653,8 -1713,10 +1660,8 @@@ goto err_close; }
- /* If the mode USES_PRIMARY, then the new slave gets the - * master's promisc (and mc) settings only if it becomes the - * curr_active_slave, and that is taken care of later when calling - * bond_change_active() + /* If the mode USES_PRIMARY, then the following is handled by + * bond_change_active_slave(). */ if (!USES_PRIMARY(bond->params.mode)) { /* set promiscuity level to new slave */ @@@ -1672,10 -1734,9 +1679,10 @@@ }
netif_addr_lock_bh(bond_dev); - /* upload master's mc_list to new slave */ - netdev_for_each_mc_addr(ha, bond_dev) - dev_mc_add(slave_dev, ha->addr); + + dev_mc_sync_multiple(slave_dev, bond_dev); + dev_uc_sync_multiple(slave_dev, bond_dev); + netif_addr_unlock_bh(bond_dev); }
@@@ -1854,9 -1915,11 +1861,9 @@@ err_dest_symlinks bond_destroy_slave_symlinks(bond_dev, slave_dev);
err_detach: - if (!USES_PRIMARY(bond->params.mode)) { - netif_addr_lock_bh(bond_dev); - bond_mc_list_flush(bond_dev, slave_dev); - netif_addr_unlock_bh(bond_dev); - } + if (!USES_PRIMARY(bond->params.mode)) + bond_hw_addr_flush(bond_dev, slave_dev); + bond_del_vlans_from_slave(bond, slave_dev); write_lock_bh(&bond->lock); bond_detach_slave(bond, new_slave); @@@ -1901,6 -1964,10 +1908,10 @@@ err_free
err_undo_flags: bond_compute_features(bond); + /* Enslave of first slave has failed and we need to fix master's mac */ + if (bond->slave_cnt == 0 && + ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr)) + eth_hw_addr_random(bond_dev);
return res; } @@@ -2051,8 -2118,9 +2062,8 @@@ static int __bond_release_one(struct ne
bond_del_vlans_from_slave(bond, slave_dev);
- /* If the mode USES_PRIMARY, then we should only remove its - * promisc and mc settings if it was the curr_active_slave, but that was - * already taken care of above when we detached the slave + /* If the mode USES_PRIMARY, then this cases was handled above by + * bond_change_active_slave(..., NULL) */ if (!USES_PRIMARY(bond->params.mode)) { /* unset promiscuity level from slave */ @@@ -2063,7 -2131,10 +2074,7 @@@ if (bond_dev->flags & IFF_ALLMULTI) dev_set_allmulti(slave_dev, -1);
- /* flush master's mc_list from slave */ - netif_addr_lock_bh(bond_dev); - bond_mc_list_flush(bond_dev, slave_dev); - netif_addr_unlock_bh(bond_dev); + bond_hw_addr_flush(bond_dev, slave_dev); }
bond_upper_dev_unlink(bond_dev, slave_dev); @@@ -3217,7 -3288,7 +3228,7 @@@ static int bond_slave_netdev_event(unsi static int bond_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *event_dev = (struct net_device *)ptr; + struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
pr_debug("event_dev: %s, event: %lx\n", event_dev ? event_dev->name : "None", @@@ -3600,6 -3671,19 +3611,6 @@@ static int bond_do_ioctl(struct net_dev return res; }
-static bool bond_addr_in_mc_list(unsigned char *addr, - struct netdev_hw_addr_list *list, - int addrlen) -{ - struct netdev_hw_addr *ha; - - netdev_hw_addr_list_for_each(ha, list) - if (!memcmp(ha->addr, addr, addrlen)) - return true; - - return false; -} - static void bond_change_rx_flags(struct net_device *bond_dev, int change) { struct bonding *bond = netdev_priv(bond_dev); @@@ -3613,29 -3697,35 +3624,29 @@@ bond_dev->flags & IFF_ALLMULTI ? 1 : -1); }
-static void bond_set_multicast_list(struct net_device *bond_dev) +static void bond_set_rx_mode(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); - struct netdev_hw_addr *ha; - bool found; + struct slave *slave; + int i;
read_lock(&bond->lock);
- /* looking for addresses to add to slaves' mc list */ - netdev_for_each_mc_addr(ha, bond_dev) { - found = bond_addr_in_mc_list(ha->addr, &bond->mc_list, - bond_dev->addr_len); - if (!found) - bond_mc_add(bond, ha->addr); - } - - /* looking for addresses to delete from slaves' list */ - netdev_hw_addr_list_for_each(ha, &bond->mc_list) { - found = bond_addr_in_mc_list(ha->addr, &bond_dev->mc, - bond_dev->addr_len); - if (!found) - bond_mc_del(bond, ha->addr); + if (USES_PRIMARY(bond->params.mode)) { + read_lock(&bond->curr_slave_lock); + slave = bond->curr_active_slave; + if (slave) { + dev_uc_sync(slave->dev, bond_dev); + dev_mc_sync(slave->dev, bond_dev); + } + read_unlock(&bond->curr_slave_lock); + } else { + bond_for_each_slave(bond, slave, i) { + dev_uc_sync_multiple(slave->dev, bond_dev); + dev_mc_sync_multiple(slave->dev, bond_dev); + } }
- /* save master's multicast list */ - __hw_addr_flush(&bond->mc_list); - __hw_addr_add_multiple(&bond->mc_list, &bond_dev->mc, - bond_dev->addr_len, NETDEV_HW_ADDR_T_MULTICAST); - read_unlock(&bond->lock); }
@@@ -3780,10 -3870,11 +3791,10 @@@ static int bond_set_mac_address(struct pr_debug("bond=%p, name=%s\n", bond, bond_dev ? bond_dev->name : "None");
- /* - * If fail_over_mac is set to active, do nothing and return - * success. Returning an error causes ifenslave to fail. + /* If fail_over_mac is enabled, do nothing and return success. + * Returning an error causes ifenslave to fail. */ - if (bond->params.fail_over_mac == BOND_FOM_ACTIVE) + if (bond->params.fail_over_mac) return 0;
if (!is_valid_ether_addr(sa->sa_data)) @@@ -4241,7 -4332,7 +4252,7 @@@ static const struct net_device_ops bond .ndo_get_stats64 = bond_get_stats, .ndo_do_ioctl = bond_do_ioctl, .ndo_change_rx_flags = bond_change_rx_flags, - .ndo_set_rx_mode = bond_set_multicast_list, + .ndo_set_rx_mode = bond_set_rx_mode, .ndo_change_mtu = bond_change_mtu, .ndo_set_mac_address = bond_set_mac_address, .ndo_neigh_setup = bond_neigh_setup, @@@ -4346,6 -4437,8 +4357,6 @@@ static void bond_uninit(struct net_devi
bond_debug_unregister(bond);
- __hw_addr_flush(&bond->mc_list); - list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) { list_del(&vlan->vlan_list); kfree(vlan); @@@ -4756,6 -4849,7 +4767,6 @@@ static int bond_init(struct net_device bond->dev_addr_from_first = true; }
- __hw_addr_init(&bond->mc_list); return 0; }
diff --combined drivers/net/bonding/bonding.h index b38609b,f989e15..c990b42 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@@ -225,12 -225,13 +225,12 @@@ struct bonding rwlock_t curr_slave_lock; u8 send_peer_notif; s8 setup_by_slave; - s8 igmp_retrans; + u8 igmp_retrans; #ifdef CONFIG_PROC_FS struct proc_dir_entry *proc_entry; char proc_file_name[IFNAMSIZ]; #endif /* CONFIG_PROC_FS */ struct list_head bond_list; - struct netdev_hw_addr_list mc_list; int (*xmit_hash_policy)(struct sk_buff *, int); u16 rr_tx_counter; struct ad_bond_info ad_info; diff --combined drivers/net/ethernet/broadcom/tg3.c index 297fc13,c777b90..986df04 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@@ -965,6 -965,9 +965,6 @@@ static void tg3_ape_driver_state_change
event = APE_EVENT_STATUS_STATE_UNLOAD; break; - case RESET_KIND_SUSPEND: - event = APE_EVENT_STATUS_STATE_SUSPEND; - break; default: return; } @@@ -1311,8 -1314,8 +1311,8 @@@ static int tg3_phy_toggle_auxctl_smdsp(
if (err) return err; - if (enable)
+ if (enable) val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA; else val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA; @@@ -1736,6 -1739,10 +1736,6 @@@ static void tg3_write_sig_pre_reset(str break; } } - - if (kind == RESET_KIND_INIT || - kind == RESET_KIND_SUSPEND) - tg3_ape_driver_state_change(tp, kind); }
/* tp->lock is held. */ @@@ -1757,6 -1764,9 +1757,6 @@@ static void tg3_write_sig_post_reset(st break; } } - - if (kind == RESET_KIND_SHUTDOWN) - tg3_ape_driver_state_change(tp, kind); }
/* tp->lock is held. */ @@@ -1790,6 -1800,9 +1790,9 @@@ static int tg3_poll_fw(struct tg3 *tp int i; u32 val;
+ if (tg3_flag(tp, NO_FWARE_REPORTED)) + return 0; + if (tg3_flag(tp, IS_SSB_CORE)) { /* We don't use firmware. */ return 0; @@@ -2310,46 -2323,6 +2313,46 @@@ static void tg3_phy_apply_otp(struct tg tg3_phy_toggle_auxctl_smdsp(tp, false); }
+static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee) +{ + u32 val; + struct ethtool_eee *dest = &tp->eee; + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) + return; + + if (eee) + dest = eee; + + if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val)) + return; + + /* Pull eee_active */ + if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || + val == TG3_CL45_D7_EEERES_STAT_LP_100TX) { + dest->eee_active = 1; + } else + dest->eee_active = 0; + + /* Pull lp advertised settings */ + if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val)) + return; + dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); + + /* Pull advertised and eee_enabled settings */ + if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) + return; + dest->eee_enabled = !!val; + dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val); + + /* Pull tx_lpi_enabled */ + val = tr32(TG3_CPMU_EEE_MODE); + dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX); + + /* Pull lpi timer value */ + dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff; +} + static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up) { u32 val; @@@ -2373,8 -2346,11 +2376,8 @@@
tw32(TG3_CPMU_EEE_CTRL, eeectl);
- tg3_phy_cl45_read(tp, MDIO_MMD_AN, - TG3_CL45_D7_EEERES_STAT, &val); - - if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || - val == TG3_CL45_D7_EEERES_STAT_LP_100TX) + tg3_eee_pull_config(tp, NULL); + if (tp->eee.eee_active) tp->setlpicnt = 2; }
@@@ -4196,8 -4172,6 +4199,8 @@@ static int tg3_power_down_prepare(struc
tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
+ tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN); + return 0; }
@@@ -4298,16 -4272,6 +4301,16 @@@ static int tg3_phy_autoneg_cfg(struct t /* Advertise 1000-BaseT EEE ability */ if (advertise & ADVERTISED_1000baseT_Full) val |= MDIO_AN_EEE_ADV_1000T; + + if (!tp->eee.eee_enabled) { + val = 0; + tp->eee.advertised = 0; + } else { + tp->eee.advertised = advertise & + (ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full); + } + err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); if (err) val = 0; @@@ -4552,23 -4516,26 +4555,23 @@@ static int tg3_init_5401phy_dsp(struct
static bool tg3_phy_eee_config_ok(struct tg3 *tp) { - u32 val; - u32 tgtadv = 0; - u32 advertising = tp->link_config.advertising; + struct ethtool_eee eee;
if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) return true;
- if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) - return false; - - val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T); - - - if (advertising & ADVERTISED_100baseT_Full) - tgtadv |= MDIO_AN_EEE_ADV_100TX; - if (advertising & ADVERTISED_1000baseT_Full) - tgtadv |= MDIO_AN_EEE_ADV_1000T; + tg3_eee_pull_config(tp, &eee);
- if (val != tgtadv) - return false; + if (tp->eee.eee_enabled) { + if (tp->eee.advertised != eee.advertised || + tp->eee.tx_lpi_timer != eee.tx_lpi_timer || + tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled) + return false; + } else { + /* EEE is disabled but we're advertising */ + if (eee.advertised) + return false; + }
return true; } @@@ -4669,42 -4636,6 +4672,42 @@@ static void tg3_clear_mac_status(struc udelay(40); }
+static void tg3_setup_eee(struct tg3 *tp) +{ + u32 val; + + val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | + TG3_CPMU_EEE_LNKIDL_UART_IDL; + if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) + val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT; + + tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val); + + tw32_f(TG3_CPMU_EEE_CTRL, + TG3_CPMU_EEE_CTRL_EXIT_20_1_US); + + val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | + (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) | + TG3_CPMU_EEEMD_LPI_IN_RX | + TG3_CPMU_EEEMD_EEE_ENABLE; + + if (tg3_asic_rev(tp) != ASIC_REV_5717) + val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; + + if (tg3_flag(tp, ENABLE_APE)) + val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; + + tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0); + + tw32_f(TG3_CPMU_EEE_DBTMR1, + TG3_CPMU_DBTMR1_PCIEXIT_2047US | + (tp->eee.tx_lpi_timer & 0xffff)); + + tw32_f(TG3_CPMU_EEE_DBTMR2, + TG3_CPMU_DBTMR2_APE_TX_2047US | + TG3_CPMU_DBTMR2_TXIDXEQ_2047US); +} + static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) { bool current_link_up; @@@ -4871,10 -4802,8 +4874,10 @@@ */ if (!eee_config_ok && (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && - !force_reset) + !force_reset) { + tg3_setup_eee(tp); tg3_phy_reset(tp); + } } else { if (!(bmcr & BMCR_ANENABLE) && tp->link_config.speed == current_speed && @@@ -6386,7 -6315,9 +6389,7 @@@ static void tg3_tx_recover(struct tg3 * "Please report the problem to the driver maintainer " "and include system chipset information.\n");
- spin_lock(&tp->lock); tg3_flag_set(tp, TX_RECOVERY_PENDING); - spin_unlock(&tp->lock); }
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) @@@ -9238,9 -9169,11 +9241,9 @@@ static void __tg3_set_coalesce(struct t }
/* tp->lock is held. */ -static void tg3_rings_reset(struct tg3 *tp) +static void tg3_tx_rcbs_disable(struct tg3 *tp) { - int i; - u32 stblk, txrcb, rxrcb, limit; - struct tg3_napi *tnapi = &tp->napi[0]; + u32 txrcb, limit;
/* Disable all transmit rings but the first. */ if (!tg3_flag(tp, 5705_PLUS)) @@@ -9257,33 -9190,7 +9260,33 @@@ txrcb < limit; txrcb += TG3_BDINFO_SIZE) tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED); +} + +/* tp->lock is held. */ +static void tg3_tx_rcbs_init(struct tg3 *tp) +{ + int i = 0; + u32 txrcb = NIC_SRAM_SEND_RCB; + + if (tg3_flag(tp, ENABLE_TSS)) + i++; + + for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (!tnapi->tx_ring) + continue; + + tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, + (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT), + NIC_SRAM_TX_BUFFER_DESC); + } +}
+/* tp->lock is held. */ +static void tg3_rx_ret_rcbs_disable(struct tg3 *tp) +{ + u32 rxrcb, limit;
/* Disable all receive return rings but the first. */ if (tg3_flag(tp, 5717_PLUS)) @@@ -9301,39 -9208,6 +9304,39 @@@ rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED); +} + +/* tp->lock is held. */ +static void tg3_rx_ret_rcbs_init(struct tg3 *tp) +{ + int i = 0; + u32 rxrcb = NIC_SRAM_RCV_RET_RCB; + + if (tg3_flag(tp, ENABLE_RSS)) + i++; + + for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (!tnapi->rx_rcb) + continue; + + tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, + (tp->rx_ret_ring_mask + 1) << + BDINFO_FLAGS_MAXLEN_SHIFT, 0); + } +} + +/* tp->lock is held. */ +static void tg3_rings_reset(struct tg3 *tp) +{ + int i; + u32 stblk; + struct tg3_napi *tnapi = &tp->napi[0]; + + tg3_tx_rcbs_disable(tp); + + tg3_rx_ret_rcbs_disable(tp);
/* Disable interrupts */ tw32_mailbox_f(tp->napi[0].int_mbox, 1); @@@ -9370,6 -9244,9 +9373,6 @@@ tw32_tx_mbox(mbox + i * 8, 0); }
- txrcb = NIC_SRAM_SEND_RCB; - rxrcb = NIC_SRAM_RCV_RET_RCB; - /* Clear status block in ram. */ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
@@@ -9379,20 -9256,46 +9382,20 @@@ tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, ((u64) tnapi->status_mapping & 0xffffffff));
- if (tnapi->tx_ring) { - tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, - (TG3_TX_RING_SIZE << - BDINFO_FLAGS_MAXLEN_SHIFT), - NIC_SRAM_TX_BUFFER_DESC); - txrcb += TG3_BDINFO_SIZE; - } - - if (tnapi->rx_rcb) { - tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, - (tp->rx_ret_ring_mask + 1) << - BDINFO_FLAGS_MAXLEN_SHIFT, 0); - rxrcb += TG3_BDINFO_SIZE; - } - stblk = HOSTCC_STATBLCK_RING1;
for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { u64 mapping = (u64)tnapi->status_mapping; tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); + stblk += 8;
/* Clear status block in ram. */ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); - - if (tnapi->tx_ring) { - tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, - (TG3_TX_RING_SIZE << - BDINFO_FLAGS_MAXLEN_SHIFT), - NIC_SRAM_TX_BUFFER_DESC); - txrcb += TG3_BDINFO_SIZE; - } - - tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, - ((tp->rx_ret_ring_mask + 1) << - BDINFO_FLAGS_MAXLEN_SHIFT), 0); - - stblk += 8; - rxrcb += TG3_BDINFO_SIZE; } + + tg3_tx_rcbs_init(tp); + tg3_rx_ret_rcbs_init(tp); }
static void tg3_setup_rxbd_thresholds(struct tg3 *tp) @@@ -9592,17 -9495,46 +9595,17 @@@ static int tg3_reset_hw(struct tg3 *tp if (tg3_flag(tp, INIT_COMPLETE)) tg3_abort_hw(tp, 1);
if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) { tg3_phy_pull_config(tp); + tg3_eee_pull_config(tp, NULL); tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; }
+ /* Enable MAC control of LPI */ + if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) + tg3_setup_eee(tp); + if (reset_phy) tg3_phy_reset(tp);
@@@ -10475,6 -10407,13 +10478,13 @@@ */ static int tg3_init_hw(struct tg3 *tp, bool reset_phy) { + /* Chip may have been just powered on. If so, the boot code may still + * be running initialization. Wait for it to finish to avoid races in + * accessing the hardware. + */ + tg3_enable_register_access(tp); + tg3_poll_fw(tp); + tg3_switch_clocks(tp);
tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); @@@ -11251,7 -11190,7 +11261,7 @@@ static int tg3_start(struct tg3 *tp, bo */ err = tg3_alloc_consistent(tp); if (err) - goto err_out1; + goto out_ints_fini;
tg3_napi_init(tp);
@@@ -11265,15 -11204,12 +11275,15 @@@ tnapi = &tp->napi[i]; free_irq(tnapi->irq_vec, tnapi); } - goto err_out2; + goto out_napi_fini; } }
tg3_full_lock(tp, 0);
+ if (init) + tg3_ape_driver_state_change(tp, RESET_KIND_INIT); + err = tg3_init_hw(tp, reset_phy); if (err) { tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); @@@ -11283,7 -11219,7 +11293,7 @@@ tg3_full_unlock(tp);
if (err) - goto err_out3; + goto out_free_irq;
if (test_irq && tg3_flag(tp, USING_MSI)) { err = tg3_test_msi(tp); @@@ -11294,7 -11230,7 +11304,7 @@@ tg3_free_rings(tp); tg3_full_unlock(tp);
- goto err_out2; + goto out_napi_fini; }
if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { @@@ -11334,18 -11270,18 +11344,18 @@@
return 0;
-err_out3: +out_free_irq: for (i = tp->irq_cnt - 1; i >= 0; i--) { struct tg3_napi *tnapi = &tp->napi[i]; free_irq(tnapi->irq_vec, tnapi); }
-err_out2: +out_napi_fini: tg3_napi_disable(tp); tg3_napi_fini(tp); tg3_free_consistent(tp);
-err_out1: +out_ints_fini: tg3_ints_fini(tp);
return err; @@@ -13390,13 -13326,11 +13400,13 @@@ static void tg3_self_test(struct net_de struct tg3 *tp = netdev_priv(dev); bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
- if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && - tg3_power_up(tp)) { - etest->flags |= ETH_TEST_FL_FAILED; - memset(data, 1, sizeof(u64) * TG3_NUM_TEST); - return; + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + if (tg3_power_up(tp)) { + etest->flags |= ETH_TEST_FL_FAILED; + memset(data, 1, sizeof(u64) * TG3_NUM_TEST); + return; + } + tg3_ape_driver_state_change(tp, RESET_KIND_INIT); }
memset(data, 0, sizeof(u64) * TG3_NUM_TEST); @@@ -13687,57 -13621,6 +13697,57 @@@ static int tg3_set_coalesce(struct net_ return 0; }
+static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct tg3 *tp = netdev_priv(dev); + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { + netdev_warn(tp->dev, "Board does not support EEE!\n"); + return -EOPNOTSUPP; + } + + if (edata->advertised != tp->eee.advertised) { + netdev_warn(tp->dev, + "Direct manipulation of EEE advertisement is not supported\n"); + return -EINVAL; + } + + if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) { + netdev_warn(tp->dev, + "Maximal Tx Lpi timer supported is %#x(u)\n", + TG3_CPMU_DBTMR1_LNKIDLE_MAX); + return -EINVAL; + } + + tp->eee = *edata; + + tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; + tg3_warn_mgmt_link_flap(tp); + + if (netif_running(tp->dev)) { + tg3_full_lock(tp, 0); + tg3_setup_eee(tp); + tg3_phy_reset(tp); + tg3_full_unlock(tp); + } + + return 0; +} + +static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct tg3 *tp = netdev_priv(dev); + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { + netdev_warn(tp->dev, + "Board does not support EEE!\n"); + return -EOPNOTSUPP; + } + + *edata = tp->eee; + return 0; +} + static const struct ethtool_ops tg3_ethtool_ops = { .get_settings = tg3_get_settings, .set_settings = tg3_set_settings, @@@ -13771,8 -13654,6 +13781,8 @@@ .get_channels = tg3_get_channels, .set_channels = tg3_set_channels, .get_ts_info = tg3_get_ts_info, + .get_eee = tg3_get_eee, + .set_eee = tg3_set_eee, };
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, @@@ -15121,18 -15002,9 +15131,18 @@@ static int tg3_phy_probe(struct tg3 *tp (tg3_asic_rev(tp) == ASIC_REV_5717 && tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) || (tg3_asic_rev(tp) == ASIC_REV_57765 && - tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) + tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) { tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
+ tp->eee.supported = SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full; + tp->eee.advertised = ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full; + tp->eee.eee_enabled = 1; + tp->eee.tx_lpi_enabled = 1; + tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US; + } + tg3_phy_init_link_config(tp);
if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && @@@ -17204,7 -17076,7 +17214,7 @@@ static int tg3_init_one(struct pci_dev { struct net_device *dev; struct tg3 *tp; - int i, err, pm_cap; + int i, err; u32 sndmbx, rcvmbx, intmbx; char str[40]; u64 dma_mask, persist_dma_mask; @@@ -17226,10 -17098,25 +17236,10 @@@
pci_set_master(pdev);
dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); if (!dev) { err = -ENOMEM; - goto err_out_power_down; + goto err_out_free_res; }
SET_NETDEV_DEV(dev, &pdev->dev); @@@ -17237,7 -17124,7 +17247,7 @@@ tp = netdev_priv(dev); tp->pdev = pdev; tp->dev = dev; - tp->pm_cap = pm_cap; + tp->pm_cap = pdev->pm_cap; tp->rx_mode = TG3_DEF_RX_MODE; tp->tx_mode = TG3_DEF_TX_MODE; tp->irq_sync = 1; @@@ -17575,6 -17462,9 +17585,6 @@@ err_out_iounmap err_out_free_dev: free_netdev(dev);
-err_out_power_down: - pci_set_power_state(pdev, PCI_D3hot); - err_out_free_res: pci_release_regions(pdev);
@@@ -17684,8 -17574,6 +17694,8 @@@ static int tg3_resume(struct device *de
tg3_full_lock(tp, 0);
+ tg3_ape_driver_state_change(tp, RESET_KIND_INIT); + tg3_flag_set(tp, INIT_COMPLETE); err = tg3_restart_hw(tp, !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)); @@@ -17747,13 -17635,10 +17757,13 @@@ static pci_ers_result_t tg3_io_error_de tg3_full_unlock(tp);
done: - if (state == pci_channel_io_perm_failure) + if (state == pci_channel_io_perm_failure) { + tg3_napi_enable(tp); + dev_close(netdev); err = PCI_ERS_RESULT_DISCONNECT; - else + } else { pci_disable_device(pdev); + }
rtnl_unlock();
@@@ -17799,10 -17684,6 +17809,10 @@@ static pci_ers_result_t tg3_io_slot_res rc = PCI_ERS_RESULT_RECOVERED;
done: + if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) { + tg3_napi_enable(tp); + dev_close(netdev); + } rtnl_unlock();
return rc; @@@ -17827,7 -17708,6 +17837,7 @@@ static void tg3_io_resume(struct pci_de goto done;
tg3_full_lock(tp, 0); + tg3_ape_driver_state_change(tp, RESET_KIND_INIT); tg3_flag_set(tp, INIT_COMPLETE); err = tg3_restart_hw(tp, true); if (err) { @@@ -17865,4 -17745,15 +17875,4 @@@ static struct pci_driver tg3_driver = .driver.pm = &tg3_pm_ops, };
-static int __init tg3_init(void) -{ - return pci_register_driver(&tg3_driver); -} - -static void __exit tg3_cleanup(void) -{ - pci_unregister_driver(&tg3_driver); -} - -module_init(tg3_init); -module_exit(tg3_cleanup); +module_pci_driver(tg3_driver); diff --combined drivers/net/ethernet/emulex/benet/be_main.c index 9aef457,a0b4be5..98efc29 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@@ -834,39 -834,32 +834,39 @@@ static int be_vlan_tag_tx_chk(struct be return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid; }
-static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb) +static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, + struct sk_buff *skb) { - return BE3_chip(adapter) && - be_ipv6_exthdr_check(skb); + return BE3_chip(adapter) && be_ipv6_exthdr_check(skb); }
-static netdev_tx_t be_xmit(struct sk_buff *skb, - struct net_device *netdev) +static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, + struct sk_buff *skb, + bool *skip_hw_vlan) { - struct be_adapter *adapter = netdev_priv(netdev); - struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)]; - struct be_queue_info *txq = &txo->q; - struct iphdr *ip = NULL; - u32 wrb_cnt = 0, copied = 0; - u32 start = txq->head, eth_hdr_len; - bool dummy_wrb, stopped = false; - bool skip_hw_vlan = false; struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; + unsigned int eth_hdr_len; + struct iphdr *ip;
- eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ? - VLAN_ETH_HLEN : ETH_HLEN; + /* Lancer ASIC has a bug wherein packets that are 32 bytes or less + * may cause a transmit stall on that port. So the work-around is to + * pad such packets to a 36-byte length. + */ + if (unlikely(lancer_chip(adapter) && skb->len <= 32)) { + if (skb_padto(skb, 36)) + goto tx_drop; + skb->len = 36; + }
/* For padded packets, BE HW modifies tot_len field in IP header * incorrecly when VLAN tag is inserted by HW. + * For padded packets, Lancer computes incorrect checksum. */ - if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) { + eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ? + VLAN_ETH_HLEN : ETH_HLEN; + if (skb->len <= 60 && + (lancer_chip(adapter) || vlan_tx_tag_present(skb)) && + is_ipv4_pkt(skb)) { ip = (struct iphdr *)ip_hdr(skb); pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)); } @@@ -876,15 -869,15 +876,15 @@@ */ if ((adapter->function_mode & UMC_ENABLED) && veh->h_vlan_proto == htons(ETH_P_8021Q)) - skip_hw_vlan = true; + *skip_hw_vlan = true;
/* HW has a bug wherein it will calculate CSUM for VLAN * pkts even though it is disabled. * Manually insert VLAN in pkt. */ if (skb->ip_summed != CHECKSUM_PARTIAL && - vlan_tx_tag_present(skb)) { - skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan); + vlan_tx_tag_present(skb)) { + skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); if (unlikely(!skb)) goto tx_drop; } @@@ -894,8 -887,8 +894,8 @@@ * skip HW tagging is not enabled by FW. */ if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) && - (adapter->pvid || adapter->qnq_vid) && - !qnq_async_evt_rcvd(adapter))) + (adapter->pvid || adapter->qnq_vid) && + !qnq_async_evt_rcvd(adapter))) goto tx_drop;
/* Manual VLAN tag insertion to prevent: @@@ -906,31 -899,11 +906,31 @@@ */ if (be_ipv6_tx_stall_chk(adapter, skb) && be_vlan_tag_tx_chk(adapter, skb)) { - skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan); + skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); if (unlikely(!skb)) goto tx_drop; }
+ return skb; +tx_drop: + dev_kfree_skb_any(skb); + return NULL; +} + +static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct be_adapter *adapter = netdev_priv(netdev); + struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)]; + struct be_queue_info *txq = &txo->q; + bool dummy_wrb, stopped = false; + u32 wrb_cnt = 0, copied = 0; + bool skip_hw_vlan = false; + u32 start = txq->head; + + skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan); + if (!skb) + return NETDEV_TX_OK; + wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb, @@@ -960,6 -933,7 +960,6 @@@ txq->head = start; dev_kfree_skb_any(skb); } -tx_drop: return NETDEV_TX_OK; }
@@@ -3210,7 -3184,7 +3210,7 @@@ static int be_setup(struct be_adapter * if (status) goto err;
- be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL); + be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
if (adapter->vlans_added) be_vid_config(adapter); @@@ -3556,6 -3530,40 +3556,6 @@@ static int be_flash_skyhawk(struct be_a return 0; }
-static int lancer_wait_idle(struct be_adapter *adapter) -{ -#define SLIPORT_IDLE_TIMEOUT 30 - u32 reg_val; - int status = 0, i; - - for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) { - reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET); - if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0) - break; - - ssleep(1); - } - - if (i == SLIPORT_IDLE_TIMEOUT) - status = -1; - - return status; -} - -static int lancer_fw_reset(struct be_adapter *adapter) -{ - int status = 0; - - status = lancer_wait_idle(adapter); - if (status) - return status; - - iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db + - PHYSDEV_CONTROL_OFFSET); - - return status; -} - static int lancer_fw_download(struct be_adapter *adapter, const struct firmware *fw) { @@@ -3633,8 -3641,7 +3633,8 @@@ }
if (change_status == LANCER_FW_RESET_NEEDED) { - status = lancer_fw_reset(adapter); + status = lancer_physdev_ctrl(adapter, + PHYSDEV_CONTROL_FW_RESET_MASK); if (status) { dev_err(&adapter->pdev->dev, "Adapter busy for FW reset.\n" @@@ -3769,10 -3776,6 +3769,10 @@@ int be_load_fw(struct be_adapter *adapt else status = be_fw_download(adapter, fw);
+ if (!status) + be_cmd_get_fw_ver(adapter, adapter->fw_ver, + adapter->fw_on_flash); + fw_exit: release_firmware(fw); return status; @@@ -4259,6 -4262,9 +4259,9 @@@ static int be_probe(struct pci_dev *pde netdev->features |= NETIF_F_HIGHDMA; } else { status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (!status) + status = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); if (status) { dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); goto free_netdev; diff --combined drivers/net/ethernet/renesas/sh_eth.c index a2eadc0,5e3982f..8cb600c --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@@ -313,14 -313,9 +313,14 @@@ static const u16 sh_eth_offset_fast_sh3 [TSU_ADRL31] = 0x01fc, };
-#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \ - defined(CONFIG_CPU_SUBTYPE_SH7763) || \ - defined(CONFIG_ARCH_R8A7740) +static int sh_eth_is_gether(struct sh_eth_private *mdp) +{ + if (mdp->reg_offset == sh_eth_offset_gigabit) + return 1; + else + return 0; +} + static void sh_eth_select_mii(struct net_device *ndev) { u32 value = 0x0; @@@ -344,7 -339,11 +344,7 @@@
sh_eth_write(ndev, value, RMII_MII); } -#endif
-/* There is CPU dependent code */ -#if defined(CONFIG_ARCH_R8A7778) || defined(CONFIG_ARCH_R8A7779) -#define SH_ETH_RESET_DEFAULT 1 static void sh_eth_set_duplex(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@@ -355,8 -354,7 +355,8 @@@ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); }
-static void sh_eth_set_rate(struct net_device *ndev) +/* There is CPU dependent code */ +static void sh_eth_set_rate_r8a777x(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev);
@@@ -373,9 -371,9 +373,9 @@@ }
/* R8A7778/9 */ -static struct sh_eth_cpu_data sh_eth_my_cpu_data = { +static struct sh_eth_cpu_data r8a777x_data = { .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate, + .set_rate = sh_eth_set_rate_r8a777x,
.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, @@@ -391,8 -389,19 +391,8 @@@ .tpauser = 1, .hw_swap = 1, }; -#elif defined(CONFIG_CPU_SUBTYPE_SH7724) -#define SH_ETH_RESET_DEFAULT 1 -static void sh_eth_set_duplex(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); -}
-static void sh_eth_set_rate(struct net_device *ndev) +static void sh_eth_set_rate_sh7724(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev);
@@@ -409,9 -418,9 +409,9 @@@ }
/* SH7724 */ -static struct sh_eth_cpu_data sh_eth_my_cpu_data = { +static struct sh_eth_cpu_data sh7724_data = { .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate, + .set_rate = sh_eth_set_rate_sh7724,
.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, @@@ -429,8 -438,22 +429,8 @@@ .rpadir = 1, .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ }; -#elif defined(CONFIG_CPU_SUBTYPE_SH7757) -#define SH_ETH_HAS_BOTH_MODULES 1 -#define SH_ETH_HAS_TSU 1 -static int sh_eth_check_reset(struct net_device *ndev); - -static void sh_eth_set_duplex(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); -}
-static void sh_eth_set_rate(struct net_device *ndev) +static void sh_eth_set_rate_sh7757(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev);
@@@ -447,9 -470,9 +447,9 @@@ }
/* SH7757 */ -static struct sh_eth_cpu_data sh_eth_my_cpu_data = { - .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate, +static struct sh_eth_cpu_data sh7757_data = { + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_sh7757,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .rmcr_value = 0x00000001, @@@ -459,7 -482,6 +459,7 @@@ EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
+ .irq_flags = IRQF_SHARED, .apr = 1, .mpr = 1, .tpauser = 1, @@@ -469,7 -491,7 +469,7 @@@ .rpadir_value = 2 << 16, };
-#define SH_GIGA_ETH_BASE 0xfee00000 +#define SH_GIGA_ETH_BASE 0xfee00000UL #define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8) #define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0) static void sh_eth_chip_reset_giga(struct net_device *ndev) @@@ -494,6 -516,52 +494,6 @@@ } }
-static int sh_eth_is_gether(struct sh_eth_private *mdp); -static int sh_eth_reset(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - int ret = 0; - - if (sh_eth_is_gether(mdp)) { - sh_eth_write(ndev, 0x03, EDSR); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, - EDMR); - - ret = sh_eth_check_reset(ndev); - if (ret) - goto out; - - /* Table Init */ - sh_eth_write(ndev, 0x0, TDLAR); - sh_eth_write(ndev, 0x0, TDFAR); - sh_eth_write(ndev, 0x0, TDFXR); - sh_eth_write(ndev, 0x0, TDFFR); - sh_eth_write(ndev, 0x0, RDLAR); - sh_eth_write(ndev, 0x0, RDFAR); - sh_eth_write(ndev, 0x0, RDFXR); - sh_eth_write(ndev, 0x0, RDFFR); - } else { - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, - EDMR); - mdelay(3); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, - EDMR); - } - -out: - return ret; -} - -static void sh_eth_set_duplex_giga(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); -} - static void sh_eth_set_rate_giga(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@@ -514,9 -582,9 +514,9 @@@ }
/* SH7757(GETHERC) */ -static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = { +static struct sh_eth_cpu_data sh7757_data_giga = { .chip_reset = sh_eth_chip_reset_giga, - .set_duplex = sh_eth_set_duplex_giga, + .set_duplex = sh_eth_set_duplex, .set_rate = sh_eth_set_rate_giga,
.ecsr_value = ECSR_ICD | ECSR_MPD, @@@ -532,7 -600,6 +532,7 @@@ .fdr_value = 0x0000072f, .rmcr_value = 0x00000001,
+ .irq_flags = IRQF_SHARED, .apr = 1, .mpr = 1, .tpauser = 1, @@@ -545,6 -612,19 +545,6 @@@ .tsu = 1, };
-static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp) -{ - if (sh_eth_is_gether(mdp)) - return &sh_eth_my_cpu_data_giga; - else - return &sh_eth_my_cpu_data; -} - -#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) -#define SH_ETH_HAS_TSU 1 -static int sh_eth_check_reset(struct net_device *ndev); -static void sh_eth_reset_hw_crc(struct net_device *ndev); - static void sh_eth_chip_reset(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@@ -554,7 -634,17 +554,7 @@@ mdelay(1); }
-static void sh_eth_set_duplex(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); -} - -static void sh_eth_set_rate(struct net_device *ndev) +static void sh_eth_set_rate_gether(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev);
@@@ -573,11 -663,11 +573,11 @@@ } }
-/* sh7763 */ -static struct sh_eth_cpu_data sh_eth_my_cpu_data = { +/* SH7734 */ +static struct sh_eth_cpu_data sh7734_data = { .chip_reset = sh_eth_chip_reset, .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate, + .set_rate = sh_eth_set_rate_gether,
.ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, @@@ -598,39 -688,54 +598,39 @@@ .no_trimd = 1, .no_ade = 1, .tsu = 1, -#if defined(CONFIG_CPU_SUBTYPE_SH7734) - .hw_crc = 1, - .select_mii = 1, -#endif + .hw_crc = 1, + .select_mii = 1, };
-static int sh_eth_reset(struct net_device *ndev) -{ - int ret = 0; - - sh_eth_write(ndev, EDSR_ENALL, EDSR); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR); - - ret = sh_eth_check_reset(ndev); - if (ret) - goto out; +/* SH7763 */ +static struct sh_eth_cpu_data sh7763_data = { + .chip_reset = sh_eth_chip_reset, + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_gether,
- /* Table Init */ - sh_eth_write(ndev, 0x0, TDLAR); - sh_eth_write(ndev, 0x0, TDFAR); - sh_eth_write(ndev, 0x0, TDFXR); - sh_eth_write(ndev, 0x0, TDFFR); - sh_eth_write(ndev, 0x0, RDLAR); - sh_eth_write(ndev, 0x0, RDFAR); - sh_eth_write(ndev, 0x0, RDFXR); - sh_eth_write(ndev, 0x0, RDFFR); - - /* Reset HW CRC register */ - sh_eth_reset_hw_crc(ndev); - - /* Select MII mode */ - if (sh_eth_my_cpu_data.select_mii) - sh_eth_select_mii(ndev); -out: - return ret; -} + .ecsr_value = ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
-static void sh_eth_reset_hw_crc(struct net_device *ndev) -{ - if (sh_eth_my_cpu_data.hw_crc) - sh_eth_write(ndev, 0x0, CSMR); -} + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ + EESR_ECI, + .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ + EESR_TFE,
-#elif defined(CONFIG_ARCH_R8A7740) -#define SH_ETH_HAS_TSU 1 -static int sh_eth_check_reset(struct net_device *ndev); + .apr = 1, + .mpr = 1, + .tpauser = 1, + .bculr = 1, + .hw_swap = 1, + .no_trimd = 1, + .no_ade = 1, + .tsu = 1, + .irq_flags = IRQF_SHARED, +};
-static void sh_eth_chip_reset(struct net_device *ndev) +static void sh_eth_chip_reset_r8a7740(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev);
@@@ -641,11 -746,65 +641,11 @@@ sh_eth_select_mii(ndev); }
-static int sh_eth_reset(struct net_device *ndev) -{ - int ret = 0; - - sh_eth_write(ndev, EDSR_ENALL, EDSR); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR); - - ret = sh_eth_check_reset(ndev); - if (ret) - goto out; - - /* Table Init */ - sh_eth_write(ndev, 0x0, TDLAR); - sh_eth_write(ndev, 0x0, TDFAR); - sh_eth_write(ndev, 0x0, TDFXR); - sh_eth_write(ndev, 0x0, TDFFR); - sh_eth_write(ndev, 0x0, RDLAR); - sh_eth_write(ndev, 0x0, RDFAR); - sh_eth_write(ndev, 0x0, RDFXR); - sh_eth_write(ndev, 0x0, RDFFR); - -out: - return ret; -} - -static void sh_eth_set_duplex(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); -} - -static void sh_eth_set_rate(struct net_device *ndev) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - - switch (mdp->speed) { - case 10: /* 10BASE */ - sh_eth_write(ndev, GECMR_10, GECMR); - break; - case 100:/* 100BASE */ - sh_eth_write(ndev, GECMR_100, GECMR); - break; - case 1000: /* 1000BASE */ - sh_eth_write(ndev, GECMR_1000, GECMR); - break; - default: - break; - } -} - /* R8A7740 */ -static struct sh_eth_cpu_data sh_eth_my_cpu_data = { - .chip_reset = sh_eth_chip_reset, +static struct sh_eth_cpu_data r8a7740_data = { + .chip_reset = sh_eth_chip_reset_r8a7740, .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate, + .set_rate = sh_eth_set_rate_gether,
.ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, @@@ -669,7 -828,9 +669,7 @@@ .select_mii = 1, };
-#elif defined(CONFIG_CPU_SUBTYPE_SH7619) -#define SH_ETH_RESET_DEFAULT 1 -static struct sh_eth_cpu_data sh_eth_my_cpu_data = { +static struct sh_eth_cpu_data sh7619_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.apr = 1, @@@ -677,11 -838,14 +677,11 @@@ .tpauser = 1, .hw_swap = 1, }; -#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) -#define SH_ETH_RESET_DEFAULT 1 -#define SH_ETH_HAS_TSU 1 -static struct sh_eth_cpu_data sh_eth_my_cpu_data = { + +static struct sh_eth_cpu_data sh771x_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .tsu = 1, }; -#endif
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) { @@@ -711,6 -875,17 +711,6 @@@ cd->tx_error_check = DEFAULT_TX_ERROR_CHECK; }
-#if defined(SH_ETH_RESET_DEFAULT) -/* Chip Reset */ -static int sh_eth_reset(struct net_device *ndev) -{ - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR); - mdelay(3); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR); - - return 0; -} -#else static int sh_eth_check_reset(struct net_device *ndev) { int ret = 0; @@@ -722,55 -897,13 +722,55 @@@ mdelay(1); cnt--; } - if (cnt < 0) { - pr_err("Device reset fail\n"); + if (cnt <= 0) { + pr_err("Device reset failed\n"); ret = -ETIMEDOUT; } return ret; } -#endif + +static int sh_eth_reset(struct net_device *ndev) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + int ret = 0; + + if (sh_eth_is_gether(mdp)) { + sh_eth_write(ndev, EDSR_ENALL, EDSR); + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, + EDMR); + + ret = sh_eth_check_reset(ndev); + if (ret) + goto out; + + /* Table Init */ + sh_eth_write(ndev, 0x0, TDLAR); + sh_eth_write(ndev, 0x0, TDFAR); + sh_eth_write(ndev, 0x0, TDFXR); + sh_eth_write(ndev, 0x0, TDFFR); + sh_eth_write(ndev, 0x0, RDLAR); + sh_eth_write(ndev, 0x0, RDFAR); + sh_eth_write(ndev, 0x0, RDFXR); + sh_eth_write(ndev, 0x0, RDFFR); + + /* Reset HW CRC register */ + if (mdp->cd->hw_crc) + sh_eth_write(ndev, 0x0, CSMR); + + /* Select MII mode */ + if (mdp->cd->select_mii) + sh_eth_select_mii(ndev); + } else { + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, + EDMR); + mdelay(3); + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, + EDMR); + } + +out: + return ret; +}
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) static void sh_eth_set_receive_align(struct sk_buff *skb) @@@ -846,6 -979,14 +846,6 @@@ static void read_mac_address(struct net } }
-static int sh_eth_is_gether(struct sh_eth_private *mdp) -{ - if (mdp->reg_offset == sh_eth_offset_gigabit) - return 1; - else - return 0; -} - static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp) { if (sh_eth_is_gether(mdp)) @@@ -1260,16 -1401,23 +1260,23 @@@ static int sh_eth_rx(struct net_device desc_status = edmac_to_cpu(mdp, rxdesc->status); pkt_len = rxdesc->frame_length;
- #if defined(CONFIG_ARCH_R8A7740) - desc_status >>= 16; - #endif - if (--boguscnt < 0) break;
if (!(desc_status & RDFEND)) ndev->stats.rx_length_errors++;
+ #if defined(CONFIG_ARCH_R8A7740) + /* + * In case of almost all GETHER/ETHERs, the Receive Frame State + * (RFS) bits in the Receive Descriptor 0 are from bit 9 to + * bit 0. However, in case of the R8A7740's GETHER, the RFS + * bits are from bit 25 to bit 16. So, the driver needs right + * shifting by 16. + */ + desc_status >>= 16; + #endif + if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | RD_RFS5 | RD_RFS6 | RD_RFS10)) { ndev->stats.rx_errors++; @@@ -1820,7 -1968,14 +1827,7 @@@ static int sh_eth_open(struct net_devic pm_runtime_get_sync(&mdp->pdev->dev);
ret = request_irq(ndev->irq, sh_eth_interrupt, -#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \ - defined(CONFIG_CPU_SUBTYPE_SH7764) || \ - defined(CONFIG_CPU_SUBTYPE_SH7757) - IRQF_SHARED, -#else - 0, -#endif - ndev->name, ndev); + mdp->cd->irq_flags, ndev->name, ndev); if (ret) { dev_err(&ndev->dev, "Can not assign IRQ number\n"); return ret; @@@ -2006,6 -2161,7 +2013,6 @@@ static int sh_eth_do_ioctl(struct net_d return phy_mii_ioctl(phydev, rq, cmd); }
-#if defined(SH_ETH_HAS_TSU) /* For TSU_POSTn. Please refer to the manual about this (strange) bitfields */ static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp, int entry) @@@ -2348,6 -2504,7 +2355,6 @@@ static int sh_eth_vlan_rx_kill_vid(stru
return 0; } -#endif /* SH_ETH_HAS_TSU */
/* SuperH's TSU register init function */ static void sh_eth_tsu_init(struct sh_eth_private *mdp) @@@ -2491,21 -2648,11 +2498,21 @@@ static const struct net_device_ops sh_e .ndo_stop = sh_eth_close, .ndo_start_xmit = sh_eth_start_xmit, .ndo_get_stats = sh_eth_get_stats, -#if defined(SH_ETH_HAS_TSU) + .ndo_tx_timeout = sh_eth_tx_timeout, + .ndo_do_ioctl = sh_eth_do_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static const struct net_device_ops sh_eth_netdev_ops_tsu = { + .ndo_open = sh_eth_open, + .ndo_stop = sh_eth_close, + .ndo_start_xmit = sh_eth_start_xmit, + .ndo_get_stats = sh_eth_get_stats, .ndo_set_rx_mode = sh_eth_set_multicast_list, .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid, -#endif .ndo_tx_timeout = sh_eth_tx_timeout, .ndo_do_ioctl = sh_eth_do_ioctl, .ndo_validate_addr = eth_validate_addr, @@@ -2520,7 -2667,6 +2527,7 @@@ static int sh_eth_drv_probe(struct plat struct net_device *ndev = NULL; struct sh_eth_private *mdp = NULL; struct sh_eth_plat_data *pd = pdev->dev.platform_data; + const struct platform_device_id *id = platform_get_device_id(pdev);
/* get base addr */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@@ -2579,14 -2725,15 +2586,14 @@@ mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
/* set cpu data */ -#if defined(SH_ETH_HAS_BOTH_MODULES) - mdp->cd = sh_eth_get_cpu_data(mdp); -#else - mdp->cd = &sh_eth_my_cpu_data; -#endif + mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; sh_eth_set_default_cpu_data(mdp->cd);
/* set function */ - ndev->netdev_ops = &sh_eth_netdev_ops; + if (mdp->cd->tsu) + ndev->netdev_ops = &sh_eth_netdev_ops_tsu; + else + ndev->netdev_ops = &sh_eth_netdev_ops; SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops); ndev->watchdog_timeo = TX_TIMEOUT;
@@@ -2663,11 -2810,11 +2670,11 @@@ static int sh_eth_drv_remove(struct pla unregister_netdev(ndev); pm_runtime_disable(&pdev->dev); free_netdev(ndev); - platform_set_drvdata(pdev, NULL);
return 0; }
+#ifdef CONFIG_PM static int sh_eth_runtime_nop(struct device *dev) { /* @@@ -2681,36 -2828,17 +2688,36 @@@ return 0; }
-static struct dev_pm_ops sh_eth_dev_pm_ops = { +static const struct dev_pm_ops sh_eth_dev_pm_ops = { .runtime_suspend = sh_eth_runtime_nop, .runtime_resume = sh_eth_runtime_nop, }; +#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops) +#else +#define SH_ETH_PM_OPS NULL +#endif + +static struct platform_device_id sh_eth_id_table[] = { + { "sh7619-ether", (kernel_ulong_t)&sh7619_data }, + { "sh771x-ether", (kernel_ulong_t)&sh771x_data }, + { "sh7724-ether", (kernel_ulong_t)&sh7724_data }, + { "sh7734-gether", (kernel_ulong_t)&sh7734_data }, + { "sh7757-ether", (kernel_ulong_t)&sh7757_data }, + { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga }, + { "sh7763-gether", (kernel_ulong_t)&sh7763_data }, + { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data }, + { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data }, + { } +}; +MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
static struct platform_driver sh_eth_driver = { .probe = sh_eth_drv_probe, .remove = sh_eth_drv_remove, + .id_table = sh_eth_id_table, .driver = { .name = CARDNAME, - .pm = &sh_eth_dev_pm_ops, + .pm = SH_ETH_PM_OPS, }, };
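The sh_eth hunks above replace per-CPU #ifdef blocks with a platform_device_id table whose driver_data points at the matching sh_eth_cpu_data, looked up in probe via platform_get_device_id(). The following is only a minimal sketch of that pattern; the "foo" names and fields are hypothetical stand-ins, not the real driver's.

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Illustrative per-SoC data; fields mirror the idea, not the real struct. */
struct foo_cpu_data {
        unsigned long irq_flags;
        bool tsu;
};

static const struct foo_cpu_data foo_a_data = { .irq_flags = 0,           .tsu = false };
static const struct foo_cpu_data foo_b_data = { .irq_flags = IRQF_SHARED, .tsu = true  };

static const struct platform_device_id foo_id_table[] = {
        { "foo-a-ether",  (kernel_ulong_t)&foo_a_data },
        { "foo-b-gether", (kernel_ulong_t)&foo_b_data },
        { }
};
MODULE_DEVICE_TABLE(platform, foo_id_table);

static int foo_probe(struct platform_device *pdev)
{
        const struct platform_device_id *id = platform_get_device_id(pdev);
        const struct foo_cpu_data *cd = (const struct foo_cpu_data *)id->driver_data;

        /* cd->irq_flags and cd->tsu now replace CONFIG_CPU_SUBTYPE_* conditionals */
        dev_info(&pdev->dev, "tsu=%d\n", cd->tsu);
        return 0;
}

static struct platform_driver foo_driver = {
        .probe    = foo_probe,
        .id_table = foo_id_table,
        .driver   = { .name = "foo-ether" },
};
module_platform_driver(foo_driver);

Matching on the platform device name lets board code or device tree select the SoC variant, so the driver itself stays free of CPU-subtype conditionals.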
diff --combined drivers/net/ethernet/ti/cpsw.c index a45f64e,2fd69db..101b037 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@@ -1554,8 -1554,6 +1554,8 @@@ static int cpsw_probe_dt(struct cpsw_pl if (mac_addr) memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
+ slave_data->phy_if = of_get_phy_mode(slave_node); + if (data->dual_emac) { if (of_property_read_u32(slave_node, "dual_emac_res_vlan", &prop)) { @@@ -1681,7 -1679,7 +1681,7 @@@ static int cpsw_probe(struct platform_d priv->rx_packet_max = max(rx_packet_max, 128); priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL); priv->irq_enabled = true; - if (!ndev) { + if (!priv->cpts) { pr_err("error allocating cpts\n"); goto clean_ndev_ret; } @@@ -1942,6 -1940,7 +1942,6 @@@ static int cpsw_remove(struct platform_ struct cpsw_priv *priv = netdev_priv(ndev); int i;
- platform_set_drvdata(pdev, NULL); if (priv->data.dual_emac) unregister_netdev(cpsw_get_slave_ndev(priv, 1)); unregister_netdev(ndev); diff --combined drivers/net/macvlan.c index edfddc5,6e91931..d811b06 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@@ -853,18 -853,24 +853,24 @@@ static int macvlan_changelink(struct ne struct nlattr *tb[], struct nlattr *data[]) { struct macvlan_dev *vlan = netdev_priv(dev); - if (data && data[IFLA_MACVLAN_MODE]) - vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); + if (data && data[IFLA_MACVLAN_FLAGS]) { __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC; - - if (promisc && (flags & MACVLAN_FLAG_NOPROMISC)) - dev_set_promiscuity(vlan->lowerdev, -1); - else if (promisc && !(flags & MACVLAN_FLAG_NOPROMISC)) - dev_set_promiscuity(vlan->lowerdev, 1); + if (vlan->port->passthru && promisc) { + int err; + + if (flags & MACVLAN_FLAG_NOPROMISC) + err = dev_set_promiscuity(vlan->lowerdev, -1); + else + err = dev_set_promiscuity(vlan->lowerdev, 1); + if (err < 0) + return err; + } vlan->flags = flags; } + if (data && data[IFLA_MACVLAN_MODE]) + vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); return 0; }
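For the macvlan_changelink() hunk just above, the notable change is that lowerdev promiscuity is only toggled for passthru ports and that dev_set_promiscuity() failures are now propagated instead of ignored. A hedged sketch of that shape follows; the names are illustrative, not the macvlan ones.

#include <linux/netdevice.h>

/* Sketch: apply a NOPROMISC-style flag change with error propagation. */
static int foo_apply_flags(struct net_device *lowerdev, bool passthru,
                           u16 old_flags, u16 new_flags, u16 nopromisc_bit)
{
        bool changed = (old_flags ^ new_flags) & nopromisc_bit;

        if (passthru && changed) {
                /* +1/-1 adjusts the lower device's promiscuity refcount */
                int err = dev_set_promiscuity(lowerdev,
                                              (new_flags & nopromisc_bit) ? -1 : 1);
                if (err < 0)
                        return err;     /* keep the old flags on failure */
        }
        return 0;
}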
@@@ -921,7 -927,7 +927,7 @@@ static struct rtnl_link_ops macvlan_lin static int macvlan_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct macvlan_dev *vlan, *next; struct macvlan_port *port; LIST_HEAD(list_kill); diff --combined drivers/net/team/team.c index e46fef3,b305105..bff7e0b --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@@ -525,26 -525,31 +525,26 @@@ static void team_set_no_mode(struct tea team->mode = &__team_no_mode; }
-static void __team_adjust_ops(struct team *team, int en_port_count) +static void team_adjust_ops(struct team *team) { /* * To avoid checks in rx/tx skb paths, ensure here that non-null and * correct ops are always set. */
- if (!en_port_count || !team_is_mode_set(team) || + if (!team->en_port_count || !team_is_mode_set(team) || !team->mode->ops->transmit) team->ops.transmit = team_dummy_transmit; else team->ops.transmit = team->mode->ops->transmit;
- if (!en_port_count || !team_is_mode_set(team) || + if (!team->en_port_count || !team_is_mode_set(team) || !team->mode->ops->receive) team->ops.receive = team_dummy_receive; else team->ops.receive = team->mode->ops->receive; }
-static void team_adjust_ops(struct team *team) -{ - __team_adjust_ops(team, team->en_port_count); -} - /* * We can benefit from the fact that it's ensured no port is present * at the time of mode change. Therefore no packets are in fly so there's no @@@ -720,9 -725,9 +720,9 @@@ static bool team_queue_override_transmi static void __team_queue_override_port_del(struct team *team, struct team_port *port) { + if (!port->queue_id) + return; list_del_rcu(&port->qom_list); - synchronize_rcu(); - INIT_LIST_HEAD(&port->qom_list); }
static bool team_queue_override_port_has_gt_prio_than(struct team_port *port, @@@ -744,8 -749,9 +744,8 @@@ static void __team_queue_override_port_ struct list_head *qom_list; struct list_head *node;
- if (!port->queue_id || !team_port_enabled(port)) + if (!port->queue_id) return; - qom_list = __team_get_qom_list(team, port->queue_id); node = qom_list; list_for_each_entry(cur, qom_list, qom_list) { @@@ -762,7 -768,7 +762,7 @@@ static void __team_queue_override_enabl bool enabled = false;
list_for_each_entry(port, &team->port_list, list) { - if (!list_empty(&port->qom_list)) { + if (port->queue_id) { enabled = true; break; } @@@ -774,44 -780,14 +774,44 @@@ team->queue_override_enabled = enabled; }
-static void team_queue_override_port_refresh(struct team *team, - struct team_port *port) +static void team_queue_override_port_prio_changed(struct team *team, + struct team_port *port) { + if (!port->queue_id || team_port_enabled(port)) + return; __team_queue_override_port_del(team, port); __team_queue_override_port_add(team, port); __team_queue_override_enabled_check(team); }
+static void team_queue_override_port_change_queue_id(struct team *team, + struct team_port *port, + u16 new_queue_id) +{ + if (team_port_enabled(port)) { + __team_queue_override_port_del(team, port); + port->queue_id = new_queue_id; + __team_queue_override_port_add(team, port); + __team_queue_override_enabled_check(team); + } else { + port->queue_id = new_queue_id; + } +} + +static void team_queue_override_port_add(struct team *team, + struct team_port *port) +{ + __team_queue_override_port_add(team, port); + __team_queue_override_enabled_check(team); +} + +static void team_queue_override_port_del(struct team *team, + struct team_port *port) +{ + __team_queue_override_port_del(team, port); + __team_queue_override_enabled_check(team); +} +
/**************** * Port handling @@@ -843,7 -819,7 +843,7 @@@ static void team_port_enable(struct tea hlist_add_head_rcu(&port->hlist, team_port_index_hash(team, port->index)); team_adjust_ops(team); - team_queue_override_port_refresh(team, port); + team_queue_override_port_add(team, port); if (team->ops.port_enabled) team->ops.port_enabled(team, port); } @@@ -872,9 -848,14 +872,9 @@@ static void team_port_disable(struct te hlist_del_rcu(&port->hlist); __reconstruct_port_hlist(team, port->index); port->index = -1; - team_queue_override_port_refresh(team, port); - __team_adjust_ops(team, team->en_port_count - 1); - /* - * Wait until readers see adjusted ops. This ensures that - * readers never see team->en_port_count == 0 - */ - synchronize_rcu(); team->en_port_count--; + team_queue_override_port_del(team, port); + team_adjust_ops(team); }
#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ @@@ -1111,8 -1092,8 +1111,8 @@@ static int team_port_add(struct team *t }
port->index = -1; - team_port_enable(team, port); list_add_tail_rcu(&port->list, &team->port_list); + team_port_enable(team, port); __team_compute_features(team); __team_port_change_port_added(port, !!netif_carrier_ok(port_dev)); __team_options_change_check(team); @@@ -1182,7 -1163,8 +1182,7 @@@ static int team_port_del(struct team *t
team_port_set_orig_dev_addr(port); dev_set_mtu(port_dev, port->orig.mtu); - synchronize_rcu(); - kfree(port); + kfree_rcu(port, rcu); netdev_info(dev, "Port device %s removed\n", portname); __team_compute_features(team);
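The team_port_del() hunk above swaps a blocking synchronize_rcu()+kfree() pair for kfree_rcu(), which queues the free as an RCU callback. A minimal sketch of the conversion, using a hypothetical "bar" object:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct bar {
        int data;
        struct rcu_head rcu;    /* storage kfree_rcu() uses for the deferred callback */
};

static void bar_free(struct bar *b)
{
        /* Before: synchronize_rcu(); kfree(b);  -- caller sleeps for a grace period.
         * After:  the free is deferred and the caller returns immediately;
         *         readers that still hold an RCU reference remain safe. */
        kfree_rcu(b, rcu);
}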
@@@ -1277,12 -1259,9 +1277,12 @@@ static int team_priority_option_set(str struct team_gsetter_ctx *ctx) { struct team_port *port = ctx->info->port; + s32 priority = ctx->data.s32_val;
- port->priority = ctx->data.s32_val; - team_queue_override_port_refresh(team, port); + if (port->priority == priority) + return 0; + port->priority = priority; + team_queue_override_port_prio_changed(team, port); return 0; }
@@@ -1299,16 -1278,17 +1299,16 @@@ static int team_queue_id_option_set(str struct team_gsetter_ctx *ctx) { struct team_port *port = ctx->info->port; + u16 new_queue_id = ctx->data.u32_val;
- if (port->queue_id == ctx->data.u32_val) + if (port->queue_id == new_queue_id) return 0; - if (ctx->data.u32_val >= team->dev->real_num_tx_queues) + if (new_queue_id >= team->dev->real_num_tx_queues) return -EINVAL; - port->queue_id = ctx->data.u32_val; - team_queue_override_port_refresh(team, port); + team_queue_override_port_change_queue_id(team, port, new_queue_id); return 0; }
- static const struct team_option team_options[] = { { .name = "mode", @@@ -2668,7 -2648,7 +2668,7 @@@ static void team_port_change_check(stru static int team_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = (struct net_device *) ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct team_port *port;
port = team_port_get_rtnl(dev); diff --combined drivers/net/team/team_mode_roundrobin.c index 90311c73,472623f..5366585 --- a/drivers/net/team/team_mode_roundrobin.c +++ b/drivers/net/team/team_mode_roundrobin.c @@@ -30,9 -30,10 +30,11 @@@ static bool rr_transmit(struct team *te struct team_port *port; int port_index;
- port_index = rr_priv(team)->sent_packets++ % team->en_port_count; + port_index = team_num_to_port_index(team, + rr_priv(team)->sent_packets++); port = team_get_port_by_index_rcu(team, port_index); + if (unlikely(!port)) + goto drop; port = team_get_first_port_txable_rcu(team, port); if (unlikely(!port)) goto drop; diff --combined drivers/net/tun.c index a344270,bfa9bb4..cea2fe4 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@@ -352,7 -352,7 +352,7 @@@ static u16 tun_select_queue(struct net_ u32 numqueues = 0;
rcu_read_lock(); - numqueues = tun->numqueues; + numqueues = ACCESS_ONCE(tun->numqueues);
txq = skb_get_rxhash(skb); if (txq) { @@@ -841,7 -841,7 +841,7 @@@ static const struct net_device_ops tap_ #endif };
-static int tun_flow_init(struct tun_struct *tun) +static void tun_flow_init(struct tun_struct *tun) { int i;
@@@ -852,6 -852,8 +852,6 @@@ setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun); mod_timer(&tun->flow_gc_timer, round_jiffies_up(jiffies + tun->ageing_time)); - - return 0; }
static void tun_flow_uninit(struct tun_struct *tun) @@@ -1528,9 -1530,6 +1528,9 @@@ static int tun_flags(struct tun_struct if (tun->flags & TUN_TAP_MQ) flags |= IFF_MULTI_QUEUE;
+ if (tun->flags & TUN_PERSIST) + flags |= IFF_PERSIST; + return flags; }
@@@ -1660,7 -1659,10 +1660,7 @@@ static int tun_set_iff(struct net *net goto err_free_dev;
tun_net_init(dev); - - err = tun_flow_init(tun); - if (err < 0) - goto err_free_dev; + tun_flow_init(tun);
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | TUN_USER_FEATURES; @@@ -2157,6 -2159,8 +2157,8 @@@ static int tun_chr_open(struct inode *i set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags); INIT_LIST_HEAD(&tfile->next);
+ sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); + return 0; }
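Earlier in the tun.c hunks, tun_select_queue() starts reading tun->numqueues through ACCESS_ONCE() so the lockless fast path gets one consistent snapshot rather than a value the compiler may reload. A small sketch of that idiom, with an invented "foo" structure:

#include <linux/compiler.h>
#include <linux/types.h>

struct foo {
        unsigned int numqueues; /* updated under a lock, read locklessly here */
};

static unsigned int foo_pick_queue(struct foo *f, u32 hash)
{
        /* Single snapshot: every use below sees the same value even if a
         * writer changes f->numqueues concurrently. */
        unsigned int n = ACCESS_ONCE(f->numqueues);

        if (!n)
                return 0;
        return hash % n;
}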
diff --combined drivers/net/vxlan.c index 8111565,57325f3..f6dce13 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@@ -44,8 -44,6 +44,8 @@@
#define VXLAN_VERSION "0.1"
+#define PORT_HASH_BITS 8 +#define PORT_HASH_SIZE (1<<PORT_HASH_BITS) #define VNI_HASH_BITS 10 #define VNI_HASH_SIZE (1<<VNI_HASH_BITS) #define FDB_HASH_BITS 8 @@@ -78,25 -76,15 +78,25 @@@ static bool log_ecn_error = true module_param(log_ecn_error, bool, 0644); MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
-/* per-net private data for this module */ static unsigned int vxlan_net_id; -struct vxlan_net { - struct socket *sock; /* UDP encap socket */ + +/* per UDP socket information */ +struct vxlan_sock { + struct hlist_node hlist; + struct rcu_head rcu; + struct work_struct del_work; + unsigned int refcnt; + struct socket *sock; struct hlist_head vni_list[VNI_HASH_SIZE]; };
+/* per-network namespace private data for this module */ +struct vxlan_net { + struct list_head vxlan_list; + struct hlist_head sock_list[PORT_HASH_SIZE]; +}; + struct vxlan_rdst { - struct rcu_head rcu; __be32 remote_ip; __be16 remote_port; u32 remote_vni; @@@ -118,9 -106,7 +118,9 @@@ struct vxlan_fdb
/* Pseudo network device */ struct vxlan_dev { - struct hlist_node hlist; + struct hlist_node hlist; /* vni hash table */ + struct list_head next; /* vxlan's per namespace list */ + struct vxlan_sock *vn_sock; /* listening socket */ struct net_device *dev; struct vxlan_rdst default_dst; /* default destination */ __be32 saddr; /* source address */ @@@ -149,43 -135,19 +149,43 @@@ /* salt for hash table */ static u32 vxlan_salt __read_mostly;
-static inline struct hlist_head *vni_head(struct net *net, u32 id) +/* Virtual Network hash table head */ +static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id) +{ + return &vs->vni_list[hash_32(id, VNI_HASH_BITS)]; +} + +/* Socket hash table head */ +static inline struct hlist_head *vs_head(struct net *net, __be16 port) { struct vxlan_net *vn = net_generic(net, vxlan_net_id);
- return &vn->vni_list[hash_32(id, VNI_HASH_BITS)]; + return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)]; +} + +/* Find VXLAN socket based on network namespace and UDP port */ +static struct vxlan_sock *vxlan_find_port(struct net *net, __be16 port) +{ + struct vxlan_sock *vs; + + hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) { + if (inet_sk(vs->sock->sk)->inet_sport == port) + return vs; + } + return NULL; }
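vxlan_find_port() above is a standard RCU hash-table lookup: hash_32() over the UDP port picks a bucket in the per-namespace sock_list, and hlist_for_each_entry_rcu() walks it under rcu_read_lock(). A generic sketch of the same shape with hypothetical names:

#include <linux/hash.h>
#include <linux/rculist.h>
#include <linux/types.h>

#define FOO_HASH_BITS   8
#define FOO_HASH_SIZE   (1 << FOO_HASH_BITS)

struct foo_entry {
        struct hlist_node hlist;
        u16 key;
};

static struct hlist_head foo_table[FOO_HASH_SIZE];

static struct hlist_head *foo_head(u16 key)
{
        return &foo_table[hash_32(key, FOO_HASH_BITS)];
}

/* Caller must hold rcu_read_lock(); writers insert with hlist_add_head_rcu(). */
static struct foo_entry *foo_find(u16 key)
{
        struct foo_entry *e;

        hlist_for_each_entry_rcu(e, foo_head(key), hlist)
                if (e->key == key)
                        return e;
        return NULL;
}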
/* Look up VNI in a per net namespace table */ -static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id) +static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port) { + struct vxlan_sock *vs; struct vxlan_dev *vxlan;
- hlist_for_each_entry_rcu(vxlan, vni_head(net, id), hlist) { + vs = vxlan_find_port(net, port); + if (!vs) + return NULL; + + hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) { if (vxlan->default_dst.remote_vni == id) return vxlan; } @@@ -603,18 -565,22 +603,22 @@@ skip
/* Watch incoming packets to learn mapping between Ethernet address * and Tunnel endpoint. + * Return true if packet is bogus and should be dropped. */ - static void vxlan_snoop(struct net_device *dev, + static bool vxlan_snoop(struct net_device *dev, __be32 src_ip, const u8 *src_mac) { struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_fdb *f; - int err;
f = vxlan_find_mac(vxlan, src_mac); if (likely(f)) { if (likely(f->remote.remote_ip == src_ip)) - return; + return false; + + /* Don't migrate static entries, drop packets */ + if (f->state & NUD_NOARP) + return true;
if (net_ratelimit()) netdev_info(dev, @@@ -626,14 -592,19 +630,19 @@@ } else { /* learned new entry */ spin_lock(&vxlan->hash_lock); - err = vxlan_fdb_create(vxlan, src_mac, src_ip, - NUD_REACHABLE, - NLM_F_EXCL|NLM_F_CREATE, - vxlan->dst_port, - vxlan->default_dst.remote_vni, - 0, NTF_SELF); + + /* close off race between vxlan_flush and incoming packets */ + if (netif_running(dev)) + vxlan_fdb_create(vxlan, src_mac, src_ip, + NUD_REACHABLE, + NLM_F_EXCL|NLM_F_CREATE, + vxlan->dst_port, + vxlan->default_dst.remote_vni, + 0, NTF_SELF); spin_unlock(&vxlan->hash_lock); } + + return false; }
@@@ -641,18 -612,20 +650,18 @@@ static bool vxlan_group_used(struct vxlan_net *vn, const struct vxlan_dev *this) { - const struct vxlan_dev *vxlan; - unsigned h; + struct vxlan_dev *vxlan;
- for (h = 0; h < VNI_HASH_SIZE; ++h) - hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist) { - if (vxlan == this) - continue; + list_for_each_entry(vxlan, &vn->vxlan_list, next) { + if (vxlan == this) + continue;
- if (!netif_running(vxlan->dev)) - continue; + if (!netif_running(vxlan->dev)) + continue;
- if (vxlan->default_dst.remote_ip == this->default_dst.remote_ip) - return true; - } + if (vxlan->default_dst.remote_ip == this->default_dst.remote_ip) + return true; + }
return false; } @@@ -662,7 -635,7 +671,7 @@@ static int vxlan_join_group(struct net_ { struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); - struct sock *sk = vn->sock->sk; + struct sock *sk = vxlan->vn_sock->sock->sk; struct ip_mreqn mreq = { .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip, .imr_ifindex = vxlan->default_dst.remote_ifindex, @@@ -690,7 -663,7 +699,7 @@@ static int vxlan_leave_group(struct net struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); int err = 0; - struct sock *sk = vn->sock->sk; + struct sock *sk = vxlan->vn_sock->sock->sk; struct ip_mreqn mreq = { .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip, .imr_ifindex = vxlan->default_dst.remote_ifindex, @@@ -717,7 -690,6 +726,7 @@@ static int vxlan_udp_encap_recv(struct struct vxlanhdr *vxh; struct vxlan_dev *vxlan; struct pcpu_tstats *stats; + __be16 port; __u32 vni; int err;
@@@ -741,11 -713,9 +750,11 @@@
/* Is this VNI defined? */ vni = ntohl(vxh->vx_vni) >> 8; - vxlan = vxlan_find_vni(sock_net(sk), vni); + port = inet_sk(sk)->inet_sport; + vxlan = vxlan_find_vni(sock_net(sk), vni, port); if (!vxlan) { - netdev_dbg(skb->dev, "unknown vni %d\n", vni); + netdev_dbg(skb->dev, "unknown vni %d port %u\n", + vni, ntohs(port)); goto drop; }
@@@ -766,8 -736,9 +775,9 @@@ vxlan->dev->dev_addr) == 0) goto drop;
- if (vxlan->flags & VXLAN_F_LEARN) - vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source); + if ((vxlan->flags & VXLAN_F_LEARN) && + vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source)) + goto drop;
__skb_tunnel_rx(skb, vxlan->dev); skb_reset_network_header(skb); @@@ -925,7 -896,7 +935,7 @@@ static bool route_shortcircuit(struct n return false; }
-static void vxlan_sock_free(struct sk_buff *skb) +static void vxlan_sock_put(struct sk_buff *skb) { sock_put(skb->sk); } @@@ -933,13 -904,13 +943,13 @@@ /* On transmit, associate with the tunnel socket */ static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb) { - struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); - struct sock *sk = vn->sock->sk; + struct vxlan_dev *vxlan = netdev_priv(dev); + struct sock *sk = vxlan->vn_sock->sock->sk;
skb_orphan(skb); sock_hold(sk); skb->sk = sk; - skb->destructor = vxlan_sock_free; + skb->destructor = vxlan_sock_put; }
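vxlan_set_owner()/vxlan_sock_put() above pin the tunnel socket for the lifetime of an in-flight skb: orphan the skb, take a socket reference, and drop it from the skb destructor. The same pattern in isolation, with illustrative "foo" names:

#include <linux/skbuff.h>
#include <net/sock.h>

static void foo_sock_put(struct sk_buff *skb)
{
        sock_put(skb->sk);              /* release the reference taken below */
}

static void foo_set_owner(struct sock *sk, struct sk_buff *skb)
{
        skb_orphan(skb);                /* detach from the sending socket's accounting */
        sock_hold(sk);                  /* keep the tunnel socket alive while the skb is queued */
        skb->sk = sk;
        skb->destructor = foo_sock_put; /* runs when the skb is finally freed */
}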
/* Compute source port for outgoing packet @@@ -1081,7 -1052,7 +1091,7 @@@ static netdev_tx_t vxlan_xmit_one(struc struct vxlan_dev *dst_vxlan;
ip_rt_put(rt); - dst_vxlan = vxlan_find_vni(dev_net(dev), vni); + dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port); if (!dst_vxlan) goto tx_error; vxlan_encap_bypass(skb, vxlan, dst_vxlan); @@@ -1190,9 -1161,11 +1200,11 @@@ static netdev_tx_t vxlan_xmit(struct sk struct sk_buff *skb1;
skb1 = skb_clone(skb, GFP_ATOMIC); - rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc); - if (rc == NETDEV_TX_OK) - rc = rc1; + if (skb1) { + rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc); + if (rc == NETDEV_TX_OK) + rc = rc1; + } }
rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc); @@@ -1269,7 -1242,7 +1281,7 @@@ static int vxlan_open(struct net_devic /* Purge the forwarding table */ static void vxlan_flush(struct vxlan_dev *vxlan) { - unsigned h; + unsigned int h;
spin_lock_bh(&vxlan->hash_lock); for (h = 0; h < FDB_HASH_SIZE; ++h) { @@@ -1333,7 -1306,7 +1345,7 @@@ static void vxlan_free(struct net_devic static void vxlan_setup(struct net_device *dev) { struct vxlan_dev *vxlan = netdev_priv(dev); - unsigned h; + unsigned int h; int low, high;
eth_hw_addr_random(dev); @@@ -1356,7 -1329,6 +1368,7 @@@ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ INIT_LIST_HEAD(&vxlan->next); spin_lock_init(&vxlan->hash_lock);
init_timer_deferrable(&vxlan->age_timer); @@@ -1441,78 -1413,11 +1453,78 @@@ static const struct ethtool_ops vxlan_e .get_link = ethtool_op_get_link, };
+static void vxlan_del_work(struct work_struct *work) +{ + struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work); + + sk_release_kernel(vs->sock->sk); + kfree_rcu(vs, rcu); +} + +/* Create new listen socket if needed */ +static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port) +{ + struct vxlan_sock *vs; + struct sock *sk; + struct sockaddr_in vxlan_addr = { + .sin_family = AF_INET, + .sin_addr.s_addr = htonl(INADDR_ANY), + }; + int rc; + unsigned int h; + + vs = kmalloc(sizeof(*vs), GFP_KERNEL); + if (!vs) + return ERR_PTR(-ENOMEM); + + for (h = 0; h < VNI_HASH_SIZE; ++h) + INIT_HLIST_HEAD(&vs->vni_list[h]); + + INIT_WORK(&vs->del_work, vxlan_del_work); + + /* Create UDP socket for encapsulation receive. */ + rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vs->sock); + if (rc < 0) { + pr_debug("UDP socket create failed\n"); + kfree(vs); + return ERR_PTR(rc); + } + + /* Put in proper namespace */ + sk = vs->sock->sk; + sk_change_net(sk, net); + + vxlan_addr.sin_port = port; + + rc = kernel_bind(vs->sock, (struct sockaddr *) &vxlan_addr, + sizeof(vxlan_addr)); + if (rc < 0) { + pr_debug("bind for UDP socket %pI4:%u (%d)\n", + &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc); + sk_release_kernel(sk); + kfree(vs); + return ERR_PTR(rc); + } + + /* Disable multicast loopback */ + inet_sk(sk)->mc_loop = 0; + + /* Mark socket as an encapsulation socket. */ + udp_sk(sk)->encap_type = 1; + udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv; + udp_encap_enable(); + + vs->refcnt = 1; + return vs; +} + static int vxlan_newlink(struct net *net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { + struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_rdst *dst = &vxlan->default_dst; + struct vxlan_sock *vs; __u32 vni; int err;
@@@ -1520,6 -1425,10 +1532,6 @@@ return -EINVAL;
vni = nla_get_u32(data[IFLA_VXLAN_ID]); - if (vxlan_find_vni(net, vni)) { - pr_info("duplicate VNI %u\n", vni); - return -EEXIST; - } dst->remote_vni = vni;
if (data[IFLA_VXLAN_GROUP]) @@@ -1585,58 -1494,22 +1597,58 @@@ if (data[IFLA_VXLAN_PORT]) vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
+ if (vxlan_find_vni(net, vni, vxlan->dst_port)) { + pr_info("duplicate VNI %u\n", vni); + return -EEXIST; + } + + vs = vxlan_find_port(net, vxlan->dst_port); + if (vs) + ++vs->refcnt; + else { + /* Drop lock because socket create acquires RTNL lock */ + rtnl_unlock(); + vs = vxlan_socket_create(net, vxlan->dst_port); + rtnl_lock(); + if (IS_ERR(vs)) + return PTR_ERR(vs); + + hlist_add_head_rcu(&vs->hlist, vs_head(net, vxlan->dst_port)); + } + vxlan->vn_sock = vs; + SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);
err = register_netdevice(dev); - if (!err) - hlist_add_head_rcu(&vxlan->hlist, vni_head(net, dst->remote_vni)); + if (err) { + if (--vs->refcnt == 0) { + rtnl_unlock(); + sk_release_kernel(vs->sock->sk); + kfree(vs); + rtnl_lock(); + } + return err; + }
- return err; + list_add(&vxlan->next, &vn->vxlan_list); + hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni)); + + return 0; }
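The vxlan_newlink() hunk above shares one listening UDP socket per destination port: an existing vxlan_sock just gets its refcnt bumped, otherwise a new socket is created (with RTNL dropped around the blocking setup) and hashed, and the error path plus vxlan_dellink() drop the reference and tear the socket down at zero. Below is a heavily simplified, hedged sketch of the get-or-create/refcount shape only; the "foo" names are hypothetical and the real locking, RCU hashing, and socket creation are omitted.

#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct foo_sock {
        struct list_head list;
        unsigned int refcnt;
        u16 port;               /* stand-in for the __be16 UDP destination port */
};

static LIST_HEAD(foo_sock_list);

/* Callers are serialized, as vxlan_newlink()/vxlan_dellink() are under RTNL. */
static struct foo_sock *foo_sock_get(u16 port)
{
        struct foo_sock *fs;

        list_for_each_entry(fs, &foo_sock_list, list)
                if (fs->port == port) {
                        fs->refcnt++;   /* share the existing per-port socket */
                        return fs;
                }

        fs = kzalloc(sizeof(*fs), GFP_KERNEL); /* real code also creates and binds
                                                * the kernel UDP socket here */
        if (!fs)
                return ERR_PTR(-ENOMEM);
        fs->port = port;
        fs->refcnt = 1;
        list_add(&fs->list, &foo_sock_list);
        return fs;
}

static void foo_sock_put(struct foo_sock *fs)
{
        if (--fs->refcnt == 0) {        /* last device using this port */
                list_del(&fs->list);
                kfree(fs);              /* real code defers socket release to a work item */
        }
}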
static void vxlan_dellink(struct net_device *dev, struct list_head *head) { struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_sock *vs = vxlan->vn_sock;
hlist_del_rcu(&vxlan->hlist); - + list_del(&vxlan->next); unregister_netdevice_queue(dev, head); + + if (--vs->refcnt == 0) { + hlist_del_rcu(&vs->hlist); + schedule_work(&vs->del_work); + } }
static size_t vxlan_get_size(const struct net_device *dev) @@@ -1722,12 -1595,46 +1734,12 @@@ static struct rtnl_link_ops vxlan_link_ static __net_init int vxlan_init_net(struct net *net) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); - struct sock *sk; - struct sockaddr_in vxlan_addr = { - .sin_family = AF_INET, - .sin_addr.s_addr = htonl(INADDR_ANY), - }; - int rc; - unsigned h; - - /* Create UDP socket for encapsulation receive. */ - rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vn->sock); - if (rc < 0) { - pr_debug("UDP socket create failed\n"); - return rc; - } - /* Put in proper namespace */ - sk = vn->sock->sk; - sk_change_net(sk, net); - - vxlan_addr.sin_port = htons(vxlan_port); - - rc = kernel_bind(vn->sock, (struct sockaddr *) &vxlan_addr, - sizeof(vxlan_addr)); - if (rc < 0) { - pr_debug("bind for UDP socket %pI4:%u (%d)\n", - &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc); - sk_release_kernel(sk); - vn->sock = NULL; - return rc; - } + unsigned int h;
- /* Disable multicast loopback */ - inet_sk(sk)->mc_loop = 0; + INIT_LIST_HEAD(&vn->vxlan_list);
- /* Mark socket as an encapsulation socket. */ - udp_sk(sk)->encap_type = 1; - udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv; - udp_encap_enable(); - - for (h = 0; h < VNI_HASH_SIZE; ++h) - INIT_HLIST_HEAD(&vn->vni_list[h]); + for (h = 0; h < PORT_HASH_SIZE; ++h) + INIT_HLIST_HEAD(&vn->sock_list[h]);
return 0; } @@@ -1736,11 -1643,18 +1748,11 @@@ static __net_exit void vxlan_exit_net(s { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_dev *vxlan; - unsigned h;
rtnl_lock(); - for (h = 0; h < VNI_HASH_SIZE; ++h) - hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist) - dev_close(vxlan->dev); + list_for_each_entry(vxlan, &vn->vxlan_list, next) + dev_close(vxlan->dev); rtnl_unlock(); - - if (vn->sock) { - sk_release_kernel(vn->sock->sk); - vn->sock = NULL; - } }
static struct pernet_operations vxlan_net_ops = { @@@ -1771,7 -1685,7 +1783,7 @@@ out2 out1: return rc; } -module_init(vxlan_init_module); +late_initcall(vxlan_init_module);
static void __exit vxlan_cleanup_module(void) { diff --combined drivers/net/wireless/ath/ath9k/Kconfig index 3b07851,3c2cbc9..760ab3f --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@@ -84,13 -84,25 +84,17 @@@ config ATH9K_DFS_CERTIFIE developed. At this point enabling this option won't do anything except increase code size.
- config ATH9K_RATE_CONTROL -config ATH9K_MAC_DEBUG - bool "Atheros MAC statistics" - depends on ATH9K_DEBUGFS - default y - ---help--- - This option enables collection of statistics for Rx/Tx status - data and some other MAC related statistics - + config ATH9K_LEGACY_RATE_CONTROL bool "Atheros ath9k rate control" depends on ATH9K - default y + default n ---help--- Say Y, if you want to use the ath9k specific rate control - module instead of minstrel_ht. + module instead of minstrel_ht. Be warned that there are various + issues with the ath9k RC and minstrel is a more robust algorithm. + Note that even if this option is selected, "ath9k_rate_control" + has to be passed to mac80211 using the module parameter, + ieee80211_default_rc_algo.
config ATH9K_HTC tristate "Atheros HTC based wireless cards support" diff --combined drivers/net/wireless/ath/ath9k/init.c index daba841,2ba4945..389ee1b --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@@ -21,7 -21,6 +21,7 @@@ #include <linux/ath9k_platform.h> #include <linux/module.h> #include <linux/relay.h> +#include <net/ieee80211_radiotap.h>
#include "ath9k.h"
@@@ -614,6 -613,9 +614,6 @@@ static int ath9k_init_softc(u16 devid, spin_lock_init(&sc->sc_serial_rw); spin_lock_init(&sc->sc_pm_lock); mutex_init(&sc->mutex); -#ifdef CONFIG_ATH9K_MAC_DEBUG - spin_lock_init(&sc->debug.samp_lock); -#endif tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc); tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet, (unsigned long)sc); @@@ -767,19 -769,12 +767,19 @@@ void ath9k_set_hw_capab(struct ath_soft IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_RC_TABLE;
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) - hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; + if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { + hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; + + if (AR_SREV_9280_20_OR_LATER(ah)) + hw->radiotap_mcs_details |= + IEEE80211_RADIOTAP_MCS_HAVE_STBC; + }
if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt) hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+ hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; + hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_CLIENT) | @@@ -792,25 -787,28 +792,24 @@@ hw->wiphy->iface_combinations = if_comb; hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
- if (AR_SREV_5416(sc->sc_ah)) - hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
#ifdef CONFIG_PM_SLEEP - if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) && device_can_wakeup(sc->dev)) { - hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT; hw->wiphy->wowlan.n_patterns = MAX_NUM_USER_PATTERN; hw->wiphy->wowlan.pattern_min_len = 1; hw->wiphy->wowlan.pattern_max_len = MAX_PATTERN_SIZE; - }
atomic_set(&sc->wow_sleep_proc_intr, -1); atomic_set(&sc->wow_got_bmiss_intr, -1); - #endif
hw->queues = 4; @@@ -831,10 -829,6 +830,6 @@@ sc->ant_rx = hw->wiphy->available_antennas_rx; sc->ant_tx = hw->wiphy->available_antennas_tx;
- #ifdef CONFIG_ATH9K_RATE_CONTROL - hw->rate_control_algorithm = "ath9k_rate_control"; - #endif - if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &sc->sbands[IEEE80211_BAND_2GHZ]; diff --combined drivers/net/wireless/iwlwifi/dvm/rs.c index 94314a8,10fbb17..8fe76dc --- a/drivers/net/wireless/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/iwlwifi/dvm/rs.c @@@ -1088,7 -1088,7 +1088,7 @@@ done (priv->tm_fixed_rate != lq_sta->dbg_fixed_rate)) rs_program_fix_rate(priv, lq_sta); #endif - if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist) + if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist) rs_bt_update_lq(priv, ctx, lq_sta); }
@@@ -2799,7 -2799,7 +2799,7 @@@ static void rs_get_rate(void *priv_r, s info->control.rates[0].flags = 0; } info->control.rates[0].idx = rate_idx; - + info->control.rates[0].count = 1; }
static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta, @@@ -3064,11 -3064,11 +3064,11 @@@ static void rs_fill_link_cmd(struct iwl * overwrite if needed, pass aggregation time limit * to uCode in uSec */ - if (priv && priv->cfg->bt_params && - priv->cfg->bt_params->agg_time_limit && + if (priv && priv->lib->bt_params && + priv->lib->bt_params->agg_time_limit && priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) lq_cmd->agg_params.agg_time_limit = - cpu_to_le16(priv->cfg->bt_params->agg_time_limit); + cpu_to_le16(priv->lib->bt_params->agg_time_limit); }
static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) diff --combined drivers/net/wireless/iwlwifi/iwl-drv.c index 4f88613,40fed1f..2f690e5 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@@ -1000,10 -1000,12 +1000,12 @@@ static void iwl_req_fw_callback(const s */ if (load_module) { err = request_module("%s", op->name); + #ifdef CONFIG_IWLWIFI_OPMODE_MODULAR if (err) IWL_ERR(drv, "failed to load module %s (error %d), is dynamic loading enabled?\n", op->name, err); + #endif } return;
@@@ -1234,9 -1236,6 +1236,9 @@@ MODULE_PARM_DESC(wd_disable "Disable stuck queue watchdog timer 0=system default, " "1=disable, 2=enable (default: 0)");
+module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO); +MODULE_PARM_DESC(nvm_file, "NVM file name"); + /* * set bt_coex_active to true, uCode will do kill/defer * every time the priority line is asserted (BT is sending signals on the diff --combined drivers/net/wireless/iwlwifi/mvm/rs.c index d6beec7,b99fe31..31587a3 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@@ -401,17 -401,6 +401,17 @@@ static int rs_tl_turn_on_agg_for_tid(st
load = rs_tl_get_load(lq_data, tid);
+ /* + * Don't create TX aggregation sessions when in high + * BT traffic, as they would just be disrupted by BT. + */ + if (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2) { + IWL_DEBUG_COEX(mvm, "BT traffic (%d), no aggregation allowed\n", + BT_MBOX_MSG(&mvm->last_bt_notif, + 3, TRAFFIC_LOAD)); + return ret; + } + if ((iwlwifi_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) { IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid); @@@ -1530,29 -1519,6 +1530,29 @@@ static int rs_move_siso_to_other(struc u8 update_search_tbl_counter = 0; int ret;
+ switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) { + case IWL_BT_COEX_TRAFFIC_LOAD_NONE: + /* nothing */ + break; + case IWL_BT_COEX_TRAFFIC_LOAD_LOW: + /* avoid antenna B unless MIMO */ + if (tbl->action == IWL_SISO_SWITCH_ANTENNA2) + tbl->action = IWL_SISO_SWITCH_MIMO2_AB; + break; + case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: + case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: + /* avoid antenna B and MIMO */ + valid_tx_ant = + first_antenna(iwl_fw_valid_tx_ant(mvm->fw)); + if (tbl->action != IWL_SISO_SWITCH_ANTENNA1) + tbl->action = IWL_SISO_SWITCH_ANTENNA1; + break; + default: + IWL_ERR(mvm, "Invalid BT load %d", + BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)); + break; + } + start_action = tbl->action; while (1) { lq_sta->action_counter++; @@@ -1566,9 -1532,7 +1566,9 @@@ tx_chains_num <= 2)) break;
- if (window->success_ratio >= IWL_RS_GOOD_RATIO) + if (window->success_ratio >= IWL_RS_GOOD_RATIO && + BT_MBOX_MSG(&mvm->last_bt_notif, 3, + TRAFFIC_LOAD) == 0) break;
memcpy(search_tbl, tbl, sz); @@@ -1690,28 -1654,6 +1690,28 @@@ static int rs_move_mimo2_to_other(struc u8 update_search_tbl_counter = 0; int ret;
+ switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) { + case IWL_BT_COEX_TRAFFIC_LOAD_NONE: + /* nothing */ + break; + case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: + case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: + /* avoid antenna B and MIMO */ + if (tbl->action != IWL_MIMO2_SWITCH_SISO_A) + tbl->action = IWL_MIMO2_SWITCH_SISO_A; + break; + case IWL_BT_COEX_TRAFFIC_LOAD_LOW: + /* avoid antenna B unless MIMO */ + if (tbl->action == IWL_MIMO2_SWITCH_SISO_B || + tbl->action == IWL_MIMO2_SWITCH_SISO_C) + tbl->action = IWL_MIMO2_SWITCH_SISO_A; + break; + default: + IWL_ERR(mvm, "Invalid BT load %d", + BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)); + break; + } + start_action = tbl->action; while (1) { lq_sta->action_counter++; @@@ -1849,28 -1791,6 +1849,28 @@@ static int rs_move_mimo3_to_other(struc int ret; u8 update_search_tbl_counter = 0;
+ switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) { + case IWL_BT_COEX_TRAFFIC_LOAD_NONE: + /* nothing */ + break; + case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: + case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: + /* avoid antenna B and MIMO */ + if (tbl->action != IWL_MIMO3_SWITCH_SISO_A) + tbl->action = IWL_MIMO3_SWITCH_SISO_A; + break; + case IWL_BT_COEX_TRAFFIC_LOAD_LOW: + /* avoid antenna B unless MIMO */ + if (tbl->action == IWL_MIMO3_SWITCH_SISO_B || + tbl->action == IWL_MIMO3_SWITCH_SISO_C) + tbl->action = IWL_MIMO3_SWITCH_SISO_A; + break; + default: + IWL_ERR(mvm, "Invalid BT load %d", + BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)); + break; + } + start_action = tbl->action; while (1) { lq_sta->action_counter++; @@@ -2382,32 -2302,6 +2382,32 @@@ static void rs_rate_scale_perform(struc (current_tpt > (100 * tbl->expected_tpt[low])))) scale_action = 0;
+ if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= + IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && + (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) { + if (lq_sta->last_bt_traffic > + BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) { + /* + * don't set scale_action, don't want to scale up if + * the rate scale doesn't otherwise think that is a + * good idea. + */ + } else if (lq_sta->last_bt_traffic <= + BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) { + scale_action = -1; + } + } + lq_sta->last_bt_traffic = + BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD); + + if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= + IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && + (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) { + /* search for a new modulation */ + rs_stay_in_table(lq_sta, true); + goto lq_update; + } + switch (scale_action) { case -1: /* Decrease starting rate, update uCode's rate table */ @@@ -2652,6 -2546,7 +2652,7 @@@ static void rs_get_rate(void *mvm_r, st info->control.rates[0].flags = 0; } info->control.rates[0].idx = rate_idx; + info->control.rates[0].count = 1; }
static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, @@@ -2888,13 -2783,6 +2889,13 @@@ static void rs_fill_link_cmd(struct iwl
lq_cmd->agg_time_limit = cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); + + /* + * overwrite if needed, pass aggregation time limit + * to uCode in uSec - This is racy - but heh, at least it helps... + */ + if (mvm && BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2) + lq_cmd->agg_time_limit = cpu_to_le16(1200); }
static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) @@@ -3193,29 -3081,3 +3194,29 @@@ void iwl_mvm_rate_control_unregister(vo { ieee80211_rate_control_unregister(&rs_mvm_ops); } + +/** + * iwl_mvm_tx_protection - Gets LQ command, change it to enable/disable + * Tx protection, according to this rquest and previous requests, + * and send the LQ command. + * @lq: The LQ command + * @mvmsta: The station + * @enable: Enable Tx protection? + */ +int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, + struct iwl_mvm_sta *mvmsta, bool enable) +{ + lockdep_assert_held(&mvm->mutex); + + if (enable) { + if (mvmsta->tx_protection == 0) + lq->flags |= LQ_FLAG_SET_STA_TLC_RTS_MSK; + mvmsta->tx_protection++; + } else { + mvmsta->tx_protection--; + if (mvmsta->tx_protection == 0) + lq->flags &= ~LQ_FLAG_SET_STA_TLC_RTS_MSK; + } + + return iwl_mvm_send_lq_cmd(mvm, lq, CMD_ASYNC, false); +} diff --combined drivers/net/wireless/iwlwifi/mvm/tx.c index a830eb6,48c1891..b9ba4e7 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@@ -175,12 -175,13 +175,13 @@@ static void iwl_mvm_set_tx_cmd_rate(str * table is controlled by LINK_QUALITY commands */
- if (ieee80211_is_data(fc)) { + if (ieee80211_is_data(fc) && sta) { tx_cmd->initial_rate_index = 0; tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); return; } else if (ieee80211_is_back_req(fc)) { - tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); + tx_cmd->tx_flags |= + cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR); }
/* HT rate doesn't make sense for a non data frame */ diff --combined drivers/net/wireless/rt2x00/rt2800lib.c index ead3a3e,72f32e5..3aa30dd --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@@ -840,7 -840,7 +840,7 @@@ static inline void rt2800_clear_beacon_ unsigned int beacon_base) { int i; - const int txwi_desc_size = rt2x00dev->ops->bcn->winfo_size; + const int txwi_desc_size = rt2x00dev->bcn->winfo_size;
/* * For the Beacon base registers we only need to clear @@@ -3027,19 -3027,26 +3027,26 @@@ static void rt2800_config_txpower(struc * TODO: we do not use +6 dBm option to do not increase power beyond * regulatory limit, however this could be utilized for devices with * CAPABILITY_POWER_LIMIT. - */ - rt2800_bbp_read(rt2x00dev, 1, &r1); - if (delta <= -12) { - power_ctrl = 2; - delta += 12; - } else if (delta <= -6) { - power_ctrl = 1; - delta += 6; - } else { - power_ctrl = 0; + * + * TODO: add different temperature compensation code for RT3290 & RT5390 + * to allow to use BBP_R1 for those chips. + */ + if (!rt2x00_rt(rt2x00dev, RT3290) && + !rt2x00_rt(rt2x00dev, RT5390)) { + rt2800_bbp_read(rt2x00dev, 1, &r1); + if (delta <= -12) { + power_ctrl = 2; + delta += 12; + } else if (delta <= -6) { + power_ctrl = 1; + delta += 6; + } else { + power_ctrl = 0; + } + rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl); + rt2800_bbp_write(rt2x00dev, 1, r1); } - rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl); - rt2800_bbp_write(rt2x00dev, 1, r1); + offset = TX_PWR_CFG_0;
for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) { @@@ -3953,577 -3960,379 +3960,577 @@@ static void rt2800_init_bbp_early(struc rt2800_bbp_write(rt2x00dev, 106, 0x35); }
-static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev) +static void rt2800_disable_unused_dac_adc(struct rt2x00_dev *rt2x00dev) { u16 eeprom; u8 value;
- rt2800_init_bbp_early(rt2x00dev); + rt2800_bbp_read(rt2x00dev, 138, &value); + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); + if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1) + value |= 0x20; + if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1) + value &= ~0x02; + rt2800_bbp_write(rt2x00dev, 138, value); +}
- rt2800_bbp_read(rt2x00dev, 105, &value); - rt2x00_set_field8(&value, BBP105_MLD, - rt2x00dev->default_ant.rx_chain_num == 2); - rt2800_bbp_write(rt2x00dev, 105, value); +static void rt2800_init_bbp_305x_soc(struct rt2x00_dev *rt2x00dev) +{ + rt2800_bbp_write(rt2x00dev, 31, 0x08); + + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); + + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x10); + + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + + rt2800_bbp_write(rt2x00dev, 78, 0x0e); + rt2800_bbp_write(rt2x00dev, 80, 0x08); + + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + rt2800_bbp_write(rt2x00dev, 84, 0x99); + + rt2800_bbp_write(rt2x00dev, 86, 0x00); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x00); + + rt2800_bbp_write(rt2x00dev, 103, 0xc0); + + rt2800_bbp_write(rt2x00dev, 105, 0x01); + + rt2800_bbp_write(rt2x00dev, 106, 0x35); +} + +static void rt2800_init_bbp_28xx(struct rt2x00_dev *rt2x00dev) +{ + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); + + if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) { + rt2800_bbp_write(rt2x00dev, 69, 0x16); + rt2800_bbp_write(rt2x00dev, 73, 0x12); + } else { + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x10); + } + + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + + rt2800_bbp_write(rt2x00dev, 81, 0x37); + + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D)) + rt2800_bbp_write(rt2x00dev, 84, 0x19); + else + rt2800_bbp_write(rt2x00dev, 84, 0x99); + + rt2800_bbp_write(rt2x00dev, 86, 0x00); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x00); + + rt2800_bbp_write(rt2x00dev, 103, 0x00); + + rt2800_bbp_write(rt2x00dev, 105, 0x05); + + rt2800_bbp_write(rt2x00dev, 106, 0x35); +} + +static void rt2800_init_bbp_30xx(struct rt2x00_dev *rt2x00dev) +{ + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); + + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x10); + + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + + rt2800_bbp_write(rt2x00dev, 79, 0x13); + rt2800_bbp_write(rt2x00dev, 80, 0x05); + rt2800_bbp_write(rt2x00dev, 81, 0x33); + + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + rt2800_bbp_write(rt2x00dev, 84, 0x99); + + rt2800_bbp_write(rt2x00dev, 86, 0x00); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x00); + + if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) || + rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) || + rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E)) + rt2800_bbp_write(rt2x00dev, 103, 0xc0); + else + rt2800_bbp_write(rt2x00dev, 103, 0x00); + + rt2800_bbp_write(rt2x00dev, 105, 0x05); + + rt2800_bbp_write(rt2x00dev, 106, 0x35); + + if (rt2x00_rt(rt2x00dev, RT3071) || + rt2x00_rt(rt2x00dev, RT3090)) + rt2800_disable_unused_dac_adc(rt2x00dev); +} + +static void rt2800_init_bbp_3290(struct rt2x00_dev *rt2x00dev) +{ + u8 value;
rt2800_bbp4_mac_if_ctrl(rt2x00dev);
- rt2800_bbp_write(rt2x00dev, 20, 0x06); rt2800_bbp_write(rt2x00dev, 31, 0x08); - rt2800_bbp_write(rt2x00dev, 65, 0x2C); - rt2800_bbp_write(rt2x00dev, 68, 0xDD); - rt2800_bbp_write(rt2x00dev, 69, 0x1A); - rt2800_bbp_write(rt2x00dev, 70, 0x05); + + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); + + rt2800_bbp_write(rt2x00dev, 68, 0x0b); + + rt2800_bbp_write(rt2x00dev, 69, 0x12); rt2800_bbp_write(rt2x00dev, 73, 0x13); - rt2800_bbp_write(rt2x00dev, 74, 0x0F); - rt2800_bbp_write(rt2x00dev, 75, 0x4F); + rt2800_bbp_write(rt2x00dev, 75, 0x46); + rt2800_bbp_write(rt2x00dev, 76, 0x28); + + rt2800_bbp_write(rt2x00dev, 77, 0x58); + + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + + rt2800_bbp_write(rt2x00dev, 74, 0x0b); + rt2800_bbp_write(rt2x00dev, 79, 0x18); + rt2800_bbp_write(rt2x00dev, 80, 0x09); + rt2800_bbp_write(rt2x00dev, 81, 0x33); + + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x7a); + + rt2800_bbp_write(rt2x00dev, 84, 0x9a); + + rt2800_bbp_write(rt2x00dev, 86, 0x38); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x02); + + rt2800_bbp_write(rt2x00dev, 103, 0xc0); + + rt2800_bbp_write(rt2x00dev, 104, 0x92); + + rt2800_bbp_write(rt2x00dev, 105, 0x1c); + + rt2800_bbp_write(rt2x00dev, 106, 0x03); + + rt2800_bbp_write(rt2x00dev, 128, 0x12); + + rt2800_bbp_write(rt2x00dev, 67, 0x24); + rt2800_bbp_write(rt2x00dev, 143, 0x04); + rt2800_bbp_write(rt2x00dev, 142, 0x99); + rt2800_bbp_write(rt2x00dev, 150, 0x30); + rt2800_bbp_write(rt2x00dev, 151, 0x2e); + rt2800_bbp_write(rt2x00dev, 152, 0x20); + rt2800_bbp_write(rt2x00dev, 153, 0x34); + rt2800_bbp_write(rt2x00dev, 154, 0x40); + rt2800_bbp_write(rt2x00dev, 155, 0x3b); + rt2800_bbp_write(rt2x00dev, 253, 0x04); + + rt2800_bbp_read(rt2x00dev, 47, &value); + rt2x00_set_field8(&value, BBP47_TSSI_ADC6, 1); + rt2800_bbp_write(rt2x00dev, 47, value); + + /* Use 5-bit ADC for Acquisition and 8-bit ADC for data */ + rt2800_bbp_read(rt2x00dev, 3, &value); + rt2x00_set_field8(&value, BBP3_ADC_MODE_SWITCH, 1); + rt2x00_set_field8(&value, BBP3_ADC_INIT_MODE, 1); + rt2800_bbp_write(rt2x00dev, 3, value); +} + +static void rt2800_init_bbp_3352(struct rt2x00_dev *rt2x00dev) +{ + rt2800_bbp_write(rt2x00dev, 3, 0x00); + rt2800_bbp_write(rt2x00dev, 4, 0x50); + + rt2800_bbp_write(rt2x00dev, 31, 0x08); + + rt2800_bbp_write(rt2x00dev, 47, 0x48); + + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); + + rt2800_bbp_write(rt2x00dev, 68, 0x0b); + + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x13); + rt2800_bbp_write(rt2x00dev, 75, 0x46); rt2800_bbp_write(rt2x00dev, 76, 0x28); + rt2800_bbp_write(rt2x00dev, 77, 0x59); - rt2800_bbp_write(rt2x00dev, 84, 0x9A); + + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + + rt2800_bbp_write(rt2x00dev, 78, 0x0e); + rt2800_bbp_write(rt2x00dev, 80, 0x08); + rt2800_bbp_write(rt2x00dev, 81, 0x37); + + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + rt2800_bbp_write(rt2x00dev, 84, 0x99); + rt2800_bbp_write(rt2x00dev, 86, 0x38); + rt2800_bbp_write(rt2x00dev, 88, 0x90); + rt2800_bbp_write(rt2x00dev, 91, 0x04); + rt2800_bbp_write(rt2x00dev, 92, 0x02); - rt2800_bbp_write(rt2x00dev, 95, 0x9a); - rt2800_bbp_write(rt2x00dev, 98, 0x12); - rt2800_bbp_write(rt2x00dev, 103, 0xC0); + + rt2800_bbp_write(rt2x00dev, 103, 0xc0); + rt2800_bbp_write(rt2x00dev, 104, 0x92); - /* FIXME BBP105 owerwrite */ - rt2800_bbp_write(rt2x00dev, 105, 0x3C); - rt2800_bbp_write(rt2x00dev, 106, 0x35); - 
rt2800_bbp_write(rt2x00dev, 128, 0x12); - rt2800_bbp_write(rt2x00dev, 134, 0xD0); - rt2800_bbp_write(rt2x00dev, 135, 0xF6); - rt2800_bbp_write(rt2x00dev, 137, 0x0F);
- /* Initialize GLRT (Generalized Likehood Radio Test) */ - rt2800_init_bbp_5592_glrt(rt2x00dev); + rt2800_bbp_write(rt2x00dev, 105, 0x34); + + rt2800_bbp_write(rt2x00dev, 106, 0x05); + + rt2800_bbp_write(rt2x00dev, 120, 0x50); + + rt2800_bbp_write(rt2x00dev, 137, 0x0f); + + rt2800_bbp_write(rt2x00dev, 163, 0xbd); + /* Set ITxBF timeout to 0x9c40=1000msec */ + rt2800_bbp_write(rt2x00dev, 179, 0x02); + rt2800_bbp_write(rt2x00dev, 180, 0x00); + rt2800_bbp_write(rt2x00dev, 182, 0x40); + rt2800_bbp_write(rt2x00dev, 180, 0x01); + rt2800_bbp_write(rt2x00dev, 182, 0x9c); + rt2800_bbp_write(rt2x00dev, 179, 0x00); + /* Reprogram the inband interface to put right values in RXWI */ + rt2800_bbp_write(rt2x00dev, 142, 0x04); + rt2800_bbp_write(rt2x00dev, 143, 0x3b); + rt2800_bbp_write(rt2x00dev, 142, 0x06); + rt2800_bbp_write(rt2x00dev, 143, 0xa0); + rt2800_bbp_write(rt2x00dev, 142, 0x07); + rt2800_bbp_write(rt2x00dev, 143, 0xa1); + rt2800_bbp_write(rt2x00dev, 142, 0x08); + rt2800_bbp_write(rt2x00dev, 143, 0xa2); + + rt2800_bbp_write(rt2x00dev, 148, 0xc8); +}
- rt2800_bbp4_mac_if_ctrl(rt2x00dev); +static void rt2800_init_bbp_3390(struct rt2x00_dev *rt2x00dev) +{ + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); - div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY); - ant = (div_mode == 3) ? 1 : 0; - rt2800_bbp_read(rt2x00dev, 152, &value); - if (ant == 0) { - /* Main antenna */ - rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); - } else { - /* Auxiliary antenna */ - rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); - } - rt2800_bbp_write(rt2x00dev, 152, value); + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x10);
- if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) { - rt2800_bbp_read(rt2x00dev, 254, &value); - rt2x00_set_field8(&value, BBP254_BIT7, 1); - rt2800_bbp_write(rt2x00dev, 254, value); - } + rt2800_bbp_write(rt2x00dev, 70, 0x0a);
- rt2800_init_freq_calibration(rt2x00dev); + rt2800_bbp_write(rt2x00dev, 79, 0x13); + rt2800_bbp_write(rt2x00dev, 80, 0x05); + rt2800_bbp_write(rt2x00dev, 81, 0x33);
- rt2800_bbp_write(rt2x00dev, 84, 0x19); - if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + rt2800_bbp_write(rt2x00dev, 84, 0x99); + + rt2800_bbp_write(rt2x00dev, 86, 0x00); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x00); + + if (rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E)) rt2800_bbp_write(rt2x00dev, 103, 0xc0); + else + rt2800_bbp_write(rt2x00dev, 103, 0x00); + + rt2800_bbp_write(rt2x00dev, 105, 0x05); + + rt2800_bbp_write(rt2x00dev, 106, 0x35); + + rt2800_disable_unused_dac_adc(rt2x00dev); }
-static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) +static void rt2800_init_bbp_3572(struct rt2x00_dev *rt2x00dev) { - unsigned int i; - u16 eeprom; - u8 reg_id; - u8 value; + rt2800_bbp_write(rt2x00dev, 31, 0x08);
- if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) || - rt2800_wait_bbp_ready(rt2x00dev))) - return -EACCES; + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38);
- if (rt2x00_rt(rt2x00dev, RT5592)) { - rt2800_init_bbp_5592(rt2x00dev); - return 0; - } + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x10);
- if (rt2x00_rt(rt2x00dev, RT3352)) { - rt2800_bbp_write(rt2x00dev, 3, 0x00); - rt2800_bbp_write(rt2x00dev, 4, 0x50); - } + rt2800_bbp_write(rt2x00dev, 70, 0x0a);
- if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp4_mac_if_ctrl(rt2x00dev); + rt2800_bbp_write(rt2x00dev, 79, 0x13); + rt2800_bbp_write(rt2x00dev, 80, 0x05); + rt2800_bbp_write(rt2x00dev, 81, 0x33);
- if (rt2800_is_305x_soc(rt2x00dev) || - rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT3572) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 31, 0x08); + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + rt2800_bbp_write(rt2x00dev, 84, 0x99); + + rt2800_bbp_write(rt2x00dev, 86, 0x00); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x00); + + rt2800_bbp_write(rt2x00dev, 103, 0xc0); + + rt2800_bbp_write(rt2x00dev, 105, 0x05); + + rt2800_bbp_write(rt2x00dev, 106, 0x35); + + rt2800_disable_unused_dac_adc(rt2x00dev); +} + +static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev) +{ + int ant, div_mode; + u16 eeprom; + u8 value; + + rt2800_bbp4_mac_if_ctrl(rt2x00dev);
- if (rt2x00_rt(rt2x00dev, RT3352)) - rt2800_bbp_write(rt2x00dev, 47, 0x48); + rt2800_bbp_write(rt2x00dev, 31, 0x08);
rt2800_bbp_write(rt2x00dev, 65, 0x2c); rt2800_bbp_write(rt2x00dev, 66, 0x38);
- if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 68, 0x0b); + rt2800_bbp_write(rt2x00dev, 68, 0x0b);
- if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) { - rt2800_bbp_write(rt2x00dev, 69, 0x16); - rt2800_bbp_write(rt2x00dev, 73, 0x12); - } else if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) { - rt2800_bbp_write(rt2x00dev, 69, 0x12); - rt2800_bbp_write(rt2x00dev, 73, 0x13); - rt2800_bbp_write(rt2x00dev, 75, 0x46); - rt2800_bbp_write(rt2x00dev, 76, 0x28); + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x13); + rt2800_bbp_write(rt2x00dev, 75, 0x46); + rt2800_bbp_write(rt2x00dev, 76, 0x28);
- if (rt2x00_rt(rt2x00dev, RT3290)) - rt2800_bbp_write(rt2x00dev, 77, 0x58); - else - rt2800_bbp_write(rt2x00dev, 77, 0x59); - } else { - rt2800_bbp_write(rt2x00dev, 69, 0x12); - rt2800_bbp_write(rt2x00dev, 73, 0x10); - } + rt2800_bbp_write(rt2x00dev, 77, 0x59);
rt2800_bbp_write(rt2x00dev, 70, 0x0a);
- if (rt2x00_rt(rt2x00dev, RT3070) || - rt2x00_rt(rt2x00dev, RT3071) || - rt2x00_rt(rt2x00dev, RT3090) || - rt2x00_rt(rt2x00dev, RT3390) || - rt2x00_rt(rt2x00dev, RT3572) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) { - rt2800_bbp_write(rt2x00dev, 79, 0x13); - rt2800_bbp_write(rt2x00dev, 80, 0x05); - rt2800_bbp_write(rt2x00dev, 81, 0x33); - } else if (rt2800_is_305x_soc(rt2x00dev)) { - rt2800_bbp_write(rt2x00dev, 78, 0x0e); - rt2800_bbp_write(rt2x00dev, 80, 0x08); - } else if (rt2x00_rt(rt2x00dev, RT3290)) { - rt2800_bbp_write(rt2x00dev, 74, 0x0b); - rt2800_bbp_write(rt2x00dev, 79, 0x18); - rt2800_bbp_write(rt2x00dev, 80, 0x09); - rt2800_bbp_write(rt2x00dev, 81, 0x33); - } else if (rt2x00_rt(rt2x00dev, RT3352)) { - rt2800_bbp_write(rt2x00dev, 78, 0x0e); - rt2800_bbp_write(rt2x00dev, 80, 0x08); - rt2800_bbp_write(rt2x00dev, 81, 0x37); - } else { - rt2800_bbp_write(rt2x00dev, 81, 0x37); - } + rt2800_bbp_write(rt2x00dev, 79, 0x13); + rt2800_bbp_write(rt2x00dev, 80, 0x05); + rt2800_bbp_write(rt2x00dev, 81, 0x33);
rt2800_bbp_write(rt2x00dev, 82, 0x62); - if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 83, 0x7a); - else - rt2800_bbp_write(rt2x00dev, 83, 0x6a);
- if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D)) - rt2800_bbp_write(rt2x00dev, 84, 0x19); - else if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 84, 0x9a); - else - rt2800_bbp_write(rt2x00dev, 84, 0x99); + rt2800_bbp_write(rt2x00dev, 83, 0x7a);
- if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 86, 0x38); - else - rt2800_bbp_write(rt2x00dev, 86, 0x00); + rt2800_bbp_write(rt2x00dev, 84, 0x9a); + + rt2800_bbp_write(rt2x00dev, 86, 0x38);
- if (rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5392)) + if (rt2x00_rt(rt2x00dev, RT5392)) rt2800_bbp_write(rt2x00dev, 88, 0x90);
rt2800_bbp_write(rt2x00dev, 91, 0x04);
- if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 92, 0x02); - else - rt2800_bbp_write(rt2x00dev, 92, 0x00); + rt2800_bbp_write(rt2x00dev, 92, 0x02);
if (rt2x00_rt(rt2x00dev, RT5392)) { rt2800_bbp_write(rt2x00dev, 95, 0x9a); rt2800_bbp_write(rt2x00dev, 98, 0x12); }
- if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) || - rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) || - rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) || - rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) || - rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT3572) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392) || - rt2800_is_305x_soc(rt2x00dev)) - rt2800_bbp_write(rt2x00dev, 103, 0xc0); - else - rt2800_bbp_write(rt2x00dev, 103, 0x00); + rt2800_bbp_write(rt2x00dev, 103, 0xc0);
- if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 104, 0x92); + rt2800_bbp_write(rt2x00dev, 104, 0x92);
- if (rt2800_is_305x_soc(rt2x00dev)) - rt2800_bbp_write(rt2x00dev, 105, 0x01); - else if (rt2x00_rt(rt2x00dev, RT3290)) - rt2800_bbp_write(rt2x00dev, 105, 0x1c); - else if (rt2x00_rt(rt2x00dev, RT3352)) - rt2800_bbp_write(rt2x00dev, 105, 0x34); - else if (rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 105, 0x3c); - else - rt2800_bbp_write(rt2x00dev, 105, 0x05); + rt2800_bbp_write(rt2x00dev, 105, 0x3c);
- if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390)) + if (rt2x00_rt(rt2x00dev, RT5390)) rt2800_bbp_write(rt2x00dev, 106, 0x03); else if (rt2x00_rt(rt2x00dev, RT5392)) rt2800_bbp_write(rt2x00dev, 106, 0x12); else - rt2800_bbp_write(rt2x00dev, 106, 0x35); - - if (rt2x00_rt(rt2x00dev, RT3352)) - rt2800_bbp_write(rt2x00dev, 120, 0x50); + WARN_ON(1);
- if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 128, 0x12); + rt2800_bbp_write(rt2x00dev, 128, 0x12);
if (rt2x00_rt(rt2x00dev, RT5392)) { rt2800_bbp_write(rt2x00dev, 134, 0xd0); rt2800_bbp_write(rt2x00dev, 135, 0xf6); }
- if (rt2x00_rt(rt2x00dev, RT3352)) - rt2800_bbp_write(rt2x00dev, 137, 0x0f); + rt2800_disable_unused_dac_adc(rt2x00dev);
- if (rt2x00_rt(rt2x00dev, RT3071) || - rt2x00_rt(rt2x00dev, RT3090) || - rt2x00_rt(rt2x00dev, RT3390) || - rt2x00_rt(rt2x00dev, RT3572) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) { - rt2800_bbp_read(rt2x00dev, 138, &value); + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); + div_mode = rt2x00_get_field16(eeprom, + EEPROM_NIC_CONF1_ANT_DIVERSITY); + ant = (div_mode == 3) ? 1 : 0;
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); - if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1) - value |= 0x20; - if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1) - value &= ~0x02; + /* check if this is a Bluetooth combo card */ + if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) { + u32 reg;
- rt2800_bbp_write(rt2x00dev, 138, value); + rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg); + rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0); + rt2x00_set_field32(&reg, GPIO_CTRL_DIR6, 0); + rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 0); + rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 0); + if (ant == 0) + rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 1); + else if (ant == 1) + rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 1); + rt2800_register_write(rt2x00dev, GPIO_CTRL, reg); }
- if (rt2x00_rt(rt2x00dev, RT3290)) { - rt2800_bbp_write(rt2x00dev, 67, 0x24); - rt2800_bbp_write(rt2x00dev, 143, 0x04); - rt2800_bbp_write(rt2x00dev, 142, 0x99); - rt2800_bbp_write(rt2x00dev, 150, 0x30); - rt2800_bbp_write(rt2x00dev, 151, 0x2e); - rt2800_bbp_write(rt2x00dev, 152, 0x20); - rt2800_bbp_write(rt2x00dev, 153, 0x34); - rt2800_bbp_write(rt2x00dev, 154, 0x40); - rt2800_bbp_write(rt2x00dev, 155, 0x3b); - rt2800_bbp_write(rt2x00dev, 253, 0x04); - - rt2800_bbp_read(rt2x00dev, 47, &value); - rt2x00_set_field8(&value, BBP47_TSSI_ADC6, 1); - rt2800_bbp_write(rt2x00dev, 47, value); - - /* Use 5-bit ADC for Acquisition and 8-bit ADC for data */ - rt2800_bbp_read(rt2x00dev, 3, &value); - rt2x00_set_field8(&value, BBP3_ADC_MODE_SWITCH, 1); - rt2x00_set_field8(&value, BBP3_ADC_INIT_MODE, 1); - rt2800_bbp_write(rt2x00dev, 3, value); + /* This chip has hardware antenna diversity*/ + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) { + rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */ + rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */ + rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */ }
- if (rt2x00_rt(rt2x00dev, RT3352)) { - rt2800_bbp_write(rt2x00dev, 163, 0xbd); - /* Set ITxBF timeout to 0x9c40=1000msec */ - rt2800_bbp_write(rt2x00dev, 179, 0x02); - rt2800_bbp_write(rt2x00dev, 180, 0x00); - rt2800_bbp_write(rt2x00dev, 182, 0x40); - rt2800_bbp_write(rt2x00dev, 180, 0x01); - rt2800_bbp_write(rt2x00dev, 182, 0x9c); - rt2800_bbp_write(rt2x00dev, 179, 0x00); - /* Reprogram the inband interface to put right values in RXWI */ - rt2800_bbp_write(rt2x00dev, 142, 0x04); - rt2800_bbp_write(rt2x00dev, 143, 0x3b); - rt2800_bbp_write(rt2x00dev, 142, 0x06); - rt2800_bbp_write(rt2x00dev, 143, 0xa0); - rt2800_bbp_write(rt2x00dev, 142, 0x07); - rt2800_bbp_write(rt2x00dev, 143, 0xa1); - rt2800_bbp_write(rt2x00dev, 142, 0x08); - rt2800_bbp_write(rt2x00dev, 143, 0xa2); - - rt2800_bbp_write(rt2x00dev, 148, 0xc8); + rt2800_bbp_read(rt2x00dev, 152, &value); + if (ant == 0) + rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); + else + rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); + rt2800_bbp_write(rt2x00dev, 152, value); + + rt2800_init_freq_calibration(rt2x00dev); +} + +static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev) +{ + int ant, div_mode; + u16 eeprom; + u8 value; + + rt2800_init_bbp_early(rt2x00dev); + + rt2800_bbp_read(rt2x00dev, 105, &value); + rt2x00_set_field8(&value, BBP105_MLD, + rt2x00dev->default_ant.rx_chain_num == 2); + rt2800_bbp_write(rt2x00dev, 105, value); + + rt2800_bbp4_mac_if_ctrl(rt2x00dev); + + rt2800_bbp_write(rt2x00dev, 20, 0x06); + rt2800_bbp_write(rt2x00dev, 31, 0x08); + rt2800_bbp_write(rt2x00dev, 65, 0x2C); + rt2800_bbp_write(rt2x00dev, 68, 0xDD); + rt2800_bbp_write(rt2x00dev, 69, 0x1A); + rt2800_bbp_write(rt2x00dev, 70, 0x05); + rt2800_bbp_write(rt2x00dev, 73, 0x13); + rt2800_bbp_write(rt2x00dev, 74, 0x0F); + rt2800_bbp_write(rt2x00dev, 75, 0x4F); + rt2800_bbp_write(rt2x00dev, 76, 0x28); + rt2800_bbp_write(rt2x00dev, 77, 0x59); + rt2800_bbp_write(rt2x00dev, 84, 0x9A); + rt2800_bbp_write(rt2x00dev, 86, 0x38); + rt2800_bbp_write(rt2x00dev, 88, 0x90); + rt2800_bbp_write(rt2x00dev, 91, 0x04); + rt2800_bbp_write(rt2x00dev, 92, 0x02); + rt2800_bbp_write(rt2x00dev, 95, 0x9a); + rt2800_bbp_write(rt2x00dev, 98, 0x12); + rt2800_bbp_write(rt2x00dev, 103, 0xC0); + rt2800_bbp_write(rt2x00dev, 104, 0x92); + /* FIXME BBP105 owerwrite */ + rt2800_bbp_write(rt2x00dev, 105, 0x3C); + rt2800_bbp_write(rt2x00dev, 106, 0x35); + rt2800_bbp_write(rt2x00dev, 128, 0x12); + rt2800_bbp_write(rt2x00dev, 134, 0xD0); + rt2800_bbp_write(rt2x00dev, 135, 0xF6); + rt2800_bbp_write(rt2x00dev, 137, 0x0F); + + /* Initialize GLRT (Generalized Likehood Radio Test) */ + rt2800_init_bbp_5592_glrt(rt2x00dev); + + rt2800_bbp4_mac_if_ctrl(rt2x00dev); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); + div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY); + ant = (div_mode == 3) ? 1 : 0; + rt2800_bbp_read(rt2x00dev, 152, &value); + if (ant == 0) { + /* Main antenna */ + rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); + } else { + /* Auxiliary antenna */ + rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); } + rt2800_bbp_write(rt2x00dev, 152, value);
- if (rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) { - int ant, div_mode; + if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) { + rt2800_bbp_read(rt2x00dev, 254, &value); + rt2x00_set_field8(&value, BBP254_BIT7, 1); + rt2800_bbp_write(rt2x00dev, 254, value); + }
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); - div_mode = rt2x00_get_field16(eeprom, - EEPROM_NIC_CONF1_ANT_DIVERSITY); - ant = (div_mode == 3) ? 1 : 0; + rt2800_init_freq_calibration(rt2x00dev);
- /* check if this is a Bluetooth combo card */ - if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) { - u32 reg; - - rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg); - rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0); - rt2x00_set_field32(&reg, GPIO_CTRL_DIR6, 0); - rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 0); - rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 0); - if (ant == 0) - rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 1); - else if (ant == 1) - rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 1); - rt2800_register_write(rt2x00dev, GPIO_CTRL, reg); - } + rt2800_bbp_write(rt2x00dev, 84, 0x19); + if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) + rt2800_bbp_write(rt2x00dev, 103, 0xc0); +}
- /* This chip has hardware antenna diversity*/ - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) { - rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */ - rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */ - rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */ - } +static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) +{ + unsigned int i; + u16 eeprom; + u8 reg_id; + u8 value;
- rt2800_bbp_read(rt2x00dev, 152, &value); - if (ant == 0) - rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); - else - rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); - rt2800_bbp_write(rt2x00dev, 152, value); + if (rt2800_is_305x_soc(rt2x00dev)) + rt2800_init_bbp_305x_soc(rt2x00dev);
- rt2800_init_freq_calibration(rt2x00dev); + switch (rt2x00dev->chip.rt) { + case RT2860: + case RT2872: + case RT2883: + rt2800_init_bbp_28xx(rt2x00dev); + break; + case RT3070: + case RT3071: + case RT3090: + rt2800_init_bbp_30xx(rt2x00dev); + break; + case RT3290: + rt2800_init_bbp_3290(rt2x00dev); + break; + case RT3352: + rt2800_init_bbp_3352(rt2x00dev); + break; + case RT3390: + rt2800_init_bbp_3390(rt2x00dev); + break; + case RT3572: + rt2800_init_bbp_3572(rt2x00dev); + break; + case RT5390: + case RT5392: + rt2800_init_bbp_53xx(rt2x00dev); + break; + case RT5592: + rt2800_init_bbp_5592(rt2x00dev); + return; }
for (i = 0; i < EEPROM_BBP_SIZE; i++) { @@@ -4535,6 -4344,8 +4542,6 @@@ rt2800_bbp_write(rt2x00dev, reg_id, value); } } - - return 0; }
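With the rework above, rt2800_init_bbp() is split into per-chip helpers and no longer returns a status; the BBP/RF readiness polling is done by the caller instead. A minimal sketch of the resulting order in rt2800_enable_radio(), matching the hunk further below:

	if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) ||
		     rt2800_wait_bbp_ready(rt2x00dev)))
		return -EIO;

	rt2800_init_bbp(rt2x00dev);	/* void: per-chip dispatch only */
	rt2800_init_rfcsr(rt2x00dev);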
static void rt2800_led_open_drain_enable(struct rt2x00_dev *rt2x00dev) @@@ -5385,11 -5196,9 +5392,11 @@@ int rt2800_enable_radio(struct rt2x00_d } msleep(1);
- if (unlikely(rt2800_init_bbp(rt2x00dev))) + if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) || + rt2800_wait_bbp_ready(rt2x00dev))) return -EIO;
+ rt2800_init_bbp(rt2x00dev); rt2800_init_rfcsr(rt2x00dev);
if (rt2x00_is_usb(rt2x00dev) && diff --combined drivers/net/xen-netback/netback.c index 82576ff,8c20935..a0b50ad --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@@ -47,13 -47,6 +47,13 @@@ #include <asm/xen/hypercall.h> #include <asm/xen/page.h>
+/* Provide an option to disable split event channels at load time as + * event channels are limited resource. Split event channels are + * enabled by default. + */ +bool separate_tx_rx_irq = 1; +module_param(separate_tx_rx_irq, bool, 0644); + /* * This is the maximum slots a skb can have. If a guest sends a skb * which exceeds this limit it is considered malicious. @@@ -778,19 -771,21 +778,21 @@@ static void xen_netbk_rx_action(struct sco->meta_slots_used);
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
xenvif_notify_tx_completion(vif);
- xenvif_put(vif); + if (ret && list_empty(&vif->notify_list)) + list_add_tail(&vif->notify_list, &notify); + else + xenvif_put(vif); npo.meta_cons += sco->meta_slots_used; dev_kfree_skb(skb); }
list_for_each_entry_safe(vif, tmp, &notify, notify_list) { - notify_remote_via_irq(vif->irq); + notify_remote_via_irq(vif->rx_irq); list_del_init(&vif->notify_list); + xenvif_put(vif); }
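The reworked RX path defers both the event-channel kick and the final reference drop: a vif that produced responses is parked once on a local notify list (keeping its reference) and is notified a single time after the loop. A condensed sketch of the pattern, with the surrounding skb/vif handling of xen_netbk_rx_action() elided:

	LIST_HEAD(notify);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {
		/* ... build and push responses for this skb's vif ... */
		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
		if (ret && list_empty(&vif->notify_list))
			list_add_tail(&vif->notify_list, &notify); /* keep ref */
		else
			xenvif_put(vif);
		dev_kfree_skb(skb);
	}

	list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
		notify_remote_via_irq(vif->rx_irq);	/* one kick per vif */
		list_del_init(&vif->notify_list);
		xenvif_put(vif);			/* drop deferred ref */
	}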
/* More work to do? */ @@@ -1768,7 -1763,7 +1770,7 @@@ static void make_tx_response(struct xen vif->tx.rsp_prod_pvt = ++i; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify); if (notify) - notify_remote_via_irq(vif->irq); + notify_remote_via_irq(vif->tx_irq); }
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, @@@ -1945,6 -1940,10 +1947,6 @@@ static int __init netback_init(void failed_init: while (--group >= 0) { struct xen_netbk *netbk = &xen_netbk[group]; - for (i = 0; i < MAX_PENDING_REQS; i++) { - if (netbk->mmap_pages[i]) - __free_page(netbk->mmap_pages[i]); - } del_timer(&netbk->net_timer); kthread_stop(netbk->task); } @@@ -1955,25 -1954,5 +1957,25 @@@
module_init(netback_init);
+static void __exit netback_fini(void) +{ + int i, j; + + xenvif_xenbus_fini(); + + for (i = 0; i < xen_netbk_group_nr; i++) { + struct xen_netbk *netbk = &xen_netbk[i]; + del_timer_sync(&netbk->net_timer); + kthread_stop(netbk->task); + for (j = 0; j < MAX_PENDING_REQS; j++) { + if (netbk->mmap_pages[i]) + __free_page(netbk->mmap_pages[i]); + } + } + + vfree(xen_netbk); +} +module_exit(netback_fini); + MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("xen-backend:vif"); diff --combined include/linux/filter.h index 56a6b7f,f65f5a6..a6ac848 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@@ -46,6 -46,7 +46,7 @@@ extern int sk_attach_filter(struct sock extern int sk_detach_filter(struct sock *sk); extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen); extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned len); + extern void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to);
#ifdef CONFIG_BPF_JIT #include <stdarg.h> @@@ -58,10 -59,10 +59,10 @@@ extern void bpf_jit_free(struct sk_filt static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, u32 pass, void *image) { - pr_err("flen=%u proglen=%u pass=%u image=%p\n", + pr_err("flen=%u proglen=%u pass=%u image=%pK\n", flen, proglen, pass, image); if (image) - print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS, + print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET, 16, 1, image, proglen, false); } #define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns) diff --combined include/linux/if_team.h index b662045,16fae64..f6156f9 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@@ -69,7 -69,6 +69,7 @@@ struct team_port s32 priority; /* lower number ~ higher priority */ u16 queue_id; struct list_head qom_list; /* node in queue override mapping list */ + struct rcu_head rcu; long mode_priv[0]; };
@@@ -229,16 -228,6 +229,16 @@@ static inline struct team_port *team_ge return port; return NULL; } + +static inline int team_num_to_port_index(struct team *team, int num) +{ + int en_port_count = ACCESS_ONCE(team->en_port_count); + + if (unlikely(!en_port_count)) + return 0; + return num % en_port_count; +} + static inline struct team_port *team_get_port_by_index_rcu(struct team *team, int port_index) { @@@ -260,12 -249,12 +260,12 @@@ team_get_first_port_txable_rcu(struct t return port; cur = port; list_for_each_entry_continue_rcu(cur, &team->port_list, list) - if (team_port_txable(port)) + if (team_port_txable(cur)) return cur; list_for_each_entry_rcu(cur, &team->port_list, list) { if (cur == port) break; - if (team_port_txable(port)) + if (team_port_txable(cur)) return cur; } return NULL; diff --combined include/net/ip_tunnels.h index 40b4dfc,09b1360..1be442f --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@@ -95,13 -95,13 +95,13 @@@ struct ip_tunnel_net int ip_tunnel_init(struct net_device *dev); void ip_tunnel_uninit(struct net_device *dev); void ip_tunnel_dellink(struct net_device *dev, struct list_head *head); - int __net_init ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, - struct rtnl_link_ops *ops, char *devname); + int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, + struct rtnl_link_ops *ops, char *devname);
- void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn); + void ip_tunnel_delete_net(struct ip_tunnel_net *itn);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, - const struct iphdr *tnl_params); + const struct iphdr *tnl_params, const u8 protocol); int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
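The extra argument means the encapsulation protocol is chosen by each caller rather than read from tnl_params->protocol inside ip_tunnel_xmit() (see the ip_tunnel.c hunk below). An illustrative caller, simplified from what an IPIP-style driver would do; the real drivers add checks omitted here:

	static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
					    struct net_device *dev)
	{
		struct ip_tunnel *tunnel = netdev_priv(dev);
		const struct iphdr *tiph = &tunnel->parms.iph;

		ip_tunnel_xmit(skb, dev, tiph, IPPROTO_IPIP);
		return NETDEV_TX_OK;
	}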
diff --combined net/batman-adv/bat_iv_ogm.c index d07323b,f680ee1..62da527 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@@ -19,6 -19,7 +19,6 @@@
#include "main.h" #include "translation-table.h" -#include "ring_buffer.h" #include "originator.h" #include "routing.h" #include "gateway_common.h" @@@ -29,56 -30,30 +29,72 @@@ #include "network-coding.h"
/** + * batadv_ring_buffer_set - update the ring buffer with the given value + * @lq_recv: pointer to the ring buffer + * @lq_index: index to store the value at + * @value: value to store in the ring buffer + */ +static void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, + uint8_t value) +{ + lq_recv[*lq_index] = value; + *lq_index = (*lq_index + 1) % BATADV_TQ_GLOBAL_WINDOW_SIZE; +} + +/** + * batadv_ring_buffer_avg - compute the average of all non-zero values stored + * in the given ring buffer + * @lq_recv: pointer to the ring buffer + * + * Returns computed average value. + */ +static uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[]) +{ + const uint8_t *ptr; + uint16_t count = 0, i = 0, sum = 0; + + ptr = lq_recv; + + while (i < BATADV_TQ_GLOBAL_WINDOW_SIZE) { + if (*ptr != 0) { + count++; + sum += *ptr; + } + + i++; + ptr++; + } + + if (count == 0) + return 0; + + return (uint8_t)(sum / count); +} ++ ++/* + * batadv_dup_status - duplicate status + * @BATADV_NO_DUP: the packet is not a duplicate + * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the + * neighbor) + * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor + * @BATADV_PROTECTED: originator is currently protected (after reboot) + */ + enum batadv_dup_status { + BATADV_NO_DUP = 0, + BATADV_ORIG_DUP, + BATADV_NEIGH_DUP, + BATADV_PROTECTED, + }; + static struct batadv_neigh_node * batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface, const uint8_t *neigh_addr, struct batadv_orig_node *orig_node, - struct batadv_orig_node *orig_neigh, __be32 seqno) + struct batadv_orig_node *orig_neigh) { struct batadv_neigh_node *neigh_node;
- neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, - ntohl(seqno)); + neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr); if (!neigh_node) goto out;
@@@ -453,16 -428,18 +469,16 @@@ static void batadv_iv_ogm_aggregate_new else skb_size = packet_len;
- skb_size += ETH_HLEN + NET_IP_ALIGN; + skb_size += ETH_HLEN;
- forw_packet_aggr->skb = dev_alloc_skb(skb_size); + forw_packet_aggr->skb = netdev_alloc_skb_ip_align(NULL, skb_size); if (!forw_packet_aggr->skb) { if (!own_packet) atomic_inc(&bat_priv->batman_queue_left); kfree(forw_packet_aggr); goto out; } - skb_reserve(forw_packet_aggr->skb, ETH_HLEN + NET_IP_ALIGN); - - INIT_HLIST_NODE(&forw_packet_aggr->list); + skb_reserve(forw_packet_aggr->skb, ETH_HLEN);
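netdev_alloc_skb_ip_align() reserves NET_IP_ALIGN internally, which is why the explicit NET_IP_ALIGN handling drops out of the aggregation path above. Roughly equivalent forms, shown only as an illustration:

	/* before: manual alignment */
	skb = dev_alloc_skb(len + ETH_HLEN + NET_IP_ALIGN);
	if (skb)
		skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);

	/* after: NET_IP_ALIGN is reserved by the helper */
	skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
	if (skb)
		skb_reserve(skb, ETH_HLEN);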
skb_buff = skb_put(forw_packet_aggr->skb, packet_len); forw_packet_aggr->packet_len = packet_len; @@@ -628,41 -605,6 +644,41 @@@ static void batadv_iv_ogm_forward(struc if_incoming, 0, batadv_iv_ogm_fwd_send_time()); }
+/** + * batadv_iv_ogm_slide_own_bcast_window - bitshift own OGM broadcast windows for + * the given interface + * @hard_iface: the interface for which the windows have to be shifted + */ +static void +batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface) +{ + struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + struct batadv_hashtable *hash = bat_priv->orig_hash; + struct hlist_head *head; + struct batadv_orig_node *orig_node; + unsigned long *word; + uint32_t i; + size_t word_index; + uint8_t *w; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(orig_node, head, hash_entry) { + spin_lock_bh(&orig_node->ogm_cnt_lock); + word_index = hard_iface->if_num * BATADV_NUM_WORDS; + word = &(orig_node->bcast_own[word_index]); + + batadv_bit_get_packet(bat_priv, word, 1, 0); + w = &orig_node->bcast_own_sum[hard_iface->if_num]; + *w = bitmap_weight(word, BATADV_TQ_LOCAL_WINDOW_SIZE); + spin_unlock_bh(&orig_node->ogm_cnt_lock); + } + rcu_read_unlock(); + } +} + static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); @@@ -707,7 -649,7 +723,7 @@@ batadv_ogm_packet->gw_flags = BATADV_NO_FLAGS; }
- batadv_slide_own_bcast_window(hard_iface); + batadv_iv_ogm_slide_own_bcast_window(hard_iface); batadv_iv_ogm_queue_add(bat_priv, hard_iface->bat_iv.ogm_buff, hard_iface->bat_iv.ogm_buff_len, hard_iface, 1, batadv_iv_ogm_emit_send_time(bat_priv)); @@@ -723,7 -665,7 +739,7 @@@ batadv_iv_ogm_orig_update(struct batadv const struct batadv_ogm_packet *batadv_ogm_packet, struct batadv_hard_iface *if_incoming, const unsigned char *tt_buff, - int is_duplicate) + enum batadv_dup_status dup_status) { struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; struct batadv_neigh_node *router = NULL; @@@ -743,13 -685,13 +759,13 @@@ if (batadv_compare_eth(neigh_addr, ethhdr->h_source) && tmp_neigh_node->if_incoming == if_incoming && atomic_inc_not_zero(&tmp_neigh_node->refcount)) { - if (neigh_node) + if (WARN(neigh_node, "too many matching neigh_nodes")) batadv_neigh_node_free_ref(neigh_node); neigh_node = tmp_neigh_node; continue; }
- if (is_duplicate) + if (dup_status != BATADV_NO_DUP) continue;
spin_lock_bh(&tmp_neigh_node->lq_update_lock); @@@ -769,7 -711,8 +785,7 @@@
neigh_node = batadv_iv_ogm_neigh_new(if_incoming, ethhdr->h_source, - orig_node, orig_tmp, - batadv_ogm_packet->seqno); + orig_node, orig_tmp);
batadv_orig_node_free_ref(orig_tmp); if (!neigh_node) @@@ -790,7 -733,7 +806,7 @@@ neigh_node->tq_avg = batadv_ring_buffer_avg(neigh_node->tq_recv); spin_unlock_bh(&neigh_node->lq_update_lock);
- if (!is_duplicate) { + if (dup_status == BATADV_NO_DUP) { orig_node->last_ttl = batadv_ogm_packet->header.ttl; neigh_node->last_ttl = batadv_ogm_packet->header.ttl; } @@@ -901,7 -844,8 +917,7 @@@ static int batadv_iv_ogm_calc_tq(struc neigh_node = batadv_iv_ogm_neigh_new(if_incoming, orig_neigh_node->orig, orig_neigh_node, - orig_neigh_node, - batadv_ogm_packet->seqno); + orig_neigh_node);
if (!neigh_node) goto out; @@@ -973,15 -917,16 +989,16 @@@ out return ret; }
- /* processes a batman packet for all interfaces, adjusts the sequence number and - * finds out whether it is a duplicate. - * returns: - * 1 the packet is a duplicate - * 0 the packet has not yet been received - * -1 the packet is old and has been received while the seqno window - * was protected. Caller should drop it. + /** + * batadv_iv_ogm_update_seqnos - process a batman packet for all interfaces, + * adjust the sequence number and find out whether it is a duplicate + * @ethhdr: ethernet header of the packet + * @batadv_ogm_packet: OGM packet to be considered + * @if_incoming: interface on which the OGM packet was received + * + * Returns duplicate status as enum batadv_dup_status */ - static int + static enum batadv_dup_status batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, const struct batadv_ogm_packet *batadv_ogm_packet, const struct batadv_hard_iface *if_incoming) @@@ -989,17 -934,18 +1006,18 @@@ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct batadv_orig_node *orig_node; struct batadv_neigh_node *tmp_neigh_node; - int is_duplicate = 0; + int is_dup; int32_t seq_diff; int need_update = 0; - int set_mark, ret = -1; + int set_mark; + enum batadv_dup_status ret = BATADV_NO_DUP; uint32_t seqno = ntohl(batadv_ogm_packet->seqno); uint8_t *neigh_addr; uint8_t packet_count;
orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig); if (!orig_node) - return 0; + return BATADV_NO_DUP;
spin_lock_bh(&orig_node->ogm_cnt_lock); seq_diff = seqno - orig_node->last_real_seqno; @@@ -1007,22 -953,29 +1025,29 @@@ /* signalize caller that the packet is to be dropped. */ if (!hlist_empty(&orig_node->neigh_list) && batadv_window_protected(bat_priv, seq_diff, - &orig_node->batman_seqno_reset)) + &orig_node->batman_seqno_reset)) { + ret = BATADV_PROTECTED; goto out; + }
rcu_read_lock(); hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) { - is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits, - orig_node->last_real_seqno, - seqno); - neigh_addr = tmp_neigh_node->addr; + is_dup = batadv_test_bit(tmp_neigh_node->real_bits, + orig_node->last_real_seqno, + seqno); + if (batadv_compare_eth(neigh_addr, ethhdr->h_source) && - tmp_neigh_node->if_incoming == if_incoming) + tmp_neigh_node->if_incoming == if_incoming) { set_mark = 1; - else + if (is_dup) + ret = BATADV_NEIGH_DUP; + } else { set_mark = 0; + if (is_dup && (ret != BATADV_NEIGH_DUP)) + ret = BATADV_ORIG_DUP; + }
/* if the window moved, set the update flag. */ need_update |= batadv_bit_get_packet(bat_priv, @@@ -1042,8 -995,6 +1067,6 @@@ orig_node->last_real_seqno = seqno; }
- ret = is_duplicate; - out: spin_unlock_bh(&orig_node->ogm_cnt_lock); batadv_orig_node_free_ref(orig_node); @@@ -1062,10 -1013,11 +1085,11 @@@ static void batadv_iv_ogm_process(cons struct batadv_neigh_node *orig_neigh_router = NULL; int has_directlink_flag; int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0; - int is_broadcast = 0, is_bidirect; + int is_bidirect; bool is_single_hop_neigh = false; bool is_from_best_next_hop = false; - int is_duplicate, sameseq, simlar_ttl; + int sameseq, similar_ttl; + enum batadv_dup_status dup_status; uint32_t if_incoming_seqno; uint8_t *prev_sender;
@@@ -1125,9 -1077,19 +1149,9 @@@ if (batadv_compare_eth(batadv_ogm_packet->prev_sender, hard_iface->net_dev->dev_addr)) is_my_oldorig = 1; } rcu_read_unlock();
- if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) { - batadv_dbg(BATADV_DBG_BATMAN, bat_priv, - "Drop packet: incompatible batman version (%i)\n", - batadv_ogm_packet->header.version); - return; - } - if (is_my_addr) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: received my own broadcast (sender: %pM)\n", @@@ -1135,6 -1097,13 +1159,6 @@@ return; }
- if (is_broadcast) { - batadv_dbg(BATADV_DBG_BATMAN, bat_priv, - "Drop packet: ignoring all packets with broadcast source addr (sender: %pM)\n", - ethhdr->h_source); - return; - } - if (is_my_orig) { unsigned long *word; int offset; @@@ -1192,10 -1161,10 +1216,10 @@@ if (!orig_node) return;
- is_duplicate = batadv_iv_ogm_update_seqnos(ethhdr, batadv_ogm_packet, - if_incoming); + dup_status = batadv_iv_ogm_update_seqnos(ethhdr, batadv_ogm_packet, + if_incoming);
- if (is_duplicate == -1) { + if (dup_status == BATADV_PROTECTED) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: packet within seqno protection time (sender: %pM)\n", ethhdr->h_source); @@@ -1265,11 -1234,12 +1289,12 @@@ * seqno and similar ttl as the non-duplicate */ sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno); - simlar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl; - if (is_bidirect && (!is_duplicate || (sameseq && simlar_ttl))) + similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl; + if (is_bidirect && ((dup_status == BATADV_NO_DUP) || + (sameseq && similar_ttl))) batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr, batadv_ogm_packet, if_incoming, - tt_buff, is_duplicate); + tt_buff, dup_status);
/* is single hop (direct) neighbor */ if (is_single_hop_neigh) { @@@ -1290,7 -1260,7 +1315,7 @@@ goto out_neigh; }
- if (is_duplicate) { + if (dup_status == BATADV_NEIGH_DUP) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: duplicate packet received\n"); goto out_neigh; @@@ -1342,7 -1312,7 +1367,7 @@@ static int batadv_iv_ogm_receive(struc skb->len + ETH_HLEN);
packet_len = skb_headlen(skb); - ethhdr = (struct ethhdr *)skb_mac_header(skb); + ethhdr = eth_hdr(skb); packet_buff = skb->data; batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff;
diff --combined net/batman-adv/bridge_loop_avoidance.c index e9d8e0b,de27b31..e14531f --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@@ -180,7 -180,7 +180,7 @@@ static struct batadv_bla_clai */ static struct batadv_bla_backbone_gw * batadv_backbone_hash_find(struct batadv_priv *bat_priv, - uint8_t *addr, short vid) + uint8_t *addr, unsigned short vid) { struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; struct hlist_head *head; @@@ -257,7 -257,7 +257,7 @@@ batadv_bla_del_backbone_claims(struct b * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...) */ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac, - short vid, int claimtype) + unsigned short vid, int claimtype) { struct sk_buff *skb; struct ethhdr *ethhdr; @@@ -307,8 -307,7 +307,8 @@@ */ memcpy(ethhdr->h_source, mac, ETH_ALEN); batadv_dbg(BATADV_DBG_BLA, bat_priv, - "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid); + "bla_send_claim(): CLAIM %pM on vid %d\n", mac, + BATADV_PRINT_VID(vid)); break; case BATADV_CLAIM_TYPE_UNCLAIM: /* unclaim frame @@@ -317,7 -316,7 +317,7 @@@ memcpy(hw_src, mac, ETH_ALEN); batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, - vid); + BATADV_PRINT_VID(vid)); break; case BATADV_CLAIM_TYPE_ANNOUNCE: /* announcement frame @@@ -326,7 -325,7 +326,7 @@@ memcpy(hw_src, mac, ETH_ALEN); batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_send_claim(): ANNOUNCE of %pM on vid %d\n", - ethhdr->h_source, vid); + ethhdr->h_source, BATADV_PRINT_VID(vid)); break; case BATADV_CLAIM_TYPE_REQUEST: /* request frame @@@ -336,15 -335,13 +336,15 @@@ memcpy(hw_src, mac, ETH_ALEN); memcpy(ethhdr->h_dest, mac, ETH_ALEN); batadv_dbg(BATADV_DBG_BLA, bat_priv, - "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n", - ethhdr->h_source, ethhdr->h_dest, vid); + "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n", + ethhdr->h_source, ethhdr->h_dest, + BATADV_PRINT_VID(vid)); break; }
- if (vid != -1) - skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), vid); + if (vid & BATADV_VLAN_HAS_TAG) + skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), + vid & VLAN_VID_MASK);
skb_reset_mac_header(skb); skb->protocol = eth_type_trans(skb, soft_iface); @@@ -370,7 -367,7 +370,7 @@@ out */ static struct batadv_bla_backbone_gw * batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig, - short vid, bool own_backbone) + unsigned short vid, bool own_backbone) { struct batadv_bla_backbone_gw *entry; struct batadv_orig_node *orig_node; @@@ -383,7 -380,7 +383,7 @@@
batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n", - orig, vid); + orig, BATADV_PRINT_VID(vid));
entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) @@@ -437,7 -434,7 +437,7 @@@ static void batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, - short vid) + unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw;
@@@ -459,7 -456,7 +459,7 @@@ */ static void batadv_bla_answer_request(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, - short vid) + unsigned short vid) { struct hlist_head *head; struct batadv_hashtable *hash; @@@ -550,7 -547,7 +550,7 @@@ static void batadv_bla_send_announce(st * @backbone_gw: the backbone gateway which claims it */ static void batadv_bla_add_claim(struct batadv_priv *bat_priv, - const uint8_t *mac, const short vid, + const uint8_t *mac, const unsigned short vid, struct batadv_bla_backbone_gw *backbone_gw) { struct batadv_bla_claim *claim; @@@ -575,7 -572,7 +575,7 @@@ atomic_set(&claim->refcount, 2); batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n", - mac, vid); + mac, BATADV_PRINT_VID(vid)); hash_added = batadv_hash_add(bat_priv->bla.claim_hash, batadv_compare_claim, batadv_choose_claim, claim, @@@ -594,7 -591,7 +594,7 @@@
batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_add_claim(): changing ownership for %pM, vid %d\n", - mac, vid); + mac, BATADV_PRINT_VID(vid));
claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); batadv_backbone_gw_free_ref(claim->backbone_gw); @@@ -614,7 -611,7 +614,7 @@@ claim_free_ref * given mac address and vid. */ static void batadv_bla_del_claim(struct batadv_priv *bat_priv, - const uint8_t *mac, const short vid) + const uint8_t *mac, const unsigned short vid) { struct batadv_bla_claim search_claim, *claim;
@@@ -625,7 -622,7 +625,7 @@@ return;
batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", - mac, vid); + mac, BATADV_PRINT_VID(vid));
batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim, batadv_choose_claim, claim); @@@ -640,7 -637,7 +640,7 @@@ /* check for ANNOUNCE frame, return 1 if handled */ static int batadv_handle_announce(struct batadv_priv *bat_priv, uint8_t *an_addr, uint8_t *backbone_addr, - short vid) + unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw; uint16_t crc; @@@ -661,13 -658,12 +661,13 @@@
batadv_dbg(BATADV_DBG_BLA, bat_priv, "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n", - vid, backbone_gw->orig, crc); + BATADV_PRINT_VID(vid), backbone_gw->orig, crc);
if (backbone_gw->crc != crc) { batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv, "handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n", - backbone_gw->orig, backbone_gw->vid, + backbone_gw->orig, + BATADV_PRINT_VID(backbone_gw->vid), backbone_gw->crc, crc);
batadv_bla_send_request(backbone_gw); @@@ -689,7 -685,7 +689,7 @@@ static int batadv_handle_request(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, uint8_t *backbone_addr, - struct ethhdr *ethhdr, short vid) + struct ethhdr *ethhdr, unsigned short vid) { /* check for REQUEST frame */ if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest)) @@@ -703,7 -699,7 +703,7 @@@
batadv_dbg(BATADV_DBG_BLA, bat_priv, "handle_request(): REQUEST vid %d (sent by %pM)...\n", - vid, ethhdr->h_source); + BATADV_PRINT_VID(vid), ethhdr->h_source);
batadv_bla_answer_request(bat_priv, primary_if, vid); return 1; @@@ -713,7 -709,7 +713,7 @@@ static int batadv_handle_unclaim(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, uint8_t *backbone_addr, - uint8_t *claim_addr, short vid) + uint8_t *claim_addr, unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw;
@@@ -731,7 -727,7 +731,7 @@@ /* this must be an UNCLAIM frame */ batadv_dbg(BATADV_DBG_BLA, bat_priv, "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n", - claim_addr, vid, backbone_gw->orig); + claim_addr, BATADV_PRINT_VID(vid), backbone_gw->orig);
batadv_bla_del_claim(bat_priv, claim_addr, vid); batadv_backbone_gw_free_ref(backbone_gw); @@@ -742,7 -738,7 +742,7 @@@ static int batadv_handle_claim(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, uint8_t *backbone_addr, uint8_t *claim_addr, - short vid) + unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw;
@@@ -865,15 -861,14 +865,15 @@@ static int batadv_bla_process_claim(str struct batadv_bla_claim_dst *bla_dst; uint16_t proto; int headlen; - short vid = -1; + unsigned short vid = BATADV_NO_FLAGS; int ret;
- ethhdr = (struct ethhdr *)skb_mac_header(skb); + ethhdr = eth_hdr(skb);
if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) { vhdr = (struct vlan_ethhdr *)ethhdr; vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK; + vid |= BATADV_VLAN_HAS_TAG; proto = ntohs(vhdr->h_vlan_encapsulated_proto); headlen = sizeof(*vhdr); } else { @@@ -890,7 -885,7 +890,7 @@@ return 0;
/* pskb_may_pull() may have modified the pointers, get ethhdr again */ - ethhdr = (struct ethhdr *)skb_mac_header(skb); + ethhdr = eth_hdr(skb); arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);
/* Check whether the ARP frame carries a valid @@@ -915,8 -910,7 +915,8 @@@ if (ret == 1) batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n", - ethhdr->h_source, vid, hw_src, hw_dst); + ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, + hw_dst);
if (ret < 2) return ret; @@@ -951,7 -945,7 +951,7 @@@
batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n", - ethhdr->h_source, vid, hw_src, hw_dst); + ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, hw_dst); return 1; }
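The vid moves from a signed short (-1 meaning untagged) to an unsigned short carrying the BATADV_VLAN_HAS_TAG flag, and BATADV_PRINT_VID is used only to format the log lines. Its definition is not part of this hunk; it is presumably along these lines (an assumption, shown for readability only):

	#define BATADV_PRINT_VID(vid) (((vid) & BATADV_VLAN_HAS_TAG) ? \
				       (int)((vid) & VLAN_VID_MASK) : -1)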
@@@ -1073,6 -1067,10 +1073,10 @@@ void batadv_bla_update_orig_address(str group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN)); bat_priv->bla.claim_dest.group = group;
+ /* purge everything when bridge loop avoidance is turned off */ + if (!atomic_read(&bat_priv->bridge_loop_avoidance)) + oldif = NULL; + if (!oldif) { batadv_bla_purge_claims(bat_priv, NULL, 1); batadv_bla_purge_backbone_gw(bat_priv, 1); @@@ -1364,7 -1362,7 +1368,7 @@@ int batadv_bla_is_backbone_gw(struct sk struct ethhdr *ethhdr; struct vlan_ethhdr *vhdr; struct batadv_bla_backbone_gw *backbone_gw; - short vid = -1; + unsigned short vid = BATADV_NO_FLAGS;
if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance)) return 0; @@@ -1381,7 -1379,6 +1385,7 @@@
vhdr = (struct vlan_ethhdr *)(skb->data + hdr_size); vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK; + vid |= BATADV_VLAN_HAS_TAG; }
/* see if this originator is a backbone gw for this VLAN */ @@@ -1431,15 -1428,15 +1435,15 @@@ void batadv_bla_free(struct batadv_pri * returns 1, otherwise it returns 0 and the caller shall further * process the skb. */ -int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid, - bool is_bcast) +int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, + unsigned short vid, bool is_bcast) { struct ethhdr *ethhdr; struct batadv_bla_claim search_claim, *claim = NULL; struct batadv_hard_iface *primary_if; int ret;
- ethhdr = (struct ethhdr *)skb_mac_header(skb); + ethhdr = eth_hdr(skb);
primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) @@@ -1526,8 -1523,7 +1530,8 @@@ out * returns 1, otherwise it returns 0 and the caller shall further * process the skb. */ -int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid) +int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, + unsigned short vid) { struct ethhdr *ethhdr; struct batadv_bla_claim search_claim, *claim = NULL; @@@ -1547,7 -1543,7 +1551,7 @@@ if (batadv_bla_process_claim(bat_priv, primary_if, skb)) goto handled;
- ethhdr = (struct ethhdr *)skb_mac_header(skb); + ethhdr = eth_hdr(skb);
if (unlikely(atomic_read(&bat_priv->bla.num_requests))) /* don't allow broadcasts while requests are in flight */ @@@ -1631,8 -1627,8 +1635,8 @@@ int batadv_bla_claim_table_seq_print_te hlist_for_each_entry_rcu(claim, head, hash_entry) { is_own = batadv_compare_eth(claim->backbone_gw->orig, primary_addr); - seq_printf(seq, " * %pM on % 5d by %pM [%c] (%#.4x)\n", - claim->addr, claim->vid, + seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n", + claim->addr, BATADV_PRINT_VID(claim->vid), claim->backbone_gw->orig, (is_own ? 'x' : ' '), claim->backbone_gw->crc); @@@ -1684,10 -1680,10 +1688,10 @@@ int batadv_bla_backbone_table_seq_print if (is_own) continue;
- seq_printf(seq, - " * %pM on % 5d % 4i.%03is (%#.4x)\n", - backbone_gw->orig, backbone_gw->vid, - secs, msecs, backbone_gw->crc); + seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n", + backbone_gw->orig, + BATADV_PRINT_VID(backbone_gw->vid), secs, + msecs, backbone_gw->crc); } rcu_read_unlock(); } diff --combined net/bridge/br_multicast.c index 37a4676,d6448e3..31952a1 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@@ -23,7 -23,6 +23,7 @@@ #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/timer.h> +#include <linux/inetdevice.h> #include <net/ip.h> #if IS_ENABLED(CONFIG_IPV6) #include <net/ipv6.h> @@@ -382,8 -381,7 +382,8 @@@ static struct sk_buff *br_ip4_multicast iph->frag_off = htons(IP_DF); iph->ttl = 1; iph->protocol = IPPROTO_IGMP; - iph->saddr = 0; + iph->saddr = br->multicast_query_use_ifaddr ? + inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0; iph->daddr = htonl(INADDR_ALLHOSTS_GROUP); ((u8 *)&iph[1])[0] = IPOPT_RA; ((u8 *)&iph[1])[1] = 4; @@@ -467,8 -465,9 +467,9 @@@ static struct sk_buff *br_ip6_multicast skb_set_transport_header(skb, skb->len); mldq = (struct mld_msg *) icmp6_hdr(skb);
- interval = ipv6_addr_any(group) ? br->multicast_last_member_interval : - br->multicast_query_response_interval; + interval = ipv6_addr_any(group) ? + br->multicast_query_response_interval : + br->multicast_last_member_interval;
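The swap above restores the intended mapping: a general query (group ::) advertises the query response interval as its Max Response Delay, while a group-specific query uses the last-member interval. Spelled out for clarity:

	if (ipv6_addr_any(group))	/* general query */
		interval = br->multicast_query_response_interval;
	else				/* group-specific query */
		interval = br->multicast_last_member_interval;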
mldq->mld_type = ICMPV6_MGM_QUERY; mldq->mld_code = 0; @@@ -617,6 -616,8 +618,6 @@@ rehash
mp->br = br; mp->addr = *group; - setup_timer(&mp->timer, br_multicast_group_expired, - (unsigned long)mp);
hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]); mdb->size++; @@@ -654,6 -655,7 +655,6 @@@ static int br_multicast_add_group(struc struct net_bridge_mdb_entry *mp; struct net_bridge_port_group *p; struct net_bridge_port_group __rcu **pp; - unsigned long now = jiffies; int err;
spin_lock(&br->multicast_lock); @@@ -668,6 -670,7 +669,6 @@@
if (!port) { mp->mglist = true; - mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
@@@ -675,7 -678,7 +676,7 @@@ (p = mlock_dereference(*pp, br)) != NULL; pp = &p->next) { if (p->port == port) - goto found; + goto out; if ((unsigned long)p->port < (unsigned long)port) break; } @@@ -686,6 -689,8 +687,6 @@@ rcu_assign_pointer(*pp, p); br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
-found: - mod_timer(&p->timer, now + br->multicast_membership_interval); out: err = 0;
@@@ -1125,10 -1130,6 +1126,10 @@@ static int br_ip4_multicast_query(struc if (!mp) goto out;
+ setup_timer(&mp->timer, br_multicast_group_expired, (unsigned long)mp); + mod_timer(&mp->timer, now + br->multicast_membership_interval); + mp->timer_armed = true; + max_delay *= br->multicast_last_member_count;
if (mp->mglist && @@@ -1203,10 -1204,6 +1204,10 @@@ static int br_ip6_multicast_query(struc if (!mp) goto out;
+ setup_timer(&mp->timer, br_multicast_group_expired, (unsigned long)mp); + mod_timer(&mp->timer, now + br->multicast_membership_interval); + mp->timer_armed = true; + max_delay *= br->multicast_last_member_count; if (mp->mglist && (timer_pending(&mp->timer) ? @@@ -1250,32 -1247,6 +1251,32 @@@ static void br_multicast_leave_group(st if (!mp) goto out;
+ if (br->multicast_querier && + !timer_pending(&br->multicast_querier_timer)) { + __br_multicast_send_query(br, port, &mp->addr); + + time = jiffies + br->multicast_last_member_count * + br->multicast_last_member_interval; + mod_timer(port ? &port->multicast_query_timer : + &br->multicast_query_timer, time); + + for (p = mlock_dereference(mp->ports, br); + p != NULL; + p = mlock_dereference(p->next, br)) { + if (p->port != port) + continue; + + if (!hlist_unhashed(&p->mglist) && + (timer_pending(&p->timer) ? + time_after(p->timer.expires, time) : + try_to_del_timer_sync(&p->timer) >= 0)) { + mod_timer(&p->timer, time); + } + + break; + } + } + if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) { struct net_bridge_port_group __rcu **pp;
@@@ -1291,7 -1262,7 +1292,7 @@@ call_rcu_bh(&p->rcu, br_multicast_free_pg); br_mdb_notify(br->dev, port, group, RTM_DELMDB);
- if (!mp->ports && !mp->mglist && + if (!mp->ports && !mp->mglist && mp->timer_armed && netif_running(br->dev)) mod_timer(&mp->timer, jiffies); } @@@ -1303,12 -1274,30 +1304,12 @@@ br->multicast_last_member_interval;
if (!port) { - if (mp->mglist && + if (mp->mglist && mp->timer_armed && (timer_pending(&mp->timer) ? time_after(mp->timer.expires, time) : try_to_del_timer_sync(&mp->timer) >= 0)) { mod_timer(&mp->timer, time); } - - goto out; - } - - for (p = mlock_dereference(mp->ports, br); - p != NULL; - p = mlock_dereference(p->next, br)) { - if (p->port != port) - continue; - - if (!hlist_unhashed(&p->mglist) && - (timer_pending(&p->timer) ? - time_after(p->timer.expires, time) : - try_to_del_timer_sync(&p->timer) >= 0)) { - mod_timer(&p->timer, time); - } - - break; }
out: @@@ -1630,7 -1619,6 +1631,7 @@@ void br_multicast_init(struct net_bridg
br->multicast_router = 1; br->multicast_querier = 0; + br->multicast_query_use_ifaddr = 0; br->multicast_last_member_count = 2; br->multicast_startup_query_count = 2;
@@@ -1684,7 -1672,6 +1685,7 @@@ void br_multicast_stop(struct net_bridg hlist_for_each_entry_safe(mp, n, &mdb->mhash[i], hlist[ver]) { del_timer(&mp->timer); + mp->timer_armed = false; call_rcu_bh(&mp->rcu, br_multicast_free_group); } } diff --combined net/core/ethtool.c index cd23d31,ce91766..9255bbd --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@@ -60,10 -60,10 +60,10 @@@ static const char netdev_features_strin [NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6", [NETIF_F_HIGHDMA_BIT] = "highdma", [NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist", - [NETIF_F_HW_VLAN_CTAG_TX_BIT] = "tx-vlan-ctag-hw-insert", + [NETIF_F_HW_VLAN_CTAG_TX_BIT] = "tx-vlan-hw-insert",
- [NETIF_F_HW_VLAN_CTAG_RX_BIT] = "rx-vlan-ctag-hw-parse", - [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-ctag-filter", + [NETIF_F_HW_VLAN_CTAG_RX_BIT] = "rx-vlan-hw-parse", + [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-filter", [NETIF_F_HW_VLAN_STAG_TX_BIT] = "tx-vlan-stag-hw-insert", [NETIF_F_HW_VLAN_STAG_RX_BIT] = "rx-vlan-stag-hw-parse", [NETIF_F_HW_VLAN_STAG_FILTER_BIT] = "rx-vlan-stag-filter", @@@ -82,7 -82,6 +82,7 @@@ [NETIF_F_FSO_BIT] = "tx-fcoe-segmentation", [NETIF_F_GSO_GRE_BIT] = "tx-gre-segmentation", [NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation", + [NETIF_F_GSO_MPLS_BIT] = "tx-mpls-segmentation",
[NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc", [NETIF_F_SCTP_CSUM_BIT] = "tx-checksum-sctp", @@@ -1414,7 -1413,7 +1414,7 @@@ static int ethtool_get_module_eeprom(st modinfo.eeprom_len); }
-/* The main entry point in this file. Called from net/core/dev.c */ +/* The main entry point in this file. Called from net/core/dev_ioctl.c */
int dev_ethtool(struct net *net, struct ifreq *ifr) { diff --combined net/ipv4/ip_tunnel.c index 7c79cf8,7fa8f08..e189db4 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@@ -487,7 -487,7 +487,7 @@@ drop EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, - const struct iphdr *tnl_params) + const struct iphdr *tnl_params, const u8 protocol) { struct ip_tunnel *tunnel = netdev_priv(dev); const struct iphdr *inner_iph; @@@ -670,7 -670,7 +670,7 @@@ iph->version = 4; iph->ihl = sizeof(struct iphdr) >> 2; iph->frag_off = df; - iph->protocol = tnl_params->protocol; + iph->protocol = protocol; iph->tos = ip_tunnel_ecn_encap(tos, inner_iph, skb); iph->daddr = fl4.daddr; iph->saddr = fl4.saddr; @@@ -853,7 -853,7 +853,7 @@@ void ip_tunnel_dellink(struct net_devic } EXPORT_SYMBOL_GPL(ip_tunnel_dellink);
- int __net_init ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, + int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, struct rtnl_link_ops *ops, char *devname) { struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id); @@@ -899,7 -899,7 +899,7 @@@ static void ip_tunnel_destroy(struct ip unregister_netdevice_queue(itn->fb_tunnel_dev, head); }
- void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn) + void ip_tunnel_delete_net(struct ip_tunnel_net *itn) { LIST_HEAD(list);
diff --combined net/ipv6/ndisc.c index 781dd3c,ca4ffcc..b3b5730 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@@ -693,7 -693,7 +693,7 @@@ static void ndisc_recv_ns(struct sk_buf const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr; u8 *lladdr = NULL; - u32 ndoptlen = skb->tail - (skb->transport_header + + u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) + offsetof(struct nd_msg, opt)); struct ndisc_options ndopts; struct net_device *dev = skb->dev; @@@ -853,7 -853,7 +853,7 @@@ static void ndisc_recv_na(struct sk_buf const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr; u8 *lladdr = NULL; - u32 ndoptlen = skb->tail - (skb->transport_header + + u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) + offsetof(struct nd_msg, opt)); struct ndisc_options ndopts; struct net_device *dev = skb->dev; @@@ -1069,8 -1069,7 +1069,8 @@@ static void ndisc_router_discovery(stru
__u8 * opt = (__u8 *)(ra_msg + 1);
- optlen = (skb->tail - skb->transport_header) - sizeof(struct ra_msg); + optlen = (skb_tail_pointer(skb) - skb_transport_header(skb)) - + sizeof(struct ra_msg);
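The ndisc changes replace raw skb->tail / skb->transport_header arithmetic with the accessor helpers, which stay correct when NET_SKBUFF_DATA_USES_OFFSET turns those fields into offsets rather than pointers. In sketch form, the helper in include/linux/skbuff.h behaves like this:

	#ifdef NET_SKBUFF_DATA_USES_OFFSET
	static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
	{
		return skb->head + skb->tail;
	}
	#else
	static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
	{
		return skb->tail;
	}
	#endif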
if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) { ND_PRINTK(2, warn, "RA: source address is not link-local\n"); @@@ -1347,7 -1346,7 +1347,7 @@@ static void ndisc_redirect_rcv(struct s u8 *hdr; struct ndisc_options ndopts; struct rd_msg *msg = (struct rd_msg *)skb_transport_header(skb); - u32 ndoptlen = skb->tail - (skb->transport_header + + u32 ndoptlen = skb_tail_pointer(skb) - (skb_transport_header(skb) + offsetof(struct rd_msg, opt));
#ifdef CONFIG_IPV6_NDISC_NODETYPE @@@ -1494,7 -1493,7 +1494,7 @@@ void ndisc_send_redirect(struct sk_buf */
if (ha) - ndisc_fill_addr_option(skb, ND_OPT_TARGET_LL_ADDR, ha); + ndisc_fill_addr_option(buff, ND_OPT_TARGET_LL_ADDR, ha);
/* * build redirect option and copy skb over to the new packet. @@@ -1569,7 -1568,7 +1569,7 @@@ int ndisc_rcv(struct sk_buff *skb
static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); struct inet6_dev *idev;
diff --combined net/mac80211/cfg.c index 3062210,4fdb306e..a1c6e1c --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@@ -73,19 -73,16 +73,19 @@@ static int ieee80211_change_iface(struc struct ieee80211_local *local = sdata->local;
if (ieee80211_sdata_running(sdata)) { + u32 mask = MONITOR_FLAG_COOK_FRAMES | + MONITOR_FLAG_ACTIVE; + /* - * Prohibit MONITOR_FLAG_COOK_FRAMES to be - * changed while the interface is up. + * Prohibit MONITOR_FLAG_COOK_FRAMES and + * MONITOR_FLAG_ACTIVE to be changed while the + * interface is up. * Else we would need to add a lot of cruft * to update everything: * cooked_mntrs, monitor and all fif_* counters * reconfigure hardware */ - if ((*flags & MONITOR_FLAG_COOK_FRAMES) != - (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)) + if ((*flags & mask) != (sdata->u.mntr_flags & mask)) return -EBUSY;
ieee80211_adjust_monitor_flags(sdata, -1); @@@ -447,7 -444,7 +447,7 @@@ static void sta_set_sinfo(struct sta_in struct ieee80211_local *local = sdata->local; struct timespec uptime; u64 packets = 0; - int ac; + int i, ac;
sinfo->generation = sdata->local->sta_generation;
@@@ -491,17 -488,6 +491,17 @@@ sinfo->signal = (s8)sta->last_signal; sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal); } + if (sta->chains) { + sinfo->filled |= STATION_INFO_CHAIN_SIGNAL | + STATION_INFO_CHAIN_SIGNAL_AVG; + + sinfo->chains = sta->chains; + for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) { + sinfo->chain_signal[i] = sta->chain_signal_last[i]; + sinfo->chain_signal_avg[i] = + (s8) -ewma_read(&sta->chain_signal_avg[i]); + } + }
sta_set_rate_info_tx(sta, &sta->last_tx_rate, &sinfo->txrate); sta_set_rate_info_rx(sta, &sinfo->rxrate); @@@ -742,7 -728,7 +742,7 @@@ static void ieee80211_get_et_strings(st
if (sset == ETH_SS_STATS) { sz_sta_stats = sizeof(ieee80211_gstrings_sta_stats); - memcpy(data, *ieee80211_gstrings_sta_stats, sz_sta_stats); + memcpy(data, ieee80211_gstrings_sta_stats, sz_sta_stats); } drv_get_et_strings(sdata, sset, &(data[sz_sta_stats])); } @@@ -1071,6 -1057,12 +1071,12 @@@ static int ieee80211_stop_ap(struct wip clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
+ if (sdata->wdev.cac_started) { + cancel_delayed_work_sync(&sdata->dfs_cac_timer_work); + cfg80211_cac_event(sdata->dev, NL80211_RADAR_CAC_ABORTED, + GFP_KERNEL); + } + drv_stop_ap(sdata->local, sdata);
/* free all potentially still buffered bcast frames */ @@@ -1749,7 -1741,6 +1755,7 @@@ static int copy_mesh_setup(struct ieee8 ifmsh->mesh_pp_id = setup->path_sel_proto; ifmsh->mesh_pm_id = setup->path_metric; ifmsh->user_mpm = setup->user_mpm; + ifmsh->mesh_auth_id = setup->auth_id; ifmsh->security = IEEE80211_MESH_SEC_NONE; if (setup->is_authenticated) ifmsh->security |= IEEE80211_MESH_SEC_AUTHED; @@@ -2321,7 -2312,7 +2327,7 @@@ int __ieee80211_request_smps(struct iee enum ieee80211_smps_mode old_req; int err;
- lockdep_assert_held(&sdata->u.mgd.mtx); + lockdep_assert_held(&sdata->wdev.mtx);
old_req = sdata->u.mgd.req_smps; sdata->u.mgd.req_smps = smps_mode; @@@ -2378,9 -2369,9 +2384,9 @@@ static int ieee80211_set_power_mgmt(str local->dynamic_ps_forced_timeout = timeout;
/* no change, but if automatic follow powersave */ - mutex_lock(&sdata->u.mgd.mtx); + sdata_lock(sdata); __ieee80211_request_smps(sdata, sdata->u.mgd.req_smps); - mutex_unlock(&sdata->u.mgd.mtx); + sdata_unlock(sdata);
if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS) ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); diff --combined net/mac80211/ieee80211_i.h index 9eed6f1,9ca8e32..923e177 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@@ -394,6 -394,7 +394,6 @@@ struct ieee80211_if_managed bool nullfunc_failed; bool connection_loss;
- struct mutex mtx; struct cfg80211_bss *associated; struct ieee80211_mgd_auth_data *auth_data; struct ieee80211_mgd_assoc_data *assoc_data; @@@ -487,6 -488,8 +487,6 @@@ struct ieee80211_if_ibss { struct timer_list timer;
- struct mutex mtx; - unsigned long last_scan_completed;
u32 basic_rates; @@@ -577,6 -580,8 +577,6 @@@ struct ieee80211_if_mesh bool accepting_plinks; int num_gates; struct beacon_data __rcu *beacon; - /* just protects beacon updates for now */ - struct mutex mtx; const u8 *ie; u8 ie_len; enum { @@@ -773,26 -778,6 +773,26 @@@ struct ieee80211_sub_if_data *vif_to_sd return container_of(p, struct ieee80211_sub_if_data, vif); }
+static inline void sdata_lock(struct ieee80211_sub_if_data *sdata) + __acquires(&sdata->wdev.mtx) +{ + mutex_lock(&sdata->wdev.mtx); + __acquire(&sdata->wdev.mtx); +} + +static inline void sdata_unlock(struct ieee80211_sub_if_data *sdata) + __releases(&sdata->wdev.mtx) +{ + mutex_unlock(&sdata->wdev.mtx); + __release(&sdata->wdev.mtx); +} + +static inline void +sdata_assert_lock(struct ieee80211_sub_if_data *sdata) +{ + lockdep_assert_held(&sdata->wdev.mtx); +} + static inline enum ieee80211_band ieee80211_get_sdata_band(struct ieee80211_sub_if_data *sdata) { @@@ -1512,15 -1497,19 +1512,16 @@@ static inline void ieee80211_tx_skb(str ieee80211_tx_skb_tid(sdata, skb, 7); }
- u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, bool action, + u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, struct ieee802_11_elems *elems, u64 filter, u32 crc); - static inline void ieee802_11_parse_elems(u8 *start, size_t len, bool action, + static inline void ieee802_11_parse_elems(const u8 *start, size_t len, + bool action, struct ieee802_11_elems *elems) { ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0); }
-u32 ieee80211_mandatory_rates(struct ieee80211_local *local, - enum ieee80211_band band); - void ieee80211_dynamic_ps_enable_work(struct work_struct *work); void ieee80211_dynamic_ps_disable_work(struct work_struct *work); void ieee80211_dynamic_ps_timer(unsigned long data); diff --combined net/mac80211/mlme.c index f44f4ca,741448b..118540b --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@@ -91,6 -91,41 +91,6 @@@ MODULE_PARM_DESC(probe_wait_ms #define IEEE80211_SIGNAL_AVE_MIN_COUNT 4
/* - * All cfg80211 functions have to be called outside a locked - * section so that they can acquire a lock themselves... This - * is much simpler than queuing up things in cfg80211, but we - * do need some indirection for that here. - */ -enum rx_mgmt_action { - /* no action required */ - RX_MGMT_NONE, - - /* caller must call cfg80211_send_deauth() */ - RX_MGMT_CFG80211_DEAUTH, - - /* caller must call cfg80211_send_disassoc() */ - RX_MGMT_CFG80211_DISASSOC, - - /* caller must call cfg80211_send_rx_auth() */ - RX_MGMT_CFG80211_RX_AUTH, - - /* caller must call cfg80211_send_rx_assoc() */ - RX_MGMT_CFG80211_RX_ASSOC, - - /* caller must call cfg80211_send_assoc_timeout() */ - RX_MGMT_CFG80211_ASSOC_TIMEOUT, - - /* used when a processed beacon causes a deauth */ - RX_MGMT_CFG80211_TX_DEAUTH, -}; - -/* utils */ -static inline void ASSERT_MGD_MTX(struct ieee80211_if_managed *ifmgd) -{ - lockdep_assert_held(&ifmgd->mtx); -} - -/* * We can have multiple work items (and connection probing) * scheduling this timer, but we need to take care to only * reschedule it when it should fire _earlier_ than it was @@@ -100,14 -135,13 +100,14 @@@ * has happened -- the work that runs from this timer will * do that. */ -static void run_again(struct ieee80211_if_managed *ifmgd, unsigned long timeout) +static void run_again(struct ieee80211_sub_if_data *sdata, + unsigned long timeout) { - ASSERT_MGD_MTX(ifmgd); + sdata_assert_lock(sdata);
- if (!timer_pending(&ifmgd->timer) || - time_before(timeout, ifmgd->timer.expires)) - mod_timer(&ifmgd->timer, timeout); + if (!timer_pending(&sdata->u.mgd.timer) || + time_before(timeout, sdata->u.mgd.timer.expires)) + mod_timer(&sdata->u.mgd.timer, timeout); }
void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata) @@@ -618,7 -652,7 +618,7 @@@ static void ieee80211_send_assoc(struc struct ieee80211_channel *chan; u32 rates = 0;
- lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata);
rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); @@@ -928,7 -962,7 +928,7 @@@ static void ieee80211_chswitch_work(str if (!ieee80211_sdata_running(sdata)) return;
- mutex_lock(&ifmgd->mtx); + sdata_lock(sdata); if (!ifmgd->associated) goto out;
@@@ -951,7 -985,7 +951,7 @@@ IEEE80211_QUEUE_STOP_REASON_CSA); out: ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED; - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); }
void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success) @@@ -1002,7 -1036,7 +1002,7 @@@ ieee80211_sta_process_chanswitch(struc const struct ieee80211_ht_operation *ht_oper; int secondary_channel_offset = -1;
- ASSERT_MGD_MTX(ifmgd); + sdata_assert_lock(sdata);
if (!cbss) return; @@@ -1356,9 -1390,6 +1356,9 @@@ static bool ieee80211_powersave_allowed IEEE80211_STA_CONNECTION_POLL)) return false;
+ if (!sdata->vif.bss_conf.dtim_period) + return false; + rcu_read_lock(); sta = sta_info_get(sdata, mgd->bssid); if (sta) @@@ -1811,7 -1842,7 +1811,7 @@@ static void ieee80211_set_disassoc(stru struct ieee80211_local *local = sdata->local; u32 changed = 0;
- ASSERT_MGD_MTX(ifmgd); + sdata_assert_lock(sdata);
if (WARN_ON_ONCE(tx && !frame_buf)) return; @@@ -2020,7 -2051,7 +2020,7 @@@ static void ieee80211_mgd_probe_ap_send }
ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); - run_again(ifmgd, ifmgd->probe_timeout); + run_again(sdata, ifmgd->probe_timeout); if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) ieee80211_flush_queues(sdata->local, sdata); } @@@ -2034,7 -2065,7 +2034,7 @@@ static void ieee80211_mgd_probe_ap(stru if (!ieee80211_sdata_running(sdata)) return;
- mutex_lock(&ifmgd->mtx); + sdata_lock(sdata);
if (!ifmgd->associated) goto out; @@@ -2088,7 -2119,7 +2088,7 @@@ ifmgd->probe_send_count = 0; ieee80211_mgd_probe_ap_send(sdata); out: - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); }
struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, @@@ -2104,7 -2135,7 +2104,7 @@@ if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) return NULL;
- ASSERT_MGD_MTX(ifmgd); + sdata_assert_lock(sdata);
if (ifmgd->associated) cbss = ifmgd->associated; @@@ -2137,9 -2168,9 +2137,9 @@@ static void __ieee80211_disconnect(stru struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
- mutex_lock(&ifmgd->mtx); + sdata_lock(sdata); if (!ifmgd->associated) { - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); return; }
@@@ -2150,9 -2181,13 +2150,9 @@@ ieee80211_wake_queues_by_reason(&sdata->local->hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_CSA); - mutex_unlock(&ifmgd->mtx);
- /* - * must be outside lock due to cfg80211, - * but that's not a problem. - */ cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN); + sdata_unlock(sdata); }
static void ieee80211_beacon_connection_loss_work(struct work_struct *work) @@@ -2219,7 -2254,7 +2219,7 @@@ static void ieee80211_destroy_auth_data { struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data;
- lockdep_assert_held(&sdata->u.mgd.mtx); + sdata_assert_lock(sdata);
if (!assoc) { sta_info_destroy_addr(sdata, auth_data->bss->bssid); @@@ -2260,26 -2295,27 +2260,26 @@@ static void ieee80211_auth_challenge(st auth_data->key_idx, tx_flags); }
-static enum rx_mgmt_action __must_check -ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, - struct ieee80211_mgmt *mgmt, size_t len) +static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 bssid[ETH_ALEN]; u16 auth_alg, auth_transaction, status_code; struct sta_info *sta;
- lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata);
if (len < 24 + 6) - return RX_MGMT_NONE; + return;
if (!ifmgd->auth_data || ifmgd->auth_data->done) - return RX_MGMT_NONE; + return;
memcpy(bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN);
if (!ether_addr_equal(bssid, mgmt->bssid)) - return RX_MGMT_NONE; + return;
auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); @@@ -2291,15 -2327,14 +2291,15 @@@ mgmt->sa, auth_alg, ifmgd->auth_data->algorithm, auth_transaction, ifmgd->auth_data->expected_transaction); - return RX_MGMT_NONE; + return; }
if (status_code != WLAN_STATUS_SUCCESS) { sdata_info(sdata, "%pM denied authentication (status %d)\n", mgmt->sa, status_code); ieee80211_destroy_auth_data(sdata, false); - return RX_MGMT_CFG80211_RX_AUTH; + cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, len); + return; }
switch (ifmgd->auth_data->algorithm) { @@@ -2312,20 -2347,20 +2312,20 @@@ if (ifmgd->auth_data->expected_transaction != 4) { ieee80211_auth_challenge(sdata, mgmt, len); /* need another frame */ - return RX_MGMT_NONE; + return; } break; default: WARN_ONCE(1, "invalid auth alg %d", ifmgd->auth_data->algorithm); - return RX_MGMT_NONE; + return; }
sdata_info(sdata, "authenticated\n"); ifmgd->auth_data->done = true; ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC; ifmgd->auth_data->timeout_started = true; - run_again(ifmgd, ifmgd->auth_data->timeout); + run_again(sdata, ifmgd->auth_data->timeout);
if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE && ifmgd->auth_data->expected_transaction != 2) { @@@ -2333,8 -2368,7 +2333,8 @@@ * Report auth frame to user space for processing since another * round of Authentication frames is still needed. */ - return RX_MGMT_CFG80211_RX_AUTH; + cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, len); + return; }
/* move station state to auth */ @@@ -2350,29 -2384,30 +2350,29 @@@ } mutex_unlock(&sdata->local->sta_mtx);
- return RX_MGMT_CFG80211_RX_AUTH; + cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, len); + return; out_err: mutex_unlock(&sdata->local->sta_mtx); /* ignore frame -- wait for timeout */ }
-static enum rx_mgmt_action __must_check -ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, - struct ieee80211_mgmt *mgmt, size_t len) +static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; const u8 *bssid = NULL; u16 reason_code;
- lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata);
if (len < 24 + 2) - return RX_MGMT_NONE; + return;
if (!ifmgd->associated || !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) - return RX_MGMT_NONE; + return;
bssid = ifmgd->associated->bssid;
@@@ -2383,24 -2418,25 +2383,24 @@@
ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
- return RX_MGMT_CFG80211_DEAUTH; + cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, len); }
-static enum rx_mgmt_action __must_check -ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, - struct ieee80211_mgmt *mgmt, size_t len) +static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u16 reason_code;
- lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata);
if (len < 24 + 2) - return RX_MGMT_NONE; + return;
if (!ifmgd->associated || !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) - return RX_MGMT_NONE; + return;
reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
@@@ -2409,7 -2445,7 +2409,7 @@@
ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
- return RX_MGMT_CFG80211_DISASSOC; + cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, len); }
static void ieee80211_get_rates(struct ieee80211_supported_band *sband, @@@ -2459,7 -2495,7 +2459,7 @@@ static void ieee80211_destroy_assoc_dat { struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
- lockdep_assert_held(&sdata->u.mgd.mtx); + sdata_assert_lock(sdata);
if (!assoc) { sta_info_destroy_addr(sdata, assoc_data->bss->bssid); @@@ -2486,8 -2522,11 +2486,11 @@@ static bool ieee80211_assoc_success(str u16 capab_info, aid; struct ieee802_11_elems elems; struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; + const struct cfg80211_bss_ies *bss_ies = NULL; + struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data; u32 changed = 0; int err; + bool ret;
/* AssocResp and ReassocResp have identical structure */
@@@ -2519,21 -2558,86 +2522,86 @@@ ifmgd->aid = aid;
/* + * Some APs are erroneously not including some information in their + * (re)association response frames. Try to recover by using the data + * from the beacon or probe response. This seems to afflict mobile + * 2G/3G/4G wifi routers, reported models include the "Onda PN51T", + * "Vodafone PocketWiFi 2", "ZTE MF60" and a similar T-Mobile device. + */ + if ((assoc_data->wmm && !elems.wmm_param) || + (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) && + (!elems.ht_cap_elem || !elems.ht_operation)) || + (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) && + (!elems.vht_cap_elem || !elems.vht_operation))) { + const struct cfg80211_bss_ies *ies; + struct ieee802_11_elems bss_elems; + + rcu_read_lock(); + ies = rcu_dereference(cbss->ies); + if (ies) + bss_ies = kmemdup(ies, sizeof(*ies) + ies->len, + GFP_ATOMIC); + rcu_read_unlock(); + if (!bss_ies) + return false; + + ieee802_11_parse_elems(bss_ies->data, bss_ies->len, + false, &bss_elems); + if (assoc_data->wmm && + !elems.wmm_param && bss_elems.wmm_param) { + elems.wmm_param = bss_elems.wmm_param; + sdata_info(sdata, + "AP bug: WMM param missing from AssocResp\n"); + } + + /* + * Also check if we requested HT/VHT, otherwise the AP doesn't + * have to include the IEs in the (re)association response. + */ + if (!elems.ht_cap_elem && bss_elems.ht_cap_elem && + !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) { + elems.ht_cap_elem = bss_elems.ht_cap_elem; + sdata_info(sdata, + "AP bug: HT capability missing from AssocResp\n"); + } + if (!elems.ht_operation && bss_elems.ht_operation && + !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) { + elems.ht_operation = bss_elems.ht_operation; + sdata_info(sdata, + "AP bug: HT operation missing from AssocResp\n"); + } + if (!elems.vht_cap_elem && bss_elems.vht_cap_elem && + !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) { + elems.vht_cap_elem = bss_elems.vht_cap_elem; + sdata_info(sdata, + "AP bug: VHT capa missing from AssocResp\n"); + } + if (!elems.vht_operation && bss_elems.vht_operation && + !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) { + elems.vht_operation = bss_elems.vht_operation; + sdata_info(sdata, + "AP bug: VHT operation missing from AssocResp\n"); + } + } + + /* * We previously checked these in the beacon/probe response, so * they should be present here. This is just a safety net. */ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) && (!elems.wmm_param || !elems.ht_cap_elem || !elems.ht_operation)) { sdata_info(sdata, - "HT AP is missing WMM params or HT capability/operation in AssocResp\n"); - return false; + "HT AP is missing WMM params or HT capability/operation\n"); + ret = false; + goto out; }
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) && (!elems.vht_cap_elem || !elems.vht_operation)) { sdata_info(sdata, - "VHT AP is missing VHT capability/operation in AssocResp\n"); - return false; + "VHT AP is missing VHT capability/operation\n"); + ret = false; + goto out; }
mutex_lock(&sdata->local->sta_mtx); @@@ -2544,7 -2648,8 +2612,8 @@@ sta = sta_info_get(sdata, cbss->bssid); if (WARN_ON(!sta)) { mutex_unlock(&sdata->local->sta_mtx); - return false; + ret = false; + goto out; }
sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)]; @@@ -2597,7 -2702,8 +2666,8 @@@ sta->sta.addr); WARN_ON(__sta_info_destroy(sta)); mutex_unlock(&sdata->local->sta_mtx); - return false; + ret = false; + goto out; }
mutex_unlock(&sdata->local->sta_mtx); @@@ -2637,12 -2743,16 +2707,15 @@@ ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt); ieee80211_sta_reset_beacon_monitor(sdata);
- return true; + ret = true; + out: + kfree(bss_ies); + return ret; }
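The recovery block above copies WMM/HT/VHT elements that a buggy AP left out of its (re)association response from the cached beacon or probe-response IEs, and frees the kmemdup'd copy on the single exit path. In miniature, with placeholder types rather than the mac80211 ones, the fallback pattern looks like this:

    #include <stdio.h>

    struct example_elems {
        const char *wmm_param;
        const char *ht_cap;
    };

    /* For each element the response should have carried but did not,
     * fall back to the copy parsed from the beacon/probe response and
     * log the AP bug -- the same pattern as the hunk above. */
    static void recover_missing(struct example_elems *resp,
                                const struct example_elems *bss)
    {
        if (!resp->wmm_param && bss->wmm_param) {
            resp->wmm_param = bss->wmm_param;
            puts("AP bug: WMM param missing from AssocResp");
        }
        if (!resp->ht_cap && bss->ht_cap) {
            resp->ht_cap = bss->ht_cap;
            puts("AP bug: HT capability missing from AssocResp");
        }
    }

    int main(void)
    {
        struct example_elems beacon = { .wmm_param = "wmm", .ht_cap = "ht" };
        struct example_elems resp   = { .wmm_param = NULL,  .ht_cap = "ht" };

        recover_missing(&resp, &beacon);
        printf("wmm=%s ht=%s\n", resp.wmm_param, resp.ht_cap);
        return 0;
    }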
-static enum rx_mgmt_action __must_check -ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, - struct ieee80211_mgmt *mgmt, size_t len, - struct cfg80211_bss **bss) +static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data; @@@ -2650,14 -2760,13 +2723,14 @@@ struct ieee802_11_elems elems; u8 *pos; bool reassoc; + struct cfg80211_bss *bss;
- lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata);
if (!assoc_data) - return RX_MGMT_NONE; + return; if (!ether_addr_equal(assoc_data->bss->bssid, mgmt->bssid)) - return RX_MGMT_NONE; + return;
/* * AssocResp and ReassocResp have identical structure, so process both @@@ -2665,7 -2774,7 +2738,7 @@@ */
if (len < 24 + 6) - return RX_MGMT_NONE; + return;
reassoc = ieee80211_is_reassoc_req(mgmt->frame_control); capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); @@@ -2692,23 -2801,22 +2765,23 @@@ assoc_data->timeout = jiffies + msecs_to_jiffies(ms); assoc_data->timeout_started = true; if (ms > IEEE80211_ASSOC_TIMEOUT) - run_again(ifmgd, assoc_data->timeout); - return RX_MGMT_NONE; + run_again(sdata, assoc_data->timeout); + return; }
- *bss = assoc_data->bss; + bss = assoc_data->bss;
if (status_code != WLAN_STATUS_SUCCESS) { sdata_info(sdata, "%pM denied association (code=%d)\n", mgmt->sa, status_code); ieee80211_destroy_assoc_data(sdata, false); } else { - if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) { + if (!ieee80211_assoc_success(sdata, bss, mgmt, len)) { /* oops -- internal error -- send timeout for now */ ieee80211_destroy_assoc_data(sdata, false); - cfg80211_put_bss(sdata->local->hw.wiphy, *bss); - return RX_MGMT_CFG80211_ASSOC_TIMEOUT; + cfg80211_put_bss(sdata->local->hw.wiphy, bss); + cfg80211_send_assoc_timeout(sdata->dev, mgmt->bssid); + return; } sdata_info(sdata, "associated\n");
@@@ -2720,7 -2828,7 +2793,7 @@@ ieee80211_destroy_assoc_data(sdata, true); }
- return RX_MGMT_CFG80211_RX_ASSOC; + cfg80211_send_rx_assoc(sdata->dev, bss, (u8 *)mgmt, len); }
static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, @@@ -2734,7 -2842,7 +2807,7 @@@ struct ieee80211_channel *channel; bool need_ps = false;
- lockdep_assert_held(&sdata->u.mgd.mtx); + sdata_assert_lock(sdata);
if ((sdata->u.mgd.associated && ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) || @@@ -2793,7 -2901,7 +2866,7 @@@ static void ieee80211_rx_mgmt_probe_res
ifmgd = &sdata->u.mgd;
- ASSERT_MGD_MTX(ifmgd); + sdata_assert_lock(sdata);
if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) return; /* ignore ProbeResp to foreign address */ @@@ -2818,7 -2926,7 +2891,7 @@@ ifmgd->auth_data->tries = 0; ifmgd->auth_data->timeout = jiffies; ifmgd->auth_data->timeout_started = true; - run_again(ifmgd, ifmgd->auth_data->timeout); + run_again(sdata, ifmgd->auth_data->timeout); } }
@@@ -2843,9 -2951,10 +2916,9 @@@ static const u64 care_about_ies (1ULL << WLAN_EID_HT_CAPABILITY) | (1ULL << WLAN_EID_HT_OPERATION);
-static enum rx_mgmt_action -ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, - struct ieee80211_mgmt *mgmt, size_t len, - u8 *deauth_buf, struct ieee80211_rx_status *rx_status) +static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len, + struct ieee80211_rx_status *rx_status) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; @@@ -2860,25 -2969,24 +2933,25 @@@ u8 erp_value = 0; u32 ncrc; u8 *bssid; + u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN];
- lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata);
/* Process beacon from the current BSS */ baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt; if (baselen > len) - return RX_MGMT_NONE; + return;
rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) { rcu_read_unlock(); - return RX_MGMT_NONE; + return; }
if (rx_status->freq != chanctx_conf->def.chan->center_freq) { rcu_read_unlock(); - return RX_MGMT_NONE; + return; } chan = chanctx_conf->def.chan; rcu_read_unlock(); @@@ -2905,13 -3013,13 +2978,13 @@@ /* continue assoc process */ ifmgd->assoc_data->timeout = jiffies; ifmgd->assoc_data->timeout_started = true; - run_again(ifmgd, ifmgd->assoc_data->timeout); - return RX_MGMT_NONE; + run_again(sdata, ifmgd->assoc_data->timeout); + return; }
if (!ifmgd->associated || !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) - return RX_MGMT_NONE; + return; bssid = ifmgd->associated->bssid;
/* Track average RSSI from the Beacon frames of the current AP */ @@@ -3057,7 -3165,7 +3130,7 @@@ }
if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid) - return RX_MGMT_NONE; + return; ifmgd->beacon_crc = ncrc; ifmgd->beacon_crc_valid = true;
@@@ -3091,7 -3199,6 +3164,7 @@@ }
changed |= BSS_CHANGED_DTIM_PERIOD; + ieee80211_recalc_ps_vif(sdata); }
if (elems.erp_info) { @@@ -3113,9 -3220,7 +3186,9 @@@ ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, WLAN_REASON_DEAUTH_LEAVING, true, deauth_buf); - return RX_MGMT_CFG80211_TX_DEAUTH; + cfg80211_send_deauth(sdata->dev, deauth_buf, + sizeof(deauth_buf)); + return; }
if (sta && elems.opmode_notif) @@@ -3132,13 -3237,19 +3205,13 @@@ elems.pwr_constr_elem);
ieee80211_bss_info_change_notify(sdata, changed); - - return RX_MGMT_NONE; }
void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { - struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_rx_status *rx_status; struct ieee80211_mgmt *mgmt; - struct cfg80211_bss *bss = NULL; - enum rx_mgmt_action rma = RX_MGMT_NONE; - u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN]; u16 fc; struct ieee802_11_elems elems; int ies_len; @@@ -3147,27 -3258,28 +3220,27 @@@ mgmt = (struct ieee80211_mgmt *) skb->data; fc = le16_to_cpu(mgmt->frame_control);
- mutex_lock(&ifmgd->mtx); + sdata_lock(sdata);
switch (fc & IEEE80211_FCTL_STYPE) { case IEEE80211_STYPE_BEACON: - rma = ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, - deauth_buf, rx_status); + ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, rx_status); break; case IEEE80211_STYPE_PROBE_RESP: ieee80211_rx_mgmt_probe_resp(sdata, skb); break; case IEEE80211_STYPE_AUTH: - rma = ieee80211_rx_mgmt_auth(sdata, mgmt, skb->len); + ieee80211_rx_mgmt_auth(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_DEAUTH: - rma = ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len); + ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_DISASSOC: - rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); + ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_ASSOC_RESP: case IEEE80211_STYPE_REASSOC_RESP: - rma = ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len, &bss); + ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_ACTION: if (mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) { @@@ -3213,7 -3325,34 +3286,7 @@@ } break; } - mutex_unlock(&ifmgd->mtx); - - switch (rma) { - case RX_MGMT_NONE: - /* no action */ - break; - case RX_MGMT_CFG80211_DEAUTH: - cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); - break; - case RX_MGMT_CFG80211_DISASSOC: - cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len); - break; - case RX_MGMT_CFG80211_RX_AUTH: - cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, skb->len); - break; - case RX_MGMT_CFG80211_RX_ASSOC: - cfg80211_send_rx_assoc(sdata->dev, bss, (u8 *)mgmt, skb->len); - break; - case RX_MGMT_CFG80211_ASSOC_TIMEOUT: - cfg80211_send_assoc_timeout(sdata->dev, mgmt->bssid); - break; - case RX_MGMT_CFG80211_TX_DEAUTH: - cfg80211_send_deauth(sdata->dev, deauth_buf, - sizeof(deauth_buf)); - break; - default: - WARN(1, "unexpected: %d", rma); - } + sdata_unlock(sdata); }
static void ieee80211_sta_timer(unsigned long data) @@@ -3227,12 -3366,20 +3300,12 @@@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, u8 *bssid, u8 reason, bool tx) { - struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, tx, frame_buf); - mutex_unlock(&ifmgd->mtx);
- /* - * must be outside lock due to cfg80211, - * but that's not a problem. - */ cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN); - - mutex_lock(&ifmgd->mtx); }
static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) @@@ -3242,7 -3389,7 +3315,7 @@@ struct ieee80211_mgd_auth_data *auth_data = ifmgd->auth_data; u32 tx_flags = 0;
- lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata);
if (WARN_ON_ONCE(!auth_data)) return -EINVAL; @@@ -3315,7 -3462,7 +3388,7 @@@ if (tx_flags == 0) { auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; ifmgd->auth_data->timeout_started = true; - run_again(ifmgd, auth_data->timeout); + run_again(sdata, auth_data->timeout); } else { auth_data->timeout_started = false; } @@@ -3328,7 -3475,7 +3401,7 @@@ static int ieee80211_do_assoc(struct ie struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data; struct ieee80211_local *local = sdata->local;
- lockdep_assert_held(&sdata->u.mgd.mtx); + sdata_assert_lock(sdata);
assoc_data->tries++; if (assoc_data->tries > IEEE80211_ASSOC_MAX_TRIES) { @@@ -3352,7 -3499,7 +3425,7 @@@ if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) { assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT; assoc_data->timeout_started = true; - run_again(&sdata->u.mgd, assoc_data->timeout); + run_again(sdata, assoc_data->timeout); } else { assoc_data->timeout_started = false; } @@@ -3377,7 -3524,7 +3450,7 @@@ void ieee80211_sta_work(struct ieee8021 struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- mutex_lock(&ifmgd->mtx); + sdata_lock(sdata);
if (ifmgd->status_received) { __le16 fc = ifmgd->status_fc; @@@ -3389,7 -3536,7 +3462,7 @@@ if (status_acked) { ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT_SHORT; - run_again(ifmgd, ifmgd->auth_data->timeout); + run_again(sdata, ifmgd->auth_data->timeout); } else { ifmgd->auth_data->timeout = jiffies - 1; } @@@ -3400,7 -3547,7 +3473,7 @@@ if (status_acked) { ifmgd->assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT_SHORT; - run_again(ifmgd, ifmgd->assoc_data->timeout); + run_again(sdata, ifmgd->assoc_data->timeout); } else { ifmgd->assoc_data->timeout = jiffies - 1; } @@@ -3423,10 -3570,12 +3496,10 @@@
ieee80211_destroy_auth_data(sdata, false);
- mutex_unlock(&ifmgd->mtx); cfg80211_send_auth_timeout(sdata->dev, bssid); - mutex_lock(&ifmgd->mtx); } } else if (ifmgd->auth_data && ifmgd->auth_data->timeout_started) - run_again(ifmgd, ifmgd->auth_data->timeout); + run_again(sdata, ifmgd->auth_data->timeout);
if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started && time_after(jiffies, ifmgd->assoc_data->timeout)) { @@@ -3439,10 -3588,12 +3512,10 @@@
ieee80211_destroy_assoc_data(sdata, false);
- mutex_unlock(&ifmgd->mtx); cfg80211_send_assoc_timeout(sdata->dev, bssid); - mutex_lock(&ifmgd->mtx); } } else if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started) - run_again(ifmgd, ifmgd->assoc_data->timeout); + run_again(sdata, ifmgd->assoc_data->timeout);
if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL | IEEE80211_STA_CONNECTION_POLL) && @@@ -3476,7 -3627,7 +3549,7 @@@ false); } } else if (time_is_after_jiffies(ifmgd->probe_timeout)) - run_again(ifmgd, ifmgd->probe_timeout); + run_again(sdata, ifmgd->probe_timeout); else if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) { mlme_dbg(sdata, "Failed to send nullfunc to AP %pM after %dms, disconnecting\n", @@@ -3505,7 -3656,7 +3578,7 @@@ } }
- mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); }
static void ieee80211_sta_bcn_mon_timer(unsigned long data) @@@ -3566,9 -3717,9 +3639,9 @@@ void ieee80211_sta_restart(struct ieee8 { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- mutex_lock(&ifmgd->mtx); + sdata_lock(sdata); if (!ifmgd->associated) { - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); return; }
@@@ -3579,10 -3730,10 +3652,10 @@@ ifmgd->associated->bssid, WLAN_REASON_UNSPECIFIED, true); - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); return; } - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); } #endif
@@@ -3614,6 -3765,8 +3687,6 @@@ void ieee80211_sta_setup_sdata(struct i ifmgd->uapsd_max_sp_len = sdata->local->hw.uapsd_max_sp_len; ifmgd->p2p_noa_index = -1;
- mutex_init(&ifmgd->mtx); - if (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS) ifmgd->req_smps = IEEE80211_SMPS_AUTOMATIC; else @@@ -3969,6 -4122,8 +4042,6 @@@ int ieee80211_mgd_auth(struct ieee80211
/* try to authenticate/probe */
- mutex_lock(&ifmgd->mtx); - if ((ifmgd->auth_data && !ifmgd->auth_data->done) || ifmgd->assoc_data) { err = -EBUSY; @@@ -3988,8 -4143,8 +4061,8 @@@ WLAN_REASON_UNSPECIFIED, false, frame_buf);
- __cfg80211_send_deauth(sdata->dev, frame_buf, - sizeof(frame_buf)); + cfg80211_send_deauth(sdata->dev, frame_buf, + sizeof(frame_buf)); }
sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid); @@@ -4006,7 -4161,8 +4079,7 @@@
/* hold our own reference */ cfg80211_ref_bss(local->hw.wiphy, auth_data->bss); - err = 0; - goto out_unlock; + return 0;
err_clear: memset(ifmgd->bssid, 0, ETH_ALEN); @@@ -4014,6 -4170,9 +4087,6 @@@ ifmgd->auth_data = NULL; err_free: kfree(auth_data); - out_unlock: - mutex_unlock(&ifmgd->mtx); - return err; }
@@@ -4044,6 -4203,8 +4117,6 @@@ int ieee80211_mgd_assoc(struct ieee8021 assoc_data->ssid_len = ssidie[1]; rcu_read_unlock();
- mutex_lock(&ifmgd->mtx); - if (ifmgd->associated) { u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
@@@ -4051,8 -4212,8 +4124,8 @@@ WLAN_REASON_UNSPECIFIED, false, frame_buf);
- __cfg80211_send_deauth(sdata->dev, frame_buf, - sizeof(frame_buf)); + cfg80211_send_deauth(sdata->dev, frame_buf, + sizeof(frame_buf)); }
if (ifmgd->auth_data && !ifmgd->auth_data->done) { @@@ -4246,7 -4407,7 +4319,7 @@@ } rcu_read_unlock();
- run_again(ifmgd, assoc_data->timeout); + run_again(sdata, assoc_data->timeout);
if (bss->corrupt_data) { char *corrupt_type = "data"; @@@ -4262,13 -4423,17 +4335,13 @@@ corrupt_type); }
- err = 0; - goto out; + return 0; err_clear: memset(ifmgd->bssid, 0, ETH_ALEN); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); ifmgd->assoc_data = NULL; err_free: kfree(assoc_data); - out: - mutex_unlock(&ifmgd->mtx); - return err; }
@@@ -4280,6 -4445,8 +4353,6 @@@ int ieee80211_mgd_deauth(struct ieee802 bool tx = !req->local_state_change; bool report_frame = false;
- mutex_lock(&ifmgd->mtx); - sdata_info(sdata, "deauthenticating from %pM by local choice (reason=%d)\n", req->bssid, req->reason_code); @@@ -4291,6 -4458,7 +4364,6 @@@ req->reason_code, tx, frame_buf); ieee80211_destroy_auth_data(sdata, false); - mutex_unlock(&ifmgd->mtx);
report_frame = true; goto out; @@@ -4302,11 -4470,12 +4375,11 @@@ req->reason_code, tx, frame_buf); report_frame = true; }
out: if (report_frame) - __cfg80211_send_deauth(sdata->dev, frame_buf, - IEEE80211_DEAUTH_FRAME_LEN); + cfg80211_send_deauth(sdata->dev, frame_buf, + IEEE80211_DEAUTH_FRAME_LEN);
return 0; } @@@ -4318,14 -4487,18 +4391,14 @@@ int ieee80211_mgd_disassoc(struct ieee8 u8 bssid[ETH_ALEN]; u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
- mutex_lock(&ifmgd->mtx); - /* * cfg80211 should catch this ... but it's racy since * we can receive a disassoc frame, process it, hand it * to cfg80211 while that's in a locked section already * trying to tell us that the user wants to disconnect. */ - if (ifmgd->associated != req->bss) { - mutex_unlock(&ifmgd->mtx); + if (ifmgd->associated != req->bss) return -ENOLINK; - }
sdata_info(sdata, "disassociating from %pM by local choice (reason=%d)\n", @@@ -4335,9 -4508,10 +4408,9 @@@ ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DISASSOC, req->reason_code, !req->local_state_change, frame_buf); - mutex_unlock(&ifmgd->mtx);
- __cfg80211_send_disassoc(sdata->dev, frame_buf, - IEEE80211_DEAUTH_FRAME_LEN); + cfg80211_send_disassoc(sdata->dev, frame_buf, + IEEE80211_DEAUTH_FRAME_LEN);
return 0; } @@@ -4357,13 -4531,13 +4430,13 @@@ void ieee80211_mgd_stop(struct ieee8021 cancel_work_sync(&ifmgd->csa_connection_drop_work); cancel_work_sync(&ifmgd->chswitch_work);
- mutex_lock(&ifmgd->mtx); + sdata_lock(sdata); if (ifmgd->assoc_data) ieee80211_destroy_assoc_data(sdata, false); if (ifmgd->auth_data) ieee80211_destroy_auth_data(sdata, false); del_timer_sync(&ifmgd->timer); - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); }
void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, diff --combined net/mac80211/util.c index 89a8377,72e6292..c75d3db --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@@ -560,9 -560,6 +560,9 @@@ void ieee80211_iterate_active_interface list_for_each_entry(sdata, &local->interfaces, list) { switch (sdata->vif.type) { case NL80211_IFTYPE_MONITOR: + if (!(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE)) + continue; + break; case NL80211_IFTYPE_AP_VLAN: continue; default: @@@ -601,9 -598,6 +601,9 @@@ void ieee80211_iterate_active_interface list_for_each_entry_rcu(sdata, &local->interfaces, list) { switch (sdata->vif.type) { case NL80211_IFTYPE_MONITOR: + if (!(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE)) + continue; + break; case NL80211_IFTYPE_AP_VLAN: continue; default: @@@ -667,12 -661,12 +667,12 @@@ void ieee80211_queue_delayed_work(struc } EXPORT_SYMBOL(ieee80211_queue_delayed_work);
- u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, bool action, + u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, struct ieee802_11_elems *elems, u64 filter, u32 crc) { size_t left = len; - u8 *pos = start; + const u8 *pos = start; bool calc_crc = filter != 0; DECLARE_BITMAP(seen_elems, 256); const u8 *ie; @@@ -1078,6 -1072,32 +1078,6 @@@ void ieee80211_sta_def_wmm_params(struc ieee80211_set_wmm_default(sdata, true); }
-u32 ieee80211_mandatory_rates(struct ieee80211_local *local, - enum ieee80211_band band) -{ - struct ieee80211_supported_band *sband; - struct ieee80211_rate *bitrates; - u32 mandatory_rates; - enum ieee80211_rate_flags mandatory_flag; - int i; - - sband = local->hw.wiphy->bands[band]; - if (WARN_ON(!sband)) - return 1; - - if (band == IEEE80211_BAND_2GHZ) - mandatory_flag = IEEE80211_RATE_MANDATORY_B; - else - mandatory_flag = IEEE80211_RATE_MANDATORY_A; - - bitrates = sband->bitrates; - mandatory_rates = 0; - for (i = 0; i < sband->n_bitrates; i++) - if (bitrates[i].flags & mandatory_flag) - mandatory_rates |= BIT(i); - return mandatory_rates; -} - void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, u16 transaction, u16 auth_alg, u16 status, const u8 *extra, size_t extra_len, const u8 *da, @@@ -1587,9 -1607,9 +1587,9 @@@ int ieee80211_reconfig(struct ieee80211 if (sdata->u.mgd.dtim_period) changed |= BSS_CHANGED_DTIM_PERIOD;
- mutex_lock(&sdata->u.mgd.mtx); + sdata_lock(sdata); ieee80211_bss_info_change_notify(sdata, changed); - mutex_unlock(&sdata->u.mgd.mtx); + sdata_unlock(sdata); break; case NL80211_IFTYPE_ADHOC: changed |= BSS_CHANGED_IBSS; diff --combined net/netfilter/ipvs/ip_vs_ctl.c index edb88fb,9e6c2a0..47e5108 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@@ -1487,9 -1487,9 +1487,9 @@@ ip_vs_forget_dev(struct ip_vs_dest *des * Currently only NETDEV_DOWN is handled to release refs to cached dsts */ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event, - void *ptr) + void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_service *svc; @@@ -1575,7 -1575,7 +1575,7 @@@ static int zero static int three = 3;
static int -proc_do_defense_mode(ctl_table *table, int write, +proc_do_defense_mode(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct net *net = current->nsproxy->net_ns; @@@ -1596,7 -1596,7 +1596,7 @@@ }
static int -proc_do_sync_threshold(ctl_table *table, int write, +proc_do_sync_threshold(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int *valp = table->data; @@@ -1616,7 -1616,7 +1616,7 @@@ }
static int -proc_do_sync_mode(ctl_table *table, int write, +proc_do_sync_mode(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int *valp = table->data; @@@ -1634,7 -1634,7 +1634,7 @@@ }
static int -proc_do_sync_ports(ctl_table *table, int write, +proc_do_sync_ports(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int *valp = table->data; @@@ -1716,9 -1716,9 +1716,9 @@@ static struct ctl_table vs_vars[] = }, { .procname = "sync_qlen_max", - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_doulongvec_minmax, }, { .procname = "sync_sock_size", @@@ -2542,6 -2542,7 +2542,7 @@@ __ip_vs_get_dest_entries(struct net *ne struct ip_vs_dest *dest; struct ip_vs_dest_entry entry;
+ memset(&entry, 0, sizeof(entry)); list_for_each_entry(dest, &svc->destinations, n_list) { if (count >= get->num_dests) break; diff --combined net/netfilter/nfnetlink_queue_core.c index c011543,5352b2d..299a48a --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c @@@ -41,14 -41,6 +41,14 @@@
#define NFQNL_QMAX_DEFAULT 1024
+/* We're using struct nlattr which has 16bit nla_len. Note that nla_len + * includes the header length. Thus, the maximum packet length that we + * support is 65531 bytes. We send truncated packets if the specified length + * is larger than that. Userspace can check for presence of NFQA_CAP_LEN + * attribute to detect truncation. + */ +#define NFQNL_MAX_COPY_RANGE (0xffff - NLA_HDRLEN) + struct nfqnl_instance { struct hlist_node hlist; /* global list of queues */ struct rcu_head rcu; @@@ -130,7 -122,7 +130,7 @@@ instance_create(struct nfnl_queue_net * inst->queue_num = queue_num; inst->peer_portid = portid; inst->queue_maxlen = NFQNL_QMAX_DEFAULT; - inst->copy_range = 0xffff; + inst->copy_range = NFQNL_MAX_COPY_RANGE; inst->copy_mode = NFQNL_COPY_NONE; spin_lock_init(&inst->lock); INIT_LIST_HEAD(&inst->queue_list); @@@ -341,9 -333,10 +341,9 @@@ nfqnl_build_packet_message(struct nfqnl return NULL;
data_len = ACCESS_ONCE(queue->copy_range); - if (data_len == 0 || data_len > entskb->len) + if (data_len > entskb->len) data_len = entskb->len;
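The NFQNL_MAX_COPY_RANGE comment above pins the largest supported copy at 65531 bytes because nla_len is 16 bits and counts the attribute header as well. A quick standalone check of that arithmetic; the 4-byte header size is hard-coded here as an assumption instead of pulling in the netlink uapi header:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EXAMPLE_NLA_HDRLEN 4    /* aligned size of struct nlattr (two u16s) */

    int main(void)
    {
        uint32_t max_copy_range = 0xffff - EXAMPLE_NLA_HDRLEN;

        assert(max_copy_range == 65531);
        printf("largest payload that fits in nla_len: %u bytes\n",
               max_copy_range);
        return 0;
    }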
- if (!entskb->head_frag || skb_headlen(entskb) < L1_CACHE_BYTES || skb_shinfo(entskb)->nr_frags >= MAX_SKB_FRAGS) @@@ -472,8 -465,7 +472,8 @@@ if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0) goto nla_put_failure;
- if (cap_len > 0 && nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len))) + if (cap_len > data_len && + nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len))) goto nla_put_failure;
if (nfqnl_put_packet_info(skb, entskb)) @@@ -517,6 -509,10 +517,6 @@@ __nfqnl_enqueue_packet(struct net *net } spin_lock_bh(&queue->lock);
- if (!queue->peer_portid) { - err = -EINVAL; - goto err_out_free_nskb; - } if (queue->queue_total >= queue->queue_maxlen) { if (queue->flags & NFQA_CFG_F_FAIL_OPEN) { failopen = 1; @@@ -641,9 -637,6 +641,6 @@@ nfqnl_enqueue_packet(struct nf_queue_en if (queue->copy_mode == NFQNL_COPY_NONE) return -EINVAL;
- if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(entry->skb)) - return __nfqnl_enqueue_packet(net, queue, entry); - skb = entry->skb;
switch (entry->pf) { @@@ -655,6 -648,9 +652,9 @@@ break; }
+ if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb)) + return __nfqnl_enqueue_packet(net, queue, entry); + nf_bridge_adjust_skb_data(skb); segs = skb_gso_segment(skb, 0); /* Does not use PTR_ERR to limit the number of error codes that can be @@@ -735,8 -731,13 +735,8 @@@ nfqnl_set_mode(struct nfqnl_instance *q
case NFQNL_COPY_PACKET: queue->copy_mode = mode; - /* We're using struct nlattr which has 16bit nla_len. Note that - * nla_len includes the header length. Thus, the maximum packet - * length that we support is 65531 bytes. We send truncated - * packets if the specified length is larger than that. - */ - if (range > 0xffff - NLA_HDRLEN) - queue->copy_range = 0xffff - NLA_HDRLEN; + if (range == 0 || range > NFQNL_MAX_COPY_RANGE) + queue->copy_range = NFQNL_MAX_COPY_RANGE; else queue->copy_range = range; break; @@@ -799,7 -800,7 +799,7 @@@ static in nfqnl_rcv_dev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr);
/* Drop any packets associated with the downed device */ if (event == NETDEV_DOWN) diff --combined net/netlink/af_netlink.c index 8978755,57ee84d..275d901 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@@ -371,7 -371,7 +371,7 @@@ static int netlink_mmap(struct file *fi err = 0; out: mutex_unlock(&nlk->pg_vec_lock); - return 0; + return err; }
static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr) @@@ -750,10 -750,6 +750,10 @@@ static void netlink_skb_destructor(stru skb->head = NULL; } #endif + if (is_vmalloc_addr(skb->head)) { + vfree(skb->head); + skb->head = NULL; + } if (skb->sk != NULL) sock_rfree(skb); } @@@ -858,23 -854,16 +858,23 @@@ netlink_unlock_table(void wake_up(&nl_table_wait); }
+static bool netlink_compare(struct net *net, struct sock *sk) +{ + return net_eq(sock_net(sk), net); +} + static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid) { - struct nl_portid_hash *hash = &nl_table[protocol].hash; + struct netlink_table *table = &nl_table[protocol]; + struct nl_portid_hash *hash = &table->hash; struct hlist_head *head; struct sock *sk;
read_lock(&nl_table_lock); head = nl_portid_hashfn(hash, portid); sk_for_each(sk, head) { - if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) { + if (table->compare(net, sk) && + (nlk_sk(sk)->portid == portid)) { sock_hold(sk); goto found; } @@@ -987,8 -976,7 +987,8 @@@ netlink_update_listeners(struct sock *s
static int netlink_insert(struct sock *sk, struct net *net, u32 portid) { - struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash; + struct netlink_table *table = &nl_table[sk->sk_protocol]; + struct nl_portid_hash *hash = &table->hash; struct hlist_head *head; int err = -EADDRINUSE; struct sock *osk; @@@ -998,8 -986,7 +998,8 @@@ head = nl_portid_hashfn(hash, portid); len = 0; sk_for_each(osk, head) { - if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid)) + if (table->compare(net, osk) && + (nlk_sk(osk)->portid == portid)) break; len++; } @@@ -1196,8 -1183,7 +1196,8 @@@ static int netlink_autobind(struct sock { struct sock *sk = sock->sk; struct net *net = sock_net(sk); - struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash; + struct netlink_table *table = &nl_table[sk->sk_protocol]; + struct nl_portid_hash *hash = &table->hash; struct hlist_head *head; struct sock *osk; s32 portid = task_tgid_vnr(current); @@@ -1209,7 -1195,7 +1209,7 @@@ retry netlink_table_grab(); head = nl_portid_hashfn(hash, portid); sk_for_each(osk, head) { - if (!net_eq(sock_net(osk), net)) + if (!table->compare(net, osk)) continue; if (nlk_sk(osk)->portid == portid) { /* Bind collision, search negative portid values. */ @@@ -1434,35 -1420,6 +1434,35 @@@ struct sock *netlink_getsockbyfilp(stru return sock; }
+static struct sk_buff *netlink_alloc_large_skb(unsigned int size) +{ + struct sk_buff *skb; + void *data; + + if (size <= NLMSG_GOODSIZE) + return alloc_skb(size, GFP_KERNEL); + + skb = alloc_skb_head(GFP_KERNEL); + if (skb == NULL) + return NULL; + + data = vmalloc(size); + if (data == NULL) + goto err; + + skb->head = data; + skb->data = data; + skb_reset_tail_pointer(skb); + skb->end = skb->tail + size; + skb->len = 0; + skb->destructor = netlink_skb_destructor; + + return skb; +err: + kfree_skb(skb); + return NULL; +} + /* * Attach a skb to a netlink socket. * The caller must hold a reference to the destination socket. On error, the @@@ -1553,7 -1510,7 +1553,7 @@@ static struct sk_buff *netlink_trim(str return skb;
delta = skb->end - skb->tail; - if (delta * 2 < skb->truesize) + if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize) return skb;
if (skb_shared(skb)) { @@@ -2139,7 -2096,7 +2139,7 @@@ static int netlink_sendmsg(struct kioc if (len > sk->sk_sndbuf - 32) goto out; err = -ENOBUFS; - skb = alloc_skb(len, GFP_KERNEL); + skb = netlink_alloc_large_skb(len); if (skb == NULL) goto out;
@@@ -2328,8 -2285,6 +2328,8 @@@ __netlink_kernel_create(struct net *net if (cfg) { nl_table[unit].bind = cfg->bind; nl_table[unit].flags = cfg->flags; + if (cfg->compare) + nl_table[unit].compare = cfg->compare; } nl_table[unit].registered = 1; } else { @@@ -2752,7 -2707,6 +2752,7 @@@ static void *netlink_seq_next(struct se { struct sock *s; struct nl_seq_iter *iter; + struct net *net; int i, j;
++*pos; @@@ -2760,12 -2714,11 +2760,12 @@@ if (v == SEQ_START_TOKEN) return netlink_seq_socket_idx(seq, 0);
+ net = seq_file_net(seq); iter = seq->private; s = v; do { s = sk_next(s); - } while (s && sock_net(s) != seq_file_net(seq)); + } while (s && !nl_table[s->sk_protocol].compare(net, s)); if (s) return s;
@@@ -2777,8 -2730,7 +2777,8 @@@
for (; j <= hash->mask; j++) { s = sk_head(&hash->table[j]); - while (s && sock_net(s) != seq_file_net(seq)) + + while (s && !nl_table[s->sk_protocol].compare(net, s)) s = sk_next(s); if (s) { iter->link = i; @@@ -2971,8 -2923,6 +2971,8 @@@ static int __init netlink_proto_init(vo hash->shift = 0; hash->mask = 0; hash->rehash_time = jiffies; + + nl_table[i].compare = netlink_compare; }
netlink_add_usersock_entry(); diff --combined net/packet/af_packet.c index 79fe632,20a1bd0..4b66c75 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@@ -2851,12 -2851,11 +2851,11 @@@ static int packet_getname_spkt(struct s return -EOPNOTSUPP;
uaddr->sa_family = AF_PACKET; + memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); if (dev) - strncpy(uaddr->sa_data, dev->name, 14); - else - memset(uaddr->sa_data, 0, 14); + strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); rcu_read_unlock(); *uaddr_len = sizeof(*uaddr);
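The af_packet hunk above zeroes sa_data up front and switches from strncpy() to strlcpy(), so the copied interface name is NUL-terminated even when it would fill the 14-byte buffer. A small userspace illustration of the difference; a local strlcpy-style helper is used because not every libc ships strlcpy():

    #include <stdio.h>
    #include <string.h>

    /* Minimal strlcpy-style helper: always NUL-terminates when size > 0. */
    static size_t copy_string(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size) {
            size_t n = len < size - 1 ? len : size - 1;

            memcpy(dst, src, n);
            dst[n] = '\0';
        }
        return len;
    }

    int main(void)
    {
        char a[6], b[6];
        const char *name = "dummy0";    /* six chars: no room for a NUL */

        strncpy(a, name, sizeof(a));    /* fills a[] but leaves it unterminated */

        memset(b, 0, sizeof(b));        /* the pattern the fix adopts */
        copy_string(b, name, sizeof(b));

        printf("strncpy left a terminator? %s\n",
               memchr(a, '\0', sizeof(a)) ? "yes" : "no");
        printf("truncated copy: %s\n", b);
        return 0;
    }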
@@@ -3331,11 -3330,10 +3330,11 @@@ static int packet_getsockopt(struct soc }
-static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data) +static int packet_notifier(struct notifier_block *this, + unsigned long msg, void *ptr) { struct sock *sk; - struct net_device *dev = data; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev);
rcu_read_lock(); diff --combined net/sctp/socket.c index 75fe92a,6abb1ca..32db19b --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@@ -84,6 -84,11 +84,6 @@@ #include <net/sctp/sctp.h> #include <net/sctp/sm.h>
-/* WARNING: Please do not remove the SCTP_STATIC attribute to - * any of the functions below as they are used to export functions - * used by a project regression testsuite. - */ - /* Forward declarations for internal helper functions. */ static int sctp_writeable(struct sock *sk); static void sctp_wfree(struct sk_buff *skb); @@@ -274,7 -279,7 +274,7 @@@ static struct sctp_transport *sctp_addr * sockaddr_in6 [RFC 2553]), * addr_len - the size of the address structure. */ -SCTP_STATIC int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len) +static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len) { int retval = 0;
@@@ -328,7 -333,7 +328,7 @@@ static struct sctp_af *sctp_sockaddr_af }
/* Bind a local address either to an endpoint or to an association. */ -SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) +static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) { struct net *net = sock_net(sk); struct sctp_sock *sp = sctp_sk(sk); @@@ -959,9 -964,9 +959,9 @@@ int sctp_asconf_mgmt(struct sctp_sock * * * Returns 0 if ok, <0 errno code on error. */ -SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk, - struct sockaddr __user *addrs, - int addrs_size, int op) +static int sctp_setsockopt_bindx(struct sock* sk, + struct sockaddr __user *addrs, + int addrs_size, int op) { struct sockaddr *kaddrs; int err; @@@ -1307,7 -1312,7 +1307,7 @@@ out_free * * Returns >=0 if ok, <0 errno code on error. */ -SCTP_STATIC int __sctp_setsockopt_connectx(struct sock* sk, +static int __sctp_setsockopt_connectx(struct sock* sk, struct sockaddr __user *addrs, int addrs_size, sctp_assoc_t *assoc_id) @@@ -1345,9 -1350,9 +1345,9 @@@ * This is an older interface. It's kept for backward compatibility * to the option that doesn't provide association id. */ -SCTP_STATIC int sctp_setsockopt_connectx_old(struct sock* sk, - struct sockaddr __user *addrs, - int addrs_size) +static int sctp_setsockopt_connectx_old(struct sock* sk, + struct sockaddr __user *addrs, + int addrs_size) { return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL); } @@@ -1358,9 -1363,9 +1358,9 @@@ * indication to the call. Error is always negative and association id is * always positive. */ -SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk, - struct sockaddr __user *addrs, - int addrs_size) +static int sctp_setsockopt_connectx(struct sock* sk, + struct sockaddr __user *addrs, + int addrs_size) { sctp_assoc_t assoc_id = 0; int err = 0; @@@ -1381,9 -1386,9 +1381,9 @@@ * addrs_num structure member. That way we can re-use the existing * code. */ -SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len, - char __user *optval, - int __user *optlen) +static int sctp_getsockopt_connectx3(struct sock* sk, int len, + char __user *optval, + int __user *optlen) { struct sctp_getaddrs_old param; sctp_assoc_t assoc_id = 0; @@@ -1459,7 -1464,7 +1459,7 @@@ * shutdown phase does not finish during this period, close() will * return but the graceful shutdown phase continues in the system. */ -SCTP_STATIC void sctp_close(struct sock *sk, long timeout) +static void sctp_close(struct sock *sk, long timeout) { struct net *net = sock_net(sk); struct sctp_endpoint *ep; @@@ -1568,10 -1573,10 +1568,10 @@@ static int sctp_error(struct sock *sk, */ /* BUG: We do not implement the equivalent of sk_stream_wait_memory(). */
-SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *); +static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *);
-SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t msg_len) +static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, + struct msghdr *msg, size_t msg_len) { struct net *net = sock_net(sk); struct sctp_sock *sp; @@@ -2029,9 -2034,9 +2029,9 @@@ static int sctp_skb_pull(struct sk_buf */ static struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
-SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, - struct msghdr *msg, size_t len, int noblock, - int flags, int *addr_len) +static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, + struct msghdr *msg, size_t len, int noblock, + int flags, int *addr_len) { struct sctp_ulpevent *event = NULL; struct sctp_sock *sp = sctp_sk(sk); @@@ -3560,8 -3565,8 +3560,8 @@@ static int sctp_setsockopt_paddr_thresh * optval - the buffer to store the value of the option. * optlen - the size of the buffer. */ -SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) +static int sctp_setsockopt(struct sock *sk, int level, int optname, + char __user *optval, unsigned int optlen) { int retval = 0;
@@@ -3720,8 -3725,8 +3720,8 @@@ out_nounlock * * len: the size of the address. */ -SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *addr, - int addr_len) +static int sctp_connect(struct sock *sk, struct sockaddr *addr, + int addr_len) { int err = 0; struct sctp_af *af; @@@ -3747,7 -3752,7 +3747,7 @@@ }
/* FIXME: Write comments. */ -SCTP_STATIC int sctp_disconnect(struct sock *sk, int flags) +static int sctp_disconnect(struct sock *sk, int flags) { return -EOPNOTSUPP; /* STUB */ } @@@ -3759,7 -3764,7 +3759,7 @@@ * descriptor will be returned from accept() to represent the newly * formed association. */ -SCTP_STATIC struct sock *sctp_accept(struct sock *sk, int flags, int *err) +static struct sock *sctp_accept(struct sock *sk, int flags, int *err) { struct sctp_sock *sp; struct sctp_endpoint *ep; @@@ -3812,7 -3817,7 +3812,7 @@@ out }
/* The SCTP ioctl handler. */ -SCTP_STATIC int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) +static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) { int rc = -ENOTCONN;
@@@ -3854,9 -3859,10 +3854,9 @@@ out * initialized the SCTP-specific portion of the sock. * The sock structure should already be zero-filled memory. */ -SCTP_STATIC int sctp_init_sock(struct sock *sk) +static int sctp_init_sock(struct sock *sk) { struct net *net = sock_net(sk); - struct sctp_endpoint *ep; struct sctp_sock *sp;
SCTP_DEBUG_PRINTK("sctp_init_sock(sk: %p)\n", sk); @@@ -3965,10 -3971,11 +3965,10 @@@ * change the data structure relationships, this may still * be useful for storing pre-connect address information. */ - ep = sctp_endpoint_new(sk, GFP_KERNEL); - if (!ep) + sp->ep = sctp_endpoint_new(sk, GFP_KERNEL); + if (!sp->ep) return -ENOMEM;
- sp->ep = ep; sp->hmac = NULL;
SCTP_DBG_OBJCNT_INC(sock); @@@ -3988,7 -3995,7 +3988,7 @@@ }
/* Cleanup any SCTP per socket resources. */ -SCTP_STATIC void sctp_destroy_sock(struct sock *sk) +static void sctp_destroy_sock(struct sock *sk) { struct sctp_sock *sp;
@@@ -3996,6 -4003,12 +3996,12 @@@
/* Release our hold on the endpoint. */ sp = sctp_sk(sk); + /* This could happen during socket init, thus we bail out + * early, since the rest of the below is not setup either. + */ + if (sp->ep == NULL) + return; + if (sp->do_auto_asconf) { sp->do_auto_asconf = 0; list_del(&sp->auto_asconf_list); @@@ -4023,7 -4036,7 +4029,7 @@@ * Disables further send and receive operations * and initiates the SCTP shutdown sequence. */ -SCTP_STATIC void sctp_shutdown(struct sock *sk, int how) +static void sctp_shutdown(struct sock *sk, int how) { struct net *net = sock_net(sk); struct sctp_endpoint *ep; @@@ -5695,8 -5708,8 +5701,8 @@@ static int sctp_getsockopt_assoc_stats( return 0; }
-SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) +static int sctp_getsockopt(struct sock *sk, int level, int optname, + char __user *optval, int __user *optlen) { int retval = 0; int len; @@@ -6041,7 -6054,7 +6047,7 @@@ static int sctp_get_port(struct sock *s /* * Move a socket to LISTENING state. */ -SCTP_STATIC int sctp_listen_start(struct sock *sk, int backlog) +static int sctp_listen_start(struct sock *sk, int backlog) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_endpoint *ep = sp->ep; @@@ -6328,7 -6341,8 +6334,7 @@@ static int sctp_autobind(struct sock *s * msg_control * points here */ -SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *msg, - sctp_cmsgs_t *cmsgs) +static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs) { struct cmsghdr *cmsg; struct msghdr *my_msg = (struct msghdr *)msg; diff --combined net/wireless/nl80211.c index 31d265f,b14b7e3..ea74b9d --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@@ -37,10 -37,10 +37,10 @@@ static void nl80211_post_doit(struct ge
/* the netlink family */ static struct genl_family nl80211_fam = { - .id = GENL_ID_GENERATE, /* don't bother with a hardcoded ID */ - .name = "nl80211", /* have users key off the name instead */ - .hdrsize = 0, /* no private header */ - .version = 1, /* no particular meaning now */ + .id = GENL_ID_GENERATE, /* don't bother with a hardcoded ID */ + .name = NL80211_GENL_NAME, /* have users key off the name instead */ + .hdrsize = 0, /* no private header */ + .version = 1, /* no particular meaning now */ .maxattr = NL80211_ATTR_MAX, .netnsok = true, .pre_doit = nl80211_pre_doit, @@@ -59,7 -59,7 +59,7 @@@ __cfg80211_wdev_from_attrs(struct net * int wiphy_idx = -1; int ifidx = -1;
- assert_cfg80211_lock(); + ASSERT_RTNL();
if (!have_ifidx && !have_wdev_id) return ERR_PTR(-EINVAL); @@@ -80,6 -80,7 +80,6 @@@ if (have_wdev_id && rdev->wiphy_idx != wiphy_idx) continue;
- mutex_lock(&rdev->devlist_mtx); list_for_each_entry(wdev, &rdev->wdev_list, list) { if (have_ifidx && wdev->netdev && wdev->netdev->ifindex == ifidx) { @@@ -91,6 -92,7 +91,6 @@@ break; } } - mutex_unlock(&rdev->devlist_mtx);
if (result) break; @@@ -107,7 -109,7 +107,7 @@@ __cfg80211_rdev_from_attrs(struct net * struct cfg80211_registered_device *rdev = NULL, *tmp; struct net_device *netdev;
- assert_cfg80211_lock(); + ASSERT_RTNL();
if (!attrs[NL80211_ATTR_WIPHY] && !attrs[NL80211_ATTR_IFINDEX] && @@@ -126,12 -128,14 +126,12 @@@ tmp = cfg80211_rdev_by_wiphy_idx(wdev_id >> 32); if (tmp) { /* make sure wdev exists */ - mutex_lock(&tmp->devlist_mtx); list_for_each_entry(wdev, &tmp->wdev_list, list) { if (wdev->identifier != (u32)wdev_id) continue; found = true; break; } - mutex_unlock(&tmp->devlist_mtx);
if (!found) tmp = NULL; @@@ -178,6 -182,19 +178,6 @@@ /* * This function returns a pointer to the driver * that the genl_info item that is passed refers to. - * If successful, it returns non-NULL and also locks - * the driver's mutex! - * - * This means that you need to call cfg80211_unlock_rdev() - * before being allowed to acquire &cfg80211_mutex! - * - * This is necessary because we need to lock the global - * mutex to get an item off the list safely, and then - * we lock the rdev mutex so it doesn't go away under us. - * - * We don't want to keep cfg80211_mutex locked - * for all the time in order to allow requests on - * other interfaces to go through at the same time. * * The result of this can be a PTR_ERR and hence must * be checked with IS_ERR() for errors. @@@ -185,7 -202,20 +185,7 @@@ static struct cfg80211_registered_device * cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info) { - struct cfg80211_registered_device *rdev; - - mutex_lock(&cfg80211_mutex); - rdev = __cfg80211_rdev_from_attrs(netns, info->attrs); - - /* if it is not an error we grab the lock on - * it to assure it won't be going away while - * we operate on it */ - if (!IS_ERR(rdev)) - mutex_lock(&rdev->mtx); - - mutex_unlock(&cfg80211_mutex); - - return rdev; + return __cfg80211_rdev_from_attrs(netns, info->attrs); }
/* policy for the attributes */ @@@ -348,7 -378,6 +348,7 @@@ static const struct nla_policy nl80211_ [NL80211_ATTR_MDID] = { .type = NLA_U16 }, [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, + [NL80211_ATTR_PEER_AID] = { .type = NLA_U16 }, };
/* policy for the key attributes */ @@@ -426,6 -455,7 +426,6 @@@ static int nl80211_prepare_wdev_dump(st int err;
rtnl_lock(); - mutex_lock(&cfg80211_mutex);
if (!cb->args[0]) { err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, @@@ -454,12 -484,14 +454,12 @@@ *rdev = wiphy_to_dev(wiphy); *wdev = NULL;
- mutex_lock(&(*rdev)->devlist_mtx); list_for_each_entry(tmp, &(*rdev)->wdev_list, list) { if (tmp->identifier == cb->args[1]) { *wdev = tmp; break; } } - mutex_unlock(&(*rdev)->devlist_mtx);
if (!*wdev) { err = -ENODEV; @@@ -467,14 -499,19 +467,14 @@@ } }
- cfg80211_lock_rdev(*rdev); - - mutex_unlock(&cfg80211_mutex); return 0; out_unlock: - mutex_unlock(&cfg80211_mutex); rtnl_unlock(); return err; }
static void nl80211_finish_wdev_dump(struct cfg80211_registered_device *rdev) { - cfg80211_unlock_rdev(rdev); rtnl_unlock(); }
@@@ -1527,12 -1564,17 +1527,18 @@@ static int nl80211_dump_wiphy(struct sk struct cfg80211_registered_device *dev; s64 filter_wiphy = -1; bool split = false; - struct nlattr **tb = nl80211_fam.attrbuf; + struct nlattr **tb; int res;
+ /* will be zeroed in nlmsg_parse() */ + tb = kmalloc(sizeof(*tb) * (NL80211_ATTR_MAX + 1), GFP_KERNEL); + if (!tb) + return -ENOMEM; + - mutex_lock(&cfg80211_mutex); + rtnl_lock(); ++ res = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, - tb, nl80211_fam.maxattr, nl80211_policy); + tb, NL80211_ATTR_MAX, nl80211_policy); if (res == 0) { split = tb[NL80211_ATTR_SPLIT_WIPHY_DUMP]; if (tb[NL80211_ATTR_WIPHY]) @@@ -1544,8 -1586,11 +1550,11 @@@ int ifidx = nla_get_u32(tb[NL80211_ATTR_IFINDEX]);
netdev = dev_get_by_index(sock_net(skb->sk), ifidx); - if (!netdev) + if (!netdev) { - mutex_unlock(&cfg80211_mutex); ++ rtnl_unlock(); + kfree(tb); return -ENODEV; + } if (netdev->ieee80211_ptr) { dev = wiphy_to_dev( netdev->ieee80211_ptr->wiphy); @@@ -1554,6 -1599,7 +1563,7 @@@ dev_put(netdev); } } + kfree(tb);
list_for_each_entry(dev, &cfg80211_rdev_list, list) { if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk))) @@@ -1589,6 -1635,7 +1599,7 @@@ !skb->len && cb->min_dump_alloc < 4096) { cb->min_dump_alloc = 4096; - mutex_unlock(&cfg80211_mutex); ++ rtnl_unlock(); return 1; } idx--; @@@ -1597,7 -1644,7 +1608,7 @@@ } while (cb->args[1] > 0); break; } - mutex_unlock(&cfg80211_mutex); + rtnl_unlock();
cb->args[0] = idx;
@@@ -1752,6 -1799,7 +1763,6 @@@ static int __nl80211_set_channel(struc if (result) return result;
- mutex_lock(&rdev->devlist_mtx); switch (iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: @@@ -1775,6 -1823,7 +1786,6 @@@ default: result = -EINVAL; } - mutex_unlock(&rdev->devlist_mtx);
return result; } @@@ -1823,8 -1872,6 +1834,8 @@@ static int nl80211_set_wiphy(struct sk_ u32 frag_threshold = 0, rts_threshold = 0; u8 coverage_class = 0;
+ ASSERT_RTNL(); + /* * Try to find the wiphy and netdev. Normally this * function shouldn't need the netdev, but this is @@@ -1834,25 -1881,31 +1845,25 @@@ * also passed a netdev to set_wiphy, so that it is * possible to let that go to the right netdev! */ - mutex_lock(&cfg80211_mutex);
if (info->attrs[NL80211_ATTR_IFINDEX]) { int ifindex = nla_get_u32(info->attrs[NL80211_ATTR_IFINDEX]);
netdev = dev_get_by_index(genl_info_net(info), ifindex); - if (netdev && netdev->ieee80211_ptr) { + if (netdev && netdev->ieee80211_ptr) rdev = wiphy_to_dev(netdev->ieee80211_ptr->wiphy); - mutex_lock(&rdev->mtx); - } else + else netdev = NULL; }
if (!netdev) { rdev = __cfg80211_rdev_from_attrs(genl_info_net(info), info->attrs); - if (IS_ERR(rdev)) { - mutex_unlock(&cfg80211_mutex); + if (IS_ERR(rdev)) return PTR_ERR(rdev); wdev = NULL; netdev = NULL; result = 0; - - mutex_lock(&rdev->mtx); } else wdev = netdev->ieee80211_ptr;
@@@ -1865,6 -1918,8 +1876,6 @@@ result = cfg80211_dev_rename( rdev, nla_data(info->attrs[NL80211_ATTR_WIPHY_NAME]));
- mutex_unlock(&cfg80211_mutex); - if (result) goto bad_res;
@@@ -2071,6 -2126,7 +2082,6 @@@ }
bad_res: - mutex_unlock(&rdev->mtx); if (netdev) dev_put(netdev); return result; @@@ -2168,7 -2224,7 +2179,7 @@@ static int nl80211_dump_interface(struc struct cfg80211_registered_device *rdev; struct wireless_dev *wdev;
- mutex_lock(&cfg80211_mutex); + rtnl_lock(); list_for_each_entry(rdev, &cfg80211_rdev_list, list) { if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk))) continue; @@@ -2178,6 -2234,7 +2189,6 @@@ } if_idx = 0;
- mutex_lock(&rdev->devlist_mtx); list_for_each_entry(wdev, &rdev->wdev_list, list) { if (if_idx < if_start) { if_idx++; @@@ -2186,15 -2243,17 +2197,15 @@@ if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, rdev, wdev) < 0) { - mutex_unlock(&rdev->devlist_mtx); goto out; } if_idx++; } - mutex_unlock(&rdev->devlist_mtx);
wp_idx++; } out: - mutex_unlock(&cfg80211_mutex); + rtnl_unlock();
cb->args[0] = wp_idx; cb->args[1] = if_idx; @@@ -2227,7 -2286,6 +2238,7 @@@ static const struct nla_policy mntr_fla [NL80211_MNTR_FLAG_CONTROL] = { .type = NLA_FLAG }, [NL80211_MNTR_FLAG_OTHER_BSS] = { .type = NLA_FLAG }, [NL80211_MNTR_FLAG_COOK_FRAMES] = { .type = NLA_FLAG }, + [NL80211_MNTR_FLAG_ACTIVE] = { .type = NLA_FLAG }, };
static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags) @@@ -2339,10 -2397,6 +2350,10 @@@ static int nl80211_set_interface(struc change = true; }
+ if (flags && (*flags & NL80211_MNTR_FLAG_ACTIVE) && + !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) + return -EOPNOTSUPP; + if (change) err = cfg80211_change_iface(rdev, dev, ntype, flags, &params); else @@@ -2400,11 -2454,6 +2411,11 @@@ static int nl80211_new_interface(struc err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, &flags); + + if (!err && (flags & NL80211_MNTR_FLAG_ACTIVE) && + !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) + return -EOPNOTSUPP; + wdev = rdev_add_virtual_intf(rdev, nla_data(info->attrs[NL80211_ATTR_IFNAME]), type, err ? NULL : &flags, &params); @@@ -2437,9 -2486,11 +2448,9 @@@ INIT_LIST_HEAD(&wdev->mgmt_registrations); spin_lock_init(&wdev->mgmt_registrations_lock);
wdev->identifier = ++rdev->wdev_id; list_add_rcu(&wdev->list, &rdev->wdev_list); rdev->devlist_generation++; - mutex_unlock(&rdev->devlist_mtx); break; default: break; @@@ -2948,6 -2999,8 +2959,6 @@@ static bool nl80211_get_ap_channel(stru struct wireless_dev *wdev; bool ret = false;
- mutex_lock(&rdev->devlist_mtx); - list_for_each_entry(wdev, &rdev->wdev_list, list) { if (wdev->iftype != NL80211_IFTYPE_AP && wdev->iftype != NL80211_IFTYPE_P2P_GO) @@@ -2961,6 -3014,8 +2972,6 @@@ break; }
- mutex_unlock(&rdev->devlist_mtx); - return ret; }
@@@ -3122,10 -3177,13 +3133,10 @@@ static int nl80211_start_ap(struct sk_b params.radar_required = true; }
- mutex_lock(&rdev->devlist_mtx); err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype, params.chandef.chan, CHAN_MODE_SHARED, radar_detect_width); - mutex_unlock(&rdev->devlist_mtx); - if (err) return err;
@@@ -3325,32 -3383,6 +3336,32 @@@ static bool nl80211_put_sta_rate(struc return true; }
+static bool nl80211_put_signal(struct sk_buff *msg, u8 mask, s8 *signal, + int id) +{ + void *attr; + int i = 0; + + if (!mask) + return true; + + attr = nla_nest_start(msg, id); + if (!attr) + return false; + + for (i = 0; i < IEEE80211_MAX_CHAINS; i++) { + if (!(mask & BIT(i))) + continue; + + if (nla_put_u8(msg, i, signal[i])) + return false; + } + + nla_nest_end(msg, attr); + + return true; +} + static int nl80211_send_station(struct sk_buff *msg, u32 portid, u32 seq, int flags, struct cfg80211_registered_device *rdev, @@@ -3422,18 -3454,6 +3433,18 @@@ default: break; } + if (sinfo->filled & STATION_INFO_CHAIN_SIGNAL) { + if (!nl80211_put_signal(msg, sinfo->chains, + sinfo->chain_signal, + NL80211_STA_INFO_CHAIN_SIGNAL)) + goto nla_put_failure; + } + if (sinfo->filled & STATION_INFO_CHAIN_SIGNAL_AVG) { + if (!nl80211_put_signal(msg, sinfo->chains, + sinfo->chain_signal_avg, + NL80211_STA_INFO_CHAIN_SIGNAL_AVG)) + goto nla_put_failure; + } if (sinfo->filled & STATION_INFO_TX_BITRATE) { if (!nl80211_put_sta_rate(msg, &sinfo->txrate, NL80211_STA_INFO_TX_BITRATE)) @@@ -3821,8 -3841,6 +3832,8 @@@ static int nl80211_set_station_tdls(str struct station_parameters *params) { /* Dummy STA entry gets updated once the peer capabilities are known */ + if (info->attrs[NL80211_ATTR_PEER_AID]) + params->aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]); if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) params->ht_capa = nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); @@@ -3963,8 -3981,7 +3974,8 @@@ static int nl80211_new_station(struct s if (!info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]) return -EINVAL;
- if (!info->attrs[NL80211_ATTR_STA_AID]) + if (!info->attrs[NL80211_ATTR_STA_AID] && + !info->attrs[NL80211_ATTR_PEER_AID]) return -EINVAL;
mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); @@@ -3975,10 -3992,7 +3986,10 @@@ params.listen_interval = nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
- params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); + if (info->attrs[NL80211_ATTR_STA_AID]) + params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); + else + params.aid = nla_get_u16(info->attrs[NL80211_ATTR_PEER_AID]); if (!params.aid || params.aid > IEEE80211_MAX_AID) return -EINVAL;
@@@ -4627,7 -4641,6 +4638,7 @@@ static const struct nla_polic [NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG }, + [NL80211_MESH_SETUP_AUTH_PROTOCOL] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_USERSPACE_MPM] = { .type = NLA_FLAG }, [NL80211_MESH_SETUP_IE] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, @@@ -4813,13 -4826,6 +4824,13 @@@ static int nl80211_parse_mesh_setup(str if (setup->is_secure) setup->user_mpm = true;
+ if (tb[NL80211_MESH_SETUP_AUTH_PROTOCOL]) { + if (!setup->user_mpm) + return -EINVAL; + setup->auth_id = + nla_get_u8(tb[NL80211_MESH_SETUP_AUTH_PROTOCOL]); + } + return 0; }
@@@ -4862,13 -4868,18 +4873,13 @@@ static int nl80211_get_reg(struct sk_bu void *hdr = NULL; struct nlattr *nl_reg_rules; unsigned int i; - int err = -EINVAL; - - mutex_lock(&cfg80211_mutex);
if (!cfg80211_regdomain) - goto out; + return -EINVAL;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!msg) { - err = -ENOBUFS; - goto out; - } + if (!msg) + return -ENOBUFS;
hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0, NL80211_CMD_GET_REG); @@@ -4927,7 -4938,8 +4938,7 @@@ nla_nest_end(msg, nl_reg_rules);
genlmsg_end(msg, hdr); - err = genlmsg_reply(msg, info); - goto out; + return genlmsg_reply(msg, info);
nla_put_failure_rcu: rcu_read_unlock(); @@@ -4935,7 -4947,10 +4946,7 @@@ nla_put_failure genlmsg_cancel(msg, hdr); put_failure: nlmsg_free(msg); - err = -EMSGSIZE; -out: - mutex_unlock(&cfg80211_mutex); - return err; + return -EMSGSIZE; }
static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info) @@@ -5001,9 -5016,12 +5012,9 @@@ } }
r = set_regdom(rd); /* set_regdom took ownership */ rd = NULL; - mutex_unlock(&cfg80211_mutex);
bad_reg: kfree(rd); @@@ -5053,6 -5071,7 +5064,6 @@@ static int nl80211_trigger_scan(struct if (!rdev->ops->scan) return -EOPNOTSUPP;
- mutex_lock(&rdev->sched_scan_mtx); if (rdev->scan_req) { err = -EBUSY; goto unlock; @@@ -5238,6 -5257,7 +5249,6 @@@ }
unlock: - mutex_unlock(&rdev->sched_scan_mtx); return err; }
@@@ -5309,6 -5329,8 +5320,6 @@@ static int nl80211_start_sched_scan(str if (ie_len > wiphy->max_sched_scan_ie_len) return -EINVAL;
- mutex_lock(&rdev->sched_scan_mtx); - if (rdev->sched_scan_req) { err = -EINPROGRESS; goto out; @@@ -5476,6 -5498,7 +5487,6 @@@ out_free: kfree(request); out: - mutex_unlock(&rdev->sched_scan_mtx); return err; }
@@@ -5483,12 -5506,17 +5494,12 @@@ static int nl80211_stop_sched_scan(stru struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; - int err;
if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) || !rdev->ops->sched_scan_stop) return -EOPNOTSUPP;
- mutex_lock(&rdev->sched_scan_mtx); - err = __cfg80211_stop_sched_scan(rdev, false); - mutex_unlock(&rdev->sched_scan_mtx); - - return err; + return __cfg80211_stop_sched_scan(rdev, false); }
static int nl80211_start_radar_detection(struct sk_buff *skb, @@@ -5520,11 -5548,12 +5531,11 @@@ if (!rdev->ops->start_radar_detection) return -EOPNOTSUPP;
- mutex_lock(&rdev->devlist_mtx); err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype, chandef.chan, CHAN_MODE_SHARED, BIT(chandef.width)); if (err) - goto err_locked; + return err;
err = rdev->ops->start_radar_detection(&rdev->wiphy, dev, &chandef); if (!err) { @@@ -5532,6 -5561,9 +5543,6 @@@ wdev->cac_started = true; wdev->cac_start_time = jiffies; } -err_locked: - mutex_unlock(&rdev->devlist_mtx); - return err; }
@@@ -5914,13 -5946,10 +5925,13 @@@ static int nl80211_authenticate(struct if (local_state_change) return 0;
- return cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, - ssid, ssid_len, ie, ie_len, - key.p.key, key.p.key_len, key.idx, - sae_data, sae_data_len); + wdev_lock(dev->ieee80211_ptr); + err = cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid, + ssid, ssid_len, ie, ie_len, + key.p.key, key.p.key_len, key.idx, + sae_data, sae_data_len); + wdev_unlock(dev->ieee80211_ptr); + return err; }
static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev, @@@ -6087,12 -6116,9 +6098,12 @@@ static int nl80211_associate(struct sk_ }
err = nl80211_crypto_settings(rdev, info, &req.crypto, 1); - if (!err) + if (!err) { + wdev_lock(dev->ieee80211_ptr); err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, ssid, ssid_len, &req); + wdev_unlock(dev->ieee80211_ptr); + }
return err; } @@@ -6102,7 -6128,7 +6113,7 @@@ static int nl80211_deauthenticate(struc struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; const u8 *ie = NULL, *bssid; - int ie_len = 0; + int ie_len = 0, err; u16 reason_code; bool local_state_change;
@@@ -6137,11 -6163,8 +6148,11 @@@
local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
- return cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code, - local_state_change); + wdev_lock(dev->ieee80211_ptr); + err = cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code, + local_state_change); + wdev_unlock(dev->ieee80211_ptr); + return err; }
static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info) @@@ -6149,7 -6172,7 +6160,7 @@@ struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; const u8 *ie = NULL, *bssid; - int ie_len = 0; + int ie_len = 0, err; u16 reason_code; bool local_state_change;
@@@ -6184,11 -6207,8 +6195,11 @@@
local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
- return cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code, - local_state_change); + wdev_lock(dev->ieee80211_ptr); + err = cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code, + local_state_change); + wdev_unlock(dev->ieee80211_ptr); + return err; }
static bool @@@ -6406,8 -6426,6 +6417,8 @@@ static int nl80211_testmode_dump(struc void *data = NULL; int data_len = 0;
+ rtnl_lock(); + if (cb->args[0]) { /* * 0 is a valid index, but not valid for args[0], @@@ -6419,16 -6437,18 +6430,16 @@@ nl80211_fam.attrbuf, nl80211_fam.maxattr, nl80211_policy); if (err) - return err; + goto out_err;
rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), nl80211_fam.attrbuf); if (IS_ERR(rdev)) { - mutex_unlock(&cfg80211_mutex); - return PTR_ERR(rdev); + err = PTR_ERR(rdev); + goto out_err; } phy_idx = rdev->wiphy_idx; rdev = NULL; - mutex_unlock(&cfg80211_mutex);
if (nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA]) cb->args[1] = @@@ -6440,11 -6460,14 +6451,11 @@@ data_len = nla_len((void *)cb->args[1]); }
- mutex_lock(&cfg80211_mutex); rdev = cfg80211_rdev_by_wiphy_idx(phy_idx); if (!rdev) { - mutex_unlock(&cfg80211_mutex); - return -ENOENT; + err = -ENOENT; + goto out_err; } - cfg80211_lock_rdev(rdev); - mutex_unlock(&cfg80211_mutex);
if (!rdev->ops->testmode_dump) { err = -EOPNOTSUPP; @@@ -6485,7 -6508,7 +6496,7 @@@ /* see above */ cb->args[0] = phy_idx + 1; out_err: - cfg80211_unlock_rdev(rdev); + rtnl_unlock(); return err; }
@@@ -6693,9 -6716,7 +6704,9 @@@ static int nl80211_connect(struct sk_bu sizeof(connect.vht_capa)); }
- err = cfg80211_connect(rdev, dev, &connect, connkeys); + wdev_lock(dev->ieee80211_ptr); + err = cfg80211_connect(rdev, dev, &connect, connkeys, NULL); + wdev_unlock(dev->ieee80211_ptr); if (err) kfree(connkeys); return err; @@@ -6706,7 -6727,6 +6717,7 @@@ static int nl80211_disconnect(struct sk struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; u16 reason; + int ret;
if (!info->attrs[NL80211_ATTR_REASON_CODE]) reason = WLAN_REASON_DEAUTH_LEAVING; @@@ -6720,10 -6740,7 +6731,10 @@@ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP;
- return cfg80211_disconnect(rdev, dev, reason, true); + wdev_lock(dev->ieee80211_ptr); + ret = cfg80211_disconnect(rdev, dev, reason, true); + wdev_unlock(dev->ieee80211_ptr); + return ret; }
static int nl80211_wiphy_netns(struct sk_buff *skb, struct genl_info *info) @@@ -7499,29 -7516,28 +7510,29 @@@ static int nl80211_leave_mesh(struct sk static int nl80211_send_wowlan_patterns(struct sk_buff *msg, struct cfg80211_registered_device *rdev) { + struct cfg80211_wowlan *wowlan = rdev->wiphy.wowlan_config; struct nlattr *nl_pats, *nl_pat; int i, pat_len;
- if (!rdev->wowlan->n_patterns) + if (!wowlan->n_patterns) return 0;
nl_pats = nla_nest_start(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN); if (!nl_pats) return -ENOBUFS;
- for (i = 0; i < rdev->wowlan->n_patterns; i++) { + for (i = 0; i < wowlan->n_patterns; i++) { nl_pat = nla_nest_start(msg, i + 1); if (!nl_pat) return -ENOBUFS; - pat_len = rdev->wowlan->patterns[i].pattern_len; + pat_len = wowlan->patterns[i].pattern_len; if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK, DIV_ROUND_UP(pat_len, 8), - rdev->wowlan->patterns[i].mask) || + wowlan->patterns[i].mask) || nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN, - pat_len, rdev->wowlan->patterns[i].pattern) || + pat_len, wowlan->patterns[i].pattern) || nla_put_u32(msg, NL80211_WOWLAN_PKTPAT_OFFSET, - rdev->wowlan->patterns[i].pkt_offset)) + wowlan->patterns[i].pkt_offset)) return -ENOBUFS; nla_nest_end(msg, nl_pat); } @@@ -7584,12 -7600,12 +7595,12 @@@ static int nl80211_get_wowlan(struct sk !rdev->wiphy.wowlan.tcp) return -EOPNOTSUPP;
- if (rdev->wowlan && rdev->wowlan->tcp) { + if (rdev->wiphy.wowlan_config && rdev->wiphy.wowlan_config->tcp) { /* adjust size to have room for all the data */ - size += rdev->wowlan->tcp->tokens_size + - rdev->wowlan->tcp->payload_len + - rdev->wowlan->tcp->wake_len + - rdev->wowlan->tcp->wake_len / 8; + size += rdev->wiphy.wowlan_config->tcp->tokens_size + + rdev->wiphy.wowlan_config->tcp->payload_len + + rdev->wiphy.wowlan_config->tcp->wake_len + + rdev->wiphy.wowlan_config->tcp->wake_len / 8; }
msg = nlmsg_new(size, GFP_KERNEL); @@@ -7601,34 -7617,33 +7612,34 @@@ if (!hdr) goto nla_put_failure;
- if (rdev->wowlan) { + if (rdev->wiphy.wowlan_config) { struct nlattr *nl_wowlan;
nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS); if (!nl_wowlan) goto nla_put_failure;
- if ((rdev->wowlan->any && + if ((rdev->wiphy.wowlan_config->any && nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) || - (rdev->wowlan->disconnect && + (rdev->wiphy.wowlan_config->disconnect && nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) || - (rdev->wowlan->magic_pkt && + (rdev->wiphy.wowlan_config->magic_pkt && nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) || - (rdev->wowlan->gtk_rekey_failure && + (rdev->wiphy.wowlan_config->gtk_rekey_failure && nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) || - (rdev->wowlan->eap_identity_req && + (rdev->wiphy.wowlan_config->eap_identity_req && nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) || - (rdev->wowlan->four_way_handshake && + (rdev->wiphy.wowlan_config->four_way_handshake && nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) || - (rdev->wowlan->rfkill_release && + (rdev->wiphy.wowlan_config->rfkill_release && nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE))) goto nla_put_failure;
if (nl80211_send_wowlan_patterns(msg, rdev)) goto nla_put_failure;
- if (nl80211_send_wowlan_tcp(msg, rdev->wowlan->tcp)) + if (nl80211_send_wowlan_tcp(msg, + rdev->wiphy.wowlan_config->tcp)) goto nla_put_failure;
nla_nest_end(msg, nl_wowlan); @@@ -7795,7 -7810,7 +7806,7 @@@ static int nl80211_set_wowlan(struct sk struct cfg80211_wowlan *ntrig; struct wiphy_wowlan_support *wowlan = &rdev->wiphy.wowlan; int err, i; - bool prev_enabled = rdev->wowlan; + bool prev_enabled = rdev->wiphy.wowlan_config;
if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns && !rdev->wiphy.wowlan.tcp) @@@ -7803,7 -7818,7 +7814,7 @@@
if (!info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]) { cfg80211_rdev_free_wowlan(rdev); - rdev->wowlan = NULL; + rdev->wiphy.wowlan_config = NULL; goto set_wakeup; }
@@@ -7939,12 -7954,11 +7950,12 @@@ goto error; } cfg80211_rdev_free_wowlan(rdev); - rdev->wowlan = ntrig; + rdev->wiphy.wowlan_config = ntrig;
set_wakeup: - if (rdev->ops->set_wakeup && prev_enabled != !!rdev->wowlan) - rdev_set_wakeup(rdev, rdev->wowlan); + if (rdev->ops->set_wakeup && + prev_enabled != !!rdev->wiphy.wowlan_config) + rdev_set_wakeup(rdev, rdev->wiphy.wowlan_config);
return 0; error: @@@ -8129,7 -8143,9 +8140,7 @@@ static int nl80211_start_p2p_device(str if (wdev->p2p_started) return 0;
- mutex_lock(&rdev->devlist_mtx); err = cfg80211_can_add_interface(rdev, wdev->iftype); - mutex_unlock(&rdev->devlist_mtx); if (err) return err;
@@@ -8138,7 -8154,9 +8149,7 @@@ return err;
wdev->p2p_started = true; - mutex_lock(&rdev->devlist_mtx); rdev->opencount++; - mutex_unlock(&rdev->devlist_mtx);
return 0; } @@@ -8154,7 -8172,11 +8165,7 @@@ static int nl80211_stop_p2p_device(stru if (!rdev->ops->stop_p2p_device) return -EOPNOTSUPP;
- mutex_lock(&rdev->devlist_mtx); - mutex_lock(&rdev->sched_scan_mtx); cfg80211_stop_p2p_device(rdev, wdev); - mutex_unlock(&rdev->sched_scan_mtx); - mutex_unlock(&rdev->devlist_mtx);
return 0; } @@@ -8297,11 -8319,11 +8308,11 @@@ static int nl80211_pre_doit(struct genl info->user_ptr[0] = rdev; } else if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV || ops->internal_flags & NL80211_FLAG_NEED_WDEV) { - mutex_lock(&cfg80211_mutex); + ASSERT_RTNL(); + wdev = __cfg80211_wdev_from_attrs(genl_info_net(info), info->attrs); if (IS_ERR(wdev)) { - mutex_unlock(&cfg80211_mutex); if (rtnl) rtnl_unlock(); return PTR_ERR(wdev); @@@ -8312,6 -8334,7 +8323,6 @@@
if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV) { if (!dev) { - mutex_unlock(&cfg80211_mutex); if (rtnl) rtnl_unlock(); return -EINVAL; @@@ -8325,6 -8348,7 +8336,6 @@@ if (dev) { if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP && !netif_running(dev)) { - mutex_unlock(&cfg80211_mutex); if (rtnl) rtnl_unlock(); return -ENETDOWN; @@@ -8333,12 -8357,17 +8344,12 @@@ dev_hold(dev); } else if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP) { if (!wdev->p2p_started) { - mutex_unlock(&cfg80211_mutex); if (rtnl) rtnl_unlock(); return -ENETDOWN; } }
- cfg80211_lock_rdev(rdev); - - mutex_unlock(&cfg80211_mutex); - info->user_ptr[0] = rdev; }
@@@ -8348,6 -8377,8 +8359,6 @@@ static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info) { - if (info->user_ptr[0]) - cfg80211_unlock_rdev(info->user_ptr[0]); if (info->user_ptr[1]) { if (ops->internal_flags & NL80211_FLAG_NEED_WDEV) { struct wireless_dev *wdev = info->user_ptr[1]; @@@ -8369,8 -8400,7 +8380,8 @@@ static struct genl_ops nl80211_ops[] = .dumpit = nl80211_dump_wiphy, .policy = nl80211_policy, /* can be retrieved by unprivileged users */ - .internal_flags = NL80211_FLAG_NEED_WIPHY, + .internal_flags = NL80211_FLAG_NEED_WIPHY | + NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_WIPHY, @@@ -8385,8 -8415,7 +8396,8 @@@ .dumpit = nl80211_dump_interface, .policy = nl80211_policy, /* can be retrieved by unprivileged users */ - .internal_flags = NL80211_FLAG_NEED_WDEV, + .internal_flags = NL80211_FLAG_NEED_WDEV | + NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_INTERFACE, @@@ -8545,7 -8574,6 +8556,7 @@@ .cmd = NL80211_CMD_GET_REG, .doit = nl80211_get_reg, .policy = nl80211_policy, + .internal_flags = NL80211_FLAG_NEED_RTNL, /* can be retrieved by unprivileged users */ }, { @@@ -8553,7 -8581,6 +8564,7 @@@ .doit = nl80211_set_reg, .policy = nl80211_policy, .flags = GENL_ADMIN_PERM, + .internal_flags = NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_REQ_SET_REG, @@@ -9009,6 -9036,8 +9020,6 @@@ static int nl80211_add_scan_req(struct struct nlattr *nest; int i;
- lockdep_assert_held(&rdev->sched_scan_mtx); - if (WARN_ON(!req)) return 0;
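For reference, the rtnl_lock()/rtnl_unlock() pairing that replaces cfg80211_mutex in nl80211_prepare_wdev_dump(), nl80211_finish_wdev_dump() and nl80211_dump_wiphy() above follows the usual netlink dump convention. The sketch below is only an illustration of that shape, not code from this patch; my_fill_entries() is a hypothetical helper standing in for the per-device iteration.

#include <linux/rtnetlink.h>
#include <net/netlink.h>

/* Sketch: a dump callback that does its own RTNL locking and drops the
 * lock on every return path before handing control back to netlink.
 */
static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;

	rtnl_lock();

	err = my_fill_entries(skb, cb);	/* hypothetical helper */
	if (err < 0)
		goto out_unlock;

	err = skb->len;	/* non-zero tells netlink to call us again */

out_unlock:
	rtnl_unlock();
	return err;
}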