The following commit has been merged in the master branch:

commit c134d52434dca8be3145c182887af60b723882a4
Merge: c19d6b23ffe944a666999f7200a2af8352c209b6 2e0c9e7911465b29daf85f7de97949004bf7b31c
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Mon Jun 17 12:44:40 2013 +1000

    Merge remote-tracking branch 'net-next/master'

    Conflicts:
    	drivers/net/wireless/ath/ath9k/Kconfig
    	drivers/net/xen-netback/netback.c
    	net/batman-adv/bat_iv_ogm.c
diff --combined Documentation/devicetree/bindings/vendor-prefixes.txt
index be80992,2fe74e6..bebbdf8
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@@ -18,6 -18,7 +18,7 @@@ chrp	Common Hardware Reference Platfor
  cirrus	Cirrus Logic, Inc.
  cortina	Cortina Systems, Inc.
  dallas	Maxim Integrated Products (formerly Dallas Semiconductor)
+ davicom	DAVICOM Semiconductor, Inc.
  denx	Denx Software Engineering
  emmicro	EM Microelectronic
  epson	Seiko Epson Corp.
@@@ -57,7 -58,6 +58,7 @@@ snps	Synopsys, Inc
  st	STMicroelectronics
  ste	ST-Ericsson
  stericsson	ST-Ericsson
 +toumaz	Toumaz
  ti	Texas Instruments
  toshiba	Toshiba Corporation
  via	VIA Technologies, Inc.
diff --combined MAINTAINERS
index dc610a4,0518ec4..417f249
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -180,11 -180,6 +180,11 @@@ T:	git git://git.kernel.org/pub/scm/lin
  S:	Maintained
  F:	Documentation/filesystems/9p.txt
  F:	fs/9p/
 +F:	net/9p/
 +F:	include/net/9p/
 +F:	include/uapi/linux/virtio_9p.h
 +F:	include/trace/events/9p.h
 +
A8293 MEDIA DRIVER M: Antti Palosaari crope@iki.fi @@@ -1314,7 -1309,6 +1314,7 @@@ W: http://wiki.xilinx.co T: git git://git.xilinx.com/linux-xlnx.git S: Supported F: arch/arm/mach-zynq/ +F: drivers/cpuidle/cpuidle-zynq.c
ARM64 PORT (AARCH64 ARCHITECTURE) M: Catalin Marinas catalin.marinas@arm.com @@@ -2104,9 -2098,7 +2104,9 @@@ COCCINELLE/Semantic Patches (SmPL M: Julia Lawall Julia.Lawall@lip6.fr M: Gilles Muller Gilles.Muller@lip6.fr M: Nicolas Palix nicolas.palix@imag.fr +M: Michal Marek mmarek@suse.cz L: cocci@systeme.lip6.fr (moderated for non-subscribers) +T: git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git misc W: http://coccinelle.lip6.fr/ S: Supported F: scripts/coccinelle/ @@@ -2223,8 -2215,7 +2223,8 @@@ M: Viresh Kumar <viresh.kumar@linaro.or L: cpufreq@vger.kernel.org L: linux-pm@vger.kernel.org S: Maintained -T: git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git +T: git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates) F: drivers/cpufreq/ F: include/linux/cpufreq.h
@@@ -2308,6 -2299,11 +2308,11 @@@ M: Jaya Kumar <jayakumar.alsa@gmail.com S: Maintained F: sound/pci/cs5535audio/
+ CW1200 WLAN driver + M: Solomon Peachy pizza@shaftnet.org + S: Maintained + F: drivers/net/wireless/cw1200/ + CX18 VIDEO4LINUX DRIVER M: Andy Walls awalls@md.metrocast.net L: ivtv-devel@ivtvdriver.org (moderated for non-subscribers) @@@ -2525,7 -2521,7 +2530,7 @@@ F: drivers/usb/dwc3 DEVICE FREQUENCY (DEVFREQ) M: MyungJoo Ham myungjoo.ham@samsung.com M: Kyungmin Park kyungmin.park@samsung.com -L: linux-kernel@vger.kernel.org +L: linux-pm@vger.kernel.org S: Maintained F: drivers/devfreq/
@@@ -2899,8 -2895,8 +2904,8 @@@ F: drivers/media/dvb-frontends/ec100
ECRYPT FILE SYSTEM M: Tyler Hicks tyhicks@canonical.com -M: Dustin Kirkland dustin.kirkland@gazzang.com L: ecryptfs@vger.kernel.org +W: http://ecryptfs.org W: https://launchpad.net/ecryptfs S: Supported F: Documentation/filesystems/ecryptfs.txt @@@ -4457,16 -4453,6 +4462,16 @@@ S: Maintaine F: drivers/scsi/*iscsi* F: include/scsi/*iscsi*
+ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR +M: Or Gerlitz ogerlitz@mellanox.com +M: Roi Dayan roid@mellanox.com +L: linux-rdma@vger.kernel.org +S: Supported +W: http://www.openfabrics.org +W: www.open-iscsi.org +Q: http://patchwork.kernel.org/project/linux-rdma/list/ +F: drivers/infiniband/ulp/iser + ISDN SUBSYSTEM M: Karsten Keil isdn@linux-pingi.de L: isdn4linux@listserv.isdn4linux.de (subscribers-only) @@@ -4728,15 -4714,6 +4733,15 @@@ F: arch/arm/include/uapi/asm/kvm F: arch/arm/include/asm/kvm* F: arch/arm/kvm/
+KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64) +M: Marc Zyngier marc.zyngier@arm.com +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +L: kvmarm@lists.cs.columbia.edu +S: Maintained +F: arch/arm64/include/uapi/asm/kvm* +F: arch/arm64/include/asm/kvm* +F: arch/arm64/kvm/ + KEXEC M: Eric Biederman ebiederm@xmission.com W: http://kernel.org/pub/linux/utils/kernel/kexec/ @@@ -5784,7 -5761,7 +5789,7 @@@ M: Matthew Wilcox <willy@linux.intel.co L: linux-nvme@lists.infradead.org T: git git://git.infradead.org/users/willy/linux-nvme.git S: Supported -F: drivers/block/nvme.c +F: drivers/block/nvme* F: include/linux/nvme.h
OMAP SUPPORT @@@ -7008,7 -6985,8 +7013,7 @@@ SYNOPSYS DESIGNWARE DMAC DRIVE M: Viresh Kumar viresh.linux@gmail.com S: Maintained F: include/linux/dw_dmac.h -F: drivers/dma/dw_dmac_regs.h -F: drivers/dma/dw_dmac.c +F: drivers/dma/dw/
SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER M: Seungwon Jeon tgih.jun@samsung.com @@@ -7641,7 -7619,7 +7646,7 @@@ F: drivers/clk/spear SPI SUBSYSTEM M: Mark Brown broonie@kernel.org M: Grant Likely grant.likely@linaro.org -L: spi-devel-general@lists.sourceforge.net +L: linux-spi@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi.git Q: http://patchwork.kernel.org/project/spi-devel-general/list/ S: Maintained @@@ -8108,8 -8086,8 +8113,8 @@@ F: drivers/platform/x86/thinkpad_acpi. TI BANDGAP AND THERMAL DRIVER M: Eduardo Valentin eduardo.valentin@ti.com L: linux-pm@vger.kernel.org -S: Maintained -F: drivers/staging/omap-thermal/ +S: Supported +F: drivers/thermal/ti-soc-thermal/
TI FLASH MEDIA INTERFACE DRIVER M: Alex Dubov oakad@yahoo.com @@@ -9021,7 -8999,7 +9026,7 @@@ S: Maintaine F: drivers/net/wireless/wl3501*
WM97XX TOUCHSCREEN DRIVERS -M: Mark Brown broonie@opensource.wolfsonmicro.com +M: Mark Brown broonie@kernel.org M: Liam Girdwood lrg@slimlogic.co.uk L: linux-input@vger.kernel.org T: git git://opensource.wolfsonmicro.com/linux-2.6-touch @@@ -9031,6 -9009,7 +9036,6 @@@ F: drivers/input/touchscreen/*wm97 F: include/linux/wm97xx.h
WOLFSON MICROELECTRONICS DRIVERS -M: Mark Brown broonie@opensource.wolfsonmicro.com L: patches@opensource.wolfsonmicro.com T: git git://opensource.wolfsonmicro.com/linux-2.6-asoc T: git git://opensource.wolfsonmicro.com/linux-2.6-audioplus @@@ -9131,13 -9110,6 +9136,13 @@@ S: Supporte F: arch/arm/xen/ F: arch/arm/include/asm/xen/
+XEN HYPERVISOR ARM64 +M: Stefano Stabellini stefano.stabellini@eu.citrix.com +L: xen-devel@lists.xensource.com (moderated for non-subscribers) +S: Supported +F: arch/arm64/xen/ +F: arch/arm64/include/asm/xen/ + XEN NETWORK BACKEND DRIVER M: Ian Campbell ian.campbell@citrix.com L: xen-devel@lists.xensource.com (moderated for non-subscribers) diff --combined drivers/net/bonding/bond_main.c index 02d9ae7,bc1246f..3b31c19 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@@ -706,45 -706,6 +706,6 @@@ static int bond_set_allmulti(struct bon return err; }
- /* - * Add a Multicast address to slaves - * according to mode - */ - static void bond_mc_add(struct bonding *bond, void *addr) - { - if (USES_PRIMARY(bond->params.mode)) { - /* write lock already acquired */ - if (bond->curr_active_slave) - dev_mc_add(bond->curr_active_slave->dev, addr); - } else { - struct slave *slave; - int i; - - bond_for_each_slave(bond, slave, i) - dev_mc_add(slave->dev, addr); - } - } - - /* - * Remove a multicast address from slave - * according to mode - */ - static void bond_mc_del(struct bonding *bond, void *addr) - { - if (USES_PRIMARY(bond->params.mode)) { - /* write lock already acquired */ - if (bond->curr_active_slave) - dev_mc_del(bond->curr_active_slave->dev, addr); - } else { - struct slave *slave; - int i; - bond_for_each_slave(bond, slave, i) { - dev_mc_del(slave->dev, addr); - } - } - } - - static void __bond_resend_igmp_join_requests(struct net_device *dev) { struct in_device *in_dev; @@@ -764,8 -725,8 +725,8 @@@ static void bond_resend_igmp_join_reque struct net_device *bond_dev, *vlan_dev, *upper_dev; struct vlan_entry *vlan;
- rcu_read_lock(); read_lock(&bond->lock); + rcu_read_lock();
bond_dev = bond->dev;
@@@ -787,19 -748,12 +748,19 @@@ if (vlan_dev) __bond_resend_igmp_join_requests(vlan_dev); } + rcu_read_unlock();
- if (--bond->igmp_retrans > 0) + /* We use curr_slave_lock to protect against concurrent access to + * igmp_retrans from multiple running instances of this function and + * bond_change_active_slave + */ + write_lock_bh(&bond->curr_slave_lock); + if (bond->igmp_retrans > 1) { + bond->igmp_retrans--; queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); - + } + write_unlock_bh(&bond->curr_slave_lock); read_unlock(&bond->lock); - rcu_read_unlock(); }
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work) @@@ -810,17 -764,15 +771,15 @@@ bond_resend_igmp_join_requests(bond); }
- /* - * flush all members of flush->mc_list from device dev->mc_list + /* Flush bond's hardware addresses from slave */ - static void bond_mc_list_flush(struct net_device *bond_dev, + static void bond_hw_addr_flush(struct net_device *bond_dev, struct net_device *slave_dev) { struct bonding *bond = netdev_priv(bond_dev); - struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(ha, bond_dev) - dev_mc_del(slave_dev, ha->addr); + dev_uc_unsync(slave_dev, bond_dev); + dev_mc_unsync(slave_dev, bond_dev);
if (bond->params.mode == BOND_MODE_8023AD) { /* del lacpdu mc addr from mc list */ @@@ -832,22 -784,14 +791,14 @@@
/*--------------------------- Active slave change ---------------------------*/
- /* - * Update the mc list and multicast-related flags for the new and - * old active slaves (if any) according to the multicast mode, and - * promiscuous flags unconditionally. + /* Update the hardware address list and promisc/allmulti for the new and + * old active slaves (if any). Modes that are !USES_PRIMARY keep all + * slaves up date at all times; only the USES_PRIMARY modes need to call + * this function to swap these settings during a failover. */ - static void bond_mc_swap(struct bonding *bond, struct slave *new_active, - struct slave *old_active) + static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, + struct slave *old_active) { - struct netdev_hw_addr *ha; - - if (!USES_PRIMARY(bond->params.mode)) - /* nothing to do - mc list is already up-to-date on - * all slaves - */ - return; - if (old_active) { if (bond->dev->flags & IFF_PROMISC) dev_set_promiscuity(old_active->dev, -1); @@@ -855,10 -799,7 +806,7 @@@ if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(old_active->dev, -1);
- netif_addr_lock_bh(bond->dev); - netdev_for_each_mc_addr(ha, bond->dev) - dev_mc_del(old_active->dev, ha->addr); - netif_addr_unlock_bh(bond->dev); + bond_hw_addr_flush(bond->dev, old_active->dev); }
if (new_active) { @@@ -870,8 -811,8 +818,8 @@@ dev_set_allmulti(new_active->dev, 1);
netif_addr_lock_bh(bond->dev); - netdev_for_each_mc_addr(ha, bond->dev) - dev_mc_add(new_active->dev, ha->addr); + dev_uc_sync(new_active->dev, bond->dev); + dev_mc_sync(new_active->dev, bond->dev); netif_addr_unlock_bh(bond->dev); } } @@@ -1090,7 -1031,7 +1038,7 @@@ void bond_change_active_slave(struct bo }
if (USES_PRIMARY(bond->params.mode)) - bond_mc_swap(bond, new_active, old_active); + bond_hw_addr_swap(bond, new_active, old_active);
if (bond_is_lb(bond)) { bond_alb_handle_active_change(bond, new_active); @@@ -1533,7 -1474,6 +1481,6 @@@ int bond_enslave(struct net_device *bon struct bonding *bond = netdev_priv(bond_dev); const struct net_device_ops *slave_ops = slave_dev->netdev_ops; struct slave *new_slave = NULL; - struct netdev_hw_addr *ha; struct sockaddr addr; int link_reporting; int res = 0; @@@ -1713,10 -1653,8 +1660,8 @@@ goto err_close; }
- /* If the mode USES_PRIMARY, then the new slave gets the - * master's promisc (and mc) settings only if it becomes the - * curr_active_slave, and that is taken care of later when calling - * bond_change_active() + /* If the mode USES_PRIMARY, then the following is handled by + * bond_change_active_slave(). */ if (!USES_PRIMARY(bond->params.mode)) { /* set promiscuity level to new slave */ @@@ -1734,9 -1672,10 +1679,10 @@@ }
netif_addr_lock_bh(bond_dev); - /* upload master's mc_list to new slave */ - netdev_for_each_mc_addr(ha, bond_dev) - dev_mc_add(slave_dev, ha->addr); + + dev_mc_sync_multiple(slave_dev, bond_dev); + dev_uc_sync_multiple(slave_dev, bond_dev); + netif_addr_unlock_bh(bond_dev); }
@@@ -1915,11 -1854,9 +1861,9 @@@ err_dest_symlinks bond_destroy_slave_symlinks(bond_dev, slave_dev);
err_detach: - if (!USES_PRIMARY(bond->params.mode)) { - netif_addr_lock_bh(bond_dev); - bond_mc_list_flush(bond_dev, slave_dev); - netif_addr_unlock_bh(bond_dev); - } + if (!USES_PRIMARY(bond->params.mode)) + bond_hw_addr_flush(bond_dev, slave_dev); + bond_del_vlans_from_slave(bond, slave_dev); write_lock_bh(&bond->lock); bond_detach_slave(bond, new_slave); @@@ -1964,10 -1901,6 +1908,10 @@@ err_free
err_undo_flags: bond_compute_features(bond); + /* Enslave of first slave has failed and we need to fix master's mac */ + if (bond->slave_cnt == 0 && + ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr)) + eth_hw_addr_random(bond_dev);
return res; } @@@ -2118,9 -2051,8 +2062,8 @@@ static int __bond_release_one(struct ne
bond_del_vlans_from_slave(bond, slave_dev);
- /* If the mode USES_PRIMARY, then we should only remove its - * promisc and mc settings if it was the curr_active_slave, but that was - * already taken care of above when we detached the slave + /* If the mode USES_PRIMARY, then this cases was handled above by + * bond_change_active_slave(..., NULL) */ if (!USES_PRIMARY(bond->params.mode)) { /* unset promiscuity level from slave */ @@@ -2131,10 -2063,7 +2074,7 @@@ if (bond_dev->flags & IFF_ALLMULTI) dev_set_allmulti(slave_dev, -1);
- /* flush master's mc_list from slave */ - netif_addr_lock_bh(bond_dev); - bond_mc_list_flush(bond_dev, slave_dev); - netif_addr_unlock_bh(bond_dev); + bond_hw_addr_flush(bond_dev, slave_dev); }
bond_upper_dev_unlink(bond_dev, slave_dev); @@@ -3288,7 -3217,7 +3228,7 @@@ static int bond_slave_netdev_event(unsi static int bond_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *event_dev = (struct net_device *)ptr; + struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
pr_debug("event_dev: %s, event: %lx\n", event_dev ? event_dev->name : "None", @@@ -3671,19 -3600,6 +3611,6 @@@ static int bond_do_ioctl(struct net_dev return res; }
- static bool bond_addr_in_mc_list(unsigned char *addr, - struct netdev_hw_addr_list *list, - int addrlen) - { - struct netdev_hw_addr *ha; - - netdev_hw_addr_list_for_each(ha, list) - if (!memcmp(ha->addr, addr, addrlen)) - return true; - - return false; - } - static void bond_change_rx_flags(struct net_device *bond_dev, int change) { struct bonding *bond = netdev_priv(bond_dev); @@@ -3697,35 -3613,29 +3624,29 @@@ bond_dev->flags & IFF_ALLMULTI ? 1 : -1); }
- static void bond_set_multicast_list(struct net_device *bond_dev) + static void bond_set_rx_mode(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); - struct netdev_hw_addr *ha; - bool found; + struct slave *slave; + int i;
read_lock(&bond->lock);
- /* looking for addresses to add to slaves' mc list */ - netdev_for_each_mc_addr(ha, bond_dev) { - found = bond_addr_in_mc_list(ha->addr, &bond->mc_list, - bond_dev->addr_len); - if (!found) - bond_mc_add(bond, ha->addr); - } - - /* looking for addresses to delete from slaves' list */ - netdev_hw_addr_list_for_each(ha, &bond->mc_list) { - found = bond_addr_in_mc_list(ha->addr, &bond_dev->mc, - bond_dev->addr_len); - if (!found) - bond_mc_del(bond, ha->addr); + if (USES_PRIMARY(bond->params.mode)) { + read_lock(&bond->curr_slave_lock); + slave = bond->curr_active_slave; + if (slave) { + dev_uc_sync(slave->dev, bond_dev); + dev_mc_sync(slave->dev, bond_dev); + } + read_unlock(&bond->curr_slave_lock); + } else { + bond_for_each_slave(bond, slave, i) { + dev_uc_sync_multiple(slave->dev, bond_dev); + dev_mc_sync_multiple(slave->dev, bond_dev); + } }
- /* save master's multicast list */ - __hw_addr_flush(&bond->mc_list); - __hw_addr_add_multiple(&bond->mc_list, &bond_dev->mc, - bond_dev->addr_len, NETDEV_HW_ADDR_T_MULTICAST); - read_unlock(&bond->lock); }
@@@ -3870,11 -3780,10 +3791,10 @@@ static int bond_set_mac_address(struct pr_debug("bond=%p, name=%s\n", bond, bond_dev ? bond_dev->name : "None");
- /* - * If fail_over_mac is set to active, do nothing and return - * success. Returning an error causes ifenslave to fail. + /* If fail_over_mac is enabled, do nothing and return success. + * Returning an error causes ifenslave to fail. */ - if (bond->params.fail_over_mac == BOND_FOM_ACTIVE) + if (bond->params.fail_over_mac) return 0;
if (!is_valid_ether_addr(sa->sa_data)) @@@ -4332,7 -4241,7 +4252,7 @@@ static const struct net_device_ops bond .ndo_get_stats64 = bond_get_stats, .ndo_do_ioctl = bond_do_ioctl, .ndo_change_rx_flags = bond_change_rx_flags, - .ndo_set_rx_mode = bond_set_multicast_list, + .ndo_set_rx_mode = bond_set_rx_mode, .ndo_change_mtu = bond_change_mtu, .ndo_set_mac_address = bond_set_mac_address, .ndo_neigh_setup = bond_neigh_setup, @@@ -4437,8 -4346,6 +4357,6 @@@ static void bond_uninit(struct net_devi
bond_debug_unregister(bond);
- __hw_addr_flush(&bond->mc_list); - list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) { list_del(&vlan->vlan_list); kfree(vlan); @@@ -4849,7 -4756,6 +4767,6 @@@ static int bond_init(struct net_device bond->dev_addr_from_first = true; }
- __hw_addr_init(&bond->mc_list); return 0; }
diff --combined drivers/net/bonding/bonding.h index f989e15,b38609b..c990b42 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@@ -225,13 -225,12 +225,12 @@@ struct bonding rwlock_t curr_slave_lock; u8 send_peer_notif; s8 setup_by_slave; - s8 igmp_retrans; + u8 igmp_retrans; #ifdef CONFIG_PROC_FS struct proc_dir_entry *proc_entry; char proc_file_name[IFNAMSIZ]; #endif /* CONFIG_PROC_FS */ struct list_head bond_list; - struct netdev_hw_addr_list mc_list; int (*xmit_hash_policy)(struct sk_buff *, int); u16 rr_tx_counter; struct ad_bond_info ad_info; diff --combined drivers/net/ethernet/broadcom/tg3.c index c777b90,28a645f..a53bd18 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@@ -965,9 -965,6 +965,6 @@@ static void tg3_ape_driver_state_change
event = APE_EVENT_STATUS_STATE_UNLOAD; break; - case RESET_KIND_SUSPEND: - event = APE_EVENT_STATUS_STATE_SUSPEND; - break; default: return; } @@@ -1314,8 -1311,8 +1311,8 @@@ static int tg3_phy_toggle_auxctl_smdsp(
if (err) return err; - if (enable)
+ if (enable) val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA; else val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA; @@@ -1739,10 -1736,6 +1736,6 @@@ static void tg3_write_sig_pre_reset(str break; } } - - if (kind == RESET_KIND_INIT || - kind == RESET_KIND_SUSPEND) - tg3_ape_driver_state_change(tp, kind); }
/* tp->lock is held. */ @@@ -1764,9 -1757,6 +1757,6 @@@ static void tg3_write_sig_post_reset(st break; } } - - if (kind == RESET_KIND_SHUTDOWN) - tg3_ape_driver_state_change(tp, kind); }
/* tp->lock is held. */ @@@ -1800,9 -1790,6 +1790,9 @@@ static int tg3_poll_fw(struct tg3 *tp int i; u32 val;
+ if (tg3_flag(tp, NO_FWARE_REPORTED)) + return 0; + if (tg3_flag(tp, IS_SSB_CORE)) { /* We don't use firmware. */ return 0; @@@ -2323,6 -2310,46 +2313,46 @@@ static void tg3_phy_apply_otp(struct tg tg3_phy_toggle_auxctl_smdsp(tp, false); }
+ static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee) + { + u32 val; + struct ethtool_eee *dest = &tp->eee; + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) + return; + + if (eee) + dest = eee; + + if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val)) + return; + + /* Pull eee_active */ + if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || + val == TG3_CL45_D7_EEERES_STAT_LP_100TX) { + dest->eee_active = 1; + } else + dest->eee_active = 0; + + /* Pull lp advertised settings */ + if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val)) + return; + dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); + + /* Pull advertised and eee_enabled settings */ + if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) + return; + dest->eee_enabled = !!val; + dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val); + + /* Pull tx_lpi_enabled */ + val = tr32(TG3_CPMU_EEE_MODE); + dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX); + + /* Pull lpi timer value */ + dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff; + } + static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up) { u32 val; @@@ -2346,11 -2373,8 +2376,8 @@@
tw32(TG3_CPMU_EEE_CTRL, eeectl);
- tg3_phy_cl45_read(tp, MDIO_MMD_AN, - TG3_CL45_D7_EEERES_STAT, &val); - - if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || - val == TG3_CL45_D7_EEERES_STAT_LP_100TX) + tg3_eee_pull_config(tp, NULL); + if (tp->eee.eee_active) tp->setlpicnt = 2; }
@@@ -4172,6 -4196,8 +4199,8 @@@ static int tg3_power_down_prepare(struc
tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
+ tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN); + return 0; }
@@@ -4272,6 -4298,16 +4301,16 @@@ static int tg3_phy_autoneg_cfg(struct t /* Advertise 1000-BaseT EEE ability */ if (advertise & ADVERTISED_1000baseT_Full) val |= MDIO_AN_EEE_ADV_1000T; + + if (!tp->eee.eee_enabled) { + val = 0; + tp->eee.advertised = 0; + } else { + tp->eee.advertised = advertise & + (ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full); + } + err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); if (err) val = 0; @@@ -4516,26 -4552,23 +4555,23 @@@ static int tg3_init_5401phy_dsp(struct
static bool tg3_phy_eee_config_ok(struct tg3 *tp) { - u32 val; - u32 tgtadv = 0; - u32 advertising = tp->link_config.advertising; + struct ethtool_eee eee;
if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) return true;
- if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) - return false; - - val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T); - + tg3_eee_pull_config(tp, &eee);
- if (advertising & ADVERTISED_100baseT_Full) - tgtadv |= MDIO_AN_EEE_ADV_100TX; - if (advertising & ADVERTISED_1000baseT_Full) - tgtadv |= MDIO_AN_EEE_ADV_1000T; - - if (val != tgtadv) - return false; + if (tp->eee.eee_enabled) { + if (tp->eee.advertised != eee.advertised || + tp->eee.tx_lpi_timer != eee.tx_lpi_timer || + tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled) + return false; + } else { + /* EEE is disabled but we're advertising */ + if (eee.advertised) + return false; + }
return true; } @@@ -4636,6 -4669,42 +4672,42 @@@ static void tg3_clear_mac_status(struc udelay(40); }
+ static void tg3_setup_eee(struct tg3 *tp) + { + u32 val; + + val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 | + TG3_CPMU_EEE_LNKIDL_UART_IDL; + if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) + val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT; + + tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val); + + tw32_f(TG3_CPMU_EEE_CTRL, + TG3_CPMU_EEE_CTRL_EXIT_20_1_US); + + val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET | + (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) | + TG3_CPMU_EEEMD_LPI_IN_RX | + TG3_CPMU_EEEMD_EEE_ENABLE; + + if (tg3_asic_rev(tp) != ASIC_REV_5717) + val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN; + + if (tg3_flag(tp, ENABLE_APE)) + val |= TG3_CPMU_EEEMD_APE_TX_DET_EN; + + tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0); + + tw32_f(TG3_CPMU_EEE_DBTMR1, + TG3_CPMU_DBTMR1_PCIEXIT_2047US | + (tp->eee.tx_lpi_timer & 0xffff)); + + tw32_f(TG3_CPMU_EEE_DBTMR2, + TG3_CPMU_DBTMR2_APE_TX_2047US | + TG3_CPMU_DBTMR2_TXIDXEQ_2047US); + } + static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset) { bool current_link_up; @@@ -4802,8 -4871,10 +4874,10 @@@ */ if (!eee_config_ok && (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && - !force_reset) + !force_reset) { + tg3_setup_eee(tp); tg3_phy_reset(tp); + } } else { if (!(bmcr & BMCR_ANENABLE) && tp->link_config.speed == current_speed && @@@ -6315,9 -6386,7 +6389,7 @@@ static void tg3_tx_recover(struct tg3 * "Please report the problem to the driver maintainer " "and include system chipset information.\n");
- spin_lock(&tp->lock); tg3_flag_set(tp, TX_RECOVERY_PENDING); - spin_unlock(&tp->lock); }
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) @@@ -9169,11 -9238,9 +9241,9 @@@ static void __tg3_set_coalesce(struct t }
/* tp->lock is held. */ - static void tg3_rings_reset(struct tg3 *tp) + static void tg3_tx_rcbs_disable(struct tg3 *tp) { - int i; - u32 stblk, txrcb, rxrcb, limit; - struct tg3_napi *tnapi = &tp->napi[0]; + u32 txrcb, limit;
/* Disable all transmit rings but the first. */ if (!tg3_flag(tp, 5705_PLUS)) @@@ -9190,7 -9257,33 +9260,33 @@@ txrcb < limit; txrcb += TG3_BDINFO_SIZE) tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED); + } + + /* tp->lock is held. */ + static void tg3_tx_rcbs_init(struct tg3 *tp) + { + int i = 0; + u32 txrcb = NIC_SRAM_SEND_RCB; + + if (tg3_flag(tp, ENABLE_TSS)) + i++; + + for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (!tnapi->tx_ring) + continue; + + tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, + (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT), + NIC_SRAM_TX_BUFFER_DESC); + } + }
+ /* tp->lock is held. */ + static void tg3_rx_ret_rcbs_disable(struct tg3 *tp) + { + u32 rxrcb, limit;
/* Disable all receive return rings but the first. */ if (tg3_flag(tp, 5717_PLUS)) @@@ -9208,6 -9301,39 +9304,39 @@@ rxrcb < limit; rxrcb += TG3_BDINFO_SIZE) tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS, BDINFO_FLAGS_DISABLED); + } + + /* tp->lock is held. */ + static void tg3_rx_ret_rcbs_init(struct tg3 *tp) + { + int i = 0; + u32 rxrcb = NIC_SRAM_RCV_RET_RCB; + + if (tg3_flag(tp, ENABLE_RSS)) + i++; + + for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) { + struct tg3_napi *tnapi = &tp->napi[i]; + + if (!tnapi->rx_rcb) + continue; + + tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, + (tp->rx_ret_ring_mask + 1) << + BDINFO_FLAGS_MAXLEN_SHIFT, 0); + } + } + + /* tp->lock is held. */ + static void tg3_rings_reset(struct tg3 *tp) + { + int i; + u32 stblk; + struct tg3_napi *tnapi = &tp->napi[0]; + + tg3_tx_rcbs_disable(tp); + + tg3_rx_ret_rcbs_disable(tp);
/* Disable interrupts */ tw32_mailbox_f(tp->napi[0].int_mbox, 1); @@@ -9244,9 -9370,6 +9373,6 @@@ tw32_tx_mbox(mbox + i * 8, 0); }
- txrcb = NIC_SRAM_SEND_RCB; - rxrcb = NIC_SRAM_RCV_RET_RCB; - /* Clear status block in ram. */ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
@@@ -9256,46 -9379,20 +9382,20 @@@ tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW, ((u64) tnapi->status_mapping & 0xffffffff));
- if (tnapi->tx_ring) { - tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, - (TG3_TX_RING_SIZE << - BDINFO_FLAGS_MAXLEN_SHIFT), - NIC_SRAM_TX_BUFFER_DESC); - txrcb += TG3_BDINFO_SIZE; - } - - if (tnapi->rx_rcb) { - tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, - (tp->rx_ret_ring_mask + 1) << - BDINFO_FLAGS_MAXLEN_SHIFT, 0); - rxrcb += TG3_BDINFO_SIZE; - } - stblk = HOSTCC_STATBLCK_RING1;
for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) { u64 mapping = (u64)tnapi->status_mapping; tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); + stblk += 8;
/* Clear status block in ram. */ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE); - - if (tnapi->tx_ring) { - tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping, - (TG3_TX_RING_SIZE << - BDINFO_FLAGS_MAXLEN_SHIFT), - NIC_SRAM_TX_BUFFER_DESC); - txrcb += TG3_BDINFO_SIZE; - } - - tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping, - ((tp->rx_ret_ring_mask + 1) << - BDINFO_FLAGS_MAXLEN_SHIFT), 0); - - stblk += 8; - rxrcb += TG3_BDINFO_SIZE; } + + tg3_tx_rcbs_init(tp); + tg3_rx_ret_rcbs_init(tp); }
static void tg3_setup_rxbd_thresholds(struct tg3 *tp) @@@ -9495,46 -9592,17 +9595,17 @@@ static int tg3_reset_hw(struct tg3 *tp if (tg3_flag(tp, INIT_COMPLETE)) tg3_abort_hw(tp, 1);
if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) { tg3_phy_pull_config(tp); + tg3_eee_pull_config(tp, NULL); tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; }
+ /* Enable MAC control of LPI */ + if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) + tg3_setup_eee(tp); + if (reset_phy) tg3_phy_reset(tp);
@@@ -10407,13 -10475,6 +10478,13 @@@ */ static int tg3_init_hw(struct tg3 *tp, bool reset_phy) { + /* Chip may have been just powered on. If so, the boot code may still + * be running initialization. Wait for it to finish to avoid races in + * accessing the hardware. + */ + tg3_enable_register_access(tp); + tg3_poll_fw(tp); + tg3_switch_clocks(tp);
tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); @@@ -11190,7 -11251,7 +11261,7 @@@ static int tg3_start(struct tg3 *tp, bo */ err = tg3_alloc_consistent(tp); if (err) - goto err_out1; + goto out_ints_fini;
tg3_napi_init(tp);
@@@ -11204,12 -11265,15 +11275,15 @@@ tnapi = &tp->napi[i]; free_irq(tnapi->irq_vec, tnapi); } - goto err_out2; + goto out_napi_fini; } }
tg3_full_lock(tp, 0);
+ if (init) + tg3_ape_driver_state_change(tp, RESET_KIND_INIT); + err = tg3_init_hw(tp, reset_phy); if (err) { tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); @@@ -11219,7 -11283,7 +11293,7 @@@ tg3_full_unlock(tp);
if (err) - goto err_out3; + goto out_free_irq;
if (test_irq && tg3_flag(tp, USING_MSI)) { err = tg3_test_msi(tp); @@@ -11230,7 -11294,7 +11304,7 @@@ tg3_free_rings(tp); tg3_full_unlock(tp);
- goto err_out2; + goto out_napi_fini; }
if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) { @@@ -11270,18 -11334,18 +11344,18 @@@
return 0;
- err_out3: + out_free_irq: for (i = tp->irq_cnt - 1; i >= 0; i--) { struct tg3_napi *tnapi = &tp->napi[i]; free_irq(tnapi->irq_vec, tnapi); }
- err_out2: + out_napi_fini: tg3_napi_disable(tp); tg3_napi_fini(tp); tg3_free_consistent(tp);
- err_out1: + out_ints_fini: tg3_ints_fini(tp);
return err; @@@ -13326,11 -13390,13 +13400,13 @@@ static void tg3_self_test(struct net_de struct tg3 *tp = netdev_priv(dev); bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
- if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) && - tg3_power_up(tp)) { - etest->flags |= ETH_TEST_FL_FAILED; - memset(data, 1, sizeof(u64) * TG3_NUM_TEST); - return; + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + if (tg3_power_up(tp)) { + etest->flags |= ETH_TEST_FL_FAILED; + memset(data, 1, sizeof(u64) * TG3_NUM_TEST); + return; + } + tg3_ape_driver_state_change(tp, RESET_KIND_INIT); }
memset(data, 0, sizeof(u64) * TG3_NUM_TEST); @@@ -13621,6 -13687,57 +13697,57 @@@ static int tg3_set_coalesce(struct net_ return 0; }
+ static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata) + { + struct tg3 *tp = netdev_priv(dev); + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { + netdev_warn(tp->dev, "Board does not support EEE!\n"); + return -EOPNOTSUPP; + } + + if (edata->advertised != tp->eee.advertised) { + netdev_warn(tp->dev, + "Direct manipulation of EEE advertisement is not supported\n"); + return -EINVAL; + } + + if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) { + netdev_warn(tp->dev, + "Maximal Tx Lpi timer supported is %#x(u)\n", + TG3_CPMU_DBTMR1_LNKIDLE_MAX); + return -EINVAL; + } + + tp->eee = *edata; + + tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; + tg3_warn_mgmt_link_flap(tp); + + if (netif_running(tp->dev)) { + tg3_full_lock(tp, 0); + tg3_setup_eee(tp); + tg3_phy_reset(tp); + tg3_full_unlock(tp); + } + + return 0; + } + + static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata) + { + struct tg3 *tp = netdev_priv(dev); + + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) { + netdev_warn(tp->dev, + "Board does not support EEE!\n"); + return -EOPNOTSUPP; + } + + *edata = tp->eee; + return 0; + } + static const struct ethtool_ops tg3_ethtool_ops = { .get_settings = tg3_get_settings, .set_settings = tg3_set_settings, @@@ -13654,6 -13771,8 +13781,8 @@@ .get_channels = tg3_get_channels, .set_channels = tg3_set_channels, .get_ts_info = tg3_get_ts_info, + .get_eee = tg3_get_eee, + .set_eee = tg3_set_eee, };
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, @@@ -15002,9 -15121,18 +15131,18 @@@ static int tg3_phy_probe(struct tg3 *tp (tg3_asic_rev(tp) == ASIC_REV_5717 && tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) || (tg3_asic_rev(tp) == ASIC_REV_57765 && - tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) + tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) { tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
+ tp->eee.supported = SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full; + tp->eee.advertised = ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Full; + tp->eee.eee_enabled = 1; + tp->eee.tx_lpi_enabled = 1; + tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US; + } + tg3_phy_init_link_config(tp);
if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) && @@@ -17076,7 -17204,7 +17214,7 @@@ static int tg3_init_one(struct pci_dev { struct net_device *dev; struct tg3 *tp; - int i, err, pm_cap; + int i, err; u32 sndmbx, rcvmbx, intmbx; char str[40]; u64 dma_mask, persist_dma_mask; @@@ -17098,25 -17226,10 +17236,10 @@@
pci_set_master(pdev);
dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); if (!dev) { err = -ENOMEM; - goto err_out_power_down; + goto err_out_free_res; }
SET_NETDEV_DEV(dev, &pdev->dev); @@@ -17124,7 -17237,7 +17247,7 @@@ tp = netdev_priv(dev); tp->pdev = pdev; tp->dev = dev; - tp->pm_cap = pm_cap; + tp->pm_cap = pdev->pm_cap; tp->rx_mode = TG3_DEF_RX_MODE; tp->tx_mode = TG3_DEF_TX_MODE; tp->irq_sync = 1; @@@ -17462,9 -17575,6 +17585,6 @@@ err_out_iounmap err_out_free_dev: free_netdev(dev);
- err_out_power_down: - pci_set_power_state(pdev, PCI_D3hot); - err_out_free_res: pci_release_regions(pdev);
@@@ -17574,6 -17684,8 +17694,8 @@@ static int tg3_resume(struct device *de
tg3_full_lock(tp, 0);
+ tg3_ape_driver_state_change(tp, RESET_KIND_INIT); + tg3_flag_set(tp, INIT_COMPLETE); err = tg3_restart_hw(tp, !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)); @@@ -17708,6 -17820,7 +17830,7 @@@ static void tg3_io_resume(struct pci_de goto done;
tg3_full_lock(tp, 0); + tg3_ape_driver_state_change(tp, RESET_KIND_INIT); tg3_flag_set(tp, INIT_COMPLETE); err = tg3_restart_hw(tp, true); if (err) { @@@ -17745,15 -17858,4 +17868,4 @@@ static struct pci_driver tg3_driver = .driver.pm = &tg3_pm_ops, };
- static int __init tg3_init(void) - { - return pci_register_driver(&tg3_driver); - } - - static void __exit tg3_cleanup(void) - { - pci_unregister_driver(&tg3_driver); - } - - module_init(tg3_init); - module_exit(tg3_cleanup); + module_pci_driver(tg3_driver); diff --combined drivers/net/ethernet/emulex/benet/be_main.c index a0b4be5,9aef457..98efc29 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@@ -834,32 -834,39 +834,39 @@@ static int be_vlan_tag_tx_chk(struct be return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid; }
- static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb) + static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, + struct sk_buff *skb) { - return BE3_chip(adapter) && - be_ipv6_exthdr_check(skb); + return BE3_chip(adapter) && be_ipv6_exthdr_check(skb); }
- static netdev_tx_t be_xmit(struct sk_buff *skb, - struct net_device *netdev) + static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, + struct sk_buff *skb, + bool *skip_hw_vlan) { - struct be_adapter *adapter = netdev_priv(netdev); - struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)]; - struct be_queue_info *txq = &txo->q; - struct iphdr *ip = NULL; - u32 wrb_cnt = 0, copied = 0; - u32 start = txq->head, eth_hdr_len; - bool dummy_wrb, stopped = false; - bool skip_hw_vlan = false; struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; + unsigned int eth_hdr_len; + struct iphdr *ip;
- eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ? - VLAN_ETH_HLEN : ETH_HLEN; + /* Lancer ASIC has a bug wherein packets that are 32 bytes or less + * may cause a transmit stall on that port. So the work-around is to + * pad such packets to a 36-byte length. + */ + if (unlikely(lancer_chip(adapter) && skb->len <= 32)) { + if (skb_padto(skb, 36)) + goto tx_drop; + skb->len = 36; + }
/* For padded packets, BE HW modifies tot_len field in IP header * incorrecly when VLAN tag is inserted by HW. + * For padded packets, Lancer computes incorrect checksum. */ - if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) { + eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ? + VLAN_ETH_HLEN : ETH_HLEN; + if (skb->len <= 60 && + (lancer_chip(adapter) || vlan_tx_tag_present(skb)) && + is_ipv4_pkt(skb)) { ip = (struct iphdr *)ip_hdr(skb); pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)); } @@@ -869,15 -876,15 +876,15 @@@ */ if ((adapter->function_mode & UMC_ENABLED) && veh->h_vlan_proto == htons(ETH_P_8021Q)) - skip_hw_vlan = true; + *skip_hw_vlan = true;
/* HW has a bug wherein it will calculate CSUM for VLAN * pkts even though it is disabled. * Manually insert VLAN in pkt. */ if (skb->ip_summed != CHECKSUM_PARTIAL && - vlan_tx_tag_present(skb)) { - skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan); + vlan_tx_tag_present(skb)) { + skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); if (unlikely(!skb)) goto tx_drop; } @@@ -887,8 -894,8 +894,8 @@@ * skip HW tagging is not enabled by FW. */ if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) && - (adapter->pvid || adapter->qnq_vid) && - !qnq_async_evt_rcvd(adapter))) + (adapter->pvid || adapter->qnq_vid) && + !qnq_async_evt_rcvd(adapter))) goto tx_drop;
/* Manual VLAN tag insertion to prevent: @@@ -899,11 -906,31 +906,31 @@@ */ if (be_ipv6_tx_stall_chk(adapter, skb) && be_vlan_tag_tx_chk(adapter, skb)) { - skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan); + skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); if (unlikely(!skb)) goto tx_drop; }
+ return skb; + tx_drop: + dev_kfree_skb_any(skb); + return NULL; + } + + static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) + { + struct be_adapter *adapter = netdev_priv(netdev); + struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)]; + struct be_queue_info *txq = &txo->q; + bool dummy_wrb, stopped = false; + u32 wrb_cnt = 0, copied = 0; + bool skip_hw_vlan = false; + u32 start = txq->head; + + skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan); + if (!skb) + return NETDEV_TX_OK; + wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb, @@@ -933,7 -960,6 +960,6 @@@ txq->head = start; dev_kfree_skb_any(skb); } - tx_drop: return NETDEV_TX_OK; }
@@@ -3184,7 -3210,7 +3210,7 @@@ static int be_setup(struct be_adapter * if (status) goto err;
- be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL); + be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
if (adapter->vlans_added) be_vid_config(adapter); @@@ -3530,40 -3556,6 +3556,6 @@@ static int be_flash_skyhawk(struct be_a return 0; }
- static int lancer_wait_idle(struct be_adapter *adapter) - { - #define SLIPORT_IDLE_TIMEOUT 30 - u32 reg_val; - int status = 0, i; - - for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) { - reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET); - if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0) - break; - - ssleep(1); - } - - if (i == SLIPORT_IDLE_TIMEOUT) - status = -1; - - return status; - } - - static int lancer_fw_reset(struct be_adapter *adapter) - { - int status = 0; - - status = lancer_wait_idle(adapter); - if (status) - return status; - - iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db + - PHYSDEV_CONTROL_OFFSET); - - return status; - } - static int lancer_fw_download(struct be_adapter *adapter, const struct firmware *fw) { @@@ -3641,7 -3633,8 +3633,8 @@@ }
if (change_status == LANCER_FW_RESET_NEEDED) { - status = lancer_fw_reset(adapter); + status = lancer_physdev_ctrl(adapter, + PHYSDEV_CONTROL_FW_RESET_MASK); if (status) { dev_err(&adapter->pdev->dev, "Adapter busy for FW reset.\n" @@@ -3776,6 -3769,10 +3769,10 @@@ int be_load_fw(struct be_adapter *adapt else status = be_fw_download(adapter, fw);
+ if (!status) + be_cmd_get_fw_ver(adapter, adapter->fw_ver, + adapter->fw_on_flash); + fw_exit: release_firmware(fw); return status; @@@ -4262,9 -4259,6 +4259,9 @@@ static int be_probe(struct pci_dev *pde netdev->features |= NETIF_F_HIGHDMA; } else { status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (!status) + status = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); if (status) { dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); goto free_netdev; diff --combined drivers/net/ethernet/renesas/sh_eth.c index 5e3982f,a2eadc0..8cb600c --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@@ -313,9 -313,14 +313,14 @@@ static const u16 sh_eth_offset_fast_sh3 [TSU_ADRL31] = 0x01fc, };
- #if defined(CONFIG_CPU_SUBTYPE_SH7734) || \ - defined(CONFIG_CPU_SUBTYPE_SH7763) || \ - defined(CONFIG_ARCH_R8A7740) + static int sh_eth_is_gether(struct sh_eth_private *mdp) + { + if (mdp->reg_offset == sh_eth_offset_gigabit) + return 1; + else + return 0; + } + static void sh_eth_select_mii(struct net_device *ndev) { u32 value = 0x0; @@@ -339,11 -344,7 +344,7 @@@
sh_eth_write(ndev, value, RMII_MII); } - #endif
- /* There is CPU dependent code */ - #if defined(CONFIG_ARCH_R8A7778) || defined(CONFIG_ARCH_R8A7779) - #define SH_ETH_RESET_DEFAULT 1 static void sh_eth_set_duplex(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@@ -354,7 -355,8 +355,8 @@@ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); }
- static void sh_eth_set_rate(struct net_device *ndev) + /* There is CPU dependent code */ + static void sh_eth_set_rate_r8a777x(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev);
@@@ -371,9 -373,9 +373,9 @@@ }
/* R8A7778/9 */ - static struct sh_eth_cpu_data sh_eth_my_cpu_data = { + static struct sh_eth_cpu_data r8a777x_data = { .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate, + .set_rate = sh_eth_set_rate_r8a777x,
.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, @@@ -389,19 -391,8 +391,8 @@@ .tpauser = 1, .hw_swap = 1, }; - #elif defined(CONFIG_CPU_SUBTYPE_SH7724) - #define SH_ETH_RESET_DEFAULT 1 - static void sh_eth_set_duplex(struct net_device *ndev) - { - struct sh_eth_private *mdp = netdev_priv(ndev); - - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); - }
- static void sh_eth_set_rate(struct net_device *ndev) + static void sh_eth_set_rate_sh7724(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev);
@@@ -418,9 -409,9 +409,9 @@@ }
/* SH7724 */ - static struct sh_eth_cpu_data sh_eth_my_cpu_data = { + static struct sh_eth_cpu_data sh7724_data = { .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate, + .set_rate = sh_eth_set_rate_sh7724,
.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD, .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP, @@@ -438,22 -429,8 +429,8 @@@ .rpadir = 1, .rpadir_value = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */ }; - #elif defined(CONFIG_CPU_SUBTYPE_SH7757) - #define SH_ETH_HAS_BOTH_MODULES 1 - #define SH_ETH_HAS_TSU 1 - static int sh_eth_check_reset(struct net_device *ndev); - - static void sh_eth_set_duplex(struct net_device *ndev) - { - struct sh_eth_private *mdp = netdev_priv(ndev); - - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); - }
- static void sh_eth_set_rate(struct net_device *ndev) + static void sh_eth_set_rate_sh7757(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev);
@@@ -470,9 -447,9 +447,9 @@@ }
/* SH7757 */ - static struct sh_eth_cpu_data sh_eth_my_cpu_data = { - .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate, + static struct sh_eth_cpu_data sh7757_data = { + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_sh7757,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .rmcr_value = 0x00000001, @@@ -482,6 -459,7 +459,7 @@@ EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
+ .irq_flags = IRQF_SHARED, .apr = 1, .mpr = 1, .tpauser = 1, @@@ -491,7 -469,7 +469,7 @@@ .rpadir_value = 2 << 16, };
- #define SH_GIGA_ETH_BASE 0xfee00000 + #define SH_GIGA_ETH_BASE 0xfee00000UL #define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8) #define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0) static void sh_eth_chip_reset_giga(struct net_device *ndev) @@@ -516,52 -494,6 +494,6 @@@ } }
- static int sh_eth_is_gether(struct sh_eth_private *mdp); - static int sh_eth_reset(struct net_device *ndev) - { - struct sh_eth_private *mdp = netdev_priv(ndev); - int ret = 0; - - if (sh_eth_is_gether(mdp)) { - sh_eth_write(ndev, 0x03, EDSR); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, - EDMR); - - ret = sh_eth_check_reset(ndev); - if (ret) - goto out; - - /* Table Init */ - sh_eth_write(ndev, 0x0, TDLAR); - sh_eth_write(ndev, 0x0, TDFAR); - sh_eth_write(ndev, 0x0, TDFXR); - sh_eth_write(ndev, 0x0, TDFFR); - sh_eth_write(ndev, 0x0, RDLAR); - sh_eth_write(ndev, 0x0, RDFAR); - sh_eth_write(ndev, 0x0, RDFXR); - sh_eth_write(ndev, 0x0, RDFFR); - } else { - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, - EDMR); - mdelay(3); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, - EDMR); - } - - out: - return ret; - } - - static void sh_eth_set_duplex_giga(struct net_device *ndev) - { - struct sh_eth_private *mdp = netdev_priv(ndev); - - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); - } - static void sh_eth_set_rate_giga(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@@ -582,9 -514,9 +514,9 @@@ }
/* SH7757(GETHERC) */ - static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = { + static struct sh_eth_cpu_data sh7757_data_giga = { .chip_reset = sh_eth_chip_reset_giga, - .set_duplex = sh_eth_set_duplex_giga, + .set_duplex = sh_eth_set_duplex, .set_rate = sh_eth_set_rate_giga,
.ecsr_value = ECSR_ICD | ECSR_MPD, @@@ -600,6 -532,7 +532,7 @@@ .fdr_value = 0x0000072f, .rmcr_value = 0x00000001,
+ .irq_flags = IRQF_SHARED, .apr = 1, .mpr = 1, .tpauser = 1, @@@ -612,19 -545,6 +545,6 @@@ .tsu = 1, };
- static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp) - { - if (sh_eth_is_gether(mdp)) - return &sh_eth_my_cpu_data_giga; - else - return &sh_eth_my_cpu_data; - } - - #elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) - #define SH_ETH_HAS_TSU 1 - static int sh_eth_check_reset(struct net_device *ndev); - static void sh_eth_reset_hw_crc(struct net_device *ndev); - static void sh_eth_chip_reset(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@@ -634,17 -554,7 +554,7 @@@ mdelay(1); }
- static void sh_eth_set_duplex(struct net_device *ndev) - { - struct sh_eth_private *mdp = netdev_priv(ndev); - - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); - } - - static void sh_eth_set_rate(struct net_device *ndev) + static void sh_eth_set_rate_gether(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev);
@@@ -663,11 -573,11 +573,11 @@@ } }
- /* sh7763 */ - static struct sh_eth_cpu_data sh_eth_my_cpu_data = { + /* SH7734 */ + static struct sh_eth_cpu_data sh7734_data = { .chip_reset = sh_eth_chip_reset, .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate, + .set_rate = sh_eth_set_rate_gether,
.ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, @@@ -688,54 -598,39 +598,39 @@@ .no_trimd = 1, .no_ade = 1, .tsu = 1, - #if defined(CONFIG_CPU_SUBTYPE_SH7734) - .hw_crc = 1, - .select_mii = 1, - #endif + .hw_crc = 1, + .select_mii = 1, };
- static int sh_eth_reset(struct net_device *ndev) - { - int ret = 0; - - sh_eth_write(ndev, EDSR_ENALL, EDSR); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR); - - ret = sh_eth_check_reset(ndev); - if (ret) - goto out; + /* SH7763 */ + static struct sh_eth_cpu_data sh7763_data = { + .chip_reset = sh_eth_chip_reset, + .set_duplex = sh_eth_set_duplex, + .set_rate = sh_eth_set_rate_gether,
- /* Table Init */ - sh_eth_write(ndev, 0x0, TDLAR); - sh_eth_write(ndev, 0x0, TDFAR); - sh_eth_write(ndev, 0x0, TDFXR); - sh_eth_write(ndev, 0x0, TDFFR); - sh_eth_write(ndev, 0x0, RDLAR); - sh_eth_write(ndev, 0x0, RDFAR); - sh_eth_write(ndev, 0x0, RDFXR); - sh_eth_write(ndev, 0x0, RDFFR); - - /* Reset HW CRC register */ - sh_eth_reset_hw_crc(ndev); - - /* Select MII mode */ - if (sh_eth_my_cpu_data.select_mii) - sh_eth_select_mii(ndev); - out: - return ret; - } + .ecsr_value = ECSR_ICD | ECSR_MPD, + .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, + .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
- static void sh_eth_reset_hw_crc(struct net_device *ndev) - { - if (sh_eth_my_cpu_data.hw_crc) - sh_eth_write(ndev, 0x0, CSMR); - } + .tx_check = EESR_TC1 | EESR_FTC, + .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \ + EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \ + EESR_ECI, + .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \ + EESR_TFE,
- #elif defined(CONFIG_ARCH_R8A7740) - #define SH_ETH_HAS_TSU 1 - static int sh_eth_check_reset(struct net_device *ndev); + .apr = 1, + .mpr = 1, + .tpauser = 1, + .bculr = 1, + .hw_swap = 1, + .no_trimd = 1, + .no_ade = 1, + .tsu = 1, + .irq_flags = IRQF_SHARED, + };
- static void sh_eth_chip_reset(struct net_device *ndev) + static void sh_eth_chip_reset_r8a7740(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev);
@@@ -746,65 -641,11 +641,11 @@@ sh_eth_select_mii(ndev); }
- static int sh_eth_reset(struct net_device *ndev) - { - int ret = 0; - - sh_eth_write(ndev, EDSR_ENALL, EDSR); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR); - - ret = sh_eth_check_reset(ndev); - if (ret) - goto out; - - /* Table Init */ - sh_eth_write(ndev, 0x0, TDLAR); - sh_eth_write(ndev, 0x0, TDFAR); - sh_eth_write(ndev, 0x0, TDFXR); - sh_eth_write(ndev, 0x0, TDFFR); - sh_eth_write(ndev, 0x0, RDLAR); - sh_eth_write(ndev, 0x0, RDFAR); - sh_eth_write(ndev, 0x0, RDFXR); - sh_eth_write(ndev, 0x0, RDFFR); - - out: - return ret; - } - - static void sh_eth_set_duplex(struct net_device *ndev) - { - struct sh_eth_private *mdp = netdev_priv(ndev); - - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); - } - - static void sh_eth_set_rate(struct net_device *ndev) - { - struct sh_eth_private *mdp = netdev_priv(ndev); - - switch (mdp->speed) { - case 10: /* 10BASE */ - sh_eth_write(ndev, GECMR_10, GECMR); - break; - case 100:/* 100BASE */ - sh_eth_write(ndev, GECMR_100, GECMR); - break; - case 1000: /* 1000BASE */ - sh_eth_write(ndev, GECMR_1000, GECMR); - break; - default: - break; - } - } - /* R8A7740 */ - static struct sh_eth_cpu_data sh_eth_my_cpu_data = { - .chip_reset = sh_eth_chip_reset, + static struct sh_eth_cpu_data r8a7740_data = { + .chip_reset = sh_eth_chip_reset_r8a7740, .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate, + .set_rate = sh_eth_set_rate_gether,
.ecsr_value = ECSR_ICD | ECSR_MPD, .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP, @@@ -828,9 -669,7 +669,7 @@@ .select_mii = 1, };
- #elif defined(CONFIG_CPU_SUBTYPE_SH7619) - #define SH_ETH_RESET_DEFAULT 1 - static struct sh_eth_cpu_data sh_eth_my_cpu_data = { + static struct sh_eth_cpu_data sh7619_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.apr = 1, @@@ -838,14 -677,11 +677,11 @@@ .tpauser = 1, .hw_swap = 1, }; - #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) - #define SH_ETH_RESET_DEFAULT 1 - #define SH_ETH_HAS_TSU 1 - static struct sh_eth_cpu_data sh_eth_my_cpu_data = { + + static struct sh_eth_cpu_data sh771x_data = { .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff, .tsu = 1, }; - #endif
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) { @@@ -875,17 -711,6 +711,6 @@@ cd->tx_error_check = DEFAULT_TX_ERROR_CHECK; }
- #if defined(SH_ETH_RESET_DEFAULT) - /* Chip Reset */ - static int sh_eth_reset(struct net_device *ndev) - { - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR); - mdelay(3); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR); - - return 0; - } - #else static int sh_eth_check_reset(struct net_device *ndev) { int ret = 0; @@@ -897,13 -722,55 +722,55 @@@ mdelay(1); cnt--; } - if (cnt < 0) { - pr_err("Device reset fail\n"); + if (cnt <= 0) { + pr_err("Device reset failed\n"); ret = -ETIMEDOUT; } return ret; } - #endif + + static int sh_eth_reset(struct net_device *ndev) + { + struct sh_eth_private *mdp = netdev_priv(ndev); + int ret = 0; + + if (sh_eth_is_gether(mdp)) { + sh_eth_write(ndev, EDSR_ENALL, EDSR); + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, + EDMR); + + ret = sh_eth_check_reset(ndev); + if (ret) + goto out; + + /* Table Init */ + sh_eth_write(ndev, 0x0, TDLAR); + sh_eth_write(ndev, 0x0, TDFAR); + sh_eth_write(ndev, 0x0, TDFXR); + sh_eth_write(ndev, 0x0, TDFFR); + sh_eth_write(ndev, 0x0, RDLAR); + sh_eth_write(ndev, 0x0, RDFAR); + sh_eth_write(ndev, 0x0, RDFXR); + sh_eth_write(ndev, 0x0, RDFFR); + + /* Reset HW CRC register */ + if (mdp->cd->hw_crc) + sh_eth_write(ndev, 0x0, CSMR); + + /* Select MII mode */ + if (mdp->cd->select_mii) + sh_eth_select_mii(ndev); + } else { + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, + EDMR); + mdelay(3); + sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, + EDMR); + } + + out: + return ret; + }
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) static void sh_eth_set_receive_align(struct sk_buff *skb) @@@ -979,14 -846,6 +846,6 @@@ static void read_mac_address(struct net } }
- static int sh_eth_is_gether(struct sh_eth_private *mdp) - { - if (mdp->reg_offset == sh_eth_offset_gigabit) - return 1; - else - return 0; - } - static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp) { if (sh_eth_is_gether(mdp)) @@@ -1401,23 -1260,16 +1260,23 @@@ static int sh_eth_rx(struct net_device desc_status = edmac_to_cpu(mdp, rxdesc->status); pkt_len = rxdesc->frame_length;
-#if defined(CONFIG_ARCH_R8A7740) - desc_status >>= 16; -#endif - if (--boguscnt < 0) break;
if (!(desc_status & RDFEND)) ndev->stats.rx_length_errors++;
+#if defined(CONFIG_ARCH_R8A7740) + /* + * In case of almost all GETHER/ETHERs, the Receive Frame State + * (RFS) bits in the Receive Descriptor 0 are from bit 9 to + * bit 0. However, in case of the R8A7740's GETHER, the RFS + * bits are from bit 25 to bit 16. So, the driver needs right + * shifting by 16. + */ + desc_status >>= 16; +#endif + if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | RD_RFS5 | RD_RFS6 | RD_RFS10)) { ndev->stats.rx_errors++; @@@ -1968,14 -1820,7 +1827,7 @@@ static int sh_eth_open(struct net_devic pm_runtime_get_sync(&mdp->pdev->dev);
ret = request_irq(ndev->irq, sh_eth_interrupt, - #if defined(CONFIG_CPU_SUBTYPE_SH7763) || \ - defined(CONFIG_CPU_SUBTYPE_SH7764) || \ - defined(CONFIG_CPU_SUBTYPE_SH7757) - IRQF_SHARED, - #else - 0, - #endif - ndev->name, ndev); + mdp->cd->irq_flags, ndev->name, ndev); if (ret) { dev_err(&ndev->dev, "Can not assign IRQ number\n"); return ret; @@@ -2161,7 -2006,6 +2013,6 @@@ static int sh_eth_do_ioctl(struct net_d return phy_mii_ioctl(phydev, rq, cmd); }
- #if defined(SH_ETH_HAS_TSU) /* For TSU_POSTn. Please refer to the manual about this (strange) bitfields */ static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp, int entry) @@@ -2504,7 -2348,6 +2355,6 @@@ static int sh_eth_vlan_rx_kill_vid(stru
return 0; } - #endif /* SH_ETH_HAS_TSU */
/* SuperH's TSU register init function */ static void sh_eth_tsu_init(struct sh_eth_private *mdp) @@@ -2648,11 -2491,21 +2498,21 @@@ static const struct net_device_ops sh_e .ndo_stop = sh_eth_close, .ndo_start_xmit = sh_eth_start_xmit, .ndo_get_stats = sh_eth_get_stats, - #if defined(SH_ETH_HAS_TSU) + .ndo_tx_timeout = sh_eth_tx_timeout, + .ndo_do_ioctl = sh_eth_do_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, + }; + + static const struct net_device_ops sh_eth_netdev_ops_tsu = { + .ndo_open = sh_eth_open, + .ndo_stop = sh_eth_close, + .ndo_start_xmit = sh_eth_start_xmit, + .ndo_get_stats = sh_eth_get_stats, .ndo_set_rx_mode = sh_eth_set_multicast_list, .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid, - #endif .ndo_tx_timeout = sh_eth_tx_timeout, .ndo_do_ioctl = sh_eth_do_ioctl, .ndo_validate_addr = eth_validate_addr, @@@ -2667,6 -2520,7 +2527,7 @@@ static int sh_eth_drv_probe(struct plat struct net_device *ndev = NULL; struct sh_eth_private *mdp = NULL; struct sh_eth_plat_data *pd = pdev->dev.platform_data; + const struct platform_device_id *id = platform_get_device_id(pdev);
/* get base addr */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@@ -2725,15 -2579,14 +2586,14 @@@ mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
/* set cpu data */ - #if defined(SH_ETH_HAS_BOTH_MODULES) - mdp->cd = sh_eth_get_cpu_data(mdp); - #else - mdp->cd = &sh_eth_my_cpu_data; - #endif + mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; sh_eth_set_default_cpu_data(mdp->cd);
/* set function */ - ndev->netdev_ops = &sh_eth_netdev_ops; + if (mdp->cd->tsu) + ndev->netdev_ops = &sh_eth_netdev_ops_tsu; + else + ndev->netdev_ops = &sh_eth_netdev_ops; SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops); ndev->watchdog_timeo = TX_TIMEOUT;
@@@ -2810,11 -2663,11 +2670,11 @@@ static int sh_eth_drv_remove(struct pla unregister_netdev(ndev); pm_runtime_disable(&pdev->dev); free_netdev(ndev); - platform_set_drvdata(pdev, NULL);
return 0; }
+ #ifdef CONFIG_PM static int sh_eth_runtime_nop(struct device *dev) { /* @@@ -2828,17 -2681,36 +2688,36 @@@ return 0; }
- static struct dev_pm_ops sh_eth_dev_pm_ops = { + static const struct dev_pm_ops sh_eth_dev_pm_ops = { .runtime_suspend = sh_eth_runtime_nop, .runtime_resume = sh_eth_runtime_nop, }; + #define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops) + #else + #define SH_ETH_PM_OPS NULL + #endif + + static struct platform_device_id sh_eth_id_table[] = { + { "sh7619-ether", (kernel_ulong_t)&sh7619_data }, + { "sh771x-ether", (kernel_ulong_t)&sh771x_data }, + { "sh7724-ether", (kernel_ulong_t)&sh7724_data }, + { "sh7734-gether", (kernel_ulong_t)&sh7734_data }, + { "sh7757-ether", (kernel_ulong_t)&sh7757_data }, + { "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga }, + { "sh7763-gether", (kernel_ulong_t)&sh7763_data }, + { "r8a7740-gether", (kernel_ulong_t)&r8a7740_data }, + { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data }, + { } + }; + MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
static struct platform_driver sh_eth_driver = { .probe = sh_eth_drv_probe, .remove = sh_eth_drv_remove, + .id_table = sh_eth_id_table, .driver = { .name = CARDNAME, - .pm = &sh_eth_dev_pm_ops, + .pm = SH_ETH_PM_OPS, }, };
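For readers less used to the platform bus, here is a rough user-space model of what the new sh_eth_id_table buys: platform_get_device_id() matches the device name against the table and returns its driver_data pointer, so the probe can pick the per-SoC cpu data (and, from its tsu flag, the right netdev_ops) without the old SH_ETH_HAS_* #ifdefs. The struct fields and table entries below are illustrative stand-ins, not the real sh_eth_cpu_data layout.

/* Toy model of name -> driver_data matching; get_device_id() stands in for
 * platform_get_device_id(), and struct cpu_data is not the real layout.
 */
#include <stdio.h>
#include <string.h>

struct cpu_data {
        const char *label;
        int tsu;                     /* selects sh_eth_netdev_ops_tsu in the driver */
};

static const struct cpu_data sh7757_data = { "sh7757", 0 };
static const struct cpu_data sh7763_data = { "sh7763 gether", 1 };

struct dev_id {
        const char *name;
        const struct cpu_data *driver_data;
};

static const struct dev_id id_table[] = {
        { "sh7757-ether",  &sh7757_data },
        { "sh7763-gether", &sh7763_data },
        { NULL, NULL }
};

static const struct dev_id *get_device_id(const char *devname)
{
        const struct dev_id *id;

        for (id = id_table; id->name; id++)
                if (!strcmp(id->name, devname))
                        return id;
        return NULL;
}

int main(void)
{
        const struct dev_id *id = get_device_id("sh7763-gether");

        if (id)
                printf("probe: cpu data '%s', tsu=%d\n",
                       id->driver_data->label, id->driver_data->tsu);
        return 0;
}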
diff --combined drivers/net/macvlan.c index 6e91931,edfddc5..d811b06 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@@ -853,24 -853,18 +853,24 @@@ static int macvlan_changelink(struct ne struct nlattr *tb[], struct nlattr *data[]) { struct macvlan_dev *vlan = netdev_priv(dev); - if (data && data[IFLA_MACVLAN_MODE]) - vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); + if (data && data[IFLA_MACVLAN_FLAGS]) { __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC; - - if (promisc && (flags & MACVLAN_FLAG_NOPROMISC)) - dev_set_promiscuity(vlan->lowerdev, -1); - else if (promisc && !(flags & MACVLAN_FLAG_NOPROMISC)) - dev_set_promiscuity(vlan->lowerdev, 1); + if (vlan->port->passthru && promisc) { + int err; + + if (flags & MACVLAN_FLAG_NOPROMISC) + err = dev_set_promiscuity(vlan->lowerdev, -1); + else + err = dev_set_promiscuity(vlan->lowerdev, 1); + if (err < 0) + return err; + } vlan->flags = flags; } + if (data && data[IFLA_MACVLAN_MODE]) + vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); return 0; }
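A compact model of the reworked macvlan_changelink() above, covering only what the hunk shows: XOR-ing the new and old flag words isolates a MACVLAN_FLAG_NOPROMISC toggle, the lower device's promiscuity counter is only touched for passthru ports, and a failure from the (here simulated) dev_set_promiscuity() is returned before any state is committed. The flag value and helper below are stand-ins, not the uapi definitions.

#include <stdio.h>

#define MACVLAN_FLAG_NOPROMISC 1     /* assumed value, for illustration only */

static int promisc_count;

/* stands in for dev_set_promiscuity(); returns 0 on success */
static int set_promiscuity(int inc)
{
        promisc_count += inc;
        return 0;
}

static int change_flags(unsigned short *cur_flags, unsigned short flags,
                        int passthru)
{
        int promisc = (flags ^ *cur_flags) & MACVLAN_FLAG_NOPROMISC;

        if (passthru && promisc) {
                int err;

                if (flags & MACVLAN_FLAG_NOPROMISC)
                        err = set_promiscuity(-1);
                else
                        err = set_promiscuity(1);
                if (err < 0)
                        return err;  /* flags stay unchanged on failure */
        }
        *cur_flags = flags;
        return 0;
}

int main(void)
{
        unsigned short flags = 0;

        change_flags(&flags, MACVLAN_FLAG_NOPROMISC, 1);  /* drop promiscuity */
        change_flags(&flags, 0, 1);                       /* restore it */
        printf("promisc_count=%d flags=%u\n", promisc_count, flags);
        return 0;
}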
@@@ -927,7 -921,7 +927,7 @@@ static struct rtnl_link_ops macvlan_lin static int macvlan_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct macvlan_dev *vlan, *next; struct macvlan_port *port; LIST_HEAD(list_kill); diff --combined drivers/net/phy/Kconfig index 84461e8,3a316b3..342561a --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@@ -135,7 -135,7 +135,7 @@@ config MDIO_GPI
config MDIO_OCTEON tristate "Support for MDIO buses on Octeon SOCs" - depends on CPU_CAVIUM_OCTEON + depends on CAVIUM_OCTEON_SOC default y help
@@@ -144,6 -144,16 +144,16 @@@
If in doubt, say Y.
+ config MDIO_SUN4I + tristate "Allwinner sun4i MDIO interface support" + depends on ARCH_SUNXI + select REGULATOR + select REGULATOR_FIXED_VOLTAGE + help + This driver supports the MDIO interface found in the network + interface units of the Allwinner SoC that have an EMAC (A10, + A12, A10s, etc.) + config MDIO_BUS_MUX tristate depends on OF_MDIO diff --combined drivers/net/team/team.c index b305105,e46fef3..bff7e0b --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@@ -525,31 -525,26 +525,26 @@@ static void team_set_no_mode(struct tea team->mode = &__team_no_mode; }
- static void __team_adjust_ops(struct team *team, int en_port_count) + static void team_adjust_ops(struct team *team) { /* * To avoid checks in rx/tx skb paths, ensure here that non-null and * correct ops are always set. */
- if (!en_port_count || !team_is_mode_set(team) || + if (!team->en_port_count || !team_is_mode_set(team) || !team->mode->ops->transmit) team->ops.transmit = team_dummy_transmit; else team->ops.transmit = team->mode->ops->transmit;
- if (!en_port_count || !team_is_mode_set(team) || + if (!team->en_port_count || !team_is_mode_set(team) || !team->mode->ops->receive) team->ops.receive = team_dummy_receive; else team->ops.receive = team->mode->ops->receive; }
- static void team_adjust_ops(struct team *team) - { - __team_adjust_ops(team, team->en_port_count); - } - /* * We can benefit from the fact that it's ensured no port is present * at the time of mode change. Therefore no packets are in fly so there's no @@@ -725,9 -720,9 +720,9 @@@ static bool team_queue_override_transmi static void __team_queue_override_port_del(struct team *team, struct team_port *port) { + if (!port->queue_id) + return; list_del_rcu(&port->qom_list); - synchronize_rcu(); - INIT_LIST_HEAD(&port->qom_list); }
static bool team_queue_override_port_has_gt_prio_than(struct team_port *port, @@@ -749,9 -744,8 +744,8 @@@ static void __team_queue_override_port_ struct list_head *qom_list; struct list_head *node;
- if (!port->queue_id || !team_port_enabled(port)) + if (!port->queue_id) return; - qom_list = __team_get_qom_list(team, port->queue_id); node = qom_list; list_for_each_entry(cur, qom_list, qom_list) { @@@ -768,7 -762,7 +762,7 @@@ static void __team_queue_override_enabl bool enabled = false;
list_for_each_entry(port, &team->port_list, list) { - if (!list_empty(&port->qom_list)) { + if (port->queue_id) { enabled = true; break; } @@@ -780,14 -774,44 +774,44 @@@ team->queue_override_enabled = enabled; }
- static void team_queue_override_port_refresh(struct team *team, - struct team_port *port) + static void team_queue_override_port_prio_changed(struct team *team, + struct team_port *port) { + if (!port->queue_id || team_port_enabled(port)) + return; __team_queue_override_port_del(team, port); __team_queue_override_port_add(team, port); __team_queue_override_enabled_check(team); }
+ static void team_queue_override_port_change_queue_id(struct team *team, + struct team_port *port, + u16 new_queue_id) + { + if (team_port_enabled(port)) { + __team_queue_override_port_del(team, port); + port->queue_id = new_queue_id; + __team_queue_override_port_add(team, port); + __team_queue_override_enabled_check(team); + } else { + port->queue_id = new_queue_id; + } + } + + static void team_queue_override_port_add(struct team *team, + struct team_port *port) + { + __team_queue_override_port_add(team, port); + __team_queue_override_enabled_check(team); + } + + static void team_queue_override_port_del(struct team *team, + struct team_port *port) + { + __team_queue_override_port_del(team, port); + __team_queue_override_enabled_check(team); + } +
/**************** * Port handling @@@ -819,7 -843,7 +843,7 @@@ static void team_port_enable(struct tea hlist_add_head_rcu(&port->hlist, team_port_index_hash(team, port->index)); team_adjust_ops(team); - team_queue_override_port_refresh(team, port); + team_queue_override_port_add(team, port); if (team->ops.port_enabled) team->ops.port_enabled(team, port); } @@@ -848,14 -872,9 +872,9 @@@ static void team_port_disable(struct te hlist_del_rcu(&port->hlist); __reconstruct_port_hlist(team, port->index); port->index = -1; - team_queue_override_port_refresh(team, port); - __team_adjust_ops(team, team->en_port_count - 1); - /* - * Wait until readers see adjusted ops. This ensures that - * readers never see team->en_port_count == 0 - */ - synchronize_rcu(); team->en_port_count--; + team_queue_override_port_del(team, port); + team_adjust_ops(team); }
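The synchronize_rcu() calls dropped above were only there to keep readers from ever observing en_port_count == 0; with this series the guard moves to the reader side instead: team_num_to_port_index() (added to include/linux/if_team.h later in this diff) reads the count once and returns 0 when no port is enabled, and rr_transmit() then drops the packet if no port is found. A minimal user-space sketch of that reader-side check, with ACCESS_ONCE() approximated by a volatile read:

#include <stdio.h>

static volatile int en_port_count;   /* updated by a concurrent writer */

static int team_num_to_port_index(int num)
{
        int count = en_port_count;   /* single read, like ACCESS_ONCE() */

        if (count == 0)
                return 0;            /* caller will find no port and drop */
        return num % count;
}

int main(void)
{
        int sent_packets = 0;

        en_port_count = 3;
        printf("index=%d\n", team_num_to_port_index(sent_packets++));

        en_port_count = 0;           /* port being disabled concurrently */
        printf("index=%d\n", team_num_to_port_index(sent_packets++));
        return 0;
}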
#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ @@@ -1092,8 -1111,8 +1111,8 @@@ static int team_port_add(struct team *t }
port->index = -1; - team_port_enable(team, port); list_add_tail_rcu(&port->list, &team->port_list); + team_port_enable(team, port); __team_compute_features(team); __team_port_change_port_added(port, !!netif_carrier_ok(port_dev)); __team_options_change_check(team); @@@ -1163,8 -1182,7 +1182,7 @@@ static int team_port_del(struct team *t
team_port_set_orig_dev_addr(port); dev_set_mtu(port_dev, port->orig.mtu); - synchronize_rcu(); - kfree(port); + kfree_rcu(port, rcu); netdev_info(dev, "Port device %s removed\n", portname); __team_compute_features(team);
@@@ -1259,9 -1277,12 +1277,12 @@@ static int team_priority_option_set(str struct team_gsetter_ctx *ctx) { struct team_port *port = ctx->info->port; + s32 priority = ctx->data.s32_val;
- port->priority = ctx->data.s32_val; - team_queue_override_port_refresh(team, port); + if (port->priority == priority) + return 0; + port->priority = priority; + team_queue_override_port_prio_changed(team, port); return 0; }
@@@ -1278,17 -1299,16 +1299,16 @@@ static int team_queue_id_option_set(str struct team_gsetter_ctx *ctx) { struct team_port *port = ctx->info->port; + u16 new_queue_id = ctx->data.u32_val;
- if (port->queue_id == ctx->data.u32_val) + if (port->queue_id == new_queue_id) return 0; - if (ctx->data.u32_val >= team->dev->real_num_tx_queues) + if (new_queue_id >= team->dev->real_num_tx_queues) return -EINVAL; - port->queue_id = ctx->data.u32_val; - team_queue_override_port_refresh(team, port); + team_queue_override_port_change_queue_id(team, port, new_queue_id); return 0; }
- static const struct team_option team_options[] = { { .name = "mode", @@@ -2648,7 -2668,7 +2668,7 @@@ static void team_port_change_check(stru static int team_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct net_device *dev = (struct net_device *) ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct team_port *port;
port = team_port_get_rtnl(dev); diff --combined drivers/net/team/team_mode_roundrobin.c index 472623f,90311c73..5366585 --- a/drivers/net/team/team_mode_roundrobin.c +++ b/drivers/net/team/team_mode_roundrobin.c @@@ -30,10 -30,9 +30,11 @@@ static bool rr_transmit(struct team *te struct team_port *port; int port_index;
- port_index = rr_priv(team)->sent_packets++ % team->en_port_count; + port_index = team_num_to_port_index(team, + rr_priv(team)->sent_packets++); port = team_get_port_by_index_rcu(team, port_index); + if (unlikely(!port)) + goto drop; port = team_get_first_port_txable_rcu(team, port); if (unlikely(!port)) goto drop; diff --combined drivers/net/tun.c index bfa9bb4,a344270..cea2fe4 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@@ -352,7 -352,7 +352,7 @@@ static u16 tun_select_queue(struct net_ u32 numqueues = 0;
rcu_read_lock(); - numqueues = tun->numqueues; + numqueues = ACCESS_ONCE(tun->numqueues);
txq = skb_get_rxhash(skb); if (txq) { @@@ -841,7 -841,7 +841,7 @@@ static const struct net_device_ops tap_ #endif };
- static int tun_flow_init(struct tun_struct *tun) + static void tun_flow_init(struct tun_struct *tun) { int i;
@@@ -852,8 -852,6 +852,6 @@@ setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun); mod_timer(&tun->flow_gc_timer, round_jiffies_up(jiffies + tun->ageing_time)); - - return 0; }
static void tun_flow_uninit(struct tun_struct *tun) @@@ -1530,6 -1528,9 +1528,9 @@@ static int tun_flags(struct tun_struct if (tun->flags & TUN_TAP_MQ) flags |= IFF_MULTI_QUEUE;
+ if (tun->flags & TUN_PERSIST) + flags |= IFF_PERSIST; + return flags; }
@@@ -1659,10 -1660,7 +1660,7 @@@ static int tun_set_iff(struct net *net goto err_free_dev;
tun_net_init(dev); - - err = tun_flow_init(tun); - if (err < 0) - goto err_free_dev; + tun_flow_init(tun);
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | TUN_USER_FEATURES; @@@ -2159,8 -2157,6 +2157,8 @@@ static int tun_chr_open(struct inode *i set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags); INIT_LIST_HEAD(&tfile->next);
+ sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); + return 0; }
diff --combined drivers/net/wireless/ath/ath9k/Kconfig index 3c2cbc9,3b07851..760ab3f --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@@ -84,25 -84,13 +84,17 @@@ config ATH9K_DFS_CERTIFIE developed. At this point enabling this option won't do anything except increase code size.
- config ATH9K_MAC_DEBUG - bool "Atheros MAC statistics" - depends on ATH9K_DEBUGFS - default y - ---help--- - This option enables collection of statistics for Rx/Tx status - data and some other MAC related statistics - -config ATH9K_RATE_CONTROL +config ATH9K_LEGACY_RATE_CONTROL bool "Atheros ath9k rate control" depends on ATH9K - default y + default n ---help--- Say Y, if you want to use the ath9k specific rate control - module instead of minstrel_ht. + module instead of minstrel_ht. Be warned that there are various + issues with the ath9k RC and minstrel is a more robust algorithm. + Note that even if this option is selected, "ath9k_rate_control" + has to be passed to mac80211 using the module parameter, + ieee80211_default_rc_algo.
config ATH9K_HTC tristate "Atheros HTC based wireless cards support" diff --combined drivers/net/wireless/ath/ath9k/init.c index 2ba4945,daba841..389ee1b --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@@ -21,6 -21,7 +21,7 @@@ #include <linux/ath9k_platform.h> #include <linux/module.h> #include <linux/relay.h> + #include <net/ieee80211_radiotap.h>
#include "ath9k.h"
@@@ -613,9 -614,6 +614,6 @@@ static int ath9k_init_softc(u16 devid, spin_lock_init(&sc->sc_serial_rw); spin_lock_init(&sc->sc_pm_lock); mutex_init(&sc->mutex); - #ifdef CONFIG_ATH9K_MAC_DEBUG - spin_lock_init(&sc->debug.samp_lock); - #endif tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc); tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet, (unsigned long)sc); @@@ -769,12 -767,19 +767,19 @@@ void ath9k_set_hw_capab(struct ath_soft IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_RC_TABLE;
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) - hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; + if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { + hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; + + if (AR_SREV_9280_20_OR_LATER(ah)) + hw->radiotap_mcs_details |= + IEEE80211_RADIOTAP_MCS_HAVE_STBC; + }
if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt) hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+ hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; + hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_CLIENT) | @@@ -787,28 -792,25 +792,24 @@@ hw->wiphy->iface_combinations = if_comb; hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
- if (AR_SREV_5416(sc->sc_ah)) - hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
#ifdef CONFIG_PM_SLEEP - if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) && device_can_wakeup(sc->dev)) { - hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT; hw->wiphy->wowlan.n_patterns = MAX_NUM_USER_PATTERN; hw->wiphy->wowlan.pattern_min_len = 1; hw->wiphy->wowlan.pattern_max_len = MAX_PATTERN_SIZE; - }
atomic_set(&sc->wow_sleep_proc_intr, -1); atomic_set(&sc->wow_got_bmiss_intr, -1); - #endif
hw->queues = 4; @@@ -829,6 -831,10 +830,6 @@@ sc->ant_rx = hw->wiphy->available_antennas_rx; sc->ant_tx = hw->wiphy->available_antennas_tx;
-#ifdef CONFIG_ATH9K_RATE_CONTROL - hw->rate_control_algorithm = "ath9k_rate_control"; -#endif - if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &sc->sbands[IEEE80211_BAND_2GHZ]; diff --combined drivers/net/wireless/iwlwifi/dvm/rs.c index 10fbb17,94314a8..8fe76dc --- a/drivers/net/wireless/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/iwlwifi/dvm/rs.c @@@ -1088,7 -1088,7 +1088,7 @@@ done (priv->tm_fixed_rate != lq_sta->dbg_fixed_rate)) rs_program_fix_rate(priv, lq_sta); #endif - if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist) + if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist) rs_bt_update_lq(priv, ctx, lq_sta); }
@@@ -2799,7 -2799,7 +2799,7 @@@ static void rs_get_rate(void *priv_r, s info->control.rates[0].flags = 0; } info->control.rates[0].idx = rate_idx; - + info->control.rates[0].count = 1; }
static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta, @@@ -3064,11 -3064,11 +3064,11 @@@ static void rs_fill_link_cmd(struct iwl * overwrite if needed, pass aggregation time limit * to uCode in uSec */ - if (priv && priv->cfg->bt_params && - priv->cfg->bt_params->agg_time_limit && + if (priv && priv->lib->bt_params && + priv->lib->bt_params->agg_time_limit && priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) lq_cmd->agg_params.agg_time_limit = - cpu_to_le16(priv->cfg->bt_params->agg_time_limit); + cpu_to_le16(priv->lib->bt_params->agg_time_limit); }
static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) diff --combined drivers/net/wireless/iwlwifi/iwl-drv.c index 40fed1f,4f88613..2f690e5 --- a/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/iwlwifi/iwl-drv.c @@@ -1000,12 -1000,10 +1000,12 @@@ static void iwl_req_fw_callback(const s */ if (load_module) { err = request_module("%s", op->name); +#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR if (err) IWL_ERR(drv, "failed to load module %s (error %d), is dynamic loading enabled?\n", op->name, err); +#endif } return;
@@@ -1236,6 -1234,9 +1236,9 @@@ MODULE_PARM_DESC(wd_disable "Disable stuck queue watchdog timer 0=system default, " "1=disable, 2=enable (default: 0)");
+ module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, S_IRUGO); + MODULE_PARM_DESC(nvm_file, "NVM file name"); + /* * set bt_coex_active to true, uCode will do kill/defer * every time the priority line is asserted (BT is sending signals on the diff --combined drivers/net/wireless/iwlwifi/mvm/rs.c index b99fe31,d6beec7..31587a3 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@@ -401,6 -401,17 +401,17 @@@ static int rs_tl_turn_on_agg_for_tid(st
load = rs_tl_get_load(lq_data, tid);
+ /* + * Don't create TX aggregation sessions when in high + * BT traffic, as they would just be disrupted by BT. + */ + if (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2) { + IWL_DEBUG_COEX(mvm, "BT traffic (%d), no aggregation allowed\n", + BT_MBOX_MSG(&mvm->last_bt_notif, + 3, TRAFFIC_LOAD)); + return ret; + } + if ((iwlwifi_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) { IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid); @@@ -1519,6 -1530,29 +1530,29 @@@ static int rs_move_siso_to_other(struc u8 update_search_tbl_counter = 0; int ret;
+ switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) { + case IWL_BT_COEX_TRAFFIC_LOAD_NONE: + /* nothing */ + break; + case IWL_BT_COEX_TRAFFIC_LOAD_LOW: + /* avoid antenna B unless MIMO */ + if (tbl->action == IWL_SISO_SWITCH_ANTENNA2) + tbl->action = IWL_SISO_SWITCH_MIMO2_AB; + break; + case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: + case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: + /* avoid antenna B and MIMO */ + valid_tx_ant = + first_antenna(iwl_fw_valid_tx_ant(mvm->fw)); + if (tbl->action != IWL_SISO_SWITCH_ANTENNA1) + tbl->action = IWL_SISO_SWITCH_ANTENNA1; + break; + default: + IWL_ERR(mvm, "Invalid BT load %d", + BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)); + break; + } + start_action = tbl->action; while (1) { lq_sta->action_counter++; @@@ -1532,7 -1566,9 +1566,9 @@@ tx_chains_num <= 2)) break;
- if (window->success_ratio >= IWL_RS_GOOD_RATIO) + if (window->success_ratio >= IWL_RS_GOOD_RATIO && + BT_MBOX_MSG(&mvm->last_bt_notif, 3, + TRAFFIC_LOAD) == 0) break;
memcpy(search_tbl, tbl, sz); @@@ -1654,6 -1690,28 +1690,28 @@@ static int rs_move_mimo2_to_other(struc u8 update_search_tbl_counter = 0; int ret;
+ switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) { + case IWL_BT_COEX_TRAFFIC_LOAD_NONE: + /* nothing */ + break; + case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: + case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: + /* avoid antenna B and MIMO */ + if (tbl->action != IWL_MIMO2_SWITCH_SISO_A) + tbl->action = IWL_MIMO2_SWITCH_SISO_A; + break; + case IWL_BT_COEX_TRAFFIC_LOAD_LOW: + /* avoid antenna B unless MIMO */ + if (tbl->action == IWL_MIMO2_SWITCH_SISO_B || + tbl->action == IWL_MIMO2_SWITCH_SISO_C) + tbl->action = IWL_MIMO2_SWITCH_SISO_A; + break; + default: + IWL_ERR(mvm, "Invalid BT load %d", + BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)); + break; + } + start_action = tbl->action; while (1) { lq_sta->action_counter++; @@@ -1791,6 -1849,28 +1849,28 @@@ static int rs_move_mimo3_to_other(struc int ret; u8 update_search_tbl_counter = 0;
+ switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) { + case IWL_BT_COEX_TRAFFIC_LOAD_NONE: + /* nothing */ + break; + case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: + case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: + /* avoid antenna B and MIMO */ + if (tbl->action != IWL_MIMO3_SWITCH_SISO_A) + tbl->action = IWL_MIMO3_SWITCH_SISO_A; + break; + case IWL_BT_COEX_TRAFFIC_LOAD_LOW: + /* avoid antenna B unless MIMO */ + if (tbl->action == IWL_MIMO3_SWITCH_SISO_B || + tbl->action == IWL_MIMO3_SWITCH_SISO_C) + tbl->action = IWL_MIMO3_SWITCH_SISO_A; + break; + default: + IWL_ERR(mvm, "Invalid BT load %d", + BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)); + break; + } + start_action = tbl->action; while (1) { lq_sta->action_counter++; @@@ -2302,6 -2382,32 +2382,32 @@@ static void rs_rate_scale_perform(struc (current_tpt > (100 * tbl->expected_tpt[low])))) scale_action = 0;
+ if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= + IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && + (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) { + if (lq_sta->last_bt_traffic > + BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) { + /* + * don't set scale_action, don't want to scale up if + * the rate scale doesn't otherwise think that is a + * good idea. + */ + } else if (lq_sta->last_bt_traffic <= + BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) { + scale_action = -1; + } + } + lq_sta->last_bt_traffic = + BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD); + + if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= + IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && + (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) { + /* search for a new modulation */ + rs_stay_in_table(lq_sta, true); + goto lq_update; + } + switch (scale_action) { case -1: /* Decrease starting rate, update uCode's rate table */ @@@ -2546,7 -2652,6 +2652,7 @@@ static void rs_get_rate(void *mvm_r, st info->control.rates[0].flags = 0; } info->control.rates[0].idx = rate_idx; + info->control.rates[0].count = 1; }
static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, @@@ -2783,6 -2888,13 +2889,13 @@@ static void rs_fill_link_cmd(struct iwl
lq_cmd->agg_time_limit = cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); + + /* + * overwrite if needed, pass aggregation time limit + * to uCode in uSec - This is racy - but heh, at least it helps... + */ + if (mvm && BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2) + lq_cmd->agg_time_limit = cpu_to_le16(1200); }
static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) @@@ -3081,3 -3193,29 +3194,29 @@@ void iwl_mvm_rate_control_unregister(vo { ieee80211_rate_control_unregister(&rs_mvm_ops); } + + /** + * iwl_mvm_tx_protection - Gets LQ command, change it to enable/disable + * Tx protection, according to this rquest and previous requests, + * and send the LQ command. + * @lq: The LQ command + * @mvmsta: The station + * @enable: Enable Tx protection? + */ + int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, + struct iwl_mvm_sta *mvmsta, bool enable) + { + lockdep_assert_held(&mvm->mutex); + + if (enable) { + if (mvmsta->tx_protection == 0) + lq->flags |= LQ_FLAG_SET_STA_TLC_RTS_MSK; + mvmsta->tx_protection++; + } else { + mvmsta->tx_protection--; + if (mvmsta->tx_protection == 0) + lq->flags &= ~LQ_FLAG_SET_STA_TLC_RTS_MSK; + } + + return iwl_mvm_send_lq_cmd(mvm, lq, CMD_ASYNC, false); + } diff --combined drivers/net/wireless/iwlwifi/mvm/tx.c index 48c1891,a830eb6..b9ba4e7 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@@ -175,13 -175,12 +175,13 @@@ static void iwl_mvm_set_tx_cmd_rate(str * table is controlled by LINK_QUALITY commands */
- if (ieee80211_is_data(fc)) { + if (ieee80211_is_data(fc) && sta) { tx_cmd->initial_rate_index = 0; tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); return; } else if (ieee80211_is_back_req(fc)) { - tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); + tx_cmd->tx_flags |= + cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR); }
/* HT rate doesn't make sense for a non data frame */ diff --combined drivers/net/wireless/rt2x00/rt2800lib.c index 72f32e5,ead3a3e..3aa30dd --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@@ -840,7 -840,7 +840,7 @@@ static inline void rt2800_clear_beacon_ unsigned int beacon_base) { int i; - const int txwi_desc_size = rt2x00dev->ops->bcn->winfo_size; + const int txwi_desc_size = rt2x00dev->bcn->winfo_size;
/* * For the Beacon base registers we only need to clear @@@ -3027,26 -3027,19 +3027,26 @@@ static void rt2800_config_txpower(struc * TODO: we do not use +6 dBm option to do not increase power beyond * regulatory limit, however this could be utilized for devices with * CAPABILITY_POWER_LIMIT. - */ - rt2800_bbp_read(rt2x00dev, 1, &r1); - if (delta <= -12) { - power_ctrl = 2; - delta += 12; - } else if (delta <= -6) { - power_ctrl = 1; - delta += 6; - } else { - power_ctrl = 0; + * + * TODO: add different temperature compensation code for RT3290 & RT5390 + * to allow to use BBP_R1 for those chips. + */ + if (!rt2x00_rt(rt2x00dev, RT3290) && + !rt2x00_rt(rt2x00dev, RT5390)) { + rt2800_bbp_read(rt2x00dev, 1, &r1); + if (delta <= -12) { + power_ctrl = 2; + delta += 12; + } else if (delta <= -6) { + power_ctrl = 1; + delta += 6; + } else { + power_ctrl = 0; + } + rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl); + rt2800_bbp_write(rt2x00dev, 1, r1); } - rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl); - rt2800_bbp_write(rt2x00dev, 1, r1); + offset = TX_PWR_CFG_0;
for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) { @@@ -3960,379 -3953,577 +3960,577 @@@ static void rt2800_init_bbp_early(struc rt2800_bbp_write(rt2x00dev, 106, 0x35); }
- static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev) + static void rt2800_disable_unused_dac_adc(struct rt2x00_dev *rt2x00dev) { u16 eeprom; u8 value;
- rt2800_init_bbp_early(rt2x00dev); + rt2800_bbp_read(rt2x00dev, 138, &value); + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); + if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1) + value |= 0x20; + if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1) + value &= ~0x02; + rt2800_bbp_write(rt2x00dev, 138, value); + }
- rt2800_bbp_read(rt2x00dev, 105, &value); - rt2x00_set_field8(&value, BBP105_MLD, - rt2x00dev->default_ant.rx_chain_num == 2); - rt2800_bbp_write(rt2x00dev, 105, value); + static void rt2800_init_bbp_305x_soc(struct rt2x00_dev *rt2x00dev) + { + rt2800_bbp_write(rt2x00dev, 31, 0x08); + + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); + + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x10); + + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + + rt2800_bbp_write(rt2x00dev, 78, 0x0e); + rt2800_bbp_write(rt2x00dev, 80, 0x08); + + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + rt2800_bbp_write(rt2x00dev, 84, 0x99); + + rt2800_bbp_write(rt2x00dev, 86, 0x00); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x00); + + rt2800_bbp_write(rt2x00dev, 103, 0xc0); + + rt2800_bbp_write(rt2x00dev, 105, 0x01); + + rt2800_bbp_write(rt2x00dev, 106, 0x35); + } + + static void rt2800_init_bbp_28xx(struct rt2x00_dev *rt2x00dev) + { + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); + + if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) { + rt2800_bbp_write(rt2x00dev, 69, 0x16); + rt2800_bbp_write(rt2x00dev, 73, 0x12); + } else { + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x10); + } + + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + + rt2800_bbp_write(rt2x00dev, 81, 0x37); + + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D)) + rt2800_bbp_write(rt2x00dev, 84, 0x19); + else + rt2800_bbp_write(rt2x00dev, 84, 0x99); + + rt2800_bbp_write(rt2x00dev, 86, 0x00); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x00); + + rt2800_bbp_write(rt2x00dev, 103, 0x00); + + rt2800_bbp_write(rt2x00dev, 105, 0x05); + + rt2800_bbp_write(rt2x00dev, 106, 0x35); + } + + static void rt2800_init_bbp_30xx(struct rt2x00_dev *rt2x00dev) + { + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); + + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x10); + + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + + rt2800_bbp_write(rt2x00dev, 79, 0x13); + rt2800_bbp_write(rt2x00dev, 80, 0x05); + rt2800_bbp_write(rt2x00dev, 81, 0x33); + + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + rt2800_bbp_write(rt2x00dev, 84, 0x99); + + rt2800_bbp_write(rt2x00dev, 86, 0x00); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x00); + + if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) || + rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) || + rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E)) + rt2800_bbp_write(rt2x00dev, 103, 0xc0); + else + rt2800_bbp_write(rt2x00dev, 103, 0x00); + + rt2800_bbp_write(rt2x00dev, 105, 0x05); + + rt2800_bbp_write(rt2x00dev, 106, 0x35); + + if (rt2x00_rt(rt2x00dev, RT3071) || + rt2x00_rt(rt2x00dev, RT3090)) + rt2800_disable_unused_dac_adc(rt2x00dev); + } + + static void rt2800_init_bbp_3290(struct rt2x00_dev *rt2x00dev) + { + u8 value;
rt2800_bbp4_mac_if_ctrl(rt2x00dev);
- rt2800_bbp_write(rt2x00dev, 20, 0x06); rt2800_bbp_write(rt2x00dev, 31, 0x08); - rt2800_bbp_write(rt2x00dev, 65, 0x2C); - rt2800_bbp_write(rt2x00dev, 68, 0xDD); - rt2800_bbp_write(rt2x00dev, 69, 0x1A); - rt2800_bbp_write(rt2x00dev, 70, 0x05); + + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); + + rt2800_bbp_write(rt2x00dev, 68, 0x0b); + + rt2800_bbp_write(rt2x00dev, 69, 0x12); rt2800_bbp_write(rt2x00dev, 73, 0x13); - rt2800_bbp_write(rt2x00dev, 74, 0x0F); - rt2800_bbp_write(rt2x00dev, 75, 0x4F); + rt2800_bbp_write(rt2x00dev, 75, 0x46); + rt2800_bbp_write(rt2x00dev, 76, 0x28); + + rt2800_bbp_write(rt2x00dev, 77, 0x58); + + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + + rt2800_bbp_write(rt2x00dev, 74, 0x0b); + rt2800_bbp_write(rt2x00dev, 79, 0x18); + rt2800_bbp_write(rt2x00dev, 80, 0x09); + rt2800_bbp_write(rt2x00dev, 81, 0x33); + + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x7a); + + rt2800_bbp_write(rt2x00dev, 84, 0x9a); + + rt2800_bbp_write(rt2x00dev, 86, 0x38); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x02); + + rt2800_bbp_write(rt2x00dev, 103, 0xc0); + + rt2800_bbp_write(rt2x00dev, 104, 0x92); + + rt2800_bbp_write(rt2x00dev, 105, 0x1c); + + rt2800_bbp_write(rt2x00dev, 106, 0x03); + + rt2800_bbp_write(rt2x00dev, 128, 0x12); + + rt2800_bbp_write(rt2x00dev, 67, 0x24); + rt2800_bbp_write(rt2x00dev, 143, 0x04); + rt2800_bbp_write(rt2x00dev, 142, 0x99); + rt2800_bbp_write(rt2x00dev, 150, 0x30); + rt2800_bbp_write(rt2x00dev, 151, 0x2e); + rt2800_bbp_write(rt2x00dev, 152, 0x20); + rt2800_bbp_write(rt2x00dev, 153, 0x34); + rt2800_bbp_write(rt2x00dev, 154, 0x40); + rt2800_bbp_write(rt2x00dev, 155, 0x3b); + rt2800_bbp_write(rt2x00dev, 253, 0x04); + + rt2800_bbp_read(rt2x00dev, 47, &value); + rt2x00_set_field8(&value, BBP47_TSSI_ADC6, 1); + rt2800_bbp_write(rt2x00dev, 47, value); + + /* Use 5-bit ADC for Acquisition and 8-bit ADC for data */ + rt2800_bbp_read(rt2x00dev, 3, &value); + rt2x00_set_field8(&value, BBP3_ADC_MODE_SWITCH, 1); + rt2x00_set_field8(&value, BBP3_ADC_INIT_MODE, 1); + rt2800_bbp_write(rt2x00dev, 3, value); + } + + static void rt2800_init_bbp_3352(struct rt2x00_dev *rt2x00dev) + { + rt2800_bbp_write(rt2x00dev, 3, 0x00); + rt2800_bbp_write(rt2x00dev, 4, 0x50); + + rt2800_bbp_write(rt2x00dev, 31, 0x08); + + rt2800_bbp_write(rt2x00dev, 47, 0x48); + + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38); + + rt2800_bbp_write(rt2x00dev, 68, 0x0b); + + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x13); + rt2800_bbp_write(rt2x00dev, 75, 0x46); rt2800_bbp_write(rt2x00dev, 76, 0x28); + rt2800_bbp_write(rt2x00dev, 77, 0x59); - rt2800_bbp_write(rt2x00dev, 84, 0x9A); + + rt2800_bbp_write(rt2x00dev, 70, 0x0a); + + rt2800_bbp_write(rt2x00dev, 78, 0x0e); + rt2800_bbp_write(rt2x00dev, 80, 0x08); + rt2800_bbp_write(rt2x00dev, 81, 0x37); + + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + rt2800_bbp_write(rt2x00dev, 84, 0x99); + rt2800_bbp_write(rt2x00dev, 86, 0x38); + rt2800_bbp_write(rt2x00dev, 88, 0x90); + rt2800_bbp_write(rt2x00dev, 91, 0x04); + rt2800_bbp_write(rt2x00dev, 92, 0x02); - rt2800_bbp_write(rt2x00dev, 95, 0x9a); - rt2800_bbp_write(rt2x00dev, 98, 0x12); - rt2800_bbp_write(rt2x00dev, 103, 0xC0); + + rt2800_bbp_write(rt2x00dev, 103, 0xc0); + rt2800_bbp_write(rt2x00dev, 104, 0x92); - /* FIXME BBP105 owerwrite */ - rt2800_bbp_write(rt2x00dev, 105, 0x3C); - rt2800_bbp_write(rt2x00dev, 106, 0x35); 
- rt2800_bbp_write(rt2x00dev, 128, 0x12); - rt2800_bbp_write(rt2x00dev, 134, 0xD0); - rt2800_bbp_write(rt2x00dev, 135, 0xF6); - rt2800_bbp_write(rt2x00dev, 137, 0x0F);
- /* Initialize GLRT (Generalized Likehood Radio Test) */ - rt2800_init_bbp_5592_glrt(rt2x00dev); + rt2800_bbp_write(rt2x00dev, 105, 0x34); + + rt2800_bbp_write(rt2x00dev, 106, 0x05); + + rt2800_bbp_write(rt2x00dev, 120, 0x50); + + rt2800_bbp_write(rt2x00dev, 137, 0x0f); + + rt2800_bbp_write(rt2x00dev, 163, 0xbd); + /* Set ITxBF timeout to 0x9c40=1000msec */ + rt2800_bbp_write(rt2x00dev, 179, 0x02); + rt2800_bbp_write(rt2x00dev, 180, 0x00); + rt2800_bbp_write(rt2x00dev, 182, 0x40); + rt2800_bbp_write(rt2x00dev, 180, 0x01); + rt2800_bbp_write(rt2x00dev, 182, 0x9c); + rt2800_bbp_write(rt2x00dev, 179, 0x00); + /* Reprogram the inband interface to put right values in RXWI */ + rt2800_bbp_write(rt2x00dev, 142, 0x04); + rt2800_bbp_write(rt2x00dev, 143, 0x3b); + rt2800_bbp_write(rt2x00dev, 142, 0x06); + rt2800_bbp_write(rt2x00dev, 143, 0xa0); + rt2800_bbp_write(rt2x00dev, 142, 0x07); + rt2800_bbp_write(rt2x00dev, 143, 0xa1); + rt2800_bbp_write(rt2x00dev, 142, 0x08); + rt2800_bbp_write(rt2x00dev, 143, 0xa2); + + rt2800_bbp_write(rt2x00dev, 148, 0xc8); + }
- rt2800_bbp4_mac_if_ctrl(rt2x00dev); + static void rt2800_init_bbp_3390(struct rt2x00_dev *rt2x00dev) + { + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); - div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY); - ant = (div_mode == 3) ? 1 : 0; - rt2800_bbp_read(rt2x00dev, 152, &value); - if (ant == 0) { - /* Main antenna */ - rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); - } else { - /* Auxiliary antenna */ - rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); - } - rt2800_bbp_write(rt2x00dev, 152, value); + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x10);
- if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) { - rt2800_bbp_read(rt2x00dev, 254, &value); - rt2x00_set_field8(&value, BBP254_BIT7, 1); - rt2800_bbp_write(rt2x00dev, 254, value); - } + rt2800_bbp_write(rt2x00dev, 70, 0x0a);
- rt2800_init_freq_calibration(rt2x00dev); + rt2800_bbp_write(rt2x00dev, 79, 0x13); + rt2800_bbp_write(rt2x00dev, 80, 0x05); + rt2800_bbp_write(rt2x00dev, 81, 0x33);
- rt2800_bbp_write(rt2x00dev, 84, 0x19); - if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + rt2800_bbp_write(rt2x00dev, 84, 0x99); + + rt2800_bbp_write(rt2x00dev, 86, 0x00); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x00); + + if (rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E)) rt2800_bbp_write(rt2x00dev, 103, 0xc0); + else + rt2800_bbp_write(rt2x00dev, 103, 0x00); + + rt2800_bbp_write(rt2x00dev, 105, 0x05); + + rt2800_bbp_write(rt2x00dev, 106, 0x35); + + rt2800_disable_unused_dac_adc(rt2x00dev); }
- static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) + static void rt2800_init_bbp_3572(struct rt2x00_dev *rt2x00dev) { - unsigned int i; - u16 eeprom; - u8 reg_id; - u8 value; + rt2800_bbp_write(rt2x00dev, 31, 0x08);
- if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) || - rt2800_wait_bbp_ready(rt2x00dev))) - return -EACCES; + rt2800_bbp_write(rt2x00dev, 65, 0x2c); + rt2800_bbp_write(rt2x00dev, 66, 0x38);
- if (rt2x00_rt(rt2x00dev, RT5592)) { - rt2800_init_bbp_5592(rt2x00dev); - return 0; - } + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x10);
- if (rt2x00_rt(rt2x00dev, RT3352)) { - rt2800_bbp_write(rt2x00dev, 3, 0x00); - rt2800_bbp_write(rt2x00dev, 4, 0x50); - } + rt2800_bbp_write(rt2x00dev, 70, 0x0a);
- if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp4_mac_if_ctrl(rt2x00dev); + rt2800_bbp_write(rt2x00dev, 79, 0x13); + rt2800_bbp_write(rt2x00dev, 80, 0x05); + rt2800_bbp_write(rt2x00dev, 81, 0x33);
- if (rt2800_is_305x_soc(rt2x00dev) || - rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT3572) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 31, 0x08); + rt2800_bbp_write(rt2x00dev, 82, 0x62); + + rt2800_bbp_write(rt2x00dev, 83, 0x6a); + + rt2800_bbp_write(rt2x00dev, 84, 0x99); + + rt2800_bbp_write(rt2x00dev, 86, 0x00); + + rt2800_bbp_write(rt2x00dev, 91, 0x04); + + rt2800_bbp_write(rt2x00dev, 92, 0x00); + + rt2800_bbp_write(rt2x00dev, 103, 0xc0); + + rt2800_bbp_write(rt2x00dev, 105, 0x05); + + rt2800_bbp_write(rt2x00dev, 106, 0x35); + + rt2800_disable_unused_dac_adc(rt2x00dev); + } + + static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev) + { + int ant, div_mode; + u16 eeprom; + u8 value; + + rt2800_bbp4_mac_if_ctrl(rt2x00dev);
- if (rt2x00_rt(rt2x00dev, RT3352)) - rt2800_bbp_write(rt2x00dev, 47, 0x48); + rt2800_bbp_write(rt2x00dev, 31, 0x08);
rt2800_bbp_write(rt2x00dev, 65, 0x2c); rt2800_bbp_write(rt2x00dev, 66, 0x38);
- if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 68, 0x0b); + rt2800_bbp_write(rt2x00dev, 68, 0x0b);
- if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) { - rt2800_bbp_write(rt2x00dev, 69, 0x16); - rt2800_bbp_write(rt2x00dev, 73, 0x12); - } else if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) { - rt2800_bbp_write(rt2x00dev, 69, 0x12); - rt2800_bbp_write(rt2x00dev, 73, 0x13); - rt2800_bbp_write(rt2x00dev, 75, 0x46); - rt2800_bbp_write(rt2x00dev, 76, 0x28); + rt2800_bbp_write(rt2x00dev, 69, 0x12); + rt2800_bbp_write(rt2x00dev, 73, 0x13); + rt2800_bbp_write(rt2x00dev, 75, 0x46); + rt2800_bbp_write(rt2x00dev, 76, 0x28);
- if (rt2x00_rt(rt2x00dev, RT3290)) - rt2800_bbp_write(rt2x00dev, 77, 0x58); - else - rt2800_bbp_write(rt2x00dev, 77, 0x59); - } else { - rt2800_bbp_write(rt2x00dev, 69, 0x12); - rt2800_bbp_write(rt2x00dev, 73, 0x10); - } + rt2800_bbp_write(rt2x00dev, 77, 0x59);
rt2800_bbp_write(rt2x00dev, 70, 0x0a);
- if (rt2x00_rt(rt2x00dev, RT3070) || - rt2x00_rt(rt2x00dev, RT3071) || - rt2x00_rt(rt2x00dev, RT3090) || - rt2x00_rt(rt2x00dev, RT3390) || - rt2x00_rt(rt2x00dev, RT3572) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) { - rt2800_bbp_write(rt2x00dev, 79, 0x13); - rt2800_bbp_write(rt2x00dev, 80, 0x05); - rt2800_bbp_write(rt2x00dev, 81, 0x33); - } else if (rt2800_is_305x_soc(rt2x00dev)) { - rt2800_bbp_write(rt2x00dev, 78, 0x0e); - rt2800_bbp_write(rt2x00dev, 80, 0x08); - } else if (rt2x00_rt(rt2x00dev, RT3290)) { - rt2800_bbp_write(rt2x00dev, 74, 0x0b); - rt2800_bbp_write(rt2x00dev, 79, 0x18); - rt2800_bbp_write(rt2x00dev, 80, 0x09); - rt2800_bbp_write(rt2x00dev, 81, 0x33); - } else if (rt2x00_rt(rt2x00dev, RT3352)) { - rt2800_bbp_write(rt2x00dev, 78, 0x0e); - rt2800_bbp_write(rt2x00dev, 80, 0x08); - rt2800_bbp_write(rt2x00dev, 81, 0x37); - } else { - rt2800_bbp_write(rt2x00dev, 81, 0x37); - } + rt2800_bbp_write(rt2x00dev, 79, 0x13); + rt2800_bbp_write(rt2x00dev, 80, 0x05); + rt2800_bbp_write(rt2x00dev, 81, 0x33);
rt2800_bbp_write(rt2x00dev, 82, 0x62); - if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 83, 0x7a); - else - rt2800_bbp_write(rt2x00dev, 83, 0x6a);
- if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D)) - rt2800_bbp_write(rt2x00dev, 84, 0x19); - else if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 84, 0x9a); - else - rt2800_bbp_write(rt2x00dev, 84, 0x99); + rt2800_bbp_write(rt2x00dev, 83, 0x7a);
- if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 86, 0x38); - else - rt2800_bbp_write(rt2x00dev, 86, 0x00); + rt2800_bbp_write(rt2x00dev, 84, 0x9a); + + rt2800_bbp_write(rt2x00dev, 86, 0x38);
- if (rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5392)) + if (rt2x00_rt(rt2x00dev, RT5392)) rt2800_bbp_write(rt2x00dev, 88, 0x90);
rt2800_bbp_write(rt2x00dev, 91, 0x04);
- if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 92, 0x02); - else - rt2800_bbp_write(rt2x00dev, 92, 0x00); + rt2800_bbp_write(rt2x00dev, 92, 0x02);
if (rt2x00_rt(rt2x00dev, RT5392)) { rt2800_bbp_write(rt2x00dev, 95, 0x9a); rt2800_bbp_write(rt2x00dev, 98, 0x12); }
- if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) || - rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) || - rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) || - rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) || - rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT3572) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392) || - rt2800_is_305x_soc(rt2x00dev)) - rt2800_bbp_write(rt2x00dev, 103, 0xc0); - else - rt2800_bbp_write(rt2x00dev, 103, 0x00); + rt2800_bbp_write(rt2x00dev, 103, 0xc0);
- if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT3352) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 104, 0x92); + rt2800_bbp_write(rt2x00dev, 104, 0x92);
- if (rt2800_is_305x_soc(rt2x00dev)) - rt2800_bbp_write(rt2x00dev, 105, 0x01); - else if (rt2x00_rt(rt2x00dev, RT3290)) - rt2800_bbp_write(rt2x00dev, 105, 0x1c); - else if (rt2x00_rt(rt2x00dev, RT3352)) - rt2800_bbp_write(rt2x00dev, 105, 0x34); - else if (rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 105, 0x3c); - else - rt2800_bbp_write(rt2x00dev, 105, 0x05); + rt2800_bbp_write(rt2x00dev, 105, 0x3c);
- if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390)) + if (rt2x00_rt(rt2x00dev, RT5390)) rt2800_bbp_write(rt2x00dev, 106, 0x03); else if (rt2x00_rt(rt2x00dev, RT5392)) rt2800_bbp_write(rt2x00dev, 106, 0x12); else - rt2800_bbp_write(rt2x00dev, 106, 0x35); - - if (rt2x00_rt(rt2x00dev, RT3352)) - rt2800_bbp_write(rt2x00dev, 120, 0x50); + WARN_ON(1);
- if (rt2x00_rt(rt2x00dev, RT3290) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) - rt2800_bbp_write(rt2x00dev, 128, 0x12); + rt2800_bbp_write(rt2x00dev, 128, 0x12);
if (rt2x00_rt(rt2x00dev, RT5392)) { rt2800_bbp_write(rt2x00dev, 134, 0xd0); rt2800_bbp_write(rt2x00dev, 135, 0xf6); }
- if (rt2x00_rt(rt2x00dev, RT3352)) - rt2800_bbp_write(rt2x00dev, 137, 0x0f); + rt2800_disable_unused_dac_adc(rt2x00dev);
- if (rt2x00_rt(rt2x00dev, RT3071) || - rt2x00_rt(rt2x00dev, RT3090) || - rt2x00_rt(rt2x00dev, RT3390) || - rt2x00_rt(rt2x00dev, RT3572) || - rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) { - rt2800_bbp_read(rt2x00dev, 138, &value); + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); + div_mode = rt2x00_get_field16(eeprom, + EEPROM_NIC_CONF1_ANT_DIVERSITY); + ant = (div_mode == 3) ? 1 : 0;
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); - if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1) - value |= 0x20; - if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1) - value &= ~0x02; + /* check if this is a Bluetooth combo card */ + if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) { + u32 reg;
- rt2800_bbp_write(rt2x00dev, 138, value); + rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg); + rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0); + rt2x00_set_field32(&reg, GPIO_CTRL_DIR6, 0); + rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 0); + rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 0); + if (ant == 0) + rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 1); + else if (ant == 1) + rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 1); + rt2800_register_write(rt2x00dev, GPIO_CTRL, reg); }
- if (rt2x00_rt(rt2x00dev, RT3290)) { - rt2800_bbp_write(rt2x00dev, 67, 0x24); - rt2800_bbp_write(rt2x00dev, 143, 0x04); - rt2800_bbp_write(rt2x00dev, 142, 0x99); - rt2800_bbp_write(rt2x00dev, 150, 0x30); - rt2800_bbp_write(rt2x00dev, 151, 0x2e); - rt2800_bbp_write(rt2x00dev, 152, 0x20); - rt2800_bbp_write(rt2x00dev, 153, 0x34); - rt2800_bbp_write(rt2x00dev, 154, 0x40); - rt2800_bbp_write(rt2x00dev, 155, 0x3b); - rt2800_bbp_write(rt2x00dev, 253, 0x04); - - rt2800_bbp_read(rt2x00dev, 47, &value); - rt2x00_set_field8(&value, BBP47_TSSI_ADC6, 1); - rt2800_bbp_write(rt2x00dev, 47, value); - - /* Use 5-bit ADC for Acquisition and 8-bit ADC for data */ - rt2800_bbp_read(rt2x00dev, 3, &value); - rt2x00_set_field8(&value, BBP3_ADC_MODE_SWITCH, 1); - rt2x00_set_field8(&value, BBP3_ADC_INIT_MODE, 1); - rt2800_bbp_write(rt2x00dev, 3, value); + /* This chip has hardware antenna diversity*/ + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) { + rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */ + rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */ + rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */ }
- if (rt2x00_rt(rt2x00dev, RT3352)) { - rt2800_bbp_write(rt2x00dev, 163, 0xbd); - /* Set ITxBF timeout to 0x9c40=1000msec */ - rt2800_bbp_write(rt2x00dev, 179, 0x02); - rt2800_bbp_write(rt2x00dev, 180, 0x00); - rt2800_bbp_write(rt2x00dev, 182, 0x40); - rt2800_bbp_write(rt2x00dev, 180, 0x01); - rt2800_bbp_write(rt2x00dev, 182, 0x9c); - rt2800_bbp_write(rt2x00dev, 179, 0x00); - /* Reprogram the inband interface to put right values in RXWI */ - rt2800_bbp_write(rt2x00dev, 142, 0x04); - rt2800_bbp_write(rt2x00dev, 143, 0x3b); - rt2800_bbp_write(rt2x00dev, 142, 0x06); - rt2800_bbp_write(rt2x00dev, 143, 0xa0); - rt2800_bbp_write(rt2x00dev, 142, 0x07); - rt2800_bbp_write(rt2x00dev, 143, 0xa1); - rt2800_bbp_write(rt2x00dev, 142, 0x08); - rt2800_bbp_write(rt2x00dev, 143, 0xa2); - - rt2800_bbp_write(rt2x00dev, 148, 0xc8); + rt2800_bbp_read(rt2x00dev, 152, &value); + if (ant == 0) + rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); + else + rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); + rt2800_bbp_write(rt2x00dev, 152, value); + + rt2800_init_freq_calibration(rt2x00dev); + } + + static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev) + { + int ant, div_mode; + u16 eeprom; + u8 value; + + rt2800_init_bbp_early(rt2x00dev); + + rt2800_bbp_read(rt2x00dev, 105, &value); + rt2x00_set_field8(&value, BBP105_MLD, + rt2x00dev->default_ant.rx_chain_num == 2); + rt2800_bbp_write(rt2x00dev, 105, value); + + rt2800_bbp4_mac_if_ctrl(rt2x00dev); + + rt2800_bbp_write(rt2x00dev, 20, 0x06); + rt2800_bbp_write(rt2x00dev, 31, 0x08); + rt2800_bbp_write(rt2x00dev, 65, 0x2C); + rt2800_bbp_write(rt2x00dev, 68, 0xDD); + rt2800_bbp_write(rt2x00dev, 69, 0x1A); + rt2800_bbp_write(rt2x00dev, 70, 0x05); + rt2800_bbp_write(rt2x00dev, 73, 0x13); + rt2800_bbp_write(rt2x00dev, 74, 0x0F); + rt2800_bbp_write(rt2x00dev, 75, 0x4F); + rt2800_bbp_write(rt2x00dev, 76, 0x28); + rt2800_bbp_write(rt2x00dev, 77, 0x59); + rt2800_bbp_write(rt2x00dev, 84, 0x9A); + rt2800_bbp_write(rt2x00dev, 86, 0x38); + rt2800_bbp_write(rt2x00dev, 88, 0x90); + rt2800_bbp_write(rt2x00dev, 91, 0x04); + rt2800_bbp_write(rt2x00dev, 92, 0x02); + rt2800_bbp_write(rt2x00dev, 95, 0x9a); + rt2800_bbp_write(rt2x00dev, 98, 0x12); + rt2800_bbp_write(rt2x00dev, 103, 0xC0); + rt2800_bbp_write(rt2x00dev, 104, 0x92); + /* FIXME BBP105 owerwrite */ + rt2800_bbp_write(rt2x00dev, 105, 0x3C); + rt2800_bbp_write(rt2x00dev, 106, 0x35); + rt2800_bbp_write(rt2x00dev, 128, 0x12); + rt2800_bbp_write(rt2x00dev, 134, 0xD0); + rt2800_bbp_write(rt2x00dev, 135, 0xF6); + rt2800_bbp_write(rt2x00dev, 137, 0x0F); + + /* Initialize GLRT (Generalized Likehood Radio Test) */ + rt2800_init_bbp_5592_glrt(rt2x00dev); + + rt2800_bbp4_mac_if_ctrl(rt2x00dev); + + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); + div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY); + ant = (div_mode == 3) ? 1 : 0; + rt2800_bbp_read(rt2x00dev, 152, &value); + if (ant == 0) { + /* Main antenna */ + rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); + } else { + /* Auxiliary antenna */ + rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); } + rt2800_bbp_write(rt2x00dev, 152, value);
- if (rt2x00_rt(rt2x00dev, RT5390) || - rt2x00_rt(rt2x00dev, RT5392)) { - int ant, div_mode; + if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) { + rt2800_bbp_read(rt2x00dev, 254, &value); + rt2x00_set_field8(&value, BBP254_BIT7, 1); + rt2800_bbp_write(rt2x00dev, 254, value); + }
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); - div_mode = rt2x00_get_field16(eeprom, - EEPROM_NIC_CONF1_ANT_DIVERSITY); - ant = (div_mode == 3) ? 1 : 0; + rt2800_init_freq_calibration(rt2x00dev);
- /* check if this is a Bluetooth combo card */ - if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) { - u32 reg; - - rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg); - rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0); - rt2x00_set_field32(&reg, GPIO_CTRL_DIR6, 0); - rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 0); - rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 0); - if (ant == 0) - rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 1); - else if (ant == 1) - rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 1); - rt2800_register_write(rt2x00dev, GPIO_CTRL, reg); - } + rt2800_bbp_write(rt2x00dev, 84, 0x19); + if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) + rt2800_bbp_write(rt2x00dev, 103, 0xc0); + }
- /* This chip has hardware antenna diversity*/ - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) { - rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */ - rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */ - rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */ - } + static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) + { + unsigned int i; + u16 eeprom; + u8 reg_id; + u8 value;
- rt2800_bbp_read(rt2x00dev, 152, &value); - if (ant == 0) - rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); - else - rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); - rt2800_bbp_write(rt2x00dev, 152, value); + if (rt2800_is_305x_soc(rt2x00dev)) + rt2800_init_bbp_305x_soc(rt2x00dev);
- rt2800_init_freq_calibration(rt2x00dev); + switch (rt2x00dev->chip.rt) { + case RT2860: + case RT2872: + case RT2883: + rt2800_init_bbp_28xx(rt2x00dev); + break; + case RT3070: + case RT3071: + case RT3090: + rt2800_init_bbp_30xx(rt2x00dev); + break; + case RT3290: + rt2800_init_bbp_3290(rt2x00dev); + break; + case RT3352: + rt2800_init_bbp_3352(rt2x00dev); + break; + case RT3390: + rt2800_init_bbp_3390(rt2x00dev); + break; + case RT3572: + rt2800_init_bbp_3572(rt2x00dev); + break; + case RT5390: + case RT5392: + rt2800_init_bbp_53xx(rt2x00dev); + break; + case RT5592: + rt2800_init_bbp_5592(rt2x00dev); + return; }
for (i = 0; i < EEPROM_BBP_SIZE; i++) { @@@ -4344,8 -4535,6 +4542,6 @@@ rt2800_bbp_write(rt2x00dev, reg_id, value); } } - - return 0; }
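To summarise the shape of the rt2800 change above: the single rt2800_init_bbp() full of rt2x00_rt()/revision checks is split into per-chip init helpers plus a dispatcher that switches on the chip id, with RT5592 returning before the common EEPROM BBP fix-ups. The toy dispatcher below mirrors only that control flow; the chip list and per-chip bodies are placeholders, not the real register writes.

#include <stdio.h>

enum rt_chip { RT2860, RT3290, RT3352, RT5390, RT5592 };

static void init_bbp_28xx(void) { puts("28xx BBP preset"); }
static void init_bbp_3290(void) { puts("3290 BBP preset"); }
static void init_bbp_3352(void) { puts("3352 BBP preset"); }
static void init_bbp_53xx(void) { puts("53xx BBP preset"); }
static void init_bbp_5592(void) { puts("5592 BBP preset"); }

static void init_bbp(enum rt_chip chip)
{
        switch (chip) {
        case RT2860:
                init_bbp_28xx();
                break;
        case RT3290:
                init_bbp_3290();
                break;
        case RT3352:
                init_bbp_3352();
                break;
        case RT5390:
                init_bbp_53xx();
                break;
        case RT5592:
                init_bbp_5592();
                return;              /* 5592 skips the common fix-ups, as above */
        }
        puts("apply EEPROM BBP fix-ups");
}

int main(void)
{
        init_bbp(RT3352);
        init_bbp(RT5592);
        return 0;
}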
static void rt2800_led_open_drain_enable(struct rt2x00_dev *rt2x00dev) @@@ -5196,9 -5385,11 +5392,11 @@@ int rt2800_enable_radio(struct rt2x00_d } msleep(1);
- if (unlikely(rt2800_init_bbp(rt2x00dev))) + if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) || + rt2800_wait_bbp_ready(rt2x00dev))) return -EIO;
+ rt2800_init_bbp(rt2x00dev); rt2800_init_rfcsr(rt2x00dev);
if (rt2x00_is_usb(rt2x00dev) && diff --combined drivers/net/xen-netback/netback.c index 8c20935,82576ff..a0b50ad --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@@ -47,6 -47,13 +47,13 @@@ #include <asm/xen/hypercall.h> #include <asm/xen/page.h>
+ /* Provide an option to disable split event channels at load time as + * event channels are limited resource. Split event channels are + * enabled by default. + */ + bool separate_tx_rx_irq = 1; + module_param(separate_tx_rx_irq, bool, 0644); + /* * This is the maximum slots a skb can have. If a guest sends a skb * which exceeds this limit it is considered malicious. @@@ -771,21 -778,19 +778,21 @@@ static void xen_netbk_rx_action(struct sco->meta_slots_used);
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
xenvif_notify_tx_completion(vif);
- xenvif_put(vif); + if (ret && list_empty(&vif->notify_list)) + list_add_tail(&vif->notify_list, &notify); + else + xenvif_put(vif); npo.meta_cons += sco->meta_slots_used; dev_kfree_skb(skb); }
list_for_each_entry_safe(vif, tmp, &notify, notify_list) { - notify_remote_via_irq(vif->irq); + notify_remote_via_irq(vif->rx_irq); list_del_init(&vif->notify_list); + xenvif_put(vif); }
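The two hunks above change xen_netbk_rx_action() so that a vif which needs an event is parked on a local notify list, keeping its reference, and only released after the (now RX-specific) interrupt has been raised, while vifs that need no notification are put immediately. Below is a rough user-space model of that deferral; the refcount, the per-vif "on list" flag and the list of completed skbs are simulated, and xenvif_put()/notify_remote_via_irq() clearly do more in the kernel.

#include <stdio.h>

struct vif {
        const char *name;
        int refcount;                /* one reference per queued skb, simulated */
        int on_notify_list;
        struct vif *next;
};

static void xenvif_put(struct vif *vif)
{
        vif->refcount--;
}

static void notify_remote_via_irq(struct vif *vif)
{
        printf("raise rx irq for %s\n", vif->name);
}

/* one completed skb: which vif it belongs to and whether the ring said a
 * notification is needed (the "ret" in the real code) */
struct done_skb {
        struct vif *vif;
        int need_notify;
};

static void rx_action(struct done_skb *done, int n)
{
        struct vif *notify = NULL;
        int i;

        for (i = 0; i < n; i++) {
                struct vif *vif = done[i].vif;

                if (done[i].need_notify && !vif->on_notify_list) {
                        vif->on_notify_list = 1;   /* keep the reference */
                        vif->next = notify;
                        notify = vif;
                } else {
                        xenvif_put(vif);           /* drop it right away */
                }
        }

        while (notify) {                           /* batched notifications */
                struct vif *vif = notify;

                notify = vif->next;
                notify_remote_via_irq(vif);
                vif->on_notify_list = 0;
                xenvif_put(vif);                   /* reference dropped last */
        }
}

int main(void)
{
        struct vif v = { "vif0.0", 2, 0, NULL };
        struct done_skb done[] = { { &v, 1 }, { &v, 1 } };

        rx_action(done, 2);
        printf("%s refcount=%d\n", v.name, v.refcount);
        return 0;
}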
/* More work to do? */ @@@ -1763,7 -1768,7 +1770,7 @@@ static void make_tx_response(struct xen vif->tx.rsp_prod_pvt = ++i; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify); if (notify) - notify_remote_via_irq(vif->irq); + notify_remote_via_irq(vif->tx_irq); }
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif, @@@ -1940,10 -1945,6 +1947,6 @@@ static int __init netback_init(void failed_init: while (--group >= 0) { struct xen_netbk *netbk = &xen_netbk[group]; - for (i = 0; i < MAX_PENDING_REQS; i++) { - if (netbk->mmap_pages[i]) - __free_page(netbk->mmap_pages[i]); - } del_timer(&netbk->net_timer); kthread_stop(netbk->task); } @@@ -1954,5 -1955,25 +1957,25 @@@
module_init(netback_init);
+ static void __exit netback_fini(void) + { + int i, j; + + xenvif_xenbus_fini(); + + for (i = 0; i < xen_netbk_group_nr; i++) { + struct xen_netbk *netbk = &xen_netbk[i]; + del_timer_sync(&netbk->net_timer); + kthread_stop(netbk->task); + for (j = 0; j < MAX_PENDING_REQS; j++) { + if (netbk->mmap_pages[i]) + __free_page(netbk->mmap_pages[i]); + } + } + + vfree(xen_netbk); + } + module_exit(netback_fini); + MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS("xen-backend:vif"); diff --combined include/linux/filter.h index f65f5a6,56a6b7f..a6ac848 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@@ -46,7 -46,6 +46,7 @@@ extern int sk_attach_filter(struct sock extern int sk_detach_filter(struct sock *sk); extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen); extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned len); +extern void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to);
#ifdef CONFIG_BPF_JIT #include <stdarg.h> @@@ -59,10 -58,10 +59,10 @@@ extern void bpf_jit_free(struct sk_filt static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, u32 pass, void *image) { - pr_err("flen=%u proglen=%u pass=%u image=%p\n", + pr_err("flen=%u proglen=%u pass=%u image=%pK\n", flen, proglen, pass, image); if (image) - print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS, + print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET, 16, 1, image, proglen, false); } #define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns) diff --combined include/linux/if_team.h index 16fae64,b662045..f6156f9 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@@ -69,6 -69,7 +69,7 @@@ struct team_port s32 priority; /* lower number ~ higher priority */ u16 queue_id; struct list_head qom_list; /* node in queue override mapping list */ + struct rcu_head rcu; long mode_priv[0]; };
@@@ -228,6 -229,16 +229,16 @@@ static inline struct team_port *team_ge return port; return NULL; } + + static inline int team_num_to_port_index(struct team *team, int num) + { + int en_port_count = ACCESS_ONCE(team->en_port_count); + + if (unlikely(!en_port_count)) + return 0; + return num % en_port_count; + } + static inline struct team_port *team_get_port_by_index_rcu(struct team *team, int port_index) { @@@ -249,12 -260,12 +260,12 @@@ team_get_first_port_txable_rcu(struct t return port; cur = port; list_for_each_entry_continue_rcu(cur, &team->port_list, list) - if (team_port_txable(port)) + if (team_port_txable(cur)) return cur; list_for_each_entry_rcu(cur, &team->port_list, list) { if (cur == port) break; - if (team_port_txable(port)) + if (team_port_txable(cur)) return cur; } return NULL; diff --combined include/net/ip_tunnels.h index 09b1360,40b4dfc..1be442f --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@@ -95,13 -95,13 +95,13 @@@ struct ip_tunnel_net int ip_tunnel_init(struct net_device *dev); void ip_tunnel_uninit(struct net_device *dev); void ip_tunnel_dellink(struct net_device *dev, struct list_head *head); -int __net_init ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, - struct rtnl_link_ops *ops, char *devname); +int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, + struct rtnl_link_ops *ops, char *devname);
-void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn); +void ip_tunnel_delete_net(struct ip_tunnel_net *itn);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, - const struct iphdr *tnl_params); + const struct iphdr *tnl_params, const u8 protocol); int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
diff --combined net/batman-adv/bat_iv_ogm.c index f680ee1,d07323b..f3810c1 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@@ -19,7 -19,6 +19,6 @@@
#include "main.h" #include "translation-table.h" - #include "ring_buffer.h" #include "originator.h" #include "routing.h" #include "gateway_common.h" @@@ -30,30 -29,56 +29,72 @@@ #include "network-coding.h"
/** + * batadv_dup_status - duplicate status + * @BATADV_NO_DUP: the packet is no duplicate + * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the + * neighbor) + * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor + * @BATADV_PROTECTED: originator is currently protected (after reboot) + */ +enum batadv_dup_status { + BATADV_NO_DUP = 0, + BATADV_ORIG_DUP, + BATADV_NEIGH_DUP, + BATADV_PROTECTED, +}; + ++/** + * batadv_ring_buffer_set - update the ring buffer with the given value + * @lq_recv: pointer to the ring buffer + * @lq_index: index to store the value at + * @value: value to store in the ring buffer + */ + static void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, + uint8_t value) + { + lq_recv[*lq_index] = value; + *lq_index = (*lq_index + 1) % BATADV_TQ_GLOBAL_WINDOW_SIZE; + } + + /** + * batadv_ring_buffer_avg - compute the average of all non-zero values stored + * in the given ring buffer + * @lq_recv: pointer to the ring buffer + * + * Returns computed average value. + */ + static uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[]) + { + const uint8_t *ptr; + uint16_t count = 0, i = 0, sum = 0; + + ptr = lq_recv; + + while (i < BATADV_TQ_GLOBAL_WINDOW_SIZE) { + if (*ptr != 0) { + count++; + sum += *ptr; + } + + i++; + ptr++; + } + + if (count == 0) + return 0; + + return (uint8_t)(sum / count); + } ++ static struct batadv_neigh_node * batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface, const uint8_t *neigh_addr, struct batadv_orig_node *orig_node, - struct batadv_orig_node *orig_neigh, __be32 seqno) + struct batadv_orig_node *orig_neigh) { struct batadv_neigh_node *neigh_node;
- neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, - ntohl(seqno)); + neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr); if (!neigh_node) goto out;
@@@ -428,18 -453,16 +469,16 @@@ static void batadv_iv_ogm_aggregate_new else skb_size = packet_len;
- skb_size += ETH_HLEN + NET_IP_ALIGN; + skb_size += ETH_HLEN;
- forw_packet_aggr->skb = dev_alloc_skb(skb_size); + forw_packet_aggr->skb = netdev_alloc_skb_ip_align(NULL, skb_size); if (!forw_packet_aggr->skb) { if (!own_packet) atomic_inc(&bat_priv->batman_queue_left); kfree(forw_packet_aggr); goto out; } - skb_reserve(forw_packet_aggr->skb, ETH_HLEN + NET_IP_ALIGN); - - INIT_HLIST_NODE(&forw_packet_aggr->list); + skb_reserve(forw_packet_aggr->skb, ETH_HLEN);
skb_buff = skb_put(forw_packet_aggr->skb, packet_len); forw_packet_aggr->packet_len = packet_len; @@@ -605,6 -628,41 +644,41 @@@ static void batadv_iv_ogm_forward(struc if_incoming, 0, batadv_iv_ogm_fwd_send_time()); }
+ /** + * batadv_iv_ogm_slide_own_bcast_window - bitshift own OGM broadcast windows for + * the given interface + * @hard_iface: the interface for which the windows have to be shifted + */ + static void + batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface) + { + struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + struct batadv_hashtable *hash = bat_priv->orig_hash; + struct hlist_head *head; + struct batadv_orig_node *orig_node; + unsigned long *word; + uint32_t i; + size_t word_index; + uint8_t *w; + + for (i = 0; i < hash->size; i++) { + head = &hash->table[i]; + + rcu_read_lock(); + hlist_for_each_entry_rcu(orig_node, head, hash_entry) { + spin_lock_bh(&orig_node->ogm_cnt_lock); + word_index = hard_iface->if_num * BATADV_NUM_WORDS; + word = &(orig_node->bcast_own[word_index]); + + batadv_bit_get_packet(bat_priv, word, 1, 0); + w = &orig_node->bcast_own_sum[hard_iface->if_num]; + *w = bitmap_weight(word, BATADV_TQ_LOCAL_WINDOW_SIZE); + spin_unlock_bh(&orig_node->ogm_cnt_lock); + } + rcu_read_unlock(); + } + } + static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); @@@ -649,7 -707,7 +723,7 @@@ batadv_ogm_packet->gw_flags = BATADV_NO_FLAGS; }
- batadv_slide_own_bcast_window(hard_iface); + batadv_iv_ogm_slide_own_bcast_window(hard_iface); batadv_iv_ogm_queue_add(bat_priv, hard_iface->bat_iv.ogm_buff, hard_iface->bat_iv.ogm_buff_len, hard_iface, 1, batadv_iv_ogm_emit_send_time(bat_priv)); @@@ -665,7 -723,7 +739,7 @@@ batadv_iv_ogm_orig_update(struct batadv const struct batadv_ogm_packet *batadv_ogm_packet, struct batadv_hard_iface *if_incoming, const unsigned char *tt_buff, - int is_duplicate) + enum batadv_dup_status dup_status) { struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; struct batadv_neigh_node *router = NULL; @@@ -685,13 -743,13 +759,13 @@@ if (batadv_compare_eth(neigh_addr, ethhdr->h_source) && tmp_neigh_node->if_incoming == if_incoming && atomic_inc_not_zero(&tmp_neigh_node->refcount)) { - if (neigh_node) + if (WARN(neigh_node, "too many matching neigh_nodes")) batadv_neigh_node_free_ref(neigh_node); neigh_node = tmp_neigh_node; continue; }
- if (is_duplicate) + if (dup_status != BATADV_NO_DUP) continue;
spin_lock_bh(&tmp_neigh_node->lq_update_lock); @@@ -711,8 -769,7 +785,7 @@@
neigh_node = batadv_iv_ogm_neigh_new(if_incoming, ethhdr->h_source, - orig_node, orig_tmp, - batadv_ogm_packet->seqno); + orig_node, orig_tmp);
batadv_orig_node_free_ref(orig_tmp); if (!neigh_node) @@@ -733,7 -790,7 +806,7 @@@ neigh_node->tq_avg = batadv_ring_buffer_avg(neigh_node->tq_recv); spin_unlock_bh(&neigh_node->lq_update_lock);
- if (!is_duplicate) { + if (dup_status == BATADV_NO_DUP) { orig_node->last_ttl = batadv_ogm_packet->header.ttl; neigh_node->last_ttl = batadv_ogm_packet->header.ttl; } @@@ -844,8 -901,7 +917,7 @@@ static int batadv_iv_ogm_calc_tq(struc neigh_node = batadv_iv_ogm_neigh_new(if_incoming, orig_neigh_node->orig, orig_neigh_node, - orig_neigh_node, - batadv_ogm_packet->seqno); + orig_neigh_node);
if (!neigh_node) goto out; @@@ -917,16 -973,15 +989,16 @@@ out return ret; }
-/* processes a batman packet for all interfaces, adjusts the sequence number and - * finds out whether it is a duplicate. - * returns: - * 1 the packet is a duplicate - * 0 the packet has not yet been received - * -1 the packet is old and has been received while the seqno window - * was protected. Caller should drop it. +/** + * batadv_iv_ogm_update_seqnos - process a batman packet for all interfaces, + * adjust the sequence number and find out whether it is a duplicate + * @ethhdr: ethernet header of the packet + * @batadv_ogm_packet: OGM packet to be considered + * @if_incoming: interface on which the OGM packet was received + * + * Returns duplicate status as enum batadv_dup_status */ -static int +static enum batadv_dup_status batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, const struct batadv_ogm_packet *batadv_ogm_packet, const struct batadv_hard_iface *if_incoming) @@@ -934,18 -989,17 +1006,18 @@@ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct batadv_orig_node *orig_node; struct batadv_neigh_node *tmp_neigh_node; - int is_duplicate = 0; + int is_dup; int32_t seq_diff; int need_update = 0; - int set_mark, ret = -1; + int set_mark; + enum batadv_dup_status ret = BATADV_NO_DUP; uint32_t seqno = ntohl(batadv_ogm_packet->seqno); uint8_t *neigh_addr; uint8_t packet_count;
orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig); if (!orig_node) - return 0; + return BATADV_NO_DUP;
spin_lock_bh(&orig_node->ogm_cnt_lock); seq_diff = seqno - orig_node->last_real_seqno; @@@ -953,29 -1007,22 +1025,29 @@@ /* signalize caller that the packet is to be dropped. */ if (!hlist_empty(&orig_node->neigh_list) && batadv_window_protected(bat_priv, seq_diff, - &orig_node->batman_seqno_reset)) + &orig_node->batman_seqno_reset)) { + ret = BATADV_PROTECTED; goto out; + }
rcu_read_lock(); hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) { - is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits, - orig_node->last_real_seqno, - seqno); - neigh_addr = tmp_neigh_node->addr; + is_dup = batadv_test_bit(tmp_neigh_node->real_bits, + orig_node->last_real_seqno, + seqno); + if (batadv_compare_eth(neigh_addr, ethhdr->h_source) && - tmp_neigh_node->if_incoming == if_incoming) + tmp_neigh_node->if_incoming == if_incoming) { set_mark = 1; - else + if (is_dup) + ret = BATADV_NEIGH_DUP; + } else { set_mark = 0; + if (is_dup && (ret != BATADV_NEIGH_DUP)) + ret = BATADV_ORIG_DUP; + }
/* if the window moved, set the update flag. */ need_update |= batadv_bit_get_packet(bat_priv, @@@ -995,6 -1042,8 +1067,6 @@@ orig_node->last_real_seqno = seqno; }
- ret = is_duplicate; - out: spin_unlock_bh(&orig_node->ogm_cnt_lock); batadv_orig_node_free_ref(orig_node); @@@ -1013,11 -1062,10 +1085,11 @@@ static void batadv_iv_ogm_process(cons struct batadv_neigh_node *orig_neigh_router = NULL; int has_directlink_flag; int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0; - int is_broadcast = 0, is_bidirect; + int is_bidirect; bool is_single_hop_neigh = false; bool is_from_best_next_hop = false; - int is_duplicate, sameseq, simlar_ttl; + int sameseq, similar_ttl; + enum batadv_dup_status dup_status; uint32_t if_incoming_seqno; uint8_t *prev_sender;
@@@ -1077,19 -1125,9 +1149,9 @@@ if (batadv_compare_eth(batadv_ogm_packet->prev_sender, hard_iface->net_dev->dev_addr)) is_my_oldorig = 1; } rcu_read_unlock();
- if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) { - batadv_dbg(BATADV_DBG_BATMAN, bat_priv, - "Drop packet: incompatible batman version (%i)\n", - batadv_ogm_packet->header.version); - return; - } - if (is_my_addr) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: received my own broadcast (sender: %pM)\n", @@@ -1097,13 -1135,6 +1159,6 @@@ return; }
- if (is_broadcast) { - batadv_dbg(BATADV_DBG_BATMAN, bat_priv, - "Drop packet: ignoring all packets with broadcast source addr (sender: %pM)\n", - ethhdr->h_source); - return; - } - if (is_my_orig) { unsigned long *word; int offset; @@@ -1161,10 -1192,10 +1216,10 @@@ if (!orig_node) return;
- is_duplicate = batadv_iv_ogm_update_seqnos(ethhdr, batadv_ogm_packet, - if_incoming); + dup_status = batadv_iv_ogm_update_seqnos(ethhdr, batadv_ogm_packet, + if_incoming);
- if (is_duplicate == -1) { + if (dup_status == BATADV_PROTECTED) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: packet within seqno protection time (sender: %pM)\n", ethhdr->h_source); @@@ -1234,12 -1265,11 +1289,12 @@@ * seqno and similar ttl as the non-duplicate */ sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno); - simlar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl; - if (is_bidirect && (!is_duplicate || (sameseq && simlar_ttl))) + similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl; + if (is_bidirect && ((dup_status == BATADV_NO_DUP) || + (sameseq && similar_ttl))) batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr, batadv_ogm_packet, if_incoming, - tt_buff, is_duplicate); + tt_buff, dup_status);
/* is single hop (direct) neighbor */ if (is_single_hop_neigh) { @@@ -1260,7 -1290,7 +1315,7 @@@ goto out_neigh; }
- if (is_duplicate) { + if (dup_status == BATADV_NEIGH_DUP) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "Drop packet: duplicate packet received\n"); goto out_neigh; @@@ -1312,7 -1342,7 +1367,7 @@@ static int batadv_iv_ogm_receive(struc skb->len + ETH_HLEN);
packet_len = skb_headlen(skb); - ethhdr = (struct ethhdr *)skb_mac_header(skb); + ethhdr = eth_hdr(skb); packet_buff = skb->data; batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff;
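The two ring-buffer helpers folded into bat_iv_ogm.c above (batadv_ring_buffer_set and batadv_ring_buffer_avg) are small enough to exercise on their own. The standalone program below reproduces their logic with an arbitrary window size and made-up link-quality samples, showing how the write index wraps around and how empty (zero) slots are excluded from the average; it is a demo, not the batman-adv code path.

/* Standalone demo of the ring-buffer set/avg logic shown in the
 * bat_iv_ogm.c hunks above; the window size here is arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

#define WINDOW_SIZE 8	/* stands in for BATADV_TQ_GLOBAL_WINDOW_SIZE */

static void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
			    uint8_t value)
{
	lq_recv[*lq_index] = value;
	*lq_index = (*lq_index + 1) % WINDOW_SIZE;	/* wrap around */
}

static uint8_t ring_buffer_avg(const uint8_t lq_recv[])
{
	uint16_t count = 0, sum = 0;
	int i;

	for (i = 0; i < WINDOW_SIZE; i++) {
		if (lq_recv[i] == 0)	/* empty slots don't dilute the avg */
			continue;
		count++;
		sum += lq_recv[i];
	}
	return count ? (uint8_t)(sum / count) : 0;
}

int main(void)
{
	uint8_t lq[WINDOW_SIZE] = { 0 };
	uint8_t idx = 0;
	uint8_t samples[] = { 200, 180, 220, 0, 240 };	/* 0 = lost OGM */
	unsigned int i;

	for (i = 0; i < sizeof(samples); i++)
		ring_buffer_set(lq, &idx, samples[i]);

	printf("avg tq = %u\n", ring_buffer_avg(lq));	/* 210: zeros skipped */
	return 0;
}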
diff --combined net/batman-adv/bridge_loop_avoidance.c index de27b31,e9d8e0b..e14531f --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@@ -180,7 -180,7 +180,7 @@@ static struct batadv_bla_clai */ static struct batadv_bla_backbone_gw * batadv_backbone_hash_find(struct batadv_priv *bat_priv, - uint8_t *addr, short vid) + uint8_t *addr, unsigned short vid) { struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; struct hlist_head *head; @@@ -257,7 -257,7 +257,7 @@@ batadv_bla_del_backbone_claims(struct b * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...) */ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac, - short vid, int claimtype) + unsigned short vid, int claimtype) { struct sk_buff *skb; struct ethhdr *ethhdr; @@@ -307,7 -307,8 +307,8 @@@ */ memcpy(ethhdr->h_source, mac, ETH_ALEN); batadv_dbg(BATADV_DBG_BLA, bat_priv, - "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid); + "bla_send_claim(): CLAIM %pM on vid %d\n", mac, + BATADV_PRINT_VID(vid)); break; case BATADV_CLAIM_TYPE_UNCLAIM: /* unclaim frame @@@ -316,7 -317,7 +317,7 @@@ memcpy(hw_src, mac, ETH_ALEN); batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, - vid); + BATADV_PRINT_VID(vid)); break; case BATADV_CLAIM_TYPE_ANNOUNCE: /* announcement frame @@@ -325,7 -326,7 +326,7 @@@ memcpy(hw_src, mac, ETH_ALEN); batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_send_claim(): ANNOUNCE of %pM on vid %d\n", - ethhdr->h_source, vid); + ethhdr->h_source, BATADV_PRINT_VID(vid)); break; case BATADV_CLAIM_TYPE_REQUEST: /* request frame @@@ -335,13 -336,15 +336,15 @@@ memcpy(hw_src, mac, ETH_ALEN); memcpy(ethhdr->h_dest, mac, ETH_ALEN); batadv_dbg(BATADV_DBG_BLA, bat_priv, - "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n", - ethhdr->h_source, ethhdr->h_dest, vid); + "bla_send_claim(): REQUEST of %pM to %pM on vid %d\n", + ethhdr->h_source, ethhdr->h_dest, + BATADV_PRINT_VID(vid)); break; }
- if (vid != -1) - skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), vid); + if (vid & BATADV_VLAN_HAS_TAG) + skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), + vid & VLAN_VID_MASK);
skb_reset_mac_header(skb); skb->protocol = eth_type_trans(skb, soft_iface); @@@ -367,7 -370,7 +370,7 @@@ out */ static struct batadv_bla_backbone_gw * batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig, - short vid, bool own_backbone) + unsigned short vid, bool own_backbone) { struct batadv_bla_backbone_gw *entry; struct batadv_orig_node *orig_node; @@@ -380,7 -383,7 +383,7 @@@
batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n", - orig, vid); + orig, BATADV_PRINT_VID(vid));
entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) @@@ -434,7 -437,7 +437,7 @@@ static void batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, - short vid) + unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw;
@@@ -456,7 -459,7 +459,7 @@@ */ static void batadv_bla_answer_request(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, - short vid) + unsigned short vid) { struct hlist_head *head; struct batadv_hashtable *hash; @@@ -547,7 -550,7 +550,7 @@@ static void batadv_bla_send_announce(st * @backbone_gw: the backbone gateway which claims it */ static void batadv_bla_add_claim(struct batadv_priv *bat_priv, - const uint8_t *mac, const short vid, + const uint8_t *mac, const unsigned short vid, struct batadv_bla_backbone_gw *backbone_gw) { struct batadv_bla_claim *claim; @@@ -572,7 -575,7 +575,7 @@@ atomic_set(&claim->refcount, 2); batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n", - mac, vid); + mac, BATADV_PRINT_VID(vid)); hash_added = batadv_hash_add(bat_priv->bla.claim_hash, batadv_compare_claim, batadv_choose_claim, claim, @@@ -591,7 -594,7 +594,7 @@@
batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_add_claim(): changing ownership for %pM, vid %d\n", - mac, vid); + mac, BATADV_PRINT_VID(vid));
claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); batadv_backbone_gw_free_ref(claim->backbone_gw); @@@ -611,7 -614,7 +614,7 @@@ claim_free_ref * given mac address and vid. */ static void batadv_bla_del_claim(struct batadv_priv *bat_priv, - const uint8_t *mac, const short vid) + const uint8_t *mac, const unsigned short vid) { struct batadv_bla_claim search_claim, *claim;
@@@ -622,7 -625,7 +625,7 @@@ return;
batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", - mac, vid); + mac, BATADV_PRINT_VID(vid));
batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim, batadv_choose_claim, claim); @@@ -637,7 -640,7 +640,7 @@@ /* check for ANNOUNCE frame, return 1 if handled */ static int batadv_handle_announce(struct batadv_priv *bat_priv, uint8_t *an_addr, uint8_t *backbone_addr, - short vid) + unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw; uint16_t crc; @@@ -658,12 -661,13 +661,13 @@@
batadv_dbg(BATADV_DBG_BLA, bat_priv, "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n", - vid, backbone_gw->orig, crc); + BATADV_PRINT_VID(vid), backbone_gw->orig, crc);
if (backbone_gw->crc != crc) { batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv, "handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n", - backbone_gw->orig, backbone_gw->vid, + backbone_gw->orig, + BATADV_PRINT_VID(backbone_gw->vid), backbone_gw->crc, crc);
batadv_bla_send_request(backbone_gw); @@@ -685,7 -689,7 +689,7 @@@ static int batadv_handle_request(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, uint8_t *backbone_addr, - struct ethhdr *ethhdr, short vid) + struct ethhdr *ethhdr, unsigned short vid) { /* check for REQUEST frame */ if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest)) @@@ -699,7 -703,7 +703,7 @@@
batadv_dbg(BATADV_DBG_BLA, bat_priv, "handle_request(): REQUEST vid %d (sent by %pM)...\n", - vid, ethhdr->h_source); + BATADV_PRINT_VID(vid), ethhdr->h_source);
batadv_bla_answer_request(bat_priv, primary_if, vid); return 1; @@@ -709,7 -713,7 +713,7 @@@ static int batadv_handle_unclaim(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, uint8_t *backbone_addr, - uint8_t *claim_addr, short vid) + uint8_t *claim_addr, unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw;
@@@ -727,7 -731,7 +731,7 @@@ /* this must be an UNCLAIM frame */ batadv_dbg(BATADV_DBG_BLA, bat_priv, "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n", - claim_addr, vid, backbone_gw->orig); + claim_addr, BATADV_PRINT_VID(vid), backbone_gw->orig);
batadv_bla_del_claim(bat_priv, claim_addr, vid); batadv_backbone_gw_free_ref(backbone_gw); @@@ -738,7 -742,7 +742,7 @@@ static int batadv_handle_claim(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, uint8_t *backbone_addr, uint8_t *claim_addr, - short vid) + unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw;
@@@ -861,14 -865,15 +865,15 @@@ static int batadv_bla_process_claim(str struct batadv_bla_claim_dst *bla_dst; uint16_t proto; int headlen; - short vid = -1; + unsigned short vid = BATADV_NO_FLAGS; int ret;
- ethhdr = (struct ethhdr *)skb_mac_header(skb); + ethhdr = eth_hdr(skb);
if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) { vhdr = (struct vlan_ethhdr *)ethhdr; vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK; + vid |= BATADV_VLAN_HAS_TAG; proto = ntohs(vhdr->h_vlan_encapsulated_proto); headlen = sizeof(*vhdr); } else { @@@ -885,7 -890,7 +890,7 @@@ return 0;
/* pskb_may_pull() may have modified the pointers, get ethhdr again */ - ethhdr = (struct ethhdr *)skb_mac_header(skb); + ethhdr = eth_hdr(skb); arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);
/* Check whether the ARP frame carries a valid @@@ -910,7 -915,8 +915,8 @@@ if (ret == 1) batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n", - ethhdr->h_source, vid, hw_src, hw_dst); + ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, + hw_dst);
if (ret < 2) return ret; @@@ -945,7 -951,7 +951,7 @@@
batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n", - ethhdr->h_source, vid, hw_src, hw_dst); + ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, hw_dst); return 1; }
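The bridge-loop-avoidance hunks above turn vid into an unsigned short that carries the 802.1Q id in its low bits plus a "has tag" flag, replacing the old signed short with -1 for untagged frames. The macro definitions themselves are not part of this diff, so the sketch below assumes a bit-15 flag and a print helper that maps untagged back to -1 purely to illustrate the encoding; the real BATADV_VLAN_HAS_TAG and BATADV_PRINT_VID may differ.

/* Illustrative encoding of "VLAN id + has-tag flag" in one unsigned short.
 * DEMO_VLAN_HAS_TAG / DEMO_PRINT_VID are assumptions made for this demo,
 * not copied from the batman-adv headers.
 */
#include <stdio.h>

#define VLAN_VID_MASK		0x0fff		/* low 12 bits: the 802.1Q id */
#define DEMO_VLAN_HAS_TAG	(1u << 15)	/* assumed flag bit */
#define DEMO_NO_FLAGS		0		/* untagged default */

/* Assumed print helper: tagged -> the id, untagged -> -1 for readability. */
#define DEMO_PRINT_VID(vid) \
	(((vid) & DEMO_VLAN_HAS_TAG) ? (int)((vid) & VLAN_VID_MASK) : -1)

int main(void)
{
	unsigned short vid = DEMO_NO_FLAGS;
	unsigned short tci = 0x2064;	/* pretend h_vlan_TCI: VLAN id 100 */

	/* Frame carries an 802.1Q header: keep the id and remember the tag. */
	vid = tci & VLAN_VID_MASK;
	vid |= DEMO_VLAN_HAS_TAG;
	printf("tagged frame: vid %d\n", DEMO_PRINT_VID(vid));

	/* Untagged frame: the flag stays clear, printing yields -1. */
	vid = DEMO_NO_FLAGS;
	printf("untagged frame: vid %d\n", DEMO_PRINT_VID(vid));
	return 0;
}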
@@@ -1067,10 -1073,6 +1073,10 @@@ void batadv_bla_update_orig_address(str group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN)); bat_priv->bla.claim_dest.group = group;
+ /* purge everything when bridge loop avoidance is turned off */ + if (!atomic_read(&bat_priv->bridge_loop_avoidance)) + oldif = NULL; + if (!oldif) { batadv_bla_purge_claims(bat_priv, NULL, 1); batadv_bla_purge_backbone_gw(bat_priv, 1); @@@ -1362,7 -1364,7 +1368,7 @@@ int batadv_bla_is_backbone_gw(struct sk struct ethhdr *ethhdr; struct vlan_ethhdr *vhdr; struct batadv_bla_backbone_gw *backbone_gw; - short vid = -1; + unsigned short vid = BATADV_NO_FLAGS;
if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance)) return 0; @@@ -1379,6 -1381,7 +1385,7 @@@
vhdr = (struct vlan_ethhdr *)(skb->data + hdr_size); vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK; + vid |= BATADV_VLAN_HAS_TAG; }
/* see if this originator is a backbone gw for this VLAN */ @@@ -1428,15 -1431,15 +1435,15 @@@ void batadv_bla_free(struct batadv_pri * returns 1, otherwise it returns 0 and the caller shall further * process the skb. */ - int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid, - bool is_bcast) + int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, + unsigned short vid, bool is_bcast) { struct ethhdr *ethhdr; struct batadv_bla_claim search_claim, *claim = NULL; struct batadv_hard_iface *primary_if; int ret;
- ethhdr = (struct ethhdr *)skb_mac_header(skb); + ethhdr = eth_hdr(skb);
primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) @@@ -1523,7 -1526,8 +1530,8 @@@ out * returns 1, otherwise it returns 0 and the caller shall further * process the skb. */ - int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid) + int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, + unsigned short vid) { struct ethhdr *ethhdr; struct batadv_bla_claim search_claim, *claim = NULL; @@@ -1543,7 -1547,7 +1551,7 @@@ if (batadv_bla_process_claim(bat_priv, primary_if, skb)) goto handled;
- ethhdr = (struct ethhdr *)skb_mac_header(skb); + ethhdr = eth_hdr(skb);
if (unlikely(atomic_read(&bat_priv->bla.num_requests))) /* don't allow broadcasts while requests are in flight */ @@@ -1627,8 -1631,8 +1635,8 @@@ int batadv_bla_claim_table_seq_print_te hlist_for_each_entry_rcu(claim, head, hash_entry) { is_own = batadv_compare_eth(claim->backbone_gw->orig, primary_addr); - seq_printf(seq, " * %pM on % 5d by %pM [%c] (%#.4x)\n", - claim->addr, claim->vid, + seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n", + claim->addr, BATADV_PRINT_VID(claim->vid), claim->backbone_gw->orig, (is_own ? 'x' : ' '), claim->backbone_gw->crc); @@@ -1680,10 -1684,10 +1688,10 @@@ int batadv_bla_backbone_table_seq_print if (is_own) continue;
- seq_printf(seq, - " * %pM on % 5d % 4i.%03is (%#.4x)\n", - backbone_gw->orig, backbone_gw->vid, - secs, msecs, backbone_gw->crc); + seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n", + backbone_gw->orig, + BATADV_PRINT_VID(backbone_gw->vid), secs, + msecs, backbone_gw->crc); } rcu_read_unlock(); } diff --combined net/ipv4/ip_tunnel.c index 7fa8f08,7c79cf8..e189db4 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@@ -487,7 -487,7 +487,7 @@@ drop EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, - const struct iphdr *tnl_params) + const struct iphdr *tnl_params, const u8 protocol) { struct ip_tunnel *tunnel = netdev_priv(dev); const struct iphdr *inner_iph; @@@ -670,7 -670,7 +670,7 @@@ iph->version = 4; iph->ihl = sizeof(struct iphdr) >> 2; iph->frag_off = df; - iph->protocol = tnl_params->protocol; + iph->protocol = protocol; iph->tos = ip_tunnel_ecn_encap(tos, inner_iph, skb); iph->daddr = fl4.daddr; iph->saddr = fl4.saddr; @@@ -853,7 -853,7 +853,7 @@@ void ip_tunnel_dellink(struct net_devic } EXPORT_SYMBOL_GPL(ip_tunnel_dellink);
-int __net_init ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, +int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id, struct rtnl_link_ops *ops, char *devname) { struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id); @@@ -899,7 -899,7 +899,7 @@@ static void ip_tunnel_destroy(struct ip unregister_netdevice_queue(itn->fb_tunnel_dev, head); }
-void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn) +void ip_tunnel_delete_net(struct ip_tunnel_net *itn) { LIST_HEAD(list);
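The ip_tunnel_xmit() change above makes the outer IP protocol an explicit argument instead of reading it back out of the cached template header, so one template can serve callers that encapsulate with different protocols. None of those callers appear in this diff; the fragment below is a hypothetical userspace model of the call shape only, with invented types and protocol constants.

/* Userspace model of the signature change above: the outer protocol is an
 * explicit argument rather than being taken from the cached template header.
 * Types and constants here are illustrative, not the kernel's.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_IPPROTO_IPIP	4
#define DEMO_IPPROTO_GRE	47

struct demo_iphdr {
	uint8_t  protocol;
	uint32_t saddr, daddr;
};

/* Stands in for ip_tunnel_xmit(skb, dev, tnl_params, protocol). */
static void demo_tunnel_xmit(const struct demo_iphdr *tnl_params,
			     uint8_t protocol)
{
	struct demo_iphdr outer = *tnl_params;	/* copy the template */

	outer.protocol = protocol;		/* caller picks the encap */
	printf("xmit outer header: proto %u\n", outer.protocol);
}

int main(void)
{
	/* One cached template, reused by different tunnel types. */
	struct demo_iphdr tmpl = { .protocol = 0, .saddr = 1, .daddr = 2 };

	demo_tunnel_xmit(&tmpl, DEMO_IPPROTO_GRE);	/* a GRE-like caller */
	demo_tunnel_xmit(&tmpl, DEMO_IPPROTO_IPIP);	/* an IPIP-like caller */
	return 0;
}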
diff --combined net/mac80211/cfg.c index 4fdb306e,3062210..a1c6e1c --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@@ -73,16 -73,19 +73,19 @@@ static int ieee80211_change_iface(struc struct ieee80211_local *local = sdata->local;
if (ieee80211_sdata_running(sdata)) { + u32 mask = MONITOR_FLAG_COOK_FRAMES | + MONITOR_FLAG_ACTIVE; + /* - * Prohibit MONITOR_FLAG_COOK_FRAMES to be - * changed while the interface is up. + * Prohibit MONITOR_FLAG_COOK_FRAMES and + * MONITOR_FLAG_ACTIVE to be changed while the + * interface is up. * Else we would need to add a lot of cruft * to update everything: * cooked_mntrs, monitor and all fif_* counters * reconfigure hardware */ - if ((*flags & MONITOR_FLAG_COOK_FRAMES) != - (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)) + if ((*flags & mask) != (sdata->u.mntr_flags & mask)) return -EBUSY;
ieee80211_adjust_monitor_flags(sdata, -1); @@@ -444,7 -447,7 +447,7 @@@ static void sta_set_sinfo(struct sta_in struct ieee80211_local *local = sdata->local; struct timespec uptime; u64 packets = 0; - int ac; + int i, ac;
sinfo->generation = sdata->local->sta_generation;
@@@ -488,6 -491,17 +491,17 @@@ sinfo->signal = (s8)sta->last_signal; sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal); } + if (sta->chains) { + sinfo->filled |= STATION_INFO_CHAIN_SIGNAL | + STATION_INFO_CHAIN_SIGNAL_AVG; + + sinfo->chains = sta->chains; + for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) { + sinfo->chain_signal[i] = sta->chain_signal_last[i]; + sinfo->chain_signal_avg[i] = + (s8) -ewma_read(&sta->chain_signal_avg[i]); + } + }
sta_set_rate_info_tx(sta, &sta->last_tx_rate, &sinfo->txrate); sta_set_rate_info_rx(sta, &sinfo->rxrate); @@@ -728,7 -742,7 +742,7 @@@ static void ieee80211_get_et_strings(st
if (sset == ETH_SS_STATS) { sz_sta_stats = sizeof(ieee80211_gstrings_sta_stats); - memcpy(data, *ieee80211_gstrings_sta_stats, sz_sta_stats); + memcpy(data, ieee80211_gstrings_sta_stats, sz_sta_stats); } drv_get_et_strings(sdata, sset, &(data[sz_sta_stats])); } @@@ -1057,12 -1071,6 +1071,12 @@@ static int ieee80211_stop_ap(struct wip clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
+ if (sdata->wdev.cac_started) { + cancel_delayed_work_sync(&sdata->dfs_cac_timer_work); + cfg80211_cac_event(sdata->dev, NL80211_RADAR_CAC_ABORTED, + GFP_KERNEL); + } + drv_stop_ap(sdata->local, sdata);
/* free all potentially still buffered bcast frames */ @@@ -1741,6 -1749,7 +1755,7 @@@ static int copy_mesh_setup(struct ieee8 ifmsh->mesh_pp_id = setup->path_sel_proto; ifmsh->mesh_pm_id = setup->path_metric; ifmsh->user_mpm = setup->user_mpm; + ifmsh->mesh_auth_id = setup->auth_id; ifmsh->security = IEEE80211_MESH_SEC_NONE; if (setup->is_authenticated) ifmsh->security |= IEEE80211_MESH_SEC_AUTHED; @@@ -2312,7 -2321,7 +2327,7 @@@ int __ieee80211_request_smps(struct iee enum ieee80211_smps_mode old_req; int err;
- lockdep_assert_held(&sdata->u.mgd.mtx); + lockdep_assert_held(&sdata->wdev.mtx);
old_req = sdata->u.mgd.req_smps; sdata->u.mgd.req_smps = smps_mode; @@@ -2369,9 -2378,9 +2384,9 @@@ static int ieee80211_set_power_mgmt(str local->dynamic_ps_forced_timeout = timeout;
/* no change, but if automatic follow powersave */ - mutex_lock(&sdata->u.mgd.mtx); + sdata_lock(sdata); __ieee80211_request_smps(sdata, sdata->u.mgd.req_smps); - mutex_unlock(&sdata->u.mgd.mtx); + sdata_unlock(sdata);
if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS) ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); diff --combined net/mac80211/ieee80211_i.h index 9ca8e32,9eed6f1..923e177 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@@ -394,7 -394,6 +394,6 @@@ struct ieee80211_if_managed bool nullfunc_failed; bool connection_loss;
- struct mutex mtx; struct cfg80211_bss *associated; struct ieee80211_mgd_auth_data *auth_data; struct ieee80211_mgd_assoc_data *assoc_data; @@@ -488,8 -487,6 +487,6 @@@ struct ieee80211_if_ibss { struct timer_list timer;
- struct mutex mtx; - unsigned long last_scan_completed;
u32 basic_rates; @@@ -580,8 -577,6 +577,6 @@@ struct ieee80211_if_mesh bool accepting_plinks; int num_gates; struct beacon_data __rcu *beacon; - /* just protects beacon updates for now */ - struct mutex mtx; const u8 *ie; u8 ie_len; enum { @@@ -778,6 -773,26 +773,26 @@@ struct ieee80211_sub_if_data *vif_to_sd return container_of(p, struct ieee80211_sub_if_data, vif); }
+ static inline void sdata_lock(struct ieee80211_sub_if_data *sdata) + __acquires(&sdata->wdev.mtx) + { + mutex_lock(&sdata->wdev.mtx); + __acquire(&sdata->wdev.mtx); + } + + static inline void sdata_unlock(struct ieee80211_sub_if_data *sdata) + __releases(&sdata->wdev.mtx) + { + mutex_unlock(&sdata->wdev.mtx); + __release(&sdata->wdev.mtx); + } + + static inline void + sdata_assert_lock(struct ieee80211_sub_if_data *sdata) + { + lockdep_assert_held(&sdata->wdev.mtx); + } + static inline enum ieee80211_band ieee80211_get_sdata_band(struct ieee80211_sub_if_data *sdata) { @@@ -1497,19 -1512,15 +1512,16 @@@ static inline void ieee80211_tx_skb(str ieee80211_tx_skb_tid(sdata, skb, 7); }
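The sdata_lock()/sdata_unlock()/sdata_assert_lock() helpers added above wrap the wdev mutex together with sparse acquire/release annotations and a lockdep assertion, so later hunks can take "the sdata lock" without spelling out wdev.mtx everywhere. The standalone sketch below mirrors that wrapper shape around a pthread mutex; the empty __acquires/__releases macros and the plain assert are stand-ins for sparse and lockdep_assert_held(), which only exist in a kernel build.

/* Userspace mirror of the lock-wrapper pattern above.
 * Build with: cc demo_lock.c -lpthread
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

#define __acquires(x)	/* sparse annotation, a no-op here */
#define __releases(x)	/* sparse annotation, a no-op here */

struct demo_sdata {
	pthread_mutex_t mtx;	/* plays the role of wdev.mtx */
	int locked;		/* only for the demo assertion */
};

static void sdata_lock(struct demo_sdata *sdata) __acquires(&sdata->mtx)
{
	pthread_mutex_lock(&sdata->mtx);
	sdata->locked = 1;
}

static void sdata_unlock(struct demo_sdata *sdata) __releases(&sdata->mtx)
{
	sdata->locked = 0;
	pthread_mutex_unlock(&sdata->mtx);
}

static void sdata_assert_lock(struct demo_sdata *sdata)
{
	assert(sdata->locked);	/* stand-in for lockdep_assert_held() */
}

static void do_protected_work(struct demo_sdata *sdata)
{
	sdata_assert_lock(sdata);	/* callee documents its requirement */
	printf("working under the sdata lock\n");
}

int main(void)
{
	struct demo_sdata sdata = { .mtx = PTHREAD_MUTEX_INITIALIZER };

	sdata_lock(&sdata);
	do_protected_work(&sdata);
	sdata_unlock(&sdata);
	return 0;
}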
-u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, bool action, +u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, struct ieee802_11_elems *elems, u64 filter, u32 crc); -static inline void ieee802_11_parse_elems(u8 *start, size_t len, bool action, +static inline void ieee802_11_parse_elems(const u8 *start, size_t len, + bool action, struct ieee802_11_elems *elems) { ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0); }
- u32 ieee80211_mandatory_rates(struct ieee80211_local *local, - enum ieee80211_band band); - void ieee80211_dynamic_ps_enable_work(struct work_struct *work); void ieee80211_dynamic_ps_disable_work(struct work_struct *work); void ieee80211_dynamic_ps_timer(unsigned long data); diff --combined net/mac80211/mlme.c index 741448b,f44f4ca..118540b --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@@ -91,41 -91,6 +91,6 @@@ MODULE_PARM_DESC(probe_wait_ms #define IEEE80211_SIGNAL_AVE_MIN_COUNT 4
/* - * All cfg80211 functions have to be called outside a locked - * section so that they can acquire a lock themselves... This - * is much simpler than queuing up things in cfg80211, but we - * do need some indirection for that here. - */ - enum rx_mgmt_action { - /* no action required */ - RX_MGMT_NONE, - - /* caller must call cfg80211_send_deauth() */ - RX_MGMT_CFG80211_DEAUTH, - - /* caller must call cfg80211_send_disassoc() */ - RX_MGMT_CFG80211_DISASSOC, - - /* caller must call cfg80211_send_rx_auth() */ - RX_MGMT_CFG80211_RX_AUTH, - - /* caller must call cfg80211_send_rx_assoc() */ - RX_MGMT_CFG80211_RX_ASSOC, - - /* caller must call cfg80211_send_assoc_timeout() */ - RX_MGMT_CFG80211_ASSOC_TIMEOUT, - - /* used when a processed beacon causes a deauth */ - RX_MGMT_CFG80211_TX_DEAUTH, - }; - - /* utils */ - static inline void ASSERT_MGD_MTX(struct ieee80211_if_managed *ifmgd) - { - lockdep_assert_held(&ifmgd->mtx); - } - - /* * We can have multiple work items (and connection probing) * scheduling this timer, but we need to take care to only * reschedule it when it should fire _earlier_ than it was @@@ -135,13 -100,14 +100,14 @@@ * has happened -- the work that runs from this timer will * do that. */ - static void run_again(struct ieee80211_if_managed *ifmgd, unsigned long timeout) + static void run_again(struct ieee80211_sub_if_data *sdata, + unsigned long timeout) { - ASSERT_MGD_MTX(ifmgd); + sdata_assert_lock(sdata);
- if (!timer_pending(&ifmgd->timer) || - time_before(timeout, ifmgd->timer.expires)) - mod_timer(&ifmgd->timer, timeout); + if (!timer_pending(&sdata->u.mgd.timer) || + time_before(timeout, sdata->u.mgd.timer.expires)) + mod_timer(&sdata->u.mgd.timer, timeout); }
void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata) @@@ -652,7 -618,7 +618,7 @@@ static void ieee80211_send_assoc(struc struct ieee80211_channel *chan; u32 rates = 0;
- lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata);
rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); @@@ -962,7 -928,7 +928,7 @@@ static void ieee80211_chswitch_work(str if (!ieee80211_sdata_running(sdata)) return;
- mutex_lock(&ifmgd->mtx); + sdata_lock(sdata); if (!ifmgd->associated) goto out;
@@@ -985,7 -951,7 +951,7 @@@ IEEE80211_QUEUE_STOP_REASON_CSA); out: ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED; - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); }
void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success) @@@ -1036,7 -1002,7 +1002,7 @@@ ieee80211_sta_process_chanswitch(struc const struct ieee80211_ht_operation *ht_oper; int secondary_channel_offset = -1;
- ASSERT_MGD_MTX(ifmgd); + sdata_assert_lock(sdata);
if (!cbss) return; @@@ -1390,6 -1356,9 +1356,9 @@@ static bool ieee80211_powersave_allowed IEEE80211_STA_CONNECTION_POLL)) return false;
+ if (!sdata->vif.bss_conf.dtim_period) + return false; + rcu_read_lock(); sta = sta_info_get(sdata, mgd->bssid); if (sta) @@@ -1842,7 -1811,7 +1811,7 @@@ static void ieee80211_set_disassoc(stru struct ieee80211_local *local = sdata->local; u32 changed = 0;
- ASSERT_MGD_MTX(ifmgd); + sdata_assert_lock(sdata);
if (WARN_ON_ONCE(tx && !frame_buf)) return; @@@ -2051,7 -2020,7 +2020,7 @@@ static void ieee80211_mgd_probe_ap_send }
ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); - run_again(ifmgd, ifmgd->probe_timeout); + run_again(sdata, ifmgd->probe_timeout); if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) ieee80211_flush_queues(sdata->local, sdata); } @@@ -2065,7 -2034,7 +2034,7 @@@ static void ieee80211_mgd_probe_ap(stru if (!ieee80211_sdata_running(sdata)) return;
- mutex_lock(&ifmgd->mtx); + sdata_lock(sdata);
if (!ifmgd->associated) goto out; @@@ -2119,7 -2088,7 +2088,7 @@@ ifmgd->probe_send_count = 0; ieee80211_mgd_probe_ap_send(sdata); out: - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); }
struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw, @@@ -2135,7 -2104,7 +2104,7 @@@ if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) return NULL;
- ASSERT_MGD_MTX(ifmgd); + sdata_assert_lock(sdata);
if (ifmgd->associated) cbss = ifmgd->associated; @@@ -2168,9 -2137,9 +2137,9 @@@ static void __ieee80211_disconnect(stru struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
- mutex_lock(&ifmgd->mtx); + sdata_lock(sdata); if (!ifmgd->associated) { - mutex_unlock(&ifmgd->mtx); + sdata_unlock(sdata); return; }
@@@ -2181,13 -2150,9 +2150,9 @@@ ieee80211_wake_queues_by_reason(&sdata->local->hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_CSA); - mutex_unlock(&ifmgd->mtx);
- /* - * must be outside lock due to cfg80211, - * but that's not a problem. - */ cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN); + sdata_unlock(sdata); }
static void ieee80211_beacon_connection_loss_work(struct work_struct *work) @@@ -2254,7 -2219,7 +2219,7 @@@ static void ieee80211_destroy_auth_data { struct ieee80211_mgd_auth_data *auth_data = sdata->u.mgd.auth_data;
- lockdep_assert_held(&sdata->u.mgd.mtx); + sdata_assert_lock(sdata);
if (!assoc) { sta_info_destroy_addr(sdata, auth_data->bss->bssid); @@@ -2295,27 -2260,26 +2260,26 @@@ static void ieee80211_auth_challenge(st auth_data->key_idx, tx_flags); }
- static enum rx_mgmt_action __must_check - ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, - struct ieee80211_mgmt *mgmt, size_t len) + static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 bssid[ETH_ALEN]; u16 auth_alg, auth_transaction, status_code; struct sta_info *sta;
- lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata);
if (len < 24 + 6) - return RX_MGMT_NONE; + return;
if (!ifmgd->auth_data || ifmgd->auth_data->done) - return RX_MGMT_NONE; + return;
memcpy(bssid, ifmgd->auth_data->bss->bssid, ETH_ALEN);
if (!ether_addr_equal(bssid, mgmt->bssid)) - return RX_MGMT_NONE; + return;
auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); @@@ -2327,14 -2291,15 +2291,15 @@@ mgmt->sa, auth_alg, ifmgd->auth_data->algorithm, auth_transaction, ifmgd->auth_data->expected_transaction); - return RX_MGMT_NONE; + return; }
if (status_code != WLAN_STATUS_SUCCESS) { sdata_info(sdata, "%pM denied authentication (status %d)\n", mgmt->sa, status_code); ieee80211_destroy_auth_data(sdata, false); - return RX_MGMT_CFG80211_RX_AUTH; + cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, len); + return; }
switch (ifmgd->auth_data->algorithm) { @@@ -2347,20 -2312,20 +2312,20 @@@ if (ifmgd->auth_data->expected_transaction != 4) { ieee80211_auth_challenge(sdata, mgmt, len); /* need another frame */ - return RX_MGMT_NONE; + return; } break; default: WARN_ONCE(1, "invalid auth alg %d", ifmgd->auth_data->algorithm); - return RX_MGMT_NONE; + return; }
sdata_info(sdata, "authenticated\n"); ifmgd->auth_data->done = true; ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC; ifmgd->auth_data->timeout_started = true; - run_again(ifmgd, ifmgd->auth_data->timeout); + run_again(sdata, ifmgd->auth_data->timeout);
if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE && ifmgd->auth_data->expected_transaction != 2) { @@@ -2368,7 -2333,8 +2333,8 @@@ * Report auth frame to user space for processing since another * round of Authentication frames is still needed. */ - return RX_MGMT_CFG80211_RX_AUTH; + cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, len); + return; }
/* move station state to auth */ @@@ -2384,30 -2350,29 +2350,29 @@@ } mutex_unlock(&sdata->local->sta_mtx);
- return RX_MGMT_CFG80211_RX_AUTH; + cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, len); + return; out_err: mutex_unlock(&sdata->local->sta_mtx); /* ignore frame -- wait for timeout */ }
- static enum rx_mgmt_action __must_check - ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, - struct ieee80211_mgmt *mgmt, size_t len) + static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; const u8 *bssid = NULL; u16 reason_code;
- lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata);
if (len < 24 + 2) - return RX_MGMT_NONE; + return;
if (!ifmgd->associated || !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) - return RX_MGMT_NONE; + return;
bssid = ifmgd->associated->bssid;
@@@ -2418,25 -2383,24 +2383,24 @@@
ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
- return RX_MGMT_CFG80211_DEAUTH; + cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, len); }
- static enum rx_mgmt_action __must_check - ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, - struct ieee80211_mgmt *mgmt, size_t len) + static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u16 reason_code;
- lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata);
if (len < 24 + 2) - return RX_MGMT_NONE; + return;
if (!ifmgd->associated || !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) - return RX_MGMT_NONE; + return;
reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
@@@ -2445,7 -2409,7 +2409,7 @@@
ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
- return RX_MGMT_CFG80211_DISASSOC; + cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, len); }
static void ieee80211_get_rates(struct ieee80211_supported_band *sband, @@@ -2495,7 -2459,7 +2459,7 @@@ static void ieee80211_destroy_assoc_dat { struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
- lockdep_assert_held(&sdata->u.mgd.mtx); + sdata_assert_lock(sdata);
if (!assoc) { sta_info_destroy_addr(sdata, assoc_data->bss->bssid); @@@ -2522,11 -2486,8 +2486,11 @@@ static bool ieee80211_assoc_success(str u16 capab_info, aid; struct ieee802_11_elems elems; struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; + const struct cfg80211_bss_ies *bss_ies = NULL; + struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data; u32 changed = 0; int err; + bool ret;
/* AssocResp and ReassocResp have identical structure */
@@@ -2558,86 -2519,21 +2522,86 @@@ ifmgd->aid = aid;
/* + * Some APs are erroneously not including some information in their + * (re)association response frames. Try to recover by using the data + * from the beacon or probe response. This seems to afflict mobile + * 2G/3G/4G wifi routers, reported models include the "Onda PN51T", + * "Vodafone PocketWiFi 2", "ZTE MF60" and a similar T-Mobile device. + */ + if ((assoc_data->wmm && !elems.wmm_param) || + (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) && + (!elems.ht_cap_elem || !elems.ht_operation)) || + (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) && + (!elems.vht_cap_elem || !elems.vht_operation))) { + const struct cfg80211_bss_ies *ies; + struct ieee802_11_elems bss_elems; + + rcu_read_lock(); + ies = rcu_dereference(cbss->ies); + if (ies) + bss_ies = kmemdup(ies, sizeof(*ies) + ies->len, + GFP_ATOMIC); + rcu_read_unlock(); + if (!bss_ies) + return false; + + ieee802_11_parse_elems(bss_ies->data, bss_ies->len, + false, &bss_elems); + if (assoc_data->wmm && + !elems.wmm_param && bss_elems.wmm_param) { + elems.wmm_param = bss_elems.wmm_param; + sdata_info(sdata, + "AP bug: WMM param missing from AssocResp\n"); + } + + /* + * Also check if we requested HT/VHT, otherwise the AP doesn't + * have to include the IEs in the (re)association response. + */ + if (!elems.ht_cap_elem && bss_elems.ht_cap_elem && + !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) { + elems.ht_cap_elem = bss_elems.ht_cap_elem; + sdata_info(sdata, + "AP bug: HT capability missing from AssocResp\n"); + } + if (!elems.ht_operation && bss_elems.ht_operation && + !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) { + elems.ht_operation = bss_elems.ht_operation; + sdata_info(sdata, + "AP bug: HT operation missing from AssocResp\n"); + } + if (!elems.vht_cap_elem && bss_elems.vht_cap_elem && + !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) { + elems.vht_cap_elem = bss_elems.vht_cap_elem; + sdata_info(sdata, + "AP bug: VHT capa missing from AssocResp\n"); + } + if (!elems.vht_operation && bss_elems.vht_operation && + !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) { + elems.vht_operation = bss_elems.vht_operation; + sdata_info(sdata, + "AP bug: VHT operation missing from AssocResp\n"); + } + } + + /* * We previously checked these in the beacon/probe response, so * they should be present here. This is just a safety net. */ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) && (!elems.wmm_param || !elems.ht_cap_elem || !elems.ht_operation)) { sdata_info(sdata, - "HT AP is missing WMM params or HT capability/operation in AssocResp\n"); - return false; + "HT AP is missing WMM params or HT capability/operation\n"); + ret = false; + goto out; }
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) && (!elems.vht_cap_elem || !elems.vht_operation)) { sdata_info(sdata, - "VHT AP is missing VHT capability/operation in AssocResp\n"); - return false; + "VHT AP is missing VHT capability/operation\n"); + ret = false; + goto out; }
mutex_lock(&sdata->local->sta_mtx); @@@ -2648,8 -2544,7 +2612,8 @@@ sta = sta_info_get(sdata, cbss->bssid); if (WARN_ON(!sta)) { mutex_unlock(&sdata->local->sta_mtx); - return false; + ret = false; + goto out; }
sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)]; @@@ -2702,8 -2597,7 +2666,8 @@@ sta->sta.addr); WARN_ON(__sta_info_destroy(sta)); mutex_unlock(&sdata->local->sta_mtx); - return false; + ret = false; + goto out; }
mutex_unlock(&sdata->local->sta_mtx); @@@ -2743,16 -2637,12 +2707,15 @@@ ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt); ieee80211_sta_reset_beacon_monitor(sdata);
- return true; + ret = true; + out: + kfree(bss_ies); + return ret; }
- static enum rx_mgmt_action __must_check - ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, - struct ieee80211_mgmt *mgmt, size_t len, - struct cfg80211_bss **bss) + static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, + size_t len) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data; @@@ -2760,13 -2650,14 +2723,14 @@@ struct ieee802_11_elems elems; u8 *pos; bool reassoc; + struct cfg80211_bss *bss;
- lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata);
if (!assoc_data) - return RX_MGMT_NONE; + return; if (!ether_addr_equal(assoc_data->bss->bssid, mgmt->bssid)) - return RX_MGMT_NONE; + return;
/* * AssocResp and ReassocResp have identical structure, so process both @@@ -2774,7 -2665,7 +2738,7 @@@ */
if (len < 24 + 6) - return RX_MGMT_NONE; + return;
reassoc = ieee80211_is_reassoc_req(mgmt->frame_control); capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); @@@ -2801,22 -2692,23 +2765,23 @@@ assoc_data->timeout = jiffies + msecs_to_jiffies(ms); assoc_data->timeout_started = true; if (ms > IEEE80211_ASSOC_TIMEOUT) - run_again(ifmgd, assoc_data->timeout); - return RX_MGMT_NONE; + run_again(sdata, assoc_data->timeout); + return; }
- *bss = assoc_data->bss; + bss = assoc_data->bss;
if (status_code != WLAN_STATUS_SUCCESS) { sdata_info(sdata, "%pM denied association (code=%d)\n", mgmt->sa, status_code); ieee80211_destroy_assoc_data(sdata, false); } else { - if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) { + if (!ieee80211_assoc_success(sdata, bss, mgmt, len)) { /* oops -- internal error -- send timeout for now */ ieee80211_destroy_assoc_data(sdata, false); - cfg80211_put_bss(sdata->local->hw.wiphy, *bss); - return RX_MGMT_CFG80211_ASSOC_TIMEOUT; + cfg80211_put_bss(sdata->local->hw.wiphy, bss); + cfg80211_send_assoc_timeout(sdata->dev, mgmt->bssid); + return; } sdata_info(sdata, "associated\n");
@@@ -2828,7 -2720,7 +2793,7 @@@ ieee80211_destroy_assoc_data(sdata, true); }
- return RX_MGMT_CFG80211_RX_ASSOC; + cfg80211_send_rx_assoc(sdata->dev, bss, (u8 *)mgmt, len); }
static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, @@@ -2842,7 -2734,7 +2807,7 @@@ struct ieee80211_channel *channel; bool need_ps = false;
- lockdep_assert_held(&sdata->u.mgd.mtx); + sdata_assert_lock(sdata);
if ((sdata->u.mgd.associated && ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid)) || @@@ -2901,7 -2793,7 +2866,7 @@@ static void ieee80211_rx_mgmt_probe_res
ifmgd = &sdata->u.mgd;
- ASSERT_MGD_MTX(ifmgd); + sdata_assert_lock(sdata);
if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) return; /* ignore ProbeResp to foreign address */ @@@ -2926,7 -2818,7 +2891,7 @@@ ifmgd->auth_data->tries = 0; ifmgd->auth_data->timeout = jiffies; ifmgd->auth_data->timeout_started = true; - run_again(ifmgd, ifmgd->auth_data->timeout); + run_again(sdata, ifmgd->auth_data->timeout); } }
@@@ -2951,10 -2843,9 +2916,9 @@@ static const u64 care_about_ies (1ULL << WLAN_EID_HT_CAPABILITY) | (1ULL << WLAN_EID_HT_OPERATION);
- static enum rx_mgmt_action - ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, - struct ieee80211_mgmt *mgmt, size_t len, - u8 *deauth_buf, struct ieee80211_rx_status *rx_status) + static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len, + struct ieee80211_rx_status *rx_status) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; @@@ -2969,24 -2860,25 +2933,25 @@@ u8 erp_value = 0; u32 ncrc; u8 *bssid; + u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN];
- lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata);
/* Process beacon from the current BSS */ baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt; if (baselen > len) - return RX_MGMT_NONE; + return;
rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (!chanctx_conf) { rcu_read_unlock(); - return RX_MGMT_NONE; + return; }
if (rx_status->freq != chanctx_conf->def.chan->center_freq) { rcu_read_unlock(); - return RX_MGMT_NONE; + return; } chan = chanctx_conf->def.chan; rcu_read_unlock(); @@@ -3013,13 -2905,13 +2978,13 @@@ /* continue assoc process */ ifmgd->assoc_data->timeout = jiffies; ifmgd->assoc_data->timeout_started = true; - run_again(ifmgd, ifmgd->assoc_data->timeout); - return RX_MGMT_NONE; + run_again(sdata, ifmgd->assoc_data->timeout); + return; }
if (!ifmgd->associated || !ether_addr_equal(mgmt->bssid, ifmgd->associated->bssid)) - return RX_MGMT_NONE; + return; bssid = ifmgd->associated->bssid;
/* Track average RSSI from the Beacon frames of the current AP */ @@@ -3165,7 -3057,7 +3130,7 @@@ }
if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid) - return RX_MGMT_NONE; + return; ifmgd->beacon_crc = ncrc; ifmgd->beacon_crc_valid = true;
@@@ -3199,6 -3091,7 +3164,7 @@@ }
changed |= BSS_CHANGED_DTIM_PERIOD; + ieee80211_recalc_ps_vif(sdata); }
if (elems.erp_info) { @@@ -3220,7 -3113,9 +3186,9 @@@ ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, WLAN_REASON_DEAUTH_LEAVING, true, deauth_buf); - return RX_MGMT_CFG80211_TX_DEAUTH; + cfg80211_send_deauth(sdata->dev, deauth_buf, + sizeof(deauth_buf)); + return; }
if (sta && elems.opmode_notif) @@@ -3237,19 -3132,13 +3205,13 @@@ elems.pwr_constr_elem);
ieee80211_bss_info_change_notify(sdata, changed); - - return RX_MGMT_NONE; }
void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { - struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_rx_status *rx_status; struct ieee80211_mgmt *mgmt; - struct cfg80211_bss *bss = NULL; - enum rx_mgmt_action rma = RX_MGMT_NONE; - u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN]; u16 fc; struct ieee802_11_elems elems; int ies_len; @@@ -3258,28 -3147,27 +3220,27 @@@ mgmt = (struct ieee80211_mgmt *) skb->data; fc = le16_to_cpu(mgmt->frame_control);
- mutex_lock(&ifmgd->mtx); + sdata_lock(sdata);
switch (fc & IEEE80211_FCTL_STYPE) { case IEEE80211_STYPE_BEACON: - rma = ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, - deauth_buf, rx_status); + ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, rx_status); break; case IEEE80211_STYPE_PROBE_RESP: ieee80211_rx_mgmt_probe_resp(sdata, skb); break; case IEEE80211_STYPE_AUTH: - rma = ieee80211_rx_mgmt_auth(sdata, mgmt, skb->len); + ieee80211_rx_mgmt_auth(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_DEAUTH: - rma = ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len); + ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_DISASSOC: - rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); + ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_ASSOC_RESP: case IEEE80211_STYPE_REASSOC_RESP: - rma = ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len, &bss); + ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len); break; case IEEE80211_STYPE_ACTION: if (mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) { @@@ -3325,34 -3213,7 +3286,7 @@@ } break; } - mutex_unlock(&ifmgd->mtx); - - switch (rma) { - case RX_MGMT_NONE: - /* no action */ - break; - case RX_MGMT_CFG80211_DEAUTH: - cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); - break; - case RX_MGMT_CFG80211_DISASSOC: - cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len); - break; - case RX_MGMT_CFG80211_RX_AUTH: - cfg80211_send_rx_auth(sdata->dev, (u8 *)mgmt, skb->len); - break; - case RX_MGMT_CFG80211_RX_ASSOC: - cfg80211_send_rx_assoc(sdata->dev, bss, (u8 *)mgmt, skb->len); - break; - case RX_MGMT_CFG80211_ASSOC_TIMEOUT: - cfg80211_send_assoc_timeout(sdata->dev, mgmt->bssid); - break; - case RX_MGMT_CFG80211_TX_DEAUTH: - cfg80211_send_deauth(sdata->dev, deauth_buf, - sizeof(deauth_buf)); - break; - default: - WARN(1, "unexpected: %d", rma); - } + sdata_unlock(sdata); }
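Because the sdata lock is now the wdev mutex, the dispatcher above no longer returns an rx_mgmt_action code to be replayed after dropping a private mutex: each handler reports to cfg80211 directly while the lock is held. The sketch below models only that control-flow shape, a switch that dispatches and reports inside a single critical section; the event names and the report() helper are invented for the demo and are not mac80211 or cfg80211 APIs.

/* Control-flow model of the dispatcher above: handle and report inside one
 * critical section instead of returning an action code to run after unlock.
 * Event names and report() are invented for this demo.
 */
#include <pthread.h>
#include <stdio.h>

enum demo_stype { DEMO_AUTH, DEMO_DEAUTH, DEMO_BEACON };

struct demo_sdata {
	pthread_mutex_t mtx;
};

static void report(const char *what)	/* stands in for the report call */
{
	printf("report to userspace: %s\n", what);
}

static void rx_auth(struct demo_sdata *sdata)
{
	(void)sdata;	/* state updates guarded by the lock would go here */
	report("auth frame");
}

static void rx_deauth(struct demo_sdata *sdata)
{
	(void)sdata;
	report("deauth frame");
}

static void rx_queued_mgmt(struct demo_sdata *sdata, enum demo_stype stype)
{
	pthread_mutex_lock(&sdata->mtx);
	switch (stype) {
	case DEMO_AUTH:
		rx_auth(sdata);		/* reports directly, lock still held */
		break;
	case DEMO_DEAUTH:
		rx_deauth(sdata);
		break;
	case DEMO_BEACON:
		break;			/* nothing to report in this demo */
	}
	pthread_mutex_unlock(&sdata->mtx);
}

int main(void)
{
	struct demo_sdata sdata = { .mtx = PTHREAD_MUTEX_INITIALIZER };

	rx_queued_mgmt(&sdata, DEMO_AUTH);
	rx_queued_mgmt(&sdata, DEMO_DEAUTH);
	return 0;
}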
static void ieee80211_sta_timer(unsigned long data) @@@ -3366,20 -3227,12 +3300,12 @@@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, u8 *bssid, u8 reason, bool tx) { - struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, tx, frame_buf); - mutex_unlock(&ifmgd->mtx);
- /* - * must be outside lock due to cfg80211, - * but that's not a problem. - */ cfg80211_send_deauth(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN); - - mutex_lock(&ifmgd->mtx); }
static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) @@@ -3389,7 -3242,7 +3315,7 @@@ struct ieee80211_mgd_auth_data *auth_data = ifmgd->auth_data; u32 tx_flags = 0;
- lockdep_assert_held(&ifmgd->mtx); + sdata_assert_lock(sdata);
if (WARN_ON_ONCE(!auth_data)) return -EINVAL; @@@ -3462,7 -3315,7 +3388,7 @@@ if (tx_flags == 0) { auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; ifmgd->auth_data->timeout_started = true; - run_again(ifmgd, auth_data->timeout); + run_again(sdata, auth_data->timeout); } else { auth_data->timeout_started = false; } @@@ -3475,7 -3328,7 +3401,7 @@@ static int ieee80211_do_assoc(struct ie struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data; struct ieee80211_local *local = sdata->local;
- lockdep_assert_held(&sdata->u.mgd.mtx); + sdata_assert_lock(sdata);
assoc_data->tries++; if (assoc_data->tries > IEEE80211_ASSOC_MAX_TRIES) { @@@ -3499,7 -3352,7 +3425,7 @@@ if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) { assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT; assoc_data->timeout_started = true; - run_again(&sdata->u.mgd, assoc_data->timeout); + run_again(sdata, assoc_data->timeout); } else { assoc_data->timeout_started = false; } @@@ -3524,7 -3377,7 +3450,7 @@@ void ieee80211_sta_work(struct ieee8021 struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- mutex_lock(&ifmgd->mtx); + sdata_lock(sdata);
if (ifmgd->status_received) { __le16 fc = ifmgd->status_fc; @@@ -3536,7 -3389,7 +3462,7 @@@ if (status_acked) { ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT_SHORT; - run_again(ifmgd, ifmgd->auth_data->timeout); + run_again(sdata, ifmgd->auth_data->timeout); } else { ifmgd->auth_data->timeout = jiffies - 1; } @@@ -3547,7 -3400,7 +3473,7 @@@ if (status_acked) { ifmgd->assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT_SHORT; - run_again(ifmgd, ifmgd->assoc_data->timeout); + run_again(sdata, ifmgd->assoc_data->timeout); } else { ifmgd->assoc_data->timeout = jiffies - 1; } @@@ -3570,12 -3423,10 +3496,10 @@@
ieee80211_destroy_auth_data(sdata, false);
- mutex_unlock(&ifmgd->mtx); cfg80211_send_auth_timeout(sdata->dev, bssid); - mutex_lock(&ifmgd->mtx); } } else if (ifmgd->auth_data && ifmgd->auth_data->timeout_started) - run_again(ifmgd, ifmgd->auth_data->timeout); + run_again(sdata, ifmgd->auth_data->timeout);
if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started && time_after(jiffies, ifmgd->assoc_data->timeout)) { @@@ -3588,12 -3439,10 +3512,10 @@@
ieee80211_destroy_assoc_data(sdata, false);
-			mutex_unlock(&ifmgd->mtx);
 			cfg80211_send_assoc_timeout(sdata->dev, bssid);
-			mutex_lock(&ifmgd->mtx);
 		}
 	} else if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started)
-		run_again(ifmgd, ifmgd->assoc_data->timeout);
+		run_again(sdata, ifmgd->assoc_data->timeout);
if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL | IEEE80211_STA_CONNECTION_POLL) && @@@ -3627,7 -3476,7 +3549,7 @@@ false); } } else if (time_is_after_jiffies(ifmgd->probe_timeout)) - run_again(ifmgd, ifmgd->probe_timeout); + run_again(sdata, ifmgd->probe_timeout); else if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) { mlme_dbg(sdata, "Failed to send nullfunc to AP %pM after %dms, disconnecting\n", @@@ -3656,7 -3505,7 +3578,7 @@@ } }
-	mutex_unlock(&ifmgd->mtx);
+	sdata_unlock(sdata);
 }
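run_again() is likewise switched from taking the ifmgd pointer to taking the sdata pointer in the hunks above. Its body is not shown in this diff; a plausible sketch of the timer re-arming it is assumed to do follows, with invented structure and field names (only the timer_pending()/time_before()/mod_timer() calls are standard kernel API).

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_mlme_state {
	struct timer_list timer;	/* assumed: the MLME work timer */
};

/* Hypothetical sketch: only ever move the timer earlier, never later,
 * so the nearest deadline (auth, assoc or probe timeout) wins.
 */
static void run_again(struct example_mlme_state *mlme, unsigned long timeout)
{
	if (!timer_pending(&mlme->timer) ||
	    time_before(timeout, mlme->timer.expires))
		mod_timer(&mlme->timer, timeout);
}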
static void ieee80211_sta_bcn_mon_timer(unsigned long data) @@@ -3717,9 -3566,9 +3639,9 @@@ void ieee80211_sta_restart(struct ieee8 { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-	mutex_lock(&ifmgd->mtx);
+	sdata_lock(sdata);
 	if (!ifmgd->associated) {
-		mutex_unlock(&ifmgd->mtx);
+		sdata_unlock(sdata);
 		return;
 	}
@@@ -3730,10 -3579,10 +3652,10 @@@
 					      ifmgd->associated->bssid,
 					      WLAN_REASON_UNSPECIFIED, true);
-		mutex_unlock(&ifmgd->mtx);
+		sdata_unlock(sdata);
 		return;
 	}
-	mutex_unlock(&ifmgd->mtx);
+	sdata_unlock(sdata);
 }
 #endif
@@@ -3765,8 -3614,6 +3687,6 @@@ void ieee80211_sta_setup_sdata(struct i ifmgd->uapsd_max_sp_len = sdata->local->hw.uapsd_max_sp_len; ifmgd->p2p_noa_index = -1;
-	mutex_init(&ifmgd->mtx);
-
 	if (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS)
 		ifmgd->req_smps = IEEE80211_SMPS_AUTOMATIC;
 	else
/* try to authenticate/probe */
- mutex_lock(&ifmgd->mtx); - if ((ifmgd->auth_data && !ifmgd->auth_data->done) || ifmgd->assoc_data) { err = -EBUSY; @@@ -4143,8 -3988,8 +4061,8 @@@ WLAN_REASON_UNSPECIFIED, false, frame_buf);
-		__cfg80211_send_deauth(sdata->dev, frame_buf,
-				       sizeof(frame_buf));
+		cfg80211_send_deauth(sdata->dev, frame_buf,
+				     sizeof(frame_buf));
 	}
sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid); @@@ -4161,8 -4006,7 +4079,7 @@@
 	/* hold our own reference */
 	cfg80211_ref_bss(local->hw.wiphy, auth_data->bss);
-	err = 0;
-	goto out_unlock;
+	return 0;
 err_clear:
 	memset(ifmgd->bssid, 0, ETH_ALEN);
@@@ -4170,9 -4014,6 +4087,6 @@@
 	ifmgd->auth_data = NULL;
 err_free:
 	kfree(auth_data);
- out_unlock:
-	mutex_unlock(&ifmgd->mtx);
-	return err;
 }
@@@ -4203,8 -4044,6 +4117,6 @@@ int ieee80211_mgd_assoc(struct ieee8021 assoc_data->ssid_len = ssidie[1]; rcu_read_unlock();
- mutex_lock(&ifmgd->mtx); - if (ifmgd->associated) { u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
@@@ -4212,8 -4051,8 +4124,8 @@@ WLAN_REASON_UNSPECIFIED, false, frame_buf);
-		__cfg80211_send_deauth(sdata->dev, frame_buf,
-				       sizeof(frame_buf));
+		cfg80211_send_deauth(sdata->dev, frame_buf,
+				     sizeof(frame_buf));
 	}
if (ifmgd->auth_data && !ifmgd->auth_data->done) { @@@ -4407,7 -4246,7 +4319,7 @@@ } rcu_read_unlock();
- run_again(ifmgd, assoc_data->timeout); + run_again(sdata, assoc_data->timeout);
if (bss->corrupt_data) { char *corrupt_type = "data"; @@@ -4423,17 -4262,13 +4335,13 @@@ corrupt_type); }
-	err = 0;
-	goto out;
+	return 0;
 err_clear:
 	memset(ifmgd->bssid, 0, ETH_ALEN);
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
 	ifmgd->assoc_data = NULL;
 err_free:
 	kfree(assoc_data);
- out:
-	mutex_unlock(&ifmgd->mtx);
-	return err;
 }
@@@ -4445,8 -4280,6 +4353,6 @@@ int ieee80211_mgd_deauth(struct ieee802 bool tx = !req->local_state_change; bool report_frame = false;
-	mutex_lock(&ifmgd->mtx);
-
 	sdata_info(sdata,
 		   "deauthenticating from %pM by local choice (reason=%d)\n",
 		   req->bssid, req->reason_code);
@@@ -4458,7 -4291,6 +4364,6 @@@
 					       req->reason_code, tx,
 					       frame_buf);
 		ieee80211_destroy_auth_data(sdata, false);
-		mutex_unlock(&ifmgd->mtx);

 		report_frame = true;
 		goto out;
@@@ -4470,12 -4302,11 +4375,11 @@@
 				       req->reason_code, tx, frame_buf);
 		report_frame = true;
 	}

 out:
 	if (report_frame)
-		__cfg80211_send_deauth(sdata->dev, frame_buf,
-				       IEEE80211_DEAUTH_FRAME_LEN);
+		cfg80211_send_deauth(sdata->dev, frame_buf,
+				     IEEE80211_DEAUTH_FRAME_LEN);
return 0; } @@@ -4487,18 -4318,14 +4391,14 @@@ int ieee80211_mgd_disassoc(struct ieee8 u8 bssid[ETH_ALEN]; u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
-	mutex_lock(&ifmgd->mtx);
-
 	/*
 	 * cfg80211 should catch this ... but it's racy since
 	 * we can receive a disassoc frame, process it, hand it
 	 * to cfg80211 while that's in a locked section already
 	 * trying to tell us that the user wants to disconnect.
 	 */
-	if (ifmgd->associated != req->bss) {
-		mutex_unlock(&ifmgd->mtx);
+	if (ifmgd->associated != req->bss)
 		return -ENOLINK;
-	}
 	sdata_info(sdata,
 		   "disassociating from %pM by local choice (reason=%d)\n",
@@@ -4508,10 -4335,9 +4408,9 @@@
 	ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DISASSOC,
 			       req->reason_code, !req->local_state_change,
 			       frame_buf);
-	mutex_unlock(&ifmgd->mtx);

-	__cfg80211_send_disassoc(sdata->dev, frame_buf,
-				 IEEE80211_DEAUTH_FRAME_LEN);
+	cfg80211_send_disassoc(sdata->dev, frame_buf,
+			       IEEE80211_DEAUTH_FRAME_LEN);
return 0; } @@@ -4531,13 -4357,13 +4430,13 @@@ void ieee80211_mgd_stop(struct ieee8021 cancel_work_sync(&ifmgd->csa_connection_drop_work); cancel_work_sync(&ifmgd->chswitch_work);
-	mutex_lock(&ifmgd->mtx);
+	sdata_lock(sdata);
 	if (ifmgd->assoc_data)
 		ieee80211_destroy_assoc_data(sdata, false);
 	if (ifmgd->auth_data)
 		ieee80211_destroy_auth_data(sdata, false);
 	del_timer_sync(&ifmgd->timer);
-	mutex_unlock(&ifmgd->mtx);
+	sdata_unlock(sdata);
 }
void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, diff --combined net/mac80211/util.c index 72e6292,89a8377..c75d3db --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@@ -560,6 -560,9 +560,9 @@@ void ieee80211_iterate_active_interface list_for_each_entry(sdata, &local->interfaces, list) { switch (sdata->vif.type) { case NL80211_IFTYPE_MONITOR: + if (!(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE)) + continue; + break; case NL80211_IFTYPE_AP_VLAN: continue; default: @@@ -598,6 -601,9 +601,9 @@@ void ieee80211_iterate_active_interface list_for_each_entry_rcu(sdata, &local->interfaces, list) { switch (sdata->vif.type) { case NL80211_IFTYPE_MONITOR: + if (!(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE)) + continue; + break; case NL80211_IFTYPE_AP_VLAN: continue; default: @@@ -661,12 -667,12 +667,12 @@@ void ieee80211_queue_delayed_work(struc } EXPORT_SYMBOL(ieee80211_queue_delayed_work);
-u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, bool action, +u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action, struct ieee802_11_elems *elems, u64 filter, u32 crc) { size_t left = len; - u8 *pos = start; + const u8 *pos = start; bool calc_crc = filter != 0; DECLARE_BITMAP(seen_elems, 256); const u8 *ie; @@@ -1072,32 -1078,6 +1078,6 @@@ void ieee80211_sta_def_wmm_params(struc ieee80211_set_wmm_default(sdata, true); }
- u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
- 			      enum ieee80211_band band)
- {
- 	struct ieee80211_supported_band *sband;
- 	struct ieee80211_rate *bitrates;
- 	u32 mandatory_rates;
- 	enum ieee80211_rate_flags mandatory_flag;
- 	int i;
-
- 	sband = local->hw.wiphy->bands[band];
- 	if (WARN_ON(!sband))
- 		return 1;
-
- 	if (band == IEEE80211_BAND_2GHZ)
- 		mandatory_flag = IEEE80211_RATE_MANDATORY_B;
- 	else
- 		mandatory_flag = IEEE80211_RATE_MANDATORY_A;
-
- 	bitrates = sband->bitrates;
- 	mandatory_rates = 0;
- 	for (i = 0; i < sband->n_bitrates; i++)
- 		if (bitrates[i].flags & mandatory_flag)
- 			mandatory_rates |= BIT(i);
- 	return mandatory_rates;
- }
-
 void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
 			 u16 transaction, u16 auth_alg, u16 status,
 			 const u8 *extra, size_t extra_len, const u8 *da,
@@@ -1607,9 -1587,9 +1587,9 @@@ int ieee80211_reconfig(struct ieee80211
 	if (sdata->u.mgd.dtim_period)
 		changed |= BSS_CHANGED_DTIM_PERIOD;
- mutex_lock(&sdata->u.mgd.mtx); + sdata_lock(sdata); ieee80211_bss_info_change_notify(sdata, changed); - mutex_unlock(&sdata->u.mgd.mtx); + sdata_unlock(sdata); break; case NL80211_IFTYPE_ADHOC: changed |= BSS_CHANGED_IBSS; diff --combined net/netfilter/ipvs/ip_vs_ctl.c index 9e6c2a0,edb88fb..47e5108 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@@ -1487,9 -1487,9 +1487,9 @@@ ip_vs_forget_dev(struct ip_vs_dest *des * Currently only NETDEV_DOWN is handled to release refs to cached dsts */ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event, - void *ptr) + void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_service *svc; @@@ -1575,7 -1575,7 +1575,7 @@@ static int zero static int three = 3;
static int - proc_do_defense_mode(ctl_table *table, int write, + proc_do_defense_mode(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct net *net = current->nsproxy->net_ns; @@@ -1596,7 -1596,7 +1596,7 @@@ }
static int - proc_do_sync_threshold(ctl_table *table, int write, + proc_do_sync_threshold(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int *valp = table->data; @@@ -1616,7 -1616,7 +1616,7 @@@ }
static int - proc_do_sync_mode(ctl_table *table, int write, + proc_do_sync_mode(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int *valp = table->data; @@@ -1634,7 -1634,7 +1634,7 @@@ }
static int - proc_do_sync_ports(ctl_table *table, int write, + proc_do_sync_ports(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int *valp = table->data; @@@ -1716,9 -1716,9 +1716,9 @@@ static struct ctl_table vs_vars[] = }, { .procname = "sync_qlen_max", - .maxlen = sizeof(int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_doulongvec_minmax, }, { .procname = "sync_sock_size", @@@ -2542,7 -2542,6 +2542,7 @@@ __ip_vs_get_dest_entries(struct net *ne struct ip_vs_dest *dest; struct ip_vs_dest_entry entry;
+ memset(&entry, 0, sizeof(entry)); list_for_each_entry(dest, &svc->destinations, n_list) { if (count >= get->num_dests) break; diff --combined net/netfilter/nfnetlink_queue_core.c index 5352b2d,c011543..299a48a --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c @@@ -41,6 -41,14 +41,14 @@@
#define NFQNL_QMAX_DEFAULT 1024
+ /* We're using struct nlattr which has 16bit nla_len. Note that nla_len
+  * includes the header length. Thus, the maximum packet length that we
+  * support is 65531 bytes. We send truncated packets if the specified length
+  * is larger than that. Userspace can check for presence of NFQA_CAP_LEN
+  * attribute to detect truncation.
+  */
+ #define NFQNL_MAX_COPY_RANGE (0xffff - NLA_HDRLEN)
+
 struct nfqnl_instance {
 	struct hlist_node hlist;		/* global list of queues */
 	struct rcu_head rcu;
@@@ -122,7 -130,7 +130,7 @@@ instance_create(struct nfnl_queue_net *
 	inst->queue_num = queue_num;
 	inst->peer_portid = portid;
 	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
-	inst->copy_range = 0xffff;
+	inst->copy_range = NFQNL_MAX_COPY_RANGE;
 	inst->copy_mode = NFQNL_COPY_NONE;
 	spin_lock_init(&inst->lock);
 	INIT_LIST_HEAD(&inst->queue_list);
@@@ -333,10 -341,9 +341,9 @@@ nfqnl_build_packet_message(struct nfqnl
 		return NULL;
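The added comment fixes the largest supported copy range at 65531 bytes because nla_len is a 16-bit length that also counts the attribute header. A small compile-time check of that arithmetic is sketched below; it is illustrative only and not part of the patch (NLA_HDRLEN is the aligned size of struct nlattr, which is 4 bytes).

#include <linux/netlink.h>	/* struct nlattr, NLA_ALIGN, NLA_HDRLEN */
#include <linux/bug.h>		/* BUILD_BUG_ON */

/* Illustrative only: verify the 65531-byte limit quoted in the comment
 * above, i.e. 0xffff (the maximum nla_len) minus the 4-byte header.
 */
static inline void nfqnl_copy_range_arith_check(void)
{
	BUILD_BUG_ON(NLA_HDRLEN != NLA_ALIGN(sizeof(struct nlattr)));
	BUILD_BUG_ON(0xffff - NLA_HDRLEN != 65531);
}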
 	data_len = ACCESS_ONCE(queue->copy_range);
-	if (data_len == 0 || data_len > entskb->len)
+	if (data_len > entskb->len)
 		data_len = entskb->len;
- if (!entskb->head_frag || skb_headlen(entskb) < L1_CACHE_BYTES || skb_shinfo(entskb)->nr_frags >= MAX_SKB_FRAGS) @@@ -465,7 -472,8 +472,8 @@@ if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0) goto nla_put_failure;
-	if (cap_len > 0 && nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
+	if (cap_len > data_len &&
+	    nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
 		goto nla_put_failure;
if (nfqnl_put_packet_info(skb, entskb)) @@@ -509,10 -517,6 +517,6 @@@ __nfqnl_enqueue_packet(struct net *net } spin_lock_bh(&queue->lock);
- if (!queue->peer_portid) { - err = -EINVAL; - goto err_out_free_nskb; - } if (queue->queue_total >= queue->queue_maxlen) { if (queue->flags & NFQA_CFG_F_FAIL_OPEN) { failopen = 1; @@@ -637,6 -641,9 +641,6 @@@ nfqnl_enqueue_packet(struct nf_queue_en if (queue->copy_mode == NFQNL_COPY_NONE) return -EINVAL;
- if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(entry->skb)) - return __nfqnl_enqueue_packet(net, queue, entry); - skb = entry->skb;
switch (entry->pf) { @@@ -648,9 -655,6 +652,9 @@@ break; }
+ if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb)) + return __nfqnl_enqueue_packet(net, queue, entry); + nf_bridge_adjust_skb_data(skb); segs = skb_gso_segment(skb, 0); /* Does not use PTR_ERR to limit the number of error codes that can be @@@ -731,13 -735,8 +735,8 @@@ nfqnl_set_mode(struct nfqnl_instance *q
 	case NFQNL_COPY_PACKET:
 		queue->copy_mode = mode;
-		/* We're using struct nlattr which has 16bit nla_len. Note that
-		 * nla_len includes the header length. Thus, the maximum packet
-		 * length that we support is 65531 bytes. We send truncated
-		 * packets if the specified length is larger than that.
-		 */
-		if (range > 0xffff - NLA_HDRLEN)
-			queue->copy_range = 0xffff - NLA_HDRLEN;
+		if (range == 0 || range > NFQNL_MAX_COPY_RANGE)
+			queue->copy_range = NFQNL_MAX_COPY_RANGE;
 		else
 			queue->copy_range = range;
 		break;
@@@ -800,7 -799,7 +799,7 @@@ static in
 nfqnl_rcv_dev_event(struct notifier_block *this,
 		    unsigned long event, void *ptr)
 {
-	struct net_device *dev = ptr;
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
/* Drop any packets associated with the downed device */ if (event == NETDEV_DOWN) diff --combined net/netlink/af_netlink.c index 57ee84d,8978755..275d901 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@@ -371,7 -371,7 +371,7 @@@ static int netlink_mmap(struct file *fi err = 0; out: mutex_unlock(&nlk->pg_vec_lock); - return 0; + return err; }
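Several notifier callbacks touched by this merge (ip_vs_dst_event and nfqnl_rcv_dev_event above, packet_notifier further down) stop casting the void *ptr argument to struct net_device directly and instead use netdev_notifier_info_to_dev(). A minimal sketch of a netdevice notifier written against that convention follows; the module name and the action taken on NETDEV_DOWN are invented for illustration.

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

/* Illustrative only: log interfaces going down, fetching the device
 * through the netdev_notifier_info_to_dev() accessor used above.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_DOWN)
		pr_info("example: %s went down\n", dev->name);

	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call = example_netdev_event,
};

static int __init example_init(void)
{
	return register_netdevice_notifier(&example_netdev_notifier);
}

static void __exit example_exit(void)
{
	unregister_netdevice_notifier(&example_netdev_notifier);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");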
static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr) @@@ -750,6 -750,10 +750,10 @@@ static void netlink_skb_destructor(stru skb->head = NULL; } #endif + if (is_vmalloc_addr(skb->head)) { + vfree(skb->head); + skb->head = NULL; + } if (skb->sk != NULL) sock_rfree(skb); } @@@ -854,16 -858,23 +858,23 @@@ netlink_unlock_table(void wake_up(&nl_table_wait); }
+ static bool netlink_compare(struct net *net, struct sock *sk)
+ {
+ 	return net_eq(sock_net(sk), net);
+ }
+
 static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
 {
-	struct nl_portid_hash *hash = &nl_table[protocol].hash;
+	struct netlink_table *table = &nl_table[protocol];
+	struct nl_portid_hash *hash = &table->hash;
 	struct hlist_head *head;
 	struct sock *sk;

 	read_lock(&nl_table_lock);
 	head = nl_portid_hashfn(hash, portid);
 	sk_for_each(sk, head) {
-		if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) {
+		if (table->compare(net, sk) &&
+		    (nlk_sk(sk)->portid == portid)) {
 			sock_hold(sk);
 			goto found;
 		}
@@@ -976,7 -987,8 +987,8 @@@ netlink_update_listeners(struct sock *s
static int netlink_insert(struct sock *sk, struct net *net, u32 portid) { - struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash; + struct netlink_table *table = &nl_table[sk->sk_protocol]; + struct nl_portid_hash *hash = &table->hash; struct hlist_head *head; int err = -EADDRINUSE; struct sock *osk; @@@ -986,7 -998,8 +998,8 @@@ head = nl_portid_hashfn(hash, portid); len = 0; sk_for_each(osk, head) { - if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid)) + if (table->compare(net, osk) && + (nlk_sk(osk)->portid == portid)) break; len++; } @@@ -1183,7 -1196,8 +1196,8 @@@ static int netlink_autobind(struct sock { struct sock *sk = sock->sk; struct net *net = sock_net(sk); - struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash; + struct netlink_table *table = &nl_table[sk->sk_protocol]; + struct nl_portid_hash *hash = &table->hash; struct hlist_head *head; struct sock *osk; s32 portid = task_tgid_vnr(current); @@@ -1195,7 -1209,7 +1209,7 @@@ retry netlink_table_grab(); head = nl_portid_hashfn(hash, portid); sk_for_each(osk, head) { - if (!net_eq(sock_net(osk), net)) + if (!table->compare(net, osk)) continue; if (nlk_sk(osk)->portid == portid) { /* Bind collision, search negative portid values. */ @@@ -1420,6 -1434,35 +1434,35 @@@ struct sock *netlink_getsockbyfilp(stru return sock; }
+ static struct sk_buff *netlink_alloc_large_skb(unsigned int size)
+ {
+ 	struct sk_buff *skb;
+ 	void *data;
+
+ 	if (size <= NLMSG_GOODSIZE)
+ 		return alloc_skb(size, GFP_KERNEL);
+
+ 	skb = alloc_skb_head(GFP_KERNEL);
+ 	if (skb == NULL)
+ 		return NULL;
+
+ 	data = vmalloc(size);
+ 	if (data == NULL)
+ 		goto err;
+
+ 	skb->head = data;
+ 	skb->data = data;
+ 	skb_reset_tail_pointer(skb);
+ 	skb->end = skb->tail + size;
+ 	skb->len = 0;
+ 	skb->destructor = netlink_skb_destructor;
+
+ 	return skb;
+ err:
+ 	kfree_skb(skb);
+ 	return NULL;
+ }
+
 /*
  * Attach a skb to a netlink socket.
  * The caller must hold a reference to the destination socket. On error, the
delta = skb->end - skb->tail; - if (delta * 2 < skb->truesize) + if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize) return skb;
if (skb_shared(skb)) { @@@ -2096,7 -2139,7 +2139,7 @@@ static int netlink_sendmsg(struct kioc if (len > sk->sk_sndbuf - 32) goto out; err = -ENOBUFS; - skb = alloc_skb(len, GFP_KERNEL); + skb = netlink_alloc_large_skb(len); if (skb == NULL) goto out;
@@@ -2285,6 -2328,8 +2328,8 @@@ __netlink_kernel_create(struct net *net if (cfg) { nl_table[unit].bind = cfg->bind; nl_table[unit].flags = cfg->flags; + if (cfg->compare) + nl_table[unit].compare = cfg->compare; } nl_table[unit].registered = 1; } else { @@@ -2707,6 -2752,7 +2752,7 @@@ static void *netlink_seq_next(struct se { struct sock *s; struct nl_seq_iter *iter; + struct net *net; int i, j;
++*pos; @@@ -2714,11 -2760,12 +2760,12 @@@ if (v == SEQ_START_TOKEN) return netlink_seq_socket_idx(seq, 0);
+ net = seq_file_net(seq); iter = seq->private; s = v; do { s = sk_next(s); - } while (s && sock_net(s) != seq_file_net(seq)); + } while (s && !nl_table[s->sk_protocol].compare(net, s)); if (s) return s;
@@@ -2730,7 -2777,8 +2777,8 @@@
 		for (; j <= hash->mask; j++) {
 			s = sk_head(&hash->table[j]);
-			while (s && sock_net(s) != seq_file_net(seq))
+
+			while (s && !nl_table[s->sk_protocol].compare(net, s))
 				s = sk_next(s);
 			if (s) {
 				iter->link = i;
@@@ -2923,6 -2971,8 +2971,8 @@@ static int __init netlink_proto_init(vo
 		hash->shift = 0;
 		hash->mask = 0;
 		hash->rehash_time = jiffies;
+
+		nl_table[i].compare = netlink_compare;
 	}
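The netlink changes above introduce a per-table compare callback: netlink_compare() is the net_eq() default installed in netlink_proto_init(), and __netlink_kernel_create() below lets a kernel user override it through cfg->compare. A hedged sketch of how a caller might supply its own comparator follows; the protocol number and the comparator body are placeholders, not taken from this patch.

#include <linux/netlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>

/* Illustrative comparator: accept only sockets from the matching namespace.
 * A real user spanning namespaces would encode its own policy here.
 */
static bool example_nl_compare(struct net *net, struct sock *sk)
{
	return net_eq(sock_net(sk), net);
}

static struct sock *example_create_nl_socket(struct net *net)
{
	struct netlink_kernel_cfg cfg = {
		.groups  = 1,
		.compare = example_nl_compare,	/* picked up via cfg->compare */
	};

	/* NETLINK_USERSOCK stands in for whichever protocol the caller owns. */
	return netlink_kernel_create(net, NETLINK_USERSOCK, &cfg);
}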
netlink_add_usersock_entry(); diff --combined net/packet/af_packet.c index 20a1bd0,79fe632..4b66c75 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@@ -2851,11 -2851,12 +2851,11 @@@ static int packet_getname_spkt(struct s return -EOPNOTSUPP;
 	uaddr->sa_family = AF_PACKET;
+	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
 	rcu_read_lock();
 	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
 	if (dev)
-		strncpy(uaddr->sa_data, dev->name, 14);
-	else
-		memset(uaddr->sa_data, 0, 14);
+		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
 	rcu_read_unlock();
 	*uaddr_len = sizeof(*uaddr);
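The packet_getname_spkt() hunk replaces a length-limited strncpy(), which does not guarantee NUL termination and needed the else-branch memset, with an unconditional memset followed by strlcpy(). A small illustration of the resulting pattern is sketched below; the helper name and its 14-byte field size are invented, mirroring sockaddr.sa_data.

#include <linux/string.h>

#define EXAMPLE_SA_DATA_LEN 14	/* same size as sockaddr.sa_data */

/* Illustration only, not from the patch: copy an interface name into a
 * fixed 14-byte field the way the new code does it.
 */
static void example_fill_sa_data(char dst[EXAMPLE_SA_DATA_LEN],
				 const char *ifname)
{
	/* Zero the whole field first so no stale bytes leak to userspace;
	 * strlcpy() then guarantees NUL termination even for long names,
	 * whereas strncpy() would leave dst unterminated when
	 * strlen(ifname) >= EXAMPLE_SA_DATA_LEN.
	 */
	memset(dst, 0, EXAMPLE_SA_DATA_LEN);
	strlcpy(dst, ifname, EXAMPLE_SA_DATA_LEN);
}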
@@@ -3330,10 -3331,11 +3330,11 @@@ static int packet_getsockopt(struct soc }
- static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data) + static int packet_notifier(struct notifier_block *this, + unsigned long msg, void *ptr) { struct sock *sk; - struct net_device *dev = data; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev);
rcu_read_lock(); diff --combined net/sctp/socket.c index 6abb1ca,510dc79..00a6a27 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@@ -3862,7 -3862,6 +3862,6 @@@ out SCTP_STATIC int sctp_init_sock(struct sock *sk) { struct net *net = sock_net(sk); - struct sctp_endpoint *ep; struct sctp_sock *sp;
SCTP_DEBUG_PRINTK("sctp_init_sock(sk: %p)\n", sk); @@@ -3971,11 -3970,10 +3970,10 @@@ * change the data structure relationships, this may still * be useful for storing pre-connect address information. */ - ep = sctp_endpoint_new(sk, GFP_KERNEL); - if (!ep) + sp->ep = sctp_endpoint_new(sk, GFP_KERNEL); + if (!sp->ep) return -ENOMEM;
- sp->ep = ep; sp->hmac = NULL;
SCTP_DBG_OBJCNT_INC(sock); @@@ -4003,12 -4001,6 +4001,12 @@@ SCTP_STATIC void sctp_destroy_sock(stru
/* Release our hold on the endpoint. */ sp = sctp_sk(sk); + /* This could happen during socket init, thus we bail out + * early, since the rest of the below is not setup either. + */ + if (sp->ep == NULL) + return; + if (sp->do_auto_asconf) { sp->do_auto_asconf = 0; list_del(&sp->auto_asconf_list); diff --combined net/sunrpc/xprtsock.c index 5d6b0da,412de7c..ddf0602 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@@ -87,7 -87,7 +87,7 @@@ static struct ctl_table_header *sunrpc_ * FIXME: changing the UDP slot table size should also resize the UDP * socket buffers for existing UDP transports */ - static ctl_table xs_tunables_table[] = { + static struct ctl_table xs_tunables_table[] = { { .procname = "udp_slot_table_entries", .data = &xprt_udp_slot_table_entries, @@@ -143,7 -143,7 +143,7 @@@ { }, };
- static ctl_table sunrpc_table[] = { + static struct ctl_table sunrpc_table[] = { { .procname = "sunrpc", .mode = 0555, @@@ -2534,6 -2534,7 +2534,6 @@@ static struct rpc_xprt_ops bc_tcp_ops .reserve_xprt = xprt_reserve_xprt, .release_xprt = xprt_release_xprt, .alloc_slot = xprt_alloc_slot, - .rpcbind = xs_local_rpcbind, .buf_alloc = bc_malloc, .buf_free = bc_free, .send_request = bc_send_request,