The following commit has been merged in the master branch:

commit cef8826c48c5f581ed1215da07120b12e20751e3
Merge: 62b804b9a2462bdbe556b0fa8b74e86041cc3706 b43faac69062f0fc75bd3230d67da64e184232d1
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Wed Dec 14 13:02:10 2011 +1100
Merge remote-tracking branch 'net-next/master'
Conflicts:
	drivers/net/ethernet/freescale/fsl_pq_mdio.c
	net/batman-adv/translation-table.c
diff --combined Documentation/feature-removal-schedule.txt
index dca11ea,33f7327..a57dc2e
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@@ -263,8 -263,7 +263,7 @@@ Who: Ravikiran Thirumalai <kiran@scalex
What: Code that is now under CONFIG_WIRELESS_EXT_SYSFS (in net/core/net-sysfs.c) - When: After the only user (hal) has seen a release with the patches - for enough time, probably some time in 2010. + When: 3.5 Why: Over 1K .text/.data size reduction, data is available in other ways (ioctls) Who: Johannes Berg johannes@sipsolutions.net @@@ -551,15 -550,3 +550,15 @@@ When: 3. Why: The iwlagn module has been renamed iwlwifi. The alias will be around for backward compatibility for several cycles and then dropped. Who: Don Fry donald.h.fry@intel.com + +---------------------------- + +What: pci_scan_bus_parented() +When: 3.5 +Why: The pci_scan_bus_parented() interface creates a new root bus. The + bus is created with default resources (ioport_resource and + iomem_resource) that are always wrong, so we rely on arch code to + correct them later. Callers of pci_scan_bus_parented() should + convert to using pci_scan_root_bus() so they can supply a list of + bus resources when the bus is created. +Who: Bjorn Helgaas bhelgaas@google.com diff --combined MAINTAINERS index af23d6d,209ad06..02e270b --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -511,8 -511,8 +511,8 @@@ M: Joerg Roedel <joerg.roedel@amd.com L: iommu@lists.linux-foundation.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu.git S: Supported -F: arch/x86/kernel/amd_iommu*.c -F: arch/x86/include/asm/amd_iommu*.h +F: drivers/iommu/amd_iommu*.[ch] +F: include/linux/amd-iommu.h
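The pci_scan_bus_parented() entry above asks callers to switch to pci_scan_root_bus() so the root bus resources can be supplied when the bus is created. A minimal sketch of such a conversion; the names example_scan, my_io_res, my_mem_res, my_pci_ops and my_sysdata are placeholders and not part of this commit:

#include <linux/pci.h>

/* hedged example: supply our own resources instead of the default
 * ioport_resource/iomem_resource that pci_scan_bus_parented() would use */
static struct pci_bus *example_scan(struct device *parent,
				    struct resource *my_io_res,
				    struct resource *my_mem_res,
				    struct pci_ops *my_pci_ops,
				    void *my_sysdata)
{
	LIST_HEAD(resources);

	pci_add_resource(&resources, my_io_res);
	pci_add_resource(&resources, my_mem_res);

	/* was: pci_scan_bus_parented(parent, 0, my_pci_ops, my_sysdata); */
	return pci_scan_root_bus(parent, 0, my_pci_ops, my_sysdata, &resources);
}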
AMD MICROCODE UPDATE SUPPORT M: Andreas Herrmann andreas.herrmann3@amd.com @@@ -749,7 -749,6 +749,7 @@@ M: Barry Song <baohua.song@csr.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-prima2/ +F: drivers/dma/sirf-dma*
ARM/EBSA110 MACHINE SUPPORT M: Russell King linux@arm.linux.org.uk @@@ -1055,18 -1054,35 +1055,18 @@@ ARM/SAMSUNG ARM ARCHITECTURE M: Ben Dooks ben-linux@fluff.org M: Kukjin Kim kgene.kim@samsung.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) W: http://www.fluff.org/ben/linux/ S: Maintained F: arch/arm/plat-samsung/ F: arch/arm/plat-s3c24xx/ F: arch/arm/plat-s5p/ +F: arch/arm/mach-s3c24*/ +F: arch/arm/mach-s3c64xx/ F: drivers/*/*s3c2410* F: drivers/*/*/*s3c2410* - -ARM/S3C2410 ARM ARCHITECTURE -M: Ben Dooks ben-linux@fluff.org -L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -W: http://www.fluff.org/ben/linux/ -S: Maintained -F: arch/arm/mach-s3c2410/ - -ARM/S3C244x ARM ARCHITECTURE -M: Ben Dooks ben-linux@fluff.org -L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -W: http://www.fluff.org/ben/linux/ -S: Maintained -F: arch/arm/mach-s3c2440/ -F: arch/arm/mach-s3c2443/ - -ARM/S3C64xx ARM ARCHITECTURE -M: Ben Dooks ben-linux@fluff.org -L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -W: http://www.fluff.org/ben/linux/ -S: Maintained -F: arch/arm/mach-s3c64xx/ +F: drivers/spi/spi-s3c* +F: sound/soc/samsung/*
ARM/S5P EXYNOS ARM ARCHITECTURES M: Kukjin Kim kgene.kim@samsung.com @@@ -1662,14 -1678,6 +1662,14 @@@ T: git git://git.alsa-project.org/alsa- S: Maintained F: sound/pci/oxygen/
+C6X ARCHITECTURE +M: Mark Salter msalter@redhat.com +M: Aurelien Jacquiot a-jacquiot@ti.com +L: linux-c6x-dev@linux-c6x.org +W: http://www.linux-c6x.org/wiki/index.php/Main_Page +S: Maintained +F: arch/c6x/ + CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS M: David Howells dhowells@redhat.com L: linux-cachefs@redhat.com @@@ -2846,14 -2854,6 +2846,14 @@@ L: platform-driver-x86@vger.kernel.or S: Maintained F: drivers/platform/x86/fujitsu-laptop.c
+FUJITSU M-5MO LS CAMERA ISP DRIVER +M: Kyungmin Park kyungmin.park@samsung.com +M: Heungjun Kim riverful.kim@samsung.com +L: linux-media@vger.kernel.org +S: Maintained +F: drivers/media/video/m5mols/ +F: include/media/m5mols.h + FUSE: FILESYSTEM IN USERSPACE M: Miklos Szeredi miklos@szeredi.hu L: fuse-devel@lists.sourceforge.net @@@ -3796,6 -3796,7 +3796,6 @@@ S: Odd Fixe
KERNEL NFSD, SUNRPC, AND LOCKD SERVERS M: "J. Bruce Fields" bfields@fieldses.org -M: Neil Brown neilb@suse.de L: linux-nfs@vger.kernel.org W: http://nfs.sourceforge.net/ S: Supported @@@ -4027,7 -4028,7 +4027,7 @@@ M: Josh Boyer <jwboyer@gmail.com M: Matt Porter mporter@kernel.crashing.org W: http://www.penguinppc.org/ L: linuxppc-dev@lists.ozlabs.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/jwboyer/powerpc-4xx.git +T: git git://git.infradead.org/users/jwboyer/powerpc-4xx.git S: Maintained F: arch/powerpc/platforms/40x/ F: arch/powerpc/platforms/44x/ @@@ -4131,7 -4132,6 +4131,7 @@@ F: fs/partitions/ldm.
LogFS M: Joern Engel joern@logfs.org +M: Prasad Joshi prasadjoshi.linux@gmail.com L: logfs@logfs.org W: logfs.org S: Maintained @@@ -4297,9 -4297,7 +4297,9 @@@ T: git git://git.kernel.org/pub/scm/lin S: Maintained F: Documentation/dvb/ F: Documentation/video4linux/ +F: Documentation/DocBook/media/ F: drivers/media/ +F: drivers/staging/media/ F: include/media/ F: include/linux/dvb/ F: include/linux/videodev*.h @@@ -4321,9 -4319,8 +4321,9 @@@ F: include/linux/mm. F: mm/
MEMORY RESOURCE CONTROLLER +M: Johannes Weiner hannes@cmpxchg.org +M: Michal Hocko mhocko@suse.cz M: Balbir Singh bsingharora@gmail.com -M: Daisuke Nishimura nishimura@mxp.nes.nec.co.jp M: KAMEZAWA Hiroyuki kamezawa.hiroyu@jp.fujitsu.com L: cgroups@vger.kernel.org L: linux-mm@kvack.org @@@ -4871,6 -4868,14 +4871,14 @@@ S: Maintaine T: git git://openrisc.net/~jonas/linux F: arch/openrisc
+ OPENVSWITCH + M: Jesse Gross jesse@nicira.com + L: dev@openvswitch.org + W: http://openvswitch.org + T: git git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch.git + S: Maintained + F: net/openvswitch/ + OPL4 DRIVER M: Clemens Ladisch clemens@ladisch.de L: alsa-devel@alsa-project.org (moderated for non-subscribers) @@@ -6513,6 -6518,13 +6521,13 @@@ W: http://tcp-lp-mod.sourceforge.net S: Maintained F: net/ipv4/tcp_lp.c
+ TEAM DRIVER + M: Jiri Pirko jpirko@redhat.com + L: netdev@vger.kernel.org + S: Supported + F: drivers/net/team/ + F: include/linux/if_team.h + TEGRA SUPPORT M: Colin Cross ccross@android.com M: Olof Johansson olof@lixom.net diff --combined drivers/net/ethernet/Makefile index 94b7f28,cd6d69a..08d5f03 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@@ -10,10 -10,11 +10,11 @@@ obj-$(CONFIG_NET_VENDOR_ALTEON) += alte obj-$(CONFIG_NET_VENDOR_AMD) += amd/ obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ -obj-$(CONFIG_NET_ATMEL) += cadence/ +obj-$(CONFIG_NET_CADENCE) += cadence/ obj-$(CONFIG_NET_BFIN) += adi/ obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/ + obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/ obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/ obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/ diff --combined drivers/net/ethernet/freescale/fec.c index c136230,01ee9cc..4ea2bdc --- a/drivers/net/ethernet/freescale/fec.c +++ b/drivers/net/ethernet/freescale/fec.c @@@ -99,7 -99,7 +99,7 @@@ static struct platform_device_id fec_de MODULE_DEVICE_TABLE(platform, fec_devtype);
enum imx_fec_type { - IMX25_FEC = 1, /* runs on i.mx25/50/53 */ + IMX25_FEC = 1, /* runs on i.mx25/50/53 */ IMX27_FEC, /* runs on i.mx27/35/51 */ IMX28_FEC, IMX6Q_FEC, @@@ -132,7 -132,7 +132,7 @@@ MODULE_PARM_DESC(macaddr, "FEC Etherne #elif defined (CONFIG_M5272C3) #define FEC_FLASHMAC (0xffe04000 + 4) #elif defined(CONFIG_MOD5272) - #define FEC_FLASHMAC 0xffc0406b + #define FEC_FLASHMAC 0xffc0406b #else #define FEC_FLASHMAC 0 #endif @@@ -232,7 -232,6 +232,7 @@@ struct fec_enet_private struct platform_device *pdev;
int opened; + int dev_id;
/* Phylib and MDIO interface */ struct mii_bus *mii_bus; @@@ -260,6 -259,8 +260,8 @@@ /* Transmitter timeout */ #define TX_TIMEOUT (2 * HZ)
+ static int mii_cnt; + static void *swap_buffer(void *bufaddr, int len) { int i; @@@ -516,6 -517,7 +518,7 @@@ fec_stop(struct net_device *ndev struct fec_enet_private *fep = netdev_priv(ndev); const struct platform_device_id *id_entry = platform_get_device_id(fep->pdev); + u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
/* We cannot expect a graceful transmit stop without link !!! */ if (fep->link) { @@@ -532,8 -534,10 +535,10 @@@ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
/* We have to keep ENET enabled to have MII interrupt stay working */ - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) + if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { writel(2, fep->hwp + FEC_ECNTRL); + writel(rmii_mode, fep->hwp + FEC_R_CNTRL); + } }
@@@ -819,7 -823,7 +824,7 @@@ static void __inline__ fec_get_mac(stru iap = (unsigned char *)FEC_FLASHMAC; #else if (pdata) - memcpy(iap, pdata->mac, ETH_ALEN); + iap = (unsigned char *)&pdata->mac; #endif }
@@@ -838,7 -842,7 +843,7 @@@
/* Adjust MAC if using macaddr */ if (iap == macaddr) - ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id; + ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id; }
/* ------------------------------------------------------------------------- */ @@@ -866,6 -870,8 +871,8 @@@ static void fec_enet_adjust_link(struc if (phy_dev->link) { if (fep->full_duplex != phy_dev->duplex) { fec_restart(ndev, phy_dev->duplex); + /* prevent unnecessary second fec_restart() below */ + fep->link = phy_dev->link; status_change = 1; } } @@@ -954,7 -960,7 +961,7 @@@ static int fec_enet_mii_probe(struct ne char mdio_bus_id[MII_BUS_ID_SIZE]; char phy_name[MII_BUS_ID_SIZE + 3]; int phy_id; - int dev_id = fep->pdev->id; + int dev_id = fep->dev_id;
fep->phy_dev = NULL;
@@@ -973,8 -979,9 +980,9 @@@ }
if (phy_id >= PHY_MAX_ADDR) { - printk(KERN_INFO "%s: no PHY, assuming direct connection " - "to switch\n", ndev->name); + printk(KERN_INFO + "%s: no PHY, assuming direct connection to switch\n", + ndev->name); strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); phy_id = 0; } @@@ -999,8 -1006,9 +1007,9 @@@ fep->link = 0; fep->full_duplex = 0;
- printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] " - "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name, + printk(KERN_INFO + "%s: Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", + ndev->name, fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), fep->phy_dev->irq);
@@@ -1032,10 -1040,14 +1041,14 @@@ static int fec_enet_mii_init(struct pla * mdio interface in board design, and need to be configured by * fec0 mii_bus. */ - if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id > 0) { + if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) { /* fec1 uses fec0 mii_bus */ - fep->mii_bus = fec0_mii_bus; - return 0; + if (mii_cnt && fec0_mii_bus) { + fep->mii_bus = fec0_mii_bus; + mii_cnt++; + return 0; + } + return -ENOENT; }
fep->mii_timeout = 0; @@@ -1064,7 -1076,7 +1077,7 @@@ fep->mii_bus->read = fec_enet_mdio_read; fep->mii_bus->write = fec_enet_mdio_write; fep->mii_bus->reset = fec_enet_mdio_reset; - snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1); + snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", fep->dev_id + 1); fep->mii_bus->priv = fep; fep->mii_bus->parent = &pdev->dev;
@@@ -1080,6 -1092,8 +1093,8 @@@ if (mdiobus_register(fep->mii_bus)) goto err_out_free_mdio_irq;
+ mii_cnt++; + /* save fec0 mii_bus */ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) fec0_mii_bus = fep->mii_bus; @@@ -1096,11 -1110,11 +1111,11 @@@ err_out
static void fec_enet_mii_remove(struct fec_enet_private *fep) { - if (fep->phy_dev) - phy_disconnect(fep->phy_dev); - mdiobus_unregister(fep->mii_bus); - kfree(fep->mii_bus->irq); - mdiobus_free(fep->mii_bus); + if (--mii_cnt == 0) { + mdiobus_unregister(fep->mii_bus); + kfree(fep->mii_bus->irq); + mdiobus_free(fep->mii_bus); + } }
static int fec_enet_get_settings(struct net_device *ndev, @@@ -1522,7 -1536,6 +1537,7 @@@ fec_probe(struct platform_device *pdev int i, irq, ret = 0; struct resource *r; const struct of_device_id *of_id; + static int dev_id;
of_id = of_match_device(fec_dt_ids, &pdev->dev); if (of_id) @@@ -1550,7 -1563,6 +1565,7 @@@
fep->hwp = ioremap(r->start, resource_size(r)); fep->pdev = pdev; + fep->dev_id = dev_id++;
if (!fep->hwp) { ret = -ENOMEM; @@@ -1574,8 -1586,12 +1589,12 @@@
for (i = 0; i < FEC_IRQ_NUM; i++) { irq = platform_get_irq(pdev, i); - if (i && irq < 0) - break; + if (irq < 0) { + if (i) + break; + ret = irq; + goto failed_irq; + } ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev); if (ret) { while (--i >= 0) { @@@ -1586,7 -1602,7 +1605,7 @@@ } }
- fep->clk = clk_get(&pdev->dev, "fec_clk"); + fep->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(fep->clk)) { ret = PTR_ERR(fep->clk); goto failed_clk; @@@ -1638,13 -1654,18 +1657,18 @@@ fec_drv_remove(struct platform_device * struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); struct resource *r; + int i;
- fec_stop(ndev); + unregister_netdev(ndev); fec_enet_mii_remove(fep); + for (i = 0; i < FEC_IRQ_NUM; i++) { + int irq = platform_get_irq(pdev, i); + if (irq > 0) + free_irq(irq, ndev); + } clk_disable(fep->clk); clk_put(fep->clk); iounmap(fep->hwp); - unregister_netdev(ndev); free_netdev(ndev);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --combined drivers/net/ethernet/micrel/ks8842.c index de9f2e2,75ec87a..0a85690 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@@ -459,7 -459,7 +459,7 @@@ static int ks8842_tx_frame_dma(struct s sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan, - &ctl->sg, 1, DMA_TO_DEVICE, + &ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); if (!ctl->adesc) return NETDEV_TX_BUSY; @@@ -571,7 -571,7 +571,7 @@@ static int __ks8842_start_new_rx_dma(st sg_dma_len(sg) = DMA_BUFFER_SIZE;
ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan, - sg, 1, DMA_FROM_DEVICE, + sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
if (!ctl->adesc) @@@ -1264,18 -1264,7 +1264,7 @@@ static struct platform_driver ks8842_pl .remove = ks8842_remove, };
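The two ks8842 hunks above switch device_prep_slave_sg() from the DMA-mapping enum (DMA_TO_DEVICE/DMA_FROM_DEVICE) to the dmaengine transfer-direction enum (DMA_MEM_TO_DEV/DMA_DEV_TO_MEM). Illustrative call only; chan and sg stand for an already-configured slave channel and a mapped scatterlist:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static struct dma_async_tx_descriptor *example_prep_tx(struct dma_chan *chan,
							struct scatterlist *sg)
{
	/* direction is now enum dma_transfer_direction, not dma_data_direction */
	return chan->device->device_prep_slave_sg(chan, sg, 1,
						  DMA_MEM_TO_DEV, /* was DMA_TO_DEVICE */
						  DMA_PREP_INTERRUPT);
}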
- static int __init ks8842_init(void) - { - return platform_driver_register(&ks8842_platform_driver); - } - - static void __exit ks8842_exit(void) - { - platform_driver_unregister(&ks8842_platform_driver); - } - - module_init(ks8842_init); - module_exit(ks8842_exit); + module_platform_driver(ks8842_platform_driver);
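Both this hunk and the macsonic one below replace hand-rolled module_init()/module_exit() wrappers with the module_platform_driver() helper macro. Roughly, for a hypothetical platform_driver named foo_driver, the single line module_platform_driver(foo_driver); stands in for:

static int __init foo_driver_init(void)
{
	return platform_driver_register(&foo_driver);
}
module_init(foo_driver_init);

static void __exit foo_driver_exit(void)
{
	platform_driver_unregister(&foo_driver);
}
module_exit(foo_driver_exit);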
MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver"); MODULE_AUTHOR("Mocean Laboratories info@mocean-labs.com"); diff --combined drivers/net/ethernet/natsemi/macsonic.c index 5987d17,70367d7..f1b8556 --- a/drivers/net/ethernet/natsemi/macsonic.c +++ b/drivers/net/ethernet/natsemi/macsonic.c @@@ -142,7 -142,8 +142,7 @@@ static int macsonic_open(struct net_dev { int retval;
- retval = request_irq(dev->irq, sonic_interrupt, IRQ_FLG_FAST, - "sonic", dev); + retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev); if (retval) { printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); @@@ -153,8 -154,8 +153,8 @@@ * rupt as well, which must prevent re-entrance of the sonic handler. */ if (dev->irq == IRQ_AUTO_3) { - retval = request_irq(IRQ_NUBUS_9, macsonic_interrupt, - IRQ_FLG_FAST, "sonic", dev); + retval = request_irq(IRQ_NUBUS_9, macsonic_interrupt, 0, + "sonic", dev); if (retval) { printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, IRQ_NUBUS_9); @@@ -642,15 -643,4 +642,4 @@@ static struct platform_driver mac_sonic }, };
- static int __init mac_sonic_init_module(void) - { - return platform_driver_register(&mac_sonic_driver); - } - - static void __exit mac_sonic_cleanup_module(void) - { - platform_driver_unregister(&mac_sonic_driver); - } - - module_init(mac_sonic_init_module); - module_exit(mac_sonic_cleanup_module); + module_platform_driver(mac_sonic_driver); diff --combined drivers/net/ppp/pptp.c index f8a6853,ede899c..c1c9293 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@@ -162,7 -162,7 +162,7 @@@ static void del_chan(struct pppox_sock { spin_lock(&chan_lock); clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap); - rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], NULL); + RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL); spin_unlock(&chan_lock); synchronize_rcu(); } @@@ -423,8 -423,10 +423,8 @@@ static int pptp_bind(struct socket *soc lock_sock(sk);
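The del_chan() hunk above uses RCU_INIT_POINTER() instead of rcu_assign_pointer() when storing NULL: no memory barrier is needed because there is no newly initialised object for readers to observe. A sketch of the general pattern, with example_obj and slot as hypothetical names:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_obj { int val; };
static struct example_obj __rcu *slot;	/* hypothetical RCU-protected slot */

static void example_publish(struct example_obj *new_obj)
{
	rcu_assign_pointer(slot, new_obj);	/* barrier: readers see an initialised object */
}

static void example_clear(struct example_obj *old)
{
	RCU_INIT_POINTER(slot, NULL);	/* storing NULL needs no barrier */
	synchronize_rcu();		/* wait for readers before freeing */
	kfree(old);
}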
opt->src_addr = sp->sa_addr.pptp; - if (add_chan(po)) { - release_sock(sk); + if (add_chan(po)) error = -EBUSY; - }
release_sock(sk); return error; diff --combined drivers/net/wireless/ath/ath9k/main.c index d2348a5,5007297..7d92004 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@@ -118,7 -118,7 +118,7 @@@ void ath9k_ps_restore(struct ath_softc if (--sc->ps_usecount != 0) goto unlock;
- if (sc->ps_idle) + if (sc->ps_idle && (sc->ps_flags & PS_WAIT_FOR_TX_ACK)) mode = ATH9K_PM_FULL_SLEEP; else if (sc->ps_enabled && !(sc->ps_flags & (PS_WAIT_FOR_BEACON | @@@ -286,7 -286,7 +286,7 @@@ static bool ath_complete_reset(struct a ath_start_ani(common); }
- if (ath9k_hw_ops(ah)->antdiv_comb_conf_get && sc->ant_rx != 3) { + if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3) { struct ath_hw_antcomb_conf div_ant_conf; u8 lna_conf;
@@@ -332,7 -332,8 +332,8 @@@ static int ath_reset_internal(struct at hchan = ah->curchan; }
- if (fastcc && !ath9k_hw_check_alive(ah)) + if (fastcc && (ah->chip_fullsleep || + !ath9k_hw_check_alive(ah))) fastcc = false;
if (!ath_prepare_reset(sc, retry_tx, flush)) @@@ -561,7 -562,6 +562,6 @@@ void ath_ani_calibrate(unsigned long da /* Long calibration runs independently of short calibration. */ if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) { longcal = true; - ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies); common->ani.longcal_timer = timestamp; }
@@@ -569,8 -569,6 +569,6 @@@ if (!common->ani.caldone) { if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) { shortcal = true; - ath_dbg(common, ATH_DBG_ANI, - "shortcal @%lu\n", jiffies); common->ani.shortcal_timer = timestamp; common->ani.resetcal_timer = timestamp; } @@@ -584,8 -582,9 +582,9 @@@ }
/* Verify whether we must check ANI */ - if ((timestamp - common->ani.checkani_timer) >= - ah->config.ani_poll_interval) { + if (sc->sc_ah->config.enable_ani + && (timestamp - common->ani.checkani_timer) >= + ah->config.ani_poll_interval) { aniflag = true; common->ani.checkani_timer = timestamp; } @@@ -605,6 -604,11 +604,11 @@@ ah->rxchainmask, longcal); }
+ ath_dbg(common, ATH_DBG_ANI, + "Calibration @%lu finished: %s %s %s, caldone: %s\n", jiffies, + longcal ? "long" : "", shortcal ? "short" : "", + aniflag ? "ani" : "", common->ani.caldone ? "true" : "false"); + ath9k_ps_restore(sc);
set_timer: @@@ -630,7 -634,8 +634,8 @@@ } }
- static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta) + static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta, + struct ieee80211_vif *vif) { struct ath_node *an; an = (struct ath_node *)sta->drv_priv; @@@ -640,6 -645,7 +645,7 @@@ list_add(&an->list, &sc->nodes); spin_unlock(&sc->nodes_lock); an->sta = sta; + an->vif = vif; #endif if (sc->sc_flags & SC_OP_TXAGGR) { ath_tx_node_init(sc, an); @@@ -740,6 -746,9 +746,9 @@@ void ath9k_tasklet(unsigned long data if (status & ATH9K_INT_GENTIMER) ath_gen_timer_isr(sc->sc_ah);
+ if (status & ATH9K_INT_MCI) + ath_mci_intr(sc); + out: /* re-enable hardware interrupt */ ath9k_hw_enable_interrupts(ah); @@@ -762,7 -771,8 +771,8 @@@ irqreturn_t ath_isr(int irq, void *dev ATH9K_INT_BMISS | \ ATH9K_INT_CST | \ ATH9K_INT_TSFOOR | \ - ATH9K_INT_GENTIMER) + ATH9K_INT_GENTIMER | \ + ATH9K_INT_MCI)
struct ath_softc *sc = dev; struct ath_hw *ah = sc->sc_ah; @@@ -880,82 -890,6 +890,6 @@@ chip_reset #undef SCHED_INTR }
- static void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw) - { - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); - struct ieee80211_channel *channel = hw->conf.channel; - int r; - - ath9k_ps_wakeup(sc); - spin_lock_bh(&sc->sc_pcu_lock); - atomic_set(&ah->intr_ref_cnt, -1); - - ath9k_hw_configpcipowersave(ah, false); - - if (!ah->curchan) - ah->curchan = ath9k_cmn_get_curchannel(sc->hw, ah); - - r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); - if (r) { - ath_err(common, - "Unable to reset channel (%u MHz), reset status %d\n", - channel->center_freq, r); - } - - ath_complete_reset(sc, true); - - /* Enable LED */ - ath9k_hw_cfg_output(ah, ah->led_pin, - AR_GPIO_OUTPUT_MUX_AS_OUTPUT); - ath9k_hw_set_gpio(ah, ah->led_pin, 0); - - spin_unlock_bh(&sc->sc_pcu_lock); - - ath9k_ps_restore(sc); - } - - void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw) - { - struct ath_hw *ah = sc->sc_ah; - struct ieee80211_channel *channel = hw->conf.channel; - int r; - - ath9k_ps_wakeup(sc); - - ath_cancel_work(sc); - - spin_lock_bh(&sc->sc_pcu_lock); - - /* - * Keep the LED on when the radio is disabled - * during idle unassociated state. - */ - if (!sc->ps_idle) { - ath9k_hw_set_gpio(ah, ah->led_pin, 1); - ath9k_hw_cfg_gpio_input(ah, ah->led_pin); - } - - ath_prepare_reset(sc, false, true); - - if (!ah->curchan) - ah->curchan = ath9k_cmn_get_curchannel(hw, ah); - - r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); - if (r) { - ath_err(ath9k_hw_common(sc->sc_ah), - "Unable to reset channel (%u MHz), reset status %d\n", - channel->center_freq, r); - } - - ath9k_hw_phy_disable(ah); - - ath9k_hw_configpcipowersave(ah, true); - - spin_unlock_bh(&sc->sc_pcu_lock); - ath9k_ps_restore(sc); - } - static int ath_reset(struct ath_softc *sc, bool retry_tx) { int r; @@@ -1091,6 -1025,9 +1025,9 @@@ static int ath9k_start(struct ieee80211 * and then setup of the interrupt mask. */ spin_lock_bh(&sc->sc_pcu_lock); + + atomic_set(&ah->intr_ref_cnt, -1); + r = ath9k_hw_reset(ah, init_channel, ah->caldata, false); if (r) { ath_err(common, @@@ -1117,6 -1054,9 +1054,9 @@@ if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) ah->imask |= ATH9K_INT_CST;
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) + ah->imask |= ATH9K_INT_MCI; + sc->sc_flags &= ~SC_OP_INVALID; sc->sc_ah->is_monitoring = false;
@@@ -1129,12 -1069,25 +1069,25 @@@ goto mutex_unlock; }
+ if (ah->led_pin >= 0) { + ath9k_hw_cfg_output(ah, ah->led_pin, + AR_GPIO_OUTPUT_MUX_AS_OUTPUT); + ath9k_hw_set_gpio(ah, ah->led_pin, 0); + } + + /* + * Reset key cache to sane defaults (all entries cleared) instead of + * semi-random values after suspend/resume. + */ + ath9k_cmn_init_crypto(sc->sc_ah); + spin_unlock_bh(&sc->sc_pcu_lock);
if ((ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) && !ah->btcoex_hw.enabled) { - ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, - AR_STOMP_LOW_WLAN_WGHT); + if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI)) + ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT, + AR_STOMP_LOW_WLAN_WGHT); ath9k_hw_btcoex_enable(ah);
if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) @@@ -1173,6 -1126,13 +1126,13 @@@ static void ath9k_tx(struct ieee80211_h } }
+ /* + * Cannot tx while the hardware is in full sleep, it first needs a full + * chip reset to recover from that + */ + if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP)) + goto exit; + if (unlikely(sc->sc_ah->power_mode != ATH9K_PM_AWAKE)) { /* * We are using PS-Poll and mac80211 can request TX while in @@@ -1219,6 -1179,7 +1179,7 @@@ static void ath9k_stop(struct ieee80211 struct ath_softc *sc = hw->priv; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); + bool prev_idle;
mutex_lock(&sc->mutex);
@@@ -1237,6 -1198,7 +1198,7 @@@ ath9k_hw_btcoex_disable(ah); if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) ath9k_btcoex_timer_pause(sc); + ath_mci_flush_profile(&sc->btcoex.mci); }
spin_lock_bh(&sc->sc_pcu_lock); @@@ -1248,35 -1210,45 +1210,45 @@@ * before setting the invalid flag. */ ath9k_hw_disable_interrupts(ah);
- if (!(sc->sc_flags & SC_OP_INVALID)) { - ath_drain_all_txq(sc, false); - ath_stoprecv(sc); - ath9k_hw_phy_disable(ah); - } else - sc->rx.rxlink = NULL; + spin_unlock_bh(&sc->sc_pcu_lock); + + /* we can now sync irq and kill any running tasklets, since we already + * disabled interrupts and not holding a spin lock */ + synchronize_irq(sc->irq); + tasklet_kill(&sc->intr_tq); + tasklet_kill(&sc->bcon_tasklet); + + prev_idle = sc->ps_idle; + sc->ps_idle = true; + + spin_lock_bh(&sc->sc_pcu_lock); + + if (ah->led_pin >= 0) { + ath9k_hw_set_gpio(ah, ah->led_pin, 1); + ath9k_hw_cfg_gpio_input(ah, ah->led_pin); + } + + ath_prepare_reset(sc, false, true);
if (sc->rx.frag) { dev_kfree_skb_any(sc->rx.frag); sc->rx.frag = NULL; }
- /* disable HAL and put h/w to sleep */ - ath9k_hw_disable(ah); + if (!ah->curchan) + ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
- spin_unlock_bh(&sc->sc_pcu_lock); + ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); + ath9k_hw_phy_disable(ah);
- /* we can now sync irq and kill any running tasklets, since we already - * disabled interrupts and not holding a spin lock */ - synchronize_irq(sc->irq); - tasklet_kill(&sc->intr_tq); - tasklet_kill(&sc->bcon_tasklet); + ath9k_hw_configpcipowersave(ah, true);
- ath9k_ps_restore(sc); + spin_unlock_bh(&sc->sc_pcu_lock);
- sc->ps_idle = true; - ath_radio_disable(sc, hw); + ath9k_ps_restore(sc);
sc->sc_flags |= SC_OP_INVALID; + sc->ps_idle = prev_idle;
mutex_unlock(&sc->mutex);
@@@ -1616,8 -1588,8 +1588,8 @@@ static int ath9k_config(struct ieee8021 struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_conf *conf = &hw->conf; - bool disable_radio = false;
+ ath9k_ps_wakeup(sc); mutex_lock(&sc->mutex);
/* @@@ -1628,13 -1600,8 +1600,8 @@@ */ if (changed & IEEE80211_CONF_CHANGE_IDLE) { sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE); - if (!sc->ps_idle) { - ath_radio_enable(sc, hw); - ath_dbg(common, ATH_DBG_CONFIG, - "not-idle: enabling radio\n"); - } else { - disable_radio = true; - } + if (sc->ps_idle) + ath_cancel_work(sc); }
/* @@@ -1741,18 -1708,12 +1708,12 @@@ ath_dbg(common, ATH_DBG_CONFIG, "Set power: %d\n", conf->power_level); sc->config.txpowlimit = 2 * conf->power_level; - ath9k_ps_wakeup(sc); ath9k_cmn_update_txpow(ah, sc->curtxpow, sc->config.txpowlimit, &sc->curtxpow); }
mutex_unlock(&sc->mutex); + ath9k_ps_restore(sc);
return 0; } @@@ -1798,7 -1759,7 +1759,7 @@@ static int ath9k_sta_add(struct ieee802 struct ath_node *an = (struct ath_node *) sta->drv_priv; struct ieee80211_key_conf ps_key = { };
- ath_node_attach(sc, sta); + ath_node_attach(sc, sta, vif);
if (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_AP_VLAN) @@@ -2320,9 -2281,6 +2281,6 @@@ static void ath9k_flush(struct ieee8021 return; }
- if (drop) - timeout = 1; - for (j = 0; j < timeout; j++) { bool npend = false;
@@@ -2340,21 -2298,22 +2298,22 @@@ }
if (!npend) - goto out; + break; }
- ath9k_ps_wakeup(sc); - spin_lock_bh(&sc->sc_pcu_lock); - drain_txq = ath_drain_all_txq(sc, false); - spin_unlock_bh(&sc->sc_pcu_lock); + if (drop) { + ath9k_ps_wakeup(sc); + spin_lock_bh(&sc->sc_pcu_lock); + drain_txq = ath_drain_all_txq(sc, false); + spin_unlock_bh(&sc->sc_pcu_lock);
- if (!drain_txq) - ath_reset(sc, false); + if (!drain_txq) + ath_reset(sc, false);
- ath9k_ps_restore(sc); - ieee80211_wake_queues(hw); + ath9k_ps_restore(sc); + ieee80211_wake_queues(hw); + }
- out: ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0); mutex_unlock(&sc->mutex); } diff --combined drivers/net/wireless/iwlwifi/iwl-agn-tx.c index df1540c,a1a95d5..67c66dc --- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c @@@ -91,10 -91,7 +91,10 @@@ static void iwlagn_tx_cmd_build_basic(s tx_cmd->tid_tspec = qc[0] & 0xf; tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; } else { - tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + else + tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; }
iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags); @@@ -286,6 -283,19 +286,19 @@@ int iwlagn_tx_skb(struct iwl_priv *priv IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); #endif
+ if (unlikely(ieee80211_is_probe_resp(fc))) { + struct iwl_wipan_noa_data *noa_data = + rcu_dereference(priv->noa_data); + + if (noa_data && + pskb_expand_head(skb, 0, noa_data->length, + GFP_ATOMIC) == 0) { + memcpy(skb_put(skb, noa_data->length), + noa_data->data, noa_data->length); + hdr = (struct ieee80211_hdr *)skb->data; + } + } + hdr_len = ieee80211_hdrlen(fc);
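The probe-response hunk above appends the NoA data by first growing the skb tailroom with pskb_expand_head() and then copying into the area reserved by skb_put(). The same pattern in isolation, with example_append, extra and extra_len as placeholder names:

#include <linux/skbuff.h>
#include <linux/string.h>

static int example_append(struct sk_buff *skb, const void *extra, int extra_len)
{
	/* grow the tailroom by extra_len bytes (may reallocate the skb head) */
	if (pskb_expand_head(skb, 0, extra_len, GFP_ATOMIC))
		return -ENOMEM;

	/* skb_put() extends the data area and returns where to copy */
	memcpy(skb_put(skb, extra_len), extra, extra_len);
	return 0;
}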
/* For management frames use broadcast id to do not break aggregation */ @@@ -780,6 -790,7 +793,7 @@@ int iwlagn_rx_reply_tx(struct iwl_priv iwl_rx_reply_tx_agg(priv, tx_resp);
if (tx_resp->frame_count == 1) { + IWL_DEBUG_TX_REPLY(priv, "Q %d, ssn %d", txq_id, ssn); __skb_queue_head_init(&skbs); /*we can free until ssn % q.n_bd not inclusive */ iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id, @@@ -803,7 -814,8 +817,8 @@@ iwl_is_associated_ctx(ctx) && ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION) { ctx->last_tx_rejected = true; - iwl_trans_stop_queue(trans(priv), txq_id); + iwl_trans_stop_queue(trans(priv), txq_id, + "Tx on passive channel");
IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) " @@@ -909,11 -921,9 +924,9 @@@ int iwlagn_rx_reply_compressed_ba(struc ba_resp->sta_id); IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, " "scd_flow = %d, scd_ssn = %d\n", - ba_resp->tid, - ba_resp->seq_ctl, + ba_resp->tid, ba_resp->seq_ctl, (unsigned long long)le64_to_cpu(ba_resp->bitmap), - ba_resp->scd_flow, - ba_resp->scd_ssn); + scd_flow, ba_resp_scd_ssn);
/* Mark that the expected block-ack response arrived */ agg->wait_for_ba = false; diff --combined net/batman-adv/translation-table.c index 5f09a57,cc87acf..dc0779b --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@@ -36,18 -36,9 +36,9 @@@ static void _tt_global_del(struct bat_p static void tt_purge(struct work_struct *work);
/* returns 1 if they are the same mac addr */ - static int compare_ltt(const struct hlist_node *node, const void *data2) + static int compare_tt(const struct hlist_node *node, const void *data2) { - const void *data1 = container_of(node, struct tt_local_entry, - hash_entry); - - return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); - } - - /* returns 1 if they are the same mac addr */ - static int compare_gtt(const struct hlist_node *node, const void *data2) - { - const void *data1 = container_of(node, struct tt_global_entry, + const void *data1 = container_of(node, struct tt_common_entry, hash_entry);
return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); @@@ -60,14 -51,13 +51,13 @@@ static void tt_start_timer(struct bat_p msecs_to_jiffies(5000)); }
- static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv, - const void *data) + static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash, + const void *data) { - struct hashtable_t *hash = bat_priv->tt_local_hash; struct hlist_head *head; struct hlist_node *node; - struct tt_local_entry *tt_local_entry, *tt_local_entry_tmp = NULL; - int index; + struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL; + uint32_t index;
if (!hash) return NULL; @@@ -76,51 -66,46 +66,46 @@@ head = &hash->table[index];
rcu_read_lock(); - hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) { - if (!compare_eth(tt_local_entry, data)) + hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) { + if (!compare_eth(tt_common_entry, data)) continue;
- if (!atomic_inc_not_zero(&tt_local_entry->refcount)) + if (!atomic_inc_not_zero(&tt_common_entry->refcount)) continue;
- tt_local_entry_tmp = tt_local_entry; + tt_common_entry_tmp = tt_common_entry; break; } rcu_read_unlock();
- return tt_local_entry_tmp; + return tt_common_entry_tmp; }
- static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv, - const void *data) + static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv, + const void *data) { - struct hashtable_t *hash = bat_priv->tt_global_hash; - struct hlist_head *head; - struct hlist_node *node; - struct tt_global_entry *tt_global_entry; - struct tt_global_entry *tt_global_entry_tmp = NULL; - int index; - - if (!hash) - return NULL; - - index = choose_orig(data, hash->size); - head = &hash->table[index]; + struct tt_common_entry *tt_common_entry; + struct tt_local_entry *tt_local_entry = NULL;
- rcu_read_lock(); - hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) { - if (!compare_eth(tt_global_entry, data)) - continue; + tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data); + if (tt_common_entry) + tt_local_entry = container_of(tt_common_entry, + struct tt_local_entry, common); + return tt_local_entry; + }
- if (!atomic_inc_not_zero(&tt_global_entry->refcount)) - continue; + static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv, + const void *data) + { + struct tt_common_entry *tt_common_entry; + struct tt_global_entry *tt_global_entry = NULL;
- tt_global_entry_tmp = tt_global_entry; - break; - } - rcu_read_unlock(); + tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data); + if (tt_common_entry) + tt_global_entry = container_of(tt_common_entry, + struct tt_global_entry, common); + return tt_global_entry;
- return tt_global_entry_tmp; }
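The refactor above folds the fields shared by local and global translation-table entries into a struct tt_common_entry that both types embed, so a single tt_hash_find() serves both hashes and callers recover the outer type with container_of(). Sketch of the shape only, with field lists abbreviated rather than the full batman-adv definitions:

struct tt_common_entry {		/* shared part, lives in the hash */
	uint8_t addr[ETH_ALEN];
	struct hlist_node hash_entry;
	uint16_t flags;
	atomic_t refcount;
	struct rcu_head rcu;
};

struct tt_local_entry {			/* type-specific data wraps the common part */
	struct tt_common_entry common;
	unsigned long last_seen;
};

static struct tt_local_entry *example_local_find(struct bat_priv *bat_priv,
						 const void *addr)
{
	struct tt_common_entry *tt_common_entry;

	tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, addr);
	if (!tt_common_entry)
		return NULL;

	return container_of(tt_common_entry, struct tt_local_entry, common);
}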
static bool is_out_of_time(unsigned long starting_time, unsigned long timeout) @@@ -133,15 -118,18 +118,18 @@@
static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry) { - if (atomic_dec_and_test(&tt_local_entry->refcount)) - kfree_rcu(tt_local_entry, rcu); + if (atomic_dec_and_test(&tt_local_entry->common.refcount)) + kfree_rcu(tt_local_entry, common.rcu); }
static void tt_global_entry_free_rcu(struct rcu_head *rcu) { + struct tt_common_entry *tt_common_entry; struct tt_global_entry *tt_global_entry;
- tt_global_entry = container_of(rcu, struct tt_global_entry, rcu); + tt_common_entry = container_of(rcu, struct tt_common_entry, rcu); + tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, + common);
if (tt_global_entry->orig_node) orig_node_free_ref(tt_global_entry->orig_node); @@@ -151,8 -139,9 +139,9 @@@
static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry) { - if (atomic_dec_and_test(&tt_global_entry->refcount)) - call_rcu(&tt_global_entry->rcu, tt_global_entry_free_rcu); + if (atomic_dec_and_test(&tt_global_entry->common.refcount)) + call_rcu(&tt_global_entry->common.rcu, + tt_global_entry_free_rcu); }
static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr, @@@ -201,6 -190,7 +190,7 @@@ void tt_local_add(struct net_device *so struct bat_priv *bat_priv = netdev_priv(soft_iface); struct tt_local_entry *tt_local_entry = NULL; struct tt_global_entry *tt_global_entry = NULL; + int hash_added;
tt_local_entry = tt_local_hash_find(bat_priv, addr);
@@@ -217,26 -207,33 +207,33 @@@ "Creating new local tt entry: %pM (ttvn: %d)\n", addr, (uint8_t)atomic_read(&bat_priv->ttvn));
- memcpy(tt_local_entry->addr, addr, ETH_ALEN); - tt_local_entry->last_seen = jiffies; - tt_local_entry->flags = NO_FLAGS; + memcpy(tt_local_entry->common.addr, addr, ETH_ALEN); + tt_local_entry->common.flags = NO_FLAGS; if (is_wifi_iface(ifindex)) - tt_local_entry->flags |= TT_CLIENT_WIFI; - atomic_set(&tt_local_entry->refcount, 2); + tt_local_entry->common.flags |= TT_CLIENT_WIFI; + atomic_set(&tt_local_entry->common.refcount, 2); + tt_local_entry->last_seen = jiffies;
/* the batman interface mac address should never be purged */ if (compare_eth(addr, soft_iface->dev_addr)) - tt_local_entry->flags |= TT_CLIENT_NOPURGE; + tt_local_entry->common.flags |= TT_CLIENT_NOPURGE; + + hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig, + &tt_local_entry->common, + &tt_local_entry->common.hash_entry); + + if (unlikely(hash_added != 0)) { + /* remove the reference for the hash */ + tt_local_entry_free_ref(tt_local_entry); + goto out; + }
- tt_local_event(bat_priv, addr, tt_local_entry->flags); + tt_local_event(bat_priv, addr, tt_local_entry->common.flags);
/* The local entry has to be marked as NEW to avoid to send it in * a full table response going out before the next ttvn increment * (consistency check) */ - tt_local_entry->flags |= TT_CLIENT_NEW; - - hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig, - tt_local_entry, &tt_local_entry->hash_entry); + tt_local_entry->common.flags |= TT_CLIENT_NEW;
/* remove address from global hash if present */ tt_global_entry = tt_global_hash_find(bat_priv, addr); @@@ -245,12 -242,10 +242,12 @@@ if (tt_global_entry) { /* This node is probably going to update its tt table */ tt_global_entry->orig_node->tt_poss_change = true; - /* The global entry has to be marked as PENDING and has to be + /* The global entry has to be marked as ROAMING and has to be * kept for consistency purpose */ - tt_global_entry->flags |= TT_CLIENT_ROAM; - tt_global_entry->common.flags |= TT_CLIENT_PENDING; ++ tt_global_entry->common.flags |= TT_CLIENT_ROAM; + tt_global_entry->roam_at = jiffies; + - send_roam_adv(bat_priv, tt_global_entry->addr, + send_roam_adv(bat_priv, tt_global_entry->common.addr, tt_global_entry->orig_node); } out: @@@ -312,13 -307,12 +309,12 @@@ int tt_local_seq_print_text(struct seq_ struct net_device *net_dev = (struct net_device *)seq->private; struct bat_priv *bat_priv = netdev_priv(net_dev); struct hashtable_t *hash = bat_priv->tt_local_hash; - struct tt_local_entry *tt_local_entry; + struct tt_common_entry *tt_common_entry; struct hard_iface *primary_if; struct hlist_node *node; struct hlist_head *head; - size_t buf_size, pos; - char *buff; - int i, ret = 0; + uint32_t i; + int ret = 0;
primary_if = primary_if_get_selected(bat_priv); if (!primary_if) { @@@ -339,51 -333,27 +335,27 @@@ "announced via TT (TTVN: %u):\n", net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
- buf_size = 1; - /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */ - for (i = 0; i < hash->size; i++) { - head = &hash->table[i]; - - rcu_read_lock(); - __hlist_for_each_rcu(node, head) - buf_size += 29; - rcu_read_unlock(); - } - - buff = kmalloc(buf_size, GFP_ATOMIC); - if (!buff) { - ret = -ENOMEM; - goto out; - } - - buff[0] = '\0'; - pos = 0; - for (i = 0; i < hash->size; i++) { head = &hash->table[i];
rcu_read_lock(); - hlist_for_each_entry_rcu(tt_local_entry, node, + hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) { - pos += snprintf(buff + pos, 30, " * %pM " - "[%c%c%c%c%c]\n", - tt_local_entry->addr, - (tt_local_entry->flags & + seq_printf(seq, " * %pM [%c%c%c%c%c]\n", + tt_common_entry->addr, + (tt_common_entry->flags & TT_CLIENT_ROAM ? 'R' : '.'), - (tt_local_entry->flags & + (tt_common_entry->flags & TT_CLIENT_NOPURGE ? 'P' : '.'), - (tt_local_entry->flags & + (tt_common_entry->flags & TT_CLIENT_NEW ? 'N' : '.'), - (tt_local_entry->flags & + (tt_common_entry->flags & TT_CLIENT_PENDING ? 'X' : '.'), - (tt_local_entry->flags & + (tt_common_entry->flags & TT_CLIENT_WIFI ? 'W' : '.')); } rcu_read_unlock(); } - - seq_printf(seq, "%s", buff); - kfree(buff); out: if (primary_if) hardif_free_ref(primary_if); @@@ -394,13 -364,13 +366,13 @@@ static void tt_local_set_pending(struc struct tt_local_entry *tt_local_entry, uint16_t flags) { - tt_local_event(bat_priv, tt_local_entry->addr, - tt_local_entry->flags | flags); + tt_local_event(bat_priv, tt_local_entry->common.addr, + tt_local_entry->common.flags | flags);
/* The local client has to be marked as "pending to be removed" but has * to be kept in the table in order to send it in a full table * response issued before the net ttvn increment (consistency check) */ - tt_local_entry->flags |= TT_CLIENT_PENDING; + tt_local_entry->common.flags |= TT_CLIENT_PENDING; }
void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr, @@@ -416,7 -386,7 +388,7 @@@ (roaming ? TT_CLIENT_ROAM : NO_FLAGS));
bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) pending to be removed: " - "%s\n", tt_local_entry->addr, message); + "%s\n", tt_local_entry->common.addr, message); out: if (tt_local_entry) tt_local_entry_free_ref(tt_local_entry); @@@ -426,23 -396,27 +398,27 @@@ static void tt_local_purge(struct bat_p { struct hashtable_t *hash = bat_priv->tt_local_hash; struct tt_local_entry *tt_local_entry; + struct tt_common_entry *tt_common_entry; struct hlist_node *node, *node_tmp; struct hlist_head *head; spinlock_t *list_lock; /* protects write access to the hash lists */ - int i; + uint32_t i;
for (i = 0; i < hash->size; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock); - hlist_for_each_entry_safe(tt_local_entry, node, node_tmp, + hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head, hash_entry) { - if (tt_local_entry->flags & TT_CLIENT_NOPURGE) + tt_local_entry = container_of(tt_common_entry, + struct tt_local_entry, + common); + if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE) continue;
/* entry already marked for deletion */ - if (tt_local_entry->flags & TT_CLIENT_PENDING) + if (tt_local_entry->common.flags & TT_CLIENT_PENDING) continue;
if (!is_out_of_time(tt_local_entry->last_seen, @@@ -453,7 -427,7 +429,7 @@@ TT_CLIENT_DEL); bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) " "pending to be removed: timed out\n", - tt_local_entry->addr); + tt_local_entry->common.addr); } spin_unlock_bh(list_lock); } @@@ -464,10 -438,11 +440,11 @@@ static void tt_local_table_free(struct { struct hashtable_t *hash; spinlock_t *list_lock; /* protects write access to the hash lists */ + struct tt_common_entry *tt_common_entry; struct tt_local_entry *tt_local_entry; struct hlist_node *node, *node_tmp; struct hlist_head *head; - int i; + uint32_t i;
if (!bat_priv->tt_local_hash) return; @@@ -479,9 -454,12 +456,12 @@@ list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock); - hlist_for_each_entry_safe(tt_local_entry, node, node_tmp, + hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head, hash_entry) { hlist_del_rcu(node); + tt_local_entry = container_of(tt_common_entry, + struct tt_local_entry, + common); tt_local_entry_free_ref(tt_local_entry); } spin_unlock_bh(list_lock); @@@ -529,6 -507,7 +509,7 @@@ int tt_global_add(struct bat_priv *bat_ struct tt_global_entry *tt_global_entry; struct orig_node *orig_node_tmp; int ret = 0; + int hash_added;
tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
@@@ -539,18 -518,24 +520,24 @@@ if (!tt_global_entry) goto out;
- memcpy(tt_global_entry->addr, tt_addr, ETH_ALEN); + memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN); + tt_global_entry->common.flags = NO_FLAGS; + atomic_set(&tt_global_entry->common.refcount, 2); /* Assign the new orig_node */ atomic_inc(&orig_node->refcount); tt_global_entry->orig_node = orig_node; tt_global_entry->ttvn = ttvn; - tt_global_entry->flags = NO_FLAGS; tt_global_entry->roam_at = 0; - atomic_set(&tt_global_entry->refcount, 2);
- hash_add(bat_priv->tt_global_hash, compare_gtt, - choose_orig, tt_global_entry, - &tt_global_entry->hash_entry); + hash_added = hash_add(bat_priv->tt_global_hash, compare_tt, + choose_orig, &tt_global_entry->common, + &tt_global_entry->common.hash_entry); + + if (unlikely(hash_added != 0)) { + /* remove the reference for the hash */ + tt_global_entry_free_ref(tt_global_entry); + goto out_remove; + } atomic_inc(&orig_node->tt_size); } else { if (tt_global_entry->orig_node != orig_node) { @@@ -561,20 -546,21 +548,21 @@@ orig_node_free_ref(orig_node_tmp); atomic_inc(&orig_node->tt_size); } + tt_global_entry->common.flags = NO_FLAGS; tt_global_entry->ttvn = ttvn; - tt_global_entry->flags = NO_FLAGS; tt_global_entry->roam_at = 0; }
if (wifi) - tt_global_entry->flags |= TT_CLIENT_WIFI; + tt_global_entry->common.flags |= TT_CLIENT_WIFI;
bat_dbg(DBG_TT, bat_priv, "Creating new global tt entry: %pM (via %pM)\n", - tt_global_entry->addr, orig_node->orig); + tt_global_entry->common.addr, orig_node->orig);
+ out_remove: /* remove address from local hash if present */ - tt_local_remove(bat_priv, tt_global_entry->addr, + tt_local_remove(bat_priv, tt_global_entry->common.addr, "global tt received", roaming); ret = 1; out: @@@ -588,13 -574,13 +576,13 @@@ int tt_global_seq_print_text(struct seq struct net_device *net_dev = (struct net_device *)seq->private; struct bat_priv *bat_priv = netdev_priv(net_dev); struct hashtable_t *hash = bat_priv->tt_global_hash; + struct tt_common_entry *tt_common_entry; struct tt_global_entry *tt_global_entry; struct hard_iface *primary_if; struct hlist_node *node; struct hlist_head *head; - size_t buf_size, pos; - char *buff; - int i, ret = 0; + uint32_t i; + int ret = 0;
primary_if = primary_if_get_selected(bat_priv); if (!primary_if) { @@@ -617,53 -603,32 +605,32 @@@ seq_printf(seq, " %-13s %s %-15s %s %s\n", "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");
- buf_size = 1; - /* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via - * xx:xx:xx:xx:xx:xx (cur_ttvn)\n"*/ - for (i = 0; i < hash->size; i++) { - head = &hash->table[i]; - - rcu_read_lock(); - __hlist_for_each_rcu(node, head) - buf_size += 67; - rcu_read_unlock(); - } - - buff = kmalloc(buf_size, GFP_ATOMIC); - if (!buff) { - ret = -ENOMEM; - goto out; - } - - buff[0] = '\0'; - pos = 0; - for (i = 0; i < hash->size; i++) { head = &hash->table[i];
rcu_read_lock(); - hlist_for_each_entry_rcu(tt_global_entry, node, + hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) { - pos += snprintf(buff + pos, 69, - " * %pM (%3u) via %pM (%3u) " - "[%c%c%c]\n", tt_global_entry->addr, + tt_global_entry = container_of(tt_common_entry, + struct tt_global_entry, + common); + seq_printf(seq, " * %pM (%3u) via %pM (%3u) " + "[%c%c%c]\n", + tt_global_entry->common.addr, tt_global_entry->ttvn, tt_global_entry->orig_node->orig, (uint8_t) atomic_read( &tt_global_entry->orig_node-> last_ttvn), - (tt_global_entry->flags & + (tt_global_entry->common.flags & TT_CLIENT_ROAM ? 'R' : '.'), - (tt_global_entry->flags & + (tt_global_entry->common.flags & TT_CLIENT_PENDING ? 'X' : '.'), - (tt_global_entry->flags & + (tt_global_entry->common.flags & TT_CLIENT_WIFI ? 'W' : '.')); } rcu_read_unlock(); } - - seq_printf(seq, "%s", buff); - kfree(buff); out: if (primary_if) hardif_free_ref(primary_if); @@@ -679,13 -644,13 +646,13 @@@ static void _tt_global_del(struct bat_p
bat_dbg(DBG_TT, bat_priv, "Deleting global tt entry %pM (via %pM): %s\n", - tt_global_entry->addr, tt_global_entry->orig_node->orig, + tt_global_entry->common.addr, tt_global_entry->orig_node->orig, message);
atomic_dec(&tt_global_entry->orig_node->tt_size);
- hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig, - tt_global_entry->addr); + hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig, + tt_global_entry->common.addr); out: if (tt_global_entry) tt_global_entry_free_ref(tt_global_entry); @@@ -696,7 -661,6 +663,7 @@@ void tt_global_del(struct bat_priv *bat const char *message, bool roaming) { struct tt_global_entry *tt_global_entry = NULL; + struct tt_local_entry *tt_local_entry = NULL;
tt_global_entry = tt_global_hash_find(bat_priv, addr); if (!tt_global_entry) @@@ -704,36 -668,23 +671,37 @@@
if (tt_global_entry->orig_node == orig_node) { if (roaming) { - tt_global_entry->common.flags |= TT_CLIENT_ROAM; - tt_global_entry->roam_at = jiffies; - goto out; + /* if we are deleting a global entry due to a roam + * event, there are two possibilities: + * 1) the client roamed from node A to node B => we mark + * it with TT_CLIENT_ROAM, we start a timer and we + * wait for node B to claim it. In case of timeout + * the entry is purged. + * 2) the client roamed to us => we can directly delete + * the global entry, since it is useless now. */ + tt_local_entry = tt_local_hash_find(bat_priv, - tt_global_entry->addr); ++ tt_global_entry->common.addr); + if (!tt_local_entry) { - tt_global_entry->flags |= TT_CLIENT_ROAM; ++ tt_global_entry->common.flags |= TT_CLIENT_ROAM; + tt_global_entry->roam_at = jiffies; + goto out; + } } _tt_global_del(bat_priv, tt_global_entry, message); } out: if (tt_global_entry) tt_global_entry_free_ref(tt_global_entry); + if (tt_local_entry) + tt_local_entry_free_ref(tt_local_entry); }
void tt_global_del_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, const char *message) { struct tt_global_entry *tt_global_entry; - int i; + struct tt_common_entry *tt_common_entry; + uint32_t i; struct hashtable_t *hash = bat_priv->tt_global_hash; struct hlist_node *node, *safe; struct hlist_head *head; @@@ -747,14 -698,18 +715,18 @@@ list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock); - hlist_for_each_entry_safe(tt_global_entry, node, safe, + hlist_for_each_entry_safe(tt_common_entry, node, safe, head, hash_entry) { + tt_global_entry = container_of(tt_common_entry, + struct tt_global_entry, + common); if (tt_global_entry->orig_node == orig_node) { bat_dbg(DBG_TT, bat_priv, "Deleting global tt entry %pM " - "(via %pM): originator time out\n", - tt_global_entry->addr, - tt_global_entry->orig_node->orig); + "(via %pM): %s\n", + tt_global_entry->common.addr, + tt_global_entry->orig_node->orig, + message); hlist_del_rcu(node); tt_global_entry_free_ref(tt_global_entry); } @@@ -767,20 -722,24 +739,24 @@@ static void tt_global_roam_purge(struct bat_priv *bat_priv) { struct hashtable_t *hash = bat_priv->tt_global_hash; + struct tt_common_entry *tt_common_entry; struct tt_global_entry *tt_global_entry; struct hlist_node *node, *node_tmp; struct hlist_head *head; spinlock_t *list_lock; /* protects write access to the hash lists */ - int i; + uint32_t i;
for (i = 0; i < hash->size; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock); - hlist_for_each_entry_safe(tt_global_entry, node, node_tmp, + hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head, hash_entry) { - if (!(tt_global_entry->flags & TT_CLIENT_ROAM)) + tt_global_entry = container_of(tt_common_entry, + struct tt_global_entry, + common); + if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM)) continue; if (!is_out_of_time(tt_global_entry->roam_at, TT_CLIENT_ROAM_TIMEOUT * 1000)) @@@ -788,7 -747,7 +764,7 @@@
bat_dbg(DBG_TT, bat_priv, "Deleting global " "tt entry (%pM): Roaming timeout\n", - tt_global_entry->addr); + tt_global_entry->common.addr); atomic_dec(&tt_global_entry->orig_node->tt_size); hlist_del_rcu(node); tt_global_entry_free_ref(tt_global_entry); @@@ -802,10 -761,11 +778,11 @@@ static void tt_global_table_free(struc { struct hashtable_t *hash; spinlock_t *list_lock; /* protects write access to the hash lists */ + struct tt_common_entry *tt_common_entry; struct tt_global_entry *tt_global_entry; struct hlist_node *node, *node_tmp; struct hlist_head *head; - int i; + uint32_t i;
if (!bat_priv->tt_global_hash) return; @@@ -817,9 -777,12 +794,12 @@@ list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock); - hlist_for_each_entry_safe(tt_global_entry, node, node_tmp, + hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head, hash_entry) { hlist_del_rcu(node); + tt_global_entry = container_of(tt_common_entry, + struct tt_global_entry, + common); tt_global_entry_free_ref(tt_global_entry); } spin_unlock_bh(list_lock); @@@ -835,8 -798,8 +815,8 @@@ static bool _is_ap_isolated(struct tt_l { bool ret = false;
- if (tt_local_entry->flags & TT_CLIENT_WIFI && - tt_global_entry->flags & TT_CLIENT_WIFI) + if (tt_local_entry->common.flags & TT_CLIENT_WIFI && + tt_global_entry->common.flags & TT_CLIENT_WIFI) ret = true;
return ret; @@@ -869,7 -832,7 +849,7 @@@ struct orig_node *transtable_search(str
/* A global client marked as PENDING has already moved from that * originator */ - if (tt_global_entry->flags & TT_CLIENT_PENDING) + if (tt_global_entry->common.flags & TT_CLIENT_PENDING) goto out;
orig_node = tt_global_entry->orig_node; @@@ -888,29 -851,34 +868,34 @@@ uint16_t tt_global_crc(struct bat_priv { uint16_t total = 0, total_one; struct hashtable_t *hash = bat_priv->tt_global_hash; + struct tt_common_entry *tt_common_entry; struct tt_global_entry *tt_global_entry; struct hlist_node *node; struct hlist_head *head; - int i, j; + uint32_t i; + int j;
for (i = 0; i < hash->size; i++) { head = &hash->table[i];
rcu_read_lock(); - hlist_for_each_entry_rcu(tt_global_entry, node, + hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) { + tt_global_entry = container_of(tt_common_entry, + struct tt_global_entry, + common); if (compare_eth(tt_global_entry->orig_node, orig_node)) { /* Roaming clients are in the global table for * consistency only. They don't have to be * taken into account while computing the * global crc */ - if (tt_global_entry->flags & TT_CLIENT_ROAM) + if (tt_common_entry->flags & TT_CLIENT_ROAM) continue; total_one = 0; for (j = 0; j < ETH_ALEN; j++) total_one = crc16_byte(total_one, - tt_global_entry->addr[j]); + tt_common_entry->addr[j]); total ^= total_one; } } @@@ -925,25 -893,26 +910,26 @@@ uint16_t tt_local_crc(struct bat_priv * { uint16_t total = 0, total_one; struct hashtable_t *hash = bat_priv->tt_local_hash; - struct tt_local_entry *tt_local_entry; + struct tt_common_entry *tt_common_entry; struct hlist_node *node; struct hlist_head *head; - int i, j; + uint32_t i; + int j;
for (i = 0; i < hash->size; i++) { head = &hash->table[i];
rcu_read_lock(); - hlist_for_each_entry_rcu(tt_local_entry, node, + hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) { /* not yet committed clients have not to be taken into * account while computing the CRC */ - if (tt_local_entry->flags & TT_CLIENT_NEW) + if (tt_common_entry->flags & TT_CLIENT_NEW) continue; total_one = 0; for (j = 0; j < ETH_ALEN; j++) total_one = crc16_byte(total_one, - tt_local_entry->addr[j]); + tt_common_entry->addr[j]); total ^= total_one; } rcu_read_unlock(); @@@ -1032,21 -1001,25 +1018,25 @@@ unlock /* data_ptr is useless here, but has to be kept to respect the prototype */ static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr) { - const struct tt_local_entry *tt_local_entry = entry_ptr; + const struct tt_common_entry *tt_common_entry = entry_ptr;
- if (tt_local_entry->flags & TT_CLIENT_NEW) + if (tt_common_entry->flags & TT_CLIENT_NEW) return 0; return 1; }
static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr) { - const struct tt_global_entry *tt_global_entry = entry_ptr; + const struct tt_common_entry *tt_common_entry = entry_ptr; + const struct tt_global_entry *tt_global_entry; const struct orig_node *orig_node = data_ptr;
- if (tt_global_entry->flags & TT_CLIENT_ROAM) + if (tt_common_entry->flags & TT_CLIENT_ROAM) return 0;
+ tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, + common); + return (tt_global_entry->orig_node == orig_node); }
@@@ -1057,7 -1030,7 +1047,7 @@@ static struct sk_buff *tt_response_fill const void *), void *cb_data) { - struct tt_local_entry *tt_local_entry; + struct tt_common_entry *tt_common_entry; struct tt_query_packet *tt_response; struct tt_change *tt_change; struct hlist_node *node; @@@ -1065,7 -1038,7 +1055,7 @@@ struct sk_buff *skb = NULL; uint16_t tt_tot, tt_count; ssize_t tt_query_size = sizeof(struct tt_query_packet); - int i; + uint32_t i;
if (tt_query_size + tt_len > primary_if->soft_iface->mtu) { tt_len = primary_if->soft_iface->mtu - tt_query_size; @@@ -1089,15 -1062,16 +1079,16 @@@ for (i = 0; i < hash->size; i++) { head = &hash->table[i];
- hlist_for_each_entry_rcu(tt_local_entry, node, + hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) { if (tt_count == tt_tot) break;
- if ((valid_cb) && (!valid_cb(tt_local_entry, cb_data))) + if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data))) continue;
- memcpy(tt_change->addr, tt_local_entry->addr, ETH_ALEN); + memcpy(tt_change->addr, tt_common_entry->addr, + ETH_ALEN); tt_change->flags = NO_FLAGS;
tt_count++; @@@ -1204,11 -1178,11 +1195,11 @@@ static bool send_other_tt_response(stru (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
/* Let's get the orig node of the REAL destination */ - req_dst_orig_node = get_orig_node(bat_priv, tt_request->dst); + req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst); if (!req_dst_orig_node) goto out;
- res_dst_orig_node = get_orig_node(bat_priv, tt_request->src); + res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src); if (!res_dst_orig_node) goto out;
@@@ -1334,7 -1308,7 +1325,7 @@@ static bool send_my_tt_response(struct my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); req_ttvn = tt_request->ttvn;
- orig_node = get_orig_node(bat_priv, tt_request->src); + orig_node = orig_hash_find(bat_priv, tt_request->src); if (!orig_node) goto out;
@@@ -1514,7 -1488,7 +1505,7 @@@ bool is_my_client(struct bat_priv *bat_ goto out; /* Check if the client has been logically deleted (but is kept for * consistency purpose) */ - if (tt_local_entry->flags & TT_CLIENT_PENDING) + if (tt_local_entry->common.flags & TT_CLIENT_PENDING) goto out; ret = true; out: @@@ -1737,45 -1711,53 +1728,53 @@@ void tt_free(struct bat_priv *bat_priv kfree(bat_priv->tt_buff); }
- /* This function will reset the specified flags from all the entries in - * the given hash table and will increment num_local_tt for each involved - * entry */ - static void tt_local_reset_flags(struct bat_priv *bat_priv, uint16_t flags) + /* This function will enable or disable the specified flags for all the entries + * in the given hash table and returns the number of modified entries */ + static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags, + bool enable) { - int i; - struct hashtable_t *hash = bat_priv->tt_local_hash; + uint32_t i; + uint16_t changed_num = 0; struct hlist_head *head; struct hlist_node *node; - struct tt_local_entry *tt_local_entry; + struct tt_common_entry *tt_common_entry;
if (!hash) - return; + goto out;
for (i = 0; i < hash->size; i++) { head = &hash->table[i];
rcu_read_lock(); - hlist_for_each_entry_rcu(tt_local_entry, node, + hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) { - if (!(tt_local_entry->flags & flags)) - continue; - tt_local_entry->flags &= ~flags; - atomic_inc(&bat_priv->num_local_tt); + if (enable) { + if ((tt_common_entry->flags & flags) == flags) + continue; + tt_common_entry->flags |= flags; + } else { + if (!(tt_common_entry->flags & flags)) + continue; + tt_common_entry->flags &= ~flags; + } + changed_num++; } rcu_read_unlock(); } - + out: + return changed_num; }
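
The new tt_set_flags() above generalizes the old tt_local_reset_flags(): it can either set or clear a flag mask, skips entries already in the requested state, and returns how many entries it actually changed, which tt_commit_changes() then adds to num_local_tt. A rough userspace model of that enable/disable-and-count logic, using a plain array instead of the RCU hash table and made-up names:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct entry { uint16_t flags; };

    /* mirrors the tt_set_flags() decision: untouched entries are not counted */
    static uint16_t set_flags(struct entry *e, size_t n, uint16_t flags, bool enable)
    {
        uint16_t changed = 0;

        for (size_t i = 0; i < n; i++) {
            if (enable) {
                if ((e[i].flags & flags) == flags)
                    continue;           /* already fully set */
                e[i].flags |= flags;
            } else {
                if (!(e[i].flags & flags))
                    continue;           /* already clear */
                e[i].flags &= ~flags;
            }
            changed++;
        }
        return changed;
    }

    int main(void)
    {
        struct entry tab[3] = { { 0x01 }, { 0x00 }, { 0x01 } };

        /* clearing bit 0x01 touches entries 0 and 2 -> prints 2 */
        printf("changed %u entries\n", (unsigned)set_flags(tab, 3, 0x01, false));
        return 0;
    }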
/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv) { struct hashtable_t *hash = bat_priv->tt_local_hash; + struct tt_common_entry *tt_common_entry; struct tt_local_entry *tt_local_entry; struct hlist_node *node, *node_tmp; struct hlist_head *head; spinlock_t *list_lock; /* protects write access to the hash lists */ - int i; + uint32_t i;
if (!hash) return; @@@ -1785,16 -1767,19 +1784,19 @@@ list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock); - hlist_for_each_entry_safe(tt_local_entry, node, node_tmp, + hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head, hash_entry) { - if (!(tt_local_entry->flags & TT_CLIENT_PENDING)) + if (!(tt_common_entry->flags & TT_CLIENT_PENDING)) continue;
bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry " - "(%pM): pending\n", tt_local_entry->addr); + "(%pM): pending\n", tt_common_entry->addr);
atomic_dec(&bat_priv->num_local_tt); hlist_del_rcu(node); + tt_local_entry = container_of(tt_common_entry, + struct tt_local_entry, + common); tt_local_entry_free_ref(tt_local_entry); } spin_unlock_bh(list_lock); @@@ -1804,7 -1789,11 +1806,11 @@@
void tt_commit_changes(struct bat_priv *bat_priv) { - tt_local_reset_flags(bat_priv, TT_CLIENT_NEW); + uint16_t changed_num = tt_set_flags(bat_priv->tt_local_hash, + TT_CLIENT_NEW, false); + /* all the reset entries have now to be effectively counted as local + * entries */ + atomic_add(changed_num, &bat_priv->num_local_tt); tt_local_purge_pending_clients(bat_priv);
/* Increment the TTVN only once per OGM interval */ diff --combined net/ipv4/ipip.c index 0b2e732,9490690..413ed1b --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@@ -148,7 -148,7 +148,7 @@@ struct pcpu_tstats unsigned long rx_bytes; unsigned long tx_packets; unsigned long tx_bytes; - }; + } __attribute__((aligned(4*sizeof(unsigned long))));
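
The pcpu_tstats hunk in ipip.c above (and the identical one in sit.c further down) only adds an alignment attribute to the per-cpu counter structure. For readers unfamiliar with the attribute, the throwaway program below prints the size and alignment with and without it; it illustrates what __attribute__((aligned(...))) does to the type, not the patch author's exact motivation.

    #include <stdio.h>

    struct stats_plain {
        unsigned long rx_packets, rx_bytes, tx_packets, tx_bytes;
    };

    /* same annotation as the patch: align the whole struct to 4*sizeof(long) */
    struct stats_aligned {
        unsigned long rx_packets, rx_bytes, tx_packets, tx_bytes;
    } __attribute__((aligned(4 * sizeof(unsigned long))));

    int main(void)
    {
        printf("plain:   size %zu, align %zu\n",
               sizeof(struct stats_plain), _Alignof(struct stats_plain));
        printf("aligned: size %zu, align %zu\n",
               sizeof(struct stats_aligned), _Alignof(struct stats_aligned));
        return 0;
    }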
static struct net_device_stats *ipip_get_stats(struct net_device *dev) { @@@ -285,8 -285,6 +285,8 @@@ static struct ip_tunnel * ipip_tunnel_l if (register_netdevice(dev) < 0) goto failed_free;
+ strcpy(nt->parms.name, dev->name); + dev_hold(dev); ipip_tunnel_link(ipn, nt); return nt; @@@ -761,6 -759,7 +761,6 @@@ static int ipip_tunnel_init(struct net_ struct ip_tunnel *tunnel = netdev_priv(dev);
tunnel->dev = dev; - strcpy(tunnel->parms.name, dev->name);
memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); @@@ -826,7 -825,6 +826,7 @@@ static void ipip_destroy_tunnels(struc static int __net_init ipip_init_net(struct net *net) { struct ipip_net *ipn = net_generic(net, ipip_net_id); + struct ip_tunnel *t; int err;
ipn->tunnels[0] = ipn->tunnels_wc; @@@ -850,9 -848,6 +850,9 @@@ if ((err = register_netdev(ipn->fb_tunnel_dev))) goto err_reg_dev;
+ t = netdev_priv(ipn->fb_tunnel_dev); + + strcpy(t->parms.name, ipn->fb_tunnel_dev->name); return 0;
err_reg_dev: diff --combined net/ipv6/addrconf.c index 36806de,94f3fd9..59a9d0e --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@@ -630,13 -630,13 +630,13 @@@ ipv6_add_addr(struct inet6_dev *idev, c goto out; }
- rt = addrconf_dst_alloc(idev, addr, 0); + rt = addrconf_dst_alloc(idev, addr, false); if (IS_ERR(rt)) { err = PTR_ERR(rt); goto out; }
- ipv6_addr_copy(&ifa->addr, addr); + ifa->addr = *addr;
spin_lock_init(&ifa->lock); spin_lock_init(&ifa->state_lock); @@@ -657,7 -657,7 +657,7 @@@ * layer address of our nexhop router */
- if (dst_get_neighbour_raw(&rt->dst) == NULL) + if (dst_get_neighbour_noref_raw(&rt->dst) == NULL) ifa->flags &= ~IFA_F_OPTIMISTIC;
ifa->idev = idev; @@@ -1228,7 -1228,7 +1228,7 @@@ try_nextdev if (!hiscore->ifa) return -EADDRNOTAVAIL;
- ipv6_addr_copy(saddr, &hiscore->ifa->addr); + *saddr = hiscore->ifa->addr; in6_ifa_put(hiscore->ifa); return 0; } @@@ -1249,7 -1249,7 +1249,7 @@@ int ipv6_get_lladdr(struct net_device * list_for_each_entry(ifp, &idev->addr_list, if_list) { if (ifp->scope == IFA_LINK && !(ifp->flags & banned_flags)) { - ipv6_addr_copy(addr, &ifp->addr); + *addr = ifp->addr; err = 0; break; } @@@ -1700,7 -1700,7 +1700,7 @@@ addrconf_prefix_route(struct in6_addr * .fc_protocol = RTPROT_KERNEL, };
- ipv6_addr_copy(&cfg.fc_dst, pfx); + cfg.fc_dst = *pfx;
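
A recurring change in the addrconf.c and sit.c hunks is replacing ipv6_addr_copy(dst, src) with a plain struct assignment; for struct in6_addr the two copy exactly the same bytes. A tiny sketch of the equivalence, with a simplified local stand-in for struct in6_addr and for the removed helper:

    #include <stdio.h>
    #include <string.h>

    struct in6_addr { unsigned char s6_addr[16]; };   /* simplified stand-in */

    /* what the removed helper effectively did */
    static void ipv6_addr_copy(struct in6_addr *dst, const struct in6_addr *src)
    {
        memcpy(dst, src, sizeof(*dst));
    }

    int main(void)
    {
        struct in6_addr a = { { 0x20, 0x01, 0x0d, 0xb8 } }, b, c;

        ipv6_addr_copy(&b, &a);   /* old style */
        c = a;                    /* new style: plain assignment */

        printf("equal: %d\n", memcmp(&b, &c, sizeof(b)) == 0);   /* prints 1 */
        return 0;
    }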
/* Prevent useless cloning on PtP SIT. This thing is done here expecting that the whole @@@ -1805,8 -1805,7 +1805,8 @@@ static struct inet6_dev *addrconf_add_d return ERR_PTR(-EACCES);
/* Add default multicast route */ - addrconf_add_mroute(dev); + if (!(dev->flags & IFF_LOOPBACK)) + addrconf_add_mroute(dev);
/* Add link local route */ addrconf_add_lroute(dev); diff --combined net/ipv6/sit.c index 96f3623,b7d14cc..3b6dac9 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@@ -91,7 -91,7 +91,7 @@@ struct pcpu_tstats unsigned long rx_bytes; unsigned long tx_packets; unsigned long tx_bytes; - }; + } __attribute__((aligned(4*sizeof(unsigned long))));
static struct net_device_stats *ipip6_get_stats(struct net_device *dev) { @@@ -263,8 -263,6 +263,8 @@@ static struct ip_tunnel *ipip6_tunnel_l if (register_netdevice(dev) < 0) goto failed_free;
+ strcpy(nt->parms.name, dev->name); + dev_hold(dev);
ipip6_tunnel_link(sitn, nt); @@@ -682,7 -680,7 +682,7 @@@ static netdev_tx_t ipip6_tunnel_xmit(st struct neighbour *neigh = NULL;
if (skb_dst(skb)) - neigh = dst_get_neighbour(skb_dst(skb)); + neigh = dst_get_neighbour_noref(skb_dst(skb));
if (neigh == NULL) { if (net_ratelimit()) @@@ -707,7 -705,7 +707,7 @@@ struct neighbour *neigh = NULL;
if (skb_dst(skb)) - neigh = dst_get_neighbour(skb_dst(skb)); + neigh = dst_get_neighbour_noref(skb_dst(skb));
if (neigh == NULL) { if (net_ratelimit()) @@@ -916,7 -914,7 +916,7 @@@ ipip6_tunnel_ioctl (struct net_device * goto done; #ifdef CONFIG_IPV6_SIT_6RD } else { - ipv6_addr_copy(&ip6rd.prefix, &t->ip6rd.prefix); + ip6rd.prefix = t->ip6rd.prefix; ip6rd.relay_prefix = t->ip6rd.relay_prefix; ip6rd.prefixlen = t->ip6rd.prefixlen; ip6rd.relay_prefixlen = t->ip6rd.relay_prefixlen; @@@ -1084,7 -1082,7 +1084,7 @@@ if (relay_prefix != ip6rd.relay_prefix) goto done;
- ipv6_addr_copy(&t->ip6rd.prefix, &prefix); + t->ip6rd.prefix = prefix; t->ip6rd.relay_prefix = relay_prefix; t->ip6rd.prefixlen = ip6rd.prefixlen; t->ip6rd.relay_prefixlen = ip6rd.relay_prefixlen; @@@ -1146,6 -1144,7 +1146,6 @@@ static int ipip6_tunnel_init(struct net struct ip_tunnel *tunnel = netdev_priv(dev);
tunnel->dev = dev; - strcpy(tunnel->parms.name, dev->name);
memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); @@@ -1208,7 -1207,6 +1208,7 @@@ static void __net_exit sit_destroy_tunn static int __net_init sit_init_net(struct net *net) { struct sit_net *sitn = net_generic(net, sit_net_id); + struct ip_tunnel *t; int err;
sitn->tunnels[0] = sitn->tunnels_wc; @@@ -1233,9 -1231,6 +1233,9 @@@ if ((err = register_netdev(sitn->fb_tunnel_dev))) goto err_reg_dev;
+ t = netdev_priv(sitn->fb_tunnel_dev); + + strcpy(t->parms.name, sitn->fb_tunnel_dev->name); return 0;
err_reg_dev: diff --combined net/mac80211/agg-tx.c index 2e4b961,c45fa5d..7380287 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@@ -78,10 -78,13 +78,13 @@@ static void ieee80211_send_addba_reques memcpy(mgmt->da, da, ETH_ALEN); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); if (sdata->vif.type == NL80211_IFTYPE_AP || - sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + sdata->vif.type == NL80211_IFTYPE_AP_VLAN || + sdata->vif.type == NL80211_IFTYPE_MESH_POINT) memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); else if (sdata->vif.type == NL80211_IFTYPE_STATION) memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN); + else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) + memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); @@@ -185,6 -188,7 +188,7 @@@ int ___ieee80211_stop_tx_ba_session(str #endif /* CONFIG_MAC80211_HT_DEBUG */
del_timer_sync(&tid_tx->addba_resp_timer); + del_timer_sync(&tid_tx->session_timer);
/* * After this packets are no longer handed right through @@@ -303,38 -307,6 +307,38 @@@ ieee80211_wake_queue_agg(struct ieee802 __release(agg_queue); }
+/* + * splice packets from the STA's pending to the local pending, + * requires a call to ieee80211_agg_splice_finish later + */ +static void __acquires(agg_queue) +ieee80211_agg_splice_packets(struct ieee80211_local *local, + struct tid_ampdu_tx *tid_tx, u16 tid) +{ + int queue = ieee80211_ac_from_tid(tid); + unsigned long flags; + + ieee80211_stop_queue_agg(local, tid); + + if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates" + " from the pending queue\n", tid)) + return; + + if (!skb_queue_empty(&tid_tx->pending)) { + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + /* copy over remaining packets */ + skb_queue_splice_tail_init(&tid_tx->pending, + &local->pending[queue]); + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + } +} + +static void __releases(agg_queue) +ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid) +{ + ieee80211_wake_queue_agg(local, tid); +} + void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) { struct tid_ampdu_tx *tid_tx; @@@ -346,17 -318,19 +350,17 @@@ tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
/* - * While we're asking the driver about the aggregation, - * stop the AC queue so that we don't have to worry - * about frames that came in while we were doing that, - * which would require us to put them to the AC pending - * afterwards which just makes the code more complex. + * Start queuing up packets for this aggregation session. + * We're going to release them once the driver is OK with + * that. */ clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
/* - * make sure no packets are being processed to get - * valid starting sequence number + * Make sure no packets are being processed. This ensures that + * we have a valid starting sequence number and that in-flight + * packets have been flushed out and no packets for this TID + * will go into the driver during the ampdu_action call. */ synchronize_net();
@@@ -370,15 -344,17 +374,15 @@@ " tid %d\n", tid); #endif spin_lock_bh(&sta->lock); + ieee80211_agg_splice_packets(local, tid_tx, tid); ieee80211_assign_tid_tx(sta, tid, NULL); + ieee80211_agg_splice_finish(local, tid); spin_unlock_bh(&sta->lock);
- ieee80211_wake_queue_agg(local, tid); kfree_rcu(tid_tx, rcu_head); return; }
- /* we can take packets again now */ - ieee80211_wake_queue_agg(local, tid); - /* activate the timer for the recipient's addBA response */ mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL); #ifdef CONFIG_MAC80211_HT_DEBUG @@@ -396,6 -372,28 +400,28 @@@ tid_tx->timeout); }
+ /* + * After accepting the AddBA Response we activated a timer, + * resetting it after each frame that we send. + */ + static void sta_tx_agg_session_timer_expired(unsigned long data) + { + /* not an elegant detour, but there is no choice as the timer passes + * only one argument, and various sta_info are needed here, so init + * flow in sta_info_create gives the TID as data, while the timer_to_id + * array gives the sta through container_of */ + u8 *ptid = (u8 *)data; + u8 *timer_to_id = ptid - *ptid; + struct sta_info *sta = container_of(timer_to_id, struct sta_info, + timer_to_tid[0]); + + #ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid); + #endif + + ieee80211_stop_tx_ba_session(&sta->sta, *ptid); + } + int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, u16 timeout) { @@@ -420,15 -418,11 +446,11 @@@ pubsta->addr, tid); #endif /* CONFIG_MAC80211_HT_DEBUG */
- /* - * The aggregation code is not prepared to handle - * anything but STA/AP due to the BSSID handling. - * IBSS could work in the code but isn't supported - * by drivers or the standard. - */ if (sdata->vif.type != NL80211_IFTYPE_STATION && + sdata->vif.type != NL80211_IFTYPE_MESH_POINT && sdata->vif.type != NL80211_IFTYPE_AP_VLAN && - sdata->vif.type != NL80211_IFTYPE_AP) + sdata->vif.type != NL80211_IFTYPE_AP && + sdata->vif.type != NL80211_IFTYPE_ADHOC) return -EINVAL;
if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) { @@@ -439,6 -433,27 +461,27 @@@ return -EINVAL; }
+ /* + * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a + * member of an IBSS, and has no other existing Block Ack agreement + * with the recipient STA, then the initiating STA shall transmit a + * Probe Request frame to the recipient STA and shall not transmit an + * ADDBA Request frame unless it receives a Probe Response frame + * from the recipient within dot11ADDBAFailureTimeout. + * + * The probe request mechanism for ADDBA is currently not implemented, + * but we only build up Block Ack session with HT STAs. This information + * is set when we receive a bss info from a probe response or a beacon. + */ + if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC && + !sta->sta.ht_cap.ht_supported) { + #ifdef CONFIG_MAC80211_HT_DEBUG + printk(KERN_DEBUG "BA request denied - IBSS STA %pM" + "does not advertise HT support\n", pubsta->addr); + #endif /* CONFIG_MAC80211_HT_DEBUG */ + return -EINVAL; + } + spin_lock_bh(&sta->lock);
/* we have tried too many times, receiver does not want A-MPDU */ @@@ -470,11 -485,16 +513,16 @@@
tid_tx->timeout = timeout;
- /* Tx timer */ + /* response timer */ tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired; tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid]; init_timer(&tid_tx->addba_resp_timer);
+ /* tx timer */ + tid_tx->session_timer.function = sta_tx_agg_session_timer_expired; + tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid]; + init_timer(&tid_tx->session_timer); + /* assign a dialog token */ sta->ampdu_mlme.dialog_token_allocator++; tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator; @@@ -494,6 -514,38 +542,6 @@@ } EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
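
The new session timer reuses the "timer_to_tid" detour that the comment in sta_tx_agg_session_timer_expired() above describes: the callback only receives &sta->timer_to_tid[tid], and because that array stores its own indices, the callback can recover the TID from *ptid and the owning sta_info via container_of(). A userspace sketch of the trick with simplified, invented types (the pointer is packed into an unsigned long, as the timer API of that era did):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct sta_info {
        char name[16];
        unsigned char timer_to_tid[8];   /* timer_to_tid[i] == i */
    };

    static void session_timer_expired(unsigned long data)
    {
        unsigned char *ptid = (unsigned char *)data;    /* &sta->timer_to_tid[tid] */
        unsigned char *timer_to_id = ptid - *ptid;      /* back to timer_to_tid[0] */
        struct sta_info *sta = container_of(timer_to_id, struct sta_info,
                                            timer_to_tid);

        printf("tid %u expired on %s\n", *ptid, sta->name);
    }

    int main(void)
    {
        struct sta_info sta = { .name = "sta0" };

        for (unsigned char i = 0; i < 8; i++)
            sta.timer_to_tid[i] = i;

        /* what the kernel would have stored in timer->data for TID 5 */
        session_timer_expired((unsigned long)&sta.timer_to_tid[5]);
        return 0;
    }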
-/* - * splice packets from the STA's pending to the local pending, - * requires a call to ieee80211_agg_splice_finish later - */ -static void __acquires(agg_queue) -ieee80211_agg_splice_packets(struct ieee80211_local *local, - struct tid_ampdu_tx *tid_tx, u16 tid) -{ - int queue = ieee80211_ac_from_tid(tid); - unsigned long flags; - - ieee80211_stop_queue_agg(local, tid); - - if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates" - " from the pending queue\n", tid)) - return; - - if (!skb_queue_empty(&tid_tx->pending)) { - spin_lock_irqsave(&local->queue_stop_reason_lock, flags); - /* copy over remaining packets */ - skb_queue_splice_tail_init(&tid_tx->pending, - &local->pending[queue]); - spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); - } -} - -static void __releases(agg_queue) -ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid) -{ - ieee80211_wake_queue_agg(local, tid); -} - static void ieee80211_agg_tx_operational(struct ieee80211_local *local, struct sta_info *sta, u16 tid) { @@@ -547,7 -599,7 +595,7 @@@ void ieee80211_start_tx_ba_cb(struct ie }
mutex_lock(&local->sta_mtx); - sta = sta_info_get(sdata, ra); + sta = sta_info_get_bss(sdata, ra); if (!sta) { mutex_unlock(&local->sta_mtx); #ifdef CONFIG_MAC80211_HT_DEBUG @@@ -676,7 -728,7 +724,7 @@@ void ieee80211_stop_tx_ba_cb(struct iee
mutex_lock(&local->sta_mtx);
- sta = sta_info_get(sdata, ra); + sta = sta_info_get_bss(sdata, ra); if (!sta) { #ifdef CONFIG_MAC80211_HT_DEBUG printk(KERN_DEBUG "Could not find station: %pM\n", ra); @@@ -814,6 -866,11 +862,11 @@@ void ieee80211_process_addba_resp(struc ieee80211_agg_tx_operational(local, sta, tid);
sta->ampdu_mlme.addba_req_num[tid] = 0; + + if (tid_tx->timeout) + mod_timer(&tid_tx->session_timer, + TU_TO_EXP_TIME(tid_tx->timeout)); + } else { ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR, true); diff --combined net/nfc/nci/core.c index ea660344,37de28e..c55f233 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@@ -25,6 -25,8 +25,8 @@@ * */
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/types.h> #include <linux/workqueue.h> #include <linux/completion.h> @@@ -69,7 -71,7 +71,7 @@@ static int __nci_request(struct nci_de __u32 timeout) { int rc = 0; - unsigned long completion_rc; + long completion_rc;
ndev->req_status = NCI_REQ_PEND;
@@@ -79,7 -81,7 +81,7 @@@ &ndev->req_completion, timeout);
- nfc_dbg("wait_for_completion return %ld", completion_rc); + pr_debug("wait_for_completion return %ld\n", completion_rc);
if (completion_rc > 0) { switch (ndev->req_status) { @@@ -96,8 -98,8 +98,8 @@@ break; } } else { - nfc_err("wait_for_completion_interruptible_timeout failed %ld", - completion_rc); + pr_err("wait_for_completion_interruptible_timeout failed %ld\n", + completion_rc);
rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc)); } @@@ -126,7 -128,10 +128,10 @@@ static inline int nci_request(struct nc
static void nci_reset_req(struct nci_dev *ndev, unsigned long opt) { - nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 0, NULL); + struct nci_core_reset_cmd cmd; + + cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG; + nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd); }
static void nci_init_req(struct nci_dev *ndev, unsigned long opt) @@@ -136,17 -141,11 +141,11 @@@
static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt) { - struct nci_core_conn_create_cmd conn_cmd; struct nci_rf_disc_map_cmd cmd; struct disc_map_config *cfg = cmd.mapping_configs; __u8 *num = &cmd.num_mapping_configs; int i;
- /* create static rf connection */ - conn_cmd.target_handle = 0; - conn_cmd.num_target_specific_params = 0; - nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, 2, &conn_cmd); - /* set rf mapping configurations */ *num = 0;
@@@ -326,8 -325,6 +325,6 @@@ static void nci_cmd_timer(unsigned lon { struct nci_dev *ndev = (void *) arg;
- nfc_dbg("entry"); - atomic_set(&ndev->cmd_cnt, 1); queue_work(ndev->cmd_wq, &ndev->cmd_work); } @@@ -336,8 -333,6 +333,6 @@@ static int nci_dev_up(struct nfc_dev *n { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
- nfc_dbg("entry"); - return nci_open_device(ndev); }
@@@ -345,8 -340,6 +340,6 @@@ static int nci_dev_down(struct nfc_dev { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
- nfc_dbg("entry"); - return nci_close_device(ndev); }
@@@ -355,20 -348,18 +348,18 @@@ static int nci_start_poll(struct nfc_de struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); int rc;
- nfc_dbg("entry"); - if (test_bit(NCI_DISCOVERY, &ndev->flags)) { - nfc_err("unable to start poll, since poll is already active"); + pr_err("unable to start poll, since poll is already active\n"); return -EBUSY; }
if (ndev->target_active_prot) { - nfc_err("there is an active target"); + pr_err("there is an active target\n"); return -EBUSY; }
if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) { - nfc_dbg("target is active, implicitly deactivate..."); + pr_debug("target is active, implicitly deactivate...\n");
rc = nci_request(ndev, nci_rf_deactivate_req, 0, msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT)); @@@ -389,10 -380,8 +380,8 @@@ static void nci_stop_poll(struct nfc_de { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
- nfc_dbg("entry"); - if (!test_bit(NCI_DISCOVERY, &ndev->flags)) { - nfc_err("unable to stop poll, since poll is not active"); + pr_err("unable to stop poll, since poll is not active\n"); return; }
@@@ -405,21 -394,21 +394,21 @@@ static int nci_activate_target(struct n { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
- nfc_dbg("entry, target_idx %d, protocol 0x%x", target_idx, protocol); + pr_debug("target_idx %d, protocol 0x%x\n", target_idx, protocol);
if (!test_bit(NCI_POLL_ACTIVE, &ndev->flags)) { - nfc_err("there is no available target to activate"); + pr_err("there is no available target to activate\n"); return -EINVAL; }
if (ndev->target_active_prot) { - nfc_err("there is already an active target"); + pr_err("there is already an active target\n"); return -EBUSY; }
if (!(ndev->target_available_prots & (1 << protocol))) { - nfc_err("target does not support the requested protocol 0x%x", - protocol); + pr_err("target does not support the requested protocol 0x%x\n", + protocol); return -EINVAL; }
@@@ -433,10 -422,10 +422,10 @@@ static void nci_deactivate_target(struc { struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
- nfc_dbg("entry, target_idx %d", target_idx); + pr_debug("target_idx %d\n", target_idx);
if (!ndev->target_active_prot) { - nfc_err("unable to deactivate target, no active target"); + pr_err("unable to deactivate target, no active target\n"); return; }
@@@ -456,10 -445,10 +445,10 @@@ static int nci_data_exchange(struct nfc struct nci_dev *ndev = nfc_get_drvdata(nfc_dev); int rc;
- nfc_dbg("entry, target_idx %d, len %d", target_idx, skb->len); + pr_debug("target_idx %d, len %d\n", target_idx, skb->len);
if (!ndev->target_active_prot) { - nfc_err("unable to exchange data, no active target"); + pr_err("unable to exchange data, no active target\n"); return -EINVAL; }
@@@ -470,7 -459,7 +459,7 @@@ ndev->data_exchange_cb = cb; ndev->data_exchange_cb_context = cb_context;
- rc = nci_send_data(ndev, ndev->conn_id, skb); + rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb); if (rc) clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
@@@ -502,7 -491,7 +491,7 @@@ struct nci_dev *nci_allocate_device(str { struct nci_dev *ndev;
- nfc_dbg("entry, supported_protocols 0x%x", supported_protocols); + pr_debug("supported_protocols 0x%x\n", supported_protocols);
if (!ops->open || !ops->close || !ops->send) return NULL; @@@ -542,8 -531,6 +531,6 @@@ EXPORT_SYMBOL(nci_allocate_device) */ void nci_free_device(struct nci_dev *ndev) { - nfc_dbg("entry"); - nfc_free_device(ndev->nfc_dev); kfree(ndev); } @@@ -560,8 -547,6 +547,6 @@@ int nci_register_device(struct nci_dev struct device *dev = &ndev->nfc_dev->dev; char name[32];
- nfc_dbg("entry"); - rc = nfc_register_device(ndev->nfc_dev); if (rc) goto exit; @@@ -624,8 -609,6 +609,6 @@@ EXPORT_SYMBOL(nci_register_device) */ void nci_unregister_device(struct nci_dev *ndev) { - nfc_dbg("entry"); - nci_close_device(ndev);
destroy_workqueue(ndev->cmd_wq); @@@ -645,7 -628,7 +628,7 @@@ int nci_recv_frame(struct sk_buff *skb { struct nci_dev *ndev = (struct nci_dev *) skb->dev;
- nfc_dbg("entry, len %d", skb->len); + pr_debug("len %d\n", skb->len);
if (!ndev || (!test_bit(NCI_UP, &ndev->flags) && !test_bit(NCI_INIT, &ndev->flags))) { @@@ -665,7 -648,7 +648,7 @@@ static int nci_send_frame(struct sk_buf { struct nci_dev *ndev = (struct nci_dev *) skb->dev;
- nfc_dbg("entry, len %d", skb->len); + pr_debug("len %d\n", skb->len);
if (!ndev) { kfree_skb(skb); @@@ -684,11 -667,11 +667,11 @@@ int nci_send_cmd(struct nci_dev *ndev, struct nci_ctrl_hdr *hdr; struct sk_buff *skb;
- nfc_dbg("entry, opcode 0x%x, plen %d", opcode, plen); + pr_debug("opcode 0x%x, plen %d\n", opcode, plen);
skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL); if (!skb) { - nfc_err("no memory for command"); + pr_err("no memory for command\n"); return -ENOMEM; }
@@@ -718,7 -701,7 +701,7 @@@ static void nci_tx_work(struct work_str struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work); struct sk_buff *skb;
- nfc_dbg("entry, credits_cnt %d", atomic_read(&ndev->credits_cnt)); + pr_debug("credits_cnt %d\n", atomic_read(&ndev->credits_cnt));
/* Send queued tx data */ while (atomic_read(&ndev->credits_cnt)) { @@@ -726,12 -709,15 +709,15 @@@ if (!skb) return;
- atomic_dec(&ndev->credits_cnt); + /* Check if data flow control is used */ + if (atomic_read(&ndev->credits_cnt) != + NCI_DATA_FLOW_CONTROL_NOT_USED) + atomic_dec(&ndev->credits_cnt);
- nfc_dbg("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d", - nci_pbf(skb->data), - nci_conn_id(skb->data), - nci_plen(skb->data)); + pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n", + nci_pbf(skb->data), + nci_conn_id(skb->data), + nci_plen(skb->data));
nci_send_frame(skb); } @@@ -760,7 -746,7 +746,7 @@@ static void nci_rx_work(struct work_str break;
default: - nfc_err("unknown MT 0x%x", nci_mt(skb->data)); + pr_err("unknown MT 0x%x\n", nci_mt(skb->data)); kfree_skb(skb); break; } @@@ -774,7 -760,7 +760,7 @@@ static void nci_cmd_work(struct work_st struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work); struct sk_buff *skb;
- nfc_dbg("entry, cmd_cnt %d", atomic_read(&ndev->cmd_cnt)); + pr_debug("cmd_cnt %d\n", atomic_read(&ndev->cmd_cnt));
/* Send queued command */ if (atomic_read(&ndev->cmd_cnt)) { @@@ -784,11 -770,11 +770,11 @@@
atomic_dec(&ndev->cmd_cnt);
- nfc_dbg("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d", - nci_pbf(skb->data), - nci_opcode_gid(nci_opcode(skb->data)), - nci_opcode_oid(nci_opcode(skb->data)), - nci_plen(skb->data)); + pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n", + nci_pbf(skb->data), + nci_opcode_gid(nci_opcode(skb->data)), + nci_opcode_oid(nci_opcode(skb->data)), + nci_plen(skb->data));
nci_send_frame(skb);
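
The nci/core.c hunks replace the subsystem-private nfc_dbg()/nfc_err() wrappers with the generic pr_debug()/pr_err() helpers, and the new pr_fmt() definition makes every message carry the module name prefix. A minimal stand-alone imitation of the pr_fmt() mechanism, with printf-based stand-ins instead of the real printk machinery (GNU-style ##__VA_ARGS__ assumed):

    #include <stdio.h>

    /* must come before the helpers below, just like the pr_fmt line in the patch */
    #define KBUILD_MODNAME "nci"
    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

    #define pr_debug(fmt, ...) fprintf(stdout, pr_fmt(fmt), ##__VA_ARGS__)
    #define pr_err(fmt, ...)   fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

    int main(void)
    {
        pr_debug("len %d\n", 12);            /* prints "nci: len 12" */
        pr_err("unknown MT 0x%x\n", 0xff);   /* prints "nci: unknown MT 0xff" */
        return 0;
    }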
diff --combined net/sched/sch_gred.c index 6cd8ddf,a1b7407..1b5e631 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@@ -34,7 -34,7 +34,7 @@@ struct gred_sched
struct gred_sched_data { u32 limit; /* HARD maximal queue length */ - u32 DP; /* the drop pramaters */ + u32 DP; /* the drop parameters */ u32 bytesin; /* bytes seen on virtualQ so far*/ u32 packetsin; /* packets seen on virtualQ so far*/ u32 backlog; /* bytes on the virtualQ */ @@@ -379,13 -379,14 +379,14 @@@ static inline int gred_change_table_def }
static inline int gred_change_vq(struct Qdisc *sch, int dp, - struct tc_gred_qopt *ctl, int prio, u8 *stab) + struct tc_gred_qopt *ctl, int prio, + u8 *stab, u32 max_P) { struct gred_sched *table = qdisc_priv(sch); struct gred_sched_data *q;
if (table->tab[dp] == NULL) { - table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL); + table->tab[dp] = kzalloc(sizeof(*q), GFP_ATOMIC); if (table->tab[dp] == NULL) return -ENOMEM; } @@@ -400,7 -401,7 +401,7 @@@
red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog, - ctl->Scell_log, stab); + ctl->Scell_log, stab, max_P);
return 0; } @@@ -409,6 -410,7 +410,7 @@@ static const struct nla_policy gred_pol [TCA_GRED_PARMS] = { .len = sizeof(struct tc_gred_qopt) }, [TCA_GRED_STAB] = { .len = 256 }, [TCA_GRED_DPS] = { .len = sizeof(struct tc_gred_sopt) }, + [TCA_GRED_MAX_P] = { .type = NLA_U32 }, };
static int gred_change(struct Qdisc *sch, struct nlattr *opt) @@@ -418,6 -420,7 +420,7 @@@ struct nlattr *tb[TCA_GRED_MAX + 1]; int err, prio = GRED_DEF_PRIO; u8 *stab; + u32 max_P;
if (opt == NULL) return -EINVAL; @@@ -433,6 -436,8 +436,8 @@@ tb[TCA_GRED_STAB] == NULL) return -EINVAL;
+ max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0; + err = -EINVAL; ctl = nla_data(tb[TCA_GRED_PARMS]); stab = nla_data(tb[TCA_GRED_STAB]); @@@ -457,7 -462,7 +462,7 @@@
sch_tree_lock(sch);
- err = gred_change_vq(sch, ctl->DP, ctl, prio, stab); + err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P); if (err < 0) goto errout_locked;
@@@ -498,6 -503,7 +503,7 @@@ static int gred_dump(struct Qdisc *sch struct gred_sched *table = qdisc_priv(sch); struct nlattr *parms, *opts = NULL; int i; + u32 max_p[MAX_DPs]; struct tc_gred_sopt sopt = { .DPs = table->DPs, .def_DP = table->def, @@@ -509,6 -515,14 +515,14 @@@ if (opts == NULL) goto nla_put_failure; NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt); + + for (i = 0; i < MAX_DPs; i++) { + struct gred_sched_data *q = table->tab[i]; + + max_p[i] = q ? q->parms.max_P : 0; + } + NLA_PUT(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p); + parms = nla_nest_start(skb, TCA_GRED_PARMS); if (parms == NULL) goto nla_put_failure; diff --combined net/sunrpc/svc.c index e9632bb,9d01d46..a3797c0 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@@ -527,20 -527,17 +527,20 @@@ svc_destroy(struct svc_serv *serv printk("svc_destroy: no threads for serv=%p!\n", serv);
del_timer_sync(&serv->sv_temptimer); - - svc_close_all(&serv->sv_tempsocks); + /* + * The set of xprts (contained in the sv_tempsocks and + * sv_permsocks lists) is now constant, since it is modified + * only by accepting new sockets (done by service threads in + * svc_recv) or aging old ones (done by sv_temptimer), or + * configuration changes (excluded by whatever locking the + * caller is using--nfsd_mutex in the case of nfsd). So it's + * safe to traverse those lists and shut everything down: + */ + svc_close_all(serv);
if (serv->sv_shutdown) serv->sv_shutdown(serv);
- svc_close_all(&serv->sv_permsocks); - - BUG_ON(!list_empty(&serv->sv_permsocks)); - BUG_ON(!list_empty(&serv->sv_tempsocks)); - cache_clean_deferred(serv);
if (svc_serv_is_pooled(serv)) @@@ -686,8 -683,8 +686,8 @@@ found_pool * Create or destroy enough new threads to make the number * of threads the given number. If `pool' is non-NULL, applies * only to threads in that pool, otherwise round-robins between - * all pools. Must be called with a svc_get() reference and - * the BKL or another lock to protect access to svc_serv fields. + * all pools. Caller must ensure that mutual exclusion between this and + * server startup or shutdown. * * Destroying threads relies on the service threads filling in * rqstp->rq_task, which only the nfs ones do. Assumes the serv @@@ -829,7 -826,7 +829,7 @@@ static int __svc_rpcb_register4(const u return error; }
- #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + #if IS_ENABLED(CONFIG_IPV6) /* * Register an "inet6" protocol family netid with the local * rpcbind daemon via an rpcbind v4 SET request. @@@ -875,7 -872,7 +875,7 @@@ static int __svc_rpcb_register6(const u
return error; } - #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ + #endif /* IS_ENABLED(CONFIG_IPV6) */
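
Several sunrpc hunks replace the open-coded defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) tests with IS_ENABLED(CONFIG_IPV6), which is true when the option is built in or built as a module. The sketch below shows one way such a macro can be constructed in the preprocessor; it is a simplified reconstruction in the spirit of include/linux/kconfig.h, not the kernel header verbatim, and it relies on gcc/clang accepting an empty variadic argument.

    #include <stdio.h>

    /* a Kconfig symbol is either "#define CONFIG_FOO 1" (built-in),
     * "#define CONFIG_FOO_MODULE 1" (module), or absent entirely */
    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(ignored, val, ...) val
    #define __is_defined(x) ___is_defined(x)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)

    #define IS_BUILTIN(option) __is_defined(option)
    #define IS_MODULE(option)  __is_defined(option##_MODULE)
    #define IS_ENABLED(option) (IS_BUILTIN(option) || IS_MODULE(option))

    #define CONFIG_IPV6_MODULE 1    /* pretend IPv6 is built as a module */

    int main(void)
    {
        /* expands to (0 || 1) -> prints 1 */
        printf("IS_ENABLED(CONFIG_IPV6) = %d\n", IS_ENABLED(CONFIG_IPV6));
        return 0;
    }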
/* * Register a kernel RPC service via rpcbind version 4. @@@ -896,11 -893,11 +896,11 @@@ static int __svc_register(const char *p error = __svc_rpcb_register4(program, version, protocol, port); break; - #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + #if IS_ENABLED(CONFIG_IPV6) case PF_INET6: error = __svc_rpcb_register6(program, version, protocol, port); - #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ + #endif }
if (error < 0) diff --combined net/sunrpc/svc_xprt.c index 0633c7e,38649cf..74cb0d8 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@@ -22,7 -22,6 +22,7 @@@ static struct svc_deferred_req *svc_def static int svc_deferred_recv(struct svc_rqst *rqstp); static struct cache_deferred_req *svc_defer(struct cache_req *req); static void svc_age_temp_xprts(unsigned long closure); +static void svc_delete_xprt(struct svc_xprt *xprt);
/* apparently the "standard" is that clients close * idle connections after 5 minutes, servers after @@@ -148,8 -147,8 +148,8 @@@ EXPORT_SYMBOL_GPL(svc_xprt_put) * Called by transport drivers to initialize the transport independent * portion of the transport instance. */ -void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt, - struct svc_serv *serv) +void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl, + struct svc_xprt *xprt, struct svc_serv *serv) { memset(xprt, 0, sizeof(*xprt)); xprt->xpt_class = xcl; @@@ -164,7 -163,7 +164,7 @@@ spin_lock_init(&xprt->xpt_lock); set_bit(XPT_BUSY, &xprt->xpt_flags); rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending"); - xprt->xpt_net = get_net(&init_net); + xprt->xpt_net = get_net(net); } EXPORT_SYMBOL_GPL(svc_xprt_init);
@@@ -180,13 -179,13 +180,13 @@@ static struct svc_xprt *__svc_xpo_creat .sin_addr.s_addr = htonl(INADDR_ANY), .sin_port = htons(port), }; - #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + #if IS_ENABLED(CONFIG_IPV6) struct sockaddr_in6 sin6 = { .sin6_family = AF_INET6, .sin6_addr = IN6ADDR_ANY_INIT, .sin6_port = htons(port), }; - #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ + #endif struct sockaddr *sap; size_t len;
@@@ -195,12 -194,12 +195,12 @@@ sap = (struct sockaddr *)&sin; len = sizeof(sin); break; - #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + #if IS_ENABLED(CONFIG_IPV6) case PF_INET6: sap = (struct sockaddr *)&sin6; len = sizeof(sin6); break; - #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ + #endif default: return ERR_PTR(-EAFNOSUPPORT); } @@@ -879,7 -878,7 +879,7 @@@ static void call_xpt_users(struct svc_x /* * Remove a dead transport */ -void svc_delete_xprt(struct svc_xprt *xprt) +static void svc_delete_xprt(struct svc_xprt *xprt) { struct svc_serv *serv = xprt->xpt_server; struct svc_deferred_req *dr; @@@ -894,7 -893,14 +894,7 @@@ spin_lock_bh(&serv->sv_lock); if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags)) list_del_init(&xprt->xpt_list); - /* - * The only time we're called while xpt_ready is still on a list - * is while the list itself is about to be destroyed (in - * svc_destroy). BUT svc_xprt_enqueue could still be attempting - * to add new entries to the sp_sockets list, so we can't leave - * a freed xprt on it. - */ - list_del_init(&xprt->xpt_ready); + BUG_ON(!list_empty(&xprt->xpt_ready)); if (test_bit(XPT_TEMP, &xprt->xpt_flags)) serv->sv_tmpcnt--; spin_unlock_bh(&serv->sv_lock); @@@ -922,48 -928,22 +922,48 @@@ void svc_close_xprt(struct svc_xprt *xp } EXPORT_SYMBOL_GPL(svc_close_xprt);
-void svc_close_all(struct list_head *xprt_list) +static void svc_close_list(struct list_head *xprt_list) +{ + struct svc_xprt *xprt; + + list_for_each_entry(xprt, xprt_list, xpt_list) { + set_bit(XPT_CLOSE, &xprt->xpt_flags); + set_bit(XPT_BUSY, &xprt->xpt_flags); + } +} + +void svc_close_all(struct svc_serv *serv) { + struct svc_pool *pool; struct svc_xprt *xprt; struct svc_xprt *tmp; + int i; + + svc_close_list(&serv->sv_tempsocks); + svc_close_list(&serv->sv_permsocks);
+ for (i = 0; i < serv->sv_nrpools; i++) { + pool = &serv->sv_pools[i]; + + spin_lock_bh(&pool->sp_lock); + while (!list_empty(&pool->sp_sockets)) { + xprt = list_first_entry(&pool->sp_sockets, struct svc_xprt, xpt_ready); + list_del_init(&xprt->xpt_ready); + } + spin_unlock_bh(&pool->sp_lock); + } /* - * The server is shutting down, and no more threads are running. - * svc_xprt_enqueue() might still be running, but at worst it - * will re-add the xprt to sp_sockets, which will soon get - * freed. So we don't bother with any more locking, and don't - * leave the close to the (nonexistent) server threads: + * At this point the sp_sockets lists will stay empty, since + * svc_enqueue will not add new entries without taking the + * sp_lock and checking XPT_BUSY. */ - list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) { - set_bit(XPT_CLOSE, &xprt->xpt_flags); + list_for_each_entry_safe(xprt, tmp, &serv->sv_tempsocks, xpt_list) svc_delete_xprt(xprt); - } + list_for_each_entry_safe(xprt, tmp, &serv->sv_permsocks, xpt_list) + svc_delete_xprt(xprt); + + BUG_ON(!list_empty(&serv->sv_permsocks)); + BUG_ON(!list_empty(&serv->sv_tempsocks)); }
/* diff --combined net/sunrpc/svcsock.c index 277909e,4653286..4645709 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@@ -157,7 -157,7 +157,7 @@@ static void svc_set_cmsg_data(struct sv cmh->cmsg_level = SOL_IPV6; cmh->cmsg_type = IPV6_PKTINFO; pki->ipi6_ifindex = daddr->sin6_scope_id; - ipv6_addr_copy(&pki->ipi6_addr, &daddr->sin6_addr); + pki->ipi6_addr = daddr->sin6_addr; cmh->cmsg_len = CMSG_LEN(sizeof(*pki)); } break; @@@ -523,7 -523,7 +523,7 @@@ static int svc_udp_get_dest_address6(st return 0;
daddr->sin6_family = AF_INET6; - ipv6_addr_copy(&daddr->sin6_addr, &pki->ipi6_addr); + daddr->sin6_addr = pki->ipi6_addr; daddr->sin6_scope_id = pki->ipi6_ifindex; return 1; } @@@ -739,8 -739,7 +739,8 @@@ static void svc_udp_init(struct svc_soc { int err, level, optname, one = 1;
- svc_xprt_init(&svc_udp_class, &svsk->sk_xprt, serv); + svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_udp_class, + &svsk->sk_xprt, serv); clear_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags); svsk->sk_sk->sk_data_ready = svc_udp_data_ready; svsk->sk_sk->sk_write_space = svc_write_space; @@@ -1344,8 -1343,7 +1344,8 @@@ static void svc_tcp_init(struct svc_soc { struct sock *sk = svsk->sk_sk;
- svc_xprt_init(&svc_tcp_class, &svsk->sk_xprt, serv); + svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_tcp_class, + &svsk->sk_xprt, serv); set_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags); if (sk->sk_state == TCP_LISTEN) { dprintk("setting up TCP socket for listening\n"); @@@ -1661,7 -1659,7 +1661,7 @@@ static struct svc_xprt *svc_bc_create_s return ERR_PTR(-ENOMEM);
xprt = &svsk->sk_xprt; - svc_xprt_init(&svc_tcp_bc_class, xprt, serv); + svc_xprt_init(net, &svc_tcp_bc_class, xprt, serv);
serv->sv_bc_xprt = xprt;