The following commit has been merged in the master branch:

commit 4d2962632e83acf8659af33f635e1c27bfe64198
Merge: c76805da93ab876c87f2755928eacc6cf0750d1d 9323b239953605071866899e675c06fb2d41880c
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Wed Jun 4 12:46:26 2014 +1000
Merge remote-tracking branch 'net-next/master'
Conflicts:
	Documentation/driver-model/devres.txt
	drivers/staging/rtl8821ae/core.c
	include/net/inetpeer.h
	net/ipv6/output_core.c
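The devres hunk below adds devm_kmemdup(), devm_get_free_pages()/devm_free_pages(), the devm_mdiobus_*() helpers and the devm_gpiod_*() family to the managed-resource tables. As a hedged sketch (not code from this merge; every name except the devm_* calls is a placeholder), a probe path using two of the new MEM helpers might look like:

	#include <linux/device.h>
	#include <linux/gfp.h>

	/* Duplicate a caller-supplied table and grab one page of scratch
	 * space; devres frees both automatically when probe fails or the
	 * device is unbound, so no explicit cleanup path is needed. */
	static int foo_alloc_resources(struct device *dev, const u8 *table,
				       size_t len, u8 **copy,
				       unsigned long *scratch)
	{
		*copy = devm_kmemdup(dev, table, len, GFP_KERNEL);
		if (!*copy)
			return -ENOMEM;

		*scratch = devm_get_free_pages(dev, GFP_KERNEL, 0);
		if (!*scratch)
			return -ENOMEM; /* the kmemdup copy is released by devres */

		return 0;
	}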
diff --combined Documentation/driver-model/devres.txt
index 8947255,c74e044..df2613c
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@@ -236,9 -236,6 +236,9 @@@ certainly invest a bit more effort int
  MEM
    devm_kzalloc()
    devm_kfree()
 +  devm_kmemdup()
 +  devm_get_free_pages()
 +  devm_free_pages()

  IIO
    devm_iio_device_alloc()

@@@ -312,9 -309,7 +312,14 @@@ SLAVE DMA ENGIN
  SPI
    devm_spi_register_master()

+ MDIO
+   devm_mdiobus_alloc()
+   devm_mdiobus_alloc_size()
+   devm_mdiobus_free()
++
 +GPIO
 +  devm_gpiod_get()
 +  devm_gpiod_get_index()
 +  devm_gpiod_get_optional()
 +  devm_gpiod_get_index_optional()
 +  devm_gpiod_put()

diff --combined MAINTAINERS
index 64ea4ec,dd33abf..f787a1b
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -355,7 -355,7 +355,7 @@@ F:	Documentation/hwmon/adm102
 F:	drivers/hwmon/adm1025.c
ADM1029 HARDWARE MONITOR DRIVER -M: Corentin Labbe corentin.labbe@geomatys.fr +M: Corentin Labbe clabbe.montjoie@gmail.com L: lm-sensors@lm-sensors.org S: Maintained F: drivers/hwmon/adm1029.c @@@ -1617,6 -1617,12 +1617,6 @@@ S: Supporte F: drivers/misc/atmel_tclib.c F: drivers/clocksource/tcb_clksrc.c
-ATMEL TSADCC DRIVER -M: Josh Wu josh.wu@atmel.com -L: linux-input@vger.kernel.org -S: Supported -F: drivers/input/touchscreen/atmel_tsadcc.c - ATMEL USBA UDC DRIVER M: Nicolas Ferre nicolas.ferre@atmel.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@@ -1882,7 -1888,7 +1882,7 @@@ F: drivers/net/ethernet/broadcom/bnx2. F: drivers/net/ethernet/broadcom/bnx2_*
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER - M: Ariel Elior ariele@broadcom.com + M: Ariel Elior ariel.elior@qlogic.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/broadcom/bnx2x/ @@@ -1962,6 -1968,12 +1962,12 @@@ S: Maintaine F: drivers/bcma/ F: include/linux/bcma/
+ BROADCOM SYSTEMPORT ETHERNET DRIVER + M: Florian Fainelli f.fainelli@gmail.com + L: netdev@vger.kernel.org + S: Supported + F: drivers/net/ethernet/broadcom/bcmsysport.* + BROCADE BFA FC SCSI DRIVER M: Anil Gurumurthy anil.gurumurthy@qlogic.com M: Sudarsana Kalluru sudarsana.kalluru@qlogic.com @@@ -2188,7 -2200,6 +2194,7 @@@ T: git git://git.kernel.org/pub/scm/lin S: Supported F: drivers/char/* F: drivers/misc/* +F: include/linux/miscdevice.h
CHECKPATCH M: Andy Whitcroft apw@canonical.com @@@ -2218,9 -2229,8 +2224,8 @@@ F: drivers/platform/chrome CISCO VIC ETHERNET NIC DRIVER M: Christian Benvenuti benve@cisco.com M: Sujith Sankar ssujith@cisco.com - M: Govindarajulu Varadarajan govindarajulu90@gmail.com + M: Govindarajulu Varadarajan _govind@gmx.com M: Neel Patel neepatel@cisco.com - M: Nishank Trivedi nistrive@cisco.com S: Supported F: drivers/net/ethernet/cisco/enic/
@@@ -2405,6 -2415,7 +2410,6 @@@ F: drivers/net/ethernet/ti/cpmac. CPU FREQUENCY DRIVERS M: Rafael J. Wysocki rjw@rjwysocki.net M: Viresh Kumar viresh.kumar@linaro.org -L: cpufreq@vger.kernel.org L: linux-pm@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git @@@ -2415,6 -2426,7 +2420,6 @@@ F: include/linux/cpufreq. CPU FREQUENCY DRIVERS - ARM BIG LITTLE M: Viresh Kumar viresh.kumar@linaro.org M: Sudeep Holla sudeep.holla@arm.com -L: cpufreq@vger.kernel.org L: linux-pm@vger.kernel.org W: http://www.arm.com/products/processors/technologies/biglittleprocessing.php S: Maintained @@@ -2680,15 -2692,6 +2685,15 @@@ S: Orpha F: Documentation/networking/decnet.txt F: net/decnet/
+DECSTATION PLATFORM SUPPORT +M: "Maciej W. Rozycki" macro@linux-mips.org +L: linux-mips@linux-mips.org +W: http://www.linux-mips.org/wiki/DECstation +S: Maintained +F: arch/mips/dec/ +F: arch/mips/include/asm/dec/ +F: arch/mips/include/asm/mach-dec/ + DEFXX FDDI NETWORK DRIVER M: "Maciej W. Rozycki" macro@linux-mips.org S: Maintained @@@ -2786,14 -2789,12 +2791,14 @@@ F: sound/soc/codecs/da[79]*.[ch
DIGI NEO AND CLASSIC PCI PRODUCTS M: Lidza Louina lidza.louina@gmail.com +M: Mark Hounschell markh@compro.net L: driverdev-devel@linuxdriverproject.org S: Maintained F: drivers/staging/dgnc/
DIGI EPCA PCI PRODUCTS M: Lidza Louina lidza.louina@gmail.com +M: Mark Hounschell markh@compro.net L: driverdev-devel@linuxdriverproject.org S: Maintained F: drivers/staging/dgap/ @@@ -3157,9 -3158,10 +3162,9 @@@ S: Maintaine F: drivers/scsi/eata_pio.*
EBTABLES -M: Bart De Schuymer bart.de.schuymer@pandora.be L: netfilter-devel@vger.kernel.org W: http://ebtables.sourceforge.net/ -S: Maintained +S: Orphan F: include/linux/netfilter_bridge/ebt_*.h F: include/uapi/linux/netfilter_bridge/ebt_*.h F: net/bridge/netfilter/ebt*.c @@@ -3772,8 -3774,7 +3777,8 @@@ F: fs/fscache F: include/linux/fscache*.h
F2FS FILE SYSTEM -M: Jaegeuk Kim jaegeuk.kim@samsung.com +M: Jaegeuk Kim jaegeuk@kernel.org +M: Changman Lee cm224.lee@samsung.com L: linux-f2fs-devel@lists.sourceforge.net W: http://en.wikipedia.org/wiki/F2FS T: git git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git @@@ -4208,11 -4209,9 +4213,11 @@@ S: Maintaine F: fs/hpfs/
HSI SUBSYSTEM -M: Sebastian Reichel sre@debian.org +M: Sebastian Reichel sre@kernel.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/sre/linux-hsi.git S: Maintained F: Documentation/ABI/testing/sysfs-bus-hsi +F: Documentation/hsi.txt F: drivers/hsi/ F: include/linux/hsi/ F: include/uapi/linux/hsi/ @@@ -6424,7 -6423,6 +6429,7 @@@ F: drivers/usb/*/*omap F: arch/arm/*omap*/usb*
OMAP GPIO DRIVER +M: Javier Martinez Canillas javier@dowhile0.org M: Santosh Shilimkar santosh.shilimkar@ti.com M: Kevin Hilman khilman@deeprootsystems.com L: linux-omap@vger.kernel.org @@@ -6714,7 -6712,6 +6719,7 @@@ F: Documentation/PCI F: drivers/pci/ F: include/linux/pci* F: arch/x86/pci/ +F: arch/x86/kernel/quirks.c
PCI DRIVER FOR IMX6 M: Richard Zhu r65037@freescale.com @@@ -6762,14 -6759,6 +6767,14 @@@ L: linux-pci@vger.kernel.or S: Maintained F: drivers/pci/host/*designware*
+PCI DRIVER FOR GENERIC OF HOSTS +M: Will Deacon will.deacon@arm.com +L: linux-pci@vger.kernel.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: Documentation/devicetree/bindings/pci/host-generic-pci.txt +F: drivers/pci/host/pci-host-generic.c + PCMCIA SUBSYSTEM P: Linux PCMCIA Team L: linux-pcmcia@lists.infradead.org @@@ -6904,7 -6893,7 +6909,7 @@@ PKUNITY SOC DRIVER M: Guan Xuetao gxt@mprc.pku.edu.cn W: http://mprc.pku.edu.cn/~guanxuetao/linux S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/epip/linux-2.6-unicore32.git +T: git git://github.com/gxt/linux.git F: drivers/input/serio/i8042-unicore32io.h F: drivers/i2c/busses/i2c-puv3.c F: drivers/video/fb-puv3.c @@@ -6954,6 -6943,7 +6959,6 @@@ F: drivers/power
PNP SUPPORT M: Rafael J. Wysocki rafael.j.wysocki@intel.com -M: Bjorn Helgaas bhelgaas@google.com S: Maintained F: drivers/pnp/
@@@ -7420,14 -7410,6 +7425,14 @@@ F: drivers/rpmsg F: Documentation/rpmsg.txt F: include/linux/rpmsg.h
+RESET CONTROLLER FRAMEWORK +M: Philipp Zabel p.zabel@pengutronix.de +S: Maintained +F: drivers/reset/ +F: Documentation/devicetree/bindings/reset/ +F: include/linux/reset.h +F: include/linux/reset-controller.h + RFKILL M: Johannes Berg johannes@sipsolutions.net L: linux-wireless@vger.kernel.org @@@ -7677,6 -7659,7 +7682,6 @@@ L: linux-media@vger.kernel.or Q: https://patchwork.linuxtv.org/project/linux-media/list/ S: Supported F: drivers/media/platform/exynos4-is/ -F: include/media/s5p_fimc.h
SAMSUNG S3C24XX/S3C64XX SOC SERIES CAMIF DRIVER M: Sylwester Nawrocki sylvester.nawrocki@gmail.com @@@ -7979,26 -7962,6 +7984,26 @@@ M: Robin Holt <robinmholt@gmail.com S: Maintained F: drivers/misc/sgi-xp/
+SI2157 MEDIA DRIVER +M: Antti Palosaari crope@iki.fi +L: linux-media@vger.kernel.org +W: http://linuxtv.org/ +W: http://palosaari.fi/linux/ +Q: http://patchwork.linuxtv.org/project/linux-media/list/ +T: git git://linuxtv.org/anttip/media_tree.git +S: Maintained +F: drivers/media/tuners/si2157* + +SI2168 MEDIA DRIVER +M: Antti Palosaari crope@iki.fi +L: linux-media@vger.kernel.org +W: http://linuxtv.org/ +W: http://palosaari.fi/linux/ +Q: http://patchwork.linuxtv.org/project/linux-media/list/ +T: git git://linuxtv.org/anttip/media_tree.git +S: Maintained +F: drivers/media/dvb-frontends/si2168* + SI470X FM RADIO RECEIVER I2C DRIVER M: Hans Verkuil hverkuil@xs4all.nl L: linux-media@vger.kernel.org @@@ -8518,7 -8481,7 +8523,7 @@@ S: Maintaine F: drivers/staging/olpc_dcon/
STAGING - OZMO DEVICES USB OVER WIFI DRIVER -M: Rupesh Gujare rupesh.gujare@atmel.com +M: Shigekatsu Tateno shigekatsu.tateno@atmel.com S: Maintained F: drivers/staging/ozwpan/
@@@ -8533,13 -8496,6 +8538,13 @@@ M: Florian Schilhabel <florian.c.schilh S: Odd Fixes F: drivers/staging/rtl8712/
+STAGING - REALTEK RTL8723U WIRELESS DRIVER +M: Larry Finger Larry.Finger@lwfinger.net +M: Jes Sorensen Jes.Sorensen@redhat.com +L: linux-wireless@vger.kernel.org +S: Maintained +F: drivers/staging/rtl8723au/ + STAGING - SILICON MOTION SM7XX FRAME BUFFER DRIVER M: Teddy Wang teddy.wang@siliconmotion.com.cn S: Odd Fixes @@@ -9220,7 -9176,7 +9225,7 @@@ UNICORE32 ARCHITECTURE M: Guan Xuetao gxt@mprc.pku.edu.cn W: http://mprc.pku.edu.cn/~guanxuetao/linux S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/epip/linux-2.6-unicore32.git +T: git git://github.com/gxt/linux.git F: arch/unicore32/
UNIFDEF diff --combined arch/arm/boot/dts/am33xx.dtsi index 9f53e82,f1eea4a..4a4e02d --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@@ -147,6 -147,9 +147,6 @@@ <0x44e10f90 0x40>; interrupts = <12 13 14>; #dma-cells = <1>; - dma-channels = <64>; - ti,edma-regions = <4>; - ti,edma-slots = <256>; };
gpio0: gpio@44e07000 { @@@ -662,6 -665,8 +662,8 @@@ mac: ethernet@4a100000 { compatible = "ti,cpsw"; ti,hwmods = "cpgmac0"; + clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>; + clock-names = "fck", "cpts"; cpdma_channels = <8>; ale_entries = <1024>; bd_ram_size = <0x2000>; @@@ -685,7 -690,6 +687,7 @@@ */ interrupts = <40 41 42 43>; ranges; + status = "disabled";
davinci_mdio: mdio@4a101000 { compatible = "ti,davinci_mdio"; @@@ -694,7 -698,6 +696,7 @@@ ti,hwmods = "davinci_mdio"; bus_freq = <1000000>; reg = <0x4a101000 0x100>; + status = "disabled"; };
cpsw_emac0: slave@4a100200 { diff --combined arch/arm/boot/dts/am4372.dtsi index 794c73e,03a2255..230bf0e --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@@ -67,15 -67,11 +67,15 @@@ };
ocp { - compatible = "simple-bus"; + compatible = "ti,am4372-l3-noc", "simple-bus"; #address-cells = <1>; #size-cells = <1>; ranges; ti,hwmods = "l3_main"; + reg = <0x44000000 0x400000 + 0x44800000 0x400000>; + interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
prcm: prcm@44df0000 { compatible = "ti,am4-prcm"; @@@ -112,6 -108,9 +112,6 @@@ <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>; #dma-cells = <1>; - dma-channels = <64>; - ti,edma-regions = <4>; - ti,edma-slots = <256>; };
uart0: serial@44e09000 { @@@ -490,6 -489,8 +490,8 @@@ #address-cells = <1>; #size-cells = <1>; ti,hwmods = "cpgmac0"; + clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>; + clock-names = "fck", "cpts"; status = "disabled"; cpdma_channels = <8>; ale_entries = <1024>; @@@ -522,12 -523,6 +524,12 @@@ /* Filled in by U-Boot */ mac-address = [ 00 00 00 00 00 00 ]; }; + + phy_sel: cpsw-phy-sel@44e10650 { + compatible = "ti,am43xx-cpsw-phy-sel"; + reg= <0x44e10650 0x4>; + reg-names = "gmii-sel"; + }; };
epwmss0: epwmss@48300000 { @@@ -742,121 -737,6 +744,121 @@@ #size-cells = <1>; status = "disabled"; }; + + am43xx_control_usb2phy1: control-phy@44e10620 { + compatible = "ti,control-phy-usb2-am437"; + reg = <0x44e10620 0x4>; + reg-names = "power"; + }; + + am43xx_control_usb2phy2: control-phy@0x44e10628 { + compatible = "ti,control-phy-usb2-am437"; + reg = <0x44e10628 0x4>; + reg-names = "power"; + }; + + ocp2scp0: ocp2scp@483a8000 { + compatible = "ti,omap-ocp2scp"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + ti,hwmods = "ocp2scp0"; + + usb2_phy1: phy@483a8000 { + compatible = "ti,am437x-usb2"; + reg = <0x483a8000 0x8000>; + ctrl-module = <&am43xx_control_usb2phy1>; + clocks = <&usb_phy0_always_on_clk32k>, + <&usb_otg_ss0_refclk960m>; + clock-names = "wkupclk", "refclk"; + #phy-cells = <0>; + status = "disabled"; + }; + }; + + ocp2scp1: ocp2scp@483e8000 { + compatible = "ti,omap-ocp2scp"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + ti,hwmods = "ocp2scp1"; + + usb2_phy2: phy@483e8000 { + compatible = "ti,am437x-usb2"; + reg = <0x483e8000 0x8000>; + ctrl-module = <&am43xx_control_usb2phy2>; + clocks = <&usb_phy1_always_on_clk32k>, + <&usb_otg_ss1_refclk960m>; + clock-names = "wkupclk", "refclk"; + #phy-cells = <0>; + status = "disabled"; + }; + }; + + dwc3_1: omap_dwc3@48380000 { + compatible = "ti,am437x-dwc3"; + ti,hwmods = "usb_otg_ss0"; + reg = <0x48380000 0x10000>; + interrupts = <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <1>; + utmi-mode = <1>; + ranges; + + usb1: usb@48390000 { + compatible = "synopsys,dwc3"; + reg = <0x48390000 0x17000>; + interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>; + phys = <&usb2_phy1>; + phy-names = "usb2-phy"; + maximum-speed = "high-speed"; + dr_mode = "otg"; + status = "disabled"; + }; + }; + + dwc3_2: omap_dwc3@483c0000 { + compatible = "ti,am437x-dwc3"; + ti,hwmods = "usb_otg_ss1"; + reg = <0x483c0000 0x10000>; + interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <1>; + utmi-mode = <1>; + ranges; + + usb2: usb@483d0000 { + compatible = "synopsys,dwc3"; + reg = <0x483d0000 0x17000>; + interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>; + phys = <&usb2_phy2>; + phy-names = "usb2-phy"; + maximum-speed = "high-speed"; + dr_mode = "otg"; + status = "disabled"; + }; + }; + + qspi: qspi@47900000 { + compatible = "ti,am4372-qspi"; + reg = <0x47900000 0x100>; + #address-cells = <1>; + #size-cells = <0>; + ti,hwmods = "qspi"; + interrupts = <0 138 0x4>; + num-cs = <4>; + status = "disabled"; + }; + + hdq: hdq@48347000 { + compatible = "ti,am43xx-hdq"; + reg = <0x48347000 0x1000>; + interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&func_12m_clk>; + clock-names = "fck"; + ti,hwmods = "hdq1w"; + status = "disabled"; + }; }; };
diff --combined arch/arm/boot/dts/armada-xp-matrix.dts index 25674fe,3bb8c00..7e291e2 --- a/arch/arm/boot/dts/armada-xp-matrix.dts +++ b/arch/arm/boot/dts/armada-xp-matrix.dts @@@ -37,15 -37,19 +37,15 @@@
internal-regs { serial@12000 { status = "okay"; }; serial@12100 { status = "okay"; }; serial@12200 { status = "okay"; }; serial@12300 { - clock-frequency = <250000000>; status = "okay"; };
@@@ -57,6 -61,10 +57,10 @@@ ethernet@30000 { status = "okay"; phy-mode = "sgmii"; + fixed-link { + speed = <1000>; + full-duplex; + }; };
pcie-controller { diff --combined arch/sparc/include/asm/checksum_32.h index c80cf02,04471dc..426b238 --- a/arch/sparc/include/asm/checksum_32.h +++ b/arch/sparc/include/asm/checksum_32.h @@@ -29,7 -29,7 +29,7 @@@ * * it's best to have buff aligned on a 32-bit boundary */ -extern __wsum csum_partial(const void *buff, int len, __wsum sum); +__wsum csum_partial(const void *buff, int len, __wsum sum);
/* the same as csum_partial, but copies from fs:src while it * checksums @@@ -38,7 -38,7 +38,7 @@@ * better 64-bit) boundary */
-extern unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *); +unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) @@@ -238,4 -238,16 +238,16 @@@ static inline __sum16 ip_compute_csum(c return csum_fold(csum_partial(buff, len, 0)); }
+ #define HAVE_ARCH_CSUM_ADD + static inline __wsum csum_add(__wsum csum, __wsum addend) + { + __asm__ __volatile__( + "addcc %0, %1, %0\n" + "addx %0, %%g0, %0" + : "=r" (csum) + : "r" (addend), "0" (csum)); + + return csum; + } + #endif /* !(__SPARC_CHECKSUM_H) */ diff --combined arch/sparc/include/asm/checksum_64.h index b3c8b9d,2ff81ae..b8779a6 --- a/arch/sparc/include/asm/checksum_64.h +++ b/arch/sparc/include/asm/checksum_64.h @@@ -29,7 -29,7 +29,7 @@@ * * it's best to have buff aligned on a 32-bit boundary */ -extern __wsum csum_partial(const void * buff, int len, __wsum sum); +__wsum csum_partial(const void * buff, int len, __wsum sum);
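The HAVE_ARCH_CSUM_ADD block just added to checksum_32.h (mirrored for sparc64 further down) folds the end-around carry with a two-instruction addcc/addx sequence. For comparison, a hedged sketch of the generic C fallback it overrides, modeled on the usual include/net/checksum.h definition:

	#include <linux/types.h>

	/* End-around-carry add: if the 32-bit sum wraps, the carry is added
	 * back into bit 0, the same folding the addcc/addx pair performs. */
	static inline __wsum csum_add(__wsum csum, __wsum addend)
	{
		u32 res = (__force u32)csum;

		res += (__force u32)addend;
		return (__force __wsum)(res + (res < (__force u32)addend));
	}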
/* the same as csum_partial, but copies from user space while it * checksums @@@ -37,12 -37,12 +37,12 @@@ * here even more important to align src and dst on a 32-bit (or even * better 64-bit) boundary */ -extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, - int len, __wsum sum); +__wsum csum_partial_copy_nocheck(const void *src, void *dst, + int len, __wsum sum);
-extern long __csum_partial_copy_from_user(const void __user *src, - void *dst, int len, - __wsum sum); +long __csum_partial_copy_from_user(const void __user *src, + void *dst, int len, + __wsum sum);
static inline __wsum csum_partial_copy_from_user(const void __user *src, @@@ -59,9 -59,9 +59,9 @@@ * Copy and checksum to user */ #define HAVE_CSUM_COPY_USER -extern long __csum_partial_copy_to_user(const void *src, - void __user *dst, int len, - __wsum sum); +long __csum_partial_copy_to_user(const void *src, + void __user *dst, int len, + __wsum sum);
static inline __wsum csum_and_copy_to_user(const void *src, @@@ -77,7 -77,7 +77,7 @@@ /* ihl is always 5 or greater, almost always is 5, and iph is word aligned * the majority of the time. */ -extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); +__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
/* Fold a partial checksum without adding pseudo headers. */ static inline __sum16 csum_fold(__wsum sum) @@@ -96,9 -96,9 +96,9 @@@ }
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned int len, - unsigned short proto, - __wsum sum) + unsigned int len, + unsigned short proto, + __wsum sum) { __asm__ __volatile__( " addcc %1, %0, %0\n" @@@ -116,9 -116,9 +116,9 @@@ * returns a 16-bit checksum, already complemented */ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) + unsigned short len, + unsigned short proto, + __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); } @@@ -164,4 -164,16 +164,16 @@@ static inline __sum16 ip_compute_csum(c return csum_fold(csum_partial(buff, len, 0)); }
+ #define HAVE_ARCH_CSUM_ADD + static inline __wsum csum_add(__wsum csum, __wsum addend) + { + __asm__ __volatile__( + "addcc %0, %1, %0\n" + "addx %0, %%g0, %0" + : "=r" (csum) + : "r" (addend), "0" (csum)); + + return csum; + } + #endif /* !(__SPARC64_CHECKSUM_H) */ diff --combined drivers/clk/ti/clk-43xx.c index 527a43d,b4877e0..3795fce --- a/drivers/clk/ti/clk-43xx.c +++ b/drivers/clk/ti/clk-43xx.c @@@ -105,20 -105,30 +105,36 @@@ static struct ti_dt_clk am43xx_clks[] DT_CLK(NULL, "func_12m_clk", "func_12m_clk"), DT_CLK(NULL, "vtp_clk_div", "vtp_clk_div"), DT_CLK(NULL, "usbphy_32khz_clkmux", "usbphy_32khz_clkmux"), + DT_CLK("48300200.ehrpwm", "tbclk", "ehrpwm0_tbclk"), + DT_CLK("48302200.ehrpwm", "tbclk", "ehrpwm1_tbclk"), + DT_CLK("48304200.ehrpwm", "tbclk", "ehrpwm2_tbclk"), + DT_CLK("48306200.ehrpwm", "tbclk", "ehrpwm3_tbclk"), + DT_CLK("48308200.ehrpwm", "tbclk", "ehrpwm4_tbclk"), + DT_CLK("4830a200.ehrpwm", "tbclk", "ehrpwm5_tbclk"), { .node_name = NULL }, };
int __init am43xx_dt_clk_init(void) { + struct clk *clk1, *clk2; + ti_dt_clocks_register(am43xx_clks);
omap2_clk_disable_autoidle_all();
+ /* + * cpsw_cpts_rft_clk has a choice of 3 clocksources + * dpll_core_m4_ck, dpll_core_m5_ck and dpll_disp_m2_ck. + * By default dpll_core_m4_ck is selected; with this as clock + * source the CPTS does not work properly. It gives clockcheck errors + * while running PTP. + * clockcheck: clock jumped backward or running slower than expected! + * Selecting dpll_core_m5_ck as the clocksource fixes this issue. + * In AM335x dpll_core_m5_ck is the default clocksource. + */ + clk1 = clk_get_sys(NULL, "cpsw_cpts_rft_clk"); + clk2 = clk_get_sys(NULL, "dpll_core_m5_ck"); + clk_set_parent(clk1, clk2); + return 0; } diff --combined drivers/net/ethernet/Kconfig index 0513494,f0e2a4d..edb7186 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@@ -39,7 -39,6 +39,7 @@@ source "drivers/net/ethernet/cisco/Kcon config CX_ECAT tristate "Beckhoff CX5020 EtherCAT master support" depends on PCI + depends on X86 || COMPILE_TEST ---help--- Driver for EtherCAT master module located on CCAT FPGA that can be found on Beckhoff CX5020, and possibly other of CX @@@ -68,6 -67,7 +68,7 @@@ source "drivers/net/ethernet/neterion/K source "drivers/net/ethernet/faraday/Kconfig" source "drivers/net/ethernet/freescale/Kconfig" source "drivers/net/ethernet/fujitsu/Kconfig" + source "drivers/net/ethernet/hisilicon/Kconfig" source "drivers/net/ethernet/hp/Kconfig" source "drivers/net/ethernet/ibm/Kconfig" source "drivers/net/ethernet/intel/Kconfig" diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index dd57c7c,d18441e..cca6139 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@@ -6,7 -6,7 +6,7 @@@ * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * - * Maintained by: Eilon Greenstein eilong@broadcom.com + * Maintained by: Ariel Elior ariel.elior@qlogic.com * Written by: Eliezer Tamir * Based on code from Michael Chan's bnx2 driver * UDP CSUM errata workaround by Arik Gendelman @@@ -2781,7 -2781,7 +2781,7 @@@ int bnx2x_nic_load(struct bnx2x *bp, in
case LOAD_OPEN: netif_tx_start_all_queues(bp->dev); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); break;
case LOAD_DIAG: @@@ -4939,9 -4939,9 +4939,9 @@@ void bnx2x_update_coalesce_sb_index(str void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag, u32 verbose) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); set_bit(flag, &bp->sp_rtnl_state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n", flag); schedule_delayed_work(&bp->sp_rtnl_task, 0); diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 3a8e51e,ff2bdd8..a413c1a --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@@ -6,7 -6,7 +6,7 @@@ * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * - * Maintained by: Eilon Greenstein eilong@broadcom.com + * Maintained by: Ariel Elior ariel.elior@qlogic.com * Written by: Eliezer Tamir * Based on code from Michael Chan's bnx2 driver * UDP CSUM errata workaround by Arik Gendelman @@@ -1858,10 -1858,10 +1858,10 @@@ void bnx2x_sp_event(struct bnx2x_fastpa return; #endif
- smp_mb__before_atomic_inc(); + smp_mb__before_atomic(); atomic_inc(&bp->cq_spq_left); /* push the change in bp->spq_left and towards the memory */ - smp_mb__after_atomic_inc(); + smp_mb__after_atomic();
DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
@@@ -1876,11 -1876,11 +1876,11 @@@ * sp_state is cleared, and this order prevents * races */ - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state); wmb(); clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic();
/* schedule the sp task as mcp ack is required */ bnx2x_schedule_sp_task(bp); @@@ -5272,9 -5272,9 +5272,9 @@@ static void bnx2x_after_function_update __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
/* mark latest Q bit */ - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic();
/* send Q update ramrod for FCoE Q */ rc = bnx2x_queue_state_change(bp, &queue_params); @@@ -5500,7 -5500,7 +5500,7 @@@ next_spqe spqe_cnt++; } /* for */
- smp_mb__before_atomic_inc(); + smp_mb__before_atomic(); atomic_add(spqe_cnt, &bp->eq_spq_left);
bp->eq_cons = sw_cons; @@@ -13875,9 -13875,9 +13875,9 @@@ static int bnx2x_drv_ctl(struct net_dev case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: { int count = ctl->data.credit.credit_count;
- smp_mb__before_atomic_inc(); + smp_mb__before_atomic(); atomic_add(count, &bp->cq_spq_left); - smp_mb__after_atomic_inc(); + smp_mb__after_atomic(); break; } case DRV_CTL_ULP_REGISTER_CMD: { diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index d725317,736264b..b193604 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@@ -12,7 -12,7 +12,7 @@@ * license other than the GPL, without Broadcom's express prior written * consent. * - * Maintained by: Eilon Greenstein eilong@broadcom.com + * Maintained by: Ariel Elior ariel.elior@qlogic.com * Written by: Vladislav Zolotarov * */ @@@ -258,16 -258,16 +258,16 @@@ static bool bnx2x_raw_check_pending(str
static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(o->state, o->pstate); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); }
static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); set_bit(o->state, o->pstate); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); }
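The repetitive churn through these bnx2x files is the tree-wide barrier rename: the op-specific smp_mb__before/after_clear_bit() and smp_mb__before/after_atomic_inc() macros collapse into a single smp_mb__before_atomic()/smp_mb__after_atomic() pair that covers any non-value-returning atomic RMW. A minimal hedged sketch of the resulting pattern (publish_flag, flag and state are placeholders, not names from this diff):

	#include <linux/atomic.h>
	#include <linux/bitops.h>

	static void publish_flag(unsigned long *state, int flag)
	{
		smp_mb__before_atomic();  /* earlier stores visible before the bit flips */
		set_bit(flag, state);
		smp_mb__after_atomic();   /* the bit is visible before later accesses */
	}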
/** @@@ -2131,7 -2131,7 +2131,7 @@@ static int bnx2x_set_rx_mode_e1x(struc
/* The operation is completed */ clear_bit(p->state, p->pstate); - smp_mb__after_clear_bit(); + smp_mb__after_atomic();
return 0; } @@@ -3576,16 -3576,16 +3576,16 @@@ error_exit1
static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(o->sched_state, o->raw.pstate); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); }
static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); set_bit(o->sched_state, o->raw.pstate); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); }
static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o) @@@ -4200,7 -4200,7 +4200,7 @@@ int bnx2x_queue_state_change(struct bnx if (rc) { o->next_state = BNX2X_Q_STATE_MAX; clear_bit(pending_bit, pending); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); return rc; }
@@@ -4288,7 -4288,7 +4288,7 @@@ static int bnx2x_queue_comp_cmd(struct wmb();
clear_bit(cmd, &o->pending); - smp_mb__after_clear_bit(); + smp_mb__after_atomic();
return 0; } @@@ -5279,7 -5279,7 +5279,7 @@@ static inline int bnx2x_func_state_chan wmb();
clear_bit(cmd, &o->pending); - smp_mb__after_clear_bit(); + smp_mb__after_atomic();
return 0; } @@@ -5926,7 -5926,7 +5926,7 @@@ int bnx2x_func_state_change(struct bnx2 if (rc) { o->next_state = BNX2X_F_STATE_MAX; clear_bit(cmd, pending); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); return rc; }
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index faf0148,a93c7af..749fa31 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@@ -12,9 -12,9 +12,9 @@@ * license other than the GPL, without Broadcom's express prior written * consent. * - * Maintained by: Eilon Greenstein eilong@broadcom.com - * Written by: Shmulik Ravid shmulikr@broadcom.com - * Ariel Elior ariele@broadcom.com + * Maintained by: Ariel Elior ariel.elior@qlogic.com + * Written by: Shmulik Ravid + * Ariel Elior ariel.elior@qlogic.com * */ #include "bnx2x.h" @@@ -1650,9 -1650,9 +1650,9 @@@ stati void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp, struct bnx2x_virtf *vf) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); }
static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp, @@@ -2576,7 -2576,8 +2576,8 @@@ int bnx2x_get_vf_config(struct net_devi
ivi->vf = vfidx; ivi->qos = 0; - ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */ + ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */ + ivi->min_tx_rate = 0; ivi->spoofchk = 1; /*always enabled */ if (vf->state == VF_ENABLED) { /* mac and vlan are in vlan_mac objects */ @@@ -2990,9 -2991,9 +2991,9 @@@ void bnx2x_iov_task(struct work_struct
void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); set_bit(flag, &bp->iov_task_state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0); } diff --combined drivers/net/ethernet/chelsio/cxgb/cxgb2.c index 05613a8,c1b2c1d..0acee2c --- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@@ -281,7 -281,7 +281,7 @@@ static int cxgb_close(struct net_devic if (adapter->params.stats_update_period && !(adapter->open_device_map & PORT_MASK)) { /* Stop statistics accumulation. */ - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); spin_lock(&adapter->work_lock); /* sync with update task */ spin_unlock(&adapter->work_lock); cancel_mac_stats_update(adapter); @@@ -1100,7 -1100,7 +1100,7 @@@ static int init_one(struct pci_dev *pde
netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
- SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops); + netdev->ethtool_ops = &t1_ethtool_ops; }
if (t1_init_sw_modules(adapter, bi) < 0) { diff --combined drivers/net/ethernet/chelsio/cxgb4/sge.c index e249528,cced1a3..c173f5a --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@@ -1697,7 -1697,8 +1697,8 @@@ int t4_ethrx_handler(struct sge_rspq *q return handle_trace_pkt(q->adap, si);
pkt = (const struct cpl_rx_pkt *)rsp; - csum_ok = pkt->csum_calc && !pkt->err_vec; + csum_ok = pkt->csum_calc && !pkt->err_vec && + (q->netdev->features & NETIF_F_RXCSUM); if ((pkt->l2info & htonl(RXF_TCP)) && (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { do_gro(rxq, si, pkt); @@@ -1720,8 -1721,7 +1721,7 @@@
rxq->stats.pkts++;
- if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) && - (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) { + if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) { if (!pkt->ip_frag) { skb->ip_summed = CHECKSUM_UNNECESSARY; rxq->stats.rx_cso++; @@@ -2031,7 -2031,7 +2031,7 @@@ static void sge_rx_timer_cb(unsigned lo struct sge_fl *fl = s->egr_map[id];
clear_bit(id, s->starving_fl); - smp_mb__after_clear_bit(); + smp_mb__after_atomic();
if (fl_starving(fl)) { rxq = container_of(fl, struct sge_eth_rxq, fl); diff --combined drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 9d88c1d,adebbf8..bdfa80c --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@@ -1510,7 -1510,8 +1510,8 @@@ int t4vf_ethrx_handler(struct sge_rspq { struct sk_buff *skb; const struct cpl_rx_pkt *pkt = (void *)rsp; - bool csum_ok = pkt->csum_calc && !pkt->err_vec; + bool csum_ok = pkt->csum_calc && !pkt->err_vec && + (rspq->netdev->features & NETIF_F_RXCSUM); struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
/* @@@ -1538,8 -1539,8 +1539,8 @@@ skb_record_rx_queue(skb, rspq->idx); rxq->stats.pkts++;
- if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) && - !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) { + if (csum_ok && !pkt->err_vec && + (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) { if (!pkt->ip_frag) skb->ip_summed = CHECKSUM_UNNECESSARY; else { @@@ -1951,7 -1952,7 +1952,7 @@@ static void sge_rx_timer_cb(unsigned lo struct sge_fl *fl = s->egr_map[id];
clear_bit(id, s->starving_fl); - smp_mb__after_clear_bit(); + smp_mb__after_atomic();
/* * Since we are accessing fl without a lock there's a diff --combined drivers/net/ethernet/freescale/gianfar.c index ee6ddbd,2826740..a7fe6a7 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@@ -889,6 -889,17 +889,17 @@@ static int gfar_of_init(struct platform
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
+ /* In the case of a fixed PHY, the DT node associated + * to the PHY is the Ethernet MAC DT node. + */ + if (of_phy_is_fixed_link(np)) { + err = of_phy_register_fixed_link(np); + if (err) + goto err_grp_init; + + priv->phy_node = np; + } + /* Find the TBI PHY. If it's not there, we don't support SGMII */ priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
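The gianfar hunk above relies on the then-new fixed-link OF helpers: when the MAC node carries a fixed-link subnode, the MAC node itself stands in for the PHY. A hedged sketch of that probe pattern (example_get_phy_node() is illustrative, not a gianfar function):

	#include <linux/of.h>
	#include <linux/of_mdio.h>

	static int example_get_phy_node(struct device_node *np,
					struct device_node **phy_node)
	{
		if (of_phy_is_fixed_link(np)) {
			int err = of_phy_register_fixed_link(np);

			if (err)
				return err;
			*phy_node = np;  /* the MAC node doubles as the PHY node */
			return 0;
		}

		*phy_node = of_parse_phandle(np, "phy-handle", 0);
		return *phy_node ? 0 : -ENODEV;
	}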
@@@ -1660,9 -1671,6 +1671,6 @@@ static int init_phy(struct net_device *
priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, interface); - if (!priv->phydev) - priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link, - interface); if (!priv->phydev) { dev_err(&dev->dev, "could not attach to PHY\n"); return -ENODEV; @@@ -1798,9 -1806,9 +1806,9 @@@ void stop_gfar(struct net_device *dev
netif_tx_stop_all_queues(dev);
- smp_mb__before_clear_bit(); + smp_mb__before_atomic(); set_bit(GFAR_DOWN, &priv->state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic();
disable_napi(priv);
@@@ -2043,9 -2051,9 +2051,9 @@@ int startup_gfar(struct net_device *nde
gfar_init_tx_rx_base(priv);
- smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(GFAR_DOWN, &priv->state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic();
/* Start Rx/Tx DMA and enable the interrupts */ gfar_start(priv); diff --combined drivers/net/ethernet/intel/i40e/i40e_main.c index 2e72449,cef3db4..04ab8a9 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@@ -39,7 -39,7 +39,7 @@@ static const char i40e_driver_string[]
#define DRV_VERSION_MAJOR 0 #define DRV_VERSION_MINOR 3 - #define DRV_VERSION_BUILD 36 + #define DRV_VERSION_BUILD 46 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@@ -356,6 -356,7 +356,7 @@@ static struct rtnl_link_stats64 *i40e_g struct rtnl_link_stats64 *stats) { struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_ring *tx_ring, *rx_ring; struct i40e_vsi *vsi = np->vsi; struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); int i; @@@ -368,7 -369,6 +369,6 @@@
rcu_read_lock(); for (i = 0; i < vsi->num_queue_pairs; i++) { - struct i40e_ring *tx_ring, *rx_ring; u64 bytes, packets; unsigned int start;
@@@ -2312,6 -2312,8 +2312,8 @@@ static int i40e_configure_rx_ring(struc rx_ctx.crcstrip = 1; rx_ctx.l2tsel = 1; rx_ctx.showiv = 1; + /* set the prefena field to 1 because the manual says to */ + rx_ctx.prefena = 1;
/* clear the context in the HMC */ err = i40e_clear_lan_rx_queue_context(hw, pf_q); @@@ -2413,6 -2415,7 +2415,7 @@@ static int i40e_vsi_configure_rx(struc **/ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) { + struct i40e_ring *tx_ring, *rx_ring; u16 qoffset, qcount; int i, n;
@@@ -2426,8 -2429,8 +2429,8 @@@ qoffset = vsi->tc_config.tc_info[n].qoffset; qcount = vsi->tc_config.tc_info[n].qcount; for (i = qoffset; i < (qoffset + qcount); i++) { - struct i40e_ring *rx_ring = vsi->rx_rings[i]; - struct i40e_ring *tx_ring = vsi->tx_rings[i]; + rx_ring = vsi->rx_rings[i]; + tx_ring = vsi->tx_rings[i]; rx_ring->dcb_tc = n; tx_ring->dcb_tc = n; } @@@ -2565,7 -2568,6 +2568,6 @@@ static void i40e_enable_misc_int_causes I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | I40E_PFINT_ICR0_ENA_GPIO_MASK | I40E_PFINT_ICR0_ENA_TIMESYNC_MASK | - I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | I40E_PFINT_ICR0_ENA_VFLR_MASK | I40E_PFINT_ICR0_ENA_ADMINQ_MASK; @@@ -3160,9 -3162,7 +3162,7 @@@ static int i40e_vsi_control_tx(struct i usleep_range(1000, 2000); } /* Skip if the queue is already in the requested state */ - if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) - continue; - if (!enable && !(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) + if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) continue;
/* turn on/off the queue */ @@@ -3178,13 -3178,8 +3178,8 @@@ /* wait for the change to finish */ for (j = 0; j < 10; j++) { tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); - if (enable) { - if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) - break; - } else { - if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) - break; - } + if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) + break;
udelay(10); } @@@ -3223,15 -3218,9 +3218,9 @@@ static int i40e_vsi_control_rx(struct i usleep_range(1000, 2000); }
- if (enable) { - /* is STAT set ? */ - if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) - continue; - } else { - /* is !STAT set ? */ - if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) - continue; - } + /* Skip if the queue is already in the requested state */ + if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) + continue;
/* turn on/off the queue */ if (enable) @@@ -3244,13 -3233,8 +3233,8 @@@ for (j = 0; j < 10; j++) { rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
- if (enable) { - if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) - break; - } else { - if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) - break; - } + if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK)) + break;
udelay(10); } @@@ -3513,6 -3497,19 +3497,19 @@@ static void i40e_napi_disable_all(struc }
/** + * i40e_vsi_close - Shut down a VSI + * @vsi: the vsi to be quelled + **/ + static void i40e_vsi_close(struct i40e_vsi *vsi) + { + if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) + i40e_down(vsi); + i40e_vsi_free_irq(vsi); + i40e_vsi_free_tx_resources(vsi); + i40e_vsi_free_rx_resources(vsi); + } + + /** * i40e_quiesce_vsi - Pause a given VSI * @vsi: the VSI being paused **/ @@@ -3525,8 -3522,7 +3522,7 @@@ static void i40e_quiesce_vsi(struct i40 if (vsi->netdev && netif_running(vsi->netdev)) { vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); } else { - set_bit(__I40E_DOWN, &vsi->state); - i40e_down(vsi); + i40e_vsi_close(vsi); } }
@@@ -3543,7 -3539,7 +3539,7 @@@ static void i40e_unquiesce_vsi(struct i if (vsi->netdev && netif_running(vsi->netdev)) vsi->netdev->netdev_ops->ndo_open(vsi->netdev); else - i40e_up(vsi); /* this clears the DOWN bit */ + i40e_vsi_open(vsi); /* this clears the DOWN bit */ }
/** @@@ -4028,6 -4024,8 +4024,8 @@@ static void i40e_dcb_reconfigure(struc pf->vsi[v]->seid); /* Will try to configure as many components */ } else { + /* Re-configure VSI vectors based on updated TC map */ + i40e_vsi_map_rings_to_vectors(pf->vsi[v]); if (pf->vsi[v]->netdev) i40e_dcbnl_set_all(pf->vsi[v]); } @@@ -4067,6 -4065,9 +4065,9 @@@ static int i40e_init_pf_dcb(struct i40e DCB_CAP_DCBX_VER_IEEE; pf->flags |= I40E_FLAG_DCB_ENABLED; } + } else { + dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n", + pf->hw.aq.asq_last_status); }
out: @@@ -4309,24 -4310,32 +4310,32 @@@ int i40e_vsi_open(struct i40e_vsi *vsi if (err) goto err_setup_rx;
- if (!vsi->netdev) { - err = EINVAL; - goto err_setup_rx; - } - snprintf(int_name, sizeof(int_name) - 1, "%s-%s", - dev_driver_string(&pf->pdev->dev), vsi->netdev->name); - err = i40e_vsi_request_irq(vsi, int_name); - if (err) - goto err_setup_rx; + if (vsi->netdev) { + snprintf(int_name, sizeof(int_name) - 1, "%s-%s", + dev_driver_string(&pf->pdev->dev), vsi->netdev->name); + err = i40e_vsi_request_irq(vsi, int_name); + if (err) + goto err_setup_rx;
- /* Notify the stack of the actual queue counts. */ - err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs); - if (err) - goto err_set_queues; + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(vsi->netdev, + vsi->num_queue_pairs); + if (err) + goto err_set_queues;
- err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs); - if (err) - goto err_set_queues; + err = netif_set_real_num_rx_queues(vsi->netdev, + vsi->num_queue_pairs); + if (err) + goto err_set_queues; + + } else if (vsi->type == I40E_VSI_FDIR) { + snprintf(int_name, sizeof(int_name) - 1, "%s-fdir", + dev_driver_string(&pf->pdev->dev)); + err = i40e_vsi_request_irq(vsi, int_name); + } else { + err = -EINVAL; + goto err_setup_rx; + }
err = i40e_up_complete(vsi); if (err) @@@ -4383,14 -4392,7 +4392,7 @@@ static int i40e_close(struct net_devic struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi;
- if (test_and_set_bit(__I40E_DOWN, &vsi->state)) - return 0; - - i40e_down(vsi); - i40e_vsi_free_irq(vsi); - - i40e_vsi_free_tx_resources(vsi); - i40e_vsi_free_rx_resources(vsi); + i40e_vsi_close(vsi);
return 0; } @@@ -4676,7 -4678,7 +4678,7 @@@ static void i40e_service_event_complete BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
/* flush memory to make sure state is correct before next watchdog */ - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(__I40E_SERVICE_SCHED, &pf->state); }
@@@ -4709,8 -4711,7 +4711,7 @@@ void i40e_fdir_check_and_reenable(struc (pf->flags & I40E_FLAG_FD_SB_ENABLED)) return; fcnt_prog = i40e_get_current_fd_count(pf); - fcnt_avail = pf->hw.fdir_shared_filter_count + - pf->fdir_pf_filter_count; + fcnt_avail = i40e_get_fd_cnt_all(pf); if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) { if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { @@@ -5226,9 -5227,6 +5227,6 @@@ static int i40e_get_capabilities(struc } } while (err);
- /* increment MSI-X count because current FW skips one */ - pf->hw.func_caps.num_msix_vectors++; - if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) || (pf->hw.aq.fw_maj_ver < 2)) { pf->hw.func_caps.num_msix_vectors++; @@@ -5267,8 -5265,7 +5265,7 @@@ static int i40e_vsi_clear(struct i40e_v static void i40e_fdir_sb_setup(struct i40e_pf *pf) { struct i40e_vsi *vsi; - bool new_vsi = false; - int err, i; + int i;
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) return; @@@ -5288,47 -5285,12 +5285,12 @@@ pf->vsi[pf->lan_vsi]->seid, 0); if (!vsi) { dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); - goto err_vsi; + pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + return; } - new_vsi = true; - } - i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring); - - err = i40e_vsi_setup_tx_resources(vsi); - if (err) - goto err_setup_tx; - err = i40e_vsi_setup_rx_resources(vsi); - if (err) - goto err_setup_rx; - - if (new_vsi) { - char int_name[IFNAMSIZ + 9]; - err = i40e_vsi_configure(vsi); - if (err) - goto err_setup_rx; - snprintf(int_name, sizeof(int_name) - 1, "%s-fdir", - dev_driver_string(&pf->pdev->dev)); - err = i40e_vsi_request_irq(vsi, int_name); - if (err) - goto err_setup_rx; - err = i40e_up_complete(vsi); - if (err) - goto err_up_complete; - clear_bit(__I40E_NEEDS_RESTART, &vsi->state); }
- return; - - err_up_complete: - i40e_down(vsi); - i40e_vsi_free_irq(vsi); - err_setup_rx: - i40e_vsi_free_rx_resources(vsi); - err_setup_tx: - i40e_vsi_free_tx_resources(vsi); - err_vsi: - pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; - i40e_vsi_clear(vsi); + i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring); }
/** @@@ -5405,8 -5367,10 +5367,10 @@@ static void i40e_reset_and_rebuild(stru * because the reset will make them disappear. */ ret = i40e_pf_reset(hw); - if (ret) + if (ret) { dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret); + goto end_core_reset; + } pf->pfr_count++;
if (test_bit(__I40E_DOWN, &pf->state)) @@@ -5642,7 -5606,6 +5606,6 @@@ static void i40e_handle_mdd_event(struc **/ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) { - const int vxlan_hdr_qwords = 4; struct i40e_hw *hw = &pf->hw; i40e_status ret; u8 filter_index; @@@ -5660,7 -5623,6 +5623,6 @@@ port = pf->vxlan_ports[i]; ret = port ? i40e_aq_add_udp_tunnel(hw, ntohs(port), - vxlan_hdr_qwords, I40E_AQC_TUNNEL_TYPE_VXLAN, &filter_index, NULL) : i40e_aq_del_udp_tunnel(hw, i, NULL); @@@ -5987,14 -5949,12 +5949,12 @@@ static void i40e_vsi_clear_rings(struc **/ static int i40e_alloc_rings(struct i40e_vsi *vsi) { + struct i40e_ring *tx_ring, *rx_ring; struct i40e_pf *pf = vsi->back; int i;
/* Set basic values in the rings to be used later during open() */ for (i = 0; i < vsi->alloc_queue_pairs; i++) { - struct i40e_ring *tx_ring; - struct i40e_ring *rx_ring; - /* allocate space for both Tx and Rx in one shot */ tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL); if (!tx_ring) @@@ -6407,6 -6367,10 +6367,10 @@@ static int i40e_sw_init(struct i40e_pf I40E_FLAG_MSIX_ENABLED | I40E_FLAG_RX_1BUF_ENABLED;
+ /* Set default ITR */ + pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; + pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF; + /* Depending on PF configurations, it is possible that the RSS * maximum might end up larger than the available queues */ @@@ -6649,6 -6613,96 +6613,96 @@@ static void i40e_del_vxlan_port(struct }
#endif + #ifdef HAVE_FDB_OPS + #ifdef USE_CONST_DEV_UC_CHAR + static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 flags) + #else + static int i40e_ndo_fdb_add(struct ndmsg *ndm, + struct net_device *dev, + unsigned char *addr, + u16 flags) + #endif + { + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_pf *pf = np->vsi->back; + int err = 0; + + if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED)) + return -EOPNOTSUPP; + + /* Hardware does not support aging addresses so if a + * ndm_state is given only allow permanent addresses + */ + if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { + netdev_info(dev, "FDB only supports static addresses\n"); + return -EINVAL; + } + + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) + err = dev_uc_add_excl(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_add_excl(dev, addr); + else + err = -EINVAL; + + /* Only return duplicate errors if NLM_F_EXCL is set */ + if (err == -EEXIST && !(flags & NLM_F_EXCL)) + err = 0; + + return err; + } + + #ifndef USE_DEFAULT_FDB_DEL_DUMP + #ifdef USE_CONST_DEV_UC_CHAR + static int i40e_ndo_fdb_del(struct ndmsg *ndm, + struct net_device *dev, + const unsigned char *addr) + #else + static int i40e_ndo_fdb_del(struct ndmsg *ndm, + struct net_device *dev, + unsigned char *addr) + #endif + { + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_pf *pf = np->vsi->back; + int err = -EOPNOTSUPP; + + if (ndm->ndm_state & NUD_PERMANENT) { + netdev_info(dev, "FDB only supports static addresses\n"); + return -EINVAL; + } + + if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { + if (is_unicast_ether_addr(addr)) + err = dev_uc_del(dev, addr); + else if (is_multicast_ether_addr(addr)) + err = dev_mc_del(dev, addr); + else + err = -EINVAL; + } + + return err; + } + + static int i40e_ndo_fdb_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + int idx) + { + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_pf *pf = np->vsi->back; + + if (pf->flags & I40E_FLAG_SRIOV_ENABLED) + idx = ndo_dflt_fdb_dump(skb, cb, dev, idx); + + return idx; + } + + #endif /* USE_DEFAULT_FDB_DEL_DUMP */ + #endif /* HAVE_FDB_OPS */ static const struct net_device_ops i40e_netdev_ops = { .ndo_open = i40e_open, .ndo_stop = i40e_close, @@@ -6669,13 -6723,20 +6723,20 @@@ .ndo_set_features = i40e_set_features, .ndo_set_vf_mac = i40e_ndo_set_vf_mac, .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, - .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw, + .ndo_set_vf_rate = i40e_ndo_set_vf_bw, .ndo_get_vf_config = i40e_ndo_get_vf_config, .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, #ifdef CONFIG_I40E_VXLAN .ndo_add_vxlan_port = i40e_add_vxlan_port, .ndo_del_vxlan_port = i40e_del_vxlan_port, #endif + #ifdef HAVE_FDB_OPS + .ndo_fdb_add = i40e_ndo_fdb_add, + #ifndef USE_DEFAULT_FDB_DEL_DUMP + .ndo_fdb_del = i40e_ndo_fdb_del, + .ndo_fdb_dump = i40e_ndo_fdb_dump, + #endif + #endif };
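The ops table above switches .ndo_set_vf_tx_rate to the reworked .ndo_set_vf_rate, whose callback takes both a minimum and a maximum VF TX rate in Mbps (matching the min_tx_rate/max_tx_rate fields now reported via ndo_get_vf_config earlier in this merge). A hedged sketch of the new callback shape; the body is illustrative, not i40e's implementation:

	#include <linux/netdevice.h>

	static int example_set_vf_rate(struct net_device *dev, int vf,
				       int min_tx_rate, int max_tx_rate)
	{
		/* 0 means "no limit"; hardware without min-rate support
		 * typically rejects a nonzero minimum. */
		if (min_tx_rate)
			return -EINVAL;

		/* program the per-VF max_tx_rate limiter here */
		return 0;
	}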
/** @@@ -6720,10 -6781,12 +6781,12 @@@ static int i40e_config_netdev(struct i4 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_RXCSUM | NETIF_F_RXHASH | 0;
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) + netdev->features |= NETIF_F_NTUPLE; + /* copy netdev features into list of user selectable features */ netdev->hw_features |= netdev->features;
@@@ -6772,7 -6835,6 +6835,6 @@@ static void i40e_vsi_delete(struct i40e return;
i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL); - return; }
/** @@@ -6982,11 -7044,7 +7044,7 @@@ int i40e_vsi_release(struct i40e_vsi *v unregister_netdev(vsi->netdev); } } else { - if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) - i40e_down(vsi); - i40e_vsi_free_irq(vsi); - i40e_vsi_free_tx_resources(vsi); - i40e_vsi_free_rx_resources(vsi); + i40e_vsi_close(vsi); } i40e_vsi_disable_irq(vsi); } @@@ -7516,8 -7574,6 +7574,6 @@@ void i40e_veb_release(struct i40e_veb *
i40e_aq_delete_element(&pf->hw, veb->seid, NULL); i40e_veb_clear(veb); - - return; }
/** @@@ -7998,7 -8054,6 +8054,6 @@@ static void i40e_determine_queue_usage( }
pf->queues_left = queues_left; - return; }
/** @@@ -8090,6 -8145,7 +8145,7 @@@ static int i40e_probe(struct pci_dev *p u16 link_status; int err = 0; u32 len; + u32 i;
err = pci_enable_device_mem(pdev); if (err) @@@ -8243,7 -8299,7 +8299,7 @@@ if (err) { dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err); pf->flags &= ~I40E_FLAG_DCB_ENABLED; - goto err_init_dcb; + /* Continue without DCB enabled */ } #endif /* CONFIG_I40E_DCB */
@@@ -8279,6 -8335,13 +8335,13 @@@ dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); goto err_vsis; } + /* if FDIR VSI was set up, start it now */ + for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { + if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { + i40e_vsi_open(pf->vsi[i]); + break; + } + }
/* The main driver is (mostly) up and happy. We need to set this state * before setting up the misc vector or we get a race and the vector @@@ -8300,6 -8363,7 +8363,7 @@@ } }
+ #ifdef CONFIG_PCI_IOV /* prep for VF support */ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && (pf->flags & I40E_FLAG_MSIX_ENABLED) && @@@ -8322,6 -8386,7 +8386,7 @@@ err); } } + #endif /* CONFIG_PCI_IOV */
pfs_found++;
@@@ -8332,6 -8397,7 +8397,7 @@@ dv.minor_version = DRV_VERSION_MINOR; dv.build_version = DRV_VERSION_BUILD; dv.subbuild_version = 0; + strncpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string)); i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
/* since everything's happy, start the service_task timer */ @@@ -8373,9 -8439,6 +8439,6 @@@ err_vsis err_switch_setup: i40e_reset_interrupt_capability(pf); del_timer_sync(&pf->service_timer); - #ifdef CONFIG_I40E_DCB - err_init_dcb: - #endif /* CONFIG_I40E_DCB */ err_mac_addr: err_configure_lan_hmc: (void)i40e_shutdown_lan_hmc(hw); diff --combined drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index c047c3e,ea11e2c..86da067 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@@ -301,7 -301,7 +301,7 @@@ static void ixgbe_remove_adapter(struc ixgbe_service_event_schedule(adapter); }
- void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) + static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) { u32 value;
@@@ -320,6 -320,32 +320,32 @@@ ixgbe_remove_adapter(hw); }
+ /** + * ixgbe_read_reg - Read from device register + * @hw: hw specific details + * @reg: offset of register to read + * + * Returns : value read or IXGBE_FAILED_READ_REG if removed + * + * This function is used to read device registers. It checks for device + * removal by confirming any read that returns all ones by checking the + * status register value for all ones. This function avoids reading from + * the hardware if a removal was previously detected in which case it + * returns IXGBE_FAILED_READ_REG (all ones). + */ + u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) + { + u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + u32 value; + + if (ixgbe_removed(reg_addr)) + return IXGBE_FAILED_READ_REG; + value = readl(reg_addr + reg); + if (unlikely(value == IXGBE_FAILED_READ_REG)) + ixgbe_check_remove(hw, reg); + return value; + } + static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev) { u16 value; @@@ -376,7 -402,7 +402,7 @@@ static void ixgbe_service_event_complet BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
/* flush memory to make sure state is correct before next watchdog */ - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); }
@@@ -3743,35 -3769,6 +3769,6 @@@ static int ixgbe_vlan_rx_kill_vid(struc }
/** - * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering - * @adapter: driver data - */ - static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter) - { - struct ixgbe_hw *hw = &adapter->hw; - u32 vlnctrl; - - vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); - } - - /** - * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering - * @adapter: driver data - */ - static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter) - { - struct ixgbe_hw *hw = &adapter->hw; - u32 vlnctrl; - - vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - vlnctrl |= IXGBE_VLNCTRL_VFE; - vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); - } - - /** * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping * @adapter: driver data */ @@@ -3850,6 -3847,158 +3847,158 @@@ static void ixgbe_restore_vlan(struct i }
/** + * ixgbe_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + **/ + static int ixgbe_write_mc_addr_list(struct net_device *netdev) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + + if (!netif_running(netdev)) + return 0; + + if (hw->mac.ops.update_mc_addr_list) + hw->mac.ops.update_mc_addr_list(hw, netdev); + else + return -ENOMEM; + + #ifdef CONFIG_PCI_IOV + ixgbe_restore_vf_multicasts(adapter); + #endif + + return netdev_mc_count(netdev); + } + + #ifdef CONFIG_PCI_IOV + void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; + int i; + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE) + hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr, + adapter->mac_table[i].queue, + IXGBE_RAH_AV); + else + hw->mac.ops.clear_rar(hw, i); + + adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED); + } + } + #endif + + static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; + int i; + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) { + if (adapter->mac_table[i].state & + IXGBE_MAC_STATE_IN_USE) + hw->mac.ops.set_rar(hw, i, + adapter->mac_table[i].addr, + adapter->mac_table[i].queue, + IXGBE_RAH_AV); + else + hw->mac.ops.clear_rar(hw, i); + + adapter->mac_table[i].state &= + ~(IXGBE_MAC_STATE_MODIFIED); + } + } + } + + static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) + { + int i; + struct ixgbe_hw *hw = &adapter->hw; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; + adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + adapter->mac_table[i].queue = 0; + } + ixgbe_sync_mac_table(adapter); + } + + static int ixgbe_available_rars(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; + int i, count = 0; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state == 0) + count++; + } + return count; + } + + /* this function destroys the first RAR entry */ + static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter, + u8 *addr) + { + struct ixgbe_hw *hw = &adapter->hw; + + memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN); + adapter->mac_table[0].queue = VMDQ_P(0); + adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT | + IXGBE_MAC_STATE_IN_USE); + hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr, + adapter->mac_table[0].queue, + IXGBE_RAH_AV); + } + + int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue) + { + struct ixgbe_hw *hw = &adapter->hw; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE) + continue; + adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED | + IXGBE_MAC_STATE_IN_USE); + ether_addr_copy(adapter->mac_table[i].addr, addr); + adapter->mac_table[i].queue = queue; + ixgbe_sync_mac_table(adapter); + return i; + } + return -ENOMEM; + } + + int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue) + { + /* search table for addr, if found, set to 0 
and sync */ + int i; + struct ixgbe_hw *hw = &adapter->hw; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + + for (i = 0; i < hw->mac.num_rar_entries; i++) { + if (ether_addr_equal(addr, adapter->mac_table[i].addr) && + adapter->mac_table[i].queue == queue) { + adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; + adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + adapter->mac_table[i].queue = 0; + ixgbe_sync_mac_table(adapter); + return 0; + } + } + return -ENOMEM; + } + /** * ixgbe_write_uc_addr_list - write unicast addresses to RAR table * @netdev: network interface device structure * @@@ -3858,39 -4007,23 +4007,23 @@@ * 0 on no addresses written * X on writing X addresses to the RAR table **/ - static int ixgbe_write_uc_addr_list(struct net_device *netdev) + static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - unsigned int rar_entries = hw->mac.num_rar_entries - 1; int count = 0;
- /* In SR-IOV/VMDQ modes significantly less RAR entries are available */ - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - rar_entries = IXGBE_MAX_PF_MACVLANS - 1; - /* return ENOMEM indicating insufficient memory for addresses */ - if (netdev_uc_count(netdev) > rar_entries) + if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter)) return -ENOMEM;
if (!netdev_uc_empty(netdev)) { struct netdev_hw_addr *ha; - /* return error if we do not support writing to RAR table */ - if (!hw->mac.ops.set_rar) - return -ENOMEM; - netdev_for_each_uc_addr(ha, netdev) { - if (!rar_entries) - break; - hw->mac.ops.set_rar(hw, rar_entries--, ha->addr, - VMDQ_P(0), IXGBE_RAH_AV); + ixgbe_del_mac_filter(adapter, ha->addr, vfn); + ixgbe_add_mac_filter(adapter, ha->addr, vfn); count++; } } - /* write the addresses in reverse order to avoid write combining */ - for (; rar_entries > 0 ; rar_entries--) - hw->mac.ops.clear_rar(hw, rar_entries); - return count; }
@@@ -3908,11 -4041,12 +4041,12 @@@ void ixgbe_set_rx_mode(struct net_devic struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; + u32 vlnctrl; int count;
/* Check for Promiscuous and All Multicast modes */ - fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
/* set all bits that we expect to always be set */ fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */ @@@ -3922,26 -4056,24 +4056,24 @@@
/* clear the bits we are changing the status of */ fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); - + vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); if (netdev->flags & IFF_PROMISC) { hw->addr_ctrl.user_set_promisc = true; fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); - vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE); + vmolr |= IXGBE_VMOLR_MPE; /* Only disable hardware filter vlans in promiscuous mode * if SR-IOV and VMDQ are disabled - otherwise ensure * that hardware VLAN filters remain enabled. */ if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED))) - ixgbe_vlan_filter_disable(adapter); - else - ixgbe_vlan_filter_enable(adapter); + vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); } else { if (netdev->flags & IFF_ALLMULTI) { fctrl |= IXGBE_FCTRL_MPE; vmolr |= IXGBE_VMOLR_MPE; } - ixgbe_vlan_filter_enable(adapter); + vlnctrl |= IXGBE_VLNCTRL_VFE; hw->addr_ctrl.user_set_promisc = false; }
@@@ -3950,7 -4082,7 +4082,7 @@@ * sufficient space to store all the addresses then enable * unicast promiscuous mode */ - count = ixgbe_write_uc_addr_list(netdev); + count = ixgbe_write_uc_addr_list(netdev, VMDQ_P(0)); if (count < 0) { fctrl |= IXGBE_FCTRL_UPE; vmolr |= IXGBE_VMOLR_ROPE; @@@ -3960,11 -4092,13 +4092,13 @@@ * then we should just turn on promiscuous mode so * that we can at least receive multicast traffic */ - hw->mac.ops.update_mc_addr_list(hw, netdev); - vmolr |= IXGBE_VMOLR_ROMPE; - - if (adapter->num_vfs) - ixgbe_restore_vf_multicasts(adapter); + count = ixgbe_write_mc_addr_list(netdev); + if (count < 0) { + fctrl |= IXGBE_FCTRL_MPE; + vmolr |= IXGBE_VMOLR_MPE; + } else if (count) { + vmolr |= IXGBE_VMOLR_ROMPE; + }
if (hw->mac.type != ixgbe_mac_82598EB) { vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) & @@@ -3985,6 -4119,7 +4119,7 @@@ /* NOTE: VLAN filtering is disabled by setting PROMISC */ }
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) @@@ -4101,8 -4236,8 +4236,8 @@@ static int ixgbe_hpbthresh(struct ixgbe (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && (pb == ixgbe_fcoe_get_tc(adapter))) tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; - #endif + /* Calculate delay value for device */ switch (hw->mac.type) { case ixgbe_mac_X540: @@@ -4143,7 -4278,7 +4278,7 @@@ * @adapter: board private structure to calculate for * @pb: packet buffer to calculate */ - static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter) + static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) { struct ixgbe_hw *hw = &adapter->hw; struct net_device *dev = adapter->netdev; @@@ -4153,6 -4288,14 +4288,14 @@@ /* Calculate max LAN frame size */ tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ #ifdef IXGBE_FCOE + /* FCoE traffic class uses FCOE jumbo frames */ + if ((dev->features & NETIF_F_FCOE_MTU) && + (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && + (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up))) + tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; + #endif + /* Calculate delay value for device */ switch (hw->mac.type) { case ixgbe_mac_X540: @@@ -4179,15 -4322,17 +4322,17 @@@ static void ixgbe_pbthresh_setup(struc if (!num_tc) num_tc = 1;
- hw->fc.low_water = ixgbe_lpbthresh(adapter); - for (i = 0; i < num_tc; i++) { hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i); + hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
/* Low water marks must not be larger than high water marks */ - if (hw->fc.low_water > hw->fc.high_water[i]) - hw->fc.low_water = 0; + if (hw->fc.low_water[i] > hw->fc.high_water[i]) + hw->fc.low_water[i] = 0; } + + for (; i < MAX_TRAFFIC_CLASS; i++) + hw->fc.high_water[i] = 0; }
static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) @@@ -4249,20 -4394,10 +4394,10 @@@ static void ixgbe_macvlan_set_rx_mode(s vmolr |= IXGBE_VMOLR_ROMPE; hw->mac.ops.update_mc_addr_list(hw, dev); } - ixgbe_write_uc_addr_list(adapter->netdev); + ixgbe_write_uc_addr_list(adapter->netdev, pool); IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); }
- static void ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, - u8 *addr, u16 pool) - { - struct ixgbe_hw *hw = &adapter->hw; - unsigned int entry; - - entry = hw->mac.num_rar_entries - pool; - hw->mac.ops.set_rar(hw, entry, addr, VMDQ_P(pool), IXGBE_RAH_AV); - } - static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter) { struct ixgbe_adapter *adapter = vadapter->real_adapter; @@@ -4672,7 -4807,7 +4807,7 @@@ static void ixgbe_up_complete(struct ix if (hw->mac.ops.enable_tx_laser) hw->mac.ops.enable_tx_laser(hw);
- smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(__IXGBE_DOWN, &adapter->state); ixgbe_napi_enable_all(adapter);
@@@ -4742,7 -4877,9 +4877,9 @@@ void ixgbe_up(struct ixgbe_adapter *ada void ixgbe_reset(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; int err; + u8 old_addr[ETH_ALEN];
if (ixgbe_removed(hw->hw_addr)) return; @@@ -4778,9 -4915,10 +4915,10 @@@ }
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); - - /* reprogram the RAR[0] in case user changed it. */ - hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV); + /* do not flush user set addresses */ + memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len); + ixgbe_flush_sw_mac_table(adapter); + ixgbe_mac_set_default_filter(adapter, old_addr);
/* update SAN MAC vmdq pool selection */ if (hw->mac.san_mac_rar_index) @@@ -5026,6 -5164,10 +5164,10 @@@ static int ixgbe_sw_init(struct ixgbe_a #endif /* CONFIG_IXGBE_DCB */ #endif /* IXGBE_FCOE */
+ adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) * + hw->mac.num_rar_entries, + GFP_ATOMIC); + /* Set MAC specific capability flags and exceptions */ switch (hw->mac.type) { case ixgbe_mac_82598EB: @@@ -5517,6 -5659,17 +5659,17 @@@ err_setup_tx return err; }
+ static void ixgbe_close_suspend(struct ixgbe_adapter *adapter) + { + ixgbe_ptp_suspend(adapter); + + ixgbe_down(adapter); + ixgbe_free_irq(adapter); + + ixgbe_free_all_tx_resources(adapter); + ixgbe_free_all_rx_resources(adapter); + } + /** * ixgbe_close - Disables a network interface * @netdev: network interface device structure @@@ -5534,14 -5687,10 +5687,10 @@@ static int ixgbe_close(struct net_devic
ixgbe_ptp_stop(adapter);
- ixgbe_down(adapter); - ixgbe_free_irq(adapter); + ixgbe_close_suspend(adapter);
ixgbe_fdir_filter_exit(adapter);
- ixgbe_free_all_tx_resources(adapter); - ixgbe_free_all_rx_resources(adapter); - ixgbe_release_hw_control(adapter);
return 0; @@@ -5568,7 -5717,7 +5717,7 @@@ static int ixgbe_resume(struct pci_dev e_dev_err("Cannot enable PCI device from suspend\n"); return err; } - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(__IXGBE_DISABLED, &adapter->state); pci_set_master(pdev);
@@@ -5608,12 -5757,8 +5757,8 @@@ static int __ixgbe_shutdown(struct pci_ netif_device_detach(netdev);
rtnl_lock(); - if (netif_running(netdev)) { - ixgbe_down(adapter); - ixgbe_free_irq(adapter); - ixgbe_free_all_tx_resources(adapter); - ixgbe_free_all_rx_resources(adapter); - } + if (netif_running(netdev)) + ixgbe_close_suspend(adapter); rtnl_unlock();
ixgbe_clear_interrupt_scheme(adapter); @@@ -5945,7 -6090,7 +6090,7 @@@ static void ixgbe_fdir_reinit_subtask(s if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { for (i = 0; i < adapter->num_tx_queues; i++) set_bit(__IXGBE_TX_FDIR_INIT_DONE, - &(adapter->tx_ring[i]->state)); + &(adapter->tx_ring[i]->state)); /* re-enable flow director interrupts */ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR); } else { @@@ -7172,16 -7317,17 +7317,17 @@@ static int ixgbe_set_mac(struct net_dev struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; struct sockaddr *addr = p; + int ret;
if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL;
+ ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV); - - return 0; + ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); + return ret > 0 ? 0 : ret; }
static int @@@ -7783,7 -7929,7 +7929,7 @@@ static const struct net_device_ops ixgb .ndo_do_ioctl = ixgbe_ioctl, .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, - .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, + .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, .ndo_get_vf_config = ixgbe_ndo_get_vf_config, .ndo_get_stats64 = ixgbe_get_stats64, @@@ -8187,6 -8333,8 +8333,8 @@@ skip_sriov goto err_sw_init; }
+ ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr); + setup_timer(&adapter->service_timer, &ixgbe_service_timer, (unsigned long) adapter);
@@@ -8242,7 -8390,7 +8390,7 @@@ if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", hw->mac.type, hw->phy.type, hw->phy.sfp_type, - part_str); + part_str); else e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", hw->mac.type, hw->phy.type, part_str); @@@ -8304,8 -8452,8 +8452,8 @@@
ixgbe_dbg_adapter_init(adapter);
- /* Need link setup for MNG FW, else wait for IXGBE_UP */ - if (ixgbe_mng_enabled(hw) && hw->mac.ops.setup_link) + /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */ + if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link) hw->mac.ops.setup_link(hw, IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL, true); @@@ -8319,6 -8467,7 +8467,7 @@@ err_sw_init ixgbe_disable_sriov(adapter); adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; iounmap(adapter->io_addr); + kfree(adapter->mac_table); err_ioremap: free_netdev(netdev); err_alloc_etherdev: @@@ -8392,6 -8541,7 +8541,7 @@@ static void ixgbe_remove(struct pci_de
e_dev_info("complete\n");
+ kfree(adapter->mac_table); free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev); @@@ -8542,7 -8692,7 +8692,7 @@@ static pci_ers_result_t ixgbe_io_slot_r e_err(probe, "Cannot re-enable PCI device after reset.\n"); result = PCI_ERS_RESULT_DISCONNECT; } else { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(__IXGBE_DISABLED, &adapter->state); adapter->hw.hw_addr = adapter->io_addr; pci_set_master(pdev); diff --combined drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index de2793b,eacce3a..75467f8 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@@ -85,7 -85,7 +85,7 @@@ static DEFINE_PCI_DEVICE_TABLE(ixgbevf_ MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, linux.nics@intel.com"); - MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver"); + MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
@@@ -1668,7 -1668,7 +1668,7 @@@ static void ixgbevf_up_complete(struct
spin_unlock_bh(&adapter->mbx_lock);
- smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(__IXGBEVF_DOWN, &adapter->state); ixgbevf_napi_enable_all(adapter);
@@@ -3354,7 -3354,7 +3354,7 @@@ static int ixgbevf_resume(struct pci_de dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); return err; } - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(__IXGBEVF_DISABLED, &adapter->state); pci_set_master(pdev);
@@@ -3712,7 -3712,7 +3712,7 @@@ static pci_ers_result_t ixgbevf_io_slot return PCI_ERS_RESULT_DISCONNECT; }
- smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(__IXGBEVF_DISABLED, &adapter->state); pci_set_master(pdev);
diff --combined drivers/net/ethernet/mellanox/mlx4/main.c index c187d74,38e9a4c..19606a4 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@@ -104,8 -104,6 +104,6 @@@ module_param(enable_64b_cqe_eqe, bool, MODULE_PARM_DESC(enable_64b_cqe_eqe, "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
- #define HCA_GLOBAL_CAP_MASK 0 - #define PF_CONTEXT_BEHAVIOUR_MASK MLX4_FUNC_CAP_64B_EQE_CQE
static char mlx4_version[] = @@@ -134,8 -132,7 +132,7 @@@ MODULE_PARM_DESC(log_num_vlan, "Log2 ma
static bool use_prio; module_param_named(use_prio, use_prio, bool, 0444); - MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " - "(0/1, default 0)"); + MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); @@@ -163,8 -160,7 +160,7 @@@ int mlx4_check_port_params(struct mlx4_ for (i = 0; i < dev->caps.num_ports - 1; i++) { if (port_type[i] != port_type[i + 1]) { if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { - mlx4_err(dev, "Only same port types supported " - "on this HCA, aborting.\n"); + mlx4_err(dev, "Only same port types supported on this HCA, aborting\n"); return -EINVAL; } } @@@ -172,8 -168,8 +168,8 @@@
for (i = 0; i < dev->caps.num_ports; i++) { if (!(port_type[i] & dev->caps.supported_type[i+1])) { - mlx4_err(dev, "Requested port type for port %d is not " - "supported on this HCA\n", i + 1); + mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n", + i + 1); return -EINVAL; } } @@@ -195,26 -191,23 +191,23 @@@ static int mlx4_dev_cap(struct mlx4_de
err = mlx4_QUERY_DEV_CAP(dev, dev_cap); if (err) { - mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); return err; }
if (dev_cap->min_page_sz > PAGE_SIZE) { - mlx4_err(dev, "HCA minimum page size of %d bigger than " - "kernel PAGE_SIZE of %ld, aborting.\n", + mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", dev_cap->min_page_sz, PAGE_SIZE); return -ENODEV; } if (dev_cap->num_ports > MLX4_MAX_PORTS) { - mlx4_err(dev, "HCA has %d ports, but we only support %d, " - "aborting.\n", + mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", dev_cap->num_ports, MLX4_MAX_PORTS); return -ENODEV; }
if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) { - mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than " - "PCI resource 2 size of 0x%llx, aborting.\n", + mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", dev_cap->uar_size, (unsigned long long) pci_resource_len(dev->pdev, 2)); return -ENODEV; @@@ -296,7 -289,6 +289,6 @@@
dev->caps.log_num_macs = log_num_mac; dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; - dev->caps.log_num_prios = use_prio ? 3 : 0;
for (i = 1; i <= dev->caps.num_ports; ++i) { dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; @@@ -347,14 -339,12 +339,12 @@@
if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) { dev->caps.log_num_macs = dev_cap->log_max_macs[i]; - mlx4_warn(dev, "Requested number of MACs is too much " - "for port %d, reducing to %d.\n", + mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n", i, 1 << dev->caps.log_num_macs); } if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) { dev->caps.log_num_vlans = dev_cap->log_max_vlans[i]; - mlx4_warn(dev, "Requested number of VLANs is too much " - "for port %d, reducing to %d.\n", + mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n", i, 1 << dev->caps.log_num_vlans); } } @@@ -366,7 -356,6 +356,6 @@@ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = (1 << dev->caps.log_num_macs) * (1 << dev->caps.log_num_vlans) * - (1 << dev->caps.log_num_prios) * dev->caps.num_ports; dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
@@@ -584,13 -573,14 +573,14 @@@ static int mlx4_slave_cap(struct mlx4_d memset(&hca_param, 0, sizeof(hca_param)); err = mlx4_QUERY_HCA(dev, &hca_param); if (err) { - mlx4_err(dev, "QUERY_HCA command failed, aborting.\n"); + mlx4_err(dev, "QUERY_HCA command failed, aborting\n"); return err; }
- /*fail if the hca has an unknown capability */ - if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) != - HCA_GLOBAL_CAP_MASK) { + /* fail if the hca has an unknown global capability + * at this time global_caps should be always zeroed + */ + if (hca_param.global_caps) { mlx4_err(dev, "Unknown hca global capabilities\n"); return -ENOSYS; } @@@ -603,19 -593,18 +593,18 @@@ dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; err = mlx4_dev_cap(dev, &dev_cap); if (err) { - mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); return err; }
err = mlx4_QUERY_FW(dev); if (err) - mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n"); + mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");
page_size = ~dev->caps.page_size_cap + 1; mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); if (page_size > PAGE_SIZE) { - mlx4_err(dev, "HCA minimum page size of %d bigger than " - "kernel PAGE_SIZE of %ld, aborting.\n", + mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", page_size, PAGE_SIZE); return -ENODEV; } @@@ -633,8 -622,8 +622,8 @@@ memset(&func_cap, 0, sizeof(func_cap)); err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); if (err) { - mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d).\n", - err); + mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n", + err); return err; }
@@@ -661,8 -650,8 +650,8 @@@ dev->caps.num_amgms = 0;
if (dev->caps.num_ports > MLX4_MAX_PORTS) { - mlx4_err(dev, "HCA has %d ports, but we only support %d, " - "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS); + mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", + dev->caps.num_ports, MLX4_MAX_PORTS); return -ENODEV; }
@@@ -680,8 -669,8 +669,8 @@@ for (i = 1; i <= dev->caps.num_ports; ++i) { err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap); if (err) { - mlx4_err(dev, "QUERY_FUNC_CAP port command failed for" - " port %d, aborting (%d).\n", i, err); + mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n", + i, err); goto err_mem; } dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn; @@@ -699,8 -688,7 +688,7 @@@ if (dev->caps.uar_page_size * (dev->caps.num_uars - dev->caps.reserved_uars) > pci_resource_len(dev->pdev, 2)) { - mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than " - "PCI resource 2 size of 0x%llx, aborting.\n", + mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", dev->caps.uar_page_size * dev->caps.num_uars, (unsigned long long) pci_resource_len(dev->pdev, 2)); goto err_mem; @@@ -722,7 -710,7 +710,7 @@@ }
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; - mlx4_warn(dev, "Timestamping is not supported in slave mode.\n"); + mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
@@@ -784,8 -772,8 +772,8 @@@ int mlx4_change_port_types(struct mlx4_ dev->caps.port_type[port] = port_types[port - 1]; err = mlx4_SET_PORT(dev, port, -1); if (err) { - mlx4_err(dev, "Failed to set port %d, " - "aborting\n", port); + mlx4_err(dev, "Failed to set port %d, aborting\n", + port); goto out; } } @@@ -868,9 -856,7 +856,7 @@@ static ssize_t set_port_type(struct dev } } if (err) { - mlx4_err(mdev, "Auto sensing is not supported on this HCA. " - "Set only 'eth' or 'ib' for both ports " - "(should be the same)\n"); + mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n"); goto out; }
@@@ -975,8 -961,8 +961,8 @@@ static ssize_t set_port_ib_mtu(struct d mlx4_CLOSE_PORT(mdev, port); err = mlx4_SET_PORT(mdev, port, -1); if (err) { - mlx4_err(mdev, "Failed to set port %d, " - "aborting\n", port); + mlx4_err(mdev, "Failed to set port %d, aborting\n", + port); goto err_set_port; } } @@@ -995,19 -981,19 +981,19 @@@ static int mlx4_load_fw(struct mlx4_de priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, GFP_HIGHUSER | __GFP_NOWARN, 0); if (!priv->fw.fw_icm) { - mlx4_err(dev, "Couldn't allocate FW area, aborting.\n"); + mlx4_err(dev, "Couldn't allocate FW area, aborting\n"); return -ENOMEM; }
err = mlx4_MAP_FA(dev, priv->fw.fw_icm); if (err) { - mlx4_err(dev, "MAP_FA command failed, aborting.\n"); + mlx4_err(dev, "MAP_FA command failed, aborting\n"); goto err_free; }
err = mlx4_RUN_FW(dev); if (err) { - mlx4_err(dev, "RUN_FW command failed, aborting.\n"); + mlx4_err(dev, "RUN_FW command failed, aborting\n"); goto err_unmap_fa; }
@@@ -1091,30 -1077,30 +1077,30 @@@ static int mlx4_init_icm(struct mlx4_de
err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); if (err) { - mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n"); + mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n"); return err; }
- mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n", + mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n", (unsigned long long) icm_size >> 10, (unsigned long long) aux_pages << 2);
priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, GFP_HIGHUSER | __GFP_NOWARN, 0); if (!priv->fw.aux_icm) { - mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n"); + mlx4_err(dev, "Couldn't allocate aux memory, aborting\n"); return -ENOMEM; }
err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); if (err) { - mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n"); + mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n"); goto err_free_aux; }
err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); if (err) { - mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n"); + mlx4_err(dev, "Failed to map cMPT context memory, aborting\n"); goto err_unmap_aux; }
@@@ -1125,7 -1111,7 +1111,7 @@@ init_hca->eqc_base, dev_cap->eqc_entry_sz, num_eqs, num_eqs, 0, 0); if (err) { - mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); + mlx4_err(dev, "Failed to map EQ context memory, aborting\n"); goto err_unmap_cmpt; }
@@@ -1146,7 -1132,7 +1132,7 @@@ dev->caps.num_mtts, dev->caps.reserved_mtts, 1, 0); if (err) { - mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); + mlx4_err(dev, "Failed to map MTT context memory, aborting\n"); goto err_unmap_eq; }
@@@ -1156,7 -1142,7 +1142,7 @@@ dev->caps.num_mpts, dev->caps.reserved_mrws, 1, 1); if (err) { - mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n"); + mlx4_err(dev, "Failed to map dMPT context memory, aborting\n"); goto err_unmap_mtt; }
@@@ -1167,7 -1153,7 +1153,7 @@@ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 0, 0); if (err) { - mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); + mlx4_err(dev, "Failed to map QP context memory, aborting\n"); goto err_unmap_dmpt; }
@@@ -1178,7 -1164,7 +1164,7 @@@ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 0, 0); if (err) { - mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); + mlx4_err(dev, "Failed to map AUXC context memory, aborting\n"); goto err_unmap_qp; }
@@@ -1189,7 -1175,7 +1175,7 @@@ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 0, 0); if (err) { - mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); + mlx4_err(dev, "Failed to map ALTC context memory, aborting\n"); goto err_unmap_auxc; }
@@@ -1210,7 -1196,7 +1196,7 @@@ dev->caps.num_cqs, dev->caps.reserved_cqs, 0, 0); if (err) { - mlx4_err(dev, "Failed to map CQ context memory, aborting.\n"); + mlx4_err(dev, "Failed to map CQ context memory, aborting\n"); goto err_unmap_rdmarc; }
@@@ -1220,7 -1206,7 +1206,7 @@@ dev->caps.num_srqs, dev->caps.reserved_srqs, 0, 0); if (err) { - mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n"); + mlx4_err(dev, "Failed to map SRQ context memory, aborting\n"); goto err_unmap_cq; }
@@@ -1238,7 -1224,7 +1224,7 @@@ dev->caps.num_mgms + dev->caps.num_amgms, 0, 0); if (err) { - mlx4_err(dev, "Failed to map MCG context memory, aborting.\n"); + mlx4_err(dev, "Failed to map MCG context memory, aborting\n"); goto err_unmap_srq; }
@@@ -1315,7 -1301,7 +1301,7 @@@ static void mlx4_slave_exit(struct mlx4
mutex_lock(&priv->cmd.slave_cmd_mutex); if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME)) - mlx4_warn(dev, "Failed to close slave function.\n"); + mlx4_warn(dev, "Failed to close slave function\n"); mutex_unlock(&priv->cmd.slave_cmd_mutex); }
@@@ -1413,7 -1399,7 +1399,7 @@@ static int mlx4_init_slave(struct mlx4_ u32 cmd_channel_ver;
if (atomic_read(&pf_loading)) { - mlx4_warn(dev, "PF is not ready. Deferring probe\n"); + mlx4_warn(dev, "PF is not ready - Deferring probe\n"); return -EPROBE_DEFER; }
@@@ -1426,8 -1412,7 +1412,7 @@@ * NUM_OF_RESET_RETRIES times before leaving.*/ if (ret_from_reset) { if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { - mlx4_warn(dev, "slave is currently in the " - "middle of FLR. Deferring probe.\n"); + mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n"); mutex_unlock(&priv->cmd.slave_cmd_mutex); return -EPROBE_DEFER; } else @@@ -1441,8 -1426,7 +1426,7 @@@
if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != MLX4_COMM_GET_IF_REV(slave_read)) { - mlx4_err(dev, "slave driver version is not supported" - " by the master\n"); + mlx4_err(dev, "slave driver version is not supported by the master\n"); goto err; }
@@@ -1520,8 -1504,7 +1504,7 @@@ static void choose_steering_mode(struc
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) - mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags " - "set to use B0 steering. Falling back to A0 steering mode.\n"); + mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n"); } dev->oper_log_mgm_entry_size = mlx4_log_num_mgm_entry_size > 0 ? @@@ -1529,8 -1512,7 +1512,7 @@@ MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); } - mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, " - "modparam log_num_mgm_entry_size = %d\n", + mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n", mlx4_steering_mode_str(dev->caps.steering_mode), dev->oper_log_mgm_entry_size, mlx4_log_num_mgm_entry_size); @@@ -1564,15 -1546,15 +1546,15 @@@ static int mlx4_init_hca(struct mlx4_de err = mlx4_QUERY_FW(dev); if (err) { if (err == -EACCES) - mlx4_info(dev, "non-primary physical function, skipping.\n"); + mlx4_info(dev, "non-primary physical function, skipping\n"); else - mlx4_err(dev, "QUERY_FW command failed, aborting.\n"); + mlx4_err(dev, "QUERY_FW command failed, aborting\n"); return err; }
err = mlx4_load_fw(dev); if (err) { - mlx4_err(dev, "Failed to start FW, aborting.\n"); + mlx4_err(dev, "Failed to start FW, aborting\n"); return err; }
@@@ -1584,7 -1566,7 +1566,7 @@@
err = mlx4_dev_cap(dev, &dev_cap); if (err) { - mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); goto err_stop_fw; }
@@@ -1625,7 -1607,7 +1607,7 @@@
err = mlx4_INIT_HCA(dev, &init_hca); if (err) { - mlx4_err(dev, "INIT_HCA command failed, aborting.\n"); + mlx4_err(dev, "INIT_HCA command failed, aborting\n"); goto err_free_icm; } /* @@@ -1636,7 -1618,7 +1618,7 @@@ memset(&init_hca, 0, sizeof(init_hca)); err = mlx4_QUERY_HCA(dev, &init_hca); if (err) { - mlx4_err(dev, "QUERY_HCA command failed, disable timestamp.\n"); + mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n"); dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; } else { dev->caps.hca_core_clock = @@@ -1649,14 -1631,14 +1631,14 @@@ if (!dev->caps.hca_core_clock) { dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; mlx4_err(dev, - "HCA frequency is 0. Timestamping is not supported."); + "HCA frequency is 0 - timestamping is not supported\n"); } else if (map_internal_clock(dev)) { /* * Map internal clock, * in case of failure disable timestamping */ dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; - mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported.\n"); + mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n"); } } } else { @@@ -1683,7 -1665,7 +1665,7 @@@
err = mlx4_QUERY_ADAPTER(dev, &adapter); if (err) { - mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n"); + mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n"); goto unmap_bf; }
@@@ -1793,79 -1775,69 +1775,69 @@@ static int mlx4_setup_hca(struct mlx4_d
err = mlx4_init_uar_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "user access region table, aborting.\n"); - return err; + mlx4_err(dev, "Failed to initialize user access region table, aborting\n"); + return err; }
err = mlx4_uar_alloc(dev, &priv->driver_uar); if (err) { - mlx4_err(dev, "Failed to allocate driver access region, " - "aborting.\n"); + mlx4_err(dev, "Failed to allocate driver access region, aborting\n"); goto err_uar_table_free; }
priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); if (!priv->kar) { - mlx4_err(dev, "Couldn't map kernel access region, " - "aborting.\n"); + mlx4_err(dev, "Couldn't map kernel access region, aborting\n"); err = -ENOMEM; goto err_uar_free; }
err = mlx4_init_pd_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "protection domain table, aborting.\n"); + mlx4_err(dev, "Failed to initialize protection domain table, aborting\n"); goto err_kar_unmap; }
err = mlx4_init_xrcd_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "reliable connection domain table, aborting.\n"); + mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n"); goto err_pd_table_free; }
err = mlx4_init_mr_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "memory region table, aborting.\n"); + mlx4_err(dev, "Failed to initialize memory region table, aborting\n"); goto err_xrcd_table_free; }
if (!mlx4_is_slave(dev)) { err = mlx4_init_mcg_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n"); + mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); goto err_mr_table_free; } }
err = mlx4_init_eq_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "event queue table, aborting.\n"); + mlx4_err(dev, "Failed to initialize event queue table, aborting\n"); goto err_mcg_table_free; }
err = mlx4_cmd_use_events(dev); if (err) { - mlx4_err(dev, "Failed to switch to event-driven " - "firmware commands, aborting.\n"); + mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n"); goto err_eq_table_free; }
err = mlx4_NOP(dev); if (err) { if (dev->flags & MLX4_FLAG_MSI_X) { - mlx4_warn(dev, "NOP command failed to generate MSI-X " - "interrupt IRQ %d).\n", + mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n", priv->eq_table.eq[dev->caps.num_comp_vectors].irq); - mlx4_warn(dev, "Trying again without MSI-X.\n"); + mlx4_warn(dev, "Trying again without MSI-X\n"); } else { - mlx4_err(dev, "NOP command failed to generate interrupt " - "(IRQ %d), aborting.\n", + mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n", priv->eq_table.eq[dev->caps.num_comp_vectors].irq); mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); } @@@ -1877,28 -1849,25 +1849,25 @@@
err = mlx4_init_cq_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "completion queue table, aborting.\n"); + mlx4_err(dev, "Failed to initialize completion queue table, aborting\n"); goto err_cmd_poll; }
err = mlx4_init_srq_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "shared receive queue table, aborting.\n"); + mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n"); goto err_cq_table_free; }
err = mlx4_init_qp_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "queue pair table, aborting.\n"); + mlx4_err(dev, "Failed to initialize queue pair table, aborting\n"); goto err_srq_table_free; }
err = mlx4_init_counters_table(dev); if (err && err != -ENOENT) { - mlx4_err(dev, "Failed to initialize counters table, aborting.\n"); + mlx4_err(dev, "Failed to initialize counters table, aborting\n"); goto err_qp_table_free; }
@@@ -1908,9 -1877,8 +1877,8 @@@ err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps); if (err) - mlx4_warn(dev, "failed to get port %d default " - "ib capabilities (%d). Continuing " - "with caps = 0\n", port, err); + mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n", + port, err); dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
/* initialize per-slave default ib port capabilities */ @@@ -1920,7 -1888,7 +1888,7 @@@ if (i == mlx4_master_func_num(dev)) continue; priv->mfunc.master.slave_state[i].ib_cap_mask[port] = - ib_port_default_caps; + ib_port_default_caps; } }
@@@ -1933,7 -1901,7 +1901,7 @@@ dev->caps.pkey_table_len[port] : -1); if (err) { mlx4_err(dev, "Failed to set port %d, aborting\n", - port); + port); goto err_counters_table_free; } } @@@ -2009,7 -1977,7 +1977,7 @@@ static void mlx4_enable_msi_x(struct ml kfree(entries); goto no_msi; } else if (nreq < MSIX_LEGACY_SZ + - dev->caps.num_ports * MIN_MSIX_P_PORT) { + dev->caps.num_ports * MIN_MSIX_P_PORT) { /*Working in legacy mode , all EQ's shared*/ dev->caps.comp_pool = 0; dev->caps.num_comp_vectors = nreq - 1; @@@ -2044,7 -2012,6 +2012,7 @@@ static int mlx4_init_port_info(struct m if (!mlx4_is_slave(dev)) { mlx4_init_mac_table(dev, &info->mac_table); mlx4_init_vlan_table(dev, &info->vlan_table); + mlx4_init_roce_gid_table(dev, &info->gid_table); info->base_qpn = mlx4_get_base_qpn(dev, port); }
@@@ -2210,8 -2177,7 +2178,7 @@@ static int __mlx4_init_one(struct pci_d
err = pci_enable_device(pdev); if (err) { - dev_err(&pdev->dev, "Cannot enable PCI device, " - "aborting.\n"); + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); return err; }
@@@ -2258,14 -2224,13 +2225,13 @@@ */ if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { - dev_err(&pdev->dev, "Missing DCS, aborting." - "(driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", + dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", pci_dev_data, pci_resource_flags(pdev, 0)); err = -ENODEV; goto err_disable_pdev; } if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { - dev_err(&pdev->dev, "Missing UAR, aborting.\n"); + dev_err(&pdev->dev, "Missing UAR, aborting\n"); err = -ENODEV; goto err_disable_pdev; } @@@ -2280,21 -2245,19 +2246,19 @@@
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { - dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); + dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); + dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); goto err_release_regions; } } err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { - dev_warn(&pdev->dev, "Warning: couldn't set 64-bit " - "consistent PCI DMA mask.\n"); + dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " - "aborting.\n"); + dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n"); goto err_release_regions; } } @@@ -2325,7 -2288,7 +2289,7 @@@ if (total_vfs) { unsigned vfs_offset = 0; for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && - vfs_offset + nvfs[i] < extended_func_num(pdev); + vfs_offset + nvfs[i] < extended_func_num(pdev); vfs_offset += nvfs[i], i++) ; if (i == sizeof(nvfs)/sizeof(nvfs[0])) { @@@ -2351,8 -2314,7 +2315,7 @@@ if (err < 0) goto err_free_dev; else { - mlx4_warn(dev, "Multiple PFs not yet supported." - " Skipping PF.\n"); + mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n"); err = -EINVAL; goto err_free_dev; } @@@ -2362,8 -2324,8 +2325,8 @@@ mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); dev->dev_vfs = kzalloc( - total_vfs * sizeof(*dev->dev_vfs), - GFP_KERNEL); + total_vfs * sizeof(*dev->dev_vfs), + GFP_KERNEL); if (NULL == dev->dev_vfs) { mlx4_err(dev, "Failed to allocate memory for VFs\n"); err = 0; @@@ -2371,14 -2333,14 +2334,14 @@@ atomic_inc(&pf_loading); err = pci_enable_sriov(pdev, total_vfs); if (err) { - mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", + mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", err); atomic_dec(&pf_loading); err = 0; } else { mlx4_warn(dev, "Running in master mode\n"); dev->flags |= MLX4_FLAG_SRIOV | - MLX4_FLAG_MASTER; + MLX4_FLAG_MASTER; dev->num_vfs = total_vfs; sriov_initialized = 1; } @@@ -2395,7 -2357,7 +2358,7 @@@ */ err = mlx4_reset(dev); if (err) { - mlx4_err(dev, "Failed to reset HCA, aborting.\n"); + mlx4_err(dev, "Failed to reset HCA, aborting\n"); goto err_rel_own; } } @@@ -2403,7 -2365,7 +2366,7 @@@ slave_start: err = mlx4_cmd_init(dev); if (err) { - mlx4_err(dev, "Failed to init command interface, aborting.\n"); + mlx4_err(dev, "Failed to init command interface, aborting\n"); goto err_sriov; }
@@@ -2417,8 -2379,7 +2380,7 @@@ dev->num_slaves = 0; err = mlx4_multi_func_init(dev); if (err) { - mlx4_err(dev, "Failed to init slave mfunc" - " interface, aborting.\n"); + mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n"); goto err_cmd; } } @@@ -2450,8 -2411,7 +2412,7 @@@ unsigned sum = 0; err = mlx4_multi_func_init(dev); if (err) { - mlx4_err(dev, "Failed to init master mfunc" - "interface, aborting.\n"); + mlx4_err(dev, "Failed to init master mfunc interface, aborting\n"); goto err_close; } if (sriov_initialized) { @@@ -2462,10 -2422,7 +2423,7 @@@ if (ib_ports && (num_vfs_argc > 1 || probe_vfs_argc > 1)) { mlx4_err(dev, - "Invalid syntax of num_vfs/probe_vfs " - "with IB port. Single port VFs syntax" - " is only supported when all ports " - "are configured as ethernet\n"); + "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n"); goto err_close; } for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) { @@@ -2491,8 -2448,7 +2449,7 @@@ if ((mlx4_is_mfunc(dev)) && !(dev->flags & MLX4_FLAG_MSI_X)) { err = -ENOSYS; - mlx4_err(dev, "INTx is not supported in multi-function mode." - " aborting.\n"); + mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n"); goto err_free_eq; }
@@@ -2637,7 -2593,7 +2594,7 @@@ static void __mlx4_remove_one(struct pc /* in SRIOV it is not allowed to unload the pf's * driver while there are alive vf's */ if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev)) - printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n"); + pr_warn("Removing PF when there are assigned VF's !!!\n"); mlx4_stop_sense(dev); mlx4_unregister_device(dev);
@@@ -2808,33 -2764,36 +2765,36 @@@ static struct pci_driver mlx4_driver = static int __init mlx4_verify_params(void) { if ((log_num_mac < 0) || (log_num_mac > 7)) { - pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac); + pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac); return -1; }
if (log_num_vlan != 0) - pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n", - MLX4_LOG_NUM_VLANS); + pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n", + MLX4_LOG_NUM_VLANS); + + if (use_prio != 0) + pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");
if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { - pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); + pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n", + log_mtts_per_seg); return -1; }
/* Check if module param for ports type has legal combination */ if (port_type_array[0] == false && port_type_array[1] == true) { - printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n"); + pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n"); port_type_array[0] = true; }
if (mlx4_log_num_mgm_entry_size != -1 && (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) { - pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not " - "in legal range (-1 or %d..%d)\n", - mlx4_log_num_mgm_entry_size, - MLX4_MIN_MGM_LOG_ENTRY_SIZE, - MLX4_MAX_MGM_LOG_ENTRY_SIZE); + pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-1 or %d..%d)\n", + mlx4_log_num_mgm_entry_size, + MLX4_MIN_MGM_LOG_ENTRY_SIZE, + MLX4_MAX_MGM_LOG_ENTRY_SIZE); return -1; }
diff --combined drivers/net/ethernet/mellanox/mlx4/mlx4.h index 8e9eb02,9dd1b30..4b416ed --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@@ -216,18 -216,19 +216,19 @@@ extern int mlx4_debug_level #define mlx4_debug_level (0) #endif /* CONFIG_MLX4_DEBUG */
- #define mlx4_dbg(mdev, format, arg...) \ + #define mlx4_dbg(mdev, format, ...) \ do { \ if (mlx4_debug_level) \ - dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \ + dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format, \ + ##__VA_ARGS__); \ } while (0)
- #define mlx4_err(mdev, format, arg...) \ - dev_err(&mdev->pdev->dev, format, ##arg) - #define mlx4_info(mdev, format, arg...) \ - dev_info(&mdev->pdev->dev, format, ##arg) - #define mlx4_warn(mdev, format, arg...) \ - dev_warn(&mdev->pdev->dev, format, ##arg) + #define mlx4_err(mdev, format, ...) \ + dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__) + #define mlx4_info(mdev, format, ...) \ + dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__) + #define mlx4_warn(mdev, format, ...) \ + dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
extern int mlx4_log_num_mgm_entry_size; extern int log_mtts_per_seg; @@@ -695,17 -696,6 +696,17 @@@ struct mlx4_mac_table int max; };
+#define MLX4_ROCE_GID_ENTRY_SIZE 16 + +struct mlx4_roce_gid_entry { + u8 raw[MLX4_ROCE_GID_ENTRY_SIZE]; +}; + +struct mlx4_roce_gid_table { + struct mlx4_roce_gid_entry roce_gids[MLX4_ROCE_MAX_GIDS]; + struct mutex mutex; +}; + #define MLX4_MAX_VLAN_NUM 128 #define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2)
@@@ -769,7 -759,6 +770,7 @@@ struct mlx4_port_info struct device_attribute port_mtu_attr; struct mlx4_mac_table mac_table; struct mlx4_vlan_table vlan_table; + struct mlx4_roce_gid_table gid_table; int base_qpn; };
@@@ -800,6 -789,10 +801,6 @@@ enum MLX4_USE_RR = 1, };
-struct mlx4_roce_gid_entry { - u8 raw[16]; -}; - struct mlx4_priv { struct mlx4_dev dev;
@@@ -847,6 -840,7 +848,6 @@@ int fs_hash_mode; u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; __be64 slave_node_guids[MLX4_MFUNC_MAX]; - struct mlx4_roce_gid_entry roce_gids[MLX4_MAX_PORTS][MLX4_ROCE_MAX_GIDS];
atomic_t opreq_count; struct work_struct opreq_task; @@@ -1147,8 -1141,6 +1148,8 @@@ int mlx4_change_port_types(struct mlx4_
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); +void mlx4_init_roce_gid_table(struct mlx4_dev *dev, + struct mlx4_roce_gid_table *table); void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan); int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
@@@ -1158,7 -1150,6 +1159,7 @@@ int mlx4_get_slave_from_resource_id(str enum mlx4_resource resource_type, u64 resource_id, int *slave); void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id); +void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave); int mlx4_init_resource_tracker(struct mlx4_dev *dev);
void mlx4_free_resource_tracker(struct mlx4_dev *dev, diff --combined drivers/net/ethernet/mellanox/mlx4/port.c index 5ec6f20,376f2f1..7ab9717 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@@ -75,16 -75,6 +75,16 @@@ void mlx4_init_vlan_table(struct mlx4_d table->total = 0; }
+void mlx4_init_roce_gid_table(struct mlx4_dev *dev, + struct mlx4_roce_gid_table *table) +{ + int i; + + mutex_init(&table->mutex); + for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) + memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE); +} + static int validate_index(struct mlx4_dev *dev, struct mlx4_mac_table *table, int index) { @@@ -254,8 -244,8 +254,8 @@@ void __mlx4_unregister_mac(struct mlx4_ if (validate_index(dev, table, index)) goto out; if (--table->refs[index]) { - mlx4_dbg(dev, "Have more references for index %d," - "no need to modify mac table\n", index); + mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n", + index); goto out; }
@@@ -453,9 -443,8 +453,8 @@@ void __mlx4_unregister_vlan(struct mlx4 }
if (--table->refs[index]) { - mlx4_dbg(dev, "Have %d more references for index %d," - "no need to modify vlan table\n", table->refs[index], - index); + mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n", + table->refs[index], index); goto out; } table->entries[index] = 0; @@@ -594,84 -583,6 +593,84 @@@ int mlx4_get_base_gid_ix(struct mlx4_de } EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
+static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave, + int port, struct mlx4_cmd_mailbox *mailbox) +{ + struct mlx4_roce_gid_entry *gid_entry_mbox; + struct mlx4_priv *priv = mlx4_priv(dev); + int num_gids, base, offset; + int i, err; + + num_gids = mlx4_get_slave_num_gids(dev, slave, port); + base = mlx4_get_base_gid_ix(dev, slave, port); + + memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE); + + mutex_lock(&(priv->port[port].gid_table.mutex)); + /* Zero-out gids belonging to that slave in the port GID table */ + for (i = 0, offset = base; i < num_gids; offset++, i++) + memcpy(priv->port[port].gid_table.roce_gids[offset].raw, + zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE); + + /* Now, copy roce port gids table to mailbox for passing to FW */ + gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf; + for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++) + memcpy(gid_entry_mbox->raw, + priv->port[port].gid_table.roce_gids[i].raw, + MLX4_ROCE_GID_ENTRY_SIZE); + + err = mlx4_cmd(dev, mailbox->dma, + ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8), 1, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + mutex_unlock(&(priv->port[port].gid_table.mutex)); + return err; +} + + +void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave) +{ + struct mlx4_active_ports actv_ports; + struct mlx4_cmd_mailbox *mailbox; + int num_eth_ports, err; + int i; + + if (slave < 0 || slave > dev->num_vfs) + return; + + actv_ports = mlx4_get_active_ports(dev, slave); + + for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) { + if (test_bit(i, actv_ports.ports)) { + if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH) + continue; + num_eth_ports++; + } + } + + if (!num_eth_ports) + return; + + /* have ETH ports. Alloc mailbox for SET_PORT command */ + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return; + + for (i = 0; i < dev->caps.num_ports; i++) { + if (test_bit(i, actv_ports.ports)) { + if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH) + continue; + err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox); + if (err) + mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n", + slave, i + 1, err); + } + } + + mlx4_free_cmd_mailbox(dev, mailbox); + return; +} + static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, u8 op_mod, struct mlx4_cmd_mailbox *inbox) { @@@ -780,12 -691,10 +779,12 @@@ /* 2. 
Check that do not have duplicates in OTHER * entries in the port GID table */ + + mutex_lock(&(priv->port[port].gid_table.mutex)); for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { if (i >= base && i < base + num_gids) continue; /* don't compare to slave's current gids */ - gid_entry_tbl = &priv->roce_gids[port - 1][i]; + gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i]; if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry))) continue; gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); @@@ -796,10 -705,8 +795,9 @@@ if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw, sizeof(gid_entry_tbl->raw))) { /* found duplicate */ - mlx4_warn(dev, "requested gid entry for slave:%d " - "is a duplicate of gid at index %d\n", + mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n", slave, i); + mutex_unlock(&(priv->port[port].gid_table.mutex)); return -EINVAL; } } @@@ -808,24 -715,16 +806,24 @@@ /* insert slave GIDs with memcpy, starting at slave's base index */ gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++) - memcpy(priv->roce_gids[port - 1][offset].raw, gid_entry_mbox->raw, 16); + memcpy(priv->port[port].gid_table.roce_gids[offset].raw, + gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE);
/* Now, copy roce port gids table to current mailbox for passing to FW */ gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++) - memcpy(gid_entry_mbox->raw, priv->roce_gids[port - 1][i].raw, 16); - - break; + memcpy(gid_entry_mbox->raw, + priv->port[port].gid_table.roce_gids[i].raw, + MLX4_ROCE_GID_ENTRY_SIZE); + + err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + mutex_unlock(&(priv->port[port].gid_table.mutex)); + return err; } - return mlx4_cmd(dev, inbox->dma, in_mod, op_mod, + + return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); } @@@ -1198,8 -1097,7 +1196,8 @@@ int mlx4_get_slave_from_roce_gid(struc num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { - if (!memcmp(priv->roce_gids[port - 1][i].raw, gid, 16)) { + if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid, + MLX4_ROCE_GID_ENTRY_SIZE)) { found_ix = i; break; } @@@ -1287,8 -1185,7 +1285,8 @@@ int mlx4_get_roce_gid_from_slave(struc if (!mlx4_is_master(dev)) return -EINVAL;
- memcpy(gid, priv->roce_gids[port - 1][slave_id].raw, 16); + memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw, + MLX4_ROCE_GID_ENTRY_SIZE); return 0; } EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave); diff --combined drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index f16e539,dd821b3..b6cddef --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@@ -586,7 -586,6 +586,7 @@@ void mlx4_free_resource_tracker(struct } /* free master's vlans */ i = dev->caps.function; + mlx4_reset_roce_gids(dev, i); mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); rem_slave_vlans(dev, i); mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); @@@ -963,7 -962,7 +963,7 @@@ static struct res_common *alloc_tr(u64 ret = alloc_srq_tr(id); break; case RES_MAC: - printk(KERN_ERR "implementation missing\n"); + pr_err("implementation missing\n"); return NULL; case RES_COUNTER: ret = alloc_counter_tr(id); @@@ -1057,10 -1056,10 +1057,10 @@@ static int remove_mtt_ok(struct res_mt { if (res->com.state == RES_MTT_BUSY || atomic_read(&res->ref_count)) { - printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n", - __func__, __LINE__, - mtt_states_str(res->com.state), - atomic_read(&res->ref_count)); + pr_devel("%s-%d: state %s, ref_count %d\n", + __func__, __LINE__, + mtt_states_str(res->com.state), + atomic_read(&res->ref_count)); return -EBUSY; } else if (res->com.state != RES_MTT_ALLOCATED) return -EPERM; @@@ -3881,7 -3880,7 +3881,7 @@@ static int add_eth_header(struct mlx4_d } } if (!be_mac) { - pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n", + pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n", port); return -EINVAL; } @@@ -3978,7 -3977,7 +3978,7 @@@ int mlx4_QP_FLOW_STEERING_ATTACH_wrappe qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; err = get_res(dev, slave, qpn, RES_QP, &rqp); if (err) { - pr_err("Steering rule with qpn 0x%x rejected.\n", qpn); + pr_err("Steering rule with qpn 0x%x rejected\n", qpn); return err; } rule_header = (struct _rule_hw *)(ctrl + 1); @@@ -3996,7 -3995,7 +3996,7 @@@ case MLX4_NET_TRANS_RULE_ID_IPV4: case MLX4_NET_TRANS_RULE_ID_TCP: case MLX4_NET_TRANS_RULE_ID_UDP: - pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n"); + pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n"); if (add_eth_header(dev, slave, inbox, rlist, header_id)) { err = -EINVAL; goto err_put; @@@ -4005,7 -4004,7 +4005,7 @@@ sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2; break; default: - pr_err("Corrupted mailbox.\n"); + pr_err("Corrupted mailbox\n"); err = -EINVAL; goto err_put; } @@@ -4019,7 -4018,7 +4019,7 @@@
err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn); if (err) { - mlx4_err(dev, "Fail to add flow steering resources.\n "); + mlx4_err(dev, "Fail to add flow steering resources\n"); /* detach rule*/ mlx4_cmd(dev, vhcr->out_param, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, @@@ -4057,7 -4056,7 +4057,7 @@@ int mlx4_QP_FLOW_STEERING_DETACH_wrappe
err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0); if (err) { - mlx4_err(dev, "Fail to remove flow steering resources.\n "); + mlx4_err(dev, "Fail to remove flow steering resources\n"); goto out; }
@@@ -4186,8 -4185,8 +4186,8 @@@ static void rem_slave_qps(struct mlx4_d
err = move_all_busy(dev, slave, RES_QP); if (err) - mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy" - "for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(qp, tmp, qp_list, com.list) { @@@ -4225,10 -4224,8 +4225,8 @@@ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) - mlx4_dbg(dev, "rem_slave_qps: failed" - " to move slave %d qpn %d to" - " reset\n", slave, - qp->local_qpn); + mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n", + slave, qp->local_qpn); atomic_dec(&qp->rcq->ref_count); atomic_dec(&qp->scq->ref_count); atomic_dec(&qp->mtt->ref_count); @@@ -4262,8 -4259,8 +4260,8 @@@ static void rem_slave_srqs(struct mlx4_
err = move_all_busy(dev, slave, RES_SRQ); if (err) - mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(srq, tmp, srq_list, com.list) { @@@ -4293,9 -4290,7 +4291,7 @@@ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) - mlx4_dbg(dev, "rem_slave_srqs: failed" - " to move slave %d srq %d to" - " SW ownership\n", + mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n", slave, srqn);
atomic_dec(&srq->mtt->ref_count); @@@ -4330,8 -4325,8 +4326,8 @@@ static void rem_slave_cqs(struct mlx4_d
err = move_all_busy(dev, slave, RES_CQ); if (err) - mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(cq, tmp, cq_list, com.list) { @@@ -4361,9 -4356,7 +4357,7 @@@ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) - mlx4_dbg(dev, "rem_slave_cqs: failed" - " to move slave %d cq %d to" - " SW ownership\n", + mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n", slave, cqn); atomic_dec(&cq->mtt->ref_count); state = RES_CQ_ALLOCATED; @@@ -4395,8 -4388,8 +4389,8 @@@ static void rem_slave_mrs(struct mlx4_d
err = move_all_busy(dev, slave, RES_MPT); if (err) - mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) { @@@ -4431,9 -4424,7 +4425,7 @@@ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) - mlx4_dbg(dev, "rem_slave_mrs: failed" - " to move slave %d mpt %d to" - " SW ownership\n", + mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n", slave, mptn); if (mpt->mtt) atomic_dec(&mpt->mtt->ref_count); @@@ -4465,8 -4456,8 +4457,8 @@@ static void rem_slave_mtts(struct mlx4_
err = move_all_busy(dev, slave, RES_MTT); if (err) - mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) { @@@ -4568,8 -4559,8 +4560,8 @@@ static void rem_slave_eqs(struct mlx4_d
err = move_all_busy(dev, slave, RES_EQ); if (err) - mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(eq, tmp, eq_list, com.list) { @@@ -4601,9 -4592,8 +4593,8 @@@ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) - mlx4_dbg(dev, "rem_slave_eqs: failed" - " to move slave %d eqs %d to" - " SW ownership\n", slave, eqn); + mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n", + slave, eqn); mlx4_free_cmd_mailbox(dev, mailbox); atomic_dec(&eq->mtt->ref_count); state = RES_EQ_RESERVED; @@@ -4632,8 -4622,8 +4623,8 @@@ static void rem_slave_counters(struct m
err = move_all_busy(dev, slave, RES_COUNTER); if (err) - mlx4_warn(dev, "rem_slave_counters: Could not move all counters to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(counter, tmp, counter_list, com.list) { @@@ -4663,8 -4653,8 +4654,8 @@@ static void rem_slave_xrcdns(struct mlx
err = move_all_busy(dev, slave, RES_XRCD); if (err) - mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) { @@@ -4682,7 -4672,7 +4673,7 @@@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = mlx4_priv(dev); - + mlx4_reset_roce_gids(dev, slave); mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); rem_slave_vlans(dev, slave); rem_slave_macs(dev, slave); @@@ -4809,10 -4799,8 +4800,8 @@@ void mlx4_vf_immed_vlan_work_handler(st 0, MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); if (err) { - mlx4_info(dev, "UPDATE_QP failed for slave %d, " - "port %d, qpn %d (%d)\n", - work->slave, port, qp->local_qpn, - err); + mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n", + work->slave, port, qp->local_qpn, err); errors++; } } diff --combined drivers/net/ethernet/ti/davinci_emac.c index abc286d,f32d730..35a139e --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@@ -1567,6 -1567,7 +1567,6 @@@ static int emac_dev_open(struct net_dev while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, res_num))) { for (irq_num = res->start; irq_num <= res->end; irq_num++) { - dev_err(emac_dev, "Request IRQ %d\n", irq_num); if (request_irq(irq_num, emac_irq, 0, ndev->name, ndev)) { dev_err(emac_dev, @@@ -1864,7 -1865,6 +1864,6 @@@ static int davinci_emac_probe(struct pl struct emac_priv *priv; unsigned long hw_ram_addr; struct emac_platform_data *pdata; - struct device *emac_dev; struct cpdma_params dma_params; struct clk *emac_clk; unsigned long emac_bus_frequency; @@@ -1910,7 -1910,6 +1909,6 @@@ priv->coal_intvl = 0; priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000);
- emac_dev = &ndev->dev; /* Get EMAC platform data */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); priv->emac_base_phys = res->start + pdata->ctrl_reg_offset; @@@ -1929,7 -1928,7 +1927,7 @@@ hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
memset(&dma_params, 0, sizeof(dma_params)); - dma_params.dev = emac_dev; + dma_params.dev = &pdev->dev; dma_params.dmaregs = priv->emac_base; dma_params.rxthresh = priv->emac_base + 0x120; dma_params.rxfree = priv->emac_base + 0x140; @@@ -1979,7 -1978,7 +1977,7 @@@ }
ndev->netdev_ops = &emac_netdev_ops; - SET_ETHTOOL_OPS(ndev, ðtool_ops); + ndev->ethtool_ops = ðtool_ops; netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
/* register the network device */ @@@ -1993,7 -1992,7 +1991,7 @@@
if (netif_msg_probe(priv)) { - dev_notice(emac_dev, "DaVinci EMAC Probe found device "\ + dev_notice(&pdev->dev, "DaVinci EMAC Probe found device " "(regs: %p, irq: %d)\n", (void *)priv->emac_base_phys, ndev->irq); } diff --combined drivers/net/team/team.c index ce4989b,9a9ce8d..b4958c7 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@@ -968,7 -968,7 +968,7 @@@ static void team_port_disable(struct te static void __team_compute_features(struct team *team) { struct team_port *port; - u32 vlan_features = TEAM_VLAN_FEATURES; + u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; unsigned short max_hard_header_len = ETH_HLEN; unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
@@@ -1724,7 -1724,6 +1724,7 @@@ static int team_change_mtu(struct net_d * to traverse list in reverse under rcu_read_lock */ mutex_lock(&team->lock); + team->port_mtu_change_allowed = true; list_for_each_entry(port, &team->port_list, list) { err = dev_set_mtu(port->dev, new_mtu); if (err) { @@@ -1733,7 -1732,6 +1733,7 @@@ goto unwind; } } + team->port_mtu_change_allowed = false; mutex_unlock(&team->lock);
dev->mtu = new_mtu; @@@ -1743,7 -1741,6 +1743,7 @@@ unwind: list_for_each_entry_continue_reverse(port, &team->port_list, list) dev_set_mtu(port->dev, dev->mtu); + team->port_mtu_change_allowed = false; mutex_unlock(&team->lock);
return err; @@@ -2854,9 -2851,7 +2854,9 @@@ static int team_device_event(struct not break; case NETDEV_PRECHANGEMTU: /* Forbid to change mtu of underlaying device */ - return NOTIFY_BAD; + if (!port->team->port_mtu_change_allowed) + return NOTIFY_BAD; + break; case NETDEV_PRE_TYPE_CHANGE: /* Forbid to change type of underlaying device */ return NOTIFY_BAD; diff --combined drivers/net/usb/ipheth.c index 973275f,f725707..76465b1 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@@ -59,8 -59,6 +59,8 @@@ #define USB_PRODUCT_IPHONE_3GS 0x1294 #define USB_PRODUCT_IPHONE_4 0x1297 #define USB_PRODUCT_IPAD 0x129a +#define USB_PRODUCT_IPAD_2 0x12a2 +#define USB_PRODUCT_IPAD_3 0x12a6 #define USB_PRODUCT_IPAD_MINI 0x12ab #define USB_PRODUCT_IPHONE_4_VZW 0x129c #define USB_PRODUCT_IPHONE_4S 0x12a0 @@@ -107,14 -105,6 +107,14 @@@ static struct usb_device_id ipheth_tabl { USB_DEVICE_AND_INTERFACE_INFO( USB_VENDOR_APPLE, USB_PRODUCT_IPAD, IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, + IPHETH_USBINTF_PROTO) }, + { USB_DEVICE_AND_INTERFACE_INFO( + USB_VENDOR_APPLE, USB_PRODUCT_IPAD_2, + IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, + IPHETH_USBINTF_PROTO) }, + { USB_DEVICE_AND_INTERFACE_INFO( + USB_VENDOR_APPLE, USB_PRODUCT_IPAD_3, + IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, IPHETH_USBINTF_PROTO) }, { USB_DEVICE_AND_INTERFACE_INFO( USB_VENDOR_APPLE, USB_PRODUCT_IPAD_MINI, @@@ -534,7 -524,7 +534,7 @@@ static int ipheth_probe(struct usb_inte usb_set_intfdata(intf, dev);
SET_NETDEV_DEV(netdev, &intf->dev); - SET_ETHTOOL_OPS(netdev, &ops); + netdev->ethtool_ops = &ops;
retval = register_netdev(netdev); if (retval) { diff --combined drivers/net/wireless/orinoco/orinoco_usb.c index 3ac7133,1cbb783..c90939c --- a/drivers/net/wireless/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/orinoco/orinoco_usb.c @@@ -100,11 -100,25 +100,11 @@@ static struct ez_usb_fw firmware = .code = NULL, };
-#ifdef CONFIG_USB_DEBUG -static int debug = 1; -#else -static int debug; -#endif - /* Debugging macros */ -#undef dbg -#define dbg(format, arg...) \ - do { if (debug) printk(KERN_DEBUG PFX "%s: " format "\n", \ - __func__ , ## arg); } while (0) #undef err #define err(format, arg...) \ do { printk(KERN_ERR PFX format "\n", ## arg); } while (0)
-/* Module paramaters */ -module_param(debug, int, 0644); -MODULE_PARM_DESC(debug, "Debug enabled or not"); - MODULE_FIRMWARE("orinoco_ezusb_fw");
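The block removed above is the driver's private debug machinery: a `debug` module parameter plus a homegrown dbg() macro. The hunks that follow replace every dbg() call with dev_dbg()/netdev_dbg(), which route through dynamic debug (or compile out entirely) and automatically prefix the device name. A minimal sketch of the replacement pattern; struct ezusb_priv and its dev member are taken from the surrounding diff:

	#include <linux/netdevice.h>

	/* illustration only: assumes the driver's ezusb_priv declaration */
	static void example_trace(struct ezusb_priv *upriv)
	{
		/* enabled at runtime via dynamic debug, no module parameter */
		netdev_dbg(upriv->dev, "urb busy, not resubmitting\n");
	}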
/* @@@ -327,7 -341,7 +327,7 @@@ static void ezusb_request_timerfn(u_lon ctx->state = EZUSB_CTX_REQ_TIMEOUT; } else { ctx->state = EZUSB_CTX_RESP_TIMEOUT; - dbg("couldn't unlink"); + dev_dbg(&ctx->outurb->dev->dev, "couldn't unlink\n"); atomic_inc(&ctx->refcount); ctx->killed = 1; ezusb_ctx_complete(ctx); @@@ -620,9 -634,9 +620,9 @@@ static void ezusb_request_in_callback(s ctx = c; break; } - dbg("Skipped (0x%x/0x%x) (%d/%d)", - le16_to_cpu(ans->hermes_rid), - c->in_rid, ans->ans_reply_count, reply_count); + netdev_dbg(upriv->dev, "Skipped (0x%x/0x%x) (%d/%d)\n", + le16_to_cpu(ans->hermes_rid), c->in_rid, + ans->ans_reply_count, reply_count); } }
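Also worth flagging for the ezusb_probe() hunk further below: the firmware-download result is now tested with `< 0` instead of for non-zero. That reads like a sign-convention fix, presumably because the helper can propagate a positive byte count from usb_control_msg() on success, which a plain `if (ret)` would have treated as failure. Hedged sketch of the idiom, reusing the driver's own types:

	/* assumes the driver's ezusb_priv / ez_usb_fw declarations */
	static int example_load_fw(struct ezusb_priv *upriv, struct ez_usb_fw *fw)
	{
		int ret = ezusb_firmware_download(upriv, fw);

		/* a positive return can mean bytes transferred, not failure */
		return ret < 0 ? ret : 0;
	}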
@@@ -754,7 -768,7 +754,7 @@@ static int ezusb_submit_in_urb(struct e void *cur_buf = upriv->read_urb->transfer_buffer;
if (upriv->read_urb->status == -EINPROGRESS) { - dbg("urb busy, not resubmiting"); + netdev_dbg(upriv->dev, "urb busy, not resubmitting\n"); retval = -EBUSY; goto exit; } @@@ -824,9 -838,8 +824,9 @@@ static int ezusb_firmware_download(stru memcpy(fw_buffer, &fw->code[addr], FW_BUF_SIZE); if (variant_offset >= addr && variant_offset < addr + FW_BUF_SIZE) { - dbg("Patching card_variant byte at 0x%04X", - variant_offset); + netdev_dbg(upriv->dev, + "Patching card_variant byte at 0x%04X\n", + variant_offset); fw_buffer[variant_offset - addr] = FW_VAR_VALUE; } retval = usb_control_msg(upriv->udev,
if (!upriv->udev) { - dbg("Device disconnected"); retval = -ENODEV; goto exit; } @@@ -1009,9 -1023,8 +1009,9 @@@ static int ezusb_doicmd_wait(struct her cpu_to_le16(parm1), cpu_to_le16(parm2), }; - dbg("0x%04X, parm0 0x%04X, parm1 0x%04X, parm2 0x%04X", - cmd, parm0, parm1, parm2); + netdev_dbg(upriv->dev, + "0x%04X, parm0 0x%04X, parm1 0x%04X, parm2 0x%04X\n", cmd, + parm0, parm1, parm2); ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_DOCMD, EZUSB_RID_ACK); if (!ctx) return -ENOMEM; @@@ -1032,7 -1045,7 +1032,7 @@@ static int ezusb_docmd_wait(struct herm 0, 0, }; - dbg("0x%04X, parm0 0x%04X", cmd, parm0); + netdev_dbg(upriv->dev, "0x%04X, parm0 0x%04X\n", cmd, parm0); ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_DOCMD, EZUSB_RID_ACK); if (!ctx) return -ENOMEM; @@@ -1319,7 -1332,7 +1319,7 @@@ static int ezusb_hard_reset(struct orin return retval; }
- dbg("sending control message"); + netdev_dbg(upriv->dev, "sending control message\n"); retval = usb_control_msg(upriv->udev, usb_sndctrlpipe(upriv->udev, 0), EZUSB_REQUEST_TRIGER, @@@ -1388,8 -1401,10 +1388,8 @@@ static void ezusb_bulk_in_callback(stru u16 crc; u16 hermes_rid;
- if (upriv->udev == NULL) { - dbg("disconnected"); + if (upriv->udev == NULL) return; - }
if (urb->status == -ETIMEDOUT) { /* When a device gets unplugged we get this every time @@@ -1406,13 -1421,12 +1406,13 @@@ if ((urb->status == -EILSEQ) || (urb->status == -ENOENT) || (urb->status == -ECONNRESET)) { - dbg("status %d, not resubmiting", urb->status); + netdev_dbg(upriv->dev, "status %d, not resubmitting\n", + urb->status); return; } if (urb->status) - dbg("status: %d length: %d", - urb->status, urb->actual_length); + netdev_dbg(upriv->dev, "status: %d length: %d\n", + urb->status, urb->actual_length); if (urb->actual_length < sizeof(*ans)) { err("%s: short read, ignoring", __func__); goto resubmit; @@@ -1673,7 -1687,7 +1673,7 @@@ static int ezusb_probe(struct usb_inter firmware.code = fw_entry->data; } if (firmware.size && firmware.code) { - if (ezusb_firmware_download(upriv, &firmware)) + if (ezusb_firmware_download(upriv, &firmware) < 0) goto error; } else { err("No firmware to download"); diff --combined drivers/net/wireless/ti/wlcore/main.c index e71eae3,02c91d6..3d6028e --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@@ -543,7 -543,7 +543,7 @@@ static int wlcore_irq_locked(struct wl1 * wl1271_ps_elp_wakeup cannot be called concurrently. */ clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); - smp_mb__after_clear_bit(); + smp_mb__after_atomic();
ret = wlcore_fw_status(wl, wl->fw_status); if (ret < 0) @@@ -1416,7 -1416,7 +1416,7 @@@ void wl1271_rx_filter_free(struct wl12x
int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter, u16 offset, u8 flags, - u8 *pattern, u8 len) + const u8 *pattern, u8 len) { struct wl12xx_rx_filter_field *field;
@@@ -5184,7 -5184,8 +5184,8 @@@ out mutex_unlock(&wl->mutex); }
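The wlcore_op_flush() hunk just below tracks a mac80211 API change that is also visible in the rtl8821ae diff later in this mail: the .flush callback now receives the vif being flushed. A driver that can only flush globally may simply ignore the new argument. Hedged sketch of the new signature, with an invented driver name:

	#include <net/mac80211.h>

	static void example_op_flush(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     u32 queues, bool drop)
	{
		/* vif may be NULL, in which case everything is flushed */
	}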
- static void wlcore_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop) + static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) { struct wl1271 *wl = hw->priv;
diff --combined drivers/s390/net/qeth_core_main.c index e89f38c,549e9fd..1a884c9 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@@ -20,9 -20,9 +20,10 @@@ #include <linux/kthread.h> #include <linux/slab.h> #include <net/iucv/af_iucv.h> + #include <net/dsfield.h>
#include <asm/ebcdic.h> +#include <asm/chpid.h> #include <asm/io.h> #include <asm/sysinfo.h> #include <asm/compat.h> @@@ -1013,7 -1013,7 +1014,7 @@@ static long __qeth_check_irb_error(stru
card = CARD_FROM_CDEV(cdev);
- if (!IS_ERR(irb)) + if (!card || !IS_ERR(irb)) return 0;
switch (PTR_ERR(irb)) { @@@ -1029,7 -1029,7 +1030,7 @@@ QETH_CARD_TEXT(card, 2, "ckirberr"); QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT); if (intparm == QETH_RCD_PARM) { - if (card && (card->data.ccwdev == cdev)) { + if (card->data.ccwdev == cdev) { card->data.state = CH_STATE_DOWN; wake_up(&card->wait_q); } @@@ -1345,7 -1345,16 +1346,7 @@@ static void qeth_set_multiple_write_que static void qeth_update_from_chp_desc(struct qeth_card *card) { struct ccw_device *ccwdev; - struct channelPath_dsc { - u8 flags; - u8 lsn; - u8 desc; - u8 chpid; - u8 swla; - u8 zeroes; - u8 chla; - u8 chpp; - } *chp_dsc; + struct channel_path_desc *chp_dsc;
QETH_DBF_TEXT(SETUP, 2, "chp_desc");
@@@ -3662,42 -3671,56 +3663,56 @@@ void qeth_qdio_output_handler(struct cc } EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
+ /** + * Note: Function assumes that we have 4 outbound queues. + */ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, int ipv, int cast_type) { - if (!ipv && (card->info.type == QETH_CARD_TYPE_OSD || - card->info.type == QETH_CARD_TYPE_OSX)) - return card->qdio.default_out_queue; - switch (card->qdio.no_out_queues) { - case 4: - if (cast_type && card->info.is_multicast_different) - return card->info.is_multicast_different & - (card->qdio.no_out_queues - 1); - if (card->qdio.do_prio_queueing && (ipv == 4)) { - const u8 tos = ip_hdr(skb)->tos; - - if (card->qdio.do_prio_queueing == - QETH_PRIO_Q_ING_TOS) { - if (tos & IP_TOS_NOTIMPORTANT) - return 3; - if (tos & IP_TOS_HIGHRELIABILITY) - return 2; - if (tos & IP_TOS_HIGHTHROUGHPUT) - return 1; - if (tos & IP_TOS_LOWDELAY) - return 0; - } - if (card->qdio.do_prio_queueing == - QETH_PRIO_Q_ING_PREC) - return 3 - (tos >> 6); - } else if (card->qdio.do_prio_queueing && (ipv == 6)) { - /* TODO: IPv6!!! */ + __be16 *tci; + u8 tos; + + if (cast_type && card->info.is_multicast_different) + return card->info.is_multicast_different & + (card->qdio.no_out_queues - 1); + + switch (card->qdio.do_prio_queueing) { + case QETH_PRIO_Q_ING_TOS: + case QETH_PRIO_Q_ING_PREC: + switch (ipv) { + case 4: + tos = ipv4_get_dsfield(ip_hdr(skb)); + break; + case 6: + tos = ipv6_get_dsfield(ipv6_hdr(skb)); + break; + default: + return card->qdio.default_out_queue; } - return card->qdio.default_out_queue; - case 1: /* fallthrough for single-out-queue 1920-device */ + if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC) + return ~tos >> 6 & 3; + if (tos & IPTOS_MINCOST) + return 3; + if (tos & IPTOS_RELIABILITY) + return 2; + if (tos & IPTOS_THROUGHPUT) + return 1; + if (tos & IPTOS_LOWDELAY) + return 0; + break; + case QETH_PRIO_Q_ING_SKB: + if (skb->priority > 5) + return 0; + return ~skb->priority >> 1 & 3; + case QETH_PRIO_Q_ING_VLAN: + tci = &((struct ethhdr *)skb->data)->h_proto; + if (*tci == ETH_P_8021Q) + return ~*(tci + 1) >> (VLAN_PRIO_SHIFT + 1) & 3; + break; default: - return card->qdio.default_out_queue; + break; } + return card->qdio.default_out_queue; } EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
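The rewritten qeth_get_priority_queue() above folds the old open-coded TOS tests into a uniform mapping. For the precedence mode, `~tos >> 6 & 3` inverts the top two TOS bits so that a higher IP precedence lands on a lower-numbered (higher-priority) queue, equivalent to the old `3 - (tos >> 6)`. A worked check of the arithmetic:

	#include <linux/types.h>

	static int prec_to_queue(u8 tos)
	{
		return ~tos >> 6 & 3;	/* top two TOS bits b -> queue 3 - b */
	}
	/* prec_to_queue(0xE0) == 0: precedence 7, highest-priority queue;
	 * prec_to_queue(0x00) == 3: precedence 0, lowest-priority queue
	 */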
@@@ -5816,7 -5839,7 +5831,7 @@@ static int __init qeth_core_init(void if (rc) goto out_err; qeth_core_root_dev = root_device_register("qeth"); - rc = PTR_RET(qeth_core_root_dev); + rc = PTR_ERR_OR_ZERO(qeth_core_root_dev); if (rc) goto register_err; qeth_core_header_cache = kmem_cache_create("qeth_hdr", diff --combined drivers/staging/et131x/et131x.c index 0901ef5,15e0f4d..08356b6 --- a/drivers/staging/et131x/et131x.c +++ b/drivers/staging/et131x/et131x.c @@@ -762,7 -762,6 +762,7 @@@ static int et131x_init_eeprom(struct et */ if (eestatus & 0x4C) { int write_failed = 0; + if (pdev->revision == 0x01) { int i; static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF }; @@@ -2092,7 -2091,6 +2092,7 @@@ static void et1310_disable_phy_coma(str static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit) { u32 tmp_free_buff_ring = *free_buff_ring; + tmp_free_buff_ring++; /* This works for all cases where limit < 1024. The 1023 case * works because 1023++ is 1024 which means the if condition is not @@@ -4239,6 -4237,7 +4239,6 @@@ static int et131x_ioctl(struct net_devi static int et131x_set_packet_filter(struct et131x_adapter *adapter) { int filter = adapter->packet_filter; - int status = 0; u32 ctrl; u32 pf_ctrl;
@@@ -4289,7 -4288,7 +4289,7 @@@ writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl); writel(ctrl, &adapter->regs->rxmac.ctrl); } - return status; + return 0; }
/* et131x_multicast - The handler to configure multicasting on the interface */ @@@ -4605,7 -4604,7 +4605,7 @@@ static int et131x_pci_setup(struct pci_ netdev->netdev_ops = &et131x_netdev_ops;
SET_NETDEV_DEV(netdev, &pdev->dev); - SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops); + netdev->ethtool_ops = &et131x_ethtool_ops;
adapter = et131x_adapter_init(netdev, pdev);
diff --combined drivers/staging/netlogic/xlr_net.c index 75d7c63,9d95761..e320d6b --- a/drivers/staging/netlogic/xlr_net.c +++ b/drivers/staging/netlogic/xlr_net.c @@@ -268,7 -268,6 +268,7 @@@ static void xlr_make_tx_desc(struct nlm unsigned long physkb = virt_to_phys(skb); int cpu_core = nlm_core_id(); int fr_stn_id = cpu_core * 8 + XLR_FB_STN; /* FB to 6th bucket */ + msg->msg0 = (((u64)1 << 63) | /* End of packet descriptor */ ((u64)127 << 54) | /* No Free back */ (u64)skb->len << 40 | /* Length of data */ @@@ -1067,7 -1066,7 +1067,7 @@@ static int xlr_net_probe(struct platfor xlr_set_rx_mode(ndev);
priv->num_rx_desc += MAX_NUM_DESC_SPILL; - SET_ETHTOOL_OPS(ndev, &xlr_ethtool_ops); + ndev->ethtool_ops = &xlr_ethtool_ops; SET_NETDEV_DEV(ndev, &pdev->dev);
/* Common registers, do one time initialization */ diff --combined drivers/staging/rtl8821ae/core.c index 9a37408,63ae2d1..046be2c --- a/drivers/staging/rtl8821ae/core.c +++ b/drivers/staging/rtl8821ae/core.c @@@ -88,9 -88,42 +88,9 @@@ static void rtl_op_stop(struct ieee8021 mutex_unlock(&rtlpriv->locks.conf_mutex); }
static void rtl_op_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) -/*<delete in kernel start>*/ -#endif -/*<delete in kernel end>*/ { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); @@@ -104,14 -137,26 +104,14 @@@ if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status)) goto err_free;
-/*<delete in kernel start>*/ -#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0)) - if (!rtlpriv->intf_ops->waitq_insert(hw, skb)) - rtlpriv->intf_ops->adapter_tx(hw, skb, &tcb_desc); -#else -/*<delete in kernel end>*/ if (!rtlpriv->intf_ops->waitq_insert(hw, control->sta, skb)) rtlpriv->intf_ops->adapter_tx(hw, control->sta, skb, &tcb_desc); -/*<delete in kernel start>*/ -#endif -/*<delete in kernel end>*/ return;
err_free: dev_kfree_skb_any(skb); return; } -/*<delete in kernel start>*/ -#endif -/*<delete in kernel end>*/
static int rtl_op_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) @@@ -126,15 -171,26 +126,15 @@@ return -EOPNOTSUPP; }
-/*This flag is not defined before kernel 3.4*/ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; -#endif
rtl_ips_nic_on(hw);
mutex_lock(&rtlpriv->locks.conf_mutex); -/*<delete in kernel start>*/ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) switch (ieee80211_vif_type_p2p(vif)) { case NL80211_IFTYPE_P2P_CLIENT: mac->p2p = P2P_ROLE_CLIENT; /*fall through*/ -#else -/*<delete in kernel end>*/ - switch (vif->type) { -/*<delete in kernel start>*/ -#endif -/*<delete in kernel end>*/ case NL80211_IFTYPE_STATION: if (mac->beacon_enabled == 1) { RT_TRACE(COMP_MAC80211, DBG_LOUD, @@@ -158,9 -214,13 +158,9 @@@ (u8 *) (&mac->basic_rates));
break; case NL80211_IFTYPE_P2P_GO: mac->p2p = P2P_ROLE_GO; /*fall through*/ -#endif -/*<delete in kernel end>*/ case NL80211_IFTYPE_AP: RT_TRACE(COMP_MAC80211, DBG_LOUD, ("NL80211_IFTYPE_AP \n")); @@@ -250,7 -310,9 +250,7 @@@ static void rtl_op_remove_interface(str
mutex_unlock(&rtlpriv->locks.conf_mutex); } -/*<delete in kernel start>*/ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) -/*<delete in kernel end>*/ + static int rtl_op_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum nl80211_iftype new_type, bool p2p) @@@ -266,7 -328,9 +266,7 @@@ (" p2p %x\n",p2p)); return ret; } -/*<delete in kernel start>*/ -#endif -/*<delete in kernel end>*/ + static int rtl_op_config(struct ieee80211_hw *hw, u32 changed) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@@ -334,9 -398,14 +334,9 @@@ }
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { struct ieee80211_channel *channel = hw->conf.chandef.chan; enum nl80211_channel_type channel_type = cfg80211_get_chandef_type(&(hw->conf.chandef)); -#else - struct ieee80211_channel *channel = hw->conf.channel; - enum nl80211_channel_type channel_type = hw->conf.channel_type; -#endif u8 wide_chan = (u8) channel->hw_value;
if (mac->act_scanning) @@@ -592,9 -661,14 +592,9 @@@ static int _rtl_get_hal_qnum(u16 queue *for mac80211 VO=0, VI=1, BE=2, BK=3 *for rtl819x BE=0, BK=1, VI=2, VO=3 */ static int rtl_op_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, const struct ieee80211_tx_queue_params *param) -#else -static int rtl_op_conf_tx(struct ieee80211_hw *hw, u16 queue, - const struct ieee80211_tx_queue_params *param) -#endif { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@@ -890,7 -964,11 +890,7 @@@ out mutex_unlock(&rtlpriv->locks.conf_mutex); }
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) static u64 rtl_op_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) -#else -static u64 rtl_op_get_tsf(struct ieee80211_hw *hw) -#endif { struct rtl_priv *rtlpriv = rtl_priv(hw); u64 tsf; @@@ -899,8 -977,12 +899,8 @@@ return tsf; }
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) static void rtl_op_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u64 tsf) -#else -static void rtl_op_set_tsf(struct ieee80211_hw *hw, u64 tsf) -#endif { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@@ -910,7 -992,11 +910,7 @@@ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *) (&bibss)); }
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) static void rtl_op_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) -#else -static void rtl_op_reset_tsf(struct ieee80211_hw *hw) -#endif { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 tmp = 0; @@@ -937,7 -1023,13 +937,7 @@@ static int rtl_op_ampdu_action(struct i struct ieee80211_vif *vif, enum ieee80211_ampdu_mlme_action action, struct ieee80211_sta *sta, u16 tid, u16 * ssn -/*<delete in kernel start>*/ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)) -/*<delete in kernel end>*/ ,u8 buf_size -/*<delete in kernel start>*/ -#endif -/*<delete in kernel end>*/ ) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@@ -948,9 -1040,13 +948,9 @@@ ("IEEE80211_AMPDU_TX_START: TID:%d\n", tid)); return rtl_tx_agg_start(hw, vif, sta, tid, ssn); break; case IEEE80211_AMPDU_TX_STOP_CONT: case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: -#else - case IEEE80211_AMPDU_TX_STOP: -#endif RT_TRACE(COMP_MAC80211, DBG_TRACE, ("IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid)); return rtl_tx_agg_stop(hw, vif, sta, tid); @@@ -1078,6 -1174,9 +1078,6 @@@ static int rtl_op_set_key(struct ieee80 mutex_lock(&rtlpriv->locks.conf_mutex); /* <1> get encryption alg */
-/*<delete in kernel start>*/ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) -/*<delete in kernel end>*/ switch (key->cipher) { case WLAN_CIPHER_SUITE_WEP40: key_type = WEP40_ENCRYPTION; @@@ -1110,6 -1209,43 +1110,6 @@@ ("alg_err:%x!!!!:\n", key->cipher)); goto out_unlock; } -/*<delete in kernel start>*/ -#else - switch (key->alg) { - case ALG_WEP: - if (key->keylen == WLAN_KEY_LEN_WEP40) { - key_type = WEP40_ENCRYPTION; - RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:WEP40\n")); - } else { - RT_TRACE(COMP_SEC, DBG_DMESG, - ("alg:WEP104\n")); - key_type = WEP104_ENCRYPTION; - } - break; - case ALG_TKIP: - key_type = TKIP_ENCRYPTION; - RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:TKIP\n")); - break; - case ALG_CCMP: - key_type = AESCCMP_ENCRYPTION; - RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:CCMP\n")); - break; - case ALG_AES_CMAC: - /*HW don't support CMAC encryption, use software CMAC encryption */ - key_type = AESCMAC_ENCRYPTION; - RT_TRACE(COMP_SEC, DBG_DMESG, ("alg:CMAC\n")); - RT_TRACE(COMP_SEC, DBG_DMESG, - ("HW don't support CMAC encryption, " - "use software CMAC encryption\n")); - err = -EOPNOTSUPP; - goto out_unlock; - default: - RT_TRACE(COMP_ERR, DBG_EMERG, - ("alg_err:%x!!!!:\n", key->alg)); - goto out_unlock; - } -#endif -/*<delete in kernel end>*/ if(key_type == WEP40_ENCRYPTION || key_type == WEP104_ENCRYPTION || vif->type == NL80211_IFTYPE_ADHOC) @@@ -1278,7 -1414,9 +1278,9 @@@ static void rtl_op_rfkill_poll(struct i * before switch channel or power save, or tx buffer packet * maybe send after offchannel or rf sleep, this may cause * dis-association by AP */ - static void rtl_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop) + static void rtl_op_flush(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u32 queues, bool drop) { struct rtl_priv *rtlpriv = rtl_priv(hw);
@@@ -1292,7 -1430,13 +1294,7 @@@ const struct ieee80211_ops rtl_ops = .tx = rtl_op_tx, .add_interface = rtl_op_add_interface, .remove_interface = rtl_op_remove_interface, -/*<delete in kernel start>*/ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) -/*<delete in kernel end>*/ .change_interface = rtl_op_change_interface, -/*<delete in kernel start>*/ -#endif -/*<delete in kernel end>*/ .config = rtl_op_config, .configure_filter = rtl_op_configure_filter, .set_key = rtl_op_set_key, diff --combined drivers/usb/gadget/u_ether.c index fe0880d,ce8e281..3d78a88 --- a/drivers/usb/gadget/u_ether.c +++ b/drivers/usb/gadget/u_ether.c @@@ -793,7 -793,7 +793,7 @@@ struct eth_dev *gether_setup_name(struc
net->netdev_ops = ð_netdev_ops;
- SET_ETHTOOL_OPS(net, &ops); + net->ethtool_ops = &ops;
dev->gadget = g; SET_NETDEV_DEV(net, &g->dev); @@@ -818,7 -818,7 +818,7 @@@
return dev; } -EXPORT_SYMBOL(gether_setup_name); +EXPORT_SYMBOL_GPL(gether_setup_name);
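Every gether_* export in this file is being tightened from EXPORT_SYMBOL to EXPORT_SYMBOL_GPL, so the u_ether helpers are only linkable from GPL-compatible modules; the MODULE_LICENSE("GPL") at the bottom of the file is what keeps the in-tree gadget drivers working. The pattern, for reference, with a stand-in function:

	#include <linux/export.h>
	#include <linux/netdevice.h>

	int gether_example_helper(struct net_device *net)
	{
		return 0;	/* stand-in body for illustration */
	}
	EXPORT_SYMBOL_GPL(gether_example_helper);	/* GPL-only linkage */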
struct net_device *gether_setup_name_default(const char *netname) { @@@ -850,12 -850,12 +850,12 @@@
net->netdev_ops = ð_netdev_ops;
- SET_ETHTOOL_OPS(net, &ops); + net->ethtool_ops = &ops; SET_NETDEV_DEVTYPE(net, &gadget_type);
return net; } -EXPORT_SYMBOL(gether_setup_name_default); +EXPORT_SYMBOL_GPL(gether_setup_name_default);
int gether_register_netdev(struct net_device *net) { @@@ -893,7 -893,7 +893,7 @@@
return status; } -EXPORT_SYMBOL(gether_register_netdev); +EXPORT_SYMBOL_GPL(gether_register_netdev);
void gether_set_gadget(struct net_device *net, struct usb_gadget *g) { @@@ -903,7 -903,7 +903,7 @@@ dev->gadget = g; SET_NETDEV_DEV(net, &g->dev); } -EXPORT_SYMBOL(gether_set_gadget); +EXPORT_SYMBOL_GPL(gether_set_gadget);
int gether_set_dev_addr(struct net_device *net, const char *dev_addr) { @@@ -916,7 -916,7 +916,7 @@@ memcpy(dev->dev_mac, new_addr, ETH_ALEN); return 0; } -EXPORT_SYMBOL(gether_set_dev_addr); +EXPORT_SYMBOL_GPL(gether_set_dev_addr);
int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len) { @@@ -925,7 -925,7 +925,7 @@@ dev = netdev_priv(net); return get_ether_addr_str(dev->dev_mac, dev_addr, len); } -EXPORT_SYMBOL(gether_get_dev_addr); +EXPORT_SYMBOL_GPL(gether_get_dev_addr);
int gether_set_host_addr(struct net_device *net, const char *host_addr) { @@@ -938,7 -938,7 +938,7 @@@ memcpy(dev->host_mac, new_addr, ETH_ALEN); return 0; } -EXPORT_SYMBOL(gether_set_host_addr); +EXPORT_SYMBOL_GPL(gether_set_host_addr);
int gether_get_host_addr(struct net_device *net, char *host_addr, int len) { @@@ -947,7 -947,7 +947,7 @@@ dev = netdev_priv(net); return get_ether_addr_str(dev->host_mac, host_addr, len); } -EXPORT_SYMBOL(gether_get_host_addr); +EXPORT_SYMBOL_GPL(gether_get_host_addr);
int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len) { @@@ -961,7 -961,7 +961,7 @@@
return strlen(host_addr); } -EXPORT_SYMBOL(gether_get_host_addr_cdc); +EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);
void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN]) { @@@ -970,7 -970,7 +970,7 @@@ dev = netdev_priv(net); memcpy(host_mac, dev->host_mac, ETH_ALEN); } -EXPORT_SYMBOL(gether_get_host_addr_u8); +EXPORT_SYMBOL_GPL(gether_get_host_addr_u8);
void gether_set_qmult(struct net_device *net, unsigned qmult) { @@@ -979,7 -979,7 +979,7 @@@ dev = netdev_priv(net); dev->qmult = qmult; } -EXPORT_SYMBOL(gether_set_qmult); +EXPORT_SYMBOL_GPL(gether_set_qmult);
unsigned gether_get_qmult(struct net_device *net) { @@@ -988,7 -988,7 +988,7 @@@ dev = netdev_priv(net); return dev->qmult; } -EXPORT_SYMBOL(gether_get_qmult); +EXPORT_SYMBOL_GPL(gether_get_qmult);
int gether_get_ifname(struct net_device *net, char *name, int len) { @@@ -997,7 -997,7 +997,7 @@@ rtnl_unlock(); return strlen(name); } -EXPORT_SYMBOL(gether_get_ifname); +EXPORT_SYMBOL_GPL(gether_get_ifname);
/** * gether_cleanup - remove Ethernet-over-USB device @@@ -1014,7 -1014,7 +1014,7 @@@ void gether_cleanup(struct eth_dev *dev flush_work(&dev->work); free_netdev(dev->net); } -EXPORT_SYMBOL(gether_cleanup); +EXPORT_SYMBOL_GPL(gether_cleanup);
/** * gether_connect - notify network layer that USB link is active @@@ -1095,7 -1095,7 +1095,7 @@@ fail0 return ERR_PTR(result); return dev->net; } -EXPORT_SYMBOL(gether_connect); +EXPORT_SYMBOL_GPL(gether_connect);
/** * gether_disconnect - notify network layer that USB link is inactive @@@ -1166,7 -1166,7 +1166,7 @@@ void gether_disconnect(struct gether *l dev->port_usb = NULL; spin_unlock(&dev->lock); } -EXPORT_SYMBOL(gether_disconnect); +EXPORT_SYMBOL_GPL(gether_disconnect);
MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Brownell"); diff --combined include/linux/netdevice.h index 6c1ae9f,774e539..abe3de1 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@@ -56,9 -56,6 +56,6 @@@ struct device struct phy_device; /* 802.11 specific */ struct wireless_dev; - /* source back-compat hooks */ - #define SET_ETHTOOL_OPS(netdev,ops) \ - ( (netdev)->ethtool_ops = (ops) )
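The block removed from netdevice.h above is the last trace of the SET_ETHTOOL_OPS() back-compat macro; all the driver hunks earlier in this mail (davinci_emac, ipheth, et131x, xlr_net, u_ether) convert to direct assignment. A sketch of the conversion, with a hypothetical ops table:

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	static const struct ethtool_ops example_ethtool_ops = {
		.get_link = ethtool_op_get_link,	/* stock helper */
	};

	static void example_setup(struct net_device *netdev)
	{
		/* was: SET_ETHTOOL_OPS(netdev, &example_ethtool_ops); */
		netdev->ethtool_ops = &example_ethtool_ops;
	}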
void netdev_set_default_ethtool_ops(struct net_device *dev, const struct ethtool_ops *ops); @@@ -493,7 -490,7 +490,7 @@@ static inline void napi_disable(struct static inline void napi_enable(struct napi_struct *n) { BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(NAPI_STATE_SCHED, &n->state); }
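napi_enable() here picks up the same rename seen in the wlcore and Bluetooth hunks of this merge: smp_mb__before_clear_bit()/smp_mb__after_clear_bit() become the generic smp_mb__before_atomic()/smp_mb__after_atomic(), which pair with any non-value-returning atomic op rather than just bit clears. Minimal sketch of the paired usage:

	#include <linux/atomic.h>
	#include <linux/bitops.h>

	static void example_enable(unsigned long *state)
	{
		/* order earlier stores before the RMW, as napi_enable() does */
		smp_mb__before_atomic();
		clear_bit(0, state);
	}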
@@@ -853,7 -850,8 +850,8 @@@ typedef u16 (*select_queue_fallback_t)( * SR-IOV management functions. * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos); - * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate); + * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate, + * int max_tx_rate); * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); * int (*ndo_get_vf_config)(struct net_device *dev, * int vf, struct ifla_vf_info *ivf); @@@ -1047,8 -1045,9 +1045,9 @@@ struct net_device_ops int queue, u8 *mac); int (*ndo_set_vf_vlan)(struct net_device *dev, int queue, u16 vlan, u8 qos); - int (*ndo_set_vf_tx_rate)(struct net_device *dev, - int vf, int rate); + int (*ndo_set_vf_rate)(struct net_device *dev, + int vf, int min_tx_rate, + int max_tx_rate); int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); int (*ndo_get_vf_config)(struct net_device *dev, @@@ -2634,6 -2633,7 +2633,7 @@@ int dev_get_phys_port_id(struct net_dev struct netdev_phys_port_id *ppid); int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq); + int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
@@@ -3003,6 -3003,15 +3003,15 @@@ int __hw_addr_sync(struct netdev_hw_add struct netdev_hw_addr_list *from_list, int addr_len); void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, struct netdev_hw_addr_list *from_list, int addr_len); + int __hw_addr_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, const unsigned char *), + int (*unsync)(struct net_device *, + const unsigned char *)); + void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, + const unsigned char *)); void __hw_addr_init(struct netdev_hw_addr_list *list);
/* Functions used for device addresses handling */ @@@ -3023,6 -3032,38 +3032,38 @@@ void dev_uc_unsync(struct net_device *t void dev_uc_flush(struct net_device *dev); void dev_uc_init(struct net_device *dev);
+ /** + * __dev_uc_sync - Synchronize device's unicast list + * @dev: device to sync + * @sync: function to call if address should be added + * @unsync: function to call if address should be removed + * + * Add newly added addresses to the interface, and release + * addresses that have been deleted. + **/ + static inline int __dev_uc_sync(struct net_device *dev, + int (*sync)(struct net_device *, + const unsigned char *), + int (*unsync)(struct net_device *, + const unsigned char *)) + { + return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync); + } + + /** + * __dev_uc_unsync - Remove synchronized addresses from device + * @dev: device to sync + * @unsync: function to call if address should be removed + * + * Remove all addresses that were added to the device by dev_uc_sync(). + **/ + static inline void __dev_uc_unsync(struct net_device *dev, + int (*unsync)(struct net_device *, + const unsigned char *)) + { + __hw_addr_unsync_dev(&dev->uc, dev, unsync); + } + /* Functions used for multicast addresses handling */ int dev_mc_add(struct net_device *dev, const unsigned char *addr); int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); @@@ -3035,6 -3076,38 +3076,38 @@@ void dev_mc_unsync(struct net_device *t void dev_mc_flush(struct net_device *dev); void dev_mc_init(struct net_device *dev);
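The new __dev_uc_sync()/__dev_uc_unsync() helpers (and their multicast twins just below) let a driver mirror the net_device address lists into hardware through per-address callbacks instead of walking the lists by hand; the bridge hunks at the end of this mail sit on the same dev_uc_add()/dev_uc_del() machinery. A hedged sketch of a driver's ndo_set_rx_mode using them, where the my_hw_* callbacks are hypothetical:

	#include <linux/netdevice.h>

	static int my_hw_addr_add(struct net_device *dev, const unsigned char *addr)
	{
		/* program one filter entry into hypothetical hardware */
		return 0;
	}

	static int my_hw_addr_del(struct net_device *dev, const unsigned char *addr)
	{
		/* remove the entry again */
		return 0;
	}

	static void my_set_rx_mode(struct net_device *dev)
	{
		__dev_uc_sync(dev, my_hw_addr_add, my_hw_addr_del);
		__dev_mc_sync(dev, my_hw_addr_add, my_hw_addr_del);
	}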
+ /** + * __dev_mc_sync - Synchronize device's multicast list + * @dev: device to sync + * @sync: function to call if address should be added + * @unsync: function to call if address should be removed + * + * Add newly added addresses to the interface, and release + * addresses that have been deleted. + **/ + static inline int __dev_mc_sync(struct net_device *dev, + int (*sync)(struct net_device *, + const unsigned char *), + int (*unsync)(struct net_device *, + const unsigned char *)) + { + return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync); + } + + /** + * __dev_mc_unsync - Remove synchronized addresses from device + * @dev: device to sync + * @unsync: function to call if address should be removed + * + * Remove all addresses that were added to the device by dev_mc_sync(). + **/ + static inline void __dev_mc_unsync(struct net_device *dev, + int (*unsync)(struct net_device *, + const unsigned char *)) + { + __hw_addr_unsync_dev(&dev->mc, dev, unsync); + } + /* Functions used for secondary unicast and multicast support */ void dev_set_rx_mode(struct net_device *dev); void __dev_set_rx_mode(struct net_device *dev); @@@ -3180,6 -3253,20 +3253,20 @@@ const char *netdev_drivername(const str
void linkwatch_run_queue(void);
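The netdev_intersect_features() helper added just below needs its temporary widening because NETIF_F_GEN_CSUM ("can checksum anything") logically implies the protocol-specific NETIF_F_ALL_CSUM bits: expanding GEN_CSUM on each side before the AND, then trimming afterwards, keeps a generic-checksum device from intersecting to zero against a protocol-specific one. Worked example:

	#include <linux/netdevice.h>

	static netdev_features_t example_merge(void)
	{
		/* plain (f1 & f2) would be 0; the helper yields NETIF_F_IP_CSUM */
		return netdev_intersect_features(NETIF_F_GEN_CSUM,
						 NETIF_F_IP_CSUM);
	}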
+ static inline netdev_features_t netdev_intersect_features(netdev_features_t f1, + netdev_features_t f2) + { + if (f1 & NETIF_F_GEN_CSUM) + f1 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM); + if (f2 & NETIF_F_GEN_CSUM) + f2 |= (NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM); + f1 &= f2; + if (f1 & NETIF_F_GEN_CSUM) + f1 &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM); + + return f1; + } + static inline netdev_features_t netdev_get_wanted_features( struct net_device *dev) { diff --combined include/linux/netlink.h index 034cda7,7a28115..9e572da --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@@ -16,10 -16,9 +16,10 @@@ static inline struct nlmsghdr *nlmsg_hd }
enum netlink_skb_flags { - NETLINK_SKB_MMAPED = 0x1, /* Packet data is mmaped */ - NETLINK_SKB_TX = 0x2, /* Packet was sent by userspace */ - NETLINK_SKB_DELIVERED = 0x4, /* Packet was delivered */ + NETLINK_SKB_MMAPED = 0x1, /* Packet data is mmaped */ + NETLINK_SKB_TX = 0x2, /* Packet was sent by userspace */ + NETLINK_SKB_DELIVERED = 0x4, /* Packet was delivered */ + NETLINK_SKB_DST = 0x8, /* Dst set in sendto or sendmsg */ };
struct netlink_skb_parms { @@@ -46,7 -45,8 +46,8 @@@ struct netlink_kernel_cfg unsigned int flags; void (*input)(struct sk_buff *skb); struct mutex *cb_mutex; - void (*bind)(int group); + int (*bind)(int group); + void (*unbind)(int group); bool (*compare)(struct net *net, struct sock *sk); };
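The netlink_kernel_cfg change above turns bind() from void into int and adds a matching unbind(), so a protocol can now veto a multicast group join; the audit hunk right below, which introduces AUDIT_NLGRP_READLOG, is the kind of user this enables (a subsystem can, say, demand a capability before allowing a join). Hedged sketch of a kernel socket using the hooks, with invented names and group ids:

	#include <linux/netlink.h>

	static int my_bind(int group)
	{
		if (group != 1)			/* only one valid group here */
			return -EINVAL;		/* veto the join */
		return 0;
	}

	static void my_unbind(int group)
	{
		/* drop any per-group state taken in my_bind() */
	}

	static struct netlink_kernel_cfg my_cfg = {
		.groups	= 1,
		.bind	= my_bind,
		.unbind	= my_unbind,
	};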
diff --combined include/uapi/linux/audit.h index 4c31a36,b21ea45..cf67147 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@@ -357,7 -357,7 +357,7 @@@ enum #define AUDIT_ARCH_MIPS64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|\ __AUDIT_ARCH_CONVENTION_MIPS64_N32) #define AUDIT_ARCH_MIPSEL64 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) -#define AUDIT_ARCH_MIPSEL64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE\ +#define AUDIT_ARCH_MIPSEL64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE|\ __AUDIT_ARCH_CONVENTION_MIPS64_N32) #define AUDIT_ARCH_OPENRISC (EM_OPENRISC) #define AUDIT_ARCH_PARISC (EM_PARISC) @@@ -385,6 -385,14 +385,14 @@@ */ #define AUDIT_MESSAGE_TEXT_MAX 8560
+ /* Multicast Netlink socket groups (default up to 32) */ + enum audit_nlgrps { + AUDIT_NLGRP_NONE, /* Group 0 not used */ + AUDIT_NLGRP_READLOG, /* "best effort" read only socket */ + __AUDIT_NLGRP_MAX + }; + #define AUDIT_NLGRP_MAX (__AUDIT_NLGRP_MAX - 1) + struct audit_status { __u32 mask; /* Bit mask for valid entries */ __u32 enabled; /* 1 = enabled, 0 = disabled */ diff --combined kernel/sysctl.c index df42e15,e36ae4b..d8cf6a0 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@@ -152,6 -152,10 +152,6 @@@ static unsigned long hung_task_timeout_ #ifdef CONFIG_SPARC #endif
-#ifdef CONFIG_SPARC64 -extern int sysctl_tsb_ratio; -#endif - #ifdef __hppa__ extern int pwrsw_enabled; #endif @@@ -639,7 -643,7 +639,7 @@@ static struct ctl_table kern_table[] = .extra2 = &one, }, #endif - +#ifdef CONFIG_UEVENT_HELPER { .procname = "hotplug", .data = &uevent_helper, @@@ -647,7 -651,7 +647,7 @@@ .mode = 0644, .proc_handler = proc_dostring, }, - +#endif #ifdef CONFIG_CHR_DEV_SG { .procname = "sg-big-buff", @@@ -2497,11 -2501,11 +2497,11 @@@ int proc_do_large_bitmap(struct ctl_tab bool first = 1; size_t left = *lenp; unsigned long bitmap_len = table->maxlen; - unsigned long *bitmap = (unsigned long *) table->data; + unsigned long *bitmap = *(unsigned long **) table->data; unsigned long *tmp_bitmap = NULL; char tr_a[] = { '-', ',', '\n' }, tr_b[] = { ',', '\n', 0 }, c;
- if (!bitmap_len || !left || (*ppos && !write)) { + if (!bitmap || !bitmap_len || !left || (*ppos && !write)) { *lenp = 0; return 0; } diff --combined lib/Kconfig.debug index e548aa0,d1b7bdf..6da2c25 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@@ -180,7 -180,7 +180,7 @@@ config STRIP_ASM_SYM
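Back in the kernel/sysctl.c hunk above, the proc_do_large_bitmap() fix changes the contract for its ctl_table entries: table->data is now read as a pointer to the bitmap pointer (unsigned long **) rather than as the bitmap itself, and a NULL bitmap is tolerated, which allows the real storage to be allocated lazily. A hedged sketch of a table entry under the new convention, with invented names:

	#include <linux/sysctl.h>

	static unsigned long *my_bitmap;	/* may still be NULL at registration */

	static struct ctl_table my_table[] = {
		{
			.procname	= "my-bitmap",
			.data		= &my_bitmap,	/* pointer to pointer */
			.maxlen		= 1024,		/* number of bits */
			.mode		= 0644,
			.proc_handler	= proc_do_large_bitmap,
		},
		{ }
	};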
config READABLE_ASM bool "Generate readable assembler code" - depends on DEBUG_KERNEL + depends on DEBUG_KERNEL && !LTO help Disable some compiler optimizations that tend to generate human unreadable assembler output. This may make the kernel slightly slower, but it helps @@@ -1620,6 -1620,19 +1620,19 @@@ config TEST_USER_COP
If unsure, say N.
+ config TEST_BPF + tristate "Test BPF filter functionality" + default n + depends on m && NET + help + This builds the "test_bpf" module that runs various test vectors + against the BPF interpreter or BPF JIT compiler depending on the + current setting. This is in particular useful for BPF JIT compiler + development, but also to run regression tests against changes in + the interpreter code. + + If unsure, say N. + source "samples/Kconfig"
source "lib/Kconfig.kgdb" diff --combined net/bluetooth/hci_event.c index 682f33a,3454807..6f1c894 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@@ -45,7 -45,7 +45,7 @@@ static void hci_cc_inquiry_cancel(struc return;
clear_bit(HCI_INQUIRY, &hdev->flags); - smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */ + smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ wake_up_bit(&hdev->flags, HCI_INQUIRY);
hci_conn_check_pending(hdev); @@@ -991,10 -991,25 +991,25 @@@ static void hci_cc_le_set_adv_enable(st if (!sent) return;
+ if (status) + return; + hci_dev_lock(hdev);
- if (!status) - mgmt_advertising(hdev, *sent); + /* If we're doing connection initiation as peripheral, set a + * timeout in case something goes wrong. + */ + if (*sent) { + struct hci_conn *conn; + + conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); + if (conn) + queue_delayed_work(hdev->workqueue, + &conn->le_conn_timeout, + HCI_LE_CONN_TIMEOUT); + } + + mgmt_advertising(hdev, *sent);
hci_dev_unlock(hdev); } @@@ -1018,6 -1033,33 +1033,33 @@@ static void hci_cc_le_set_scan_param(st hci_dev_unlock(hdev); }
+ static bool has_pending_adv_report(struct hci_dev *hdev) + { + struct discovery_state *d = &hdev->discovery; + + return bacmp(&d->last_adv_addr, BDADDR_ANY); + } + + static void clear_pending_adv_report(struct hci_dev *hdev) + { + struct discovery_state *d = &hdev->discovery; + + bacpy(&d->last_adv_addr, BDADDR_ANY); + d->last_adv_data_len = 0; + } + + static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 bdaddr_type, s8 rssi, u8 *data, u8 len) + { + struct discovery_state *d = &hdev->discovery; + + bacpy(&d->last_adv_addr, bdaddr); + d->last_adv_addr_type = bdaddr_type; + d->last_adv_rssi = rssi; + memcpy(d->last_adv_data, data, len); + d->last_adv_data_len = len; + } + static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) { @@@ -1036,9 -1078,25 +1078,25 @@@ switch (cp->enable) { case LE_SCAN_ENABLE: set_bit(HCI_LE_SCAN, &hdev->dev_flags); + if (hdev->le_scan_type == LE_SCAN_ACTIVE) + clear_pending_adv_report(hdev); break;
case LE_SCAN_DISABLE: + /* We do this here instead of when setting DISCOVERY_STOPPED + * since the latter would potentially require waiting for + * inquiry to stop too. + */ + if (has_pending_adv_report(hdev)) { + struct discovery_state *d = &hdev->discovery; + + mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, + d->last_adv_addr_type, NULL, + d->last_adv_rssi, 0, 1, + d->last_adv_data, + d->last_adv_data_len, NULL, 0); + } + /* Cancel this timer so that we don't try to disable scanning * when it's already disabled. */ @@@ -1187,6 -1245,59 +1245,59 @@@ static void hci_cc_write_remote_amp_ass amp_write_rem_assoc_continue(hdev, rp->phy_handle); }
+ static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb) + { + struct hci_rp_read_rssi *rp = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); + + if (rp->status) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); + if (conn) + conn->rssi = rp->rssi; + + hci_dev_unlock(hdev); + } + + static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb) + { + struct hci_cp_read_tx_power *sent; + struct hci_rp_read_tx_power *rp = (void *) skb->data; + struct hci_conn *conn; + + BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); + + if (rp->status) + return; + + sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER); + if (!sent) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); + if (!conn) + goto unlock; + + switch (sent->type) { + case 0x00: + conn->tx_power = rp->tx_power; + break; + case 0x01: + conn->max_tx_power = rp->tx_power; + break; + } + + unlock: + hci_dev_unlock(hdev); + } + static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) { BT_DBG("%s status 0x%2.2x", hdev->name, status); @@@ -1768,7 -1879,7 +1879,7 @@@ static void hci_inquiry_complete_evt(st if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) return;
- smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */ + smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ wake_up_bit(&hdev->flags, HCI_INQUIRY);
if (!test_bit(HCI_MGMT, &hdev->dev_flags)) @@@ -1827,7 -1938,7 +1938,7 @@@ static void hci_inquiry_result_evt(stru name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp); mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, info->dev_class, 0, !name_known, ssp, NULL, - 0); + 0, NULL, 0); }
hci_dev_unlock(hdev); @@@ -2579,6 -2690,14 +2690,14 @@@ static void hci_cmd_complete_evt(struc hci_cc_write_remote_amp_assoc(hdev, skb); break;
+ case HCI_OP_READ_RSSI: + hci_cc_read_rssi(hdev, skb); + break; + + case HCI_OP_READ_TX_POWER: + hci_cc_read_tx_power(hdev, skb); + break; + default: BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); break; @@@ -3102,7 -3221,7 +3221,7 @@@ static void hci_inquiry_result_with_rss false, &ssp); mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, info->dev_class, info->rssi, - !name_known, ssp, NULL, 0); + !name_known, ssp, NULL, 0, NULL, 0); } } else { struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); @@@ -3120,7 -3239,7 +3239,7 @@@ false, &ssp); mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, info->dev_class, info->rssi, - !name_known, ssp, NULL, 0); + !name_known, ssp, NULL, 0, NULL, 0); } }
@@@ -3309,7 -3428,7 +3428,7 @@@ static void hci_extended_inquiry_result eir_len = eir_get_length(info->data, sizeof(info->data)); mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, info->dev_class, info->rssi, !name_known, - ssp, info->data, eir_len); + ssp, info->data, eir_len, NULL, 0); }
hci_dev_unlock(hdev); @@@ -3367,24 -3486,20 +3486,20 @@@ unlock
static u8 hci_get_auth_req(struct hci_conn *conn) { - /* If remote requests dedicated bonding follow that lead */ - if (conn->remote_auth == HCI_AT_DEDICATED_BONDING || - conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) { - /* If both remote and local IO capabilities allow MITM - * protection then require it, otherwise don't */ - if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT || - conn->io_capability == HCI_IO_NO_INPUT_OUTPUT) - return HCI_AT_DEDICATED_BONDING; - else - return HCI_AT_DEDICATED_BONDING_MITM; - } - /* If remote requests no-bonding follow that lead */ if (conn->remote_auth == HCI_AT_NO_BONDING || conn->remote_auth == HCI_AT_NO_BONDING_MITM) return conn->remote_auth | (conn->auth_type & 0x01);
- return conn->auth_type; + /* If both remote and local have enough IO capabilities, require + * MITM protection + */ + if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT && + conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) + return conn->remote_auth | 0x01; + + /* No MITM protection possible so ignore remote requirement */ + return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01); }
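The rewritten hci_get_auth_req() leans on the HCI encoding of authentication requirements: bit 0 is the MITM-protection flag and the upper bits select the bonding type, so `| 0x01` requests MITM and `& ~0x01` strips it. Under that encoding, if the remote asks for general bonding without MITM (0x04) and both sides have usable IO capabilities, the function now answers 0x05; if either side is NoInputNoOutput, MITM is impossible, so only our own MITM preference survives. Tiny illustration:

	#include <linux/types.h>

	static u8 example_escalate(u8 remote_auth)
	{
		/* 0x04 (general bonding, no MITM) -> 0x05 (with MITM) */
		return remote_auth | 0x01;
	}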
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) @@@ -3414,8 -3529,21 +3529,21 @@@ * to DisplayYesNo as it is not supported by BT spec. */ cp.capability = (conn->io_capability == 0x04) ? HCI_IO_DISPLAY_YESNO : conn->io_capability; - conn->auth_type = hci_get_auth_req(conn); - cp.authentication = conn->auth_type; + + /* If we are initiators, there is no remote information yet */ + if (conn->remote_auth == 0xff) { + cp.authentication = conn->auth_type; + + /* Request MITM protection if our IO caps allow it + * except for the no-bonding case + */ + if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && + cp.authentication != HCI_AT_NO_BONDING) + cp.authentication |= 0x01; + } else { + conn->auth_type = hci_get_auth_req(conn); + cp.authentication = conn->auth_type; + }
if (hci_find_remote_oob_data(hdev, &conn->dst) && (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags))) @@@ -3483,12 -3611,9 +3611,9 @@@ static void hci_user_confirm_request_ev rem_mitm = (conn->remote_auth & 0x01);
/* If we require MITM but the remote device can't provide that - * (it has NoInputNoOutput) then reject the confirmation - * request. The only exception is when we're dedicated bonding - * initiators (connect_cfm_cb set) since then we always have the MITM - * bit set. */ - if (!conn->connect_cfm_cb && loc_mitm && - conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) { + * (it has NoInputNoOutput) then reject the confirmation request + */ + if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) { BT_DBG("Rejecting request: remote device can't provide MITM"); hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, sizeof(ev->bdaddr), &ev->bdaddr); @@@ -3846,17 -3971,6 +3971,6 @@@ static void hci_le_conn_complete_evt(st
conn->dst_type = ev->bdaddr_type;
- /* The advertising parameters for own address type - * define which source address and source address - * type this connections has. - */ - if (bacmp(&conn->src, BDADDR_ANY)) { - conn->src_type = ADDR_LE_DEV_PUBLIC; - } else { - bacpy(&conn->src, &hdev->static_addr); - conn->src_type = ADDR_LE_DEV_RANDOM; - } - if (ev->role == LE_CONN_ROLE_MASTER) { conn->out = true; conn->link_mode |= HCI_LM_MASTER; @@@ -3881,27 -3995,24 +3995,24 @@@ &conn->init_addr, &conn->init_addr_type); } - } else { - /* Set the responder (our side) address type based on - * the advertising address type. - */ - conn->resp_addr_type = hdev->adv_addr_type; - if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) - bacpy(&conn->resp_addr, &hdev->random_addr); - else - bacpy(&conn->resp_addr, &hdev->bdaddr); - - conn->init_addr_type = ev->bdaddr_type; - bacpy(&conn->init_addr, &ev->bdaddr); } } else { cancel_delayed_work(&conn->le_conn_timeout); }
- /* Ensure that the hci_conn contains the identity address type - * regardless of which address the connection was made with. - */ - hci_copy_identity_address(hdev, &conn->src, &conn->src_type); + if (!conn->out) { + /* Set the responder (our side) address type based on + * the advertising address type. + */ + conn->resp_addr_type = hdev->adv_addr_type; + if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) + bacpy(&conn->resp_addr, &hdev->random_addr); + else + bacpy(&conn->resp_addr, &hdev->bdaddr); + + conn->init_addr_type = ev->bdaddr_type; + bacpy(&conn->init_addr, &ev->bdaddr); + }
/* Lookup the identity address from the stored connection * address and address type. @@@ -3981,25 -4092,97 +4092,97 @@@ static void check_pending_le_conn(struc } }
+ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, + u8 bdaddr_type, s8 rssi, u8 *data, u8 len) + { + struct discovery_state *d = &hdev->discovery; + bool match; + + /* Passive scanning shouldn't trigger any device found events */ + if (hdev->le_scan_type == LE_SCAN_PASSIVE) { + if (type == LE_ADV_IND || type == LE_ADV_DIRECT_IND) + check_pending_le_conn(hdev, bdaddr, bdaddr_type); + return; + } + + /* If there's nothing pending either store the data from this + * event or send an immediate device found event if the data + * should not be stored for later. + */ + if (!has_pending_adv_report(hdev)) { + /* If the report will trigger a SCAN_REQ store it for + * later merging. + */ + if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { + store_pending_adv_report(hdev, bdaddr, bdaddr_type, + rssi, data, len); + return; + } + + mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, + rssi, 0, 1, data, len, NULL, 0); + return; + } + + /* Check if the pending report is for the same device as the new one */ + match = (!bacmp(bdaddr, &d->last_adv_addr) && + bdaddr_type == d->last_adv_addr_type); + + /* If the pending data doesn't match this report or this isn't a + * scan response (e.g. we got a duplicate ADV_IND) then force + * sending of the pending data. + */ + if (type != LE_ADV_SCAN_RSP || !match) { + /* Send out whatever is in the cache, but skip duplicates */ + if (!match) + mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, + d->last_adv_addr_type, NULL, + d->last_adv_rssi, 0, 1, + d->last_adv_data, + d->last_adv_data_len, NULL, 0); + + /* If the new report will trigger a SCAN_REQ store it for + * later merging. + */ + if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { + store_pending_adv_report(hdev, bdaddr, bdaddr_type, + rssi, data, len); + return; + } + + /* The advertising reports cannot be merged, so clear + * the pending report and send out a device found event. + */ + clear_pending_adv_report(hdev); + mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, + rssi, 0, 1, data, len, NULL, 0); + return; + } + + /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and + * the new event is a SCAN_RSP. We can therefore proceed with + * sending a merged device found event. + */ + mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, + d->last_adv_addr_type, NULL, rssi, 0, 1, data, len, + d->last_adv_data, d->last_adv_data_len); + clear_pending_adv_report(hdev); + } + static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) { u8 num_reports = skb->data[0]; void *ptr = &skb->data[1]; - s8 rssi;
hci_dev_lock(hdev);
while (num_reports--) { struct hci_ev_le_advertising_info *ev = ptr; - - if (ev->evt_type == LE_ADV_IND || - ev->evt_type == LE_ADV_DIRECT_IND) - check_pending_le_conn(hdev, &ev->bdaddr, - ev->bdaddr_type); + s8 rssi;
rssi = ev->data[ev->length]; - mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type, - NULL, rssi, 0, 1, ev->data, ev->length); + process_adv_report(hdev, ev->evt_type, &ev->bdaddr, + ev->bdaddr_type, rssi, ev->data, ev->length);
ptr += sizeof(*ev) + ev->length + 1; } diff --combined net/bridge/br_fdb.c index 474d36f,2c45c06..b524c36 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@@ -85,8 -85,58 +85,58 @@@ static void fdb_rcu_free(struct rcu_hea kmem_cache_free(br_fdb_cache, ent); }
+ /* When a static FDB entry is added, the mac address from the entry is + * added to the bridge private HW address list and all required ports + * are then updated with the new information. + * Called under RTNL. + */ + static void fdb_add_hw(struct net_bridge *br, const unsigned char *addr) + { + int err; + struct net_bridge_port *p, *tmp; + + ASSERT_RTNL(); + + list_for_each_entry(p, &br->port_list, list) { + if (!br_promisc_port(p)) { + err = dev_uc_add(p->dev, addr); + if (err) + goto undo; + } + } + + return; + undo: + list_for_each_entry(tmp, &br->port_list, list) { + if (tmp == p) + break; + if (!br_promisc_port(tmp)) + dev_uc_del(tmp->dev, addr); + } + } + + /* When a static FDB entry is deleted, the HW address from that entry is + * also removed from the bridge private HW address list and updates all + * the ports with needed information. + * Called under RTNL. + */ + static void fdb_del_hw(struct net_bridge *br, const unsigned char *addr) + { + struct net_bridge_port *p; + + ASSERT_RTNL(); + + list_for_each_entry(p, &br->port_list, list) { + if (!br_promisc_port(p)) + dev_uc_del(p->dev, addr); + } + } + static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f) { + if (f->is_static) + fdb_del_hw(br, f->addr.addr); + hlist_del_rcu(&f->hlist); fdb_notify(br, f, RTM_DELNEIGH); call_rcu(&f->rcu, fdb_rcu_free); @@@ -466,6 -516,7 +516,7 @@@ static int fdb_insert(struct net_bridg return -ENOMEM;
fdb->is_local = fdb->is_static = 1; + fdb_add_hw(br, addr); fdb_notify(br, fdb, RTM_NEWNEIGH); return 0; } @@@ -487,7 -538,6 +538,7 @@@ void br_fdb_update(struct net_bridge *b { struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; struct net_bridge_fdb_entry *fdb; + bool fdb_modified = false;
/* some users want to always flood. */ if (hold_time(br) == 0) @@@ -508,15 -558,10 +559,15 @@@ source->dev->name); } else { /* fastpath: update of existing entry */ - fdb->dst = source; + if (unlikely(source != fdb->dst)) { + fdb->dst = source; + fdb_modified = true; + } fdb->updated = jiffies; if (unlikely(added_by_user)) fdb->added_by_user = 1; + if (unlikely(fdb_modified)) + fdb_notify(br, fdb, RTM_NEWNEIGH); } } else { spin_lock(&br->hash_lock); @@@ -571,6 -616,8 +622,8 @@@ static int fdb_fill_info(struct sk_buf
if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr)) goto nla_put_failure; + if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex)) + goto nla_put_failure; ci.ndm_used = jiffies_to_clock_t(now - fdb->used); ci.ndm_confirmed = 0; ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated); @@@ -592,6 -639,7 +645,7 @@@ static inline size_t fdb_nlmsg_size(voi { return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN) /* NDA_LLADDR */ + + nla_total_size(sizeof(u32)) /* NDA_MASTER */ + nla_total_size(sizeof(u16)) /* NDA_VLAN */ + nla_total_size(sizeof(struct nda_cacheinfo)); } @@@ -684,13 -732,25 +738,25 @@@ static int fdb_add_entry(struct net_bri }
if (fdb_to_nud(fdb) != state) { - if (state & NUD_PERMANENT) - fdb->is_local = fdb->is_static = 1; - else if (state & NUD_NOARP) { + if (state & NUD_PERMANENT) { + fdb->is_local = 1; + if (!fdb->is_static) { + fdb->is_static = 1; + fdb_add_hw(br, addr); + } + } else if (state & NUD_NOARP) { fdb->is_local = 0; - fdb->is_static = 1; - } else - fdb->is_local = fdb->is_static = 0; + if (!fdb->is_static) { + fdb->is_static = 1; + fdb_add_hw(br, addr); + } + } else { + fdb->is_local = 0; + if (fdb->is_static) { + fdb->is_static = 0; + fdb_del_hw(br, addr); + } + }
modified = true; } @@@ -880,3 -940,59 +946,59 @@@ int br_fdb_delete(struct ndmsg *ndm, st out: return err; } + + int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p) + { + struct net_bridge_fdb_entry *fdb, *tmp; + int i; + int err; + + ASSERT_RTNL(); + + for (i = 0; i < BR_HASH_SIZE; i++) { + hlist_for_each_entry(fdb, &br->hash[i], hlist) { + /* We only care for static entries */ + if (!fdb->is_static) + continue; + + err = dev_uc_add(p->dev, fdb->addr.addr); + if (err) + goto rollback; + } + } + return 0; + + rollback: + for (i = 0; i < BR_HASH_SIZE; i++) { + hlist_for_each_entry(tmp, &br->hash[i], hlist) { + /* If we reached the fdb that failed, we can stop */ + if (tmp == fdb) + break; + + /* We only care for static entries */ + if (!tmp->is_static) + continue; + + dev_uc_del(p->dev, tmp->addr.addr); + } + } + return err; + } + + void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p) + { + struct net_bridge_fdb_entry *fdb; + int i; + + ASSERT_RTNL(); + + for (i = 0; i < BR_HASH_SIZE; i++) { + hlist_for_each_entry_rcu(fdb, &br->hash[i], hlist) { + /* We only care for static entries */ + if (!fdb->is_static) + continue; + + dev_uc_del(p->dev, fdb->addr.addr); + } + } + } diff --combined net/bridge/br_private.h index 59d3a85,53d6e32..bc17210 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@@ -174,6 -174,8 +174,8 @@@ struct net_bridge_por #define BR_ADMIN_COST 0x00000010 #define BR_LEARNING 0x00000020 #define BR_FLOOD 0x00000040 + #define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING) + #define BR_PROMISC 0x00000080
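fdb_add_hw() and br_fdb_sync_static() above share one shape: program each port in turn and, on the first failure, undo exactly the ports already programmed, stopping before the one that failed. A standalone sketch of that add-with-rollback pattern; uc_add()/uc_del() are hypothetical stand-ins for dev_uc_add()/dev_uc_del().

#include <stdio.h>

/* Hypothetical stand-ins; port 3 is wired to fail so the rollback
 * path actually runs. */
static int uc_add(int port) { return port == 3 ? -1 : 0; }
static void uc_del(int port) { printf("rollback: del on port %d\n", port); }

/* Program an address on every port; on failure undo only the ports
 * already programmed. Same shape as fdb_add_hw() above. */
static int add_all(int nports)
{
    int i, j;

    for (i = 0; i < nports; i++) {
        if (uc_add(i)) {
            for (j = 0; j < i; j++)   /* stop before the failed port */
                uc_del(j);
            return -1;
        }
        printf("add on port %d\n", i);
    }
    return 0;
}

int main(void)
{
    return add_all(5) ? 1 : 0;   /* ports 0-2 added, then rolled back */
}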
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING struct bridge_mcast_query ip4_query; @@@ -198,6 -200,9 +200,9 @@@ #endif };
+ #define br_auto_port(p) ((p)->flags & BR_AUTO_MASK) + #define br_promisc_port(p) ((p)->flags & BR_PROMISC) + #define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT)
static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev) @@@ -290,6 -295,7 +295,7 @@@ struct net_bridg struct timer_list topology_change_timer; struct timer_list gc_timer; struct kobject *ifobj; + u32 auto_cnt; #ifdef CONFIG_BRIDGE_VLAN_FILTERING u8 vlan_enabled; struct net_port_vlans __rcu *vlan_info; @@@ -327,8 -333,6 +333,6 @@@ struct br_input_skb_cb #define br_debug(br, format, args...) \ pr_debug("%s: " format, (br)->dev->name, ##args)
- extern struct notifier_block br_device_notifier; - /* called under bridge lock */ static inline int br_is_root_bridge(const struct net_bridge *br) { @@@ -395,6 -399,8 +399,8 @@@ int br_fdb_add(struct ndmsg *nlh, struc const unsigned char *addr, u16 nlh_flags); int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev, int idx); + int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p); + void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p);
/* br_forward.c */ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb); @@@ -415,6 -421,8 +421,8 @@@ int br_del_if(struct net_bridge *br, st int br_min_mtu(const struct net_bridge *br); netdev_features_t br_features_recompute(struct net_bridge *br, netdev_features_t features); + void br_port_flags_change(struct net_bridge_port *port, unsigned long mask); + void br_manage_promisc(struct net_bridge *br);
/* br_input.c */ int br_handle_frame_finish(struct sk_buff *skb); @@@ -581,7 -589,6 +589,7 @@@ bool br_allowed_ingress(struct net_brid struct sk_buff *skb, u16 *vid); bool br_allowed_egress(struct net_bridge *br, const struct net_port_vlans *v, const struct sk_buff *skb); +bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid); struct sk_buff *br_handle_vlan(struct net_bridge *br, const struct net_port_vlans *v, struct sk_buff *skb); @@@ -633,6 -640,10 +641,10 @@@ static inline u16 br_get_pvid(const str return v->pvid ?: VLAN_N_VID; }
+ static inline int br_vlan_enabled(struct net_bridge *br) + { + return br->vlan_enabled; + } #else static inline bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, @@@ -649,12 -660,6 +661,12 @@@ static inline bool br_allowed_egress(st return true; }
+static inline bool br_should_learn(struct net_bridge_port *p, + struct sk_buff *skb, u16 *vid) +{ + return true; +} + static inline struct sk_buff *br_handle_vlan(struct net_bridge *br, const struct net_port_vlans *v, struct sk_buff *skb) @@@ -719,6 -724,11 +731,11 @@@ static inline u16 br_get_pvid(const str { return VLAN_N_VID; /* Returns invalid vid */ } + + static inline int br_vlan_enabled(struct net_bridge *br) + { + return 0; + } #endif
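The br_vlan_enabled() pair added above follows the usual config-stub pattern: a real inline when CONFIG_BRIDGE_VLAN_FILTERING is set and a constant stub otherwise, so callers stay free of #ifdefs and dead branches fold away. A sketch of the pattern, with a made-up CONFIG_FEATURE standing in for the real option (build with -DCONFIG_FEATURE to select the live body):

#include <stdio.h>

struct obj { int feature_enabled; };

#ifdef CONFIG_FEATURE
static inline int feature_enabled(const struct obj *o)
{
    return o->feature_enabled;       /* live body under the option */
}
#else
static inline int feature_enabled(const struct obj *o)
{
    (void)o;
    return 0;                        /* compiled out: always off */
}
#endif

int main(void)
{
    struct obj o = { 1 };

    /* With the stub, the compiler can discard this branch entirely. */
    if (feature_enabled(&o))
        printf("feature path\n");
    else
        printf("generic path\n");
    return 0;
}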
/* br_netfilter.c */ diff --combined net/bridge/br_vlan.c index 5fee2fe,24c5cc5..fcc9539 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@@ -241,34 -241,6 +241,34 @@@ bool br_allowed_egress(struct net_bridg return false; }
+/* Called under RCU */ +bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid) +{ + struct net_bridge *br = p->br; + struct net_port_vlans *v; + + if (!br->vlan_enabled) + return true; + + v = rcu_dereference(p->vlan_info); + if (!v) + return false; + + br_vlan_get_tag(skb, vid); + if (!*vid) { + *vid = br_get_pvid(v); + if (*vid == VLAN_N_VID) + return false; + + return true; + } + + if (test_bit(*vid, v->vlan_bitmap)) + return true; + + return false; +} + /* Must be protected by RTNL. * Must be called with vid in range from 1 to 4094 inclusive. */ @@@ -360,6 -332,7 +360,7 @@@ int br_vlan_filter_toggle(struct net_br goto unlock;
br->vlan_enabled = val; + br_manage_promisc(br);
unlock: rtnl_unlock(); diff --combined net/core/dev.c index 8908a68,0355ca5..e4a2ffb --- a/net/core/dev.c +++ b/net/core/dev.c @@@ -1326,7 -1326,7 +1326,7 @@@ static int __dev_close_many(struct list * dev->stop() will invoke napi_disable() on all of its * napi_struct instances on this device. */ - smp_mb__after_clear_bit(); /* Commit netif_running(). */ + smp_mb__after_atomic(); /* Commit netif_running(). */ }
dev_deactivate_many(head); @@@ -1661,6 -1661,29 +1661,29 @@@ bool is_skb_forwardable(struct net_devi } EXPORT_SYMBOL_GPL(is_skb_forwardable);
+ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) + { + if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { + if (skb_copy_ubufs(skb, GFP_ATOMIC)) { + atomic_long_inc(&dev->rx_dropped); + kfree_skb(skb); + return NET_RX_DROP; + } + } + + if (unlikely(!is_skb_forwardable(dev, skb))) { + atomic_long_inc(&dev->rx_dropped); + kfree_skb(skb); + return NET_RX_DROP; + } + + skb_scrub_packet(skb, true); + skb->protocol = eth_type_trans(skb, dev); + + return 0; + } + EXPORT_SYMBOL_GPL(__dev_forward_skb); + /** * dev_forward_skb - loopback an skb to another netif * @@@ -1681,24 -1704,7 +1704,7 @@@ */ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) { - if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { - if (skb_copy_ubufs(skb, GFP_ATOMIC)) { - atomic_long_inc(&dev->rx_dropped); - kfree_skb(skb); - return NET_RX_DROP; - } - } - - if (unlikely(!is_skb_forwardable(dev, skb))) { - atomic_long_inc(&dev->rx_dropped); - kfree_skb(skb); - return NET_RX_DROP; - } - - skb_scrub_packet(skb, true); - skb->protocol = eth_type_trans(skb, dev); - - return netif_rx_internal(skb); + return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); } EXPORT_SYMBOL_GPL(dev_forward_skb);
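The rewritten dev_forward_skb() relies on the GCC/clang `a ?: b` extension, which evaluates a once and yields it if nonzero, otherwise b; the error codes from __dev_forward_skb() therefore short-circuit delivery. A small sketch of the same factoring, with check() and deliver() as hypothetical stand-ins for __dev_forward_skb() and netif_rx_internal():

#include <stdio.h>

static int check(int len)  { return len > 1500 ? 1 : 0; }  /* 0 = ok */
static int deliver(void)   { puts("delivered"); return 0; }

/* a ?: b: a is evaluated once and is the result if nonzero,
 * otherwise b is evaluated, exactly as in dev_forward_skb() above. */
static int forward(int len)
{
    return check(len) ?: deliver();
}

int main(void)
{
    printf("forward(100)  -> %d\n", forward(100));   /* delivered, 0 */
    printf("forward(9000) -> %d\n", forward(9000));  /* dropped, 1   */
    return 0;
}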
@@@ -2283,8 -2289,8 +2289,8 @@@ EXPORT_SYMBOL(skb_checksum_help)
__be16 skb_network_protocol(struct sk_buff *skb, int *depth) { + unsigned int vlan_depth = skb->mac_len; __be16 type = skb->protocol; - int vlan_depth = skb->mac_len;
/* Tunnel gso handlers can set protocol to ethernet. */ if (type == htons(ETH_P_TEB)) { @@@ -2297,30 -2303,15 +2303,30 @@@ type = eth->h_proto; }
- while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { - struct vlan_hdr *vh; - - if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) - return 0; - - vh = (struct vlan_hdr *)(skb->data + vlan_depth); - type = vh->h_vlan_encapsulated_proto; - vlan_depth += VLAN_HLEN; + /* if skb->protocol is 802.1Q/AD then the header should already be + * present at mac_len - VLAN_HLEN (if mac_len > 0), or at + * ETH_HLEN otherwise + */ + if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { + if (vlan_depth) { + if (unlikely(WARN_ON(vlan_depth < VLAN_HLEN))) + return 0; + vlan_depth -= VLAN_HLEN; + } else { + vlan_depth = ETH_HLEN; + } + do { + struct vlan_hdr *vh; + + if (unlikely(!pskb_may_pull(skb, + vlan_depth + VLAN_HLEN))) + return 0; + + vh = (struct vlan_hdr *)(skb->data + vlan_depth); + type = vh->h_vlan_encapsulated_proto; + vlan_depth += VLAN_HLEN; + } while (type == htons(ETH_P_8021Q) || + type == htons(ETH_P_8021AD)); }
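The reworked skb_network_protocol() now walks an arbitrary stack of 802.1Q/802.1AD tags, bounds-checking before each step, and starts from the existing header when mac_len already covers a tag. A userspace sketch of the core walk; the ethertype constants are the real values, while the flat buffer and helpers are simplified stand-ins for the skb machinery.

#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN     14
#define VLAN_HLEN    4
#define ETH_P_8021Q  0x8100
#define ETH_P_8021AD 0x88a8

static unsigned int be16_at(const uint8_t *p)
{
    return ((unsigned int)p[0] << 8) | p[1];
}

/* Walk any stack of VLAN tags; return the inner protocol and the
 * offset where the network header starts. The length check plays
 * the role of pskb_may_pull() in the loop above. */
static unsigned int network_protocol(const uint8_t *pkt, size_t len,
                                     unsigned int *depth)
{
    unsigned int type = be16_at(pkt + ETH_HLEN - 2);  /* outer ethertype */
    unsigned int vlan_depth = ETH_HLEN;

    while (type == ETH_P_8021Q || type == ETH_P_8021AD) {
        if (vlan_depth + VLAN_HLEN > len)
            return 0;                         /* truncated header */
        /* encapsulated proto: last 2 bytes of the 4-byte VLAN header */
        type = be16_at(pkt + vlan_depth + 2);
        vlan_depth += VLAN_HLEN;
    }
    *depth = vlan_depth;
    return type;
}

int main(void)
{
    /* dst(6) src(6) 0x88a8 | tci + 0x8100 | tci + 0x0800 */
    uint8_t pkt[ETH_HLEN + 2 * VLAN_HLEN] = { 0 };
    unsigned int depth = 0;

    pkt[12] = 0x88; pkt[13] = 0xa8;           /* 802.1ad outer tag */
    pkt[16] = 0x81; pkt[17] = 0x00;           /* 802.1q inner tag  */
    pkt[20] = 0x08; pkt[21] = 0x00;           /* IPv4              */
    printf("proto 0x%04x at depth %u\n",
           network_protocol(pkt, sizeof(pkt), &depth), depth);  /* 0x0800, 22 */
    return 0;
}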
*depth = vlan_depth; @@@ -3356,7 -3347,7 +3362,7 @@@ static void net_tx_action(struct softir
root_lock = qdisc_lock(q); if (spin_trylock(root_lock)) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(__QDISC_STATE_SCHED, &q->state); qdisc_run(q); @@@ -3366,7 -3357,7 +3372,7 @@@ &q->state)) { __netif_reschedule(q); } else { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(__QDISC_STATE_SCHED, &q->state); } @@@ -4258,7 -4249,7 +4264,7 @@@ void __napi_complete(struct napi_struc BUG_ON(n->gro_list);
list_del(&n->poll_list); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(NAPI_STATE_SCHED, &n->state); } EXPORT_SYMBOL(__napi_complete); @@@ -5689,10 -5680,6 +5695,6 @@@ static void rollback_registered_many(st */ call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
- if (!dev->rtnl_link_ops || - dev->rtnl_link_state == RTNL_LINK_INITIALIZED) - rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL); - /* * Flush the unicast and multicast chains */ @@@ -5702,6 -5689,10 +5704,10 @@@ if (dev->netdev_ops->ndo_uninit) dev->netdev_ops->ndo_uninit(dev);
+ if (!dev->rtnl_link_ops || + dev->rtnl_link_state == RTNL_LINK_INITIALIZED) + rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL); + /* Notifier chain MUST detach us all upper devices. */ WARN_ON(netdev_has_any_upper_dev(dev));
diff --combined net/core/filter.c index 4aec7b9,842f839..9de0c25 --- a/net/core/filter.c +++ b/net/core/filter.c @@@ -45,6 -45,27 +45,27 @@@ #include <linux/seccomp.h> #include <linux/if_vlan.h>
+ /* Registers */ + #define BPF_R0 regs[BPF_REG_0] + #define BPF_R1 regs[BPF_REG_1] + #define BPF_R2 regs[BPF_REG_2] + #define BPF_R3 regs[BPF_REG_3] + #define BPF_R4 regs[BPF_REG_4] + #define BPF_R5 regs[BPF_REG_5] + #define BPF_R6 regs[BPF_REG_6] + #define BPF_R7 regs[BPF_REG_7] + #define BPF_R8 regs[BPF_REG_8] + #define BPF_R9 regs[BPF_REG_9] + #define BPF_R10 regs[BPF_REG_10] + + /* Named registers */ + #define A regs[insn->a_reg] + #define X regs[insn->x_reg] + #define FP regs[BPF_REG_FP] + #define ARG1 regs[BPF_REG_ARG1] + #define CTX regs[BPF_REG_CTX] + #define K insn->imm + /* No hurry in this branch * * Exported for the bpf jit load helper. @@@ -57,9 -78,9 +78,9 @@@ void *bpf_internal_load_pointer_neg_hel ptr = skb_network_header(skb) + k - SKF_NET_OFF; else if (k >= SKF_LL_OFF) ptr = skb_mac_header(skb) + k - SKF_LL_OFF; - if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb)) return ptr; + return NULL; }
@@@ -68,6 -89,7 +89,7 @@@ static inline void *load_pointer(const { if (k >= 0) return skb_header_pointer(skb, k, size, buffer); + return bpf_internal_load_pointer_neg_helper(skb, k, size); }
@@@ -122,13 -144,6 +144,6 @@@ noinline u64 __bpf_call_base(u64 r1, u6 return 0; }
- /* Register mappings for user programs. */ - #define A_REG 0 - #define X_REG 7 - #define TMP_REG 8 - #define ARG2_REG 2 - #define ARG3_REG 3 - /** * __sk_run_filter - run a filter on a given context * @ctx: buffer to run the filter on @@@ -138,212 -153,213 +153,213 @@@ * keep, 0 for none. @ctx is the data we are operating on, @insn is the * array of filter instructions. */ - unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn) + static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn) { u64 stack[MAX_BPF_STACK / sizeof(u64)]; u64 regs[MAX_BPF_REG], tmp; static const void *jumptable[256] = { [0 ... 255] = &&default_label, /* Now overwrite non-defaults ... */ - #define DL(A, B, C) [A|B|C] = &&A##_##B##_##C - DL(BPF_ALU, BPF_ADD, BPF_X), - DL(BPF_ALU, BPF_ADD, BPF_K), - DL(BPF_ALU, BPF_SUB, BPF_X), - DL(BPF_ALU, BPF_SUB, BPF_K), - DL(BPF_ALU, BPF_AND, BPF_X), - DL(BPF_ALU, BPF_AND, BPF_K), - DL(BPF_ALU, BPF_OR, BPF_X), - DL(BPF_ALU, BPF_OR, BPF_K), - DL(BPF_ALU, BPF_LSH, BPF_X), - DL(BPF_ALU, BPF_LSH, BPF_K), - DL(BPF_ALU, BPF_RSH, BPF_X), - DL(BPF_ALU, BPF_RSH, BPF_K), - DL(BPF_ALU, BPF_XOR, BPF_X), - DL(BPF_ALU, BPF_XOR, BPF_K), - DL(BPF_ALU, BPF_MUL, BPF_X), - DL(BPF_ALU, BPF_MUL, BPF_K), - DL(BPF_ALU, BPF_MOV, BPF_X), - DL(BPF_ALU, BPF_MOV, BPF_K), - DL(BPF_ALU, BPF_DIV, BPF_X), - DL(BPF_ALU, BPF_DIV, BPF_K), - DL(BPF_ALU, BPF_MOD, BPF_X), - DL(BPF_ALU, BPF_MOD, BPF_K), - DL(BPF_ALU, BPF_NEG, 0), - DL(BPF_ALU, BPF_END, BPF_TO_BE), - DL(BPF_ALU, BPF_END, BPF_TO_LE), - DL(BPF_ALU64, BPF_ADD, BPF_X), - DL(BPF_ALU64, BPF_ADD, BPF_K), - DL(BPF_ALU64, BPF_SUB, BPF_X), - DL(BPF_ALU64, BPF_SUB, BPF_K), - DL(BPF_ALU64, BPF_AND, BPF_X), - DL(BPF_ALU64, BPF_AND, BPF_K), - DL(BPF_ALU64, BPF_OR, BPF_X), - DL(BPF_ALU64, BPF_OR, BPF_K), - DL(BPF_ALU64, BPF_LSH, BPF_X), - DL(BPF_ALU64, BPF_LSH, BPF_K), - DL(BPF_ALU64, BPF_RSH, BPF_X), - DL(BPF_ALU64, BPF_RSH, BPF_K), - DL(BPF_ALU64, BPF_XOR, BPF_X), - DL(BPF_ALU64, BPF_XOR, BPF_K), - DL(BPF_ALU64, BPF_MUL, BPF_X), - DL(BPF_ALU64, BPF_MUL, BPF_K), - DL(BPF_ALU64, BPF_MOV, BPF_X), - DL(BPF_ALU64, BPF_MOV, BPF_K), - DL(BPF_ALU64, BPF_ARSH, BPF_X), - DL(BPF_ALU64, BPF_ARSH, BPF_K), - DL(BPF_ALU64, BPF_DIV, BPF_X), - DL(BPF_ALU64, BPF_DIV, BPF_K), - DL(BPF_ALU64, BPF_MOD, BPF_X), - DL(BPF_ALU64, BPF_MOD, BPF_K), - DL(BPF_ALU64, BPF_NEG, 0), - DL(BPF_JMP, BPF_CALL, 0), - DL(BPF_JMP, BPF_JA, 0), - DL(BPF_JMP, BPF_JEQ, BPF_X), - DL(BPF_JMP, BPF_JEQ, BPF_K), - DL(BPF_JMP, BPF_JNE, BPF_X), - DL(BPF_JMP, BPF_JNE, BPF_K), - DL(BPF_JMP, BPF_JGT, BPF_X), - DL(BPF_JMP, BPF_JGT, BPF_K), - DL(BPF_JMP, BPF_JGE, BPF_X), - DL(BPF_JMP, BPF_JGE, BPF_K), - DL(BPF_JMP, BPF_JSGT, BPF_X), - DL(BPF_JMP, BPF_JSGT, BPF_K), - DL(BPF_JMP, BPF_JSGE, BPF_X), - DL(BPF_JMP, BPF_JSGE, BPF_K), - DL(BPF_JMP, BPF_JSET, BPF_X), - DL(BPF_JMP, BPF_JSET, BPF_K), - DL(BPF_JMP, BPF_EXIT, 0), - DL(BPF_STX, BPF_MEM, BPF_B), - DL(BPF_STX, BPF_MEM, BPF_H), - DL(BPF_STX, BPF_MEM, BPF_W), - DL(BPF_STX, BPF_MEM, BPF_DW), - DL(BPF_STX, BPF_XADD, BPF_W), - DL(BPF_STX, BPF_XADD, BPF_DW), - DL(BPF_ST, BPF_MEM, BPF_B), - DL(BPF_ST, BPF_MEM, BPF_H), - DL(BPF_ST, BPF_MEM, BPF_W), - DL(BPF_ST, BPF_MEM, BPF_DW), - DL(BPF_LDX, BPF_MEM, BPF_B), - DL(BPF_LDX, BPF_MEM, BPF_H), - DL(BPF_LDX, BPF_MEM, BPF_W), - DL(BPF_LDX, BPF_MEM, BPF_DW), - DL(BPF_LD, BPF_ABS, BPF_W), - DL(BPF_LD, BPF_ABS, BPF_H), - DL(BPF_LD, BPF_ABS, BPF_B), - DL(BPF_LD, BPF_IND, BPF_W), - DL(BPF_LD, BPF_IND, BPF_H), - DL(BPF_LD, BPF_IND, BPF_B), - #undef DL + /* 32 bit ALU operations */ + [BPF_ALU 
| BPF_ADD | BPF_X] = &&ALU_ADD_X, + [BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K, + [BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X, + [BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K, + [BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X, + [BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K, + [BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X, + [BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K, + [BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X, + [BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K, + [BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X, + [BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K, + [BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X, + [BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K, + [BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X, + [BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K, + [BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X, + [BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K, + [BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X, + [BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K, + [BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X, + [BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K, + [BPF_ALU | BPF_NEG] = &&ALU_NEG, + [BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE, + [BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE, + /* 64 bit ALU operations */ + [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X, + [BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K, + [BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X, + [BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K, + [BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X, + [BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K, + [BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X, + [BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K, + [BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X, + [BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K, + [BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X, + [BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K, + [BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X, + [BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K, + [BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X, + [BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K, + [BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X, + [BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K, + [BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X, + [BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K, + [BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X, + [BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K, + [BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X, + [BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K, + [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG, + /* Call instruction */ + [BPF_JMP | BPF_CALL] = &&JMP_CALL, + /* Jumps */ + [BPF_JMP | BPF_JA] = &&JMP_JA, + [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X, + [BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K, + [BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X, + [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K, + [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X, + [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K, + [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X, + [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K, + [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X, + [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K, + [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X, + [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K, + [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X, + [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K, + /* Program return */ + [BPF_JMP | BPF_EXIT] = &&JMP_EXIT, + /* Store instructions */ + [BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B, + [BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H, + [BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W, + [BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW, + [BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W, + [BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW, + [BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B, + [BPF_ST | BPF_MEM | BPF_H] = 
&&ST_MEM_H, + [BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W, + [BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW, + /* Load instructions */ + [BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B, + [BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H, + [BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W, + [BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW, + [BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W, + [BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H, + [BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B, + [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W, + [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H, + [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B, }; + void *ptr; + int off; + + #define CONT ({ insn++; goto select_insn; }) + #define CONT_JMP ({ insn++; goto select_insn; })
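The rewritten jumptable above is a threaded-code dispatcher built on GCC's computed-goto extension: an array of label addresses indexed by opcode, with CONT advancing the instruction pointer and jumping back to the dispatch point. A self-contained sketch of the technique, for a made-up three-opcode accumulator machine:

#include <stdio.h>

enum { OP_PUSH, OP_ADD, OP_HALT };

struct insn { int code; int imm; };

static int run(const struct insn *insn)
{
    static const void *jumptable[] = {
        [OP_PUSH] = &&PUSH,
        [OP_ADD]  = &&ADD,
        [OP_HALT] = &&HALT,
    };
    int acc = 0, tmp = 0;

#define CONT do { insn++; goto select_insn; } while (0)

select_insn:
    goto *jumptable[insn->code];
PUSH:                        /* tmp = acc; acc = imm */
    tmp = acc;
    acc = insn->imm;
    CONT;
ADD:                         /* acc += tmp */
    acc += tmp;
    CONT;
HALT:
    return acc;
#undef CONT
}

int main(void)
{
    const struct insn prog[] = {
        { OP_PUSH, 2 }, { OP_PUSH, 40 }, { OP_ADD, 0 }, { OP_HALT, 0 },
    };

    printf("%d\n", run(prog));   /* 42 */
    return 0;
}

Relative to a big switch, each handler ends in its own indirect jump, which branch predictors tend to handle better in interpreter loops; the trade-off is the dependence on a GNU C extension.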
- regs[FP_REG] = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; - regs[ARG1_REG] = (u64) (unsigned long) ctx; - regs[A_REG] = 0; - regs[X_REG] = 0; + FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; + ARG1 = (u64) (unsigned long) ctx; + + /* Register for user BPF programs need to be reset first. */ + regs[BPF_REG_A] = 0; + regs[BPF_REG_X] = 0;
select_insn: goto *jumptable[insn->code];
/* ALU */ #define ALU(OPCODE, OP) \ - BPF_ALU64_##OPCODE##_BPF_X: \ + ALU64_##OPCODE##_X: \ A = A OP X; \ CONT; \ - BPF_ALU_##OPCODE##_BPF_X: \ + ALU_##OPCODE##_X: \ A = (u32) A OP (u32) X; \ CONT; \ - BPF_ALU64_##OPCODE##_BPF_K: \ + ALU64_##OPCODE##_K: \ A = A OP K; \ CONT; \ - BPF_ALU_##OPCODE##_BPF_K: \ + ALU_##OPCODE##_K: \ A = (u32) A OP (u32) K; \ CONT;
- ALU(BPF_ADD, +) - ALU(BPF_SUB, -) - ALU(BPF_AND, &) - ALU(BPF_OR, |) - ALU(BPF_LSH, <<) - ALU(BPF_RSH, >>) - ALU(BPF_XOR, ^) - ALU(BPF_MUL, *) + ALU(ADD, +) + ALU(SUB, -) + ALU(AND, &) + ALU(OR, |) + ALU(LSH, <<) + ALU(RSH, >>) + ALU(XOR, ^) + ALU(MUL, *) #undef ALU - BPF_ALU_BPF_NEG_0: + ALU_NEG: A = (u32) -A; CONT; - BPF_ALU64_BPF_NEG_0: + ALU64_NEG: A = -A; CONT; - BPF_ALU_BPF_MOV_BPF_X: + ALU_MOV_X: A = (u32) X; CONT; - BPF_ALU_BPF_MOV_BPF_K: + ALU_MOV_K: A = (u32) K; CONT; - BPF_ALU64_BPF_MOV_BPF_X: + ALU64_MOV_X: A = X; CONT; - BPF_ALU64_BPF_MOV_BPF_K: + ALU64_MOV_K: A = K; CONT; - BPF_ALU64_BPF_ARSH_BPF_X: + ALU64_ARSH_X: (*(s64 *) &A) >>= X; CONT; - BPF_ALU64_BPF_ARSH_BPF_K: + ALU64_ARSH_K: (*(s64 *) &A) >>= K; CONT; - BPF_ALU64_BPF_MOD_BPF_X: + ALU64_MOD_X: if (unlikely(X == 0)) return 0; tmp = A; A = do_div(tmp, X); CONT; - BPF_ALU_BPF_MOD_BPF_X: + ALU_MOD_X: if (unlikely(X == 0)) return 0; tmp = (u32) A; A = do_div(tmp, (u32) X); CONT; - BPF_ALU64_BPF_MOD_BPF_K: + ALU64_MOD_K: tmp = A; A = do_div(tmp, K); CONT; - BPF_ALU_BPF_MOD_BPF_K: + ALU_MOD_K: tmp = (u32) A; A = do_div(tmp, (u32) K); CONT; - BPF_ALU64_BPF_DIV_BPF_X: + ALU64_DIV_X: if (unlikely(X == 0)) return 0; do_div(A, X); CONT; - BPF_ALU_BPF_DIV_BPF_X: + ALU_DIV_X: if (unlikely(X == 0)) return 0; tmp = (u32) A; do_div(tmp, (u32) X); A = (u32) tmp; CONT; - BPF_ALU64_BPF_DIV_BPF_K: + ALU64_DIV_K: do_div(A, K); CONT; - BPF_ALU_BPF_DIV_BPF_K: + ALU_DIV_K: tmp = (u32) A; do_div(tmp, (u32) K); A = (u32) tmp; CONT; - BPF_ALU_BPF_END_BPF_TO_BE: + ALU_END_TO_BE: switch (K) { case 16: A = (__force u16) cpu_to_be16(A); @@@ -356,7 -372,7 +372,7 @@@ break; } CONT; - BPF_ALU_BPF_END_BPF_TO_LE: + ALU_END_TO_LE: switch (K) { case 16: A = (__force u16) cpu_to_le16(A); @@@ -371,142 -387,144 +387,144 @@@ CONT;
/* CALL */ - BPF_JMP_BPF_CALL_0: - /* Function call scratches R1-R5 registers, preserves R6-R9, - * and stores return value into R0. + JMP_CALL: + /* Function call scratches BPF_R1-BPF_R5 registers, + * preserves BPF_R6-BPF_R9, and stores return value + * into BPF_R0. */ - R0 = (__bpf_call_base + insn->imm)(regs[1], regs[2], regs[3], - regs[4], regs[5]); + BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3, + BPF_R4, BPF_R5); CONT;
/* JMP */ - BPF_JMP_BPF_JA_0: + JMP_JA: insn += insn->off; CONT; - BPF_JMP_BPF_JEQ_BPF_X: + JMP_JEQ_X: if (A == X) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JEQ_BPF_K: + JMP_JEQ_K: if (A == K) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JNE_BPF_X: + JMP_JNE_X: if (A != X) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JNE_BPF_K: + JMP_JNE_K: if (A != K) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JGT_BPF_X: + JMP_JGT_X: if (A > X) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JGT_BPF_K: + JMP_JGT_K: if (A > K) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JGE_BPF_X: + JMP_JGE_X: if (A >= X) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JGE_BPF_K: + JMP_JGE_K: if (A >= K) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JSGT_BPF_X: - if (((s64)A) > ((s64)X)) { + JMP_JSGT_X: + if (((s64) A) > ((s64) X)) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JSGT_BPF_K: - if (((s64)A) > ((s64)K)) { + JMP_JSGT_K: + if (((s64) A) > ((s64) K)) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JSGE_BPF_X: - if (((s64)A) >= ((s64)X)) { + JMP_JSGE_X: + if (((s64) A) >= ((s64) X)) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JSGE_BPF_K: - if (((s64)A) >= ((s64)K)) { + JMP_JSGE_K: + if (((s64) A) >= ((s64) K)) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JSET_BPF_X: + JMP_JSET_X: if (A & X) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_JSET_BPF_K: + JMP_JSET_K: if (A & K) { insn += insn->off; CONT_JMP; } CONT; - BPF_JMP_BPF_EXIT_0: - return R0; + JMP_EXIT: + return BPF_R0;
/* STX and ST and LDX*/ #define LDST(SIZEOP, SIZE) \ - BPF_STX_BPF_MEM_##SIZEOP: \ + STX_MEM_##SIZEOP: \ *(SIZE *)(unsigned long) (A + insn->off) = X; \ CONT; \ - BPF_ST_BPF_MEM_##SIZEOP: \ + ST_MEM_##SIZEOP: \ *(SIZE *)(unsigned long) (A + insn->off) = K; \ CONT; \ - BPF_LDX_BPF_MEM_##SIZEOP: \ + LDX_MEM_##SIZEOP: \ A = *(SIZE *)(unsigned long) (X + insn->off); \ CONT;
- LDST(BPF_B, u8) - LDST(BPF_H, u16) - LDST(BPF_W, u32) - LDST(BPF_DW, u64) + LDST(B, u8) + LDST(H, u16) + LDST(W, u32) + LDST(DW, u64) #undef LDST - BPF_STX_BPF_XADD_BPF_W: /* lock xadd *(u32 *)(A + insn->off) += X */ + STX_XADD_W: /* lock xadd *(u32 *)(A + insn->off) += X */ atomic_add((u32) X, (atomic_t *)(unsigned long) (A + insn->off)); CONT; - BPF_STX_BPF_XADD_BPF_DW: /* lock xadd *(u64 *)(A + insn->off) += X */ + STX_XADD_DW: /* lock xadd *(u64 *)(A + insn->off) += X */ atomic64_add((u64) X, (atomic64_t *)(unsigned long) (A + insn->off)); CONT; - BPF_LD_BPF_ABS_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + K)) */ + LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + K)) */ off = K; load_word: - /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only - * appearing in the programs where ctx == skb. All programs - * keep 'ctx' in regs[CTX_REG] == R6, sk_convert_filter() - * saves it in R6, internal BPF verifier will check that - * R6 == ctx. + /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are + * only appearing in the programs where ctx == + * skb. All programs keep 'ctx' in regs[BPF_REG_CTX] + * == BPF_R6, sk_convert_filter() saves it in BPF_R6, + * internal BPF verifier will check that BPF_R6 == + * ctx. * - * BPF_ABS and BPF_IND are wrappers of function calls, so - * they scratch R1-R5 registers, preserve R6-R9, and store - * return value into R0. + * BPF_ABS and BPF_IND are wrappers of function calls, + * so they scratch BPF_R1-BPF_R5 registers, preserve + * BPF_R6-BPF_R9, and store return value into BPF_R0. * * Implicit input: * ctx @@@ -516,39 -534,43 +534,43 @@@ * K == 32-bit immediate * * Output: - * R0 - 8/16/32-bit skb data converted to cpu endianness + * BPF_R0 - 8/16/32-bit skb data converted to cpu endianness */ + ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp); if (likely(ptr != NULL)) { - R0 = get_unaligned_be32(ptr); + BPF_R0 = get_unaligned_be32(ptr); CONT; } + return 0; - BPF_LD_BPF_ABS_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + K)) */ + LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + K)) */ off = K; load_half: ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp); if (likely(ptr != NULL)) { - R0 = get_unaligned_be16(ptr); + BPF_R0 = get_unaligned_be16(ptr); CONT; } + return 0; - BPF_LD_BPF_ABS_BPF_B: /* R0 = *(u8 *) (ctx + K) */ + LD_ABS_B: /* BPF_R0 = *(u8 *) (ctx + K) */ off = K; load_byte: ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp); if (likely(ptr != NULL)) { - R0 = *(u8 *)ptr; + BPF_R0 = *(u8 *)ptr; CONT; } + return 0; - BPF_LD_BPF_IND_BPF_W: /* R0 = ntohl(*(u32 *) (skb->data + X + K)) */ + LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + X + K)) */ off = K + X; goto load_word; - BPF_LD_BPF_IND_BPF_H: /* R0 = ntohs(*(u16 *) (skb->data + X + K)) */ + LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + X + K)) */ off = K + X; goto load_half; - BPF_LD_BPF_IND_BPF_B: /* R0 = *(u8 *) (skb->data + X + K) */ + LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + X + K) */ off = K + X; goto load_byte;
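All the LD_ABS/LD_IND handlers above share one contract: load_pointer() either returns a pointer valid for the whole access or NULL, and NULL makes the interpreter abort the program with return 0. A simplified stand-in for that bounded load; the kernel version also handles negative offsets and nonlinear skbs, which this sketch omits.

#include <stdint.h>
#include <stdio.h>

/* Either the whole [off, off+size) range is inside the buffer, or
 * the caller gets NULL and must abort, as the handlers above do. */
static const void *load_ptr(const uint8_t *data, size_t len,
                            int off, size_t size)
{
    if (off < 0 || (size_t)off + size > len)
        return NULL;
    return data + off;
}

static uint32_t be32_at(const void *p)
{
    const uint8_t *b = p;
    return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
           ((uint32_t)b[2] << 8) | b[3];
}

int main(void)
{
    uint8_t pkt[8] = { 0, 0, 0, 42 };
    const void *p;

    p = load_ptr(pkt, sizeof(pkt), 0, 4);
    printf("word at 0: %u\n", p ? be32_at(p) : 0);       /* 42 */

    p = load_ptr(pkt, sizeof(pkt), 6, 4);                /* crosses end */
    printf("word at 6: %s\n", p ? "ok" : "program aborted");
    return 0;
}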
@@@ -556,24 -578,8 +578,8 @@@ /* If we ever reach this, we have a bug somewhere. */ WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code); return 0; - #undef CONT_JMP - #undef CONT - - #undef R0 - #undef X - #undef A - #undef K }
- u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx, - const struct sock_filter_int *insni) - __attribute__ ((alias ("__sk_run_filter"))); - - u32 sk_run_filter_int_skb(const struct sk_buff *ctx, - const struct sock_filter_int *insni) - __attribute__ ((alias ("__sk_run_filter"))); - EXPORT_SYMBOL_GPL(sk_run_filter_int_skb); - /* Helper to find the offset of pkt_type in sk_buff structure. We want * to make sure it's still a 3-bit field starting at a byte boundary; * taken from arch/x86/net/bpf_jit_comp.c. @@@ -594,16 -600,14 +600,14 @@@ static unsigned int pkt_type_offset(voi return -1; }
- static u64 __skb_get_pay_offset(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) + static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) { - struct sk_buff *skb = (struct sk_buff *)(long) ctx; - - return __skb_get_poff(skb); + return __skb_get_poff((struct sk_buff *)(unsigned long) ctx); }
- static u64 __skb_get_nlattr(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) + static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) { - struct sk_buff *skb = (struct sk_buff *)(long) ctx; + struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx; struct nlattr *nla;
if (skb_is_nonlinear(skb)) @@@ -612,19 -616,19 +616,19 @@@ if (skb->len < sizeof(struct nlattr)) return 0;
- if (A > skb->len - sizeof(struct nlattr)) + if (a > skb->len - sizeof(struct nlattr)) return 0;
- nla = nla_find((struct nlattr *) &skb->data[A], skb->len - A, X); + nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x); if (nla) return (void *) nla - (void *) skb->data;
return 0; }
- static u64 __skb_get_nlattr_nest(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) + static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) { - struct sk_buff *skb = (struct sk_buff *)(long) ctx; + struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx; struct nlattr *nla;
if (skb_is_nonlinear(skb)) @@@ -633,25 -637,31 +637,31 @@@ if (skb->len < sizeof(struct nlattr)) return 0;
- if (A > skb->len - sizeof(struct nlattr)) + if (a > skb->len - sizeof(struct nlattr)) return 0;
- nla = (struct nlattr *) &skb->data[A]; - if (nla->nla_len > skb->len - A) + nla = (struct nlattr *) &skb->data[a]; + if (nla->nla_len > skb->len - a) return 0;
- nla = nla_find_nested(nla, X); + nla = nla_find_nested(nla, x); if (nla) return (void *) nla - (void *) skb->data;
return 0; }
- static u64 __get_raw_cpu_id(u64 ctx, u64 A, u64 X, u64 r4, u64 r5) + static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) { return raw_smp_processor_id(); }
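Every helper here has the same five-u64 signature, which is what lets the interpreter's JMP_CALL invoke any of them through a single function-pointer type: R1-R5 go in as arguments, R0 receives the result, and unused slots are simply ignored. A sketch of that calling convention; the helper body is hypothetical, not a kernel helper.

#include <stdint.h>
#include <stdio.h>

/* One function-pointer type fits every helper. */
typedef uint64_t (*bpf_func_t)(uint64_t r1, uint64_t r2, uint64_t r3,
                               uint64_t r4, uint64_t r5);

static uint64_t helper_add(uint64_t a, uint64_t b, uint64_t r3,
                           uint64_t r4, uint64_t r5)
{
    (void)r3; (void)r4; (void)r5;    /* unused slots still exist */
    return a + b;
}

int main(void)
{
    bpf_func_t fn = helper_add;
    uint64_t regs[6] = { 0, 40, 2, 0, 0, 0 };    /* R0..R5 */

    /* what JMP_CALL does: R0 = fn(R1, R2, R3, R4, R5) */
    regs[0] = fn(regs[1], regs[2], regs[3], regs[4], regs[5]);
    printf("R0 = %llu\n", (unsigned long long)regs[0]);   /* 42 */
    return 0;
}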
+ /* note that this only generates 32-bit random numbers */ + static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) + { + return prandom_u32(); + } + static bool convert_bpf_extensions(struct sock_filter *fp, struct sock_filter_int **insnp) { @@@ -661,119 -671,79 +671,79 @@@ case SKF_AD_OFF + SKF_AD_PROTOCOL: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
- insn->code = BPF_LDX | BPF_MEM | BPF_H; - insn->a_reg = A_REG; - insn->x_reg = CTX_REG; - insn->off = offsetof(struct sk_buff, protocol); - insn++; - + /* A = *(u16 *) (ctx + offsetof(protocol)) */ + *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, + offsetof(struct sk_buff, protocol)); /* A = ntohs(A) [emitting a nop or swap16] */ - insn->code = BPF_ALU | BPF_END | BPF_FROM_BE; - insn->a_reg = A_REG; - insn->imm = 16; + *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16); break;
case SKF_AD_OFF + SKF_AD_PKTTYPE: - insn->code = BPF_LDX | BPF_MEM | BPF_B; - insn->a_reg = A_REG; - insn->x_reg = CTX_REG; - insn->off = pkt_type_offset(); + *insn = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX, + pkt_type_offset()); if (insn->off < 0) return false; insn++; - - insn->code = BPF_ALU | BPF_AND | BPF_K; - insn->a_reg = A_REG; - insn->imm = PKT_TYPE_MAX; + *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX); break;
case SKF_AD_OFF + SKF_AD_IFINDEX: case SKF_AD_OFF + SKF_AD_HATYPE: - if (FIELD_SIZEOF(struct sk_buff, dev) == 8) - insn->code = BPF_LDX | BPF_MEM | BPF_DW; - else - insn->code = BPF_LDX | BPF_MEM | BPF_W; - insn->a_reg = TMP_REG; - insn->x_reg = CTX_REG; - insn->off = offsetof(struct sk_buff, dev); - insn++; - - insn->code = BPF_JMP | BPF_JNE | BPF_K; - insn->a_reg = TMP_REG; - insn->imm = 0; - insn->off = 1; - insn++; - - insn->code = BPF_JMP | BPF_EXIT; - insn++; - BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2); - - insn->a_reg = A_REG; - insn->x_reg = TMP_REG; - - if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) { - insn->code = BPF_LDX | BPF_MEM | BPF_W; - insn->off = offsetof(struct net_device, ifindex); - } else { - insn->code = BPF_LDX | BPF_MEM | BPF_H; - insn->off = offsetof(struct net_device, type); - } + BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0); + + *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)), + BPF_REG_TMP, BPF_REG_CTX, + offsetof(struct sk_buff, dev)); + /* if (tmp != 0) goto pc + 1 */ + *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1); + *insn++ = BPF_EXIT_INSN(); + if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) + *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP, + offsetof(struct net_device, ifindex)); + else + *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP, + offsetof(struct net_device, type)); break;
case SKF_AD_OFF + SKF_AD_MARK: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
- insn->code = BPF_LDX | BPF_MEM | BPF_W; - insn->a_reg = A_REG; - insn->x_reg = CTX_REG; - insn->off = offsetof(struct sk_buff, mark); + *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, + offsetof(struct sk_buff, mark)); break;
case SKF_AD_OFF + SKF_AD_RXHASH: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
- insn->code = BPF_LDX | BPF_MEM | BPF_W; - insn->a_reg = A_REG; - insn->x_reg = CTX_REG; - insn->off = offsetof(struct sk_buff, hash); + *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, + offsetof(struct sk_buff, hash)); break;
case SKF_AD_OFF + SKF_AD_QUEUE: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
- insn->code = BPF_LDX | BPF_MEM | BPF_H; - insn->a_reg = A_REG; - insn->x_reg = CTX_REG; - insn->off = offsetof(struct sk_buff, queue_mapping); + *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, + offsetof(struct sk_buff, queue_mapping)); break;
case SKF_AD_OFF + SKF_AD_VLAN_TAG: case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); - - insn->code = BPF_LDX | BPF_MEM | BPF_H; - insn->a_reg = A_REG; - insn->x_reg = CTX_REG; - insn->off = offsetof(struct sk_buff, vlan_tci); - insn++; - BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
+ /* A = *(u16 *) (ctx + offsetof(vlan_tci)) */ + *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, + offsetof(struct sk_buff, vlan_tci)); if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) { - insn->code = BPF_ALU | BPF_AND | BPF_K; - insn->a_reg = A_REG; - insn->imm = ~VLAN_TAG_PRESENT; + *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, + ~VLAN_TAG_PRESENT); } else { - insn->code = BPF_ALU | BPF_RSH | BPF_K; - insn->a_reg = A_REG; - insn->imm = 12; - insn++; - - insn->code = BPF_ALU | BPF_AND | BPF_K; - insn->a_reg = A_REG; - insn->imm = 1; + /* A >>= 12 */ + *insn++ = BPF_ALU32_IMM(BPF_RSH, BPF_REG_A, 12); + /* A &= 1 */ + *insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 1); } break;
@@@ -781,46 -751,36 +751,36 @@@ case SKF_AD_OFF + SKF_AD_NLATTR: case SKF_AD_OFF + SKF_AD_NLATTR_NEST: case SKF_AD_OFF + SKF_AD_CPU: + case SKF_AD_OFF + SKF_AD_RANDOM: /* arg1 = ctx */ - insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = ARG1_REG; - insn->x_reg = CTX_REG; - insn++; - + *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX); /* arg2 = A */ - insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = ARG2_REG; - insn->x_reg = A_REG; - insn++; - + *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A); /* arg3 = X */ - insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = ARG3_REG; - insn->x_reg = X_REG; - insn++; - + *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X); /* Emit call(ctx, arg2=A, arg3=X) */ - insn->code = BPF_JMP | BPF_CALL; switch (fp->k) { case SKF_AD_OFF + SKF_AD_PAY_OFFSET: - insn->imm = __skb_get_pay_offset - __bpf_call_base; + *insn = BPF_EMIT_CALL(__skb_get_pay_offset); break; case SKF_AD_OFF + SKF_AD_NLATTR: - insn->imm = __skb_get_nlattr - __bpf_call_base; + *insn = BPF_EMIT_CALL(__skb_get_nlattr); break; case SKF_AD_OFF + SKF_AD_NLATTR_NEST: - insn->imm = __skb_get_nlattr_nest - __bpf_call_base; + *insn = BPF_EMIT_CALL(__skb_get_nlattr_nest); break; case SKF_AD_OFF + SKF_AD_CPU: - insn->imm = __get_raw_cpu_id - __bpf_call_base; + *insn = BPF_EMIT_CALL(__get_raw_cpu_id); + break; + case SKF_AD_OFF + SKF_AD_RANDOM: + *insn = BPF_EMIT_CALL(__get_random_u32); break; } break;
case SKF_AD_OFF + SKF_AD_ALU_XOR_X: - insn->code = BPF_ALU | BPF_XOR | BPF_X; - insn->a_reg = A_REG; - insn->x_reg = X_REG; + /* A ^= X */ + *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X); break;
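Throughout convert_bpf_extensions() the old field-by-field stores are replaced by builder macros that yield a whole instruction as a single expression. A sketch of the shape of such a macro; struct insn and ALU32_IMM() here are simplified stand-ins for sock_filter_int and the kernel's BPF_ALU32_IMM().

#include <stdio.h>

struct insn {
    unsigned char code;
    unsigned char a_reg, x_reg;
    short off;
    int imm;
};

/* One macro, one complete instruction: a compound literal with
 * designated initializers. */
#define ALU32_IMM(OP, DST, IMM) \
    ((struct insn) { .code = (OP), .a_reg = (DST), .imm = (IMM) })

enum { OP_AND = 0x54, REG_A = 0 };

int main(void)
{
    struct insn prog[2];
    struct insn *insn = prog;

    /* old style: one field store per line */
    insn->code = OP_AND;
    insn->a_reg = REG_A;
    insn->imm = 0xf;
    insn++;

    /* new style: one expression per instruction */
    *insn++ = ALU32_IMM(OP_AND, REG_A, 0xf);

    printf("emitted %d insns\n", (int)(insn - prog));
    return 0;
}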
default: @@@ -870,7 -830,7 +830,7 @@@ int sk_convert_filter(struct sock_filte u8 bpf_src;
BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK); - BUILD_BUG_ON(FP_REG + 1 != MAX_BPF_REG); + BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
if (len <= 0 || len >= BPF_MAXINSNS) return -EINVAL; @@@ -885,11 -845,8 +845,8 @@@ do_pass new_insn = new_prog; fp = prog;
- if (new_insn) { - new_insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - new_insn->a_reg = CTX_REG; - new_insn->x_reg = ARG1_REG; - } + if (new_insn) + *new_insn = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1); new_insn++;
for (i = 0; i < len; fp++, i++) { @@@ -937,17 -894,16 +894,16 @@@ convert_bpf_extensions(fp, &insn)) break;
- insn->code = fp->code; - insn->a_reg = A_REG; - insn->x_reg = X_REG; - insn->imm = fp->k; + *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k); break;
- /* Jump opcodes map as-is, but offsets need adjustment. */ - case BPF_JMP | BPF_JA: - target = i + fp->k + 1; - insn->code = fp->code; - #define EMIT_JMP \ + /* Jump transformation cannot use BPF block macros + * everywhere as offset calculation and target updates + * require a bit more work than the rest, i.e. jump + * opcodes map as-is, but offsets need adjustment. + */ + + #define BPF_EMIT_JMP \ do { \ if (target >= len || target < 0) \ goto err; \ @@@ -956,7 -912,10 +912,10 @@@ insn->off -= insn - tmp_insns; \ } while (0)
- EMIT_JMP; + case BPF_JMP | BPF_JA: + target = i + fp->k + 1; + insn->code = fp->code; + BPF_EMIT_JMP; break;
case BPF_JMP | BPF_JEQ | BPF_K: @@@ -972,17 -931,14 +931,14 @@@ * immediate into tmp register and use it * in compare insn. */ - insn->code = BPF_ALU | BPF_MOV | BPF_K; - insn->a_reg = TMP_REG; - insn->imm = fp->k; - insn++; + *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);
- insn->a_reg = A_REG; - insn->x_reg = TMP_REG; + insn->a_reg = BPF_REG_A; + insn->x_reg = BPF_REG_TMP; bpf_src = BPF_X; } else { - insn->a_reg = A_REG; - insn->x_reg = X_REG; + insn->a_reg = BPF_REG_A; + insn->x_reg = BPF_REG_X; insn->imm = fp->k; bpf_src = BPF_SRC(fp->code); } @@@ -991,7 -947,7 +947,7 @@@ if (fp->jf == 0) { insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src; target = i + fp->jt + 1; - EMIT_JMP; + BPF_EMIT_JMP; break; }
@@@ -999,127 -955,94 +955,94 @@@ if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) { insn->code = BPF_JMP | BPF_JNE | bpf_src; target = i + fp->jf + 1; - EMIT_JMP; + BPF_EMIT_JMP; break; }
/* Other jumps are mapped into two insns: Jxx and JA. */ target = i + fp->jt + 1; insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src; - EMIT_JMP; + BPF_EMIT_JMP; insn++;
insn->code = BPF_JMP | BPF_JA; target = i + fp->jf + 1; - EMIT_JMP; + BPF_EMIT_JMP; break;
/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */ case BPF_LDX | BPF_MSH | BPF_B: - insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = TMP_REG; - insn->x_reg = A_REG; - insn++; - - insn->code = BPF_LD | BPF_ABS | BPF_B; - insn->a_reg = A_REG; - insn->imm = fp->k; - insn++; - - insn->code = BPF_ALU | BPF_AND | BPF_K; - insn->a_reg = A_REG; - insn->imm = 0xf; - insn++; - - insn->code = BPF_ALU | BPF_LSH | BPF_K; - insn->a_reg = A_REG; - insn->imm = 2; - insn++; - - insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = X_REG; - insn->x_reg = A_REG; - insn++; - - insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = A_REG; - insn->x_reg = TMP_REG; + /* tmp = A */ + *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A); + /* A = BPF_R0 = *(u8 *) (skb->data + K) */ + *insn++ = BPF_LD_ABS(BPF_B, fp->k); + /* A &= 0xf */ + *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf); + /* A <<= 2 */ + *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2); + /* X = A */ + *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A); + /* A = tmp */ + *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP); break;
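The six-insn sequence above is the classic BPF msh idiom: X = 4 * (pkt[k] & 0xf), the IPv4 header length in bytes recovered from the IHL nibble, computed via A (hence the tmp register saving and restoring A's previous value). The same arithmetic spelled out in plain C:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t first_ip_byte = 0x45;   /* version = 4, ihl = 5 */
    uint32_t A = 123, X, tmp;

    tmp = A;                 /* tmp = A                    */
    A = first_ip_byte;       /* A = *(u8 *)(skb->data + K) */
    A &= 0xf;                /* A &= 0xf                   */
    A <<= 2;                 /* A <<= 2, i.e. A *= 4       */
    X = A;                   /* X = A                      */
    A = tmp;                 /* A = tmp                    */

    printf("X = %u bytes, A preserved = %u\n", X, A);   /* 20, 123 */
    return 0;
}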
/* RET_K, RET_A are remapped into 2 insns. */ case BPF_RET | BPF_A: case BPF_RET | BPF_K: - insn->code = BPF_ALU | BPF_MOV | - (BPF_RVAL(fp->code) == BPF_K ? - BPF_K : BPF_X); - insn->a_reg = 0; - insn->x_reg = A_REG; - insn->imm = fp->k; - insn++; - - insn->code = BPF_JMP | BPF_EXIT; + *insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ? + BPF_K : BPF_X, BPF_REG_0, + BPF_REG_A, fp->k); + *insn = BPF_EXIT_INSN(); break;
/* Store to stack. */ case BPF_ST: case BPF_STX: - insn->code = BPF_STX | BPF_MEM | BPF_W; - insn->a_reg = FP_REG; - insn->x_reg = fp->code == BPF_ST ? A_REG : X_REG; - insn->off = -(BPF_MEMWORDS - fp->k) * 4; + *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) == + BPF_ST ? BPF_REG_A : BPF_REG_X, + -(BPF_MEMWORDS - fp->k) * 4); break;
/* Load from stack. */ case BPF_LD | BPF_MEM: case BPF_LDX | BPF_MEM: - insn->code = BPF_LDX | BPF_MEM | BPF_W; - insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ? - A_REG : X_REG; - insn->x_reg = FP_REG; - insn->off = -(BPF_MEMWORDS - fp->k) * 4; + *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ? + BPF_REG_A : BPF_REG_X, BPF_REG_FP, + -(BPF_MEMWORDS - fp->k) * 4); break;
/* A = K or X = K */ case BPF_LD | BPF_IMM: case BPF_LDX | BPF_IMM: - insn->code = BPF_ALU | BPF_MOV | BPF_K; - insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ? - A_REG : X_REG; - insn->imm = fp->k; + *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ? + BPF_REG_A : BPF_REG_X, fp->k); break;
/* X = A */ case BPF_MISC | BPF_TAX: - insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = X_REG; - insn->x_reg = A_REG; + *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A); break;
/* A = X */ case BPF_MISC | BPF_TXA: - insn->code = BPF_ALU64 | BPF_MOV | BPF_X; - insn->a_reg = A_REG; - insn->x_reg = X_REG; + *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X); break;
/* A = skb->len or X = skb->len */ case BPF_LD | BPF_W | BPF_LEN: case BPF_LDX | BPF_W | BPF_LEN: - insn->code = BPF_LDX | BPF_MEM | BPF_W; - insn->a_reg = BPF_CLASS(fp->code) == BPF_LD ? - A_REG : X_REG; - insn->x_reg = CTX_REG; - insn->off = offsetof(struct sk_buff, len); + *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ? + BPF_REG_A : BPF_REG_X, BPF_REG_CTX, + offsetof(struct sk_buff, len)); break;
- /* access seccomp_data fields */ + /* Access seccomp_data fields. */ case BPF_LDX | BPF_ABS | BPF_W: - insn->code = BPF_LDX | BPF_MEM | BPF_W; - insn->a_reg = A_REG; - insn->x_reg = CTX_REG; - insn->off = fp->k; + /* A = *(u32 *) (ctx + K) */ + *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k); break;
+ /* Unknown instruction. */ default: goto err; } @@@ -1128,7 -1051,6 +1051,6 @@@ if (new_prog) memcpy(new_insn, tmp_insns, sizeof(*insn) * (insn - tmp_insns)); - new_insn += insn - tmp_insns; }
@@@ -1143,7 -1065,6 +1065,6 @@@ new_flen = new_insn - new_prog; if (pass > 2) goto err; - goto do_pass; }
@@@ -1167,44 -1088,46 +1088,46 @@@ err */ static int check_load_and_stores(struct sock_filter *filter, int flen) { - u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */ + u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */ int pc, ret = 0;
BUILD_BUG_ON(BPF_MEMWORDS > 16); + masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL); if (!masks) return -ENOMEM; + memset(masks, 0xff, flen * sizeof(*masks));
for (pc = 0; pc < flen; pc++) { memvalid &= masks[pc];
switch (filter[pc].code) { - case BPF_S_ST: - case BPF_S_STX: + case BPF_ST: + case BPF_STX: memvalid |= (1 << filter[pc].k); break; - case BPF_S_LD_MEM: - case BPF_S_LDX_MEM: + case BPF_LD | BPF_MEM: + case BPF_LDX | BPF_MEM: if (!(memvalid & (1 << filter[pc].k))) { ret = -EINVAL; goto error; } break; - case BPF_S_JMP_JA: - /* a jump must set masks on target */ + case BPF_JMP | BPF_JA: + /* A jump must set masks on target */ masks[pc + 1 + filter[pc].k] &= memvalid; memvalid = ~0; break; - case BPF_S_JMP_JEQ_K: - case BPF_S_JMP_JEQ_X: - case BPF_S_JMP_JGE_K: - case BPF_S_JMP_JGE_X: - case BPF_S_JMP_JGT_K: - case BPF_S_JMP_JGT_X: - case BPF_S_JMP_JSET_X: - case BPF_S_JMP_JSET_K: - /* a jump must set masks on targets */ + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP | BPF_JSET | BPF_X: + /* A jump must set masks on targets */ masks[pc + 1 + filter[pc].jt] &= memvalid; masks[pc + 1 + filter[pc].jf] &= memvalid; memvalid = ~0; @@@ -1216,6 -1139,72 +1139,72 @@@ error return ret; }
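check_load_and_stores() is a small dataflow check over the 16 scratch cells: a store sets the cell's validity bit, a load requires it, and jumps propagate the mask into their targets by ANDing. The straight-line core of the idea (jump handling omitted), with made-up opcode names:

#include <stdint.h>
#include <stdio.h>

enum { ST, LD };
struct insn { int op; int k; };

static int check(const struct insn *prog, int len)
{
    uint16_t memvalid = 0;          /* bit n set == cell n written */
    int pc;

    for (pc = 0; pc < len; pc++) {
        if (prog[pc].op == ST)
            memvalid |= 1u << prog[pc].k;
        else if (!(memvalid & (1u << prog[pc].k)))
            return -1;              /* load of an uninitialized cell */
    }
    return 0;
}

int main(void)
{
    struct insn good[] = { { ST, 3 }, { LD, 3 } };
    struct insn bad[]  = { { LD, 3 }, { ST, 3 } };

    printf("good: %d\n", check(good, 2));   /* 0  */
    printf("bad:  %d\n", check(bad, 2));    /* -1 */
    return 0;
}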
+ static bool chk_code_allowed(u16 code_to_probe) + { + static const bool codes[] = { + /* 32 bit ALU operations */ + [BPF_ALU | BPF_ADD | BPF_K] = true, + [BPF_ALU | BPF_ADD | BPF_X] = true, + [BPF_ALU | BPF_SUB | BPF_K] = true, + [BPF_ALU | BPF_SUB | BPF_X] = true, + [BPF_ALU | BPF_MUL | BPF_K] = true, + [BPF_ALU | BPF_MUL | BPF_X] = true, + [BPF_ALU | BPF_DIV | BPF_K] = true, + [BPF_ALU | BPF_DIV | BPF_X] = true, + [BPF_ALU | BPF_MOD | BPF_K] = true, + [BPF_ALU | BPF_MOD | BPF_X] = true, + [BPF_ALU | BPF_AND | BPF_K] = true, + [BPF_ALU | BPF_AND | BPF_X] = true, + [BPF_ALU | BPF_OR | BPF_K] = true, + [BPF_ALU | BPF_OR | BPF_X] = true, + [BPF_ALU | BPF_XOR | BPF_K] = true, + [BPF_ALU | BPF_XOR | BPF_X] = true, + [BPF_ALU | BPF_LSH | BPF_K] = true, + [BPF_ALU | BPF_LSH | BPF_X] = true, + [BPF_ALU | BPF_RSH | BPF_K] = true, + [BPF_ALU | BPF_RSH | BPF_X] = true, + [BPF_ALU | BPF_NEG] = true, + /* Load instructions */ + [BPF_LD | BPF_W | BPF_ABS] = true, + [BPF_LD | BPF_H | BPF_ABS] = true, + [BPF_LD | BPF_B | BPF_ABS] = true, + [BPF_LD | BPF_W | BPF_LEN] = true, + [BPF_LD | BPF_W | BPF_IND] = true, + [BPF_LD | BPF_H | BPF_IND] = true, + [BPF_LD | BPF_B | BPF_IND] = true, + [BPF_LD | BPF_IMM] = true, + [BPF_LD | BPF_MEM] = true, + [BPF_LDX | BPF_W | BPF_LEN] = true, + [BPF_LDX | BPF_B | BPF_MSH] = true, + [BPF_LDX | BPF_IMM] = true, + [BPF_LDX | BPF_MEM] = true, + /* Store instructions */ + [BPF_ST] = true, + [BPF_STX] = true, + /* Misc instructions */ + [BPF_MISC | BPF_TAX] = true, + [BPF_MISC | BPF_TXA] = true, + /* Return instructions */ + [BPF_RET | BPF_K] = true, + [BPF_RET | BPF_A] = true, + /* Jump instructions */ + [BPF_JMP | BPF_JA] = true, + [BPF_JMP | BPF_JEQ | BPF_K] = true, + [BPF_JMP | BPF_JEQ | BPF_X] = true, + [BPF_JMP | BPF_JGE | BPF_K] = true, + [BPF_JMP | BPF_JGE | BPF_X] = true, + [BPF_JMP | BPF_JGT | BPF_K] = true, + [BPF_JMP | BPF_JGT | BPF_X] = true, + [BPF_JMP | BPF_JSET | BPF_K] = true, + [BPF_JMP | BPF_JSET | BPF_X] = true, + }; + + if (code_to_probe >= ARRAY_SIZE(codes)) + return false; + + return codes[code_to_probe]; + } + /** * sk_chk_filter - verify socket filter code * @filter: filter to verify @@@ -1232,153 -1221,76 +1221,76 @@@ */ int sk_chk_filter(struct sock_filter *filter, unsigned int flen) { - /* - * Valid instructions are initialized to non-0. - * Invalid instructions are initialized to 0. 
- */ - static const u8 codes[] = { - [BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K, - [BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X, - [BPF_ALU|BPF_SUB|BPF_K] = BPF_S_ALU_SUB_K, - [BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X, - [BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K, - [BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X, - [BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X, - [BPF_ALU|BPF_MOD|BPF_K] = BPF_S_ALU_MOD_K, - [BPF_ALU|BPF_MOD|BPF_X] = BPF_S_ALU_MOD_X, - [BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K, - [BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X, - [BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K, - [BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X, - [BPF_ALU|BPF_XOR|BPF_K] = BPF_S_ALU_XOR_K, - [BPF_ALU|BPF_XOR|BPF_X] = BPF_S_ALU_XOR_X, - [BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K, - [BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X, - [BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K, - [BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X, - [BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG, - [BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS, - [BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS, - [BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS, - [BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN, - [BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND, - [BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND, - [BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND, - [BPF_LD|BPF_IMM] = BPF_S_LD_IMM, - [BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN, - [BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH, - [BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM, - [BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX, - [BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA, - [BPF_RET|BPF_K] = BPF_S_RET_K, - [BPF_RET|BPF_A] = BPF_S_RET_A, - [BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K, - [BPF_LD|BPF_MEM] = BPF_S_LD_MEM, - [BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM, - [BPF_ST] = BPF_S_ST, - [BPF_STX] = BPF_S_STX, - [BPF_JMP|BPF_JA] = BPF_S_JMP_JA, - [BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K, - [BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X, - [BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K, - [BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X, - [BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K, - [BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X, - [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K, - [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X, - }; - int pc; bool anc_found; + int pc;
if (flen == 0 || flen > BPF_MAXINSNS) return -EINVAL;
- /* check the filter code now */ + /* Check the filter code now */ for (pc = 0; pc < flen; pc++) { struct sock_filter *ftest = &filter[pc]; - u16 code = ftest->code;
- if (code >= ARRAY_SIZE(codes)) - return -EINVAL; - code = codes[code]; - if (!code) + /* May we actually operate on this code? */ + if (!chk_code_allowed(ftest->code)) return -EINVAL; + /* Some instructions need special checks */ - switch (code) { - case BPF_S_ALU_DIV_K: - case BPF_S_ALU_MOD_K: - /* check for division by zero */ + switch (ftest->code) { + case BPF_ALU | BPF_DIV | BPF_K: + case BPF_ALU | BPF_MOD | BPF_K: + /* Check for division by zero */ if (ftest->k == 0) return -EINVAL; break; - case BPF_S_LD_MEM: - case BPF_S_LDX_MEM: - case BPF_S_ST: - case BPF_S_STX: - /* check for invalid memory addresses */ + case BPF_LD | BPF_MEM: + case BPF_LDX | BPF_MEM: + case BPF_ST: + case BPF_STX: + /* Check for invalid memory addresses */ if (ftest->k >= BPF_MEMWORDS) return -EINVAL; break; - case BPF_S_JMP_JA: - /* - * Note, the large ftest->k might cause loops. + case BPF_JMP | BPF_JA: + /* Note, the large ftest->k might cause loops. * Compare this with conditional jumps below, * where offsets are limited. --ANK (981016) */ - if (ftest->k >= (unsigned int)(flen-pc-1)) + if (ftest->k >= (unsigned int)(flen - pc - 1)) return -EINVAL; break; - case BPF_S_JMP_JEQ_K: - case BPF_S_JMP_JEQ_X: - case BPF_S_JMP_JGE_K: - case BPF_S_JMP_JGE_X: - case BPF_S_JMP_JGT_K: - case BPF_S_JMP_JGT_X: - case BPF_S_JMP_JSET_X: - case BPF_S_JMP_JSET_K: - /* for conditionals both must be safe */ + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP | BPF_JSET | BPF_X: + /* Both conditionals must be safe */ if (pc + ftest->jt + 1 >= flen || pc + ftest->jf + 1 >= flen) return -EINVAL; break; - case BPF_S_LD_W_ABS: - case BPF_S_LD_H_ABS: - case BPF_S_LD_B_ABS: + case BPF_LD | BPF_W | BPF_ABS: + case BPF_LD | BPF_H | BPF_ABS: + case BPF_LD | BPF_B | BPF_ABS: anc_found = false; - #define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \ - code = BPF_S_ANC_##CODE; \ - anc_found = true; \ - break - switch (ftest->k) { - ANCILLARY(PROTOCOL); - ANCILLARY(PKTTYPE); - ANCILLARY(IFINDEX); - ANCILLARY(NLATTR); - ANCILLARY(NLATTR_NEST); - ANCILLARY(MARK); - ANCILLARY(QUEUE); - ANCILLARY(HATYPE); - ANCILLARY(RXHASH); - ANCILLARY(CPU); - ANCILLARY(ALU_XOR_X); - ANCILLARY(VLAN_TAG); - ANCILLARY(VLAN_TAG_PRESENT); - ANCILLARY(PAY_OFFSET); - } - - /* ancillary operation unknown or unsupported */ + if (bpf_anc_helper(ftest) & BPF_ANC) + anc_found = true; + /* Ancillary operation unknown or unsupported */ if (anc_found == false && ftest->k >= SKF_AD_OFF) return -EINVAL; } - ftest->code = code; }
- /* last instruction must be a RET code */ + /* Last instruction must be a RET code */ switch (filter[flen - 1].code) { - case BPF_S_RET_K: - case BPF_S_RET_A: + case BPF_RET | BPF_K: + case BPF_RET | BPF_A: return check_load_and_stores(filter, flen); } + return -EINVAL; } EXPORT_SYMBOL(sk_chk_filter); @@@ -1423,7 -1335,7 +1335,7 @@@ static void sk_filter_release_rcu(struc struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
sk_release_orig_filter(fp); - bpf_jit_free(fp); + sk_filter_free(fp); }
/** @@@ -1461,7 -1373,7 +1373,7 @@@ static struct sk_filter *__sk_migrate_r
fp_new = sock_kmalloc(sk, len, GFP_KERNEL); if (fp_new) { - memcpy(fp_new, fp, sizeof(struct sk_filter)); + *fp_new = *fp; /* As we're keeping orig_prog in fp_new along, * we need to make sure we're not evicting it * from the old fp. @@@ -1478,7 -1390,7 +1390,7 @@@ static struct sk_filter *__sk_migrate_f { struct sock_filter *old_prog; struct sk_filter *old_fp; - int i, err, new_len, old_len = fp->len;
/* We are free to overwrite insns et al right here as it * won't be used at this point in time anymore internally @@@ -1488,13 -1400,6 +1400,6 @@@ BUILD_BUG_ON(sizeof(struct sock_filter) != sizeof(struct sock_filter_int));
- /* For now, we need to unfiddle BPF_S_* identifiers in place. - * This can sooner or later on be subject to removal, e.g. when - * JITs have been converted. - */ - for (i = 0; i < fp->len; i++) - sk_decode_filter(&fp->insns[i], &fp->insns[i]); - /* Conversion cannot happen on overlapping memory areas, * so we need to keep the user BPF around until the 2nd * pass. At this time, the user BPF is stored in fp->insns. @@@ -1523,7 -1428,6 +1428,6 @@@ goto out_err_free; }
- fp->bpf_func = sk_run_filter_int_skb; fp->len = new_len;
/* 2nd pass: remap sock_filter insns into sock_filter_int insns. */ @@@ -1536,6 -1440,8 +1440,8 @@@ */ goto out_err_free;
+ sk_filter_select_runtime(fp); + kfree(old_prog); return fp;
@@@ -1550,6 -1456,33 +1456,33 @@@ out_err return ERR_PTR(err); }
+ void __weak bpf_int_jit_compile(struct sk_filter *prog) + { + } + + /** + * sk_filter_select_runtime - select execution runtime for BPF program + * @fp: sk_filter populated with internal BPF program + * + * Try to JIT the internal BPF program; if no JIT is available, select the + * interpreter. The BPF program will be executed via the SK_RUN_FILTER() macro. + */ + void sk_filter_select_runtime(struct sk_filter *fp) + { + fp->bpf_func = (void *) __sk_run_filter; + + /* Probe if internal BPF can be JITed */ + bpf_int_jit_compile(fp); + } + EXPORT_SYMBOL_GPL(sk_filter_select_runtime); + + /* Free an internal BPF program */ + void sk_filter_free(struct sk_filter *fp) + { + bpf_jit_free(fp); + } + EXPORT_SYMBOL_GPL(sk_filter_free); + static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp, struct sock *sk) { @@@ -1559,13 -1492,8 +1492,13 @@@ fp->jited = 0;
err = sk_chk_filter(fp->insns, fp->len); - if (err) + if (err) { + if (sk != NULL) + sk_filter_uncharge(sk, fp); + else + kfree(fp); return ERR_PTR(err); + }
/* Probe if we can JIT compile the filter and if so, do * the compilation of the filter. @@@ -1592,7 -1520,7 +1525,7 @@@ * a negative errno code is returned. On success the return is zero. */ int sk_unattached_filter_create(struct sk_filter **pfp, - struct sock_fprog *fprog) + struct sock_fprog_kern *fprog) { unsigned int fsize = sk_filter_proglen(fprog); struct sk_filter *fp; @@@ -1713,83 -1641,6 +1646,6 @@@ int sk_detach_filter(struct sock *sk } EXPORT_SYMBOL_GPL(sk_detach_filter);
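sk_filter_select_runtime() first points fp->bpf_func at the interpreter and then calls bpf_int_jit_compile(), defined __weak as an empty stub above; an architecture that ships an internal-BPF JIT provides a strong definition which overrides the stub at link time and repoints bpf_func. A self-contained illustration of the weak-symbol pattern (generic C, not the kernel symbols):

	#include <stdio.h>

	struct prog { void (*run)(void); };

	static void interp(void) { puts("interpreted"); }

	/* Default stub. A translation unit providing a non-weak
	 * jit_compile() silently replaces this at link time.
	 */
	__attribute__((weak)) void jit_compile(struct prog *p) { (void)p; }

	int main(void)
	{
		struct prog p = { .run = interp };

		jit_compile(&p);	/* may or may not swap in a JITed body */
		p.run();
		return 0;
	}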
- void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to) - { - static const u16 decodes[] = { - [BPF_S_ALU_ADD_K] = BPF_ALU|BPF_ADD|BPF_K, - [BPF_S_ALU_ADD_X] = BPF_ALU|BPF_ADD|BPF_X, - [BPF_S_ALU_SUB_K] = BPF_ALU|BPF_SUB|BPF_K, - [BPF_S_ALU_SUB_X] = BPF_ALU|BPF_SUB|BPF_X, - [BPF_S_ALU_MUL_K] = BPF_ALU|BPF_MUL|BPF_K, - [BPF_S_ALU_MUL_X] = BPF_ALU|BPF_MUL|BPF_X, - [BPF_S_ALU_DIV_X] = BPF_ALU|BPF_DIV|BPF_X, - [BPF_S_ALU_MOD_K] = BPF_ALU|BPF_MOD|BPF_K, - [BPF_S_ALU_MOD_X] = BPF_ALU|BPF_MOD|BPF_X, - [BPF_S_ALU_AND_K] = BPF_ALU|BPF_AND|BPF_K, - [BPF_S_ALU_AND_X] = BPF_ALU|BPF_AND|BPF_X, - [BPF_S_ALU_OR_K] = BPF_ALU|BPF_OR|BPF_K, - [BPF_S_ALU_OR_X] = BPF_ALU|BPF_OR|BPF_X, - [BPF_S_ALU_XOR_K] = BPF_ALU|BPF_XOR|BPF_K, - [BPF_S_ALU_XOR_X] = BPF_ALU|BPF_XOR|BPF_X, - [BPF_S_ALU_LSH_K] = BPF_ALU|BPF_LSH|BPF_K, - [BPF_S_ALU_LSH_X] = BPF_ALU|BPF_LSH|BPF_X, - [BPF_S_ALU_RSH_K] = BPF_ALU|BPF_RSH|BPF_K, - [BPF_S_ALU_RSH_X] = BPF_ALU|BPF_RSH|BPF_X, - [BPF_S_ALU_NEG] = BPF_ALU|BPF_NEG, - [BPF_S_LD_W_ABS] = BPF_LD|BPF_W|BPF_ABS, - [BPF_S_LD_H_ABS] = BPF_LD|BPF_H|BPF_ABS, - [BPF_S_LD_B_ABS] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_PROTOCOL] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_PKTTYPE] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_IFINDEX] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_NLATTR] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_NLATTR_NEST] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_MARK] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_QUEUE] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_HATYPE] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_RXHASH] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_CPU] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_ALU_XOR_X] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_LD_W_LEN] = BPF_LD|BPF_W|BPF_LEN, - [BPF_S_LD_W_IND] = BPF_LD|BPF_W|BPF_IND, - [BPF_S_LD_H_IND] = BPF_LD|BPF_H|BPF_IND, - [BPF_S_LD_B_IND] = BPF_LD|BPF_B|BPF_IND, - [BPF_S_LD_IMM] = BPF_LD|BPF_IMM, - [BPF_S_LDX_W_LEN] = BPF_LDX|BPF_W|BPF_LEN, - [BPF_S_LDX_B_MSH] = BPF_LDX|BPF_B|BPF_MSH, - [BPF_S_LDX_IMM] = BPF_LDX|BPF_IMM, - [BPF_S_MISC_TAX] = BPF_MISC|BPF_TAX, - [BPF_S_MISC_TXA] = BPF_MISC|BPF_TXA, - [BPF_S_RET_K] = BPF_RET|BPF_K, - [BPF_S_RET_A] = BPF_RET|BPF_A, - [BPF_S_ALU_DIV_K] = BPF_ALU|BPF_DIV|BPF_K, - [BPF_S_LD_MEM] = BPF_LD|BPF_MEM, - [BPF_S_LDX_MEM] = BPF_LDX|BPF_MEM, - [BPF_S_ST] = BPF_ST, - [BPF_S_STX] = BPF_STX, - [BPF_S_JMP_JA] = BPF_JMP|BPF_JA, - [BPF_S_JMP_JEQ_K] = BPF_JMP|BPF_JEQ|BPF_K, - [BPF_S_JMP_JEQ_X] = BPF_JMP|BPF_JEQ|BPF_X, - [BPF_S_JMP_JGE_K] = BPF_JMP|BPF_JGE|BPF_K, - [BPF_S_JMP_JGE_X] = BPF_JMP|BPF_JGE|BPF_X, - [BPF_S_JMP_JGT_K] = BPF_JMP|BPF_JGT|BPF_K, - [BPF_S_JMP_JGT_X] = BPF_JMP|BPF_JGT|BPF_X, - [BPF_S_JMP_JSET_K] = BPF_JMP|BPF_JSET|BPF_K, - [BPF_S_JMP_JSET_X] = BPF_JMP|BPF_JSET|BPF_X, - }; - u16 code; - - code = filt->code; - - to->code = decodes[code]; - to->jt = filt->jt; - to->jf = filt->jf; - to->k = filt->k; - } - int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len) { diff --combined net/ipv4/inetpeer.c index 56cd458,4ced1b9..bd5f592 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c @@@ -26,20 -26,7 +26,7 @@@ * Theory of operations. * We keep one entry for each peer IP address. The nodes contains long-living * information about the peer which doesn't depend on routes. - At this moment this information consists only of ID field for the next - outgoing IP packet.
This field is incremented with each packet as encoded - * in inet_getid() function (include/net/inetpeer.h). - * At the moment of writing this notes identifier of IP packets is generated - * to be unpredictable using this code only for packets subjected - * (actually or potentially) to defragmentation. I.e. DF packets less than - * PMTU in size when local fragmentation is disabled use a constant ID and do - * not use this code (see ip_select_ident() in include/net/ip.h). * - * Route cache entries hold references to our nodes. - * New cache entries get references via lookup by destination IP address in - * the avl tree. The reference is grabbed only when it's needed i.e. only - * when we try to output IP packet which needs an unpredictable ID (see - * __ip_select_ident() in net/ipv4/route.c). * Nodes are removed only when reference counter goes to 0. * When it's happened the node may be removed when a sufficient amount of * time has been passed since its last use. The less-recently-used entry can @@@ -62,7 -49,6 +49,6 @@@ * refcnt: atomically against modifications on other CPU; * usually under some other lock to prevent node disappearing * daddr: unchangeable - * ip_id_count: atomic value (no lock needed) */
static struct kmem_cache *peer_cachep __read_mostly; @@@ -120,7 -106,7 +106,7 @@@ int inet_peer_maxttl __read_mostly = 1 static void inetpeer_gc_worker(struct work_struct *work) { struct inet_peer *p, *n, *c; - LIST_HEAD(list); + struct list_head list;
spin_lock_bh(&gc_lock); list_replace_init(&gc_list, &list); @@@ -497,10 -483,6 +483,6 @@@ relookup p->daddr = *daddr; atomic_set(&p->refcnt, 1); atomic_set(&p->rid, 0); - atomic_set(&p->ip_id_count, - (daddr->family == AF_INET) ? - secure_ip_id(daddr->addr.a4) : - secure_ipv6_id(daddr->addr.a6)); p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; p->rate_tokens = 0; /* 60*HZ is arbitrary, but chosen enough high so that the first @@@ -522,7 -504,7 +504,7 @@@ EXPORT_SYMBOL_GPL(inet_getpeer) void inet_putpeer(struct inet_peer *p) { p->dtime = (__u32)jiffies; - smp_mb__before_atomic_dec(); + smp_mb__before_atomic(); atomic_dec(&p->refcnt); } EXPORT_SYMBOL_GPL(inet_putpeer); diff --combined net/ipv4/tcp_input.c index 3a26b3b,350b207..931529d --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@@ -2684,12 -2684,13 +2684,12 @@@ static void tcp_process_loss(struct soc bool recovered = !before(tp->snd_una, tp->high_seq);
if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ - if (flag & FLAG_ORIG_SACK_ACKED) { - /* Step 3.b. A timeout is spurious if not all data are - * lost, i.e., never-retransmitted data are (s)acked. - */ - tcp_try_undo_loss(sk, true); + /* Step 3.b. A timeout is spurious if not all data are + * lost, i.e., never-retransmitted data are (s)acked. + */ + if (tcp_try_undo_loss(sk, flag & FLAG_ORIG_SACK_ACKED)) return; - } + if (after(tp->snd_nxt, tp->high_seq) && (flag & FLAG_DATA_SACKED || is_dupack)) { tp->frto = 0; /* Loss was real: 2nd part of step 3.a */ @@@ -2937,10 -2938,11 +2937,11 @@@ static void tcp_synack_rtt_meas(struct tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt_us, -1L); }
- static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight) + static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) { const struct inet_connection_sock *icsk = inet_csk(sk); - icsk->icsk_ca_ops->cong_avoid(sk, ack, acked, in_flight); + + icsk->icsk_ca_ops->cong_avoid(sk, ack, acked); tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp; }
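Dropping in_flight from the signature means congestion-control modules now judge for themselves whether the flow is cwnd-limited, using the state that tcp_cwnd_validate() starts tracking later in this merge. A minimal Reno-flavoured module against the new three-argument hook, a sketch only, assuming the tcp_is_cwnd_limited()/tcp_cong_avoid_ai() helpers as they exist in this tree:

	static void demo_cong_avoid(struct sock *sk, u32 ack, u32 acked)
	{
		struct tcp_sock *tp = tcp_sk(sk);

		/* Growing cwnd while the flow isn't using it only builds bursts. */
		if (!tcp_is_cwnd_limited(sk))
			return;

		if (tp->snd_cwnd <= tp->snd_ssthresh)
			/* Slow start: roughly one MSS per ACKed segment. */
			tp->snd_cwnd = min(tp->snd_cwnd + acked, tp->snd_cwnd_clamp);
		else
			/* Congestion avoidance: about one MSS per RTT. */
			tcp_cong_avoid_ai(tp, tp->snd_cwnd);
	}

	static struct tcp_congestion_ops demo_reno __read_mostly = {
		.owner		= THIS_MODULE,
		.name		= "demo",
		.ssthresh	= tcp_reno_ssthresh,
		.cong_avoid	= demo_cong_avoid,
	};
	/* registered via tcp_register_congestion_control(&demo_reno) */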
@@@ -3363,7 -3365,6 +3364,6 @@@ static int tcp_ack(struct sock *sk, con u32 ack_seq = TCP_SKB_CB(skb)->seq; u32 ack = TCP_SKB_CB(skb)->ack_seq; bool is_dupack = false; - u32 prior_in_flight; u32 prior_fackets; int prior_packets = tp->packets_out; const int prior_unsacked = tp->packets_out - tp->sacked_out; @@@ -3396,7 -3397,6 +3396,6 @@@ flag |= FLAG_SND_UNA_ADVANCED;
prior_fackets = tp->fackets_out; - prior_in_flight = tcp_packets_in_flight(tp);
/* ts_recent update must be made after we are sure that the packet * is in window. @@@ -3451,7 -3451,7 +3450,7 @@@
/* Advance cwnd if state allows */ if (tcp_may_raise_cwnd(sk, flag)) - tcp_cong_avoid(sk, ack, acked, prior_in_flight); + tcp_cong_avoid(sk, ack, acked);
if (tcp_ack_is_dubious(sk, flag)) { is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); @@@ -4702,28 -4702,6 +4701,6 @@@ static int tcp_prune_queue(struct sock return -1; }
- /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto. - * As additional protections, we do not touch cwnd in retransmission phases, - * and if application hit its sndbuf limit recently. - */ - void tcp_cwnd_application_limited(struct sock *sk) - { - struct tcp_sock *tp = tcp_sk(sk); - - if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && - sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { - /* Limited by application or receiver window. */ - u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); - u32 win_used = max(tp->snd_cwnd_used, init_win); - if (win_used < tp->snd_cwnd) { - tp->snd_ssthresh = tcp_current_ssthresh(sk); - tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1; - } - tp->snd_cwnd_used = 0; - } - tp->snd_cwnd_stamp = tcp_time_stamp; - } - static bool tcp_should_expand_sndbuf(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); diff --combined net/ipv4/tcp_output.c index 2d340bd,d463c35..faccd39 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@@ -627,7 -627,7 +627,7 @@@ static unsigned int tcp_synack_options( if (unlikely(!ireq->tstamp_ok)) remaining -= TCPOLEN_SACKPERM_ALIGNED; } - if (foc != NULL) { + if (foc != NULL && foc->len >= 0) { u32 need = TCPOLEN_EXP_FASTOPEN_BASE + foc->len; need = (need + 3) & ~3U; /* Align to 32 bits */ if (remaining >= need) { @@@ -878,15 -878,8 +878,8 @@@ static int tcp_transmit_skb(struct soc BUG_ON(!skb || !tcp_skb_pcount(skb));
if (clone_it) { - const struct sk_buff *fclone = skb + 1; - skb_mstamp_get(&skb->skb_mstamp);
- if (unlikely(skb->fclone == SKB_FCLONE_ORIG && - fclone->fclone == SKB_FCLONE_CLONE)) - NET_INC_STATS(sock_net(sk), - LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); - if (unlikely(skb_cloned(skb))) skb = pskb_copy(skb, gfp_mask); else @@@ -1387,12 -1380,43 +1380,43 @@@ unsigned int tcp_current_mss(struct soc return mss_now; }
- /* Congestion window validation. (RFC2861) */ - static void tcp_cwnd_validate(struct sock *sk) + /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto. + * As additional protections, we do not touch cwnd in retransmission phases, + * and if application hit its sndbuf limit recently. + */ + static void tcp_cwnd_application_limited(struct sock *sk) + { + struct tcp_sock *tp = tcp_sk(sk); + + if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open && + sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { + /* Limited by application or receiver window. */ + u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); + u32 win_used = max(tp->snd_cwnd_used, init_win); + if (win_used < tp->snd_cwnd) { + tp->snd_ssthresh = tcp_current_ssthresh(sk); + tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1; + } + tp->snd_cwnd_used = 0; + } + tp->snd_cwnd_stamp = tcp_time_stamp; + } + + static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) { struct tcp_sock *tp = tcp_sk(sk);
- if (tp->packets_out >= tp->snd_cwnd) { + /* Track the maximum number of outstanding packets in each + * window, and remember whether we were cwnd-limited then. + */ + if (!before(tp->snd_una, tp->max_packets_seq) || + tp->packets_out > tp->max_packets_out) { + tp->max_packets_out = tp->packets_out; + tp->max_packets_seq = tp->snd_nxt; + tp->is_cwnd_limited = is_cwnd_limited; + } + + if (tcp_is_cwnd_limited(sk)) { /* Network is fed fully. */ tp->snd_cwnd_used = 0; tp->snd_cwnd_stamp = tcp_time_stamp; @@@ -1644,7 -1668,8 +1668,8 @@@ static int tso_fragment(struct sock *sk * * This algorithm is from John Heffner. */ - static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) + static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, + bool *is_cwnd_limited) { struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); @@@ -1708,6 -1733,9 +1733,9 @@@ if (!tp->tso_deferred) tp->tso_deferred = 1 | (jiffies << 1);
+ if (cong_win < send_win && cong_win < skb->len) + *is_cwnd_limited = true; + return true;
send_now: @@@ -1868,6 -1896,7 +1896,7 @@@ static bool tcp_write_xmit(struct sock unsigned int tso_segs, sent_pkts; int cwnd_quota; int result; + bool is_cwnd_limited = false;
sent_pkts = 0;
@@@ -1892,6 -1921,7 +1921,7 @@@
cwnd_quota = tcp_cwnd_test(tp, skb); if (!cwnd_quota) { + is_cwnd_limited = true; if (push_one == 2) /* Force out a loss probe pkt. */ cwnd_quota = 1; @@@ -1908,7 -1938,8 +1938,8 @@@ nonagle : TCP_NAGLE_PUSH)))) break; } else { - if (!push_one && tcp_tso_should_defer(sk, skb)) + if (!push_one && + tcp_tso_should_defer(sk, skb, &is_cwnd_limited)) break; }
@@@ -1930,8 -1961,10 +1961,8 @@@ /* It is possible TX completion already happened * before we set TSQ_THROTTLED, so we must * test again the condition. - * We abuse smp_mb__after_clear_bit() because - * there is no smp_mb__after_set_bit() yet */ - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); if (atomic_read(&sk->sk_wmem_alloc) > limit) break; } @@@ -1973,7 -2006,7 +2004,7 @@@ repair /* Send one loss probe per tail loss episode. */ if (push_one != 2) tcp_schedule_loss_probe(sk); - tcp_cwnd_validate(sk); + tcp_cwnd_validate(sk, is_cwnd_limited); return false; } return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk)); @@@ -2037,6 -2070,25 +2068,25 @@@ bool tcp_schedule_loss_probe(struct soc return true; }
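The smp_mb__after_clear_bit() above becomes smp_mb__after_atomic(): the barrier rework in this cycle folds the old bitop- and atomic-specific variants into one before/after pair valid around any non-value-returning atomic RMW, which also retires the "abuse" called out in the deleted comment (set_bit() never had a dedicated after-barrier). The pattern the barrier protects, shown in isolation:

	/* Publish the flag, then re-check the condition it races with. */
	set_bit(TSQ_THROTTLED, &tp->tsq_flags);

	/* Order the set_bit() store before the re-read of sk_wmem_alloc on
	 * architectures where atomic bitops do not already imply a barrier.
	 */
	smp_mb__after_atomic();

	if (atomic_read(&sk->sk_wmem_alloc) > limit)
		break;	/* TX completion really has not happened yet */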
+ /* Thanks to skb fast clones, we can detect if a prior transmit of + * a packet is still in a qdisc or driver queue. + * In this case, there is very little point doing a retransmit ! + * Note: This is called from BH context only. + */ + static bool skb_still_in_host_queue(const struct sock *sk, + const struct sk_buff *skb) + { + const struct sk_buff *fclone = skb + 1; + + if (unlikely(skb->fclone == SKB_FCLONE_ORIG && + fclone->fclone == SKB_FCLONE_CLONE)) { + NET_INC_STATS_BH(sock_net(sk), + LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); + return true; + } + return false; + } + /* When probe timeout (PTO) fires, send a new segment if one exists, else * retransmit the last segment. */ @@@ -2062,6 -2114,9 +2112,9 @@@ void tcp_send_loss_probe(struct sock *s if (WARN_ON(!skb)) goto rearm_timer;
+ if (skb_still_in_host_queue(sk, skb)) + goto rearm_timer; + pcount = tcp_skb_pcount(skb); if (WARN_ON(!pcount)) goto rearm_timer; @@@ -2383,6 -2438,9 +2436,9 @@@ int __tcp_retransmit_skb(struct sock *s min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) return -EAGAIN;
+ if (skb_still_in_host_queue(sk, skb)) + return -EBUSY; + if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) BUG(); @@@ -2476,7 -2534,7 +2532,7 @@@ int tcp_retransmit_skb(struct sock *sk * see tcp_input.c tcp_sacktag_write_queue(). */ TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt; - } else { + } else if (err != -EBUSY) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); } return err; @@@ -2754,27 -2812,6 +2810,6 @@@ struct sk_buff *tcp_make_synack(struct if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) mss = tp->rx_opt.user_mss;
- if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ - __u8 rcv_wscale; - /* Set this up on the first call only */ - req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); - - /* limit the window selection if the user enforce a smaller rx buffer */ - if (sk->sk_userlocks & SOCK_RCVBUF_LOCK && - (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0)) - req->window_clamp = tcp_full_space(sk); - - /* tcp_full_space because it is guaranteed to be the first packet */ - tcp_select_initial_window(tcp_full_space(sk), - mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), - &req->rcv_wnd, - &req->window_clamp, - ireq->wscale_ok, - &rcv_wscale, - dst_metric(dst, RTAX_INITRWND)); - ireq->rcv_wscale = rcv_wscale; - } - memset(&opts, 0, sizeof(opts)); #ifdef CONFIG_SYN_COOKIES if (unlikely(req->cookie_ts)) diff --combined net/netfilter/ipvs/ip_vs_core.c index 3d2d2c8,d9da8c4..e683675 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@@ -97,7 -97,7 +97,7 @@@ const char *ip_vs_proto_name(unsigned i return "ICMPv6"; #endif default: - sprintf(buf, "IP_%d", proto); + sprintf(buf, "IP_%u", proto); return buf; } } @@@ -1392,19 -1392,15 +1392,19 @@@ ip_vs_in_icmp(struct sk_buff *skb, int
if (ipip) { __be32 info = ic->un.gateway; + __u8 type = ic->type; + __u8 code = ic->code;
/* Update the MTU */ if (ic->type == ICMP_DEST_UNREACH && ic->code == ICMP_FRAG_NEEDED) { struct ip_vs_dest *dest = cp->dest; u32 mtu = ntohs(ic->un.frag.mtu); + __be16 frag_off = cih->frag_off;
/* Strip outer IP and ICMP, go to IPIP header */ - __skb_pull(skb, ihl + sizeof(_icmph)); + if (pskb_pull(skb, ihl + sizeof(_icmph)) == NULL) + goto ignore_ipip; offset2 -= ihl + sizeof(_icmph); skb_reset_network_header(skb); IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n", @@@ -1412,7 -1408,7 +1412,7 @@@ ipv4_update_pmtu(skb, dev_net(skb->dev), mtu, 0, 0, 0, 0); /* Client uses PMTUD? */ - if (!(cih->frag_off & htons(IP_DF))) + if (!(frag_off & htons(IP_DF))) goto ignore_ipip; /* Prefer the resulting PMTU */ if (dest) { @@@ -1431,13 -1427,12 +1431,13 @@@ /* Strip outer IP, ICMP and IPIP, go to IP header of * original request. */ - __skb_pull(skb, offset2); + if (pskb_pull(skb, offset2) == NULL) + goto ignore_ipip; skb_reset_network_header(skb); IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n", &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, - ic->type, ic->code, ntohl(info)); - icmp_send(skb, ic->type, ic->code, info); + type, code, ntohl(info)); + icmp_send(skb, type, code, info); /* ICMP can be shorter but anyways, account it */ ip_vs_out_stats(cp, skb);
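Worth noting in the ip_vs_in_icmp() hunks above: the unconditional __skb_pull() calls become pskb_pull(), which may fail on a short or non-linear skb and may reallocate skb->data while linearizing, so the values previously read through header pointers (ic->type, ic->code, cih->frag_off) are snapshotted first. Schematically:

	__u8 type = ic->type, code = ic->code;	/* ic points into old skb->data */

	/* pskb_pull() returns NULL on a truncated packet; __skb_pull()
	 * would simply have walked past the end of the linear area.
	 */
	if (pskb_pull(skb, ihl + sizeof(_icmph)) == NULL)
		goto ignore_ipip;

	/* From here on, use the saved copies, never ic->... */
	icmp_send(skb, type, code, info);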
diff --combined net/netlink/af_netlink.c index f22757a,e0ccd84..15c731f --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@@ -1206,7 -1206,8 +1206,8 @@@ static int netlink_create(struct net *n struct module *module = NULL; struct mutex *cb_mutex; struct netlink_sock *nlk; - void (*bind)(int group); + int (*bind)(int group); + void (*unbind)(int group); int err = 0;
sock->state = SS_UNCONNECTED; @@@ -1232,6 -1233,7 +1233,7 @@@ err = -EPROTONOSUPPORT; cb_mutex = nl_table[protocol].cb_mutex; bind = nl_table[protocol].bind; + unbind = nl_table[protocol].unbind; netlink_unlock_table();
if (err < 0) @@@ -1248,6 -1250,7 +1250,7 @@@ nlk = nlk_sk(sock->sk); nlk->module = module; nlk->netlink_bind = bind; + nlk->netlink_unbind = unbind; out: return err;
@@@ -1301,6 -1304,7 +1304,7 @@@ static int netlink_release(struct socke kfree_rcu(old, rcu); nl_table[sk->sk_protocol].module = NULL; nl_table[sk->sk_protocol].bind = NULL; + nl_table[sk->sk_protocol].unbind = NULL; nl_table[sk->sk_protocol].flags = 0; nl_table[sk->sk_protocol].registered = 0; } @@@ -1373,9 -1377,7 +1377,9 @@@ retry bool __netlink_ns_capable(const struct netlink_skb_parms *nsp, struct user_namespace *user_ns, int cap) { - return sk_ns_capable(nsp->sk, user_ns, cap); + return ((nsp->flags & NETLINK_SKB_DST) || + file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) && + ns_capable(user_ns, cap); } EXPORT_SYMBOL(__netlink_ns_capable);
@@@ -1478,6 -1480,19 +1482,19 @@@ static int netlink_realloc_groups(struc return err; }
+ static void netlink_unbind(int group, long unsigned int groups, + struct netlink_sock *nlk) + { + int undo; + + if (!nlk->netlink_unbind) + return; + + for (undo = 0; undo < group; undo++) + if (test_bit(undo, &groups)) + nlk->netlink_unbind(undo); + } + static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { @@@ -1486,6 -1501,7 +1503,7 @@@ struct netlink_sock *nlk = nlk_sk(sk); struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; int err; + long unsigned int groups = nladdr->nl_groups;
if (addr_len < sizeof(struct sockaddr_nl)) return -EINVAL; @@@ -1494,7 -1510,7 +1512,7 @@@ return -EINVAL;
/* Only superuser is allowed to listen multicasts */ - if (nladdr->nl_groups) { + if (groups) { if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV)) return -EPERM; err = netlink_realloc_groups(sk); @@@ -1502,37 -1518,45 +1520,45 @@@ return err; }
- if (nlk->portid) { + if (nlk->portid) if (nladdr->nl_pid != nlk->portid) return -EINVAL; - } else { + + if (nlk->netlink_bind && groups) { + int group; + + for (group = 0; group < nlk->ngroups; group++) { + if (!test_bit(group, &groups)) + continue; + err = nlk->netlink_bind(group); + if (!err) + continue; + netlink_unbind(group, groups, nlk); + return err; + } + } + + if (!nlk->portid) { err = nladdr->nl_pid ? netlink_insert(sk, net, nladdr->nl_pid) : netlink_autobind(sock); - if (err) + if (err) { + netlink_unbind(nlk->ngroups - 1, groups, nlk); return err; + } }
- if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0])) + if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0])) return 0;
netlink_table_grab(); netlink_update_subscriptions(sk, nlk->subscriptions + - hweight32(nladdr->nl_groups) - + hweight32(groups) - hweight32(nlk->groups[0])); - nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups; + nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups; netlink_update_listeners(sk); netlink_table_ungrab();
- if (nlk->netlink_bind && nlk->groups[0]) { - int i; - - for (i = 0; i < nlk->ngroups; i++) { - if (test_bit(i, nlk->groups)) - nlk->netlink_bind(i); - } - } - return 0; }
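With this change a netlink protocol can veto group subscriptions: the per-protocol bind hook now returns int, and the new unbind hook is invoked both on NETLINK_DROP_MEMBERSHIP and when a partially completed bind() has to be rolled back. A hedged sketch of a kernel-side user of the new hooks (hypothetical names; netlink_kernel_cfg fields as extended here):

	static int demo_bind(int group)
	{
		/* e.g. gate a privileged multicast group */
		return capable(CAP_NET_ADMIN) ? 0 : -EPERM;
	}

	static void demo_unbind(int group)
	{
		/* release whatever demo_bind() set up for @group */
	}

	static struct netlink_kernel_cfg demo_cfg = {
		.groups	= 4,
		.bind	= demo_bind,
		.unbind	= demo_unbind,
	};
	/* nlsk = netlink_kernel_create(&init_net, NETLINK_USERSOCK, &demo_cfg); */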
@@@ -2170,13 -2194,17 +2196,17 @@@ static int netlink_setsockopt(struct so return err; if (!val || val - 1 >= nlk->ngroups) return -EINVAL; + if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) { + err = nlk->netlink_bind(val); + if (err) + return err; + } netlink_table_grab(); netlink_update_socket_mc(nlk, val, optname == NETLINK_ADD_MEMBERSHIP); netlink_table_ungrab(); - - if (nlk->netlink_bind) - nlk->netlink_bind(val); + if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind) + nlk->netlink_unbind(val);
err = 0; break; @@@ -2295,7 -2323,6 +2325,7 @@@ static int netlink_sendmsg(struct kioc struct sk_buff *skb; int err; struct scm_cookie scm; + u32 netlink_skb_flags = 0;
if (msg->msg_flags&MSG_OOB) return -EOPNOTSUPP; @@@ -2317,7 -2344,6 +2347,7 @@@ if ((dst_group || dst_portid) && !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) goto out; + netlink_skb_flags |= NETLINK_SKB_DST; } else { dst_portid = nlk->dst_portid; dst_group = nlk->dst_group; @@@ -2347,7 -2373,6 +2377,7 @@@ NETLINK_CB(skb).portid = nlk->portid; NETLINK_CB(skb).dst_group = dst_group; NETLINK_CB(skb).creds = siocb->scm->creds; + NETLINK_CB(skb).flags = netlink_skb_flags;
err = -EFAULT; if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { diff --combined net/sunrpc/xprtsock.c index 402a7e9,1dec604..be8bbd5 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@@ -866,8 -866,6 +866,6 @@@ static void xs_reset_transport(struct s xs_restore_old_callbacks(transport, sk); write_unlock_bh(&sk->sk_callback_lock);
- sk->sk_no_check = 0; - trace_rpc_socket_close(&transport->xprt, sock); sock_release(sock); } @@@ -893,11 -891,11 +891,11 @@@ static void xs_close(struct rpc_xprt *x xs_reset_transport(transport); xprt->reestablish_timeout = 0;
- smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); clear_bit(XPRT_CLOSE_WAIT, &xprt->state); clear_bit(XPRT_CLOSING, &xprt->state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); xprt_disconnect_done(xprt); }
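The deleted sk->sk_no_check assignments here and in xs_udp_finish_connecting() below track a net-next series that, by all appearances in this tree, splits the single sk_no_check flag into transmit and receive halves; the field names below are an assumption consistent with that removal:

	/* assumed replacement fields in this tree's struct sock */
	sk->sk_no_check_tx = 1;	/* do not generate UDP checksums on send */
	sk->sk_no_check_rx = 0;	/* keep verifying checksums on receive */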
@@@ -1497,12 -1495,12 +1495,12 @@@ static void xs_tcp_cancel_linger_timeou
static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt) { - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state); clear_bit(XPRT_CLOSE_WAIT, &xprt->state); clear_bit(XPRT_CLOSING, &xprt->state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); }
static void xs_sock_mark_closed(struct rpc_xprt *xprt) @@@ -1556,10 -1554,10 +1554,10 @@@ static void xs_tcp_state_change(struct xprt->connect_cookie++; xprt->reestablish_timeout = 0; set_bit(XPRT_CLOSING, &xprt->state); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(XPRT_CONNECTED, &xprt->state); clear_bit(XPRT_CLOSE_WAIT, &xprt->state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout); break; case TCP_CLOSE_WAIT: @@@ -1578,9 -1576,9 +1576,9 @@@ case TCP_LAST_ACK: set_bit(XPRT_CLOSING, &xprt->state); xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); clear_bit(XPRT_CONNECTED, &xprt->state); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); break; case TCP_CLOSE: xs_tcp_cancel_linger_timeout(xprt); @@@ -2046,7 -2044,6 +2044,6 @@@ static void xs_udp_finish_connecting(st sk->sk_user_data = xprt; sk->sk_data_ready = xs_udp_data_ready; sk->sk_write_space = xs_udp_write_space; - sk->sk_no_check = UDP_CSUM_NORCV; sk->sk_allocation = GFP_ATOMIC;
xprt_set_connected(xprt); diff --combined net/unix/af_unix.c index 749f80c,7b9114e..e968843 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@@ -1207,7 -1207,7 +1207,7 @@@ restart sk->sk_state = TCP_ESTABLISHED; sock_hold(newsk);
- smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */ + smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */ unix_peer(sk) = newsk;
unix_state_unlock(sk); @@@ -1492,10 -1492,14 +1492,14 @@@ static int unix_dgram_sendmsg(struct ki if (len > sk->sk_sndbuf - 32) goto out;
- if (len > SKB_MAX_ALLOC) + if (len > SKB_MAX_ALLOC) { data_len = min_t(size_t, len - SKB_MAX_ALLOC, MAX_SKB_FRAGS * PAGE_SIZE); + data_len = PAGE_ALIGN(data_len); + + BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE); + }
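The PAGE_ALIGN() added above rounds the paged portion of a large AF_UNIX datagram up to whole pages, so sock_alloc_send_pskb() fills each page frag completely rather than leaving a partial tail page, and the BUILD_BUG_ON() documents the assumption that the linear head can absorb the remainder. Worked numbers, assuming 4 KiB pages and an illustrative SKB_MAX_ALLOC of 16128:

	/* len = 80000 */
	data_len = min_t(size_t, len - SKB_MAX_ALLOC,
			 MAX_SKB_FRAGS * PAGE_SIZE);	/* = 63872 */
	data_len = PAGE_ALIGN(data_len);		/* -> 65536: 16 full pages */
	/* linear head = len - data_len = 14464, comfortably < SKB_MAX_ALLOC */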
skb = sock_alloc_send_pskb(sk, len - data_len, data_len, msg->msg_flags & MSG_DONTWAIT, &err, @@@ -1670,6 -1674,8 +1674,8 @@@ static int unix_stream_sendmsg(struct k
data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
+ data_len = min_t(size_t, size, PAGE_ALIGN(data_len)); + skb = sock_alloc_send_pskb(sk, size - data_len, data_len, msg->msg_flags & MSG_DONTWAIT, &err, get_order(UNIX_SKB_FRAGS_SZ)); diff --combined net/xfrm/xfrm_user.c index 09336b2,fd9a16a..412d9dc --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@@ -597,9 -597,6 +597,6 @@@ static int xfrm_add_sa(struct sk_buff * struct xfrm_state *x; int err; struct km_event c; - kuid_t loginuid = audit_get_loginuid(current); - unsigned int sessionid = audit_get_sessionid(current); - u32 sid;
err = verify_newsa_info(p, attrs); if (err) @@@ -615,8 -612,7 +612,7 @@@ else err = xfrm_state_update(x);
- security_task_getsecid(current, &sid); - xfrm_audit_state_add(x, err ? 0 : 1, loginuid, sessionid, sid); + xfrm_audit_state_add(x, err ? 0 : 1, true);
if (err < 0) { x->km.state = XFRM_STATE_DEAD; @@@ -676,9 -672,6 +672,6 @@@ static int xfrm_del_sa(struct sk_buff * int err = -ESRCH; struct km_event c; struct xfrm_usersa_id *p = nlmsg_data(nlh); - kuid_t loginuid = audit_get_loginuid(current); - unsigned int sessionid = audit_get_sessionid(current); - u32 sid;
x = xfrm_user_state_lookup(net, p, attrs, &err); if (x == NULL) @@@ -703,8 -696,7 +696,7 @@@ km_state_notify(x, &c);
out: - security_task_getsecid(current, &sid); - xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid); + xfrm_audit_state_delete(x, err ? 0 : 1, true); xfrm_state_put(x); return err; } @@@ -955,20 -947,6 +947,20 @@@ static struct sk_buff *xfrm_state_netli return skb; }
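The audit-related hunks above and below all follow one scheme: the loginuid/sessionid/secid triple gathered at every call site collapses into a single bool saying whether a task context is available, and the audit helpers pull the identifiers from current themselves. A plausible shape for the consuming side, a sketch only since the exact helper in this tree is an assumption:

	static void demo_audit_usrinfo(bool task_valid, struct audit_buffer *ab)
	{
		unsigned int auid = task_valid ?
			from_kuid(&init_user_ns, audit_get_loginuid(current)) :
			(unsigned int)-1;
		unsigned int ses = task_valid ?
			audit_get_sessionid(current) : (unsigned int)-1;

		audit_log_format(ab, " auid=%u ses=%u", auid, ses);
	}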
+/* A wrapper for nlmsg_multicast() checking that nlsk is still available. + * Must be called with RCU read lock. + */ +static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb, + u32 pid, unsigned int group) +{ + struct sock *nlsk = rcu_dereference(net->xfrm.nlsk); + + if (nlsk) + return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC); + else + return -1; +} + static inline size_t xfrm_spdinfo_msgsize(void) { return NLMSG_ALIGN(4) @@@ -1428,9 -1406,6 +1420,6 @@@ static int xfrm_add_policy(struct sk_bu struct km_event c; int err; int excl; - kuid_t loginuid = audit_get_loginuid(current); - unsigned int sessionid = audit_get_sessionid(current); - u32 sid;
err = verify_newpolicy_info(p); if (err) @@@ -1449,8 -1424,7 +1438,7 @@@ * a type XFRM_MSG_UPDPOLICY - JHS */ excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY; err = xfrm_policy_insert(p->dir, xp, excl); - security_task_getsecid(current, &sid); - xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid); + xfrm_audit_policy_add(xp, err ? 0 : 1, true);
if (err) { security_xfrm_policy_free(xp->security); @@@ -1687,13 -1661,7 +1675,7 @@@ static int xfrm_get_policy(struct sk_bu NETLINK_CB(skb).portid); } } else { - kuid_t loginuid = audit_get_loginuid(current); - unsigned int sessionid = audit_get_sessionid(current); - u32 sid; - - security_task_getsecid(current, &sid); - xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid, - sid); + xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
if (err != 0) goto out; @@@ -1718,13 -1686,9 +1700,9 @@@ static int xfrm_flush_sa(struct sk_buf struct net *net = sock_net(skb->sk); struct km_event c; struct xfrm_usersa_flush *p = nlmsg_data(nlh); - struct xfrm_audit audit_info; int err;
- audit_info.loginuid = audit_get_loginuid(current); - audit_info.sessionid = audit_get_sessionid(current); - security_task_getsecid(current, &audit_info.secid); - err = xfrm_state_flush(net, p->proto, &audit_info); + err = xfrm_state_flush(net, p->proto, true); if (err) { if (err == -ESRCH) /* empty table */ return 0; @@@ -1908,16 -1872,12 +1886,12 @@@ static int xfrm_flush_policy(struct sk_ struct km_event c; u8 type = XFRM_POLICY_TYPE_MAIN; int err; - struct xfrm_audit audit_info;
err = copy_from_user_policy_type(&type, attrs); if (err) return err;
- audit_info.loginuid = audit_get_loginuid(current); - audit_info.sessionid = audit_get_sessionid(current); - security_task_getsecid(current, &audit_info.secid); - err = xfrm_policy_flush(net, type, &audit_info); + err = xfrm_policy_flush(net, type, true); if (err) { if (err == -ESRCH) /* empty table */ return 0; @@@ -1983,14 -1943,8 +1957,8 @@@ static int xfrm_add_pol_expire(struct s
err = 0; if (up->hard) { - kuid_t loginuid = audit_get_loginuid(current); - unsigned int sessionid = audit_get_sessionid(current); - u32 sid; - - security_task_getsecid(current, &sid); xfrm_policy_delete(xp, p->dir); - xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid); - + xfrm_audit_policy_delete(xp, 1, true); } else { // reset the timers here? WARN(1, "Dont know what to do with soft policy expire\n"); @@@ -2026,13 -1980,8 +1994,8 @@@ static int xfrm_add_sa_expire(struct sk km_state_expired(x, ue->hard, nlh->nlmsg_pid);
if (ue->hard) { - kuid_t loginuid = audit_get_loginuid(current); - unsigned int sessionid = audit_get_sessionid(current); - u32 sid; - - security_task_getsecid(current, &sid); __xfrm_state_delete(x); - xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid); + xfrm_audit_state_delete(x, 1, true); } err = 0; out: @@@ -2279,7 -2228,7 +2242,7 @@@ static int xfrm_send_migrate(const stru if (build_migrate(skb, m, num_migrate, k, sel, dir, type) < 0) BUG();
- return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC); + return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MIGRATE); } #else static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, @@@ -2470,7 -2419,7 +2433,7 @@@ static int xfrm_exp_state_notify(struc return -EMSGSIZE; }
- return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC); + return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE); }
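Every notifier in this file now swaps nlmsg_multicast(net->xfrm.nlsk, ...) for the xfrm_nlmsg_multicast() wrapper added earlier in this diff, closing the window where a dying netns has already cleared net->xfrm.nlsk. Per the wrapper's contract, the caller must be in an RCU read-side section (sketch):

	rcu_read_lock();
	err = xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
	rcu_read_unlock();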
static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c) @@@ -2485,7 -2434,7 +2448,7 @@@ if (build_aevent(skb, x, c) < 0) BUG();
- return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC); + return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_AEVENTS); }
static int xfrm_notify_sa_flush(const struct km_event *c) @@@ -2511,7 -2460,7 +2474,7 @@@
nlmsg_end(skb, nlh);
- return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC); + return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA); }
static inline size_t xfrm_sa_len(struct xfrm_state *x) @@@ -2598,7 -2547,7 +2561,7 @@@ static int xfrm_notify_sa(struct xfrm_s
nlmsg_end(skb, nlh);
- return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC); + return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
out_free_skb: kfree_skb(skb); @@@ -2689,7 -2638,7 +2652,7 @@@ static int xfrm_send_acquire(struct xfr if (build_acquire(skb, x, xt, xp) < 0) BUG();
- return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC); + return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_ACQUIRE); }
/* User gives us xfrm_user_policy_info followed by an array of 0 @@@ -2803,7 -2752,7 +2766,7 @@@ static int xfrm_exp_policy_notify(struc if (build_polexpire(skb, xp, dir, c) < 0) BUG();
- return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC); + return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE); }
static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c) @@@ -2865,7 -2814,7 +2828,7 @@@
nlmsg_end(skb, nlh);
- return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC); + return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
out_free_skb: kfree_skb(skb); @@@ -2893,7 -2842,7 +2856,7 @@@ static int xfrm_notify_policy_flush(con
nlmsg_end(skb, nlh);
- return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC); + return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
out_free_skb: kfree_skb(skb); @@@ -2962,7 -2911,7 +2925,7 @@@ static int xfrm_send_report(struct net if (build_report(skb, proto, sel, addr) < 0) BUG();
- return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC); + return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_REPORT); }
static inline size_t xfrm_mapping_msgsize(void) @@@ -3014,7 -2963,7 +2977,7 @@@ static int xfrm_send_mapping(struct xfr if (build_mapping(skb, x, ipaddr, sport) < 0) BUG();
- return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MAPPING, GFP_ATOMIC); + return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MAPPING); }
static bool xfrm_is_alive(const struct km_event *c)