The following commit has been merged in the master branch:

commit 28eb223b9e39b9f0bd549d00efca4404b8f71ecb
Merge: 621e41896e4baeef076918303b4c0ada5c1f77d1 e9d9450497f77ee40c7d895bf1b5b134b9347d5f
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Tue Jun 3 12:30:59 2014 +1000
Merge remote-tracking branch 'net-next/master'
Conflicts:
    Documentation/driver-model/devres.txt
    include/net/inetpeer.h
    net/ipv6/output_core.c
diff --combined Documentation/driver-model/devres.txt index 10b8c5d,c74e044..4ec7655 --- a/Documentation/driver-model/devres.txt +++ b/Documentation/driver-model/devres.txt @@@ -309,9 -309,7 +309,14 @@@ SLAVE DMA ENGIN SPI devm_spi_register_master()
+ MDIO + devm_mdiobus_alloc() + devm_mdiobus_alloc_size() + devm_mdiobus_free() ++ +GPIO + devm_gpiod_get() + devm_gpiod_get_index() + devm_gpiod_get_optional() + devm_gpiod_get_index_optional() + devm_gpiod_put() diff --combined MAINTAINERS index 11c71a9,dd33abf..01ab014 --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -355,7 -355,7 +355,7 @@@ F: Documentation/hwmon/adm102 F: drivers/hwmon/adm1025.c
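For context on the managed MDIO helpers added to Documentation/driver-model/devres.txt above, here is a minimal, hypothetical probe sketch (the foo_mdio_probe() name and the platform-device setting are assumptions, not part of this patch) showing how devm_mdiobus_alloc() ties the mii_bus allocation to the device's lifetime:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/phy.h>

/* Hypothetical probe: the bus memory allocated here is released
 * automatically when the driver detaches, so there is no
 * mdiobus_free() in the error path or in the remove callback.
 */
static int foo_mdio_probe(struct platform_device *pdev)
{
        struct mii_bus *bus;

        bus = devm_mdiobus_alloc(&pdev->dev);
        if (!bus)
                return -ENOMEM;

        bus->name = "foo-mdio";
        bus->parent = &pdev->dev;
        snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(&pdev->dev));
        /* A real driver must also set bus->read and bus->write here,
         * otherwise mdiobus_register() returns -EINVAL.
         */

        return mdiobus_register(bus);
}

devm_mdiobus_free() is only needed if the bus has to be released before the driver detaches; the devm_gpiod_get*() entries added above follow the same managed-resource convention for GPIO descriptors.
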
ADM1029 HARDWARE MONITOR DRIVER -M: Corentin Labbe corentin.labbe@geomatys.fr +M: Corentin Labbe clabbe.montjoie@gmail.com L: lm-sensors@lm-sensors.org S: Maintained F: drivers/hwmon/adm1029.c @@@ -1617,6 -1617,12 +1617,6 @@@ S: Supporte F: drivers/misc/atmel_tclib.c F: drivers/clocksource/tcb_clksrc.c
-ATMEL TSADCC DRIVER -M: Josh Wu josh.wu@atmel.com -L: linux-input@vger.kernel.org -S: Supported -F: drivers/input/touchscreen/atmel_tsadcc.c - ATMEL USBA UDC DRIVER M: Nicolas Ferre nicolas.ferre@atmel.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@@ -1882,7 -1888,7 +1882,7 @@@ F: drivers/net/ethernet/broadcom/bnx2. F: drivers/net/ethernet/broadcom/bnx2_*
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER - M: Ariel Elior ariele@broadcom.com + M: Ariel Elior ariel.elior@qlogic.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/broadcom/bnx2x/ @@@ -1962,6 -1968,12 +1962,12 @@@ S: Maintaine F: drivers/bcma/ F: include/linux/bcma/
+ BROADCOM SYSTEMPORT ETHERNET DRIVER + M: Florian Fainelli f.fainelli@gmail.com + L: netdev@vger.kernel.org + S: Supported + F: drivers/net/ethernet/broadcom/bcmsysport.* + BROCADE BFA FC SCSI DRIVER M: Anil Gurumurthy anil.gurumurthy@qlogic.com M: Sudarsana Kalluru sudarsana.kalluru@qlogic.com @@@ -2217,9 -2229,8 +2223,8 @@@ F: drivers/platform/chrome CISCO VIC ETHERNET NIC DRIVER M: Christian Benvenuti benve@cisco.com M: Sujith Sankar ssujith@cisco.com - M: Govindarajulu Varadarajan govindarajulu90@gmail.com + M: Govindarajulu Varadarajan _govind@gmx.com M: Neel Patel neepatel@cisco.com - M: Nishank Trivedi nistrive@cisco.com S: Supported F: drivers/net/ethernet/cisco/enic/
@@@ -2404,6 -2415,7 +2409,6 @@@ F: drivers/net/ethernet/ti/cpmac. CPU FREQUENCY DRIVERS M: Rafael J. Wysocki rjw@rjwysocki.net M: Viresh Kumar viresh.kumar@linaro.org -L: cpufreq@vger.kernel.org L: linux-pm@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git @@@ -2414,6 -2426,7 +2419,6 @@@ F: include/linux/cpufreq. CPU FREQUENCY DRIVERS - ARM BIG LITTLE M: Viresh Kumar viresh.kumar@linaro.org M: Sudeep Holla sudeep.holla@arm.com -L: cpufreq@vger.kernel.org L: linux-pm@vger.kernel.org W: http://www.arm.com/products/processors/technologies/biglittleprocessing.php S: Maintained @@@ -3145,9 -3158,10 +3150,9 @@@ S: Maintaine F: drivers/scsi/eata_pio.*
EBTABLES -M: Bart De Schuymer bart.de.schuymer@pandora.be L: netfilter-devel@vger.kernel.org W: http://ebtables.sourceforge.net/ -S: Maintained +S: Orphan F: include/linux/netfilter_bridge/ebt_*.h F: include/uapi/linux/netfilter_bridge/ebt_*.h F: net/bridge/netfilter/ebt*.c @@@ -3760,8 -3774,7 +3765,8 @@@ F: fs/fscache F: include/linux/fscache*.h
F2FS FILE SYSTEM -M: Jaegeuk Kim jaegeuk.kim@samsung.com +M: Jaegeuk Kim jaegeuk@kernel.org +M: Changman Lee cm224.lee@samsung.com L: linux-f2fs-devel@lists.sourceforge.net W: http://en.wikipedia.org/wiki/F2FS T: git git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git @@@ -4196,11 -4209,9 +4201,11 @@@ S: Maintaine F: fs/hpfs/
HSI SUBSYSTEM -M: Sebastian Reichel sre@debian.org +M: Sebastian Reichel sre@kernel.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/sre/linux-hsi.git S: Maintained F: Documentation/ABI/testing/sysfs-bus-hsi +F: Documentation/hsi.txt F: drivers/hsi/ F: include/linux/hsi/ F: include/uapi/linux/hsi/ @@@ -6412,7 -6423,6 +6417,7 @@@ F: drivers/usb/*/*omap F: arch/arm/*omap*/usb*
OMAP GPIO DRIVER +M: Javier Martinez Canillas javier@dowhile0.org M: Santosh Shilimkar santosh.shilimkar@ti.com M: Kevin Hilman khilman@deeprootsystems.com L: linux-omap@vger.kernel.org @@@ -6702,7 -6712,6 +6707,7 @@@ F: Documentation/PCI F: drivers/pci/ F: include/linux/pci* F: arch/x86/pci/ +F: arch/x86/kernel/quirks.c
PCI DRIVER FOR IMX6 M: Richard Zhu r65037@freescale.com @@@ -6750,14 -6759,6 +6755,14 @@@ L: linux-pci@vger.kernel.or S: Maintained F: drivers/pci/host/*designware*
+PCI DRIVER FOR GENERIC OF HOSTS +M: Will Deacon will.deacon@arm.com +L: linux-pci@vger.kernel.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: Documentation/devicetree/bindings/pci/host-generic-pci.txt +F: drivers/pci/host/pci-host-generic.c + PCMCIA SUBSYSTEM P: Linux PCMCIA Team L: linux-pcmcia@lists.infradead.org @@@ -6892,7 -6893,7 +6897,7 @@@ PKUNITY SOC DRIVER M: Guan Xuetao gxt@mprc.pku.edu.cn W: http://mprc.pku.edu.cn/~guanxuetao/linux S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/epip/linux-2.6-unicore32.git +T: git git://github.com/gxt/linux.git F: drivers/input/serio/i8042-unicore32io.h F: drivers/i2c/busses/i2c-puv3.c F: drivers/video/fb-puv3.c @@@ -6942,6 -6943,7 +6947,6 @@@ F: drivers/power
PNP SUPPORT M: Rafael J. Wysocki rafael.j.wysocki@intel.com -M: Bjorn Helgaas bhelgaas@google.com S: Maintained F: drivers/pnp/
@@@ -7408,14 -7410,6 +7413,14 @@@ F: drivers/rpmsg F: Documentation/rpmsg.txt F: include/linux/rpmsg.h
+RESET CONTROLLER FRAMEWORK +M: Philipp Zabel p.zabel@pengutronix.de +S: Maintained +F: drivers/reset/ +F: Documentation/devicetree/bindings/reset/ +F: include/linux/reset.h +F: include/linux/reset-controller.h + RFKILL M: Johannes Berg johannes@sipsolutions.net L: linux-wireless@vger.kernel.org @@@ -7665,6 -7659,7 +7670,6 @@@ L: linux-media@vger.kernel.or Q: https://patchwork.linuxtv.org/project/linux-media/list/ S: Supported F: drivers/media/platform/exynos4-is/ -F: include/media/s5p_fimc.h
SAMSUNG S3C24XX/S3C64XX SOC SERIES CAMIF DRIVER M: Sylwester Nawrocki sylvester.nawrocki@gmail.com @@@ -7967,26 -7962,6 +7972,26 @@@ M: Robin Holt <robinmholt@gmail.com S: Maintained F: drivers/misc/sgi-xp/
+SI2157 MEDIA DRIVER +M: Antti Palosaari crope@iki.fi +L: linux-media@vger.kernel.org +W: http://linuxtv.org/ +W: http://palosaari.fi/linux/ +Q: http://patchwork.linuxtv.org/project/linux-media/list/ +T: git git://linuxtv.org/anttip/media_tree.git +S: Maintained +F: drivers/media/tuners/si2157* + +SI2168 MEDIA DRIVER +M: Antti Palosaari crope@iki.fi +L: linux-media@vger.kernel.org +W: http://linuxtv.org/ +W: http://palosaari.fi/linux/ +Q: http://patchwork.linuxtv.org/project/linux-media/list/ +T: git git://linuxtv.org/anttip/media_tree.git +S: Maintained +F: drivers/media/dvb-frontends/si2168* + SI470X FM RADIO RECEIVER I2C DRIVER M: Hans Verkuil hverkuil@xs4all.nl L: linux-media@vger.kernel.org @@@ -9201,7 -9176,7 +9206,7 @@@ UNICORE32 ARCHITECTURE M: Guan Xuetao gxt@mprc.pku.edu.cn W: http://mprc.pku.edu.cn/~guanxuetao/linux S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/epip/linux-2.6-unicore32.git +T: git git://github.com/gxt/linux.git F: arch/unicore32/
UNIFDEF diff --combined arch/arm/boot/dts/am33xx.dtsi index 9f53e82,f1eea4a..4a4e02d --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@@ -147,6 -147,9 +147,6 @@@ <0x44e10f90 0x40>; interrupts = <12 13 14>; #dma-cells = <1>; - dma-channels = <64>; - ti,edma-regions = <4>; - ti,edma-slots = <256>; };
gpio0: gpio@44e07000 { @@@ -662,6 -665,8 +662,8 @@@ mac: ethernet@4a100000 { compatible = "ti,cpsw"; ti,hwmods = "cpgmac0"; + clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>; + clock-names = "fck", "cpts"; cpdma_channels = <8>; ale_entries = <1024>; bd_ram_size = <0x2000>; @@@ -685,7 -690,6 +687,7 @@@ */ interrupts = <40 41 42 43>; ranges; + status = "disabled";
davinci_mdio: mdio@4a101000 { compatible = "ti,davinci_mdio"; @@@ -694,7 -698,6 +696,7 @@@ ti,hwmods = "davinci_mdio"; bus_freq = <1000000>; reg = <0x4a101000 0x100>; + status = "disabled"; };
cpsw_emac0: slave@4a100200 { diff --combined arch/arm/boot/dts/am4372.dtsi index 794c73e,03a2255..230bf0e --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@@ -67,15 -67,11 +67,15 @@@ };
ocp { - compatible = "simple-bus"; + compatible = "ti,am4372-l3-noc", "simple-bus"; #address-cells = <1>; #size-cells = <1>; ranges; ti,hwmods = "l3_main"; + reg = <0x44000000 0x400000 + 0x44800000 0x400000>; + interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
prcm: prcm@44df0000 { compatible = "ti,am4-prcm"; @@@ -112,6 -108,9 +112,6 @@@ <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>; #dma-cells = <1>; - dma-channels = <64>; - ti,edma-regions = <4>; - ti,edma-slots = <256>; };
uart0: serial@44e09000 { @@@ -490,6 -489,8 +490,8 @@@ #address-cells = <1>; #size-cells = <1>; ti,hwmods = "cpgmac0"; + clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>; + clock-names = "fck", "cpts"; status = "disabled"; cpdma_channels = <8>; ale_entries = <1024>; @@@ -522,12 -523,6 +524,12 @@@ /* Filled in by U-Boot */ mac-address = [ 00 00 00 00 00 00 ]; }; + + phy_sel: cpsw-phy-sel@44e10650 { + compatible = "ti,am43xx-cpsw-phy-sel"; + reg= <0x44e10650 0x4>; + reg-names = "gmii-sel"; + }; };
epwmss0: epwmss@48300000 { @@@ -742,121 -737,6 +744,121 @@@ #size-cells = <1>; status = "disabled"; }; + + am43xx_control_usb2phy1: control-phy@44e10620 { + compatible = "ti,control-phy-usb2-am437"; + reg = <0x44e10620 0x4>; + reg-names = "power"; + }; + + am43xx_control_usb2phy2: control-phy@0x44e10628 { + compatible = "ti,control-phy-usb2-am437"; + reg = <0x44e10628 0x4>; + reg-names = "power"; + }; + + ocp2scp0: ocp2scp@483a8000 { + compatible = "ti,omap-ocp2scp"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + ti,hwmods = "ocp2scp0"; + + usb2_phy1: phy@483a8000 { + compatible = "ti,am437x-usb2"; + reg = <0x483a8000 0x8000>; + ctrl-module = <&am43xx_control_usb2phy1>; + clocks = <&usb_phy0_always_on_clk32k>, + <&usb_otg_ss0_refclk960m>; + clock-names = "wkupclk", "refclk"; + #phy-cells = <0>; + status = "disabled"; + }; + }; + + ocp2scp1: ocp2scp@483e8000 { + compatible = "ti,omap-ocp2scp"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + ti,hwmods = "ocp2scp1"; + + usb2_phy2: phy@483e8000 { + compatible = "ti,am437x-usb2"; + reg = <0x483e8000 0x8000>; + ctrl-module = <&am43xx_control_usb2phy2>; + clocks = <&usb_phy1_always_on_clk32k>, + <&usb_otg_ss1_refclk960m>; + clock-names = "wkupclk", "refclk"; + #phy-cells = <0>; + status = "disabled"; + }; + }; + + dwc3_1: omap_dwc3@48380000 { + compatible = "ti,am437x-dwc3"; + ti,hwmods = "usb_otg_ss0"; + reg = <0x48380000 0x10000>; + interrupts = <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <1>; + utmi-mode = <1>; + ranges; + + usb1: usb@48390000 { + compatible = "synopsys,dwc3"; + reg = <0x48390000 0x17000>; + interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>; + phys = <&usb2_phy1>; + phy-names = "usb2-phy"; + maximum-speed = "high-speed"; + dr_mode = "otg"; + status = "disabled"; + }; + }; + + dwc3_2: omap_dwc3@483c0000 { + compatible = "ti,am437x-dwc3"; + ti,hwmods = "usb_otg_ss1"; + reg = <0x483c0000 0x10000>; + interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <1>; + utmi-mode = <1>; + ranges; + + usb2: usb@483d0000 { + compatible = "synopsys,dwc3"; + reg = <0x483d0000 0x17000>; + interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>; + phys = <&usb2_phy2>; + phy-names = "usb2-phy"; + maximum-speed = "high-speed"; + dr_mode = "otg"; + status = "disabled"; + }; + }; + + qspi: qspi@47900000 { + compatible = "ti,am4372-qspi"; + reg = <0x47900000 0x100>; + #address-cells = <1>; + #size-cells = <0>; + ti,hwmods = "qspi"; + interrupts = <0 138 0x4>; + num-cs = <4>; + status = "disabled"; + }; + + hdq: hdq@48347000 { + compatible = "ti,am43xx-hdq"; + reg = <0x48347000 0x1000>; + interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&func_12m_clk>; + clock-names = "fck"; + ti,hwmods = "hdq1w"; + status = "disabled"; + }; }; };
diff --combined arch/arm/boot/dts/armada-xp-matrix.dts index 25674fe,3bb8c00..7e291e2 --- a/arch/arm/boot/dts/armada-xp-matrix.dts +++ b/arch/arm/boot/dts/armada-xp-matrix.dts @@@ -37,15 -37,19 +37,15 @@@
internal-regs { serial@12000 { status = "okay"; }; serial@12100 { status = "okay"; }; serial@12200 { status = "okay"; }; serial@12300 { - clock-frequency = <250000000>; status = "okay"; };
@@@ -57,6 -61,10 +57,10 @@@ ethernet@30000 { status = "okay"; phy-mode = "sgmii"; + fixed-link { + speed = <1000>; + full-duplex; + }; };
pcie-controller { diff --combined arch/sparc/include/asm/checksum_32.h index c80cf02,04471dc..426b238 --- a/arch/sparc/include/asm/checksum_32.h +++ b/arch/sparc/include/asm/checksum_32.h @@@ -29,7 -29,7 +29,7 @@@ * * it's best to have buff aligned on a 32-bit boundary */ -extern __wsum csum_partial(const void *buff, int len, __wsum sum); +__wsum csum_partial(const void *buff, int len, __wsum sum);
/* the same as csum_partial, but copies from fs:src while it * checksums @@@ -38,7 -38,7 +38,7 @@@ * better 64-bit) boundary */
-extern unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *); +unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) @@@ -238,4 -238,16 +238,16 @@@ static inline __sum16 ip_compute_csum(c return csum_fold(csum_partial(buff, len, 0)); }
+ #define HAVE_ARCH_CSUM_ADD + static inline __wsum csum_add(__wsum csum, __wsum addend) + { + __asm__ __volatile__( + "addcc %0, %1, %0\n" + "addx %0, %%g0, %0" + : "=r" (csum) + : "r" (addend), "0" (csum)); + + return csum; + } + #endif /* !(__SPARC_CHECKSUM_H) */ diff --combined arch/sparc/include/asm/checksum_64.h index b3c8b9d,2ff81ae..b8779a6 --- a/arch/sparc/include/asm/checksum_64.h +++ b/arch/sparc/include/asm/checksum_64.h @@@ -29,7 -29,7 +29,7 @@@ * * it's best to have buff aligned on a 32-bit boundary */ -extern __wsum csum_partial(const void * buff, int len, __wsum sum); +__wsum csum_partial(const void * buff, int len, __wsum sum);
/* the same as csum_partial, but copies from user space while it * checksums @@@ -37,12 -37,12 +37,12 @@@ * here even more important to align src and dst on a 32-bit (or even * better 64-bit) boundary */ -extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, - int len, __wsum sum); +__wsum csum_partial_copy_nocheck(const void *src, void *dst, + int len, __wsum sum);
-extern long __csum_partial_copy_from_user(const void __user *src, - void *dst, int len, - __wsum sum); +long __csum_partial_copy_from_user(const void __user *src, + void *dst, int len, + __wsum sum);
static inline __wsum csum_partial_copy_from_user(const void __user *src, @@@ -59,9 -59,9 +59,9 @@@ * Copy and checksum to user */ #define HAVE_CSUM_COPY_USER -extern long __csum_partial_copy_to_user(const void *src, - void __user *dst, int len, - __wsum sum); +long __csum_partial_copy_to_user(const void *src, + void __user *dst, int len, + __wsum sum);
static inline __wsum csum_and_copy_to_user(const void *src, @@@ -77,7 -77,7 +77,7 @@@ /* ihl is always 5 or greater, almost always is 5, and iph is word aligned * the majority of the time. */ -extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); +__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
/* Fold a partial checksum without adding pseudo headers. */ static inline __sum16 csum_fold(__wsum sum) @@@ -96,9 -96,9 +96,9 @@@ }
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned int len, - unsigned short proto, - __wsum sum) + unsigned int len, + unsigned short proto, + __wsum sum) { __asm__ __volatile__( " addcc %1, %0, %0\n" @@@ -116,9 -116,9 +116,9 @@@ * returns a 16-bit checksum, already complemented */ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) + unsigned short len, + unsigned short proto, + __wsum sum) { return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); } @@@ -164,4 -164,16 +164,16 @@@ static inline __sum16 ip_compute_csum(c return csum_fold(csum_partial(buff, len, 0)); }
+ #define HAVE_ARCH_CSUM_ADD + static inline __wsum csum_add(__wsum csum, __wsum addend) + { + __asm__ __volatile__( + "addcc %0, %1, %0\n" + "addx %0, %%g0, %0" + : "=r" (csum) + : "r" (addend), "0" (csum)); + + return csum; + } + #endif /* !(__SPARC64_CHECKSUM_H) */ diff --combined drivers/clk/ti/clk-43xx.c index 527a43d,b4877e0..3795fce --- a/drivers/clk/ti/clk-43xx.c +++ b/drivers/clk/ti/clk-43xx.c @@@ -105,20 -105,30 +105,36 @@@ static struct ti_dt_clk am43xx_clks[] DT_CLK(NULL, "func_12m_clk", "func_12m_clk"), DT_CLK(NULL, "vtp_clk_div", "vtp_clk_div"), DT_CLK(NULL, "usbphy_32khz_clkmux", "usbphy_32khz_clkmux"), + DT_CLK("48300200.ehrpwm", "tbclk", "ehrpwm0_tbclk"), + DT_CLK("48302200.ehrpwm", "tbclk", "ehrpwm1_tbclk"), + DT_CLK("48304200.ehrpwm", "tbclk", "ehrpwm2_tbclk"), + DT_CLK("48306200.ehrpwm", "tbclk", "ehrpwm3_tbclk"), + DT_CLK("48308200.ehrpwm", "tbclk", "ehrpwm4_tbclk"), + DT_CLK("4830a200.ehrpwm", "tbclk", "ehrpwm5_tbclk"), { .node_name = NULL }, };
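For comparison with the SPARC assembly added above (the same helper appears in both the 32-bit and 64-bit headers): defining HAVE_ARCH_CSUM_ADD makes the generic checksum header skip its portable fallback, which looks roughly like the following and folds the carry back in with an overflow test instead of the addcc/addx pair.

/* Portable end-around-carry add used when no architecture override is
 * provided: the (res < addend) comparison is 1 exactly when the 32-bit
 * addition wrapped, which is the carry that addcc/addx feed back in.
 */
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
        u32 res = (__force u32)csum;

        res += (__force u32)addend;
        return (__force __wsum)(res + (res < (__force u32)addend));
}
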
int __init am43xx_dt_clk_init(void) { + struct clk *clk1, *clk2; + ti_dt_clocks_register(am43xx_clks);
omap2_clk_disable_autoidle_all();
+ /* + * cpsw_cpts_rft_clk has got the choice of 3 clocksources + * dpll_core_m4_ck, dpll_core_m5_ck and dpll_disp_m2_ck. + * By default dpll_core_m4_ck is selected, witn this as clock + * source the CPTS doesnot work properly. It gives clockcheck errors + * while running PTP. + * clockcheck: clock jumped backward or running slower than expected! + * By selecting dpll_core_m5_ck as the clocksource fixes this issue. + * In AM335x dpll_core_m5_ck is the default clocksource. + */ + clk1 = clk_get_sys(NULL, "cpsw_cpts_rft_clk"); + clk2 = clk_get_sys(NULL, "dpll_core_m5_ck"); + clk_set_parent(clk1, clk2); + return 0; } diff --combined drivers/net/ethernet/mellanox/mlx4/main.c index c187d74,38e9a4c..19606a4 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@@ -104,8 -104,6 +104,6 @@@ module_param(enable_64b_cqe_eqe, bool, MODULE_PARM_DESC(enable_64b_cqe_eqe, "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
- #define HCA_GLOBAL_CAP_MASK 0 - #define PF_CONTEXT_BEHAVIOUR_MASK MLX4_FUNC_CAP_64B_EQE_CQE
static char mlx4_version[] = @@@ -134,8 -132,7 +132,7 @@@ MODULE_PARM_DESC(log_num_vlan, "Log2 ma
static bool use_prio; module_param_named(use_prio, use_prio, bool, 0444); - MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " - "(0/1, default 0)"); + MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); @@@ -163,8 -160,7 +160,7 @@@ int mlx4_check_port_params(struct mlx4_ for (i = 0; i < dev->caps.num_ports - 1; i++) { if (port_type[i] != port_type[i + 1]) { if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { - mlx4_err(dev, "Only same port types supported " - "on this HCA, aborting.\n"); + mlx4_err(dev, "Only same port types supported on this HCA, aborting\n"); return -EINVAL; } } @@@ -172,8 -168,8 +168,8 @@@
for (i = 0; i < dev->caps.num_ports; i++) { if (!(port_type[i] & dev->caps.supported_type[i+1])) { - mlx4_err(dev, "Requested port type for port %d is not " - "supported on this HCA\n", i + 1); + mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n", + i + 1); return -EINVAL; } } @@@ -195,26 -191,23 +191,23 @@@ static int mlx4_dev_cap(struct mlx4_de
err = mlx4_QUERY_DEV_CAP(dev, dev_cap); if (err) { - mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); return err; }
if (dev_cap->min_page_sz > PAGE_SIZE) { - mlx4_err(dev, "HCA minimum page size of %d bigger than " - "kernel PAGE_SIZE of %ld, aborting.\n", + mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", dev_cap->min_page_sz, PAGE_SIZE); return -ENODEV; } if (dev_cap->num_ports > MLX4_MAX_PORTS) { - mlx4_err(dev, "HCA has %d ports, but we only support %d, " - "aborting.\n", + mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", dev_cap->num_ports, MLX4_MAX_PORTS); return -ENODEV; }
if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) { - mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than " - "PCI resource 2 size of 0x%llx, aborting.\n", + mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", dev_cap->uar_size, (unsigned long long) pci_resource_len(dev->pdev, 2)); return -ENODEV; @@@ -296,7 -289,6 +289,6 @@@
dev->caps.log_num_macs = log_num_mac; dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; - dev->caps.log_num_prios = use_prio ? 3 : 0;
for (i = 1; i <= dev->caps.num_ports; ++i) { dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; @@@ -347,14 -339,12 +339,12 @@@
if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) { dev->caps.log_num_macs = dev_cap->log_max_macs[i]; - mlx4_warn(dev, "Requested number of MACs is too much " - "for port %d, reducing to %d.\n", + mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n", i, 1 << dev->caps.log_num_macs); } if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) { dev->caps.log_num_vlans = dev_cap->log_max_vlans[i]; - mlx4_warn(dev, "Requested number of VLANs is too much " - "for port %d, reducing to %d.\n", + mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n", i, 1 << dev->caps.log_num_vlans); } } @@@ -366,7 -356,6 +356,6 @@@ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = (1 << dev->caps.log_num_macs) * (1 << dev->caps.log_num_vlans) * - (1 << dev->caps.log_num_prios) * dev->caps.num_ports; dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
@@@ -584,13 -573,14 +573,14 @@@ static int mlx4_slave_cap(struct mlx4_d memset(&hca_param, 0, sizeof(hca_param)); err = mlx4_QUERY_HCA(dev, &hca_param); if (err) { - mlx4_err(dev, "QUERY_HCA command failed, aborting.\n"); + mlx4_err(dev, "QUERY_HCA command failed, aborting\n"); return err; }
- /*fail if the hca has an unknown capability */ - if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) != - HCA_GLOBAL_CAP_MASK) { + /* fail if the hca has an unknown global capability + * at this time global_caps should be always zeroed + */ + if (hca_param.global_caps) { mlx4_err(dev, "Unknown hca global capabilities\n"); return -ENOSYS; } @@@ -603,19 -593,18 +593,18 @@@ dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp; err = mlx4_dev_cap(dev, &dev_cap); if (err) { - mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); return err; }
err = mlx4_QUERY_FW(dev); if (err) - mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n"); + mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");
page_size = ~dev->caps.page_size_cap + 1; mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); if (page_size > PAGE_SIZE) { - mlx4_err(dev, "HCA minimum page size of %d bigger than " - "kernel PAGE_SIZE of %ld, aborting.\n", + mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n", page_size, PAGE_SIZE); return -ENODEV; } @@@ -633,8 -622,8 +622,8 @@@ memset(&func_cap, 0, sizeof(func_cap)); err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); if (err) { - mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d).\n", - err); + mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n", + err); return err; }
@@@ -661,8 -650,8 +650,8 @@@ dev->caps.num_amgms = 0;
if (dev->caps.num_ports > MLX4_MAX_PORTS) { - mlx4_err(dev, "HCA has %d ports, but we only support %d, " - "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS); + mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n", + dev->caps.num_ports, MLX4_MAX_PORTS); return -ENODEV; }
@@@ -680,8 -669,8 +669,8 @@@ for (i = 1; i <= dev->caps.num_ports; ++i) { err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap); if (err) { - mlx4_err(dev, "QUERY_FUNC_CAP port command failed for" - " port %d, aborting (%d).\n", i, err); + mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n", + i, err); goto err_mem; } dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn; @@@ -699,8 -688,7 +688,7 @@@ if (dev->caps.uar_page_size * (dev->caps.num_uars - dev->caps.reserved_uars) > pci_resource_len(dev->pdev, 2)) { - mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than " - "PCI resource 2 size of 0x%llx, aborting.\n", + mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n", dev->caps.uar_page_size * dev->caps.num_uars, (unsigned long long) pci_resource_len(dev->pdev, 2)); goto err_mem; @@@ -722,7 -710,7 +710,7 @@@ }
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; - mlx4_warn(dev, "Timestamping is not supported in slave mode.\n"); + mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
@@@ -784,8 -772,8 +772,8 @@@ int mlx4_change_port_types(struct mlx4_ dev->caps.port_type[port] = port_types[port - 1]; err = mlx4_SET_PORT(dev, port, -1); if (err) { - mlx4_err(dev, "Failed to set port %d, " - "aborting\n", port); + mlx4_err(dev, "Failed to set port %d, aborting\n", + port); goto out; } } @@@ -868,9 -856,7 +856,7 @@@ static ssize_t set_port_type(struct dev } } if (err) { - mlx4_err(mdev, "Auto sensing is not supported on this HCA. " - "Set only 'eth' or 'ib' for both ports " - "(should be the same)\n"); + mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n"); goto out; }
@@@ -975,8 -961,8 +961,8 @@@ static ssize_t set_port_ib_mtu(struct d mlx4_CLOSE_PORT(mdev, port); err = mlx4_SET_PORT(mdev, port, -1); if (err) { - mlx4_err(mdev, "Failed to set port %d, " - "aborting\n", port); + mlx4_err(mdev, "Failed to set port %d, aborting\n", + port); goto err_set_port; } } @@@ -995,19 -981,19 +981,19 @@@ static int mlx4_load_fw(struct mlx4_de priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, GFP_HIGHUSER | __GFP_NOWARN, 0); if (!priv->fw.fw_icm) { - mlx4_err(dev, "Couldn't allocate FW area, aborting.\n"); + mlx4_err(dev, "Couldn't allocate FW area, aborting\n"); return -ENOMEM; }
err = mlx4_MAP_FA(dev, priv->fw.fw_icm); if (err) { - mlx4_err(dev, "MAP_FA command failed, aborting.\n"); + mlx4_err(dev, "MAP_FA command failed, aborting\n"); goto err_free; }
err = mlx4_RUN_FW(dev); if (err) { - mlx4_err(dev, "RUN_FW command failed, aborting.\n"); + mlx4_err(dev, "RUN_FW command failed, aborting\n"); goto err_unmap_fa; }
@@@ -1091,30 -1077,30 +1077,30 @@@ static int mlx4_init_icm(struct mlx4_de
err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); if (err) { - mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n"); + mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n"); return err; }
- mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n", + mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n", (unsigned long long) icm_size >> 10, (unsigned long long) aux_pages << 2);
priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, GFP_HIGHUSER | __GFP_NOWARN, 0); if (!priv->fw.aux_icm) { - mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n"); + mlx4_err(dev, "Couldn't allocate aux memory, aborting\n"); return -ENOMEM; }
err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); if (err) { - mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n"); + mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n"); goto err_free_aux; }
err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); if (err) { - mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n"); + mlx4_err(dev, "Failed to map cMPT context memory, aborting\n"); goto err_unmap_aux; }
@@@ -1125,7 -1111,7 +1111,7 @@@ init_hca->eqc_base, dev_cap->eqc_entry_sz, num_eqs, num_eqs, 0, 0); if (err) { - mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); + mlx4_err(dev, "Failed to map EQ context memory, aborting\n"); goto err_unmap_cmpt; }
@@@ -1146,7 -1132,7 +1132,7 @@@ dev->caps.num_mtts, dev->caps.reserved_mtts, 1, 0); if (err) { - mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); + mlx4_err(dev, "Failed to map MTT context memory, aborting\n"); goto err_unmap_eq; }
@@@ -1156,7 -1142,7 +1142,7 @@@ dev->caps.num_mpts, dev->caps.reserved_mrws, 1, 1); if (err) { - mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n"); + mlx4_err(dev, "Failed to map dMPT context memory, aborting\n"); goto err_unmap_mtt; }
@@@ -1167,7 -1153,7 +1153,7 @@@ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 0, 0); if (err) { - mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); + mlx4_err(dev, "Failed to map QP context memory, aborting\n"); goto err_unmap_dmpt; }
@@@ -1178,7 -1164,7 +1164,7 @@@ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 0, 0); if (err) { - mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); + mlx4_err(dev, "Failed to map AUXC context memory, aborting\n"); goto err_unmap_qp; }
@@@ -1189,7 -1175,7 +1175,7 @@@ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 0, 0); if (err) { - mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); + mlx4_err(dev, "Failed to map ALTC context memory, aborting\n"); goto err_unmap_auxc; }
@@@ -1210,7 -1196,7 +1196,7 @@@ dev->caps.num_cqs, dev->caps.reserved_cqs, 0, 0); if (err) { - mlx4_err(dev, "Failed to map CQ context memory, aborting.\n"); + mlx4_err(dev, "Failed to map CQ context memory, aborting\n"); goto err_unmap_rdmarc; }
@@@ -1220,7 -1206,7 +1206,7 @@@ dev->caps.num_srqs, dev->caps.reserved_srqs, 0, 0); if (err) { - mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n"); + mlx4_err(dev, "Failed to map SRQ context memory, aborting\n"); goto err_unmap_cq; }
@@@ -1238,7 -1224,7 +1224,7 @@@ dev->caps.num_mgms + dev->caps.num_amgms, 0, 0); if (err) { - mlx4_err(dev, "Failed to map MCG context memory, aborting.\n"); + mlx4_err(dev, "Failed to map MCG context memory, aborting\n"); goto err_unmap_srq; }
@@@ -1315,7 -1301,7 +1301,7 @@@ static void mlx4_slave_exit(struct mlx4
mutex_lock(&priv->cmd.slave_cmd_mutex); if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME)) - mlx4_warn(dev, "Failed to close slave function.\n"); + mlx4_warn(dev, "Failed to close slave function\n"); mutex_unlock(&priv->cmd.slave_cmd_mutex); }
@@@ -1413,7 -1399,7 +1399,7 @@@ static int mlx4_init_slave(struct mlx4_ u32 cmd_channel_ver;
if (atomic_read(&pf_loading)) { - mlx4_warn(dev, "PF is not ready. Deferring probe\n"); + mlx4_warn(dev, "PF is not ready - Deferring probe\n"); return -EPROBE_DEFER; }
@@@ -1426,8 -1412,7 +1412,7 @@@ * NUM_OF_RESET_RETRIES times before leaving.*/ if (ret_from_reset) { if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { - mlx4_warn(dev, "slave is currently in the " - "middle of FLR. Deferring probe.\n"); + mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n"); mutex_unlock(&priv->cmd.slave_cmd_mutex); return -EPROBE_DEFER; } else @@@ -1441,8 -1426,7 +1426,7 @@@
if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != MLX4_COMM_GET_IF_REV(slave_read)) { - mlx4_err(dev, "slave driver version is not supported" - " by the master\n"); + mlx4_err(dev, "slave driver version is not supported by the master\n"); goto err; }
@@@ -1520,8 -1504,7 +1504,7 @@@ static void choose_steering_mode(struc
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) - mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags " - "set to use B0 steering. Falling back to A0 steering mode.\n"); + mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n"); } dev->oper_log_mgm_entry_size = mlx4_log_num_mgm_entry_size > 0 ? @@@ -1529,8 -1512,7 +1512,7 @@@ MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); } - mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, " - "modparam log_num_mgm_entry_size = %d\n", + mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n", mlx4_steering_mode_str(dev->caps.steering_mode), dev->oper_log_mgm_entry_size, mlx4_log_num_mgm_entry_size); @@@ -1564,15 -1546,15 +1546,15 @@@ static int mlx4_init_hca(struct mlx4_de err = mlx4_QUERY_FW(dev); if (err) { if (err == -EACCES) - mlx4_info(dev, "non-primary physical function, skipping.\n"); + mlx4_info(dev, "non-primary physical function, skipping\n"); else - mlx4_err(dev, "QUERY_FW command failed, aborting.\n"); + mlx4_err(dev, "QUERY_FW command failed, aborting\n"); return err; }
err = mlx4_load_fw(dev); if (err) { - mlx4_err(dev, "Failed to start FW, aborting.\n"); + mlx4_err(dev, "Failed to start FW, aborting\n"); return err; }
@@@ -1584,7 -1566,7 +1566,7 @@@
err = mlx4_dev_cap(dev, &dev_cap); if (err) { - mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); goto err_stop_fw; }
@@@ -1625,7 -1607,7 +1607,7 @@@
err = mlx4_INIT_HCA(dev, &init_hca); if (err) { - mlx4_err(dev, "INIT_HCA command failed, aborting.\n"); + mlx4_err(dev, "INIT_HCA command failed, aborting\n"); goto err_free_icm; } /* @@@ -1636,7 -1618,7 +1618,7 @@@ memset(&init_hca, 0, sizeof(init_hca)); err = mlx4_QUERY_HCA(dev, &init_hca); if (err) { - mlx4_err(dev, "QUERY_HCA command failed, disable timestamp.\n"); + mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n"); dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; } else { dev->caps.hca_core_clock = @@@ -1649,14 -1631,14 +1631,14 @@@ if (!dev->caps.hca_core_clock) { dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; mlx4_err(dev, - "HCA frequency is 0. Timestamping is not supported."); + "HCA frequency is 0 - timestamping is not supported\n"); } else if (map_internal_clock(dev)) { /* * Map internal clock, * in case of failure disable timestamping */ dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; - mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported.\n"); + mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n"); } } } else { @@@ -1683,7 -1665,7 +1665,7 @@@
err = mlx4_QUERY_ADAPTER(dev, &adapter); if (err) { - mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n"); + mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n"); goto unmap_bf; }
@@@ -1793,79 -1775,69 +1775,69 @@@ static int mlx4_setup_hca(struct mlx4_d
err = mlx4_init_uar_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "user access region table, aborting.\n"); - return err; + mlx4_err(dev, "Failed to initialize user access region table, aborting\n"); + return err; }
err = mlx4_uar_alloc(dev, &priv->driver_uar); if (err) { - mlx4_err(dev, "Failed to allocate driver access region, " - "aborting.\n"); + mlx4_err(dev, "Failed to allocate driver access region, aborting\n"); goto err_uar_table_free; }
priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); if (!priv->kar) { - mlx4_err(dev, "Couldn't map kernel access region, " - "aborting.\n"); + mlx4_err(dev, "Couldn't map kernel access region, aborting\n"); err = -ENOMEM; goto err_uar_free; }
err = mlx4_init_pd_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "protection domain table, aborting.\n"); + mlx4_err(dev, "Failed to initialize protection domain table, aborting\n"); goto err_kar_unmap; }
err = mlx4_init_xrcd_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "reliable connection domain table, aborting.\n"); + mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n"); goto err_pd_table_free; }
err = mlx4_init_mr_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "memory region table, aborting.\n"); + mlx4_err(dev, "Failed to initialize memory region table, aborting\n"); goto err_xrcd_table_free; }
if (!mlx4_is_slave(dev)) { err = mlx4_init_mcg_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n"); + mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); goto err_mr_table_free; } }
err = mlx4_init_eq_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "event queue table, aborting.\n"); + mlx4_err(dev, "Failed to initialize event queue table, aborting\n"); goto err_mcg_table_free; }
err = mlx4_cmd_use_events(dev); if (err) { - mlx4_err(dev, "Failed to switch to event-driven " - "firmware commands, aborting.\n"); + mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n"); goto err_eq_table_free; }
err = mlx4_NOP(dev); if (err) { if (dev->flags & MLX4_FLAG_MSI_X) { - mlx4_warn(dev, "NOP command failed to generate MSI-X " - "interrupt IRQ %d).\n", + mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n", priv->eq_table.eq[dev->caps.num_comp_vectors].irq); - mlx4_warn(dev, "Trying again without MSI-X.\n"); + mlx4_warn(dev, "Trying again without MSI-X\n"); } else { - mlx4_err(dev, "NOP command failed to generate interrupt " - "(IRQ %d), aborting.\n", + mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n", priv->eq_table.eq[dev->caps.num_comp_vectors].irq); mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); } @@@ -1877,28 -1849,25 +1849,25 @@@
err = mlx4_init_cq_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "completion queue table, aborting.\n"); + mlx4_err(dev, "Failed to initialize completion queue table, aborting\n"); goto err_cmd_poll; }
err = mlx4_init_srq_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "shared receive queue table, aborting.\n"); + mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n"); goto err_cq_table_free; }
err = mlx4_init_qp_table(dev); if (err) { - mlx4_err(dev, "Failed to initialize " - "queue pair table, aborting.\n"); + mlx4_err(dev, "Failed to initialize queue pair table, aborting\n"); goto err_srq_table_free; }
err = mlx4_init_counters_table(dev); if (err && err != -ENOENT) { - mlx4_err(dev, "Failed to initialize counters table, aborting.\n"); + mlx4_err(dev, "Failed to initialize counters table, aborting\n"); goto err_qp_table_free; }
@@@ -1908,9 -1877,8 +1877,8 @@@ err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps); if (err) - mlx4_warn(dev, "failed to get port %d default " - "ib capabilities (%d). Continuing " - "with caps = 0\n", port, err); + mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n", + port, err); dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
/* initialize per-slave default ib port capabilities */ @@@ -1920,7 -1888,7 +1888,7 @@@ if (i == mlx4_master_func_num(dev)) continue; priv->mfunc.master.slave_state[i].ib_cap_mask[port] = - ib_port_default_caps; + ib_port_default_caps; } }
@@@ -1933,7 -1901,7 +1901,7 @@@ dev->caps.pkey_table_len[port] : -1); if (err) { mlx4_err(dev, "Failed to set port %d, aborting\n", - port); + port); goto err_counters_table_free; } } @@@ -2009,7 -1977,7 +1977,7 @@@ static void mlx4_enable_msi_x(struct ml kfree(entries); goto no_msi; } else if (nreq < MSIX_LEGACY_SZ + - dev->caps.num_ports * MIN_MSIX_P_PORT) { + dev->caps.num_ports * MIN_MSIX_P_PORT) { /*Working in legacy mode , all EQ's shared*/ dev->caps.comp_pool = 0; dev->caps.num_comp_vectors = nreq - 1; @@@ -2044,7 -2012,6 +2012,7 @@@ static int mlx4_init_port_info(struct m if (!mlx4_is_slave(dev)) { mlx4_init_mac_table(dev, &info->mac_table); mlx4_init_vlan_table(dev, &info->vlan_table); + mlx4_init_roce_gid_table(dev, &info->gid_table); info->base_qpn = mlx4_get_base_qpn(dev, port); }
@@@ -2210,8 -2177,7 +2178,7 @@@ static int __mlx4_init_one(struct pci_d
err = pci_enable_device(pdev); if (err) { - dev_err(&pdev->dev, "Cannot enable PCI device, " - "aborting.\n"); + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); return err; }
@@@ -2258,14 -2224,13 +2225,13 @@@ */ if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { - dev_err(&pdev->dev, "Missing DCS, aborting." - "(driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", + dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", pci_dev_data, pci_resource_flags(pdev, 0)); err = -ENODEV; goto err_disable_pdev; } if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { - dev_err(&pdev->dev, "Missing UAR, aborting.\n"); + dev_err(&pdev->dev, "Missing UAR, aborting\n"); err = -ENODEV; goto err_disable_pdev; } @@@ -2280,21 -2245,19 +2246,19 @@@
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { - dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); + dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); + dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); goto err_release_regions; } } err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) { - dev_warn(&pdev->dev, "Warning: couldn't set 64-bit " - "consistent PCI DMA mask.\n"); + dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { - dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " - "aborting.\n"); + dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n"); goto err_release_regions; } } @@@ -2325,7 -2288,7 +2289,7 @@@ if (total_vfs) { unsigned vfs_offset = 0; for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && - vfs_offset + nvfs[i] < extended_func_num(pdev); + vfs_offset + nvfs[i] < extended_func_num(pdev); vfs_offset += nvfs[i], i++) ; if (i == sizeof(nvfs)/sizeof(nvfs[0])) { @@@ -2351,8 -2314,7 +2315,7 @@@ if (err < 0) goto err_free_dev; else { - mlx4_warn(dev, "Multiple PFs not yet supported." - " Skipping PF.\n"); + mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n"); err = -EINVAL; goto err_free_dev; } @@@ -2362,8 -2324,8 +2325,8 @@@ mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); dev->dev_vfs = kzalloc( - total_vfs * sizeof(*dev->dev_vfs), - GFP_KERNEL); + total_vfs * sizeof(*dev->dev_vfs), + GFP_KERNEL); if (NULL == dev->dev_vfs) { mlx4_err(dev, "Failed to allocate memory for VFs\n"); err = 0; @@@ -2371,14 -2333,14 +2334,14 @@@ atomic_inc(&pf_loading); err = pci_enable_sriov(pdev, total_vfs); if (err) { - mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", + mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", err); atomic_dec(&pf_loading); err = 0; } else { mlx4_warn(dev, "Running in master mode\n"); dev->flags |= MLX4_FLAG_SRIOV | - MLX4_FLAG_MASTER; + MLX4_FLAG_MASTER; dev->num_vfs = total_vfs; sriov_initialized = 1; } @@@ -2395,7 -2357,7 +2358,7 @@@ */ err = mlx4_reset(dev); if (err) { - mlx4_err(dev, "Failed to reset HCA, aborting.\n"); + mlx4_err(dev, "Failed to reset HCA, aborting\n"); goto err_rel_own; } } @@@ -2403,7 -2365,7 +2366,7 @@@ slave_start: err = mlx4_cmd_init(dev); if (err) { - mlx4_err(dev, "Failed to init command interface, aborting.\n"); + mlx4_err(dev, "Failed to init command interface, aborting\n"); goto err_sriov; }
@@@ -2417,8 -2379,7 +2380,7 @@@ dev->num_slaves = 0; err = mlx4_multi_func_init(dev); if (err) { - mlx4_err(dev, "Failed to init slave mfunc" - " interface, aborting.\n"); + mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n"); goto err_cmd; } } @@@ -2450,8 -2411,7 +2412,7 @@@ unsigned sum = 0; err = mlx4_multi_func_init(dev); if (err) { - mlx4_err(dev, "Failed to init master mfunc" - "interface, aborting.\n"); + mlx4_err(dev, "Failed to init master mfunc interface, aborting\n"); goto err_close; } if (sriov_initialized) { @@@ -2462,10 -2422,7 +2423,7 @@@ if (ib_ports && (num_vfs_argc > 1 || probe_vfs_argc > 1)) { mlx4_err(dev, - "Invalid syntax of num_vfs/probe_vfs " - "with IB port. Single port VFs syntax" - " is only supported when all ports " - "are configured as ethernet\n"); + "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n"); goto err_close; } for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) { @@@ -2491,8 -2448,7 +2449,7 @@@ if ((mlx4_is_mfunc(dev)) && !(dev->flags & MLX4_FLAG_MSI_X)) { err = -ENOSYS; - mlx4_err(dev, "INTx is not supported in multi-function mode." - " aborting.\n"); + mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n"); goto err_free_eq; }
@@@ -2637,7 -2593,7 +2594,7 @@@ static void __mlx4_remove_one(struct pc /* in SRIOV it is not allowed to unload the pf's * driver while there are alive vf's */ if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev)) - printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n"); + pr_warn("Removing PF when there are assigned VF's !!!\n"); mlx4_stop_sense(dev); mlx4_unregister_device(dev);
@@@ -2808,33 -2764,36 +2765,36 @@@ static struct pci_driver mlx4_driver = static int __init mlx4_verify_params(void) { if ((log_num_mac < 0) || (log_num_mac > 7)) { - pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac); + pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac); return -1; }
if (log_num_vlan != 0) - pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n", - MLX4_LOG_NUM_VLANS); + pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n", + MLX4_LOG_NUM_VLANS); + + if (use_prio != 0) + pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");
if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { - pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); + pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n", + log_mtts_per_seg); return -1; }
/* Check if module param for ports type has legal combination */ if (port_type_array[0] == false && port_type_array[1] == true) { - printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n"); + pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n"); port_type_array[0] = true; }
if (mlx4_log_num_mgm_entry_size != -1 && (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) { - pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not " - "in legal range (-1 or %d..%d)\n", - mlx4_log_num_mgm_entry_size, - MLX4_MIN_MGM_LOG_ENTRY_SIZE, - MLX4_MAX_MGM_LOG_ENTRY_SIZE); + pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-1 or %d..%d)\n", + mlx4_log_num_mgm_entry_size, + MLX4_MIN_MGM_LOG_ENTRY_SIZE, + MLX4_MAX_MGM_LOG_ENTRY_SIZE); return -1; }
diff --combined drivers/net/ethernet/mellanox/mlx4/mlx4.h index 8e9eb02,9dd1b30..4b416ed --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@@ -216,18 -216,19 +216,19 @@@ extern int mlx4_debug_level #define mlx4_debug_level (0) #endif /* CONFIG_MLX4_DEBUG */
- #define mlx4_dbg(mdev, format, arg...) \ + #define mlx4_dbg(mdev, format, ...) \ do { \ if (mlx4_debug_level) \ - dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \ + dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format, \ + ##__VA_ARGS__); \ } while (0)
- #define mlx4_err(mdev, format, arg...) \ - dev_err(&mdev->pdev->dev, format, ##arg) - #define mlx4_info(mdev, format, arg...) \ - dev_info(&mdev->pdev->dev, format, ##arg) - #define mlx4_warn(mdev, format, arg...) \ - dev_warn(&mdev->pdev->dev, format, ##arg) + #define mlx4_err(mdev, format, ...) \ + dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__) + #define mlx4_info(mdev, format, ...) \ + dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__) + #define mlx4_warn(mdev, format, ...) \ + dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
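A brief aside on the (mdev) parenthesization introduced in these macros (the caller below is hypothetical, not from the patch): without the parentheses, an argument that is itself an expression gets torn apart by operator precedence.

/* Hypothetical caller that picks the device at the call site - exactly
 * the case the added parentheses protect.
 */
static void report_reset(struct mlx4_dev *master, struct mlx4_dev *slave,
                         bool use_slave)
{
        /* The old expansion &mdev->pdev->dev of the first argument would
         * parse as (&use_slave) ? slave : (master->pdev->dev), since ->
         * and unary & bind tighter than ?:.  The new &(mdev)->pdev->dev
         * keeps the ternary expression intact.
         */
        mlx4_warn(use_slave ? slave : master, "device reset requested\n");
}
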
extern int mlx4_log_num_mgm_entry_size; extern int log_mtts_per_seg; @@@ -695,17 -696,6 +696,17 @@@ struct mlx4_mac_table int max; };
+#define MLX4_ROCE_GID_ENTRY_SIZE 16 + +struct mlx4_roce_gid_entry { + u8 raw[MLX4_ROCE_GID_ENTRY_SIZE]; +}; + +struct mlx4_roce_gid_table { + struct mlx4_roce_gid_entry roce_gids[MLX4_ROCE_MAX_GIDS]; + struct mutex mutex; +}; + #define MLX4_MAX_VLAN_NUM 128 #define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2)
@@@ -769,7 -759,6 +770,7 @@@ struct mlx4_port_info struct device_attribute port_mtu_attr; struct mlx4_mac_table mac_table; struct mlx4_vlan_table vlan_table; + struct mlx4_roce_gid_table gid_table; int base_qpn; };
@@@ -800,6 -789,10 +801,6 @@@ enum MLX4_USE_RR = 1, };
-struct mlx4_roce_gid_entry { - u8 raw[16]; -}; - struct mlx4_priv { struct mlx4_dev dev;
@@@ -847,6 -840,7 +848,6 @@@ int fs_hash_mode; u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; __be64 slave_node_guids[MLX4_MFUNC_MAX]; - struct mlx4_roce_gid_entry roce_gids[MLX4_MAX_PORTS][MLX4_ROCE_MAX_GIDS];
atomic_t opreq_count; struct work_struct opreq_task; @@@ -1147,8 -1141,6 +1148,8 @@@ int mlx4_change_port_types(struct mlx4_
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); +void mlx4_init_roce_gid_table(struct mlx4_dev *dev, + struct mlx4_roce_gid_table *table); void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan); int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
@@@ -1158,7 -1150,6 +1159,7 @@@ int mlx4_get_slave_from_resource_id(str enum mlx4_resource resource_type, u64 resource_id, int *slave); void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id); +void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave); int mlx4_init_resource_tracker(struct mlx4_dev *dev);
void mlx4_free_resource_tracker(struct mlx4_dev *dev, diff --combined drivers/net/ethernet/mellanox/mlx4/port.c index 5ec6f20,376f2f1..7ab9717 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@@ -75,16 -75,6 +75,16 @@@ void mlx4_init_vlan_table(struct mlx4_d table->total = 0; }
+void mlx4_init_roce_gid_table(struct mlx4_dev *dev, + struct mlx4_roce_gid_table *table) +{ + int i; + + mutex_init(&table->mutex); + for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) + memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE); +} + static int validate_index(struct mlx4_dev *dev, struct mlx4_mac_table *table, int index) { @@@ -254,8 -244,8 +254,8 @@@ void __mlx4_unregister_mac(struct mlx4_ if (validate_index(dev, table, index)) goto out; if (--table->refs[index]) { - mlx4_dbg(dev, "Have more references for index %d," - "no need to modify mac table\n", index); + mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n", + index); goto out; }
@@@ -453,9 -443,8 +453,8 @@@ void __mlx4_unregister_vlan(struct mlx4 }
if (--table->refs[index]) { - mlx4_dbg(dev, "Have %d more references for index %d," - "no need to modify vlan table\n", table->refs[index], - index); + mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n", + table->refs[index], index); goto out; } table->entries[index] = 0; @@@ -594,84 -583,6 +593,84 @@@ int mlx4_get_base_gid_ix(struct mlx4_de } EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
+static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave, + int port, struct mlx4_cmd_mailbox *mailbox) +{ + struct mlx4_roce_gid_entry *gid_entry_mbox; + struct mlx4_priv *priv = mlx4_priv(dev); + int num_gids, base, offset; + int i, err; + + num_gids = mlx4_get_slave_num_gids(dev, slave, port); + base = mlx4_get_base_gid_ix(dev, slave, port); + + memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE); + + mutex_lock(&(priv->port[port].gid_table.mutex)); + /* Zero-out gids belonging to that slave in the port GID table */ + for (i = 0, offset = base; i < num_gids; offset++, i++) + memcpy(priv->port[port].gid_table.roce_gids[offset].raw, + zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE); + + /* Now, copy roce port gids table to mailbox for passing to FW */ + gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf; + for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++) + memcpy(gid_entry_mbox->raw, + priv->port[port].gid_table.roce_gids[i].raw, + MLX4_ROCE_GID_ENTRY_SIZE); + + err = mlx4_cmd(dev, mailbox->dma, + ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8), 1, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + mutex_unlock(&(priv->port[port].gid_table.mutex)); + return err; +} + + +void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave) +{ + struct mlx4_active_ports actv_ports; + struct mlx4_cmd_mailbox *mailbox; + int num_eth_ports, err; + int i; + + if (slave < 0 || slave > dev->num_vfs) + return; + + actv_ports = mlx4_get_active_ports(dev, slave); + + for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) { + if (test_bit(i, actv_ports.ports)) { + if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH) + continue; + num_eth_ports++; + } + } + + if (!num_eth_ports) + return; + + /* have ETH ports. Alloc mailbox for SET_PORT command */ + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return; + + for (i = 0; i < dev->caps.num_ports; i++) { + if (test_bit(i, actv_ports.ports)) { + if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH) + continue; + err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox); + if (err) + mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n", + slave, i + 1, err); + } + } + + mlx4_free_cmd_mailbox(dev, mailbox); + return; +} + static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, u8 op_mod, struct mlx4_cmd_mailbox *inbox) { @@@ -780,12 -691,10 +779,12 @@@ /* 2. 
Check that do not have duplicates in OTHER * entries in the port GID table */ + + mutex_lock(&(priv->port[port].gid_table.mutex)); for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { if (i >= base && i < base + num_gids) continue; /* don't compare to slave's current gids */ - gid_entry_tbl = &priv->roce_gids[port - 1][i]; + gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i]; if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry))) continue; gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); @@@ -796,10 -705,8 +795,9 @@@ if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw, sizeof(gid_entry_tbl->raw))) { /* found duplicate */ - mlx4_warn(dev, "requested gid entry for slave:%d " - "is a duplicate of gid at index %d\n", + mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n", slave, i); + mutex_unlock(&(priv->port[port].gid_table.mutex)); return -EINVAL; } } @@@ -808,24 -715,16 +806,24 @@@ /* insert slave GIDs with memcpy, starting at slave's base index */ gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++) - memcpy(priv->roce_gids[port - 1][offset].raw, gid_entry_mbox->raw, 16); + memcpy(priv->port[port].gid_table.roce_gids[offset].raw, + gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE);
/* Now, copy roce port gids table to current mailbox for passing to FW */ gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf); for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++) - memcpy(gid_entry_mbox->raw, priv->roce_gids[port - 1][i].raw, 16); - - break; + memcpy(gid_entry_mbox->raw, + priv->port[port].gid_table.roce_gids[i].raw, + MLX4_ROCE_GID_ENTRY_SIZE); + + err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod, + MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, + MLX4_CMD_NATIVE); + mutex_unlock(&(priv->port[port].gid_table.mutex)); + return err; } - return mlx4_cmd(dev, inbox->dma, in_mod, op_mod, + + return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); } @@@ -1198,8 -1097,7 +1196,8 @@@ int mlx4_get_slave_from_roce_gid(struc num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) { - if (!memcmp(priv->roce_gids[port - 1][i].raw, gid, 16)) { + if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid, + MLX4_ROCE_GID_ENTRY_SIZE)) { found_ix = i; break; } @@@ -1287,8 -1185,7 +1285,8 @@@ int mlx4_get_roce_gid_from_slave(struc if (!mlx4_is_master(dev)) return -EINVAL;
- memcpy(gid, priv->roce_gids[port - 1][slave_id].raw, 16); + memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw, + MLX4_ROCE_GID_ENTRY_SIZE); return 0; } EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave); diff --combined drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index f16e539,dd821b3..b6cddef --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@@ -586,7 -586,6 +586,7 @@@ void mlx4_free_resource_tracker(struct } /* free master's vlans */ i = dev->caps.function; + mlx4_reset_roce_gids(dev, i); mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); rem_slave_vlans(dev, i); mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); @@@ -963,7 -962,7 +963,7 @@@ static struct res_common *alloc_tr(u64 ret = alloc_srq_tr(id); break; case RES_MAC: - printk(KERN_ERR "implementation missing\n"); + pr_err("implementation missing\n"); return NULL; case RES_COUNTER: ret = alloc_counter_tr(id); @@@ -1057,10 -1056,10 +1057,10 @@@ static int remove_mtt_ok(struct res_mt { if (res->com.state == RES_MTT_BUSY || atomic_read(&res->ref_count)) { - printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n", - __func__, __LINE__, - mtt_states_str(res->com.state), - atomic_read(&res->ref_count)); + pr_devel("%s-%d: state %s, ref_count %d\n", + __func__, __LINE__, + mtt_states_str(res->com.state), + atomic_read(&res->ref_count)); return -EBUSY; } else if (res->com.state != RES_MTT_ALLOCATED) return -EPERM; @@@ -3881,7 -3880,7 +3881,7 @@@ static int add_eth_header(struct mlx4_d } } if (!be_mac) { - pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n", + pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n", port); return -EINVAL; } @@@ -3978,7 -3977,7 +3978,7 @@@ int mlx4_QP_FLOW_STEERING_ATTACH_wrappe qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; err = get_res(dev, slave, qpn, RES_QP, &rqp); if (err) { - pr_err("Steering rule with qpn 0x%x rejected.\n", qpn); + pr_err("Steering rule with qpn 0x%x rejected\n", qpn); return err; } rule_header = (struct _rule_hw *)(ctrl + 1); @@@ -3996,7 -3995,7 +3996,7 @@@ case MLX4_NET_TRANS_RULE_ID_IPV4: case MLX4_NET_TRANS_RULE_ID_TCP: case MLX4_NET_TRANS_RULE_ID_UDP: - pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n"); + pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n"); if (add_eth_header(dev, slave, inbox, rlist, header_id)) { err = -EINVAL; goto err_put; @@@ -4005,7 -4004,7 +4005,7 @@@ sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2; break; default: - pr_err("Corrupted mailbox.\n"); + pr_err("Corrupted mailbox\n"); err = -EINVAL; goto err_put; } @@@ -4019,7 -4018,7 +4019,7 @@@
err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn); if (err) { - mlx4_err(dev, "Fail to add flow steering resources.\n "); + mlx4_err(dev, "Fail to add flow steering resources\n"); /* detach rule*/ mlx4_cmd(dev, vhcr->out_param, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, @@@ -4057,7 -4056,7 +4057,7 @@@ int mlx4_QP_FLOW_STEERING_DETACH_wrappe
err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0); if (err) { - mlx4_err(dev, "Fail to remove flow steering resources.\n "); + mlx4_err(dev, "Fail to remove flow steering resources\n"); goto out; }
@@@ -4186,8 -4185,8 +4186,8 @@@ static void rem_slave_qps(struct mlx4_d
err = move_all_busy(dev, slave, RES_QP); if (err) - mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy" - "for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(qp, tmp, qp_list, com.list) { @@@ -4225,10 -4224,8 +4225,8 @@@ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) - mlx4_dbg(dev, "rem_slave_qps: failed" - " to move slave %d qpn %d to" - " reset\n", slave, - qp->local_qpn); + mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n", + slave, qp->local_qpn); atomic_dec(&qp->rcq->ref_count); atomic_dec(&qp->scq->ref_count); atomic_dec(&qp->mtt->ref_count); @@@ -4262,8 -4259,8 +4260,8 @@@ static void rem_slave_srqs(struct mlx4_
err = move_all_busy(dev, slave, RES_SRQ); if (err) - mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(srq, tmp, srq_list, com.list) { @@@ -4293,9 -4290,7 +4291,7 @@@ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) - mlx4_dbg(dev, "rem_slave_srqs: failed" - " to move slave %d srq %d to" - " SW ownership\n", + mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n", slave, srqn);
atomic_dec(&srq->mtt->ref_count); @@@ -4330,8 -4325,8 +4326,8 @@@ static void rem_slave_cqs(struct mlx4_d
err = move_all_busy(dev, slave, RES_CQ); if (err) - mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(cq, tmp, cq_list, com.list) { @@@ -4361,9 -4356,7 +4357,7 @@@ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) - mlx4_dbg(dev, "rem_slave_cqs: failed" - " to move slave %d cq %d to" - " SW ownership\n", + mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n", slave, cqn); atomic_dec(&cq->mtt->ref_count); state = RES_CQ_ALLOCATED; @@@ -4395,8 -4388,8 +4389,8 @@@ static void rem_slave_mrs(struct mlx4_d
err = move_all_busy(dev, slave, RES_MPT); if (err) - mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) { @@@ -4431,9 -4424,7 +4425,7 @@@ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) - mlx4_dbg(dev, "rem_slave_mrs: failed" - " to move slave %d mpt %d to" - " SW ownership\n", + mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n", slave, mptn); if (mpt->mtt) atomic_dec(&mpt->mtt->ref_count); @@@ -4465,8 -4456,8 +4457,8 @@@ static void rem_slave_mtts(struct mlx4_
err = move_all_busy(dev, slave, RES_MTT); if (err) - mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) { @@@ -4568,8 -4559,8 +4560,8 @@@ static void rem_slave_eqs(struct mlx4_d
err = move_all_busy(dev, slave, RES_EQ); if (err) - mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(eq, tmp, eq_list, com.list) { @@@ -4601,9 -4592,8 +4593,8 @@@ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) - mlx4_dbg(dev, "rem_slave_eqs: failed" - " to move slave %d eqs %d to" - " SW ownership\n", slave, eqn); + mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n", + slave, eqn); mlx4_free_cmd_mailbox(dev, mailbox); atomic_dec(&eq->mtt->ref_count); state = RES_EQ_RESERVED; @@@ -4632,8 -4622,8 +4623,8 @@@ static void rem_slave_counters(struct m
err = move_all_busy(dev, slave, RES_COUNTER); if (err) - mlx4_warn(dev, "rem_slave_counters: Could not move all counters to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(counter, tmp, counter_list, com.list) { @@@ -4663,8 -4653,8 +4654,8 @@@ static void rem_slave_xrcdns(struct mlx
err = move_all_busy(dev, slave, RES_XRCD); if (err) - mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) { @@@ -4682,7 -4672,7 +4673,7 @@@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = mlx4_priv(dev); - + mlx4_reset_roce_gids(dev, slave); mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); rem_slave_vlans(dev, slave); rem_slave_macs(dev, slave); @@@ -4809,10 -4799,8 +4800,8 @@@ void mlx4_vf_immed_vlan_work_handler(st 0, MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); if (err) { - mlx4_info(dev, "UPDATE_QP failed for slave %d, " - "port %d, qpn %d (%d)\n", - work->slave, port, qp->local_qpn, - err); + mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n", + work->slave, port, qp->local_qpn, err); errors++; } } diff --combined drivers/net/team/team.c index ce4989b,9a9ce8d..b4958c7 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@@ -968,7 -968,7 +968,7 @@@ static void team_port_disable(struct te static void __team_compute_features(struct team *team) { struct team_port *port; - u32 vlan_features = TEAM_VLAN_FEATURES; + u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; unsigned short max_hard_header_len = ETH_HLEN; unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
@@@ -1724,7 -1724,6 +1724,7 @@@ static int team_change_mtu(struct net_d * to traverse list in reverse under rcu_read_lock */ mutex_lock(&team->lock); + team->port_mtu_change_allowed = true; list_for_each_entry(port, &team->port_list, list) { err = dev_set_mtu(port->dev, new_mtu); if (err) { @@@ -1733,7 -1732,6 +1733,7 @@@ goto unwind; } } + team->port_mtu_change_allowed = false; mutex_unlock(&team->lock);
dev->mtu = new_mtu; @@@ -1743,7 -1741,6 +1743,7 @@@ unwind: list_for_each_entry_continue_reverse(port, &team->port_list, list) dev_set_mtu(port->dev, dev->mtu); + team->port_mtu_change_allowed = false; mutex_unlock(&team->lock);
return err; @@@ -2854,9 -2851,7 +2854,9 @@@ static int team_device_event(struct not break; case NETDEV_PRECHANGEMTU: /* Forbid to change mtu of underlaying device */ - return NOTIFY_BAD; + if (!port->team->port_mtu_change_allowed) + return NOTIFY_BAD; + break; case NETDEV_PRE_TYPE_CHANGE: /* Forbid to change type of underlaying device */ return NOTIFY_BAD; diff --combined drivers/net/usb/ipheth.c index 973275f,f725707..76465b1 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@@ -59,8 -59,6 +59,8 @@@ #define USB_PRODUCT_IPHONE_3GS 0x1294 #define USB_PRODUCT_IPHONE_4 0x1297 #define USB_PRODUCT_IPAD 0x129a +#define USB_PRODUCT_IPAD_2 0x12a2 +#define USB_PRODUCT_IPAD_3 0x12a6 #define USB_PRODUCT_IPAD_MINI 0x12ab #define USB_PRODUCT_IPHONE_4_VZW 0x129c #define USB_PRODUCT_IPHONE_4S 0x12a0 @@@ -107,14 -105,6 +107,14 @@@ static struct usb_device_id ipheth_tabl { USB_DEVICE_AND_INTERFACE_INFO( USB_VENDOR_APPLE, USB_PRODUCT_IPAD, IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, + IPHETH_USBINTF_PROTO) }, + { USB_DEVICE_AND_INTERFACE_INFO( + USB_VENDOR_APPLE, USB_PRODUCT_IPAD_2, + IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, + IPHETH_USBINTF_PROTO) }, + { USB_DEVICE_AND_INTERFACE_INFO( + USB_VENDOR_APPLE, USB_PRODUCT_IPAD_3, + IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, IPHETH_USBINTF_PROTO) }, { USB_DEVICE_AND_INTERFACE_INFO( USB_VENDOR_APPLE, USB_PRODUCT_IPAD_MINI, @@@ -534,7 -524,7 +534,7 @@@ static int ipheth_probe(struct usb_inte usb_set_intfdata(intf, dev);
SET_NETDEV_DEV(netdev, &intf->dev); - SET_ETHTOOL_OPS(netdev, &ops); + netdev->ethtool_ops = &ops;
retval = register_netdev(netdev); if (retval) { diff --combined drivers/s390/net/qeth_core_main.c index e89f38c,549e9fd..1a884c9 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@@ -20,9 -20,9 +20,10 @@@ #include <linux/kthread.h> #include <linux/slab.h> #include <net/iucv/af_iucv.h> + #include <net/dsfield.h>
#include <asm/ebcdic.h> +#include <asm/chpid.h> #include <asm/io.h> #include <asm/sysinfo.h> #include <asm/compat.h> @@@ -1013,7 -1013,7 +1014,7 @@@ static long __qeth_check_irb_error(stru
card = CARD_FROM_CDEV(cdev);
- if (!IS_ERR(irb)) + if (!card || !IS_ERR(irb)) return 0;
switch (PTR_ERR(irb)) { @@@ -1029,7 -1029,7 +1030,7 @@@ QETH_CARD_TEXT(card, 2, "ckirberr"); QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT); if (intparm == QETH_RCD_PARM) { - if (card && (card->data.ccwdev == cdev)) { + if (card->data.ccwdev == cdev) { card->data.state = CH_STATE_DOWN; wake_up(&card->wait_q); } @@@ -1345,7 -1345,16 +1346,7 @@@ static void qeth_set_multiple_write_que static void qeth_update_from_chp_desc(struct qeth_card *card) { struct ccw_device *ccwdev; - struct channelPath_dsc { - u8 flags; - u8 lsn; - u8 desc; - u8 chpid; - u8 swla; - u8 zeroes; - u8 chla; - u8 chpp; - } *chp_dsc; + struct channel_path_desc *chp_dsc;
QETH_DBF_TEXT(SETUP, 2, "chp_desc");
@@@ -3662,42 -3671,56 +3663,56 @@@ void qeth_qdio_output_handler(struct cc } EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
+ /** + * Note: Function assumes that we have 4 outbound queues. + */ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, int ipv, int cast_type) { - if (!ipv && (card->info.type == QETH_CARD_TYPE_OSD || - card->info.type == QETH_CARD_TYPE_OSX)) - return card->qdio.default_out_queue; - switch (card->qdio.no_out_queues) { - case 4: - if (cast_type && card->info.is_multicast_different) - return card->info.is_multicast_different & - (card->qdio.no_out_queues - 1); - if (card->qdio.do_prio_queueing && (ipv == 4)) { - const u8 tos = ip_hdr(skb)->tos; - - if (card->qdio.do_prio_queueing == - QETH_PRIO_Q_ING_TOS) { - if (tos & IP_TOS_NOTIMPORTANT) - return 3; - if (tos & IP_TOS_HIGHRELIABILITY) - return 2; - if (tos & IP_TOS_HIGHTHROUGHPUT) - return 1; - if (tos & IP_TOS_LOWDELAY) - return 0; - } - if (card->qdio.do_prio_queueing == - QETH_PRIO_Q_ING_PREC) - return 3 - (tos >> 6); - } else if (card->qdio.do_prio_queueing && (ipv == 6)) { - /* TODO: IPv6!!! */ + __be16 *tci; + u8 tos; + + if (cast_type && card->info.is_multicast_different) + return card->info.is_multicast_different & + (card->qdio.no_out_queues - 1); + + switch (card->qdio.do_prio_queueing) { + case QETH_PRIO_Q_ING_TOS: + case QETH_PRIO_Q_ING_PREC: + switch (ipv) { + case 4: + tos = ipv4_get_dsfield(ip_hdr(skb)); + break; + case 6: + tos = ipv6_get_dsfield(ipv6_hdr(skb)); + break; + default: + return card->qdio.default_out_queue; } - return card->qdio.default_out_queue; - case 1: /* fallthrough for single-out-queue 1920-device */ + if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC) + return ~tos >> 6 & 3; + if (tos & IPTOS_MINCOST) + return 3; + if (tos & IPTOS_RELIABILITY) + return 2; + if (tos & IPTOS_THROUGHPUT) + return 1; + if (tos & IPTOS_LOWDELAY) + return 0; + break; + case QETH_PRIO_Q_ING_SKB: + if (skb->priority > 5) + return 0; + return ~skb->priority >> 1 & 3; + case QETH_PRIO_Q_ING_VLAN: + tci = &((struct ethhdr *)skb->data)->h_proto; + if (*tci == ETH_P_8021Q) + return ~*(tci + 1) >> (VLAN_PRIO_SHIFT + 1) & 3; + break; default: - return card->qdio.default_out_queue; + break; } + return card->qdio.default_out_queue; } EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
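The rewritten qeth_get_priority_queue() above keys on the configured priority-queueing mode (ToS/DSCP bits, IP precedence, skb->priority, or the VLAN PCP field) instead of the old per-queue-count switch, and maps the result onto the four outbound queues. The bit tricks it uses for the precedence and skb->priority modes (~tos >> 6 & 3 and ~skb->priority >> 1 & 3) are compact but not obvious, so here is a standalone sketch, plain userspace C and not part of the patch, that simply evaluates the same expressions on sample values, assuming the four outbound queues the added comment mentions:

/* Standalone sketch: evaluate the same bit arithmetic that
 * qeth_get_priority_queue() uses, assuming 4 outbound queues.
 * Higher precedence / skb->priority lands on a lower queue number.
 */
#include <stdio.h>

static unsigned int prec_to_queue(unsigned int tos)
{
	/* QETH_PRIO_Q_ING_PREC: invert the top two ToS bits */
	return ~tos >> 6 & 3;
}

static unsigned int skb_prio_to_queue(unsigned int priority)
{
	/* QETH_PRIO_Q_ING_SKB: priorities above 5 get queue 0 */
	if (priority > 5)
		return 0;
	return ~priority >> 1 & 3;
}

int main(void)
{
	unsigned int tos, prio;

	for (tos = 0x00; tos <= 0xc0; tos += 0x40)
		printf("tos 0x%02x -> queue %u\n", tos, prec_to_queue(tos));
	for (prio = 0; prio <= 7; prio++)
		printf("skb priority %u -> queue %u\n", prio,
		       skb_prio_to_queue(prio));
	return 0;
}

Compiled with any C compiler, this prints tos 0x00/0x40/0x80/0xc0 going to queues 3/2/1/0 and priorities 0-1/2-3/4-5/6+ going to queues 3/2/1/0, i.e. the inversion simply makes "more important" traffic take the lower-numbered queue.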
@@@ -5816,7 -5839,7 +5831,7 @@@ static int __init qeth_core_init(void if (rc) goto out_err; qeth_core_root_dev = root_device_register("qeth"); - rc = PTR_RET(qeth_core_root_dev); + rc = PTR_ERR_OR_ZERO(qeth_core_root_dev); if (rc) goto register_err; qeth_core_header_cache = kmem_cache_create("qeth_hdr", diff --combined include/linux/netlink.h index 034cda7,7a28115..9e572da --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@@ -16,10 -16,9 +16,10 @@@ static inline struct nlmsghdr *nlmsg_hd }
enum netlink_skb_flags { - NETLINK_SKB_MMAPED = 0x1, /* Packet data is mmaped */ - NETLINK_SKB_TX = 0x2, /* Packet was sent by userspace */ - NETLINK_SKB_DELIVERED = 0x4, /* Packet was delivered */ + NETLINK_SKB_MMAPED = 0x1, /* Packet data is mmaped */ + NETLINK_SKB_TX = 0x2, /* Packet was sent by userspace */ + NETLINK_SKB_DELIVERED = 0x4, /* Packet was delivered */ + NETLINK_SKB_DST = 0x8, /* Dst set in sendto or sendmsg */ };
struct netlink_skb_parms { @@@ -46,7 -45,8 +46,8 @@@ struct netlink_kernel_cfg unsigned int flags; void (*input)(struct sk_buff *skb); struct mutex *cb_mutex; - void (*bind)(int group); + int (*bind)(int group); + void (*unbind)(int group); bool (*compare)(struct net *net, struct sock *sk); };
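The netlink_kernel_cfg change gives kernel-side netlink families a bind() hook that can now refuse a multicast-group subscription (it returns int) and a new unbind() hook that is told when a group is dropped. A minimal sketch of how a family could wire these up follows; the protocol number (31, taken here as an unused slot), the group count and the "only group 1 is allowed" policy are illustrative assumptions, not anything defined by this patch:

/* Minimal sketch of registering per-group bind/unbind callbacks with the
 * reworked struct netlink_kernel_cfg. Protocol number and policy are
 * illustrative assumptions only.
 */
#include <linux/module.h>
#include <linux/netlink.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/sock.h>

#define DEMO_NETLINK_PROTO	31	/* hypothetical, unused protocol slot */

static struct sock *demo_nlsk;

static int demo_bind(int group)
{
	pr_info("demo: group %d subscribed\n", group);
	return group == 1 ? 0 : -EPERM;	/* veto everything but group 1 */
}

static void demo_unbind(int group)
{
	pr_info("demo: group %d dropped\n", group);
}

static void demo_input(struct sk_buff *skb)
{
	/* a real family would parse netlink messages here */
}

static int __init demo_init(void)
{
	struct netlink_kernel_cfg cfg = {
		.groups	= 4,
		.input	= demo_input,
		.bind	= demo_bind,	/* returns int: may refuse a group */
		.unbind	= demo_unbind,	/* called on NETLINK_DROP_MEMBERSHIP */
	};

	demo_nlsk = netlink_kernel_create(&init_net, DEMO_NETLINK_PROTO, &cfg);
	return demo_nlsk ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	netlink_kernel_release(demo_nlsk);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");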
diff --combined include/uapi/linux/audit.h index 4c31a36,b21ea45..cf67147 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@@ -357,7 -357,7 +357,7 @@@ enum #define AUDIT_ARCH_MIPS64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|\ __AUDIT_ARCH_CONVENTION_MIPS64_N32) #define AUDIT_ARCH_MIPSEL64 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) -#define AUDIT_ARCH_MIPSEL64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE\ +#define AUDIT_ARCH_MIPSEL64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE|\ __AUDIT_ARCH_CONVENTION_MIPS64_N32) #define AUDIT_ARCH_OPENRISC (EM_OPENRISC) #define AUDIT_ARCH_PARISC (EM_PARISC) @@@ -385,6 -385,14 +385,14 @@@ */ #define AUDIT_MESSAGE_TEXT_MAX 8560
+ /* Multicast Netlink socket groups (default up to 32) */ + enum audit_nlgrps { + AUDIT_NLGRP_NONE, /* Group 0 not used */ + AUDIT_NLGRP_READLOG, /* "best effort" read only socket */ + __AUDIT_NLGRP_MAX + }; + #define AUDIT_NLGRP_MAX (__AUDIT_NLGRP_MAX - 1) + struct audit_status { __u32 mask; /* Bit mask for valid entries */ __u32 enabled; /* 1 = enabled, 0 = disabled */ diff --combined kernel/sysctl.c index 3c202d0,e36ae4b..5bcaf5c --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@@ -152,6 -152,10 +152,6 @@@ static unsigned long hung_task_timeout_ #ifdef CONFIG_SPARC #endif
-#ifdef CONFIG_SPARC64 -extern int sysctl_tsb_ratio; -#endif - #ifdef __hppa__ extern int pwrsw_enabled; #endif @@@ -2497,11 -2501,11 +2497,11 @@@ int proc_do_large_bitmap(struct ctl_tab bool first = 1; size_t left = *lenp; unsigned long bitmap_len = table->maxlen; - unsigned long *bitmap = (unsigned long *) table->data; + unsigned long *bitmap = *(unsigned long **) table->data; unsigned long *tmp_bitmap = NULL; char tr_a[] = { '-', ',', '\n' }, tr_b[] = { ',', '\n', 0 }, c;
- if (!bitmap_len || !left || (*ppos && !write)) { + if (!bitmap || !bitmap_len || !left || (*ppos && !write)) { *lenp = 0; return 0; } diff --combined lib/Kconfig.debug index e548aa0,d1b7bdf..6da2c25 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@@ -180,7 -180,7 +180,7 @@@ config STRIP_ASM_SYM
config READABLE_ASM bool "Generate readable assembler code" - depends on DEBUG_KERNEL + depends on DEBUG_KERNEL && !LTO help Disable some compiler optimizations that tend to generate human unreadable assembler output. This may make the kernel slightly slower, but it helps @@@ -1620,6 -1620,19 +1620,19 @@@ config TEST_USER_COP
If unsure, say N.
+ config TEST_BPF + tristate "Test BPF filter functionality" + default n + depends on m && NET + help + This builds the "test_bpf" module that runs various test vectors + against the BPF interpreter or BPF JIT compiler depending on the + current setting. This is in particular useful for BPF JIT compiler + development, but also to run regression tests against changes in + the interpreter code. + + If unsure, say N. + source "samples/Kconfig"
source "lib/Kconfig.kgdb" diff --combined net/bridge/br_fdb.c index 474d36f,648d0e8..b74b4f5 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@@ -85,8 -85,58 +85,58 @@@ static void fdb_rcu_free(struct rcu_hea kmem_cache_free(br_fdb_cache, ent); }
+ /* When a static FDB entry is added, the mac address from the entry is + * added to the bridge private HW address list and all required ports + * are then updated with the new information. + * Called under RTNL. + */ + static void fdb_add_hw(struct net_bridge *br, const unsigned char *addr) + { + int err; + struct net_bridge_port *p, *tmp; + + ASSERT_RTNL(); + + list_for_each_entry(p, &br->port_list, list) { + if (!br_promisc_port(p)) { + err = dev_uc_add(p->dev, addr); + if (err) + goto undo; + } + } + + return; + undo: + list_for_each_entry(tmp, &br->port_list, list) { + if (tmp == p) + break; + if (!br_promisc_port(tmp)) + dev_uc_del(tmp->dev, addr); + } + } + + /* When a static FDB entry is deleted, the HW address from that entry is + * also removed from the bridge private HW address list and updates all + * the ports with needed information. + * Called under RTNL. + */ + static void fdb_del_hw(struct net_bridge *br, const unsigned char *addr) + { + struct net_bridge_port *p; + + ASSERT_RTNL(); + + list_for_each_entry(p, &br->port_list, list) { + if (!br_promisc_port(p)) + dev_uc_del(p->dev, addr); + } + } + static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f) { + if (f->is_static) + fdb_del_hw(br, f->addr.addr); + hlist_del_rcu(&f->hlist); fdb_notify(br, f, RTM_DELNEIGH); call_rcu(&f->rcu, fdb_rcu_free); @@@ -466,6 -516,7 +516,7 @@@ static int fdb_insert(struct net_bridg return -ENOMEM;
fdb->is_local = fdb->is_static = 1; + fdb_add_hw(br, addr); fdb_notify(br, fdb, RTM_NEWNEIGH); return 0; } @@@ -487,7 -538,6 +538,7 @@@ void br_fdb_update(struct net_bridge *b { struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; struct net_bridge_fdb_entry *fdb; + bool fdb_modified = false;
/* some users want to always flood. */ if (hold_time(br) == 0) @@@ -508,15 -558,10 +559,15 @@@ source->dev->name); } else { /* fastpath: update of existing entry */ - fdb->dst = source; + if (unlikely(source != fdb->dst)) { + fdb->dst = source; + fdb_modified = true; + } fdb->updated = jiffies; if (unlikely(added_by_user)) fdb->added_by_user = 1; + if (unlikely(fdb_modified)) + fdb_notify(br, fdb, RTM_NEWNEIGH); } } else { spin_lock(&br->hash_lock); @@@ -684,13 -729,25 +735,25 @@@ static int fdb_add_entry(struct net_bri }
if (fdb_to_nud(fdb) != state) { - if (state & NUD_PERMANENT) - fdb->is_local = fdb->is_static = 1; - else if (state & NUD_NOARP) { + if (state & NUD_PERMANENT) { + fdb->is_local = 1; + if (!fdb->is_static) { + fdb->is_static = 1; + fdb_add_hw(br, addr); + } + } else if (state & NUD_NOARP) { + fdb->is_local = 0; + if (!fdb->is_static) { + fdb->is_static = 1; + fdb_add_hw(br, addr); + } + } else { fdb->is_local = 0; - fdb->is_static = 1; - } else - fdb->is_local = fdb->is_static = 0; + if (fdb->is_static) { + fdb->is_static = 0; + fdb_del_hw(br, addr); + } + }
modified = true; } @@@ -880,3 -937,59 +943,59 @@@ int br_fdb_delete(struct ndmsg *ndm, st out: return err; } + + int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p) + { + struct net_bridge_fdb_entry *fdb, *tmp; + int i; + int err; + + ASSERT_RTNL(); + + for (i = 0; i < BR_HASH_SIZE; i++) { + hlist_for_each_entry(fdb, &br->hash[i], hlist) { + /* We only care for static entries */ + if (!fdb->is_static) + continue; + + err = dev_uc_add(p->dev, fdb->addr.addr); + if (err) + goto rollback; + } + } + return 0; + + rollback: + for (i = 0; i < BR_HASH_SIZE; i++) { + hlist_for_each_entry(tmp, &br->hash[i], hlist) { + /* If we reached the fdb that failed, we can stop */ + if (tmp == fdb) + break; + + /* We only care for static entries */ + if (!tmp->is_static) + continue; + + dev_uc_del(p->dev, tmp->addr.addr); + } + } + return err; + } + + void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p) + { + struct net_bridge_fdb_entry *fdb; + int i; + + ASSERT_RTNL(); + + for (i = 0; i < BR_HASH_SIZE; i++) { + hlist_for_each_entry_rcu(fdb, &br->hash[i], hlist) { + /* We only care for static entries */ + if (!fdb->is_static) + continue; + + dev_uc_del(p->dev, fdb->addr.addr); + } + } + } diff --combined net/bridge/br_private.h index 59d3a85,53d6e32..bc17210 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@@ -174,6 -174,8 +174,8 @@@ struct net_bridge_por #define BR_ADMIN_COST 0x00000010 #define BR_LEARNING 0x00000020 #define BR_FLOOD 0x00000040 + #define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING) + #define BR_PROMISC 0x00000080
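fdb_add_hw() and br_fdb_sync_static() above both use the same unwind idiom: try to add the address to every non-promiscuous port, and if dev_uc_add() fails partway, walk the list again and remove it only from the ports that were already updated. A stripped-down, self-contained sketch of that idiom (illustrative names and callbacks, not bridge code):

/* Sketch of the apply-then-rollback idiom used by fdb_add_hw() and
 * br_fdb_sync_static(): on the first failure, undo only the entries
 * processed before the one that failed.
 */
#include <stdio.h>

#define N 5

static int apply(int i)
{
	if (i == 3)
		return -1;	/* simulate dev_uc_add() failing on item 3 */
	printf("applied %d\n", i);
	return 0;
}

static void revert(int i)
{
	printf("reverted %d\n", i);
}

static int apply_all(void)
{
	int i, j, err;

	for (i = 0; i < N; i++) {
		err = apply(i);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	/* undo the entries that succeeded, stopping before the failed one */
	for (j = 0; j < i; j++)
		revert(j);
	return err;
}

int main(void)
{
	return apply_all() ? 1 : 0;
}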
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING struct bridge_mcast_query ip4_query; @@@ -198,6 -200,9 +200,9 @@@ #endif };
+ #define br_auto_port(p) ((p)->flags & BR_AUTO_MASK) + #define br_promisc_port(p) ((p)->flags & BR_PROMISC) + #define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT)
static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev) @@@ -290,6 -295,7 +295,7 @@@ struct net_bridg struct timer_list topology_change_timer; struct timer_list gc_timer; struct kobject *ifobj; + u32 auto_cnt; #ifdef CONFIG_BRIDGE_VLAN_FILTERING u8 vlan_enabled; struct net_port_vlans __rcu *vlan_info; @@@ -327,8 -333,6 +333,6 @@@ struct br_input_skb_cb #define br_debug(br, format, args...) \ pr_debug("%s: " format, (br)->dev->name, ##args)
- extern struct notifier_block br_device_notifier; - /* called under bridge lock */ static inline int br_is_root_bridge(const struct net_bridge *br) { @@@ -395,6 -399,8 +399,8 @@@ int br_fdb_add(struct ndmsg *nlh, struc const unsigned char *addr, u16 nlh_flags); int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev, int idx); + int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p); + void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p);
/* br_forward.c */ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb); @@@ -415,6 -421,8 +421,8 @@@ int br_del_if(struct net_bridge *br, st int br_min_mtu(const struct net_bridge *br); netdev_features_t br_features_recompute(struct net_bridge *br, netdev_features_t features); + void br_port_flags_change(struct net_bridge_port *port, unsigned long mask); + void br_manage_promisc(struct net_bridge *br);
/* br_input.c */ int br_handle_frame_finish(struct sk_buff *skb); @@@ -581,7 -589,6 +589,7 @@@ bool br_allowed_ingress(struct net_brid struct sk_buff *skb, u16 *vid); bool br_allowed_egress(struct net_bridge *br, const struct net_port_vlans *v, const struct sk_buff *skb); +bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid); struct sk_buff *br_handle_vlan(struct net_bridge *br, const struct net_port_vlans *v, struct sk_buff *skb); @@@ -633,6 -640,10 +641,10 @@@ static inline u16 br_get_pvid(const str return v->pvid ?: VLAN_N_VID; }
+ static inline int br_vlan_enabled(struct net_bridge *br) + { + return br->vlan_enabled; + } #else static inline bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, @@@ -649,12 -660,6 +661,12 @@@ static inline bool br_allowed_egress(st return true; }
+static inline bool br_should_learn(struct net_bridge_port *p, + struct sk_buff *skb, u16 *vid) +{ + return true; +} + static inline struct sk_buff *br_handle_vlan(struct net_bridge *br, const struct net_port_vlans *v, struct sk_buff *skb) @@@ -719,6 -724,11 +731,11 @@@ static inline u16 br_get_pvid(const str { return VLAN_N_VID; /* Returns invalid vid */ } + + static inline int br_vlan_enabled(struct net_bridge *br) + { + return 0; + } #endif
/* br_netfilter.c */ diff --combined net/bridge/br_vlan.c index 5fee2fe,24c5cc5..fcc9539 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@@ -241,34 -241,6 +241,34 @@@ bool br_allowed_egress(struct net_bridg return false; }
+/* Called under RCU */ +bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid) +{ + struct net_bridge *br = p->br; + struct net_port_vlans *v; + + if (!br->vlan_enabled) + return true; + + v = rcu_dereference(p->vlan_info); + if (!v) + return false; + + br_vlan_get_tag(skb, vid); + if (!*vid) { + *vid = br_get_pvid(v); + if (*vid == VLAN_N_VID) + return false; + + return true; + } + + if (test_bit(*vid, v->vlan_bitmap)) + return true; + + return false; +} + /* Must be protected by RTNL. * Must be called with vid in range from 1 to 4094 inclusive. */ @@@ -360,6 -332,7 +360,7 @@@ int br_vlan_filter_toggle(struct net_br goto unlock;
br->vlan_enabled = val; + br_manage_promisc(br);
unlock: rtnl_unlock(); diff --combined net/core/dev.c index fb8b054,0355ca5..dfdace9 --- a/net/core/dev.c +++ b/net/core/dev.c @@@ -1661,6 -1661,29 +1661,29 @@@ bool is_skb_forwardable(struct net_devi } EXPORT_SYMBOL_GPL(is_skb_forwardable);
+ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) + { + if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { + if (skb_copy_ubufs(skb, GFP_ATOMIC)) { + atomic_long_inc(&dev->rx_dropped); + kfree_skb(skb); + return NET_RX_DROP; + } + } + + if (unlikely(!is_skb_forwardable(dev, skb))) { + atomic_long_inc(&dev->rx_dropped); + kfree_skb(skb); + return NET_RX_DROP; + } + + skb_scrub_packet(skb, true); + skb->protocol = eth_type_trans(skb, dev); + + return 0; + } + EXPORT_SYMBOL_GPL(__dev_forward_skb); + /** * dev_forward_skb - loopback an skb to another netif * @@@ -1681,24 -1704,7 +1704,7 @@@ */ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) { - if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { - if (skb_copy_ubufs(skb, GFP_ATOMIC)) { - atomic_long_inc(&dev->rx_dropped); - kfree_skb(skb); - return NET_RX_DROP; - } - } - - if (unlikely(!is_skb_forwardable(dev, skb))) { - atomic_long_inc(&dev->rx_dropped); - kfree_skb(skb); - return NET_RX_DROP; - } - - skb_scrub_packet(skb, true); - skb->protocol = eth_type_trans(skb, dev); - - return netif_rx_internal(skb); + return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); } EXPORT_SYMBOL_GPL(dev_forward_skb);
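dev_forward_skb() now delegates the zero-copy, forwardability and scrubbing steps to __dev_forward_skb() and collapses its tail into "__dev_forward_skb(dev, skb) ?: netif_rx_internal(skb)". The "?:" form is the GNU C conditional with omitted middle operand: the left side is evaluated once, and its value is the result unless it is zero, in which case the right side is evaluated instead. So a non-zero NET_RX_DROP from the helper is returned as-is and only a successfully prepared skb reaches netif_rx_internal(). A minimal userspace illustration of the operator itself, not kernel code:

/* Illustration of the GNU C "a ?: b" extension: the first operand is
 * evaluated exactly once and returned if non-zero, otherwise the second
 * operand is evaluated and returned.
 */
#include <stdio.h>

static int calls;

static int check(int rc)
{
	calls++;		/* prove the operand is evaluated only once */
	return rc;
}

int main(void)
{
	int a = check(-1) ?: 100;	/* non-zero first operand wins */
	int b = check(0) ?: 100;	/* zero falls through to 100 */

	printf("a=%d b=%d calls=%d\n", a, b, calls);	/* a=-1 b=100 calls=2 */
	return 0;
}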
@@@ -2283,8 -2289,8 +2289,8 @@@ EXPORT_SYMBOL(skb_checksum_help)
__be16 skb_network_protocol(struct sk_buff *skb, int *depth) { + unsigned int vlan_depth = skb->mac_len; __be16 type = skb->protocol; - int vlan_depth = skb->mac_len;
/* Tunnel gso handlers can set protocol to ethernet. */ if (type == htons(ETH_P_TEB)) { @@@ -2297,30 -2303,15 +2303,30 @@@ type = eth->h_proto; }
- while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { - struct vlan_hdr *vh; - - if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) - return 0; - - vh = (struct vlan_hdr *)(skb->data + vlan_depth); - type = vh->h_vlan_encapsulated_proto; - vlan_depth += VLAN_HLEN; + /* if skb->protocol is 802.1Q/AD then the header should already be + * present at mac_len - VLAN_HLEN (if mac_len > 0), or at + * ETH_HLEN otherwise + */ + if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) { + if (vlan_depth) { + if (unlikely(WARN_ON(vlan_depth < VLAN_HLEN))) + return 0; + vlan_depth -= VLAN_HLEN; + } else { + vlan_depth = ETH_HLEN; + } + do { + struct vlan_hdr *vh; + + if (unlikely(!pskb_may_pull(skb, + vlan_depth + VLAN_HLEN))) + return 0; + + vh = (struct vlan_hdr *)(skb->data + vlan_depth); + type = vh->h_vlan_encapsulated_proto; + vlan_depth += VLAN_HLEN; + } while (type == htons(ETH_P_8021Q) || + type == htons(ETH_P_8021AD)); }
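With this change skb_network_protocol() restarts the VLAN walk just before the tag that was already parsed into the mac header (mac_len - VLAN_HLEN), or at ETH_HLEN when mac_len is unset, and then keeps skipping 4-byte tags while the encapsulated type is still 802.1Q/802.1ad. For readers less used to QinQ parsing, here is a simplified userspace sketch of the same loop over a flat Ethernet frame; bounds checks stand in for pskb_may_pull() and it is illustrative only, not the kernel implementation:

/* Simplified sketch of the stacked-VLAN walk: skip 802.1Q/802.1ad tags
 * until the real encapsulated protocol is found.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define ETH_HLEN	14
#define VLAN_HLEN	4
#define ETH_P_8021Q	0x8100
#define ETH_P_8021AD	0x88A8

/* Returns the encapsulated EtherType (host order) and, via *depth, the
 * offset of the network header; 0 if the frame is truncated. */
static uint16_t frame_network_protocol(const uint8_t *frame, size_t len,
				       size_t *depth)
{
	size_t off = ETH_HLEN - 2;	/* offset of the outer EtherType */
	uint16_t type;

	if (len < ETH_HLEN)
		return 0;
	memcpy(&type, frame + off, 2);
	type = ntohs(type);
	off += 2;

	while (type == ETH_P_8021Q || type == ETH_P_8021AD) {
		if (len < off + VLAN_HLEN)
			return 0;
		/* VLAN header: 2 bytes TCI, then the encapsulated type */
		memcpy(&type, frame + off + 2, 2);
		type = ntohs(type);
		off += VLAN_HLEN;
	}

	*depth = off;
	return type;
}

int main(void)
{
	/* dst, src, 802.1ad S-tag, 802.1Q C-tag, then IPv4 (0x0800) */
	uint8_t frame[ETH_HLEN + 2 * VLAN_HLEN] = {0};
	size_t depth = 0;
	uint16_t proto;

	frame[12] = 0x88; frame[13] = 0xa8;	/* outer S-tag */
	frame[16] = 0x81; frame[17] = 0x00;	/* inner C-tag */
	frame[20] = 0x08; frame[21] = 0x00;	/* IPv4 */

	proto = frame_network_protocol(frame, sizeof(frame), &depth);
	printf("proto 0x%04x at offset %zu\n", proto, depth);	/* 0x0800 at 22 */
	return 0;
}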
*depth = vlan_depth; @@@ -5689,10 -5680,6 +5695,6 @@@ static void rollback_registered_many(st */ call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
- if (!dev->rtnl_link_ops || - dev->rtnl_link_state == RTNL_LINK_INITIALIZED) - rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL); - /* * Flush the unicast and multicast chains */ @@@ -5702,6 -5689,10 +5704,10 @@@ if (dev->netdev_ops->ndo_uninit) dev->netdev_ops->ndo_uninit(dev);
+ if (!dev->rtnl_link_ops || + dev->rtnl_link_state == RTNL_LINK_INITIALIZED) + rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL); + /* Notifier chain MUST detach us all upper devices. */ WARN_ON(netdev_has_any_upper_dev(dev));
diff --combined net/netfilter/ipvs/ip_vs_core.c index 3d2d2c8,d9da8c4..e683675 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@@ -97,7 -97,7 +97,7 @@@ const char *ip_vs_proto_name(unsigned i return "ICMPv6"; #endif default: - sprintf(buf, "IP_%d", proto); + sprintf(buf, "IP_%u", proto); return buf; } } @@@ -1392,19 -1392,15 +1392,19 @@@ ip_vs_in_icmp(struct sk_buff *skb, int
if (ipip) { __be32 info = ic->un.gateway; + __u8 type = ic->type; + __u8 code = ic->code;
/* Update the MTU */ if (ic->type == ICMP_DEST_UNREACH && ic->code == ICMP_FRAG_NEEDED) { struct ip_vs_dest *dest = cp->dest; u32 mtu = ntohs(ic->un.frag.mtu); + __be16 frag_off = cih->frag_off;
/* Strip outer IP and ICMP, go to IPIP header */ - __skb_pull(skb, ihl + sizeof(_icmph)); + if (pskb_pull(skb, ihl + sizeof(_icmph)) == NULL) + goto ignore_ipip; offset2 -= ihl + sizeof(_icmph); skb_reset_network_header(skb); IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n", @@@ -1412,7 -1408,7 +1412,7 @@@ ipv4_update_pmtu(skb, dev_net(skb->dev), mtu, 0, 0, 0, 0); /* Client uses PMTUD? */ - if (!(cih->frag_off & htons(IP_DF))) + if (!(frag_off & htons(IP_DF))) goto ignore_ipip; /* Prefer the resulting PMTU */ if (dest) { @@@ -1431,13 -1427,12 +1431,13 @@@ /* Strip outer IP, ICMP and IPIP, go to IP header of * original request. */ - __skb_pull(skb, offset2); + if (pskb_pull(skb, offset2) == NULL) + goto ignore_ipip; skb_reset_network_header(skb); IP_VS_DBG(12, "Sending ICMP for %pI4->%pI4: t=%u, c=%u, i=%u\n", &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, - ic->type, ic->code, ntohl(info)); - icmp_send(skb, ic->type, ic->code, info); + type, code, ntohl(info)); + icmp_send(skb, type, code, info); /* ICMP can be shorter but anyways, account it */ ip_vs_out_stats(cp, skb);
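The IPVS hunks above replace __skb_pull() with pskb_pull(), which can fail on short or non-linear skbs and, when it has to linearise data, can rebuild the skb head; pointers such as ic and cih taken before the call are then no longer safe to dereference. That is why the patch snapshots ic->type, ic->code and cih->frag_off into locals up front. The general pattern, copy what you still need out of a buffer before an operation that may move or invalidate it, is shown below in plain userspace C with realloc() standing in for pskb_pull() (an analogy, not IPVS code):

/* Copy needed fields before a call that may move the buffer. */
#include <stdio.h>
#include <stdlib.h>

struct hdr {
	unsigned char type;
	unsigned char code;
};

int main(void)
{
	char *buf = malloc(64);
	struct hdr *h;
	unsigned char type, code;

	if (!buf)
		return 1;
	h = (struct hdr *)buf;
	h->type = 3;	/* e.g. destination unreachable */
	h->code = 4;	/* e.g. fragmentation needed */

	/* Snapshot first: after realloc(), h may point at freed memory. */
	type = h->type;
	code = h->code;

	buf = realloc(buf, 4096);	/* may move the block, much as
					 * pskb_pull() may rebuild the head */
	if (!buf)
		return 1;

	printf("type=%u code=%u\n", type, code);
	free(buf);
	return 0;
}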
diff --combined net/netlink/af_netlink.c index f22757a,e0ccd84..15c731f --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@@ -1206,7 -1206,8 +1206,8 @@@ static int netlink_create(struct net *n struct module *module = NULL; struct mutex *cb_mutex; struct netlink_sock *nlk; - void (*bind)(int group); + int (*bind)(int group); + void (*unbind)(int group); int err = 0;
sock->state = SS_UNCONNECTED; @@@ -1232,6 -1233,7 +1233,7 @@@ err = -EPROTONOSUPPORT; cb_mutex = nl_table[protocol].cb_mutex; bind = nl_table[protocol].bind; + unbind = nl_table[protocol].unbind; netlink_unlock_table();
if (err < 0) @@@ -1248,6 -1250,7 +1250,7 @@@ nlk = nlk_sk(sock->sk); nlk->module = module; nlk->netlink_bind = bind; + nlk->netlink_unbind = unbind; out: return err;
@@@ -1301,6 -1304,7 +1304,7 @@@ static int netlink_release(struct socke kfree_rcu(old, rcu); nl_table[sk->sk_protocol].module = NULL; nl_table[sk->sk_protocol].bind = NULL; + nl_table[sk->sk_protocol].unbind = NULL; nl_table[sk->sk_protocol].flags = 0; nl_table[sk->sk_protocol].registered = 0; } @@@ -1373,9 -1377,7 +1377,9 @@@ retry bool __netlink_ns_capable(const struct netlink_skb_parms *nsp, struct user_namespace *user_ns, int cap) { - return sk_ns_capable(nsp->sk, user_ns, cap); + return ((nsp->flags & NETLINK_SKB_DST) || + file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) && + ns_capable(user_ns, cap); } EXPORT_SYMBOL(__netlink_ns_capable);
@@@ -1478,6 -1480,19 +1482,19 @@@ static int netlink_realloc_groups(struc return err; }
+ static void netlink_unbind(int group, long unsigned int groups, + struct netlink_sock *nlk) + { + int undo; + + if (!nlk->netlink_unbind) + return; + + for (undo = 0; undo < group; undo++) + if (test_bit(group, &groups)) + nlk->netlink_unbind(undo); + } + static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { @@@ -1486,6 -1501,7 +1503,7 @@@ struct netlink_sock *nlk = nlk_sk(sk); struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; int err; + long unsigned int groups = nladdr->nl_groups;
if (addr_len < sizeof(struct sockaddr_nl)) return -EINVAL; @@@ -1494,7 -1510,7 +1512,7 @@@ return -EINVAL;
/* Only superuser is allowed to listen multicasts */ - if (nladdr->nl_groups) { + if (groups) { if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV)) return -EPERM; err = netlink_realloc_groups(sk); @@@ -1502,37 -1518,45 +1520,45 @@@ return err; }
- if (nlk->portid) { + if (nlk->portid) if (nladdr->nl_pid != nlk->portid) return -EINVAL; - } else { + + if (nlk->netlink_bind && groups) { + int group; + + for (group = 0; group < nlk->ngroups; group++) { + if (!test_bit(group, &groups)) + continue; + err = nlk->netlink_bind(group); + if (!err) + continue; + netlink_unbind(group, groups, nlk); + return err; + } + } + + if (!nlk->portid) { err = nladdr->nl_pid ? netlink_insert(sk, net, nladdr->nl_pid) : netlink_autobind(sock); - if (err) + if (err) { + netlink_unbind(nlk->ngroups - 1, groups, nlk); return err; + } }
- if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0])) + if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0])) return 0;
netlink_table_grab(); netlink_update_subscriptions(sk, nlk->subscriptions + - hweight32(nladdr->nl_groups) - + hweight32(groups) - hweight32(nlk->groups[0])); - nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups; + nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups; netlink_update_listeners(sk); netlink_table_ungrab();
- if (nlk->netlink_bind && nlk->groups[0]) { - int i; - - for (i = 0; i < nlk->ngroups; i++) { - if (test_bit(i, nlk->groups)) - nlk->netlink_bind(i); - } - } - return 0; }
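From userspace, two paths now run the per-family bind()/unbind() hooks: bind() with a non-zero nl_groups bitmask, and the NETLINK_ADD_MEMBERSHIP / NETLINK_DROP_MEMBERSHIP socket options; with this change a family's bind() hook can reject the subscription and the error propagates back to the caller. A small self-contained example of both paths, using NETLINK_ROUTE and RTMGRP_LINK only because they exist on any kernel (rtnetlink itself defines no bind/unbind hooks, so here the calls simply succeed):

/* Userspace view of the subscription paths touched above. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

int main(void)
{
	struct sockaddr_nl sa;
	int grp = RTNLGRP_LINK;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Path 1: subscribe at bind() time via the nl_groups bitmask. */
	memset(&sa, 0, sizeof(sa));
	sa.nl_family = AF_NETLINK;
	sa.nl_groups = RTMGRP_LINK;	/* runs the family's bind() hook */
	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		perror("bind");		/* a kernel-side veto surfaces here */

	/* Path 2: join/leave a single group by index via setsockopt(). */
	if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		       &grp, sizeof(grp)) < 0)
		perror("NETLINK_ADD_MEMBERSHIP");
	if (setsockopt(fd, SOL_NETLINK, NETLINK_DROP_MEMBERSHIP,
		       &grp, sizeof(grp)) < 0)
		perror("NETLINK_DROP_MEMBERSHIP");	/* triggers unbind() */

	close(fd);
	return 0;
}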
@@@ -2170,13 -2194,17 +2196,17 @@@ static int netlink_setsockopt(struct so return err; if (!val || val - 1 >= nlk->ngroups) return -EINVAL; + if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) { + err = nlk->netlink_bind(val); + if (err) + return err; + } netlink_table_grab(); netlink_update_socket_mc(nlk, val, optname == NETLINK_ADD_MEMBERSHIP); netlink_table_ungrab(); - - if (nlk->netlink_bind) - nlk->netlink_bind(val); + if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind) + nlk->netlink_unbind(val);
err = 0; break; @@@ -2295,7 -2323,6 +2325,7 @@@ static int netlink_sendmsg(struct kioc struct sk_buff *skb; int err; struct scm_cookie scm; + u32 netlink_skb_flags = 0;
if (msg->msg_flags&MSG_OOB) return -EOPNOTSUPP; @@@ -2317,7 -2344,6 +2347,7 @@@ if ((dst_group || dst_portid) && !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) goto out; + netlink_skb_flags |= NETLINK_SKB_DST; } else { dst_portid = nlk->dst_portid; dst_group = nlk->dst_group; @@@ -2347,7 -2373,6 +2377,7 @@@ NETLINK_CB(skb).portid = nlk->portid; NETLINK_CB(skb).dst_group = dst_group; NETLINK_CB(skb).creds = siocb->scm->creds; + NETLINK_CB(skb).flags = netlink_skb_flags;
err = -EFAULT; if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {