The following commit has been merged in the master branch:

commit e8bfb01963b34a5a22adca32fafaf472978564b1
Merge: 47469b30fb2ed397261f434d791afd99a2e9ec43 938049e18dca57bcd2f93986fc1cbb5a83cdf027
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Mon Aug 24 13:20:19 2015 +1000

    next-20150821/net-next
diff --combined MAINTAINERS
index 2614670,7b528b8..861f9d0
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -158,6 -158,7 +158,7 @@@ L:	linux-wpan@vger.kernel.or
 S:	Maintained
 F:	net/6lowpan/
 F:	include/net/6lowpan.h
+ F:	Documentation/networking/6lowpan.txt
6PACK NETWORK DRIVER FOR AX.25 M: Andreas Koensgen ajk@comnets.uni-bremen.de @@@ -728,12 -729,6 +729,12 @@@ X: drivers/iio/*/adjd F: drivers/staging/iio/*/ad* F: staging/iio/trigger/iio-trig-bfin-timer.c
+ANALOG DEVICES INC DMA DRIVERS
+M:	Lars-Peter Clausen <lars@metafoo.de>
+W:	http://ez.analog.com/community/linux-device-drivers
+S:	Supported
+F:	drivers/dma/dma-axi-dmac.c
+
 ANDROID DRIVERS
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 M:	Arve Hjønnevåg <arve@android.com>
@@@ -805,13 -800,11 +806,13 @@@ F:	arch/arm/include/asm/floppy.
 ARM PMU PROFILING AND DEBUGGING
 M:	Will Deacon <will.deacon@arm.com>
 S:	Maintained
-F:	arch/arm/kernel/perf_event*
+F:	arch/arm/kernel/perf_*
 F:	arch/arm/oprofile/common.c
-F:	arch/arm/include/asm/pmu.h
 F:	arch/arm/kernel/hw_breakpoint.c
 F:	arch/arm/include/asm/hw_breakpoint.h
+F:	arch/arm/include/asm/perf_event.h
+F:	drivers/perf/arm_pmu.c
+F:	include/linux/perf/arm_pmu.h
ARM PORT M: Russell King linux@arm.linux.org.uk @@@ -1473,7 -1466,9 +1474,7 @@@ F: arch/arm/boot/dts/emev2 F: arch/arm/boot/dts/r7s* F: arch/arm/boot/dts/r8a* F: arch/arm/boot/dts/sh* -F: arch/arm/configs/armadillo800eva_defconfig F: arch/arm/configs/bockw_defconfig -F: arch/arm/configs/kzm9g_defconfig F: arch/arm/configs/marzen_defconfig F: arch/arm/configs/shmobile_defconfig F: arch/arm/include/debug/renesas-scif.S @@@ -1512,7 -1507,6 +1513,7 @@@ F: arch/arm/boot/dts/sti F: drivers/clocksource/arm_global_timer.c F: drivers/i2c/busses/i2c-st.c F: drivers/media/rc/st_rc.c +F: drivers/media/platform/sti/c8sectpfe/ F: drivers/mmc/host/sdhci-st.c F: drivers/phy/phy-miphy28lp.c F: drivers/phy/phy-miphy365x.c @@@ -1586,10 -1580,7 +1587,10 @@@ ARM/UNIPHIER ARCHITECTUR M: Masahiro Yamada yamada.masahiro@socionext.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained +F: arch/arm/boot/dts/uniphier* F: arch/arm/mach-uniphier/ +F: drivers/pinctrl/uniphier/ +F: drivers/tty/serial/8250/8250_uniphier.c N: uniphier
ARM/Ux500 ARM ARCHITECTURE @@@ -1684,7 -1675,7 +1685,7 @@@ M: Michal Simek <michal.simek@xilinx.co R: Sören Brinkmann soren.brinkmann@xilinx.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) W: http://wiki.xilinx.com -T: git git://git.xilinx.com/linux-xlnx.git +T: git https://github.com/Xilinx/linux-xlnx.git S: Supported F: arch/arm/mach-zynq/ F: drivers/cpuidle/cpuidle-zynq.c @@@ -2228,9 -2219,7 +2229,9 @@@ F: drivers/clocksource/bcm_kona_timer. BROADCOM BCM2835 ARM ARCHITECTURE M: Stephen Warren swarren@wwwdotorg.org M: Lee Jones lee@kernel.org +M: Eric Anholt eric@anholt.net L: linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers) +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) T: git git://git.kernel.org/pub/scm/linux/kernel/git/rpi/linux-rpi.git S: Maintained N: bcm2835 @@@ -3599,15 -3588,6 +3600,15 @@@ S: Maintaine F: drivers/gpu/drm/rockchip/ F: Documentation/devicetree/bindings/video/rockchip*
+DRM DRIVERS FOR STI +M: Benjamin Gaignard benjamin.gaignard@linaro.org +M: Vincent Abriou vincent.abriou@st.com +L: dri-devel@lists.freedesktop.org +T: git http://git.linaro.org/people/benjamin.gaignard/kernel.git +S: Maintained +F: drivers/gpu/drm/sti +F: Documentation/devicetree/bindings/gpu/st,stih4xx.txt + DSBR100 USB FM RADIO DRIVER M: Alexey Klimov klimov.linux@gmail.com L: linux-media@vger.kernel.org @@@ -4080,6 -4060,15 +4081,6 @@@ F: Documentation/filesystems/ext2.tx F: fs/ext2/ F: include/linux/ext2*
-EXT3 FILE SYSTEM -M: Jan Kara jack@suse.com -M: Andrew Morton akpm@linux-foundation.org -M: Andreas Dilger adilger.kernel@dilger.ca -L: linux-ext4@vger.kernel.org -S: Maintained -F: Documentation/filesystems/ext3.txt -F: fs/ext3/ - EXT4 FILE SYSTEM M: "Theodore Ts'o" tytso@mit.edu M: Andreas Dilger adilger.kernel@dilger.ca @@@ -4418,7 -4407,6 +4419,7 @@@ F: include/linux/fscache*. F2FS FILE SYSTEM M: Jaegeuk Kim jaegeuk@kernel.org M: Changman Lee cm224.lee@samsung.com +R: Chao Yu chao2.yu@samsung.com L: linux-f2fs-devel@lists.sourceforge.net W: http://en.wikipedia.org/wiki/F2FS T: git git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git @@@ -4427,7 -4415,6 +4428,7 @@@ F: Documentation/filesystems/f2fs.tx F: Documentation/ABI/testing/sysfs-fs-f2fs F: fs/f2fs/ F: include/linux/f2fs_fs.h +F: include/trace/events/f2fs.h
FUJITSU FR-V (FRV) PORT M: David Howells dhowells@redhat.com @@@ -5769,20 -5756,21 +5770,20 @@@ S: Maintaine F: fs/jffs2/ F: include/uapi/linux/jffs2.h
-JOURNALLING LAYER FOR BLOCK DEVICES (JBD) -M: Andrew Morton akpm@linux-foundation.org -M: Jan Kara jack@suse.com -L: linux-ext4@vger.kernel.org -S: Maintained -F: fs/jbd/ -F: include/linux/jbd.h - JOURNALLING LAYER FOR BLOCK DEVICES (JBD2) M: "Theodore Ts'o" tytso@mit.edu +M: Jan Kara jack@suse.com L: linux-ext4@vger.kernel.org S: Maintained F: fs/jbd2/ F: include/linux/jbd2.h
+JPU V4L2 MEM2MEM DRIVER FOR RENESAS +M: Mikhail Ulyanov mikhail.ulyanov@cogentembedded.com +L: linux-media@vger.kernel.org +S: Maintained +F: drivers/media/platform/rcar_jpu.c + JSM Neo PCI based serial card M: Thadeu Lima de Souza Cascardo cascardo@linux.vnet.ibm.com L: linux-serial@vger.kernel.org @@@ -5853,7 -5841,6 +5854,7 @@@ S: Odd Fixe
KERNEL NFSD, SUNRPC, AND LOCKD SERVERS M: "J. Bruce Fields" bfields@fieldses.org +M: Jeff Layton jlayton@poochiereds.net L: linux-nfs@vger.kernel.org W: http://nfs.sourceforge.net/ S: Supported @@@ -5910,6 -5897,7 +5911,6 @@@ F: arch/powerpc/kvm KERNEL VIRTUAL MACHINE for s390 (KVM/s390) M: Christian Borntraeger borntraeger@de.ibm.com M: Cornelia Huck cornelia.huck@de.ibm.com -M: linux390@de.ibm.com L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported @@@ -6523,7 -6511,7 +6524,7 @@@ F: drivers/net/ethernet/marvell/mvneta.
MARVELL MWIFIEX WIRELESS DRIVER M: Amitkumar Karwar akarwar@marvell.com - M: Avinash Patil patila@marvell.com + M: Nishant Sarmukadam nishants@marvell.com L: linux-wireless@vger.kernel.org S: Maintained F: drivers/net/wireless/mwifiex/ @@@ -6552,13 -6540,6 +6553,13 @@@ S: Maintaine F: Documentation/hwmon/max16065 F: drivers/hwmon/max16065.c
+MAX20751 HARDWARE MONITOR DRIVER +M: Guenter Roeck linux@roeck-us.net +L: lm-sensors@lm-sensors.org +S: Maintained +F: Documentation/hwmon/max20751 +F: drivers/hwmon/max20751.c + MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER M: "Hans J. Koch" hjk@hansjkoch.de L: lm-sensors@lm-sensors.org @@@ -6619,51 -6600,6 +6620,51 @@@ S: Supporte F: Documentation/devicetree/bindings/media/renesas,vsp1.txt F: drivers/media/platform/vsp1/
+MEDIA DRIVERS FOR ASCOT2E +M: Sergey Kozlov serjk@netup.ru +L: linux-media@vger.kernel.org +W: http://linuxtv.org +W: http://netup.tv/ +T: git git://linuxtv.org/media_tree.git +S: Supported +F: drivers/media/dvb-frontends/ascot2e* + +MEDIA DRIVERS FOR CXD2841ER +M: Sergey Kozlov serjk@netup.ru +L: linux-media@vger.kernel.org +W: http://linuxtv.org/ +W: http://netup.tv/ +T: git git://linuxtv.org/media_tree.git +S: Supported +F: drivers/media/dvb-frontends/cxd2841er* + +MEDIA DRIVERS FOR HORUS3A +M: Sergey Kozlov serjk@netup.ru +L: linux-media@vger.kernel.org +W: http://linuxtv.org/ +W: http://netup.tv/ +T: git git://linuxtv.org/media_tree.git +S: Supported +F: drivers/media/dvb-frontends/horus3a* + +MEDIA DRIVERS FOR LNBH25 +M: Sergey Kozlov serjk@netup.ru +L: linux-media@vger.kernel.org +W: http://linuxtv.org/ +W: http://netup.tv/ +T: git git://linuxtv.org/media_tree.git +S: Supported +F: drivers/media/dvb-frontends/lnbh25* + +MEDIA DRIVERS FOR NETUP PCI UNIVERSAL DVB devices +M: Sergey Kozlov serjk@netup.ru +L: linux-media@vger.kernel.org +W: http://linuxtv.org/ +W: http://netup.tv/ +T: git git://linuxtv.org/media_tree.git +S: Supported +F: drivers/media/pci/netup_unidvb/* + MEDIA INPUT INFRASTRUCTURE (V4L/DVB) M: Mauro Carvalho Chehab mchehab@osg.samsung.com P: LinuxTV.org Project @@@ -6713,6 -6649,15 +6714,15 @@@ W: http://www.mellanox.co Q: http://patchwork.ozlabs.org/project/netdev/list/ F: drivers/net/ethernet/mellanox/mlx4/en_*
+ MELLANOX ETHERNET SWITCH DRIVERS + M: Jiri Pirko jiri@mellanox.com + M: Ido Schimmel idosch@mellanox.com + L: netdev@vger.kernel.org + S: Supported + W: http://www.mellanox.com + Q: http://patchwork.ozlabs.org/project/netdev/list/ + F: drivers/net/ethernet/mellanox/mlxsw/ + MEMORY MANAGEMENT L: linux-mm@kvack.org W: http://www.linux-mm.org @@@ -8140,15 -8085,6 +8150,15 @@@ S: Maintaine F: include/linux/power_supply.h F: drivers/power/
+POWER STATE COORDINATION INTERFACE (PSCI) +M: Mark Rutland mark.rutland@arm.com +M: Lorenzo Pieralisi lorenzo.pieralisi@arm.com +L: linux-arm-kernel@lists.infradead.org +S: Maintained +F: drivers/firmware/psci.c +F: include/linux/psci.h +F: include/uapi/linux/psci.h + PNP SUPPORT M: "Rafael J. Wysocki" rafael.j.wysocki@intel.com S: Maintained @@@ -8638,7 -8574,6 +8648,7 @@@ M: Philipp Zabel <p.zabel@pengutronix.d S: Maintained F: drivers/reset/ F: Documentation/devicetree/bindings/reset/ +F: include/dt-bindings/reset/ F: include/linux/reset.h F: include/linux/reset-controller.h
@@@ -8773,6 -8708,7 +8783,6 @@@ F: drivers/video/fbdev/savage S390 M: Martin Schwidefsky schwidefsky@de.ibm.com M: Heiko Carstens heiko.carstens@de.ibm.com -M: linux390@de.ibm.com L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported @@@ -8800,6 -8736,7 +8810,6 @@@ F: block/partitions/ibm.
S390 NETWORK DRIVERS M: Ursula Braun ursula.braun@de.ibm.com -M: linux390@de.ibm.com L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported @@@ -8816,6 -8753,7 +8826,6 @@@ F: drivers/pci/hotplug/s390_pci_hpc.
S390 ZCRYPT DRIVER M: Ingo Tuchscherer ingo.tuchscherer@de.ibm.com -M: linux390@de.ibm.com L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported @@@ -8823,6 -8761,7 +8833,6 @@@ F: drivers/s390/crypto
S390 ZFCP DRIVER M: Steffen Maier maier@linux.vnet.ibm.com -M: linux390@de.ibm.com L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported @@@ -8830,6 -8769,7 +8840,6 @@@ F: drivers/s390/scsi/zfcp_
S390 IUCV NETWORK LAYER M: Ursula Braun ursula.braun@de.ibm.com -M: linux390@de.ibm.com L: linux-s390@vger.kernel.org W: http://www.ibm.com/developerworks/linux/linux390/ S: Supported @@@ -8982,6 -8922,13 +8992,13 @@@ F: include/linux/dma/dw. F: include/linux/platform_data/dma-dw.h F: drivers/dma/dw/
+ SYNOPSYS DESIGNWARE ETHERNET QOS 4.10a driver + M: Lars Persson lars.persson@axis.com + L: netdev@vger.kernel.org + S: Supported + F: Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt + F: drivers/net/ethernet/synopsys/dwc_eth_qos.c + SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER M: Seungwon Jeon tgih.jun@samsung.com M: Jaehoon Chung jh80.chung@samsung.com @@@ -9946,7 -9893,6 +9963,7 @@@ S: Supporte F: arch/arc/ F: Documentation/devicetree/bindings/arc/ F: drivers/tty/serial/arc_uart.c +T: git git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc.git
SYNOPSYS ARC SDP platform support M: Alexey Brodkin abrodkin@synopsys.com @@@ -10401,13 -10347,6 +10418,13 @@@ F: drivers/char/toshiba. F: include/linux/toshiba.h F: include/uapi/linux/toshiba.h
+TOSHIBA TC358743 DRIVER +M: Mats Randgaard matrandg@cisco.com +L: linux-media@vger.kernel.org +S: Maintained +F: drivers/media/i2c/tc358743* +F: include/media/tc358743.h + TMIO MMC DRIVER M: Ian Molton ian@mnementh.co.uk L: linux-mmc@vger.kernel.org diff --combined arch/arm/boot/dts/am33xx.dtsi index 01a2d94,8b59c86..b222306 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@@ -103,15 -103,6 +103,15 @@@ #size-cells = <1>; ranges = <0 0x44c00000 0x280000>;
+ wkup_m3: wkup_m3@100000 { + compatible = "ti,am3352-wkup-m3"; + reg = <0x100000 0x4000>, + <0x180000 0x2000>; + reg-names = "umem", "dmem"; + ti,hwmods = "wkup_m3"; + ti,pm-firmware = "am335x-pm-firmware.elf"; + }; + prcm: prcm@200000 { compatible = "ti,am3-prcm"; reg = <0x200000 0x4000>; @@@ -153,14 -144,6 +153,14 @@@ }; };
+ wkup_m3_ipc: wkup_m3_ipc@1324 { + compatible = "ti,am3352-wkup-m3-ipc"; + reg = <0x1324 0x24>; + interrupts = <78>; + ti,rproc = <&wkup_m3>; + mboxes = <&mailbox &mbox_wkupm3>; + }; + scm_clockdomains: clockdomains { }; }; @@@ -717,7 -700,7 +717,7 @@@ };
 	mac: ethernet@4a100000 {
-		compatible = "ti,cpsw";
+		compatible = "ti,am335x-cpsw","ti,cpsw";
 		ti,hwmods = "cpgmac0";
 		clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>;
 		clock-names = "fck", "cpts";
@@@ -779,6 -762,14 +779,6 @@@
 		reg = <0x40300000 0x10000>; /* 64k */
 	};
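The compatible property above gains a SoC-specific string while keeping the generic "ti,cpsw" as a fallback, so existing kernels still bind the driver and future code can key SoC quirks off the specific entry. A minimal sketch of the matching side, with a hypothetical quirk pointer that is not part of this patch:

#include <linux/module.h>
#include <linux/of_device.h>

static const int ex_am335x_quirks;	/* hypothetical SoC-specific data */

static const struct of_device_id ex_cpsw_of_match[] = {
	/* most specific first; .data lets probe() pick up quirks */
	{ .compatible = "ti,am335x-cpsw", .data = &ex_am335x_quirks },
	{ .compatible = "ti,cpsw" },	/* generic fallback */
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, ex_cpsw_of_match);

In probe(), of_match_device(ex_cpsw_of_match, &pdev->dev)->data would then yield &ex_am335x_quirks on am335x and NULL for the generic match.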
- wkup_m3: wkup_m3@44d00000 { - compatible = "ti,am3353-wkup-m3"; - reg = <0x44d00000 0x4000 /* M3 UMEM */ - 0x44d80000 0x2000>; /* M3 DMEM */ - ti,hwmods = "wkup_m3"; - ti,no-reset-on-init; - }; - elm: elm@48080000 { compatible = "ti,am3352-elm"; reg = <0x48080000 0x2000>; diff --combined arch/arm/boot/dts/dra7.dtsi index e4c4380,0001e95..007e16b --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@@ -116,7 -116,7 +116,7 @@@ ranges = <0 0x2000 0x2000>;
scm_conf: scm_conf@0 { - compatible = "syscon"; + compatible = "syscon", "simple-bus"; reg = <0x0 0x1400>; #address-cells = <1>; #size-cells = <1>; @@@ -141,7 -141,7 +141,7 @@@ dra7_pmx_core: pinmux@1400 { compatible = "ti,dra7-padconf", "pinctrl-single"; - reg = <0x1400 0x0464>; + reg = <0x1400 0x0468>; #address-cells = <1>; #size-cells = <0>; #interrupt-cells = <1>; @@@ -149,11 -149,6 +149,11 @@@ pinctrl-single,register-width = <32>; pinctrl-single,function-mask = <0x3fffffff>; }; + + scm_conf1: scm_conf@1c04 { + compatible = "syscon"; + reg = <0x1c04 0x0020>; + }; };
cm_core_aon: cm_core_aon@5000 { @@@ -216,7 -211,7 +216,7 @@@ #address-cells = <1>; ranges = <0x51000000 0x51000000 0x3000 0x0 0x20000000 0x10000000>; - pcie@51000000 { + pcie1: pcie@51000000 { compatible = "ti,dra7-pcie"; reg = <0x51000000 0x2000>, <0x51002000 0x14c>, <0x1000 0x2000>; reg-names = "rc_dbics", "ti_conf", "config"; @@@ -291,6 -286,16 +291,6 @@@ #thermal-sensor-cells = <1>; };
- dra7_ctrl_core: ctrl_core@4a002000 { - compatible = "syscon"; - reg = <0x4a002000 0x6d0>; - }; - - dra7_ctrl_general: tisyscon@4a002e00 { - compatible = "syscon"; - reg = <0x4a002e00 0x7c>; - }; - sdma: dma-controller@4a056000 { compatible = "ti,omap4430-sdma"; reg = <0x4a056000 0x1000>; @@@ -303,15 -308,6 +303,15 @@@ dma-requests = <127>; };
+ sdma_xbar: dma-router@4a002b78 { + compatible = "ti,dra7-dma-crossbar"; + reg = <0x4a002b78 0xfc>; + #dma-cells = <1>; + dma-requests = <205>; + ti,dma-safe-map = <0>; + dma-masters = <&sdma>; + }; + gpio1: gpio@4ae10000 { compatible = "ti,omap4-gpio"; reg = <0x4ae10000 0x200>; @@@ -407,7 -403,7 +407,7 @@@ ti,hwmods = "uart1"; clock-frequency = <48000000>; status = "disabled"; - dmas = <&sdma 49>, <&sdma 50>; + dmas = <&sdma_xbar 49>, <&sdma_xbar 50>; dma-names = "tx", "rx"; };
@@@ -418,7 -414,7 +418,7 @@@ ti,hwmods = "uart2"; clock-frequency = <48000000>; status = "disabled"; - dmas = <&sdma 51>, <&sdma 52>; + dmas = <&sdma_xbar 51>, <&sdma_xbar 52>; dma-names = "tx", "rx"; };
@@@ -429,7 -425,7 +429,7 @@@ ti,hwmods = "uart3"; clock-frequency = <48000000>; status = "disabled"; - dmas = <&sdma 53>, <&sdma 54>; + dmas = <&sdma_xbar 53>, <&sdma_xbar 54>; dma-names = "tx", "rx"; };
@@@ -440,7 -436,7 +440,7 @@@ ti,hwmods = "uart4"; clock-frequency = <48000000>; status = "disabled"; - dmas = <&sdma 55>, <&sdma 56>; + dmas = <&sdma_xbar 55>, <&sdma_xbar 56>; dma-names = "tx", "rx"; };
@@@ -451,7 -447,7 +451,7 @@@ ti,hwmods = "uart5"; clock-frequency = <48000000>; status = "disabled"; - dmas = <&sdma 63>, <&sdma 64>; + dmas = <&sdma_xbar 63>, <&sdma_xbar 64>; dma-names = "tx", "rx"; };
@@@ -462,7 -458,7 +462,7 @@@ ti,hwmods = "uart6"; clock-frequency = <48000000>; status = "disabled"; - dmas = <&sdma 79>, <&sdma 80>; + dmas = <&sdma_xbar 79>, <&sdma_xbar 80>; dma-names = "tx", "rx"; };
@@@ -871,7 -867,7 +871,7 @@@ ti,hwmods = "mmc1"; ti,dual-volt; ti,needs-special-reset; - dmas = <&sdma 61>, <&sdma 62>; + dmas = <&sdma_xbar 61>, <&sdma_xbar 62>; dma-names = "tx", "rx"; status = "disabled"; pbias-supply = <&pbias_mmc_reg>; @@@ -883,7 -879,7 +883,7 @@@ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "mmc2"; ti,needs-special-reset; - dmas = <&sdma 47>, <&sdma 48>; + dmas = <&sdma_xbar 47>, <&sdma_xbar 48>; dma-names = "tx", "rx"; status = "disabled"; }; @@@ -894,7 -890,7 +894,7 @@@ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "mmc3"; ti,needs-special-reset; - dmas = <&sdma 77>, <&sdma 78>; + dmas = <&sdma_xbar 77>, <&sdma_xbar 78>; dma-names = "tx", "rx"; status = "disabled"; }; @@@ -905,7 -901,7 +905,7 @@@ interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>; ti,hwmods = "mmc4"; ti,needs-special-reset; - dmas = <&sdma 57>, <&sdma 58>; + dmas = <&sdma_xbar 57>, <&sdma_xbar 58>; dma-names = "tx", "rx"; status = "disabled"; }; @@@ -1050,14 -1046,14 +1050,14 @@@ #size-cells = <0>; ti,hwmods = "mcspi1"; ti,spi-num-cs = <4>; - dmas = <&sdma 35>, - <&sdma 36>, - <&sdma 37>, - <&sdma 38>, - <&sdma 39>, - <&sdma 40>, - <&sdma 41>, - <&sdma 42>; + dmas = <&sdma_xbar 35>, + <&sdma_xbar 36>, + <&sdma_xbar 37>, + <&sdma_xbar 38>, + <&sdma_xbar 39>, + <&sdma_xbar 40>, + <&sdma_xbar 41>, + <&sdma_xbar 42>; dma-names = "tx0", "rx0", "tx1", "rx1", "tx2", "rx2", "tx3", "rx3"; status = "disabled"; @@@ -1071,10 -1067,10 +1071,10 @@@ #size-cells = <0>; ti,hwmods = "mcspi2"; ti,spi-num-cs = <2>; - dmas = <&sdma 43>, - <&sdma 44>, - <&sdma 45>, - <&sdma 46>; + dmas = <&sdma_xbar 43>, + <&sdma_xbar 44>, + <&sdma_xbar 45>, + <&sdma_xbar 46>; dma-names = "tx0", "rx0", "tx1", "rx1"; status = "disabled"; }; @@@ -1087,7 -1083,7 +1087,7 @@@ #size-cells = <0>; ti,hwmods = "mcspi3"; ti,spi-num-cs = <2>; - dmas = <&sdma 15>, <&sdma 16>; + dmas = <&sdma_xbar 15>, <&sdma_xbar 16>; dma-names = "tx0", "rx0"; status = "disabled"; }; @@@ -1100,7 -1096,7 +1100,7 @@@ #size-cells = <0>; ti,hwmods = "mcspi4"; ti,spi-num-cs = <1>; - dmas = <&sdma 70>, <&sdma 71>; + dmas = <&sdma_xbar 70>, <&sdma_xbar 71>; dma-names = "tx0", "rx0"; status = "disabled"; }; @@@ -1300,12 -1296,7 +1300,12 @@@ usb1: usb@48890000 { compatible = "snps,dwc3"; reg = <0x48890000 0x17000>; - interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "peripheral", + "host", + "otg"; phys = <&usb2_phy1>, <&usb3_phy1>; phy-names = "usb2-phy", "usb3-phy"; tx-fifo-resize; @@@ -1328,12 -1319,7 +1328,12 @@@ usb2: usb@488d0000 { compatible = "snps,dwc3"; reg = <0x488d0000 0x17000>; - interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "peripheral", + "host", + "otg"; phys = <&usb2_phy2>; phy-names = "usb2-phy"; tx-fifo-resize; @@@ -1358,12 -1344,7 +1358,12 @@@ usb3: usb@48910000 { compatible = "snps,dwc3"; reg = <0x48910000 0x17000>; - interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "peripheral", + "host", + "otg"; tx-fifo-resize; maximum-speed = "high-speed"; dr_mode = "otg"; @@@ -1418,7 -1399,7 +1418,7 @@@ };
mac: ethernet@4a100000 { - compatible = "ti,cpsw"; + compatible = "ti,dra7-cpsw","ti,cpsw"; ti,hwmods = "gmac"; clocks = <&dpll_gmac_ck>, <&gmac_gmii_ref_clk_div>; clock-names = "fck", "cpts"; diff --combined drivers/base/property.c index 37a7bb7,4c20828..287704d --- a/drivers/base/property.c +++ b/drivers/base/property.c @@@ -16,6 -16,8 +16,8 @@@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/property.h> + #include <linux/etherdevice.h> + #include <linux/phy.h>
 /**
  * device_add_property_set - Add a collection of properties to a device object.
@@@ -27,10 -29,9 +29,10 @@@
  */
 void device_add_property_set(struct device *dev, struct property_set *pset)
 {
-	if (pset)
-		pset->fwnode.type = FWNODE_PDATA;
+	if (!pset)
+		return;
+
+	pset->fwnode.type = FWNODE_PDATA;
 	set_secondary_fwnode(dev, &pset->fwnode);
 }
 EXPORT_SYMBOL_GPL(device_add_property_set);
@@@ -534,3 -535,79 +536,79 @@@ bool device_dma_is_coherent(struct devi
 	return coherent;
 }
 EXPORT_SYMBOL_GPL(device_dma_is_coherent);
+
+ /**
+  * device_get_phy_mode - Get phy mode for given device
+  * @dev: Pointer to the given device
+  *
+  * The function gets the phy interface string from the property 'phy-mode' or
+  * 'phy-connection-type', and returns its index in the phy_modes table, or
+  * errno in the error case.
+  */
+ int device_get_phy_mode(struct device *dev)
+ {
+ 	const char *pm;
+ 	int err, i;
+
+ 	err = device_property_read_string(dev, "phy-mode", &pm);
+ 	if (err < 0)
+ 		err = device_property_read_string(dev,
+ 						  "phy-connection-type", &pm);
+ 	if (err < 0)
+ 		return err;
+
+ 	for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
+ 		if (!strcasecmp(pm, phy_modes(i)))
+ 			return i;
+
+ 	return -ENODEV;
+ }
+ EXPORT_SYMBOL_GPL(device_get_phy_mode);
+
+ static void *device_get_mac_addr(struct device *dev,
+ 				 const char *name, char *addr,
+ 				 int alen)
+ {
+ 	int ret = device_property_read_u8_array(dev, name, addr, alen);
+
+ 	if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr))
+ 		return addr;
+ 	return NULL;
+ }
+
+ /**
+  * device_get_mac_address - Get the MAC for a given device
+  * @dev: Pointer to the device
+  * @addr: Address of buffer to store the MAC in
+  * @alen: Length of the buffer pointed to by addr, should be ETH_ALEN
+  *
+  * Search the firmware node for the best MAC address to use.  'mac-address' is
+  * checked first, because that is supposed to contain the "most recent" MAC
+  * address. If that isn't set, then 'local-mac-address' is checked next,
+  * because that is the default address.  If that isn't set, then the obsolete
+  * 'address' is checked, just in case we're using an old device tree.
+  *
+  * Note that the 'address' property is supposed to contain a virtual address of
+  * the register set, but some DTS files have redefined that property to be the
+  * MAC address.
+  *
+  * All-zero MAC addresses are rejected, because those could be properties that
+  * exist in the firmware tables, but were not updated by the firmware.  For
+  * example, the DTS could define 'mac-address' and 'local-mac-address', with
+  * zero MAC addresses.  Some older U-Boots only initialized 'local-mac-address'.
+  * In this case, the real MAC is in 'local-mac-address', and 'mac-address'
+  * exists but is all zeros.
+  */
+ void *device_get_mac_address(struct device *dev, char *addr, int alen)
+ {
+ 	addr = device_get_mac_addr(dev, "mac-address", addr, alen);
+ 	if (addr)
+ 		return addr;
+
+ 	addr = device_get_mac_addr(dev, "local-mac-address", addr, alen);
+ 	if (addr)
+ 		return addr;
+
+ 	return device_get_mac_addr(dev, "address", addr, alen);
+ }
+ EXPORT_SYMBOL(device_get_mac_address);
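The two helpers added above give network drivers one firmware-agnostic call for the PHY mode and the MAC address, whether the properties come from device tree or from a built-in property set. A minimal usage sketch; the probe function and the random-address fallback policy are invented for illustration:

#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/property.h>

static int ex_eth_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	char mac[ETH_ALEN];
	int phy_mode;

	phy_mode = device_get_phy_mode(dev);	/* index into phy_modes() */
	if (phy_mode < 0)
		return phy_mode;	/* missing or unrecognized property */

	if (!device_get_mac_address(dev, mac, ETH_ALEN))
		eth_random_addr((u8 *)mac);	/* no valid firmware MAC */

	/* ... register the netdev using phy_mode and mac ... */
	return 0;
}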
diff --combined drivers/net/ethernet/emulex/benet/be_main.c
index 6ca693b,15cc3a1..12687bf
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@@ -681,11 -681,14 +681,14 @@@ void be_link_status_update(struct be_ad
 static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
 {
 	struct be_tx_stats *stats = tx_stats(txo);
+	u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;

 	u64_stats_update_begin(&stats->sync);
 	stats->tx_reqs++;
 	stats->tx_bytes += skb->len;
-	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
+	stats->tx_pkts += tx_pkts;
+	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
+		stats->tx_vxlan_offload_pkts += tx_pkts;
 	u64_stats_update_end(&stats->sync);
 }

@@@ -1258,7 -1261,7 +1261,7 @@@ static bool be_send_pkt_to_bmc(struct b
 	if (is_udp_pkt((*skb))) {
 		struct udphdr *udp = udp_hdr((*skb));

-		switch (udp->dest) {
+		switch (ntohs(udp->dest)) {
 		case DHCP_CLIENT_PORT:
 			os2bmc = is_dhcp_client_filt_enabled(adapter);
 			goto done;
@@@ -1961,6 -1964,8 +1964,8 @@@ static void be_rx_stats_update(struct b
 	stats->rx_compl++;
 	stats->rx_bytes += rxcp->pkt_size;
 	stats->rx_pkts++;
+	if (rxcp->tunneled)
+		stats->rx_vxlan_offload_pkts++;
 	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
 		stats->rx_mcast_pkts++;
 	if (rxcp->err)
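The switch key fix above is a byte-order bug: udp->dest is big-endian on the wire, so on little-endian CPUs the old comparison against host-order case labels never matched. A sketch of the two equivalent correct forms (the constant value is invented for the example):

#include <linux/udp.h>

#define EX_DHCP_CLIENT_PORT	68	/* host-order example constant */

static bool ex_is_dhcp_client(const struct udphdr *udp)
{
	/* convert the wire value once and compare in host order ... */
	if (ntohs(udp->dest) == EX_DHCP_CLIENT_PORT)
		return true;

	/* ... or convert the constant, which folds at compile time */
	return udp->dest == htons(EX_DHCP_CLIENT_PORT);
}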
@@@ -3610,15 -3615,15 +3615,15 @@@ err

 static int be_setup_wol(struct be_adapter *adapter, bool enable)
 {
+	struct device *dev = &adapter->pdev->dev;
 	struct be_dma_mem cmd;
-	int status = 0;
 	u8 mac[ETH_ALEN];
+	int status;

 	eth_zero_addr(mac);

 	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
-	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
-				     GFP_KERNEL);
+	cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
 	if (!cmd.va)
 		return -ENOMEM;
@@@ -3627,24 -3632,18 +3632,18 @@@ PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK); if (status) { - dev_err(&adapter->pdev->dev, - "Could not enable Wake-on-lan\n"); - dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, - cmd.dma); - return status; + dev_err(dev, "Could not enable Wake-on-lan\n"); + goto err; } - status = be_cmd_enable_magic_wol(adapter, - adapter->netdev->dev_addr, - &cmd); - pci_enable_wake(adapter->pdev, PCI_D3hot, 1); - pci_enable_wake(adapter->pdev, PCI_D3cold, 1); } else { - status = be_cmd_enable_magic_wol(adapter, mac, &cmd); - pci_enable_wake(adapter->pdev, PCI_D3hot, 0); - pci_enable_wake(adapter->pdev, PCI_D3cold, 0); + ether_addr_copy(mac, adapter->netdev->dev_addr); }
- dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); + status = be_cmd_enable_magic_wol(adapter, mac, &cmd); + pci_enable_wake(adapter->pdev, PCI_D3hot, enable); + pci_enable_wake(adapter->pdev, PCI_D3cold, enable); + err: + dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma); return status; }
@@@ -4977,7 -4976,7 +4976,7 @@@ static bool be_check_ufi_compatibility(
 {
 	if (!fhdr) {
 		dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
-		return -1;
+		return false;
 	}
/* First letter of the build version is used to identify @@@ -5132,9 -5131,6 +5131,6 @@@ static int be_ndo_bridge_getlink(struc int status = 0; u8 hsw_mode;
- if (!sriov_enabled(adapter)) - return 0; - /* BE and Lancer chips support VEB mode only */ if (BEx_chip(adapter) || lancer_chip(adapter)) { hsw_mode = PORT_FWD_TYPE_VEB; @@@ -5144,6 -5140,9 +5140,9 @@@ NULL); if (status) return 0; + + if (hsw_mode == PORT_FWD_TYPE_PASSTHRU) + return 0; }
return ndo_dflt_bridge_getlink(skb, pid, seq, dev, @@@ -5174,7 -5173,7 +5173,7 @@@ static void be_add_vxlan_port(struct ne struct device *dev = &adapter->pdev->dev; int status;
- if (lancer_chip(adapter) || BEx_chip(adapter)) + if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) return;
if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { @@@ -5221,7 -5220,7 +5220,7 @@@ static void be_del_vxlan_port(struct ne { struct be_adapter *adapter = netdev_priv(netdev);
- if (lancer_chip(adapter) || BEx_chip(adapter)) + if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) return;
if (adapter->vxlan_port != port) @@@ -5278,6 -5277,27 +5277,27 @@@ static netdev_features_t be_features_ch } #endif
+ static int be_get_phys_port_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) + { + int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1; + struct be_adapter *adapter = netdev_priv(dev); + u8 *id; + + if (MAX_PHYS_ITEM_ID_LEN < id_len) + return -ENOSPC; + + ppid->id[0] = adapter->hba_port_num + 1; + id = &ppid->id[1]; + for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0; + i--, id += CNTL_SERIAL_NUM_WORD_SZ) + memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ); + + ppid->id_len = id_len; + + return 0; + } + static const struct net_device_ops be_netdev_ops = { .ndo_open = be_open, .ndo_stop = be_close, @@@ -5308,6 -5328,7 +5328,7 @@@ .ndo_del_vxlan_port = be_del_vxlan_port, .ndo_features_check = be_features_check, #endif + .ndo_get_phys_port_id = be_get_phys_port_id, };
static void be_netdev_init(struct net_device *netdev) @@@ -5866,7 -5887,6 +5887,6 @@@ static int be_pci_resume(struct pci_de if (status) return status;
- pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev);
status = be_resume(adapter); @@@ -5946,7 -5966,6 +5966,6 @@@ static pci_ers_result_t be_eeh_reset(st return PCI_ERS_RESULT_DISCONNECT;
pci_set_master(pdev); - pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev);
/* Check if card is ok and fw is ready */ diff --combined drivers/net/ethernet/freescale/gianfar.c index 10b3bbbb,087ffcd..4b69d061 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@@ -109,15 -109,15 +109,15 @@@
#define TX_TIMEOUT (1*HZ)
- const char gfar_driver_version[] = "1.3";
+ const char gfar_driver_version[] = "2.0";
static int gfar_enet_open(struct net_device *dev); static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); static void gfar_reset_task(struct work_struct *work); static void gfar_timeout(struct net_device *dev); static int gfar_close(struct net_device *dev); - static struct sk_buff *gfar_new_skb(struct net_device *dev, - dma_addr_t *bufaddr); + static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, + int alloc_cnt); static int gfar_set_mac_address(struct net_device *dev); static int gfar_change_mtu(struct net_device *dev, int new_mtu); static irqreturn_t gfar_error(int irq, void *dev_id); @@@ -141,8 -141,7 +141,7 @@@ static void gfar_netpoll(struct net_dev #endif int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); - static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, - int amount_pull, struct napi_struct *napi); + static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb); static void gfar_halt_nodisable(struct gfar_private *priv); static void gfar_clear_exact_match(struct net_device *dev); static void gfar_set_mac_for_addr(struct net_device *dev, int num, @@@ -169,17 -168,15 +168,15 @@@ static void gfar_init_rxbdp(struct gfar bdp->lstatus = cpu_to_be32(lstatus); }
- static int gfar_init_bds(struct net_device *ndev) + static void gfar_init_bds(struct net_device *ndev) { struct gfar_private *priv = netdev_priv(ndev); struct gfar __iomem *regs = priv->gfargrp[0].regs; struct gfar_priv_tx_q *tx_queue = NULL; struct gfar_priv_rx_q *rx_queue = NULL; struct txbd8 *txbdp; - struct rxbd8 *rxbdp; u32 __iomem *rfbptr; int i, j; - dma_addr_t bufaddr;
for (i = 0; i < priv->num_tx_queues; i++) { tx_queue = priv->tx_queue[i]; @@@ -207,40 -204,26 +204,26 @@@ rfbptr = ®s->rfbptr0; for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; - rx_queue->cur_rx = rx_queue->rx_bd_base; - rx_queue->skb_currx = 0; - rxbdp = rx_queue->rx_bd_base;
- for (j = 0; j < rx_queue->rx_ring_size; j++) { - struct sk_buff *skb = rx_queue->rx_skbuff[j]; + rx_queue->next_to_clean = 0; + rx_queue->next_to_use = 0; + rx_queue->next_to_alloc = 0;
- if (skb) { - bufaddr = be32_to_cpu(rxbdp->bufPtr); - } else { - skb = gfar_new_skb(ndev, &bufaddr); - if (!skb) { - netdev_err(ndev, "Can't allocate RX buffers\n"); - return -ENOMEM; - } - rx_queue->rx_skbuff[j] = skb; - } - - gfar_init_rxbdp(rx_queue, rxbdp, bufaddr); - rxbdp++; - } + /* make sure next_to_clean != next_to_use after this + * by leaving at least 1 unused descriptor + */ + gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
rx_queue->rfbptr = rfbptr; rfbptr += 2; } - - return 0; }
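The "leave at least 1 unused descriptor" comment above reflects a standard ring-buffer invariant: with a producer index (next_to_use) and a consumer index (next_to_clean), one slot is kept permanently empty so that equal indices can only mean "ring empty", never "ring full". A sketch of the resulting free-slot arithmetic, mirroring what the patch's gfar_rxbd_unused() has to compute:

/* free descriptors, with one slot reserved to tell full from empty */
static unsigned int ex_rxbd_unused(unsigned int next_to_use,
				   unsigned int next_to_clean,
				   unsigned int ring_size)
{
	if (next_to_use >= next_to_clean)
		return ring_size - 1 - (next_to_use - next_to_clean);

	return next_to_clean - next_to_use - 1;
}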
static int gfar_alloc_skb_resources(struct net_device *ndev) { void *vaddr; dma_addr_t addr; - int i, j, k; + int i, j; struct gfar_private *priv = netdev_priv(ndev); struct device *dev = priv->dev; struct gfar_priv_tx_q *tx_queue = NULL; @@@ -279,7 -262,8 +262,8 @@@ rx_queue = priv->rx_queue[i]; rx_queue->rx_bd_base = vaddr; rx_queue->rx_bd_dma_base = addr; - rx_queue->dev = ndev; + rx_queue->ndev = ndev; + rx_queue->dev = dev; addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size; } @@@ -294,25 -278,20 +278,20 @@@ if (!tx_queue->tx_skbuff) goto cleanup;
- for (k = 0; k < tx_queue->tx_ring_size; k++) - tx_queue->tx_skbuff[k] = NULL; + for (j = 0; j < tx_queue->tx_ring_size; j++) + tx_queue->tx_skbuff[j] = NULL; }
for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; - rx_queue->rx_skbuff = - kmalloc_array(rx_queue->rx_ring_size, - sizeof(*rx_queue->rx_skbuff), - GFP_KERNEL); - if (!rx_queue->rx_skbuff) + rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size, + sizeof(*rx_queue->rx_buff), + GFP_KERNEL); + if (!rx_queue->rx_buff) goto cleanup; - - for (j = 0; j < rx_queue->rx_ring_size; j++) - rx_queue->rx_skbuff[j] = NULL; }
- if (gfar_init_bds(ndev)) - goto cleanup; + gfar_init_bds(ndev);
return 0;
@@@ -354,10 -333,8 +333,8 @@@ static void gfar_init_rqprm(struct gfar } }
- static void gfar_rx_buff_size_config(struct gfar_private *priv) + static void gfar_rx_offload_en(struct gfar_private *priv) { - int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN; - /* set this when rx hw offload (TOE) functions are being used */ priv->uses_rxfcb = 0;
@@@ -366,16 -343,6 +343,6 @@@
if (priv->hwts_rx_en) priv->uses_rxfcb = 1; - - if (priv->uses_rxfcb) - frame_size += GMAC_FCB_LEN; - - frame_size += priv->padding; - - frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + - INCREMENTAL_BUFFER_SIZE; - - priv->rx_buffer_size = frame_size; }
static void gfar_mac_rx_config(struct gfar_private *priv) @@@ -593,9 -560,8 +560,8 @@@ static int gfar_alloc_rx_queues(struct if (!priv->rx_queue[i]) return -ENOMEM;
- priv->rx_queue[i]->rx_skbuff = NULL; priv->rx_queue[i]->qindex = i; - priv->rx_queue[i]->dev = priv->ndev; + priv->rx_queue[i]->ndev = priv->ndev; } return 0; } @@@ -1187,12 -1153,11 +1153,11 @@@ void gfar_mac_reset(struct gfar_privat
udelay(3);
- /* Compute rx_buff_size based on config flags */ - gfar_rx_buff_size_config(priv); + gfar_rx_offload_en(priv);
/* Initialize the max receive frame/buffer lengths */ - gfar_write(®s->maxfrm, priv->rx_buffer_size); - gfar_write(®s->mrblr, priv->rx_buffer_size); + gfar_write(®s->maxfrm, GFAR_JUMBO_FRAME_SIZE); + gfar_write(®s->mrblr, GFAR_RXB_SIZE);
/* Initialize the Minimum Frame Length Register */ gfar_write(®s->minflr, MINFLR_INIT_SETTINGS); @@@ -1200,12 -1165,11 +1165,11 @@@ /* Initialize MACCFG2. */ tempval = MACCFG2_INIT_SETTINGS;
- /* If the mtu is larger than the max size for standard - * ethernet frames (ie, a jumbo frame), then set maccfg2 - * to allow huge frames, and to check the length + /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1 + * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1, + * and by checking RxBD[LG] and discarding larger than MAXFRM. */ - if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || - gfar_has_errata(priv, GFAR_ERRATA_74)) + if (gfar_has_errata(priv, GFAR_ERRATA_74)) tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
gfar_write(®s->maccfg2, tempval); @@@ -1415,8 -1379,6 +1379,6 @@@ static int gfar_probe(struct platform_d priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) dev->needed_headroom = GMAC_FCB_LEN;
- priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; - /* Initializing some of the rx/tx queue level parameters */ for (i = 0; i < priv->num_tx_queues; i++) { priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE; @@@ -1599,10 -1561,7 +1561,7 @@@ static int gfar_restore(struct device * return 0; }
- if (gfar_init_bds(ndev)) { - free_skb_resources(priv); - return -ENOMEM; - } + gfar_init_bds(ndev);
gfar_mac_reset(priv);
@@@ -1893,26 -1852,32 +1852,32 @@@ static void free_skb_tx_queue(struct gf
static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) { - struct rxbd8 *rxbdp; - struct gfar_private *priv = netdev_priv(rx_queue->dev); int i;
- rxbdp = rx_queue->rx_bd_base; + struct rxbd8 *rxbdp = rx_queue->rx_bd_base; + + if (rx_queue->skb) + dev_kfree_skb(rx_queue->skb);
for (i = 0; i < rx_queue->rx_ring_size; i++) { - if (rx_queue->rx_skbuff[i]) { - dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr), - priv->rx_buffer_size, - DMA_FROM_DEVICE); - dev_kfree_skb_any(rx_queue->rx_skbuff[i]); - rx_queue->rx_skbuff[i] = NULL; - } + struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i]; + rxbdp->lstatus = 0; rxbdp->bufPtr = 0; rxbdp++; + + if (!rxb->page) + continue; + + dma_unmap_single(rx_queue->dev, rxb->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + __free_page(rxb->page); + + rxb->page = NULL; } - kfree(rx_queue->rx_skbuff); - rx_queue->rx_skbuff = NULL; + + kfree(rx_queue->rx_buff); + rx_queue->rx_buff = NULL; }
/* If there are any tx skbs or rx skbs still around, free them. @@@ -1937,7 -1902,7 +1902,7 @@@ static void free_skb_resources(struct g
for (i = 0; i < priv->num_rx_queues; i++) { rx_queue = priv->rx_queue[i]; - if (rx_queue->rx_skbuff) + if (rx_queue->rx_buff) free_skb_rx_queue(rx_queue); }
@@@ -2102,11 -2067,6 +2067,11 @@@ int startup_gfar(struct net_device *nde /* Start Rx/Tx DMA and enable the interrupts */ gfar_start(priv);
+ /* force link state update after mac reset */ + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + phy_start(priv->phydev);
enable_napi(priv); @@@ -2500,7 -2460,7 +2465,7 @@@ static int gfar_change_mtu(struct net_d struct gfar_private *priv = netdev_priv(dev); int frame_size = new_mtu + ETH_HLEN;
- if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { + if ((frame_size < 64) || (frame_size > GFAR_JUMBO_FRAME_SIZE)) { netif_err(priv, drv, dev, "Invalid MTU setting\n"); return -EINVAL; } @@@ -2554,15 -2514,6 +2519,6 @@@ static void gfar_timeout(struct net_dev schedule_work(&priv->reset_task); }
- static void gfar_align_skb(struct sk_buff *skb) - { - /* We need the data buffer to be aligned properly. We will reserve - * as many bytes as needed to align the data properly - */ - skb_reserve(skb, RXBUF_ALIGNMENT - - (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1))); - } - /* Interrupt Handler for Transmit complete */ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) { @@@ -2620,7 -2571,8 +2576,8 @@@
 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
 			struct skb_shared_hwtstamps shhwtstamps;
-			u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
+					  ~0x7UL);

 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
@@@ -2669,49 -2621,85 +2626,85 @@@
 	netdev_tx_completed_queue(txq, howmany, bytes_sent);
 }
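The pointer cast fixed above is a 64-bit correctness issue: casting skb->data to u32 truncates the upper half of the address before the 8-byte alignment mask is applied. uintptr_t is guaranteed to round-trip a pointer on any architecture; a standalone illustration of the safe idiom (helper name invented):

#include <linux/types.h>

/* round (p + offset) down to an 8-byte boundary, 32- and 64-bit safe */
static inline void *ex_align_down8(const void *p, unsigned long offset)
{
	return (void *)(((uintptr_t)p + offset) & ~0x7UL);
}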
- static struct sk_buff *gfar_alloc_skb(struct net_device *dev) + static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb) { - struct gfar_private *priv = netdev_priv(dev); - struct sk_buff *skb; + struct page *page; + dma_addr_t addr;
- skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); - if (!skb) - return NULL; + page = dev_alloc_page(); + if (unlikely(!page)) + return false;
- gfar_align_skb(skb); + addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(rxq->dev, addr))) { + __free_page(page);
- return skb; + return false; + } + + rxb->dma = addr; + rxb->page = page; + rxb->page_offset = 0; + + return true; }
- static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr) + static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue) { - struct gfar_private *priv = netdev_priv(dev); - struct sk_buff *skb; - dma_addr_t addr; + struct gfar_private *priv = netdev_priv(rx_queue->ndev); + struct gfar_extra_stats *estats = &priv->extra_stats;
- skb = gfar_alloc_skb(dev); - if (!skb) - return NULL; + netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n"); + atomic64_inc(&estats->rx_alloc_err); + }
- addr = dma_map_single(priv->dev, skb->data, - priv->rx_buffer_size, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(priv->dev, addr))) { - dev_kfree_skb_any(skb); - return NULL; + static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue, + int alloc_cnt) + { + struct rxbd8 *bdp; + struct gfar_rx_buff *rxb; + int i; + + i = rx_queue->next_to_use; + bdp = &rx_queue->rx_bd_base[i]; + rxb = &rx_queue->rx_buff[i]; + + while (alloc_cnt--) { + /* try reuse page */ + if (unlikely(!rxb->page)) { + if (unlikely(!gfar_new_page(rx_queue, rxb))) { + gfar_rx_alloc_err(rx_queue); + break; + } + } + + /* Setup the new RxBD */ + gfar_init_rxbdp(rx_queue, bdp, + rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT); + + /* Update to the next pointer */ + bdp++; + rxb++; + + if (unlikely(++i == rx_queue->rx_ring_size)) { + i = 0; + bdp = rx_queue->rx_bd_base; + rxb = rx_queue->rx_buff; + } }
- *bufaddr = addr; - return skb; + rx_queue->next_to_use = i; + rx_queue->next_to_alloc = i; }
- static inline void count_errors(unsigned short status, struct net_device *dev) + static void count_errors(u32 lstatus, struct net_device *ndev) { - struct gfar_private *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; + struct gfar_private *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; struct gfar_extra_stats *estats = &priv->extra_stats;
/* If the packet was truncated, none of the other errors matter */ - if (status & RXBD_TRUNCATED) { + if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) { stats->rx_length_errors++;
atomic64_inc(&estats->rx_trunc); @@@ -2719,25 -2707,25 +2712,25 @@@ return; } /* Count the errors, if there were any */ - if (status & (RXBD_LARGE | RXBD_SHORT)) { + if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) { stats->rx_length_errors++;
- if (status & RXBD_LARGE) + if (lstatus & BD_LFLAG(RXBD_LARGE)) atomic64_inc(&estats->rx_large); else atomic64_inc(&estats->rx_short); } - if (status & RXBD_NONOCTET) { + if (lstatus & BD_LFLAG(RXBD_NONOCTET)) { stats->rx_frame_errors++; atomic64_inc(&estats->rx_nonoctet); } - if (status & RXBD_CRCERR) { + if (lstatus & BD_LFLAG(RXBD_CRCERR)) { atomic64_inc(&estats->rx_crcerr); stats->rx_crc_errors++; } - if (status & RXBD_OVERRUN) { + if (lstatus & BD_LFLAG(RXBD_OVERRUN)) { atomic64_inc(&estats->rx_overrun); - stats->rx_crc_errors++; + stats->rx_over_errors++; } }
@@@ -2788,6 -2776,93 +2781,93 @@@ static irqreturn_t gfar_transmit(int ir return IRQ_HANDLED; }
+ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, + struct sk_buff *skb, bool first) + { + unsigned int size = lstatus & BD_LENGTH_MASK; + struct page *page = rxb->page; + + /* Remove the FCS from the packet length */ + if (likely(lstatus & BD_LFLAG(RXBD_LAST))) + size -= ETH_FCS_LEN; + + if (likely(first)) + skb_put(skb, size); + else + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rxb->page_offset + RXBUF_ALIGNMENT, + size, GFAR_RXB_TRUESIZE); + + /* try reuse page */ + if (unlikely(page_count(page) != 1)) + return false; + + /* change offset to the other half */ + rxb->page_offset ^= GFAR_RXB_TRUESIZE; + + atomic_inc(&page->_count); + + return true; + } + + static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq, + struct gfar_rx_buff *old_rxb) + { + struct gfar_rx_buff *new_rxb; + u16 nta = rxq->next_to_alloc; + + new_rxb = &rxq->rx_buff[nta]; + + /* find next buf that can reuse a page */ + nta++; + rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0; + + /* copy page reference */ + *new_rxb = *old_rxb; + + /* sync for use by the device */ + dma_sync_single_range_for_device(rxq->dev, old_rxb->dma, + old_rxb->page_offset, + GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE); + } + + static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue, + u32 lstatus, struct sk_buff *skb) + { + struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean]; + struct page *page = rxb->page; + bool first = false; + + if (likely(!skb)) { + void *buff_addr = page_address(page) + rxb->page_offset; + + skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE); + if (unlikely(!skb)) { + gfar_rx_alloc_err(rx_queue); + return NULL; + } + skb_reserve(skb, RXBUF_ALIGNMENT); + first = true; + } + + dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset, + GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE); + + if (gfar_add_rx_frag(rxb, lstatus, skb, first)) { + /* reuse the free half of the page */ + gfar_reuse_rx_page(rx_queue, rxb); + } else { + /* page cannot be reused, unmap it */ + dma_unmap_page(rx_queue->dev, rxb->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + } + + /* clear rxb content */ + rxb->page = NULL; + + return skb; + } + static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb) { /* If valid headers were found, and valid sums @@@ -2802,10 -2877,9 +2882,9 @@@ }
/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */ - static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, - int amount_pull, struct napi_struct *napi) + static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) { - struct gfar_private *priv = netdev_priv(dev); + struct gfar_private *priv = netdev_priv(ndev); struct rxfcb *fcb = NULL;
/* fcb is at the beginning if exists */ @@@ -2814,10 -2888,8 +2893,8 @@@ /* Remove the FCB from the skb * Remove the padded bytes, if there are any */ - if (amount_pull) { - skb_record_rx_queue(skb, fcb->rq); - skb_pull(skb, amount_pull); - } + if (priv->uses_rxfcb) + skb_pull(skb, GMAC_FCB_LEN);
/* Get receive timestamp from the skb */ if (priv->hwts_rx_en) { @@@ -2831,24 -2903,20 +2908,20 @@@ if (priv->padding) skb_pull(skb, priv->padding);
- if (dev->features & NETIF_F_RXCSUM) + if (ndev->features & NETIF_F_RXCSUM) gfar_rx_checksum(skb, fcb);
/* Tell the skb what kind of packet this is */ - skb->protocol = eth_type_trans(skb, dev); + skb->protocol = eth_type_trans(skb, ndev);
/* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here. * Even if vlan rx accel is disabled, on some chips * RXFCB_VLN is pseudo randomly set. */ - if (dev->features & NETIF_F_HW_VLAN_CTAG_RX && + if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX && be16_to_cpu(fcb->flags) & RXFCB_VLN) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(fcb->vlctl)); - - /* Send the packet up the stack */ - napi_gro_receive(napi, skb); - }
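The new receive path above recycles pages by halves: gfar_add_rx_frag() hands one half of a page to the stack and flips page_offset to the other half, and the page is reused only while the driver holds the sole reference. A condensed sketch of that reuse decision (field names as in the patch; this is a simplification, not the complete logic):

static bool ex_try_reuse_half(struct gfar_rx_buff *rxb, unsigned int truesize)
{
	/* the stack still references the other half: release the page */
	if (page_count(rxb->page) != 1)
		return false;

	rxb->page_offset ^= truesize;	/* flip to the currently unused half */
	atomic_inc(&rxb->page->_count);	/* take a reference for the new half */
	return true;
}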
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring @@@ -2857,91 -2925,89 +2930,89 @@@ */ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) { - struct net_device *dev = rx_queue->dev; - struct rxbd8 *bdp, *base; - struct sk_buff *skb; - int pkt_len; - int amount_pull; - int howmany = 0; - struct gfar_private *priv = netdev_priv(dev); + struct net_device *ndev = rx_queue->ndev; + struct gfar_private *priv = netdev_priv(ndev); + struct rxbd8 *bdp; + int i, howmany = 0; + struct sk_buff *skb = rx_queue->skb; + int cleaned_cnt = gfar_rxbd_unused(rx_queue); + unsigned int total_bytes = 0, total_pkts = 0;
/* Get the first full descriptor */ - bdp = rx_queue->cur_rx; - base = rx_queue->rx_bd_base; + i = rx_queue->next_to_clean;
- amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0; + while (rx_work_limit--) { + u32 lstatus; + + if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) { + gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); + cleaned_cnt = 0; + }
- while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) { - struct sk_buff *newskb; - dma_addr_t bufaddr; + bdp = &rx_queue->rx_bd_base[i]; + lstatus = be32_to_cpu(bdp->lstatus); + if (lstatus & BD_LFLAG(RXBD_EMPTY)) + break;
+ /* order rx buffer descriptor reads */ rmb();
- /* Add another skb for the future */ - newskb = gfar_new_skb(dev, &bufaddr); + /* fetch next to clean buffer from the ring */ + skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb); + if (unlikely(!skb)) + break;
- skb = rx_queue->rx_skbuff[rx_queue->skb_currx]; + cleaned_cnt++; + howmany++;
- dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr), - priv->rx_buffer_size, DMA_FROM_DEVICE); - - if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) && - be16_to_cpu(bdp->length) > priv->rx_buffer_size)) - bdp->status = cpu_to_be16(RXBD_LARGE); - - /* We drop the frame if we failed to allocate a new buffer */ - if (unlikely(!newskb || - !(be16_to_cpu(bdp->status) & RXBD_LAST) || - be16_to_cpu(bdp->status) & RXBD_ERR)) { - count_errors(be16_to_cpu(bdp->status), dev); - - if (unlikely(!newskb)) { - newskb = skb; - bufaddr = be32_to_cpu(bdp->bufPtr); - } else if (skb) - dev_kfree_skb(skb); - } else { - /* Increment the number of packets */ - rx_queue->stats.rx_packets++; - howmany++; - - if (likely(skb)) { - pkt_len = be16_to_cpu(bdp->length) - - ETH_FCS_LEN; - /* Remove the FCS from the packet length */ - skb_put(skb, pkt_len); - rx_queue->stats.rx_bytes += pkt_len; - skb_record_rx_queue(skb, rx_queue->qindex); - gfar_process_frame(dev, skb, amount_pull, - &rx_queue->grp->napi_rx); + if (unlikely(++i == rx_queue->rx_ring_size)) + i = 0;
- } else { - netif_warn(priv, rx_err, dev, "Missing skb!\n"); - rx_queue->stats.rx_dropped++; - atomic64_inc(&priv->extra_stats.rx_skbmissing); - } + rx_queue->next_to_clean = i; + + /* fetch next buffer if not the last in frame */ + if (!(lstatus & BD_LFLAG(RXBD_LAST))) + continue; + + if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) { + count_errors(lstatus, ndev);
+ /* discard faulty buffer */ + dev_kfree_skb(skb); + skb = NULL; + rx_queue->stats.rx_dropped++; + continue; }
- rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb; + /* Increment the number of packets */ + total_pkts++; + total_bytes += skb->len;
- /* Setup the new bdp */ - gfar_init_rxbdp(rx_queue, bdp, bufaddr); + skb_record_rx_queue(skb, rx_queue->qindex);
- /* Update Last Free RxBD pointer for LFC */ - if (unlikely(rx_queue->rfbptr && priv->tx_actual_en)) - gfar_write(rx_queue->rfbptr, (u32)bdp); + gfar_process_frame(ndev, skb);
- /* Update to the next pointer */ - bdp = next_bd(bdp, base, rx_queue->rx_ring_size); + /* Send the packet up the stack */ + napi_gro_receive(&rx_queue->grp->napi_rx, skb);
- /* update to point at the next skb */ - rx_queue->skb_currx = (rx_queue->skb_currx + 1) & - RX_RING_MOD_MASK(rx_queue->rx_ring_size); + skb = NULL; }
- /* Update the current rxbd pointer to be the next one */ - rx_queue->cur_rx = bdp; + /* Store incomplete frames for completion */ + rx_queue->skb = skb; + + rx_queue->stats.rx_packets += total_pkts; + rx_queue->stats.rx_bytes += total_bytes; + + if (cleaned_cnt) + gfar_alloc_rx_buffs(rx_queue, cleaned_cnt); + + /* Update Last Free RxBD pointer for LFC */ + if (unlikely(priv->tx_actual_en)) { + u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); + + gfar_write(rx_queue->rfbptr, bdp_dma); + }
return howmany; } @@@ -3459,7 -3525,6 +3530,6 @@@ static noinline void gfar_update_link_s struct phy_device *phydev = priv->phydev; struct gfar_priv_rx_q *rx_queue = NULL; int i; - struct rxbd8 *bdp;
if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) return; @@@ -3516,15 -3581,11 +3586,11 @@@ /* Turn last free buffer recording on */ if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) { for (i = 0; i < priv->num_rx_queues; i++) { + u32 bdp_dma; + rx_queue = priv->rx_queue[i]; - bdp = rx_queue->cur_rx; - /* skip to previous bd */ - bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1, - rx_queue->rx_bd_base, - rx_queue->rx_ring_size); - - if (rx_queue->rfbptr) - gfar_write(rx_queue->rfbptr, (u32)bdp); + bdp_dma = gfar_rxbd_dma_lastfree(rx_queue); + gfar_write(rx_queue->rfbptr, bdp_dma); }
priv->tx_actual_en = 1; diff --combined drivers/net/ethernet/intel/igb/igb_main.c index 830466c,1902ef8..e174fbb --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@@ -57,8 -57,8 +57,8 @@@ #include "igb.h"
 #define MAJ 5
- #define MIN 2
- #define BUILD 18
+ #define MIN 3
+ #define BUILD 0
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 	__stringify(BUILD) "-k"
 char igb_driver_name[] = "igb";
@@@ -179,6 -179,8 +179,8 @@@ static void igb_check_vf_rate_limit(str
 #ifdef CONFIG_PCI_IOV
 static int igb_vf_configure(struct igb_adapter *adapter, int vf);
 static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
+ static int igb_disable_sriov(struct pci_dev *dev);
+ static int igb_pci_disable_sriov(struct pci_dev *dev);
 #endif
#ifdef CONFIG_PM @@@ -1205,10 -1207,14 +1207,14 @@@ static int igb_alloc_q_vector(struct ig
/* allocate q_vector and rings */ q_vector = adapter->q_vector[v_idx]; - if (!q_vector) + if (!q_vector) { q_vector = kzalloc(size, GFP_KERNEL); - else + } else if (size > ksize(q_vector)) { + kfree_rcu(q_vector, rcu); + q_vector = kzalloc(size, GFP_KERNEL); + } else { memset(q_vector, 0, size); + } if (!q_vector) return -ENOMEM;
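The q_vector allocation above now reallocates when the requested size outgrows the old object instead of blindly reusing it; ksize() reports the usable size of a kmalloc'ed buffer, and kfree_rcu() defers the free so concurrent RCU readers stay safe. The general pattern, sketched with a hypothetical structure:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct ex_vector {
	struct rcu_head rcu;		/* needed by kfree_rcu() below */
	/* ... ring and NAPI state ... */
};

static struct ex_vector *ex_reuse_or_grow(struct ex_vector *old, size_t size)
{
	if (!old)
		return kzalloc(size, GFP_KERNEL);

	if (size > ksize(old)) {	/* outgrew the previous allocation */
		kfree_rcu(old, rcu);	/* readers may still be looking */
		return kzalloc(size, GFP_KERNEL);
	}

	memset(old, 0, size);		/* still fits: recycle in place */
	return old;
}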
@@@ -2645,7 -2651,11 +2651,11 @@@ err_eeprom if (hw->flash_address) iounmap(hw->flash_address); err_sw_init: + kfree(adapter->shadow_vfta); igb_clear_interrupt_scheme(adapter); + #ifdef CONFIG_PCI_IOV + igb_disable_sriov(pdev); + #endif pci_iounmap(pdev, hw->hw_addr); err_ioremap: free_netdev(netdev); @@@ -2805,14 -2815,14 +2815,14 @@@ static void igb_remove(struct pci_dev * */ igb_release_hw_control(adapter);
- unregister_netdev(netdev); - - igb_clear_interrupt_scheme(adapter); - #ifdef CONFIG_PCI_IOV igb_disable_sriov(pdev); #endif
+ unregister_netdev(netdev); + + igb_clear_interrupt_scheme(adapter); + pci_iounmap(pdev, hw->hw_addr); if (hw->flash_address) iounmap(hw->flash_address); @@@ -2847,7 -2857,7 +2857,7 @@@ static void igb_probe_vfs(struct igb_ad return;
pci_sriov_set_totalvfs(pdev, 7); - igb_pci_enable_sriov(pdev, max_vfs); + igb_enable_sriov(pdev, max_vfs);
#endif /* CONFIG_PCI_IOV */ } @@@ -2888,6 -2898,14 +2898,14 @@@ static void igb_init_queue_configuratio
adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
+ igb_set_flag_queue_pairs(adapter, max_rss_queues); + } + + void igb_set_flag_queue_pairs(struct igb_adapter *adapter, + const u32 max_rss_queues) + { + struct e1000_hw *hw = &adapter->hw; + /* Determine if we need to pair queues. */ switch (hw->mac.type) { case e1000_82575: @@@ -2968,6 -2986,8 +2986,8 @@@ static int igb_sw_init(struct igb_adapt } #endif /* CONFIG_PCI_IOV */
+ igb_probe_vfs(adapter); + igb_init_queue_configuration(adapter);
/* Setup and initialize a copy of the hw vlan table array */ @@@ -2980,8 -3000,6 +3000,6 @@@ return -ENOMEM; }
- igb_probe_vfs(adapter); - /* Explicitly disable IRQ since the NIC can be in any state. */ igb_irq_disable(adapter);
@@@ -6566,7 -6584,7 +6584,7 @@@ static void igb_reuse_rx_page(struct ig
 static inline bool igb_page_is_reserved(struct page *page)
 {
-	return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }
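The pfmemalloc change here (and in the ixgbe hunk further down) is mechanical: the page->pfmemalloc field was replaced tree-wide by the page_is_pfmemalloc() helper when its storage moved, with unchanged semantics. Spelled out as the inverted recycling predicate, a receive page may be reused only if it is node-local and did not come from the emergency reserves:

#include <linux/mm.h>

static bool ex_page_ok_to_recycle(struct page *page)
{
	return page_to_nid(page) == numa_mem_id() &&	/* local node */
	       !page_is_pfmemalloc(page);	/* not from atomic reserves */
}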
static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, @@@ -6621,22 -6639,25 +6639,25 @@@ static bool igb_add_rx_frag(struct igb_ struct sk_buff *skb) { struct page *page = rx_buffer->page; + unsigned char *va = page_address(page) + rx_buffer->page_offset; unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) unsigned int truesize = IGB_RX_BUFSZ; #else - unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int truesize = SKB_DATA_ALIGN(size); #endif + unsigned int pull_len;
- if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) { - unsigned char *va = page_address(page) + rx_buffer->page_offset; + if (unlikely(skb_is_nonlinear(skb))) + goto add_tail_frag;
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { - igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); - va += IGB_TS_HDR_LEN; - size -= IGB_TS_HDR_LEN; - } + if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) { + igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); + va += IGB_TS_HDR_LEN; + size -= IGB_TS_HDR_LEN; + }
+ if (likely(size <= IGB_RX_HDR_LEN)) { memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
/* page is not reserved, we can reuse buffer as-is */ @@@ -6648,8 -6669,21 +6669,21 @@@ return false; }
+ /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + va += pull_len; + size -= pull_len; + + add_tail_frag: skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rx_buffer->page_offset, size, truesize); + (unsigned long)va & ~PAGE_MASK, size, truesize);
return igb_can_reuse_rx_page(rx_buffer, page, truesize); } @@@ -6791,62 -6825,6 +6825,6 @@@ static bool igb_is_non_eop(struct igb_r }
/** - * igb_pull_tail - igb specific version of skb_pull_tail - * @rx_ring: rx descriptor ring packet is being transacted on - * @rx_desc: pointer to the EOP Rx descriptor - * @skb: pointer to current skb being adjusted - * - * This function is an igb specific version of __pskb_pull_tail. The - * main difference between this version and the original function is that - * this function can make several assumptions about the state of things - * that allow for significant optimizations versus the standard function. - * As a result we can do things like drop a frag and maintain an accurate - * truesize for the skb. - */ - static void igb_pull_tail(struct igb_ring *rx_ring, - union e1000_adv_rx_desc *rx_desc, - struct sk_buff *skb) - { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned char *va; - unsigned int pull_len; - - /* it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { - /* retrieve timestamp from buffer */ - igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb); - - /* update pointers to remove timestamp header */ - skb_frag_size_sub(frag, IGB_TS_HDR_LEN); - frag->page_offset += IGB_TS_HDR_LEN; - skb->data_len -= IGB_TS_HDR_LEN; - skb->len -= IGB_TS_HDR_LEN; - - /* move va to start of packet data */ - va += IGB_TS_HDR_LEN; - } - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. - */ - pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; - } - - /** * igb_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor @@@ -6873,10 -6851,6 +6851,6 @@@ static bool igb_cleanup_headers(struct } }
- /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) - igb_pull_tail(rx_ring, rx_desc, skb); - /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; @@@ -7445,6 -7419,7 +7419,7 @@@ static int igb_resume(struct device *de
if (igb_init_interrupt_scheme(adapter, true)) { dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + rtnl_unlock(); return -ENOMEM; }
@@@ -7538,6 -7513,7 +7513,7 @@@ static int igb_sriov_reinit(struct pci_ igb_init_queue_configuration(adapter);
if (igb_init_interrupt_scheme(adapter, true)) { + rtnl_unlock(); dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); return -ENOMEM; } diff --combined drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index ae21e0b,7906234..ab28dc2 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@@ -248,8 -248,7 +248,7 @@@ static void ixgbe_check_minimum_link(st enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; struct pci_dev *pdev;
- /* determine whether to use the the parent device - */ + /* determine whether to use the parent device */ if (ixgbe_pcie_from_parent(&adapter->hw)) pdev = adapter->pdev->bus->parent->self; else @@@ -1360,14 -1359,31 +1359,31 @@@ static int __ixgbe_notify_dca(struct de }
#endif /* CONFIG_IXGBE_DCA */ + + #define IXGBE_RSS_L4_TYPES_MASK \ + ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP)) + static inline void ixgbe_rx_hash(struct ixgbe_ring *ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { - if (ring->netdev->features & NETIF_F_RXHASH) - skb_set_hash(skb, - le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), - PKT_HASH_TYPE_L3); + u16 rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + IXGBE_RXDADV_RSSTYPE_MASK; + + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? + PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); }
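Once the descriptor's RSS type field is extracted, the new ixgbe_rx_hash() reduces to a single bit test against a mask of the four L4 types. Here is a self-contained sketch of the same mask-and-test shape; the enum values below are invented for illustration and are not the hardware's real IXGBE_RXDADV_RSSTYPE_* encodings.

#include <stdio.h>

/* hypothetical RSS type codes standing in for IXGBE_RXDADV_RSSTYPE_* */
enum { RSS_NONE = 0, RSS_IPV4 = 1, RSS_IPV4_TCP = 2,
       RSS_IPV6_TCP = 3, RSS_IPV4_UDP = 7 };

#define RSS_L4_TYPES_MASK \
	((1ul << RSS_IPV4_TCP) | (1ul << RSS_IPV6_TCP) | (1ul << RSS_IPV4_UDP))

static const char *hash_level(unsigned int rss_type)
{
	if (!rss_type)		/* hardware reported no hash at all */
		return "none";
	return (RSS_L4_TYPES_MASK & (1ul << rss_type)) ? "L4" : "L3";
}

int main(void)
{
	printf("%s %s %s\n",
	       hash_level(RSS_IPV4),     /* L3: IP-only hash */
	       hash_level(RSS_IPV4_TCP), /* L4: ports were hashed too */
	       hash_level(RSS_NONE));
	return 0;
}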
#ifdef IXGBE_FCOE @@@ -1832,7 -1848,7 +1848,7 @@@ static void ixgbe_reuse_rx_page(struct
static inline bool ixgbe_page_is_reserved(struct page *page) { - return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); }
/** diff --combined drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 1d7b00b,88298a3..149a0b4 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@@ -457,6 -457,32 +457,32 @@@ static void ixgbevf_rx_skb(struct ixgbe napi_gro_receive(&q_vector->napi, skb); }
+ #define IXGBE_RSS_L4_TYPES_MASK \ + ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \ + (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP)) + + static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) + { + u16 rss_type; + + if (!(ring->netdev->features & NETIF_F_RXHASH)) + return; + + rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & + IXGBE_RXDADV_RSSTYPE_MASK; + + if (!rss_type) + return; + + skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), + (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? + PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); + } + /** * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum * @ring: structure containing ring specific data @@@ -506,6 -532,7 +532,7 @@@ static void ixgbevf_process_skb_fields( union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { + ixgbevf_rx_hash(rx_ring, rx_desc, skb); ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { @@@ -649,46 -676,6 +676,6 @@@ static void ixgbevf_alloc_rx_buffers(st }
/** - * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail - * @rx_ring: rx descriptor ring packet is being transacted on - * @skb: pointer to current skb being adjusted - * - * This function is an ixgbevf specific version of __pskb_pull_tail. The - * main difference between this version and the original function is that - * this function can make several assumptions about the state of things - * that allow for significant optimizations versus the standard function. - * As a result we can do things like drop a frag and maintain an accurate - * truesize for the skb. - **/ - static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, - struct sk_buff *skb) - { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; - unsigned char *va; - unsigned int pull_len; - - /* it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - /* we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. - */ - pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; - } - - /** * ixgbevf_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor @@@ -721,10 -708,6 +708,6 @@@ static bool ixgbevf_cleanup_headers(str } }
- /* place header in linear portion of buffer */ - if (skb_is_nonlinear(skb)) - ixgbevf_pull_tail(rx_ring, skb); - /* if eth_skb_pad returns an error the skb was freed */ if (eth_skb_pad(skb)) return true; @@@ -765,7 -748,7 +748,7 @@@ static void ixgbevf_reuse_rx_page(struc
static inline bool ixgbevf_page_is_reserved(struct page *page) { - return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); }
/** @@@ -789,16 -772,19 +772,19 @@@ static bool ixgbevf_add_rx_frag(struct struct sk_buff *skb) { struct page *page = rx_buffer->page; + unsigned char *va = page_address(page) + rx_buffer->page_offset; unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) unsigned int truesize = IXGBEVF_RX_BUFSZ; #else unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); #endif + unsigned int pull_len;
- if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { - unsigned char *va = page_address(page) + rx_buffer->page_offset; + if (unlikely(skb_is_nonlinear(skb))) + goto add_tail_frag;
+ if (likely(size <= IXGBEVF_RX_HDR_SIZE)) { memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
/* page is not reserved, we can reuse buffer as is */ @@@ -810,8 -796,21 +796,21 @@@ return false; }
+ /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + va += pull_len; + size -= pull_len; + + add_tail_frag: skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rx_buffer->page_offset, size, truesize); + (unsigned long)va & ~PAGE_MASK, size, truesize);
/* avoid re-using remote pages */ if (unlikely(ixgbevf_page_is_reserved(page))) @@@ -1697,22 -1696,25 +1696,25 @@@ static void ixgbevf_setup_vfmrqc(struc { struct ixgbe_hw *hw = &adapter->hw; u32 vfmrqc = 0, vfreta = 0; - u32 rss_key[10]; u16 rss_i = adapter->num_rx_queues; - int i, j; + u8 i, j;
/* Fill out hash function seeds */ - netdev_rss_key_fill(rss_key, sizeof(rss_key)); - for (i = 0; i < 10; i++) - IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]); + netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); + for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++) + IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]);
- /* Fill out redirection table */ - for (i = 0, j = 0; i < 64; i++, j++) { + for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) { if (j == rss_i) j = 0; - vfreta = (vfreta << 8) | (j * 0x1); - if ((i & 3) == 3) + + adapter->rss_indir_tbl[i] = j; + + vfreta |= j << (i & 0x3) * 8; + if ((i & 3) == 3) { IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta); + vfreta = 0; + } }
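The rewritten VFRETA loop packs four one-byte queue indices into each 32-bit register, records the choice in rss_indir_tbl, and, importantly, now clears the accumulator after every fourth entry. A standalone sketch of the packing, with the MMIO write replaced by a printf:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int rss_i = 3;	/* spread over three RX queues */
	uint32_t vfreta = 0;
	unsigned int i, j;

	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == rss_i)
			j = 0;

		/* place entry i in byte lane 0..3 of the current word */
		vfreta |= (uint32_t)j << ((i & 0x3) * 8);

		if ((i & 3) == 3) {
			/* word full: "write the register" ... */
			printf("VFRETA[%u] = 0x%08x\n", i >> 2, vfreta);
			/* ... and start the next one from zero */
			vfreta = 0;
		}
	}
	return 0;
}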
/* Perform hash on these packet types */ diff --combined drivers/net/phy/phy.c index 1e1fbb0,84b1fba..d972851 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@@ -353,6 -353,8 +353,8 @@@ int phy_ethtool_sset(struct phy_device
phydev->duplex = cmd->duplex;
+ phydev->mdix = cmd->eth_tp_mdix_ctrl; + /* Restart the PHY */ phy_start_aneg(phydev);
@@@ -377,6 -379,7 +379,7 @@@ int phy_ethtool_gset(struct phy_device cmd->transceiver = phy_is_internal(phydev) ? XCVR_INTERNAL : XCVR_EXTERNAL; cmd->autoneg = phydev->autoneg; + cmd->eth_tp_mdix_ctrl = phydev->mdix;
return 0; } @@@ -811,7 -814,6 +814,7 @@@ void phy_state_machine(struct work_stru bool needs_aneg = false, do_suspend = false; enum phy_state old_state; int err = 0; + int old_link;
mutex_lock(&phydev->lock);
@@@ -897,18 -899,11 +900,18 @@@ phydev->adjust_link(phydev->attached_dev); break; case PHY_RUNNING: - /* Only register a CHANGE if we are - * polling or ignoring interrupts + /* Only register a CHANGE if we are polling or ignoring + * interrupts and link changed since latest checking. */ - if (!phy_interrupt_is_valid(phydev)) - phydev->state = PHY_CHANGELINK; + if (!phy_interrupt_is_valid(phydev)) { + old_link = phydev->link; + err = phy_read_status(phydev); + if (err) + break; + + if (old_link != phydev->link) + phydev->state = PHY_CHANGELINK; + } break; case PHY_CHANGELINK: err = phy_read_status(phydev); diff --combined drivers/net/usb/qmi_wwan.c index 64a60af,1f7a7cd..6392ae3 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@@ -785,7 -785,7 +785,8 @@@ static const struct usb_device_id produ {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ + {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ + {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
/* 4. Gobi 1000 devices */ diff --combined include/linux/skbuff.h index 9b88536,065e10b..989307f9 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@@ -37,6 -37,7 +37,7 @@@ #include <net/flow_dissector.h> #include <linux/splice.h> #include <linux/in6.h> + #include <net/flow.h>
/* A. Checksumming of received packets by device. * @@@ -173,17 -174,24 +174,24 @@@ struct nf_bridge_info BRNF_PROTO_8021Q, BRNF_PROTO_PPPOE } orig_proto:8; - bool pkt_otherhost; + u8 pkt_otherhost:1; + u8 in_prerouting:1; + u8 bridged_dnat:1; __u16 frag_max_size; - unsigned int mask; struct net_device *physindev; union { - struct net_device *physoutdev; - char neigh_header[8]; - }; - union { + /* prerouting: detect dnat in orig/reply direction */ __be32 ipv4_daddr; struct in6_addr ipv6_daddr; + + /* after prerouting + nat detected: store original source + * mac since neigh resolution overwrites it, only used while + * skb is out in neigh layer. + */ + char neigh_header[8]; + + /* always valid & non-NULL from FORWARD on, for physdev match */ + struct net_device *physoutdev; }; }; #endif @@@ -506,6 -514,7 +514,7 @@@ static inline u32 skb_mstamp_us_delta(c * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS * @napi_id: id of the NAPI struct this skb came from * @secmark: security marking + * @offload_fwd_mark: fwding offload mark * @mark: Generic packet mark * @vlan_proto: vlan encapsulation protocol * @vlan_tci: vlan tag control information @@@ -650,9 -659,15 +659,15 @@@ struct sk_buff unsigned int sender_cpu; }; #endif + union { #ifdef CONFIG_NETWORK_SECMARK - __u32 secmark; + __u32 secmark; + #endif + #ifdef CONFIG_NET_SWITCHDEV + __u32 offload_fwd_mark; #endif + }; + union { __u32 mark; __u32 reserved_tailroom; @@@ -938,6 -953,26 +953,26 @@@ static inline __u32 skb_get_hash(struc return skb->hash; }
+ __u32 __skb_get_hash_flowi6(struct sk_buff *skb, struct flowi6 *fl6); + + static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, struct flowi6 *fl6) + { + if (!skb->l4_hash && !skb->sw_hash) + __skb_get_hash_flowi6(skb, fl6); + + return skb->hash; + } + + __u32 __skb_get_hash_flowi4(struct sk_buff *skb, struct flowi4 *fl); + + static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, struct flowi4 *fl4) + { + if (!skb->l4_hash && !skb->sw_hash) + __skb_get_hash_flowi4(skb, fl4); + + return skb->hash; + } + __u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
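Both new flowi helpers follow the same lazy compute-and-cache shape as plain skb_get_hash(): dissect only when neither an L4 nor a software hash is already cached, then hand back whatever is stored. A toy of that shape, with invented fields rather than the real sk_buff layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pkt {			/* toy stand-in for sk_buff hash state */
	uint32_t hash;
	bool l4_hash;
	bool sw_hash;
};

static void __compute_hash(struct pkt *p)
{
	p->hash = 0xdeadbeef;	/* pretend flow dissection ran here */
	p->sw_hash = true;	/* mark it software-computed */
}

static uint32_t get_hash(struct pkt *p)
{
	if (!p->l4_hash && !p->sw_hash)	/* nothing valid cached yet */
		__compute_hash(p);
	return p->hash;
}

int main(void)
{
	struct pkt p = { 0 };

	/* first call computes, second call is a pure cache hit */
	printf("0x%x 0x%x\n", get_hash(&p), get_hash(&p));
	return 0;
}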
static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) @@@ -1602,16 -1637,20 +1637,16 @@@ static inline void __skb_fill_page_desc skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
/* - * Propagate page->pfmemalloc to the skb if we can. The problem is - * that not all callers have unique ownership of the page. If - * pfmemalloc is set, we check the mapping as a mapping implies - * page->index is set (index and pfmemalloc share space). - * If it's a valid mapping, we cannot use page->pfmemalloc but we - * do not lose pfmemalloc information as the pages would not be - * allocated using __GFP_MEMALLOC. + * Propagate page pfmemalloc to the skb if we can. The problem is + * that not all callers have unique ownership of the page but rely + * on page_is_pfmemalloc doing the right thing(tm). */ frag->page.p = page; frag->page_offset = off; skb_frag_size_set(frag, size);
page = compound_head(page); - if (page->pfmemalloc && !page->mapping) + if (page_is_pfmemalloc(page)) skb->pfmemalloc = true; }
@@@ -2259,7 -2298,7 +2294,7 @@@ static inline struct page *dev_alloc_pa static inline void skb_propagate_pfmemalloc(struct page *page, struct sk_buff *skb) { - if (page && page->pfmemalloc) + if (page_is_pfmemalloc(page)) skb->pfmemalloc = true; }
@@@ -2667,12 -2706,6 +2702,6 @@@ static inline void skb_frag_list_init(s skb_shinfo(skb)->frag_list = NULL; }
- static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag) - { - frag->next = skb_shinfo(skb)->frag_list; - skb_shinfo(skb)->frag_list = frag; - } - #define skb_walk_frags(skb, iter) \ for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
@@@ -3464,5 -3497,6 +3493,6 @@@ static inline unsigned int skb_gso_netw skb_network_header(skb); return hdr_len + skb_gso_transport_seglen(skb); } + #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */ diff --combined kernel/events/core.c index e6feb51,e2c6a88..a1339b1 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@@ -1868,6 -1868,8 +1868,6 @@@ event_sched_in(struct perf_event *event
perf_pmu_disable(event->pmu);
- event->tstamp_running += tstamp - event->tstamp_stopped; - perf_set_shadow_time(event, ctx, tstamp);
perf_log_itrace_start(event); @@@ -1879,8 -1881,6 +1879,8 @@@ goto out; }
+ event->tstamp_running += tstamp - event->tstamp_stopped; + if (!is_software_event(event)) cpuctx->active_oncpu++; if (!ctx->nr_active++) @@@ -3212,6 -3212,59 +3212,59 @@@ static inline u64 perf_event_count(stru return __perf_event_count(event); }
+ /* + * NMI-safe method to read a local event, that is an event that + * is: + * - either for the current task, or for this CPU + * - does not have inherit set, for inherited task events + * will not be local and we cannot read them atomically + * - must not have a pmu::count method + */ + u64 perf_event_read_local(struct perf_event *event) + { + unsigned long flags; + u64 val; + + /* + * Disabling interrupts avoids all counter scheduling (context + * switches, timer based rotation and IPIs). + */ + local_irq_save(flags); + + /* If this is a per-task event, it must be for current */ + WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) && + event->hw.target != current); + + /* If this is a per-CPU event, it must be for this CPU */ + WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) && + event->cpu != smp_processor_id()); + + /* + * It must not be an event with inherit set, we cannot read + * all child counters from atomic context. + */ + WARN_ON_ONCE(event->attr.inherit); + + /* + * It must not have a pmu::count method, those are not + * NMI safe. + */ + WARN_ON_ONCE(event->pmu->count); + + /* + * If the event is currently on this CPU, its either a per-task event, + * or local to this CPU. Furthermore it means its ACTIVE (otherwise + * oncpu == -1). + */ + if (event->oncpu == smp_processor_id()) + event->pmu->read(event); + + val = local64_read(&event->count); + local_irq_restore(flags); + + return val; + } + static u64 perf_event_read(struct perf_event *event) { /* @@@ -3958,21 -4011,28 +4011,21 @@@ static void perf_event_for_each(struct perf_event_for_each_child(sibling, func); }
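perf_event_read_local() trades locks for locality guarantees: interrupts off, loud warnings if the event could legitimately live on another task or CPU, then a plain counter read. A userspace toy of that precondition-then-read shape, with assert() standing in for WARN_ON_ONCE() and an invented ownership field:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

struct counter {
	atomic_ulong count;
	int owner_cpu;		/* -1 would mean "per-task", else a CPU id */
};

static unsigned long read_local(struct counter *c, int this_cpu)
{
	/* a remote counter must never take the lockless fast path */
	assert(c->owner_cpu == -1 || c->owner_cpu == this_cpu);
	return atomic_load(&c->count);
}

int main(void)
{
	struct counter c = { .owner_cpu = 0 };

	atomic_store(&c.count, 42);
	printf("%lu\n", read_local(&c, 0));	/* ok: same "CPU" */
	return 0;
}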
-static int perf_event_period(struct perf_event *event, u64 __user *arg) -{ - struct perf_event_context *ctx = event->ctx; - int ret = 0, active; +struct period_event { + struct perf_event *event; u64 value; +};
- if (!is_sampling_event(event)) - return -EINVAL; - - if (copy_from_user(&value, arg, sizeof(value))) - return -EFAULT; - - if (!value) - return -EINVAL; +static int __perf_event_period(void *info) +{ + struct period_event *pe = info; + struct perf_event *event = pe->event; + struct perf_event_context *ctx = event->ctx; + u64 value = pe->value; + bool active;
- raw_spin_lock_irq(&ctx->lock); + raw_spin_lock(&ctx->lock); if (event->attr.freq) { - if (value > sysctl_perf_event_sample_rate) { - ret = -EINVAL; - goto unlock; - } - event->attr.sample_freq = value; } else { event->attr.sample_period = value; @@@ -3991,53 -4051,11 +4044,53 @@@ event->pmu->start(event, PERF_EF_RELOAD); perf_pmu_enable(ctx->pmu); } + raw_spin_unlock(&ctx->lock);
-unlock: + return 0; +} + +static int perf_event_period(struct perf_event *event, u64 __user *arg) +{ + struct period_event pe = { .event = event, }; + struct perf_event_context *ctx = event->ctx; + struct task_struct *task; + u64 value; + + if (!is_sampling_event(event)) + return -EINVAL; + + if (copy_from_user(&value, arg, sizeof(value))) + return -EFAULT; + + if (!value) + return -EINVAL; + + if (event->attr.freq && value > sysctl_perf_event_sample_rate) + return -EINVAL; + + task = ctx->task; + pe.value = value; + + if (!task) { + cpu_function_call(event->cpu, __perf_event_period, &pe); + return 0; + } + +retry: + if (!task_function_call(task, __perf_event_period, &pe)) + return 0; + + raw_spin_lock_irq(&ctx->lock); + if (ctx->is_active) { + raw_spin_unlock_irq(&ctx->lock); + task = ctx->task; + goto retry; + } + + __perf_event_period(&pe); raw_spin_unlock_irq(&ctx->lock);
- return ret; + return 0; }
static const struct file_operations perf_fops; @@@ -4775,20 -4793,12 +4828,20 @@@ static const struct file_operations per * to user-space before waking everybody up. */
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event) +{ + /* only the parent has fasync state */ + if (event->parent) + event = event->parent; + return &event->fasync; +} + void perf_event_wakeup(struct perf_event *event) { ring_buffer_wakeup(event);
if (event->pending_kill) { - kill_fasync(&event->fasync, SIGIO, event->pending_kill); + kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); event->pending_kill = 0; } } @@@ -6167,7 -6177,7 +6220,7 @@@ static int __perf_event_overflow(struc else perf_event_output(event, data, regs);
- if (event->fasync && event->pending_kill) { + if (*perf_event_fasync(event) && event->pending_kill) { event->pending_wakeup = 1; irq_work_queue(&event->pending); } @@@ -8617,6 -8627,31 +8670,31 @@@ void perf_event_delayed_put(struct task WARN_ON_ONCE(task->perf_event_ctxp[ctxn]); }
+ struct perf_event *perf_event_get(unsigned int fd) + { + int err; + struct fd f; + struct perf_event *event; + + err = perf_fget_light(fd, &f); + if (err) + return ERR_PTR(err); + + event = f.file->private_data; + atomic_long_inc(&event->refcount); + fdput(f); + + return event; + } + + const struct perf_event_attr *perf_event_attrs(struct perf_event *event) + { + if (!event) + return ERR_PTR(-EINVAL); + + return &event->attr; + } + /* * inherit an event from parent task to child task: */ diff --combined lib/Kconfig index f6aa03d,278890d..5b1a994 --- a/lib/Kconfig +++ b/lib/Kconfig @@@ -53,6 -53,9 +53,6 @@@ config GENERIC_I config STMP_DEVICE bool
-config PERCPU_RWSEM - bool - config ARCH_USE_CMPXCHG_LOCKREF bool
@@@ -457,16 -460,6 +457,6 @@@ config ARCH_HAS_ATOMIC64_DEC_IF_POSITIV config LRU_CACHE tristate
- config AVERAGE - bool "Averaging functions" - help - This option is provided for the case where no in-kernel-tree - modules require averaging functions, but a module built outside - the kernel tree does. Such modules that use library averaging - functions require Y here. - - If unsure, say N. - config CLZ_TAB bool
diff --combined net/batman-adv/translation-table.c index 5809b39,db06de2..c1eb7b7 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@@ -19,6 -19,7 +19,7 @@@ #include "main.h"
#include <linux/atomic.h> + #include <linux/bitops.h> #include <linux/bug.h> #include <linux/byteorder/generic.h> #include <linux/compiler.h> @@@ -595,11 -596,8 +596,11 @@@ bool batadv_tt_local_add(struct net_dev /* increase the refcounter of the related vlan */ vlan = batadv_softif_vlan_get(bat_priv, vid); if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d", - addr, BATADV_PRINT_VID(vid))) + addr, BATADV_PRINT_VID(vid))) { + kfree(tt_local); + tt_local = NULL; goto out; + }
batadv_dbg(BATADV_DBG_TT, bat_priv, "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n", @@@ -1882,7 -1880,7 +1883,7 @@@ void batadv_tt_global_del_orig(struct b } spin_unlock_bh(list_lock); } - orig_node->capa_initialized &= ~BATADV_ORIG_CAPA_HAS_TT; + clear_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized); }
static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global, @@@ -2215,7 -2213,7 +2216,7 @@@ static void batadv_tt_req_list_free(str spin_lock_bh(&bat_priv->tt.req_list_lock);
list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) { - list_del(&node->list); + list_del_init(&node->list); kfree(node); }
@@@ -2251,7 -2249,7 +2252,7 @@@ static void batadv_tt_req_purge(struct list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) { if (batadv_has_timed_out(node->issued_at, BATADV_TT_REQUEST_TIMEOUT)) { - list_del(&node->list); + list_del_init(&node->list); kfree(node); } } @@@ -2533,7 -2531,8 +2534,8 @@@ out batadv_hardif_free_ref(primary_if); if (ret && tt_req_node) { spin_lock_bh(&bat_priv->tt.req_list_lock); - list_del(&tt_req_node->list); + /* list_del_init() verifies tt_req_node still is in the list */ + list_del_init(&tt_req_node->list); spin_unlock_bh(&bat_priv->tt.req_list_lock); kfree(tt_req_node); } @@@ -2841,7 -2840,7 +2843,7 @@@ static void _batadv_tt_update_changes(s return; } } - orig_node->capa_initialized |= BATADV_ORIG_CAPA_HAS_TT; + set_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized); }
static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv, @@@ -2970,7 -2969,7 +2972,7 @@@ static void batadv_handle_tt_response(s list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) { if (!batadv_compare_eth(node->addr, resp_src)) continue; - list_del(&node->list); + list_del_init(&node->list); kfree(node); }
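All of the tt req-list hunks above replace list_del() with list_del_init() for the same reason: an init'ed node points at itself, so a second code path can detect that it is already unlinked (or even delete it again) without corrupting anything. A minimal self-contained illustration of why the _init variant is the safe one:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	list_init(e);		/* the node now points at itself */
}

int main(void)
{
	struct list_head head, node;

	list_init(&head);
	node.next = head.next;	/* open-coded list_add(&node, &head) */
	node.prev = &head;
	head.next->prev = &node;
	head.next = &node;

	list_del_init(&node);
	if (list_empty(&node))	/* another path can see it is gone */
		printf("node already unlinked, skip the free path\n");
	list_del_init(&node);	/* and a redundant delete is a no-op */
	return 0;
}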
@@@ -3343,7 -3342,8 +3345,8 @@@ static void batadv_tt_update_orig(struc bool has_tt_init;
tt_vlan = (struct batadv_tvlv_tt_vlan_data *)tt_buff; - has_tt_init = orig_node->capa_initialized & BATADV_ORIG_CAPA_HAS_TT; + has_tt_init = test_bit(BATADV_ORIG_CAPA_HAS_TT, + &orig_node->capa_initialized);
/* orig table not initialised AND first diff is in the OGM OR the ttvn * increased by one -> we can apply the attached changes diff --combined net/bridge/br_multicast.c index 1285eaf,0752796..66efdc2 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@@ -283,6 -283,8 +283,8 @@@ static void br_multicast_del_pg(struct rcu_assign_pointer(*pp, p->next); hlist_del_init(&p->mglist); del_timer(&p->timer); + br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB, + p->state); call_rcu_bh(&p->rcu, br_multicast_free_pg);
if (!mp->ports && !mp->mglist && @@@ -704,7 -706,7 +706,7 @@@ static int br_multicast_add_group(struc if (unlikely(!p)) goto err; rcu_assign_pointer(*pp, p); - br_mdb_notify(br->dev, port, group, RTM_NEWMDB); + br_mdb_notify(br->dev, port, group, RTM_NEWMDB, MDB_TEMPORARY);
found: mod_timer(&p->timer, now + br->multicast_membership_interval); @@@ -764,6 -766,7 +766,7 @@@ static void br_multicast_router_expired goto out;
hlist_del_init_rcu(&port->rlist); + br_rtr_notify(br->dev, port, RTM_DELMDB);
out: spin_unlock(&br->multicast_lock); @@@ -924,6 -927,15 +927,15 @@@ void br_multicast_add_port(struct net_b
void br_multicast_del_port(struct net_bridge_port *port) { + struct net_bridge *br = port->br; + struct net_bridge_port_group *pg; + struct hlist_node *n; + + /* Take care of the remaining groups, only perm ones should be left */ + spin_lock_bh(&br->multicast_lock); + hlist_for_each_entry_safe(pg, n, &port->mglist, mglist) + br_multicast_del_pg(br, pg); + spin_unlock_bh(&br->multicast_lock); del_timer_sync(&port->multicast_router_timer); }
@@@ -963,10 -975,13 +975,13 @@@ void br_multicast_disable_port(struct n
spin_lock(&br->multicast_lock); hlist_for_each_entry_safe(pg, n, &port->mglist, mglist) - br_multicast_del_pg(br, pg); + if (pg->state == MDB_TEMPORARY) + br_multicast_del_pg(br, pg);
- if (!hlist_unhashed(&port->rlist)) + if (!hlist_unhashed(&port->rlist)) { hlist_del_init_rcu(&port->rlist); + br_rtr_notify(br->dev, port, RTM_DELMDB); + } del_timer(&port->multicast_router_timer); del_timer(&port->ip4_own_query.timer); #if IS_ENABLED(CONFIG_IPV6) @@@ -1204,6 -1219,7 +1219,7 @@@ static void br_multicast_add_router(str hlist_add_behind_rcu(&port->rlist, slot); else hlist_add_head_rcu(&port->rlist, &br->router_list); + br_rtr_notify(br->dev, port, RTM_NEWMDB); }
static void br_multicast_mark_router(struct net_bridge *br, @@@ -1437,7 -1453,8 +1453,8 @@@ br_multicast_leave_group(struct net_bri hlist_del_init(&p->mglist); del_timer(&p->timer); call_rcu_bh(&p->rcu, br_multicast_free_pg); - br_mdb_notify(br->dev, port, group, RTM_DELMDB); + br_mdb_notify(br->dev, port, group, RTM_DELMDB, + p->state);
if (!mp->ports && !mp->mglist && netif_running(br->dev)) @@@ -1591,7 -1608,7 +1608,7 @@@ static int br_multicast_ipv4_rcv(struc break; }
- if (skb_trimmed) + if (skb_trimmed && skb_trimmed != skb) kfree_skb(skb_trimmed);
return err; @@@ -1636,7 -1653,7 +1653,7 @@@ static int br_multicast_ipv6_rcv(struc break; }
- if (skb_trimmed) + if (skb_trimmed && skb_trimmed != skb) kfree_skb(skb_trimmed);
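Both the IGMP and MLD rcv hunks gain the same guard because the trim helper may return the original skb (trimmed in place) rather than a clone; freeing it unconditionally was a double free in waiting. The ownership rule, reduced to a runnable toy; buf and trim are invented names, not the bridge code's API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { size_t len; char *data; };

/* may return b itself (trimmed in place) or a fresh, shorter copy */
static struct buf *trim(struct buf *b, size_t new_len, int in_place)
{
	struct buf *t;

	if (in_place) {
		b->len = new_len;
		return b;
	}
	t = malloc(sizeof(*t));
	t->data = malloc(new_len);
	memcpy(t->data, b->data, new_len);
	t->len = new_len;
	return t;
}

int main(void)
{
	char payload[] = "hello";
	struct buf b = { .len = 5, .data = payload };
	struct buf *trimmed = trim(&b, 4, 1);

	/* old check: "if (trimmed) free(trimmed)" would free &b here */
	if (trimmed && trimmed != &b) {
		free(trimmed->data);
		free(trimmed);
	}
	return 0;
}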
return err; @@@ -1754,12 -1771,6 +1771,6 @@@ void br_multicast_open(struct net_bridg
void br_multicast_stop(struct net_bridge *br) { - struct net_bridge_mdb_htable *mdb; - struct net_bridge_mdb_entry *mp; - struct hlist_node *n; - u32 ver; - int i; - del_timer_sync(&br->multicast_router_timer); del_timer_sync(&br->ip4_other_query.timer); del_timer_sync(&br->ip4_own_query.timer); @@@ -1767,6 -1778,15 +1778,15 @@@ del_timer_sync(&br->ip6_other_query.timer); del_timer_sync(&br->ip6_own_query.timer); #endif + } + + void br_multicast_dev_del(struct net_bridge *br) + { + struct net_bridge_mdb_htable *mdb; + struct net_bridge_mdb_entry *mp; + struct hlist_node *n; + u32 ver; + int i;
spin_lock_bh(&br->multicast_lock); mdb = mlock_dereference(br->mdb, br); @@@ -1834,8 -1854,10 +1854,10 @@@ int br_multicast_set_port_router(struc p->multicast_router = val; err = 0;
- if (val < 2 && !hlist_unhashed(&p->rlist)) + if (val < 2 && !hlist_unhashed(&p->rlist)) { hlist_del_init_rcu(&p->rlist); + br_rtr_notify(br->dev, p, RTM_DELMDB); + }
if (val == 1) break; diff --combined net/ipv4/fib_trie.c index b0c6258,1243c79..5154f81 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@@ -1423,8 -1423,11 +1423,11 @@@ found nh->nh_flags & RTNH_F_LINKDOWN && !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE)) continue; - if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif) - continue; + if (!(flp->flowi4_flags & FLOWI_FLAG_VRFSRC)) { + if (flp->flowi4_oif && + flp->flowi4_oif != nh->nh_oif) + continue; + }
if (!(fib_flags & FIB_LOOKUP_NOREF)) atomic_inc(&fi->fib_clntref); @@@ -2465,7 -2468,7 +2468,7 @@@ static struct key_vector *fib_route_get key = l->key + 1; iter->pos++;
- if (pos-- <= 0) + if (--pos <= 0) break;
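The fib_trie change is a classic pre- vs post-decrement off-by-one: pos-- <= 0 tests the old value of pos, so the walk ran one leaf further than --pos <= 0 does, and continuation reads resumed at the wrong entry. A two-loop demonstration of the one-iteration difference:

#include <stdio.h>

int main(void)
{
	int pos, n;

	for (pos = 3, n = 0; ; n++)
		if (pos-- <= 0)		/* tests 3, 2, 1, 0 before breaking */
			break;
	printf("pos-- <= 0 : %d full iterations\n", n);	/* 3 */

	for (pos = 3, n = 0; ; n++)
		if (--pos <= 0)		/* tests 2, 1, 0 before breaking */
			break;
	printf("--pos <= 0 : %d full iterations\n", n);	/* 2 */
	return 0;
}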
l = NULL; diff --combined net/ipv6/ip6_fib.c index 548c623,5693b5e..20a1b85 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@@ -32,6 -32,7 +32,7 @@@ #include <net/ipv6.h> #include <net/ndisc.h> #include <net/addrconf.h> + #include <net/lwtunnel.h>
#include <net/ip6_fib.h> #include <net/ip6_route.h> @@@ -172,13 -173,12 +173,14 @@@ static void rt6_free_pcpu(struct rt6_in *ppcpu_rt = NULL; } } + + non_pcpu_rt->rt6i_pcpu = NULL; }
static void rt6_release(struct rt6_info *rt) { if (atomic_dec_and_test(&rt->rt6i_ref)) { + lwtstate_put(rt->rt6i_lwtstate); rt6_free_pcpu(rt); dst_free(&rt->dst); } diff --combined net/ipv6/route.c index d155864,c373304..8c985a1 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@@ -58,6 -58,7 +58,7 @@@ #include <net/netevent.h> #include <net/netlink.h> #include <net/nexthop.h> + #include <net/lwtunnel.h>
#include <asm/uaccess.h>
@@@ -318,7 -319,8 +319,7 @@@ static const struct rt6_info ip6_blk_ho /* allocate dst with ip6_dst_ops */ static struct rt6_info *__ip6_dst_alloc(struct net *net, struct net_device *dev, - int flags, - struct fib6_table *table) + int flags) { struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0, DST_OBSOLETE_FORCE_CHK, flags); @@@ -335,9 -337,10 +336,9 @@@
static struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev, - int flags, - struct fib6_table *table) + int flags) { - struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags, table); + struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
if (rt) { rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC); @@@ -542,6 -545,7 +543,7 @@@ static void rt6_probe_deferred(struct w
static void rt6_probe(struct rt6_info *rt) { + struct __rt6_probe_work *work; struct neighbour *neigh; /* * Okay, this does not seem to be appropriate @@@ -556,34 -560,33 +558,33 @@@ rcu_read_lock_bh(); neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway); if (neigh) { - write_lock(&neigh->lock); if (neigh->nud_state & NUD_VALID) goto out; - } - - if (!neigh || - time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) { - struct __rt6_probe_work *work;
+ work = NULL; + write_lock(&neigh->lock); + if (!(neigh->nud_state & NUD_VALID) && + time_after(jiffies, + neigh->updated + + rt->rt6i_idev->cnf.rtr_probe_interval)) { + work = kmalloc(sizeof(*work), GFP_ATOMIC); + if (work) + __neigh_set_probe_once(neigh); + } + write_unlock(&neigh->lock); + } else { work = kmalloc(sizeof(*work), GFP_ATOMIC); + }
- if (neigh && work) - __neigh_set_probe_once(neigh); - - if (neigh) - write_unlock(&neigh->lock); + if (work) { + INIT_WORK(&work->work, rt6_probe_deferred); + work->target = rt->rt6i_gateway; + dev_hold(rt->dst.dev); + work->dev = rt->dst.dev; + schedule_work(&work->work); + }
- if (work) { - INIT_WORK(&work->work, rt6_probe_deferred); - work->target = rt->rt6i_gateway; - dev_hold(rt->dst.dev); - work->dev = rt->dst.dev; - schedule_work(&work->work); - } - } else { out: - write_unlock(&neigh->lock); - } rcu_read_unlock_bh(); } #else @@@ -662,6 -665,12 +663,12 @@@ static struct rt6_info *find_match(stru { int m; bool match_do_rr = false; + struct inet6_dev *idev = rt->rt6i_idev; + struct net_device *dev = rt->dst.dev; + + if (dev && !netif_carrier_ok(dev) && + idev->cnf.ignore_routes_with_linkdown) + goto out;
if (rt6_check_expired(rt)) goto out; @@@ -948,7 -957,8 +955,7 @@@ static struct rt6_info *ip6_rt_cache_al if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU)) ort = (struct rt6_info *)ort->dst.from;
- rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, - 0, ort->rt6i_table); + rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);
if (!rt) return NULL; @@@ -980,7 -990,8 +987,7 @@@ static struct rt6_info *ip6_rt_pcpu_all struct rt6_info *pcpu_rt;
pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev), - rt->dst.dev, rt->dst.flags, - rt->rt6i_table); + rt->dst.dev, rt->dst.flags);
if (!pcpu_rt) return NULL; @@@ -993,53 -1004,32 +1000,53 @@@ /* It should be called with read_lock_bh(&tb6_lock) acquired */ static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt) { - struct rt6_info *pcpu_rt, *prev, **p; + struct rt6_info *pcpu_rt, **p;
p = this_cpu_ptr(rt->rt6i_pcpu); pcpu_rt = *p;
- if (pcpu_rt) - goto done; + if (pcpu_rt) { + dst_hold(&pcpu_rt->dst); + rt6_dst_from_metrics_check(pcpu_rt); + } + return pcpu_rt; +} + +static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt) +{ + struct fib6_table *table = rt->rt6i_table; + struct rt6_info *pcpu_rt, *prev, **p;
pcpu_rt = ip6_rt_pcpu_alloc(rt); if (!pcpu_rt) { struct net *net = dev_net(rt->dst.dev);
- pcpu_rt = net->ipv6.ip6_null_entry; - goto done; + dst_hold(&net->ipv6.ip6_null_entry->dst); + return net->ipv6.ip6_null_entry; }
- prev = cmpxchg(p, NULL, pcpu_rt); - if (prev) { - /* If someone did it before us, return prev instead */ + read_lock_bh(&table->tb6_lock); + if (rt->rt6i_pcpu) { + p = this_cpu_ptr(rt->rt6i_pcpu); + prev = cmpxchg(p, NULL, pcpu_rt); + if (prev) { + /* If someone did it before us, return prev instead */ + dst_destroy(&pcpu_rt->dst); + pcpu_rt = prev; + } + } else { + /* rt has been removed from the fib6 tree + * before we have a chance to acquire the read_lock. + * In this case, don't bother to create a pcpu rt + * since rt is going away anyway. The next + * dst_check() will trigger a re-lookup. + */ dst_destroy(&pcpu_rt->dst); - pcpu_rt = prev; + pcpu_rt = rt; } - -done: dst_hold(&pcpu_rt->dst); rt6_dst_from_metrics_check(pcpu_rt); + read_unlock_bh(&table->tb6_lock); return pcpu_rt; }
@@@ -1114,22 -1104,9 +1121,22 @@@ redo_rt6_select rt->dst.lastuse = jiffies; rt->dst.__use++; pcpu_rt = rt6_get_pcpu_route(rt); - read_unlock_bh(&table->tb6_lock); + + if (pcpu_rt) { + read_unlock_bh(&table->tb6_lock); + } else { + /* We have to do the read_unlock first + * because rt6_make_pcpu_route() may trigger + * ip6_dst_gc() which will take the write_lock. + */ + dst_hold(&rt->dst); + read_unlock_bh(&table->tb6_lock); + pcpu_rt = rt6_make_pcpu_route(rt); + dst_release(&rt->dst); + }
return pcpu_rt; + } }
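rt6_make_pcpu_route() keeps the lockless install step: build a candidate, cmpxchg it into the per-cpu slot, and throw it away if another CPU won the race. A userspace analog using C11 atomics, where one global slot stands in for this_cpu_ptr() and the route type is invented:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct route { int id; };

static _Atomic(struct route *) slot;	/* stand-in for *this_cpu_ptr() */

static struct route *get_or_install(int id)
{
	struct route *expected = NULL;
	struct route *cached = atomic_load(&slot);
	struct route *mine;

	if (cached)
		return cached;		/* fast path: already populated */

	mine = malloc(sizeof(*mine));
	mine->id = id;

	if (!atomic_compare_exchange_strong(&slot, &expected, mine)) {
		free(mine);		/* someone beat us: use theirs */
		return expected;
	}
	return mine;
}

int main(void)
{
	printf("route %d\n", get_or_install(7)->id);
	printf("route %d\n", get_or_install(9)->id);	/* still 7: cached */
	return 0;
}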
@@@ -1585,7 -1562,7 +1592,7 @@@ struct dst_entry *icmp6_dst_alloc(struc if (unlikely(!idev)) return ERR_PTR(-ENODEV);
- rt = ip6_dst_alloc(net, dev, 0, NULL); + rt = ip6_dst_alloc(net, dev, 0); if (unlikely(!rt)) { in6_dev_put(idev); dst = ERR_PTR(-ENOMEM); @@@ -1772,8 -1749,7 +1779,8 @@@ int ip6_route_add(struct fib6_config *c if (!table) goto out;
- rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table); + rt = ip6_dst_alloc(net, NULL, + (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
if (!rt) { err = -ENOMEM; @@@ -1801,6 -1777,24 +1808,24 @@@
rt->dst.output = ip6_output;
+ if (cfg->fc_encap) { + struct lwtunnel_state *lwtstate; + + err = lwtunnel_build_state(dev, cfg->fc_encap_type, + cfg->fc_encap, &lwtstate); + if (err) + goto out; + rt->rt6i_lwtstate = lwtstate_get(lwtstate); + if (lwtunnel_output_redirect(rt->rt6i_lwtstate)) { + rt->rt6i_lwtstate->orig_output = rt->dst.output; + rt->dst.output = lwtunnel_output6; + } + if (lwtunnel_input_redirect(rt->rt6i_lwtstate)) { + rt->rt6i_lwtstate->orig_input = rt->dst.input; + rt->dst.input = lwtunnel_input6; + } + } + ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len); rt->rt6i_dst.plen = cfg->fc_dst_len; if (rt->rt6i_dst.plen == 128) @@@ -2180,6 -2174,7 +2205,7 @@@ static void ip6_rt_copy_init(struct rt6 #endif rt->rt6i_prefsrc = ort->rt6i_prefsrc; rt->rt6i_table = ort->rt6i_table; + rt->rt6i_lwtstate = lwtstate_get(ort->rt6i_lwtstate); }
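The lwtunnel hook-up in ip6_route_add() above is function-pointer interposition: the current dst output handler is saved into the tunnel state, the dst is repointed at the lwtunnel wrapper, and the wrapper chains back to the saved hook after doing its encap work. A tiny standalone analog, all names illustrative:

#include <stdio.h>

struct pkt;
typedef int (*output_fn)(struct pkt *);

static int plain_output(struct pkt *p)
{
	(void)p;
	puts("plain xmit");
	return 0;
}

static output_fn orig_output;	/* stand-in for lwtstate->orig_output */

static int encap_output(struct pkt *p)
{
	puts("add encap header");	/* tunnel work happens first */
	return orig_output(p);		/* then chain to the saved hook */
}

int main(void)
{
	output_fn out = plain_output;	/* rt->dst.output before the hunk */

	orig_output = out;		/* save the original ... */
	out = encap_output;		/* ... and interpose the wrapper */

	return out(NULL);
}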
#ifdef CONFIG_IPV6_ROUTE_INFO @@@ -2430,7 -2425,7 +2456,7 @@@ struct rt6_info *addrconf_dst_alloc(str { struct net *net = dev_net(idev->dev); struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, - DST_NOCOUNT, NULL); + DST_NOCOUNT); if (!rt) return ERR_PTR(-ENOMEM);
@@@ -2628,6 -2623,8 +2654,8 @@@ static const struct nla_policy rtm_ipv6 [RTA_METRICS] = { .type = NLA_NESTED }, [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) }, [RTA_PREF] = { .type = NLA_U8 }, + [RTA_ENCAP_TYPE] = { .type = NLA_U16 }, + [RTA_ENCAP] = { .type = NLA_NESTED }, };
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, @@@ -2722,6 -2719,12 +2750,12 @@@ cfg->fc_flags |= RTF_PREF(pref); }
+ if (tb[RTA_ENCAP]) + cfg->fc_encap = tb[RTA_ENCAP]; + + if (tb[RTA_ENCAP_TYPE]) + cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]); + err = 0; errout: return err; @@@ -2754,6 -2757,10 +2788,10 @@@ beginning r_cfg.fc_gateway = nla_get_in6_addr(nla); r_cfg.fc_flags |= RTF_GATEWAY; } + r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP); + nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE); + if (nla) + r_cfg.fc_encap_type = nla_get_u16(nla); } err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg); if (err) { @@@ -2816,7 -2823,7 +2854,7 @@@ static int inet6_rtm_newroute(struct sk return ip6_route_add(&cfg); }
- static inline size_t rt6_nlmsg_size(void) + static inline size_t rt6_nlmsg_size(struct rt6_info *rt) { return NLMSG_ALIGN(sizeof(struct rtmsg)) + nla_total_size(16) /* RTA_SRC */ @@@ -2830,7 -2837,8 +2868,8 @@@ + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */ + nla_total_size(sizeof(struct rta_cacheinfo)) + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */ - + nla_total_size(1); /* RTA_PREF */ + + nla_total_size(1) /* RTA_PREF */ + + lwtunnel_get_encap_size(rt->rt6i_lwtstate); }
static int rt6_fill_node(struct net *net, @@@ -2891,6 -2899,11 +2930,11 @@@ else rtm->rtm_type = RTN_UNICAST; rtm->rtm_flags = 0; + if (!netif_carrier_ok(rt->dst.dev)) { + rtm->rtm_flags |= RTNH_F_LINKDOWN; + if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown) + rtm->rtm_flags |= RTNH_F_DEAD; + } rtm->rtm_scope = RT_SCOPE_UNIVERSE; rtm->rtm_protocol = rt->rt6i_protocol; if (rt->rt6i_flags & RTF_DYNAMIC) @@@ -2978,6 -2991,8 +3022,8 @@@ if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags))) goto nla_put_failure;
+ lwtunnel_fill_encap(skb, rt->rt6i_lwtstate); + nlmsg_end(skb, nlh); return 0;
@@@ -3104,7 -3119,7 +3150,7 @@@ void inet6_rt_notify(int event, struct err = -ENOBUFS; seq = info->nlh ? info->nlh->nlmsg_seq : 0;
- skb = nlmsg_new(rt6_nlmsg_size(), gfp_any()); + skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any()); if (!skb) goto errout;