The following commit has been merged in the master branch:

commit 37f12c67d64f17245cf11c03cf7fa18f2d77f175
Merge: bf3bb0a906504ca5fc830e6cb44878cc40333073 5737f6c92681939e417579b421f81f035e57c582
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Wed Sep 21 11:30:45 2016 +1000
Merge remote-tracking branch 'net-next/master'
diff --combined MAINTAINERS
index 89b4958,ce80b36..2eb7242
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -636,6 -636,15 +636,15 @@@ F: drivers/tty/serial/altera_jtaguart.
  F:	include/linux/altera_uart.h
  F:	include/linux/altera_jtaguart.h
  
+ AMAZON ETHERNET DRIVERS
+ M:	Netanel Belgazal <netanel@annapurnalabs.com>
+ R:	Saeed Bishara <saeed@annapurnalabs.com>
+ R:	Zorik Machulsky <zorik@annapurnalabs.com>
+ L:	netdev@vger.kernel.org
+ S:	Supported
+ F:	Documentation/networking/ena.txt
+ F:	drivers/net/ethernet/amazon/
+ 
  AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER
  M:	Tom Lendacky <thomas.lendacky@amd.com>
  M:	Gary Hook <gary.hook@amd.com>
@@@ -857,13 -866,6 +866,13 @@@ F: drivers/net/phy/mdio-xgene.
  F:	Documentation/devicetree/bindings/net/apm-xgene-enet.txt
  F:	Documentation/devicetree/bindings/net/apm-xgene-mdio.txt
 +APPLIED MICRO (APM) X-GENE SOC PMU
 +M:	Tai Nguyen <ttnguyen@apm.com>
 +S:	Supported
 +F:	drivers/perf/xgene_pmu.c
 +F:	Documentation/perf/xgene-pmu.txt
 +F:	Documentation/devicetree/bindings/perf/apm-xgene-pmu.txt
 +
  APTINA CAMERA SENSOR PLL
  M:	Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
  L:	linux-media@vger.kernel.org
@@@ -920,17 -922,15 +929,17 @@@ F: arch/arm/include/asm/floppy.
  ARM PMU PROFILING AND DEBUGGING
  M:	Will Deacon <will.deacon@arm.com>
 -R:	Mark Rutland <mark.rutland@arm.com>
 +M:	Mark Rutland <mark.rutland@arm.com>
  S:	Maintained
 +L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  F:	arch/arm*/kernel/perf_*
  F:	arch/arm/oprofile/common.c
  F:	arch/arm*/kernel/hw_breakpoint.c
  F:	arch/arm*/include/asm/hw_breakpoint.h
  F:	arch/arm*/include/asm/perf_event.h
 -F:	drivers/perf/arm_pmu.c
 +F:	drivers/perf/*
  F:	include/linux/perf/arm_pmu.h
 +F:	Documentation/devicetree/bindings/arm/pmu.txt
ARM PORT M: Russell King linux@armlinux.org.uk @@@ -1001,7 -1001,6 +1010,7 @@@ M: Chen-Yu Tsai <wens@csie.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained N: sun[x456789]i +F: arch/arm/boot/dts/ntc-gr8*
ARM/Allwinner SoC Clock Support M: Emilio López emilio@elopez.com.ar @@@ -1635,7 -1634,6 +1644,7 @@@ N: rockchi ARM/SAMSUNG EXYNOS ARM ARCHITECTURES M: Kukjin Kim kgene@kernel.org M: Krzysztof Kozlowski krzk@kernel.org +R: Javier Martinez Canillas javier@osg.samsung.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) S: Maintained @@@ -1687,6 -1685,14 +1696,6 @@@ S: Maintaine F: arch/arm/plat-samsung/s5p-dev-mfc.c F: drivers/media/platform/s5p-mfc/
 -ARM/SAMSUNG S5P SERIES TV SUBSYSTEM SUPPORT
 -M:	Kyungmin Park <kyungmin.park@samsung.com>
 -M:	Tomasz Stanislawski <t.stanislaws@samsung.com>
 -L:	linux-arm-kernel@lists.infradead.org
 -L:	linux-media@vger.kernel.org
 -S:	Maintained
 -F:	drivers/media/platform/s5p-tv/
 -
  ARM/SAMSUNG S5P SERIES HDMI CEC SUBSYSTEM SUPPORT
  M:	Kyungmin Park <kyungmin.park@samsung.com>
  L:	linux-arm-kernel@lists.infradead.org
@@@ -1844,7 -1850,6 +1853,7 @@@ F: arch/arm64/boot/dts/socionext
  F:	drivers/bus/uniphier-system-bus.c
  F:	drivers/i2c/busses/i2c-uniphier*
  F:	drivers/pinctrl/uniphier/
 +F:	drivers/reset/reset-uniphier.c
  F:	drivers/tty/serial/8250/8250_uniphier.c
  N:	uniphier
@@@ -2226,9 -2231,9 +2235,9 @@@ S: Maintaine F: drivers/net/wireless/atmel/atmel*
  ATMEL MAXTOUCH DRIVER
 -M:	Nick Dyer <nick.dyer@itdev.co.uk>
 -T:	git git://github.com/atmel-maxtouch/linux.git
 -S:	Supported
 +M:	Nick Dyer <nick@shmanahar.org>
 +T:	git git://github.com/ndyer/linux.git
 +S:	Maintained
  F:	Documentation/devicetree/bindings/input/atmel,maxtouch.txt
  F:	drivers/input/touchscreen/atmel_mxt_ts.c
  F:	include/linux/platform_data/atmel_mxt_ts.h
@@@ -2579,13 -2584,6 +2588,13 @@@ F: arch/arm/mach-bcm/bcm_5301x.
  F:	arch/arm/boot/dts/bcm5301x*.dtsi
  F:	arch/arm/boot/dts/bcm470*
  
 +BROADCOM BCM53573 ARM ARCHITECTURE
 +M:	Rafał Miłecki <rafal@milecki.pl>
 +L:	linux-arm-kernel@lists.infradead.org
 +S:	Maintained
 +F:	arch/arm/boot/dts/bcm53573*
 +F:	arch/arm/boot/dts/bcm47189*
 +
  BROADCOM BCM63XX ARM ARCHITECTURE
  M:	Florian Fainelli <f.fainelli@gmail.com>
  M:	bcm-kernel-feedback-list@broadcom.com
@@@ -2785,7 -2783,7 +2794,7 @@@ L: linux-media@vger.kernel.or
  W:	https://linuxtv.org
  T:	git git://linuxtv.org/media_tree.git
  S:	Odd fixes
 -F:	Documentation/video4linux/bttv/
 +F:	Documentation/media/v4l-drivers/bttv*
  F:	drivers/media/pci/bt8xx/bttv*
BUSLOGIC SCSI DRIVER @@@ -2830,7 -2828,7 +2839,7 @@@ M: Jonathan Corbet <corbet@lwn.net L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained -F: Documentation/video4linux/cafe_ccic +F: Documentation/media/v4l-drivers/cafe_ccic* F: drivers/media/platform/marvell-ccic/
CAIF NETWORK LAYER @@@ -2899,14 -2897,6 +2908,14 @@@ S: Maintaine F: drivers/iio/light/cm* F: Documentation/devicetree/bindings/i2c/trivial-devices.txt
 +CAVIUM I2C DRIVER
 +M:	Jan Glauber <jglauber@cavium.com>
 +M:	David Daney <david.daney@cavium.com>
 +W:	http://www.cavium.com
 +S:	Supported
 +F:	drivers/i2c/busses/i2c-octeon*
 +F:	drivers/i2c/busses/i2c-thunderx*
 +
  CAVIUM LIQUIDIO NETWORK DRIVER
  M:	Derek Chickles <derek.chickles@caviumnetworks.com>
  M:	Satanand Burla <satananda.burla@caviumnetworks.com>
@@@ -2932,7 -2922,7 +2941,7 @@@ T: git git://linuxtv.org/media_tree.gi
  W:	http://linuxtv.org
  S:	Supported
  F:	Documentation/cec.txt
 -F:	Documentation/DocBook/media/v4l/cec*
 +F:	Documentation/media/uapi/cec
  F:	drivers/staging/media/cec/
  F:	drivers/media/cec-edid.c
  F:	drivers/media/rc/keymaps/rc-cec.c
@@@ -3154,7 -3144,7 +3163,7 @@@ L: cocci@systeme.lip6.fr (moderated fo
  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git misc
  W:	http://coccinelle.lip6.fr/
  S:	Supported
 -F:	Documentation/coccinelle.txt
 +F:	Documentation/dev-tools/coccinelle.rst
  F:	scripts/coccinelle/
  F:	scripts/coccicheck
@@@ -3300,7 -3290,6 +3309,7 @@@ L: linux-pm@vger.kernel.or S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git T: git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates) +F: Documentation/cpu-freq/ F: drivers/cpufreq/ F: include/linux/cpufreq.h
@@@ -3419,7 -3408,7 +3428,7 @@@ T: git git://linuxtv.org/media_tree.gi W: https://linuxtv.org W: http://www.ivtvdriver.org/index.php/Cx18 S: Maintained -F: Documentation/video4linux/cx18.txt +F: Documentation/media/v4l-drivers/cx18* F: drivers/media/pci/cx18/ F: include/uapi/linux/ivtv*
@@@ -3448,7 -3437,7 +3457,7 @@@ L: linux-media@vger.kernel.or W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git S: Odd fixes -F: Documentation/video4linux/cx88/ +F: Documentation/media/v4l-drivers/cx88* F: drivers/media/pci/cx88/
CXD2820R MEDIA DRIVER @@@ -3512,14 -3501,14 +3521,14 @@@ F: drivers/net/ethernet/chelsio/cxgb4vf
  CXL (IBM Coherent Accelerator Processor Interface CAPI) DRIVER
  M:	Ian Munsie <imunsie@au1.ibm.com>
 -M:	Michael Neuling <mikey@neuling.org>
 +M:	Frederic Barrat <fbarrat@linux.vnet.ibm.com>
  L:	linuxppc-dev@lists.ozlabs.org
  S:	Supported
 +F:	arch/powerpc/platforms/powernv/pci-cxl.c
  F:	drivers/misc/cxl/
  F:	include/misc/cxl*
  F:	include/uapi/misc/cxl.h
  F:	Documentation/powerpc/cxl.txt
 -F:	Documentation/powerpc/cxl.txt
  F:	Documentation/ABI/testing/sysfs-class-cxl
CXLFLASH (IBM Coherent Accelerator Processor Interface CAPI Flash) SCSI DRIVER @@@ -3921,7 -3910,7 +3930,7 @@@ X: Documentation/devicetree X: Documentation/acpi X: Documentation/power X: Documentation/spi -X: Documentation/DocBook/media +X: Documentation/media T: git git://git.lwn.net/linux.git docs-next
DOUBLETALK DRIVER @@@ -4622,7 -4611,6 +4631,7 @@@ W: https://linuxtv.or T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/usb/em28xx/ +F: Documentation/media/v4l-drivers/em28xx*
EMBEDDED LINUX M: Paul Gortmaker paul.gortmaker@windriver.com @@@ -5091,9 -5079,10 +5100,9 @@@ F: include/linux/fscrypto.
  F2FS FILE SYSTEM
  M:	Jaegeuk Kim <jaegeuk@kernel.org>
 -M:	Changman Lee <cm224.lee@samsung.com>
 -R:	Chao Yu <yuchao0@huawei.com>
 +M:	Chao Yu <yuchao0@huawei.com>
  L:	linux-f2fs-devel@lists.sourceforge.net
 -W:	http://en.wikipedia.org/wiki/F2FS
 +W:	https://f2fs.wiki.kernel.org/
  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git
  S:	Maintained
  F:	Documentation/filesystems/f2fs.txt
@@@ -5155,7 -5144,7 +5164,7 @@@ GCOV BASED KERNEL PROFILIN
  M:	Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
  S:	Maintained
  F:	kernel/gcov/
 -F:	Documentation/gcov.txt
 +F:	Documentation/dev-tools/gcov.rst
GDT SCSI DISK ARRAY CONTROLLER DRIVER M: Achim Leubner achim_leubner@adaptec.com @@@ -5303,13 -5292,6 +5312,13 @@@ L: netdev@vger.kernel.or S: Maintained F: drivers/net/ethernet/aeroflex/
+GS1662 VIDEO SERIALIZER +M: Charles-Antoine Couret charles-antoine.couret@nexvision.fr +L: linux-media@vger.kernel.org +T: git git://linuxtv.org/media_tree.git +S: Maintained +F: drivers/media/spi/gs1662.c + GSPCA FINEPIX SUBDRIVER M: Frank Zago frank@zago.net L: linux-media@vger.kernel.org @@@ -5650,7 -5632,7 +5659,7 @@@ M: Sebastian Reichel <sre@kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/sre/linux-hsi.git S: Maintained F: Documentation/ABI/testing/sysfs-bus-hsi -F: Documentation/hsi.txt +F: Documentation/device-drivers/serial-interfaces.rst F: drivers/hsi/ F: include/linux/hsi/ F: include/uapi/linux/hsi/ @@@ -5678,14 -5660,6 +5687,14 @@@ M: Nadia Yvette Chambers <nyc@holomorph S: Maintained F: fs/hugetlbfs/
+HVA ST MEDIA DRIVER +M: Jean-Christophe Trotin jean-christophe.trotin@st.com +L: linux-media@vger.kernel.org +T: git git://linuxtv.org/media_tree.git +W: https://linuxtv.org +S: Supported +F: drivers/media/platform/sti/hva + Hyper-V CORE AND DRIVERS M: "K. Y. Srinivasan" kys@microsoft.com M: Haiyang Zhang haiyangz@microsoft.com @@@ -5712,8 -5686,6 +5721,8 @@@ S: Maintaine F: Documentation/i2c/i2c-topology F: Documentation/i2c/muxes/ F: Documentation/devicetree/bindings/i2c/i2c-mux* +F: Documentation/devicetree/bindings/i2c/i2c-arb* +F: Documentation/devicetree/bindings/i2c/i2c-gate* F: drivers/i2c/i2c-mux.c F: drivers/i2c/muxes/ F: include/linux/i2c-mux.h @@@ -6131,13 -6103,6 +6140,13 @@@ T: git git://git.kernel.org/pub/scm/lin S: Supported F: drivers/idle/intel_idle.c
+INTEL INTEGRATED SENSOR HUB DRIVER +M: Srinivas Pandruvada srinivas.pandruvada@linux.intel.com +M: Jiri Kosina jikos@kernel.org +L: linux-input@vger.kernel.org +S: Maintained +F: drivers/hid/intel-ish-hid/ + INTEL PSTATE DRIVER M: Srinivas Pandruvada srinivas.pandruvada@linux.intel.com M: Len Brown lenb@kernel.org @@@ -6146,7 -6111,7 +6155,7 @@@ S: Supporte F: drivers/cpufreq/intel_pstate.c
INTEL FRAMEBUFFER DRIVER (excluding 810 and 815) -M: Maik Broemme mbroemme@plusserver.de +M: Maik Broemme mbroemme@libmpq.org L: linux-fbdev@vger.kernel.org S: Maintained F: Documentation/fb/intelfb.txt @@@ -6564,7 -6529,7 +6573,7 @@@ L: linux-media@vger.kernel.or T: git git://linuxtv.org/media_tree.git W: http://www.ivtvdriver.org S: Maintained -F: Documentation/video4linux/*.ivtv +F: Documentation/media/v4l-drivers/ivtv* F: drivers/media/pci/ivtv/ F: include/uapi/linux/ivtv*
@@@ -6648,7 -6613,7 +6657,7 @@@ L: kasan-dev@googlegroups.co S: Maintained F: arch/*/include/asm/kasan.h F: arch/*/mm/kasan_init* -F: Documentation/kasan.txt +F: Documentation/dev-tools/kasan.rst F: include/linux/kasan*.h F: lib/test_kasan.c F: mm/kasan/ @@@ -6864,7 -6829,7 +6873,7 @@@ KMEMCHEC M: Vegard Nossum vegardno@ifi.uio.no M: Pekka Enberg penberg@kernel.org S: Maintained -F: Documentation/kmemcheck.txt +F: Documentation/dev-tools/kmemcheck.rst F: arch/x86/include/asm/kmemcheck.h F: arch/x86/mm/kmemcheck/ F: include/linux/kmemcheck.h @@@ -6873,7 -6838,7 +6882,7 @@@ F: mm/kmemcheck. KMEMLEAK M: Catalin Marinas catalin.marinas@arm.com S: Maintained -F: Documentation/kmemleak.txt +F: Documentation/dev-tools/kmemleak.rst F: include/linux/kmemleak.h F: mm/kmemleak.c F: mm/kmemleak-test.c @@@ -7569,15 -7534,6 +7578,15 @@@ F: Documentation/devicetree/bindings/me F: drivers/media/platform/rcar-fcp.c F: include/media/rcar-fcp.h
+MEDIA DRIVERS FOR RENESAS - VIN +M: Niklas Söderlund niklas.soderlund@ragnatech.se +L: linux-media@vger.kernel.org +L: linux-renesas-soc@vger.kernel.org +T: git git://linuxtv.org/media_tree.git +S: Supported +F: Documentation/devicetree/bindings/media/rcar_vin.txt +F: drivers/media/platform/rcar-vin/ + MEDIA DRIVERS FOR RENESAS - VSP1 M: Laurent Pinchart laurent.pinchart@ideasonboard.com L: linux-media@vger.kernel.org @@@ -7655,7 -7611,9 +7664,7 @@@ W: https://linuxtv.or Q: http://patchwork.kernel.org/project/linux-media/list/ T: git git://linuxtv.org/media_tree.git S: Maintained -F: Documentation/dvb/ -F: Documentation/video4linux/ -F: Documentation/DocBook/media/ +F: Documentation/media/ F: drivers/media/ F: drivers/staging/media/ F: include/linux/platform_data/media/ @@@ -7803,14 -7761,6 +7812,14 @@@ T: git git://git.monstr.eu/linux-2.6-mi S: Supported F: arch/microblaze/
+MICROCHIP / ATMEL ISC DRIVER +M: Songjun Wu songjun.wu@microchip.com +L: linux-media@vger.kernel.org +S: Supported +F: drivers/media/platform/atmel/atmel-isc.c +F: drivers/media/platform/atmel/atmel-isc-regs.h +F: devicetree/bindings/media/atmel-isc.txt + MICROSOFT SURFACE PRO 3 BUTTON DRIVER M: Chen Yu yu.c.chen@intel.com L: platform-driver-x86@vger.kernel.org @@@ -7924,7 -7874,7 +7933,7 @@@ F: kernel/module. MOTION EYE VAIO PICTUREBOOK CAMERA DRIVER W: http://popies.net/meye/ S: Orphan -F: Documentation/video4linux/meye.txt +F: Documentation/media/v4l-drivers/meye* F: drivers/media/pci/meye/ F: include/uapi/linux/meye.h
@@@ -8219,15 -8169,6 +8228,15 @@@ S: Maintaine W: https://fedorahosted.org/dropwatch/ F: net/core/drop_monitor.c
 +NETWORKING [DSA]
 +M:	Andrew Lunn <andrew@lunn.ch>
 +M:	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 +M:	Florian Fainelli <f.fainelli@gmail.com>
 +S:	Maintained
 +F:	net/dsa/
 +F:	include/net/dsa.h
 +F:	drivers/net/dsa/
 +
  NETWORKING [GENERAL]
  M:	"David S. Miller" <davem@davemloft.net>
  L:	netdev@vger.kernel.org
@@@ -9323,8 -9264,6 +9332,8 @@@ L: linux-arm-kernel@lists.infradead.or
  L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
  S:	Maintained
  F:	drivers/pinctrl/samsung/
 +F:	include/dt-bindings/pinctrl/samsung.h
 +F:	Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
PIN CONTROLLER - SINGLE M: Tony Lindgren tony@atomide.com @@@ -9579,7 -9518,7 +9588,7 @@@ L: linux-media@vger.kernel.or W: http://www.isely.net/pvrusb2/ T: git git://linuxtv.org/media_tree.git S: Maintained -F: Documentation/video4linux/README.pvrusb2 +F: Documentation/media/v4l-drivers/pvrusb2* F: drivers/media/usb/pvrusb2/
PWC WEBCAM DRIVER @@@ -9760,6 -9699,12 +9769,12 @@@ T: git git://git.kernel.org/pub/scm/lin S: Supported F: drivers/net/wireless/ath/ath10k/
+ QUALCOMM EMAC GIGABIT ETHERNET DRIVER
+ M:	Timur Tabi <timur@codeaurora.org>
+ L:	netdev@vger.kernel.org
+ S:	Supported
+ F:	drivers/net/ethernet/qualcomm/emac/
+ 
  QUALCOMM HEXAGON ARCHITECTURE
  M:	Richard Kuo <rkuo@codeaurora.org>
  L:	linux-hexagon@vger.kernel.org
@@@ -10015,6 -9960,7 +10030,7 @@@ F: net/rfkill
  RHASHTABLE
  M:	Thomas Graf <tgraf@suug.ch>
+ M:	Herbert Xu <herbert@gondor.apana.org.au>
  L:	netdev@vger.kernel.org
  S:	Maintained
  F:	lib/rhashtable.c
@@@ -10233,7 -10179,7 +10249,7 @@@ L: linux-media@vger.kernel.or
  W:	https://linuxtv.org
  T:	git git://linuxtv.org/media_tree.git
  S:	Odd fixes
 -F:	Documentation/video4linux/*.saa7134
 +F:	Documentation/media/v4l-drivers/saa7134*
  F:	drivers/media/pci/saa7134/
SAA7146 VIDEO4LINUX-2 DRIVER @@@ -10375,13 -10321,6 +10391,13 @@@ S: Maintaine F: Documentation/devicetree/bindings/serial/ F: drivers/tty/serial/
+STI CEC DRIVER +M: Benjamin Gaignard benjamin.gaignard@linaro.org +L: kernel@stlinux.com +S: Maintained +F: drivers/staging/media/st-cec/ +F: Documentation/devicetree/bindings/media/stih-cec.txt + SYNOPSYS DESIGNWARE DMAC DRIVER M: Viresh Kumar vireshk@kernel.org M: Andy Shevchenko andriy.shevchenko@linux.intel.com @@@ -11942,15 -11881,6 +11958,15 @@@ W: https://linuxtv.or T: git git://linuxtv.org/media_tree.git S: Odd fixes F: drivers/media/usb/tm6000/ +F: Documentation/media/v4l-drivers/tm6000* + +TW5864 VIDEO4LINUX DRIVER +M: Bluecherry Maintainers maintainers@bluecherrydvr.com +M: Andrey Utkin andrey.utkin@corp.bluecherry.net +M: Andrey Utkin andrey_utkin@fastmail.com +L: linux-media@vger.kernel.org +S: Supported +F: drivers/media/pci/tw5864/
TW68 VIDEO4LINUX DRIVER M: Hans Verkuil hverkuil@xs4all.nl @@@ -12366,6 -12296,7 +12382,7 @@@ F: drivers/net/usb/smsc75xx.
  USB SMSC95XX ETHERNET DRIVER
  M:	Steve Glendinning <steve.glendinning@shawell.net>
+ M:	Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
  L:	netdev@vger.kernel.org
  S:	Maintained
  F:	drivers/net/usb/smsc95xx.*
@@@ -12447,7 -12378,7 +12464,7 @@@ L: linux-media@vger.kernel.or
  T:	git git://linuxtv.org/media_tree.git
  W:	http://royale.zerezo.com/zr364xx/
  S:	Maintained
 -F:	Documentation/video4linux/zr364xx.txt
 +F:	Documentation/media/v4l-drivers/zr364xx*
  F:	drivers/media/usb/zr364xx/
ULPI BUS @@@ -12654,7 -12585,7 +12671,7 @@@ F: include/linux/if_*vlan. F: net/8021q/
  VLYNQ BUS
 -M:	Florian Fainelli <florian@openwrt.org>
 +M:	Florian Fainelli <f.fainelli@gmail.com>
  L:	openwrt-devel@lists.openwrt.org (subscribers-only)
  S:	Maintained
  F:	drivers/vlynq/vlynq.c
  XFS FILESYSTEM
 -P:	Silicon Graphics Inc
  M:	Dave Chinner <david@fromorbit.com>
 -M:	xfs@oss.sgi.com
 -L:	xfs@oss.sgi.com
 -W:	http://oss.sgi.com/projects/xfs
 +M:	linux-xfs@vger.kernel.org
 +L:	linux-xfs@vger.kernel.org
 +W:	http://xfs.org/
  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/dgc/linux-xfs.git
  S:	Supported
  F:	Documentation/filesystems/xfs.txt

diff --combined arch/arm64/boot/dts/apm/apm-storm.dtsi
index 954ea6a,d5c3435..63be8e5
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@@ -110,10 -110,10 +110,10 @@@
  	timer {
  		compatible = "arm,armv8-timer";
- 		interrupts = <1 0 0xff01>,	/* Secure Phys IRQ */
- 			     <1 13 0xff01>,	/* Non-secure Phys IRQ */
- 			     <1 14 0xff01>,	/* Virt IRQ */
- 			     <1 15 0xff01>;	/* Hyp IRQ */
+ 		interrupts = <1 0 0xff08>,	/* Secure Phys IRQ */
+ 			     <1 13 0xff08>,	/* Non-secure Phys IRQ */
+ 			     <1 14 0xff08>,	/* Virt IRQ */
+ 			     <1 15 0xff08>;	/* Hyp IRQ */
  		clock-frequency = <50000000>;
  	};
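A note for readers, not part of the commit: the last cell of each GIC interrupt specifier is the trigger type (for PPIs such as the timer's it also carries a CPU mask in bits [15:8], hence the 0xff prefix), so these hunks move the lines from edge to level triggering. The PCIe interrupt-map hunks below make the matching change (0x1 -> 0x4). For reference, the trigger values, mirrored from include/dt-bindings/interrupt-controller/irq.h:

/* Trigger types from include/dt-bindings/interrupt-controller/irq.h */
#define IRQ_TYPE_NONE		0
#define IRQ_TYPE_EDGE_RISING	1	/* old values here: 0xff01 / 0x1 */
#define IRQ_TYPE_EDGE_FALLING	2
#define IRQ_TYPE_LEVEL_HIGH	4	/* new PCIe INTx value: 0x4 */
#define IRQ_TYPE_LEVEL_LOW	8	/* new timer value: 0xff08 */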
@@@ -553,64 -553,6 +553,64 @@@
  		};
  	};
  
+ 	pmu: pmu@78810000 {
+ 		compatible = "apm,xgene-pmu-v2";
+ 		#address-cells = <2>;
+ 		#size-cells = <2>;
+ 		ranges;
+ 		regmap-csw = <&csw>;
+ 		regmap-mcba = <&mcba>;
+ 		regmap-mcbb = <&mcbb>;
+ 		reg = <0x0 0x78810000 0x0 0x1000>;
+ 		interrupts = <0x0 0x22 0x4>;
+ 
+ 		pmul3c@7e610000 {
+ 			compatible = "apm,xgene-pmu-l3c";
+ 			reg = <0x0 0x7e610000 0x0 0x1000>;
+ 		};
+ 
+ 		pmuiob@7e940000 {
+ 			compatible = "apm,xgene-pmu-iob";
+ 			reg = <0x0 0x7e940000 0x0 0x1000>;
+ 		};
+ 
+ 		pmucmcb@7e710000 {
+ 			compatible = "apm,xgene-pmu-mcb";
+ 			reg = <0x0 0x7e710000 0x0 0x1000>;
+ 			enable-bit-index = <0>;
+ 		};
+ 
+ 		pmucmcb@7e730000 {
+ 			compatible = "apm,xgene-pmu-mcb";
+ 			reg = <0x0 0x7e730000 0x0 0x1000>;
+ 			enable-bit-index = <1>;
+ 		};
+ 
+ 		pmucmc@7e810000 {
+ 			compatible = "apm,xgene-pmu-mc";
+ 			reg = <0x0 0x7e810000 0x0 0x1000>;
+ 			enable-bit-index = <0>;
+ 		};
+ 
+ 		pmucmc@7e850000 {
+ 			compatible = "apm,xgene-pmu-mc";
+ 			reg = <0x0 0x7e850000 0x0 0x1000>;
+ 			enable-bit-index = <1>;
+ 		};
+ 
+ 		pmucmc@7e890000 {
+ 			compatible = "apm,xgene-pmu-mc";
+ 			reg = <0x0 0x7e890000 0x0 0x1000>;
+ 			enable-bit-index = <2>;
+ 		};
+ 
+ 		pmucmc@7e8d0000 {
+ 			compatible = "apm,xgene-pmu-mc";
+ 			reg = <0x0 0x7e8d0000 0x0 0x1000>;
+ 			enable-bit-index = <3>;
+ 		};
+ 	};
+ 
  	pcie0: pcie@1f2b0000 {
  		status = "disabled";
  		device_type = "pci";
@@@ -627,10 -569,10 +627,10 @@@
  		dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
  			      0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
  		interrupt-map-mask = <0x0 0x0 0x0 0x7>;
- 		interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc2 0x1
- 				 0x0 0x0 0x0 0x2 &gic 0x0 0xc3 0x1
- 				 0x0 0x0 0x0 0x3 &gic 0x0 0xc4 0x1
- 				 0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x1>;
+ 		interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc2 0x4
+ 				 0x0 0x0 0x0 0x2 &gic 0x0 0xc3 0x4
+ 				 0x0 0x0 0x0 0x3 &gic 0x0 0xc4 0x4
+ 				 0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x4>;
  		dma-coherent;
  		clocks = <&pcie0clk 0>;
  		msi-parent = <&msi>;
@@@ -652,10 -594,10 +652,10 @@@
  		dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
  			      0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
  		interrupt-map-mask = <0x0 0x0 0x0 0x7>;
- 		interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc8 0x1
- 				 0x0 0x0 0x0 0x2 &gic 0x0 0xc9 0x1
- 				 0x0 0x0 0x0 0x3 &gic 0x0 0xca 0x1
- 				 0x0 0x0 0x0 0x4 &gic 0x0 0xcb 0x1>;
+ 		interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc8 0x4
+ 				 0x0 0x0 0x0 0x2 &gic 0x0 0xc9 0x4
+ 				 0x0 0x0 0x0 0x3 &gic 0x0 0xca 0x4
+ 				 0x0 0x0 0x0 0x4 &gic 0x0 0xcb 0x4>;
  		dma-coherent;
  		clocks = <&pcie1clk 0>;
  		msi-parent = <&msi>;
@@@ -677,10 -619,10 +677,10 @@@
  		dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
  			      0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
  		interrupt-map-mask = <0x0 0x0 0x0 0x7>;
- 		interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xce 0x1
- 				 0x0 0x0 0x0 0x2 &gic 0x0 0xcf 0x1
- 				 0x0 0x0 0x0 0x3 &gic 0x0 0xd0 0x1
- 				 0x0 0x0 0x0 0x4 &gic 0x0 0xd1 0x1>;
+ 		interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xce 0x4
+ 				 0x0 0x0 0x0 0x2 &gic 0x0 0xcf 0x4
+ 				 0x0 0x0 0x0 0x3 &gic 0x0 0xd0 0x4
+ 				 0x0 0x0 0x0 0x4 &gic 0x0 0xd1 0x4>;
  		dma-coherent;
  		clocks = <&pcie2clk 0>;
  		msi-parent = <&msi>;
@@@ -702,10 -644,10 +702,10 @@@
  		dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
  			      0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
  		interrupt-map-mask = <0x0 0x0 0x0 0x7>;
- 		interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xd4 0x1
- 				 0x0 0x0 0x0 0x2 &gic 0x0 0xd5 0x1
- 				 0x0 0x0 0x0 0x3 &gic 0x0 0xd6 0x1
- 				 0x0 0x0 0x0 0x4 &gic 0x0 0xd7 0x1>;
+ 		interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xd4 0x4
+ 				 0x0 0x0 0x0 0x2 &gic 0x0 0xd5 0x4
+ 				 0x0 0x0 0x0 0x3 &gic 0x0 0xd6 0x4
+ 				 0x0 0x0 0x0 0x4 &gic 0x0 0xd7 0x4>;
  		dma-coherent;
  		clocks = <&pcie3clk 0>;
  		msi-parent = <&msi>;
@@@ -727,10 -669,10 +727,10 @@@
  		dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
  			      0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
  		interrupt-map-mask = <0x0 0x0 0x0 0x7>;
- 		interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xda 0x1
- 				 0x0 0x0 0x0 0x2 &gic 0x0 0xdb 0x1
- 				 0x0 0x0 0x0 0x3 &gic 0x0 0xdc 0x1
- 				 0x0 0x0 0x0 0x4 &gic 0x0 0xdd 0x1>;
+ 		interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xda 0x4
+ 				 0x0 0x0 0x0 0x2 &gic 0x0 0xdb 0x4
+ 				 0x0 0x0 0x0 0x3 &gic 0x0 0xdc 0x4
+ 				 0x0 0x0 0x0 0x4 &gic 0x0 0xdd 0x4>;
  		dma-coherent;
  		clocks = <&pcie4clk 0>;
  		msi-parent = <&msi>;
@@@ -755,11 -697,6 +755,11 @@@
  		mboxes = <&mailbox 0>;
  	};
  
+ 	hwmonslimpro {
+ 		compatible = "apm,xgene-slimpro-hwmon";
+ 		mboxes = <&mailbox 7>;
+ 	};
+ 
  	serial0: serial@1c020000 {
  		status = "disabled";
  		device_type = "serial";
@@@ -986,7 -923,7 +986,7 @@@
  			/* mac address will be overwritten by the bootloader */
  			local-mac-address = [00 00 00 00 00 00];
  			phy-connection-type = "rgmii";
- 			phy-handle = <&menet0phy>,<&menetphy>;
+ 			phy-handle = <&menetphy>,<&menet0phy>;
  			mdio {
  				compatible = "apm,xgene-mdio";
  				#address-cells = <1>;

diff --combined drivers/infiniband/hw/cxgb4/cm.c
index 80f9889,3cbbfbe..71c8867
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@@ -49,6 -49,7 +49,7 @@@
#include <rdma/ib_addr.h>
+ #include <libcxgb_cm.h>
  #include "iw_cxgb4.h"
  #include "clip_tbl.h"
@@@ -239,15 -240,13 +240,13 @@@ int c4iw_ofld_send(struct c4iw_rdev *rd
  static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
  {
- 	struct cpl_tid_release *req;
+ 	u32 len = roundup(sizeof(struct cpl_tid_release), 16);
  
- 	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
+ 	skb = get_skb(skb, len, GFP_KERNEL);
  	if (!skb)
  		return;
- 	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
- 	INIT_TP_WR(req, hwtid);
- 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
- 	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
+ 
+ 	cxgb_mk_tid_release(skb, len, hwtid, 0);
  	c4iw_ofld_send(rdev, skb);
  	return;
  }
@@@ -333,8 -332,6 +332,8 @@@ static void remove_ep_tid(struct c4iw_e
  	spin_lock_irqsave(&ep->com.dev->lock, flags);
  	_remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
 +	if (idr_is_empty(&ep->com.dev->hwtid_idr))
 +		wake_up(&ep->com.dev->wait);
  	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
  }
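The release_tid() conversion above is the pattern repeated throughout this file: open-coded CPL message construction moves into shared helpers from libcxgb (note the new "#include <libcxgb_cm.h>" earlier in the diff). A sketch of what such a helper does, reconstructed from the removed lines; the name and the authoritative version belong to drivers/net/ethernet/chelsio/libcxgb:

/* Sketch reconstructed from the code removed above: build a
 * CPL_TID_RELEASE message in the skb and steer it to the setup queue. */
static void sketch_mk_tid_release(struct sk_buff *skb, u32 len,
				  u32 tid, u16 chan)
{
	struct cpl_tid_release *req;

	req = (struct cpl_tid_release *)skb_put(skb, len);
	memset(req, 0, len);
	INIT_TP_WR(req, tid);		/* TP work-request header for this tid */
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);	/* control/setup queue */
}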
@@@ -466,72 -463,6 +465,6 @@@ static struct net_device *get_real_dev( return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev; }
- static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev) - { - int i; - - egress_dev = get_real_dev(egress_dev); - for (i = 0; i < dev->rdev.lldi.nports; i++) - if (dev->rdev.lldi.ports[i] == egress_dev) - return 1; - return 0; - } - - static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip, - __u8 *peer_ip, __be16 local_port, - __be16 peer_port, u8 tos, - __u32 sin6_scope_id) - { - struct dst_entry *dst = NULL; - - if (IS_ENABLED(CONFIG_IPV6)) { - struct flowi6 fl6; - - memset(&fl6, 0, sizeof(fl6)); - memcpy(&fl6.daddr, peer_ip, 16); - memcpy(&fl6.saddr, local_ip, 16); - if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) - fl6.flowi6_oif = sin6_scope_id; - dst = ip6_route_output(&init_net, NULL, &fl6); - if (!dst) - goto out; - if (!our_interface(dev, ip6_dst_idev(dst)->dev) && - !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) { - dst_release(dst); - dst = NULL; - } - } - - out: - return dst; - } - - static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip, - __be32 peer_ip, __be16 local_port, - __be16 peer_port, u8 tos) - { - struct rtable *rt; - struct flowi4 fl4; - struct neighbour *n; - - rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip, - peer_port, local_port, IPPROTO_TCP, - tos, 0); - if (IS_ERR(rt)) - return NULL; - n = dst_neigh_lookup(&rt->dst, &peer_ip); - if (!n) - return NULL; - if (!our_interface(dev, n->dev) && - !(n->dev->flags & IFF_LOOPBACK)) { - neigh_release(n); - dst_release(&rt->dst); - return NULL; - } - neigh_release(n); - return &rt->dst; - } - static void arp_failure_discard(void *handle, struct sk_buff *skb) { pr_err(MOD "ARP failure\n"); @@@ -706,56 -637,32 +639,32 @@@ static int send_flowc(struct c4iw_ep *e
  static int send_halfclose(struct c4iw_ep *ep)
  {
- 	struct cpl_close_con_req *req;
  	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
- 	int wrlen = roundup(sizeof *req, 16);
+ 	u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16);
  
  	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  	if (WARN_ON(!skb))
  		return -ENOMEM;
  
- 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
- 	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
- 	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
- 	memset(req, 0, wrlen);
- 	INIT_TP_WR(req, ep->hwtid);
- 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
- 						    ep->hwtid));
+ 	cxgb_mk_close_con_req(skb, wrlen, ep->hwtid, ep->txq_idx,
+ 			      NULL, arp_failure_discard);
+ 
  	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
  }
  
  static int send_abort(struct c4iw_ep *ep)
  {
- 	struct cpl_abort_req *req;
- 	int wrlen = roundup(sizeof *req, 16);
+ 	u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16);
  	struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
  
  	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
  	if (WARN_ON(!req_skb))
  		return -ENOMEM;
  
- 	set_wr_txq(req_skb, CPL_PRIORITY_DATA, ep->txq_idx);
- 	t4_set_arp_err_handler(req_skb, ep, abort_arp_failure);
- 	req = (struct cpl_abort_req *)skb_put(req_skb, wrlen);
- 	memset(req, 0, wrlen);
- 	INIT_TP_WR(req, ep->hwtid);
- 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
- 	req->cmd = CPL_ABORT_SEND_RST;
- 	return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
- }
+ 	cxgb_mk_abort_req(req_skb, wrlen, ep->hwtid, ep->txq_idx,
+ 			  ep, abort_arp_failure);
  
- static void best_mtu(const unsigned short *mtus, unsigned short mtu,
- 		     unsigned int *idx, int use_ts, int ipv6)
- {
- 	unsigned short hdr_size = (ipv6 ?
- 				   sizeof(struct ipv6hdr) :
- 				   sizeof(struct iphdr)) +
- 				  sizeof(struct tcphdr) +
- 				  (use_ts ?
- 				   round_up(TCPOLEN_TIMESTAMP, 4) : 0);
- 	unsigned short data_size = mtu - hdr_size;
- 
- 	cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
+ 	return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
  }
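For context on cxgb_compute_wscale(), used by send_connect() below: the TCP window-scale option can shift the 16-bit window field by at most 14 (RFC 7323), and the helper picks the smallest shift at which 65535 << wscale covers the requested receive window. For example, win = 262144 (256 KiB) yields wscale = 3, since 65535 << 2 = 262140 still falls short. A standalone sketch, assuming the libcxgb replacement keeps the logic of the compute_wscale() body removed from iw_cxgb4.h later in this diff:

/* Smallest window-scale shift (0..14) such that a scaled 16-bit
 * window can represent "win" bytes; mirrors the removed helper. */
static inline u32 sketch_compute_wscale(u32 win)
{
	u32 wscale = 0;

	while (wscale < 14 && (65535 << wscale) < win)
		wscale++;
	return wscale;
}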
static int send_connect(struct c4iw_ep *ep) @@@ -770,7 -677,7 +679,7 @@@ u64 opt0; u32 opt2; unsigned int mtu_idx; - int wscale; + u32 wscale; int win, sizev4, sizev6, wrlen; struct sockaddr_in *la = (struct sockaddr_in *) &ep->com.local_addr; @@@ -817,10 -724,10 +726,10 @@@ } set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
- best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, - enable_tcp_timestamps, - (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1); - wscale = compute_wscale(rcv_win); + cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, + enable_tcp_timestamps, + (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1); + wscale = cxgb_compute_wscale(rcv_win);
/* * Specify the largest window that will fit in opt0. The @@@ -1447,9 -1354,9 +1356,9 @@@ static void established_upcall(struct c
static int update_rx_credits(struct c4iw_ep *ep, u32 credits) { - struct cpl_rx_data_ack *req; struct sk_buff *skb; - int wrlen = roundup(sizeof *req, 16); + u32 wrlen = roundup(sizeof(struct cpl_rx_data_ack), 16); + u32 credit_dack;
PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); skb = get_skb(NULL, wrlen, GFP_KERNEL); @@@ -1466,15 -1373,12 +1375,12 @@@ if (ep->rcv_win > RCV_BUFSIZ_M * 1024) credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;
- req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen); - memset(req, 0, wrlen); - INIT_TP_WR(req, ep->hwtid); - OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, - ep->hwtid)); - req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F | - RX_DACK_CHANGE_F | - RX_DACK_MODE_V(dack_mode)); - set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx); + credit_dack = credits | RX_FORCE_ACK_F | RX_DACK_CHANGE_F | + RX_DACK_MODE_V(dack_mode); + + cxgb_mk_rx_data_ack(skb, wrlen, ep->hwtid, ep->ctrlq_idx, + credit_dack); + c4iw_ofld_send(&ep->com.dev->rdev, skb); return credits; } @@@ -1972,7 -1876,7 +1878,7 @@@ static int send_fw_act_open_req(struct struct sk_buff *skb; struct fw_ofld_connection_wr *req; unsigned int mtu_idx; - int wscale; + u32 wscale; struct sockaddr_in *sin; int win;
@@@ -1997,10 -1901,10 +1903,10 @@@ htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F); req->tcb.tx_max = (__force __be32) jiffies; req->tcb.rcv_adv = htons(1); - best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, - enable_tcp_timestamps, - (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1); - wscale = compute_wscale(rcv_win); + cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, + enable_tcp_timestamps, + (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1); + wscale = cxgb_compute_wscale(rcv_win);
/* * Specify the largest window that will fit in opt0. The @@@ -2054,15 -1958,6 +1960,6 @@@ static inline int act_open_has_tid(int status != CPL_ERR_CONN_EXIST); }
- /* Returns whether a CPL status conveys negative advice. - */ - static int is_neg_adv(unsigned int status) - { - return status == CPL_ERR_RTX_NEG_ADVICE || - status == CPL_ERR_PERSIST_NEG_ADVICE || - status == CPL_ERR_KEEPALV_NEG_ADVICE; - } - static char *neg_adv_str(unsigned int status) { switch (status) { @@@ -2119,10 -2014,8 +2016,10 @@@ static int import_ep(struct c4iw_ep *ep } ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, n, pdev, rt_tos2priority(tos)); - if (!ep->l2t) + if (!ep->l2t) { + dev_put(pdev); goto out; + } ep->mtu = pdev->mtu; ep->tx_chan = cxgb4_port_chan(pdev); ep->smac_idx = cxgb4_tp_smt_idx(adapter_type, @@@ -2218,16 -2111,21 +2115,21 @@@ static int c4iw_reconnect(struct c4iw_e
/* find a route */ if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) { - ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr, - raddr->sin_addr.s_addr, laddr->sin_port, - raddr->sin_port, ep->com.cm_id->tos); + ep->dst = cxgb_find_route(&ep->com.dev->rdev.lldi, get_real_dev, + laddr->sin_addr.s_addr, + raddr->sin_addr.s_addr, + laddr->sin_port, + raddr->sin_port, ep->com.cm_id->tos); iptype = 4; ra = (__u8 *)&raddr->sin_addr; } else { - ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr, - raddr6->sin6_addr.s6_addr, - laddr6->sin6_port, raddr6->sin6_port, 0, - raddr6->sin6_scope_id); + ep->dst = cxgb_find_route6(&ep->com.dev->rdev.lldi, + get_real_dev, + laddr6->sin6_addr.s6_addr, + raddr6->sin6_addr.s6_addr, + laddr6->sin6_port, + raddr6->sin6_port, 0, + raddr6->sin6_scope_id); iptype = 6; ra = (__u8 *)&raddr6->sin6_addr; } @@@ -2299,7 -2197,7 +2201,7 @@@ static int act_open_rpl(struct c4iw_de PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid, status, status2errno(status));
- if (is_neg_adv(status)) { + if (cxgb_is_neg_adv(status)) { PDBG("%s Connection problems for atid %u status %u (%s)\n", __func__, atid, status, neg_adv_str(status)); ep->stats.connect_neg_adv++; @@@ -2426,7 -2324,7 +2328,7 @@@ static int accept_cr(struct c4iw_ep *ep unsigned int mtu_idx; u64 opt0; u32 opt2; - int wscale; + u32 wscale; struct cpl_t5_pass_accept_rpl *rpl5 = NULL; int win; enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type; @@@ -2447,10 -2345,10 +2349,10 @@@ OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
- best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, - enable_tcp_timestamps && req->tcpopt.tstamp, - (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1); - wscale = compute_wscale(rcv_win); + cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, + enable_tcp_timestamps && req->tcpopt.tstamp, + (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1); + wscale = cxgb_compute_wscale(rcv_win);
/* * Specify the largest window that will fit in opt0. The @@@ -2522,42 -2420,6 +2424,6 @@@ static void reject_cr(struct c4iw_dev * return; }
- static void get_4tuple(struct cpl_pass_accept_req *req, enum chip_type type, - int *iptype, __u8 *local_ip, __u8 *peer_ip, - __be16 *local_port, __be16 *peer_port) - { - int eth_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ? - ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)) : - T6_ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)); - int ip_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ? - IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)) : - T6_IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)); - struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len); - struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len); - struct tcphdr *tcp = (struct tcphdr *) - ((u8 *)(req + 1) + eth_len + ip_len); - - if (ip->version == 4) { - PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__, - ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source), - ntohs(tcp->dest)); - *iptype = 4; - memcpy(peer_ip, &ip->saddr, 4); - memcpy(local_ip, &ip->daddr, 4); - } else { - PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__, - ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source), - ntohs(tcp->dest)); - *iptype = 6; - memcpy(peer_ip, ip6->saddr.s6_addr, 16); - memcpy(local_ip, ip6->daddr.s6_addr, 16); - } - *peer_port = tcp->source; - *local_port = tcp->dest; - - return; - } - static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) { struct c4iw_ep *child_ep = NULL, *parent_ep; @@@ -2586,8 -2448,8 +2452,8 @@@ goto reject; }
- get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type, &iptype, - local_ip, peer_ip, &local_port, &peer_port); + cxgb_get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type, + &iptype, local_ip, peer_ip, &local_port, &peer_port);
/* Find output route */ if (iptype == 4) { @@@ -2595,18 -2457,19 +2461,19 @@@ , __func__, parent_ep, hwtid, local_ip, peer_ip, ntohs(local_port), ntohs(peer_port), peer_mss); - dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip, - local_port, peer_port, - tos); + dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev, + *(__be32 *)local_ip, *(__be32 *)peer_ip, + local_port, peer_port, tos); } else { PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n" , __func__, parent_ep, hwtid, local_ip, peer_ip, ntohs(local_port), ntohs(peer_port), peer_mss); - dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port, - PASS_OPEN_TOS_G(ntohl(req->tos_stid)), - ((struct sockaddr_in6 *) - &parent_ep->com.local_addr)->sin6_scope_id); + dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev, + local_ip, peer_ip, local_port, peer_port, + PASS_OPEN_TOS_G(ntohl(req->tos_stid)), + ((struct sockaddr_in6 *) + &parent_ep->com.local_addr)->sin6_scope_id); } if (!dst) { printk(KERN_ERR MOD "%s - failed to find dst entry!\n", @@@ -2839,18 -2702,18 +2706,18 @@@ static int peer_abort(struct c4iw_dev * { struct cpl_abort_req_rss *req = cplhdr(skb); struct c4iw_ep *ep; - struct cpl_abort_rpl *rpl; struct sk_buff *rpl_skb; struct c4iw_qp_attributes attrs; int ret; int release = 0; unsigned int tid = GET_TID(req); + u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
ep = get_ep_from_tid(dev, tid); if (!ep) return 0;
- if (is_neg_adv(req->status)) { + if (cxgb_is_neg_adv(req->status)) { PDBG("%s Negative advice on abort- tid %u status %d (%s)\n", __func__, ep->hwtid, req->status, neg_adv_str(req->status)); @@@ -2943,11 -2806,9 +2810,9 @@@ release = 1; goto out; } - set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); - rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl)); - INIT_TP_WR(rpl, ep->hwtid); - OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); - rpl->cmd = CPL_ABORT_NO_RST; + + cxgb_mk_abort_rpl(rpl_skb, len, ep->hwtid, ep->txq_idx); + c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); out: if (release) @@@ -3379,9 -3240,11 +3244,11 @@@ int c4iw_connect(struct iw_cm_id *cm_id PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n", __func__, &laddr->sin_addr, ntohs(laddr->sin_port), ra, ntohs(raddr->sin_port)); - ep->dst = find_route(dev, laddr->sin_addr.s_addr, - raddr->sin_addr.s_addr, laddr->sin_port, - raddr->sin_port, cm_id->tos); + ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev, + laddr->sin_addr.s_addr, + raddr->sin_addr.s_addr, + laddr->sin_port, + raddr->sin_port, cm_id->tos); } else { iptype = 6; ra = (__u8 *)&raddr6->sin6_addr; @@@ -3400,10 -3263,12 +3267,12 @@@ __func__, laddr6->sin6_addr.s6_addr, ntohs(laddr6->sin6_port), raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port)); - ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr, - raddr6->sin6_addr.s6_addr, - laddr6->sin6_port, raddr6->sin6_port, 0, - raddr6->sin6_scope_id); + ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev, + laddr6->sin6_addr.s6_addr, + raddr6->sin6_addr.s6_addr, + laddr6->sin6_port, + raddr6->sin6_port, 0, + raddr6->sin6_scope_id); } if (!ep->dst) { printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); @@@ -4045,8 -3910,9 +3914,9 @@@ static int rx_pkt(struct c4iw_dev *dev ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr), ntohs(tcph->source), iph->tos);
- dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source, - iph->tos); + dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev, + iph->daddr, iph->saddr, tcph->dest, + tcph->source, iph->tos); if (!dst) { pr_err("%s - failed to find dst entry!\n", __func__); @@@ -4321,7 -4187,7 +4191,7 @@@ static int peer_abort_intr(struct c4iw_ kfree_skb(skb); return 0; } - if (is_neg_adv(req->status)) { + if (cxgb_is_neg_adv(req->status)) { PDBG("%s Negative advice on abort- tid %u status %d (%s)\n", __func__, ep->hwtid, req->status, neg_adv_str(req->status)); diff --combined drivers/infiniband/hw/cxgb4/device.c index 3c4b212,f170b63..93e3d27 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@@ -872,13 -872,9 +872,13 @@@ static void c4iw_rdev_close(struct c4iw static void c4iw_dealloc(struct uld_ctx *ctx) { c4iw_rdev_close(&ctx->dev->rdev); + WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr)); idr_destroy(&ctx->dev->cqidr); + WARN_ON_ONCE(!idr_is_empty(&ctx->dev->qpidr)); idr_destroy(&ctx->dev->qpidr); + WARN_ON_ONCE(!idr_is_empty(&ctx->dev->mmidr)); idr_destroy(&ctx->dev->mmidr); + wait_event(ctx->dev->wait, idr_is_empty(&ctx->dev->hwtid_idr)); idr_destroy(&ctx->dev->hwtid_idr); idr_destroy(&ctx->dev->stid_idr); idr_destroy(&ctx->dev->atid_idr); @@@ -996,7 -992,6 +996,7 @@@ static struct c4iw_dev *c4iw_alloc(cons mutex_init(&devp->rdev.stats.lock); mutex_init(&devp->db_mutex); INIT_LIST_HEAD(&devp->db_fc_list); + init_waitqueue_head(&devp->wait); devp->avail_ird = devp->rdev.lldi.max_ird_adapter;
if (c4iw_debugfs_root) { @@@ -1480,6 -1475,10 +1480,10 @@@ static int c4iw_uld_control(void *handl
  static struct cxgb4_uld_info c4iw_uld_info = {
  	.name = DRV_NAME,
+ 	.nrxq = MAX_ULD_QSETS,
+ 	.rxq_size = 511,
+ 	.ciq = true,
+ 	.lro = false,
  	.add = c4iw_uld_add,
  	.rx_handler = c4iw_uld_rx_handler,
  	.state_change = c4iw_uld_state_change,

diff --combined drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 4b83b84,6a9bef1f..cdcf3ee
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@@ -263,7 -263,6 +263,7 @@@ struct c4iw_dev
  	struct idr stid_idr;
  	struct list_head db_fc_list;
  	u32 avail_ird;
 +	wait_queue_head_t wait;
  };
static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev) @@@ -882,15 -881,6 +882,6 @@@ static inline struct c4iw_listen_ep *to return cm_id->provider_data; }
- static inline int compute_wscale(int win)
- {
- 	int wscale = 0;
- 
- 	while (wscale < 14 && (65535<<wscale) < win)
- 		wscale++;
- 	return wscale;
- }
- 
  static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
  {
  #if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)

diff --combined drivers/infiniband/hw/mlx5/main.c
index e19537c,e4aecbf..551aa0e
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@@ -232,23 -232,19 +232,19 @@@ static int set_roce_addr(struct ib_devi
  			 const union ib_gid *gid,
  			 const struct ib_gid_attr *attr)
  {
- 	struct mlx5_ib_dev *dev = to_mdev(device);
- 	u32 in[MLX5_ST_SZ_DW(set_roce_address_in)];
- 	u32 out[MLX5_ST_SZ_DW(set_roce_address_out)];
+ 	struct mlx5_ib_dev *dev = to_mdev(device);
+ 	u32 in[MLX5_ST_SZ_DW(set_roce_address_in)]  = {0};
+ 	u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
  	void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
  	enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);
  
  	if (ll != IB_LINK_LAYER_ETHERNET)
  		return -EINVAL;
  
- 	memset(in, 0, sizeof(in));
- 
  	ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);
  
  	MLX5_SET(set_roce_address_in, in, roce_address_index, index);
  	MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
- 
- 	memset(out, 0, sizeof(out));
  	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
  }
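The change above swaps two memset() calls for "= {0}" initializers, so the command mailboxes are zeroed at declaration rather than at two separate points in the body. The idiom in isolation, with a hypothetical buffer name:

u32 cmd[8] = {0};			/* zeroed where declared */

u32 cmd_old[8];				/* previous pattern: zeroed */
memset(cmd_old, 0, sizeof(cmd_old));	/* later, and explicitly */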
@@@ -288,9 -284,7 +284,9 @@@ __be16 mlx5_get_roce_udp_sport(struct m
static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev) { - return !MLX5_CAP_GEN(dev->mdev, ib_virt); + if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB) + return !MLX5_CAP_GEN(dev->mdev, ib_virt); + return 0; }
enum { @@@ -753,8 -747,7 +749,7 @@@ static int mlx5_query_hca_port(struct i &props->active_width); if (err) goto out; - err = mlx5_query_port_proto_oper(mdev, &props->active_speed, MLX5_PTYS_IB, - port); + err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port); if (err) goto out;
@@@ -1430,13 -1423,6 +1425,13 @@@ static int parse_flow_attr(u32 *match_c dmac_47_16), ib_spec->eth.val.dst_mac);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, + smac_47_16), + ib_spec->eth.mask.src_mac); + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, + smac_47_16), + ib_spec->eth.val.src_mac); + if (ib_spec->eth.mask.vlan_tag) { MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, vlan_tag, 1); diff --combined drivers/net/ethernet/broadcom/bnx2.c index 505ceaf,ecd357d..27f11a5 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@@ -50,7 -50,7 +50,7 @@@ #include <linux/log2.h> #include <linux/aer.h>
- #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) + #if IS_ENABLED(CONFIG_CNIC) #define BCM_CNIC 1 #include "cnic_if.h" #endif @@@ -6356,6 -6356,10 +6356,6 @@@ bnx2_open(struct net_device *dev struct bnx2 *bp = netdev_priv(dev); int rc;
- rc = bnx2_request_firmware(bp); - if (rc < 0) - goto out; - netif_carrier_off(dev);
bnx2_disable_int(bp); @@@ -6424,6 -6428,7 +6424,6 @@@ open_err bnx2_free_irq(bp); bnx2_free_mem(bp); bnx2_del_napi(bp); - bnx2_release_firmware(bp); goto out; }
@@@ -8570,12 -8575,6 +8570,12 @@@ bnx2_init_one(struct pci_dev *pdev, con
pci_set_drvdata(pdev, dev);
+ rc = bnx2_request_firmware(bp); + if (rc < 0) + goto error; + + + bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | @@@ -8608,7 -8607,6 +8608,7 @@@ return 0;
error: + bnx2_release_firmware(bp); pci_iounmap(pdev, bp->regview); pci_release_regions(pdev); pci_disable_device(pdev); diff --combined drivers/net/ethernet/ibm/emac/core.c index 7af09cb,ec4d0f3..8f13919 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@@ -977,37 -977,7 +977,37 @@@ static void emac_set_multicast_list(str dev->mcast_pending = 1; return; } + + mutex_lock(&dev->link_lock); __emac_set_multicast_list(dev); + mutex_unlock(&dev->link_lock); +} + +static int emac_set_mac_address(struct net_device *ndev, void *sa) +{ + struct emac_instance *dev = netdev_priv(ndev); + struct sockaddr *addr = sa; + struct emac_regs __iomem *p = dev->emacp; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + mutex_lock(&dev->link_lock); + + memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + + emac_rx_disable(dev); + emac_tx_disable(dev); + out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]); + out_be32(&p->ialr, (ndev->dev_addr[2] << 24) | + (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) | + ndev->dev_addr[5]); + emac_tx_enable(dev); + emac_rx_enable(dev); + + mutex_unlock(&dev->link_lock); + + return 0; }
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu) @@@ -2716,7 -2686,7 +2716,7 @@@ static const struct net_device_ops emac .ndo_do_ioctl = emac_ioctl, .ndo_tx_timeout = emac_tx_timeout, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = emac_set_mac_address, .ndo_start_xmit = emac_start_xmit, .ndo_change_mtu = eth_change_mtu, }; @@@ -2729,7 -2699,7 +2729,7 @@@ static const struct net_device_ops emac .ndo_do_ioctl = emac_ioctl, .ndo_tx_timeout = emac_tx_timeout, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = emac_set_mac_address, .ndo_start_xmit = emac_start_xmit_sg, .ndo_change_mtu = emac_change_mtu, }; @@@ -2780,7 -2750,7 +2780,7 @@@ static int emac_probe(struct platform_d /* Get interrupts. EMAC irq is mandatory, WOL irq is optional */ dev->emac_irq = irq_of_parse_and_map(np, 0); dev->wol_irq = irq_of_parse_and_map(np, 1); - if (dev->emac_irq == NO_IRQ) { + if (!dev->emac_irq) { printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name); goto err_free; } @@@ -2943,9 -2913,9 +2943,9 @@@ err_reg_unmap: iounmap(dev->emacp); err_irq_unmap: - if (dev->wol_irq != NO_IRQ) + if (dev->wol_irq) irq_dispose_mapping(dev->wol_irq); - if (dev->emac_irq != NO_IRQ) + if (dev->emac_irq) irq_dispose_mapping(dev->emac_irq); err_free: free_netdev(ndev); @@@ -2987,9 -2957,9 +2987,9 @@@ static int emac_remove(struct platform_ emac_dbg_unregister(dev); iounmap(dev->emacp);
- if (dev->wol_irq != NO_IRQ) + if (dev->wol_irq) irq_dispose_mapping(dev->wol_irq); - if (dev->emac_irq != NO_IRQ) + if (dev->emac_irq) irq_dispose_mapping(dev->emac_irq);
free_netdev(dev->ndev); diff --combined drivers/net/ethernet/mediatek/mtk_eth_soc.c index 3743af8,481f360..c37dc98 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@@ -18,6 -18,7 +18,7 @@@ #include <linux/mfd/syscon.h> #include <linux/regmap.h> #include <linux/clk.h> + #include <linux/pm_runtime.h> #include <linux/if_vlan.h> #include <linux/reset.h> #include <linux/tcp.h> @@@ -144,6 -145,9 +145,9 @@@ static void mtk_phy_link_adjust(struct MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN;
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) + return; + switch (mac->phy_dev->speed) { case SPEED_1000: mcr |= MAC_MCR_SPEED_1000; @@@ -230,7 -234,7 +234,7 @@@ static int mtk_phy_connect(struct mtk_m { struct mtk_eth *eth = mac->hw; struct device_node *np; - u32 val, ge_mode; + u32 val;
np = of_parse_phandle(mac->of_node, "phy-handle", 0); if (!np && of_phy_is_fixed_link(mac->of_node)) @@@ -244,18 -248,18 +248,18 @@@ case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII: - ge_mode = 0; + mac->ge_mode = 0; break; case PHY_INTERFACE_MODE_MII: - ge_mode = 1; + mac->ge_mode = 1; break; case PHY_INTERFACE_MODE_REVMII: - ge_mode = 2; + mac->ge_mode = 2; break; case PHY_INTERFACE_MODE_RMII: if (!mac->id) goto err_phy; - ge_mode = 3; + mac->ge_mode = 3; break; default: goto err_phy; @@@ -264,7 -268,7 +268,7 @@@ /* put the gmac into the right mode */ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id); - val |= SYSCFG0_GE_MODE(ge_mode, mac->id); + val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id); regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
mtk_phy_connect_node(eth, mac, np); @@@ -336,25 -340,27 +340,27 @@@ static void mtk_mdio_cleanup(struct mtk mdiobus_unregister(eth->mii_bus); }
- static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
+ static inline void mtk_irq_disable(struct mtk_eth *eth,
+ 				   unsigned reg, u32 mask)
  {
  	unsigned long flags;
  	u32 val;
  
  	spin_lock_irqsave(&eth->irq_lock, flags);
- 	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
- 	mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
+ 	val = mtk_r32(eth, reg);
+ 	mtk_w32(eth, val & ~mask, reg);
  	spin_unlock_irqrestore(&eth->irq_lock, flags);
  }
  
- static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
+ static inline void mtk_irq_enable(struct mtk_eth *eth,
+ 				  unsigned reg, u32 mask)
  {
  	unsigned long flags;
  	u32 val;
  
  	spin_lock_irqsave(&eth->irq_lock, flags);
- 	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
- 	mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
+ 	val = mtk_r32(eth, reg);
+ 	mtk_w32(eth, val | mask, reg);
  	spin_unlock_irqrestore(&eth->irq_lock, flags);
  }
@@@ -363,18 -369,20 +369,20 @@@ static int mtk_set_mac_address(struct n int ret = eth_mac_addr(dev, p); struct mtk_mac *mac = netdev_priv(dev); const char *macaddr = dev->dev_addr; - unsigned long flags;
if (ret) return ret;
- spin_lock_irqsave(&mac->hw->page_lock, flags); + if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) + return -EBUSY; + + spin_lock_bh(&mac->hw->page_lock); mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], MTK_GDMA_MAC_ADRH(mac->id)); mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | (macaddr[4] << 8) | macaddr[5], MTK_GDMA_MAC_ADRL(mac->id)); - spin_unlock_irqrestore(&mac->hw->page_lock, flags); + spin_unlock_bh(&mac->hw->page_lock);
return 0; } @@@ -759,7 -767,6 +767,6 @@@ static int mtk_start_xmit(struct sk_buf struct mtk_eth *eth = mac->hw; struct mtk_tx_ring *ring = ð->tx_ring; struct net_device_stats *stats = &dev->stats; - unsigned long flags; bool gso = false; int tx_num;
@@@ -767,14 -774,17 +774,17 @@@ * however we have 2 queues running on the same ring so we need to lock * the ring access */ - spin_lock_irqsave(ð->page_lock, flags); + spin_lock(ð->page_lock); + + if (unlikely(test_bit(MTK_RESETTING, ð->state))) + goto drop;
tx_num = mtk_cal_txd_req(skb); if (unlikely(atomic_read(&ring->free_count) <= tx_num)) { mtk_stop_queue(eth); netif_err(eth, tx_queued, dev, "Tx Ring full when queue awake!\n"); - spin_unlock_irqrestore(ð->page_lock, flags); + spin_unlock(ð->page_lock); return NETDEV_TX_BUSY; }
@@@ -799,22 -809,62 +809,62 @@@ if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) mtk_stop_queue(eth);
- spin_unlock_irqrestore(ð->page_lock, flags); + spin_unlock(ð->page_lock);
return NETDEV_TX_OK;
drop: - spin_unlock_irqrestore(ð->page_lock, flags); + spin_unlock(ð->page_lock); stats->tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; }
+ static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth) + { + int i; + struct mtk_rx_ring *ring; + int idx; + + if (!eth->hwlro) + return ð->rx_ring[0]; + + for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) { + ring = ð->rx_ring[i]; + idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size); + if (ring->dma[idx].rxd2 & RX_DMA_DONE) { + ring->calc_idx_update = true; + return ring; + } + } + + return NULL; + } + + static void mtk_update_rx_cpu_idx(struct mtk_eth *eth) + { + struct mtk_rx_ring *ring; + int i; + + if (!eth->hwlro) { + ring = ð->rx_ring[0]; + mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); + } else { + for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) { + ring = ð->rx_ring[i]; + if (ring->calc_idx_update) { + ring->calc_idx_update = false; + mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); + } + } + } + } + static int mtk_poll_rx(struct napi_struct *napi, int budget, struct mtk_eth *eth) { - struct mtk_rx_ring *ring = ð->rx_ring; - int idx = ring->calc_idx; + struct mtk_rx_ring *ring; + int idx; struct sk_buff *skb; u8 *data, *new_data; struct mtk_rx_dma *rxd, trxd; @@@ -826,7 -876,11 +876,11 @@@ dma_addr_t dma_addr; int mac = 0;
- idx = NEXT_RX_DESP_IDX(idx); + ring = mtk_get_rx_ring(eth); + if (unlikely(!ring)) + goto rx_done; + + idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size); rxd = &ring->dma[idx]; data = ring->data[idx];
@@@ -841,6 -895,9 +895,9 @@@
netdev = eth->netdev[mac];
+ if (unlikely(test_bit(MTK_RESETTING, ð->state))) + goto release_desc; + /* alloc new buffer */ new_data = napi_alloc_frag(ring->frag_size); if (unlikely(!new_data)) { @@@ -890,17 -947,19 +947,19 @@@ release_desc rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
ring->calc_idx = idx; + + done++; + } + + rx_done: + if (done) { /* make sure that all changes to the dma ring are flushed before * we continue */ wmb(); - mtk_w32(eth, ring->calc_idx, MTK_QRX_CRX_IDX0); - done++; + mtk_update_rx_cpu_idx(eth); }
- if (done < budget) - mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS); - return done; }
@@@ -1009,7 -1068,7 +1068,7 @@@ static int mtk_napi_tx(struct napi_stru return budget;
napi_complete(napi); - mtk_irq_enable(eth, MTK_TX_DONE_INT); + mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
return tx_done; } @@@ -1019,30 -1078,33 +1078,33 @@@ static int mtk_napi_rx(struct napi_stru struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); u32 status, mask; int rx_done = 0; + int remain_budget = budget;
  	mtk_handle_status_irq(eth);
- 	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
- 	rx_done = mtk_poll_rx(napi, budget, eth);
+ 
+ poll_again:
+ 	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
+ 	rx_done = mtk_poll_rx(napi, remain_budget, eth);
  
  	if (unlikely(netif_msg_intr(eth))) {
- 		status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
- 		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
+ 		status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
+ 		mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
  		dev_info(eth->dev,
  			 "done rx %d, intr 0x%08x/0x%x\n",
  			 rx_done, status, mask);
  	}
- 
- 	if (rx_done == budget)
- 		return budget;
- 
- 	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
- 	if (status & MTK_RX_DONE_INT)
+ 	if (rx_done == remain_budget)
  		return budget;
  
+ 	status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
+ 	if (status & MTK_RX_DONE_INT) {
+ 		remain_budget -= rx_done;
+ 		goto poll_again;
+ 	}
  	napi_complete(napi);
- 	mtk_irq_enable(eth, MTK_RX_DONE_INT);
+ 	mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
  
- 	return rx_done;
+ 	return rx_done + budget - remain_budget;
  }
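A worked trace of the re-poll accounting above, with hypothetical numbers:

/* budget = 64, remain_budget = 64
 * pass 1: rx_done = 30; MTK_RX_DONE_INT still set -> remain_budget = 34,
 *         goto poll_again
 * pass 2: rx_done = 10; interrupt quiet -> napi_complete()
 * return 10 + 64 - 34 = 40, i.e. the 30 + 10 frames actually handled;
 * returning less than budget tells NAPI the ring is drained. */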
static int mtk_tx_alloc(struct mtk_eth *eth) @@@ -1089,6 -1151,7 +1151,7 @@@ mtk_w32(eth, ring->phys + ((MTK_DMA_SIZE - 1) * sz), MTK_QTX_DRX_PTR); + mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
return 0;
@@@ -1117,32 -1180,41 +1180,41 @@@ static void mtk_tx_clean(struct mtk_et } }
- static int mtk_rx_alloc(struct mtk_eth *eth) + static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) { - struct mtk_rx_ring *ring = ð->rx_ring; + struct mtk_rx_ring *ring = ð->rx_ring[ring_no]; + int rx_data_len, rx_dma_size; int i;
- ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN); + if (rx_flag == MTK_RX_FLAGS_HWLRO) { + rx_data_len = MTK_MAX_LRO_RX_LENGTH; + rx_dma_size = MTK_HW_LRO_DMA_SIZE; + } else { + rx_data_len = ETH_DATA_LEN; + rx_dma_size = MTK_DMA_SIZE; + } + + ring->frag_size = mtk_max_frag_size(rx_data_len); ring->buf_size = mtk_max_buf_size(ring->frag_size); - ring->data = kcalloc(MTK_DMA_SIZE, sizeof(*ring->data), + ring->data = kcalloc(rx_dma_size, sizeof(*ring->data), GFP_KERNEL); if (!ring->data) return -ENOMEM;
- for (i = 0; i < MTK_DMA_SIZE; i++) { + for (i = 0; i < rx_dma_size; i++) { ring->data[i] = netdev_alloc_frag(ring->frag_size); if (!ring->data[i]) return -ENOMEM; }
ring->dma = dma_alloc_coherent(eth->dev, - MTK_DMA_SIZE * sizeof(*ring->dma), + rx_dma_size * sizeof(*ring->dma), &ring->phys, GFP_ATOMIC | __GFP_ZERO); if (!ring->dma) return -ENOMEM;
- for (i = 0; i < MTK_DMA_SIZE; i++) { + for (i = 0; i < rx_dma_size; i++) { dma_addr_t dma_addr = dma_map_single(eth->dev, ring->data[i] + NET_SKB_PAD, ring->buf_size, @@@ -1153,28 -1225,30 +1225,30 @@@
ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size); } - ring->calc_idx = MTK_DMA_SIZE - 1; + ring->dma_size = rx_dma_size; + ring->calc_idx_update = false; + ring->calc_idx = rx_dma_size - 1; + ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no); /* make sure that all changes to the dma ring are flushed before we * continue */ wmb();
- mtk_w32(eth, eth->rx_ring.phys, MTK_QRX_BASE_PTR0); - mtk_w32(eth, MTK_DMA_SIZE, MTK_QRX_MAX_CNT0); - mtk_w32(eth, eth->rx_ring.calc_idx, MTK_QRX_CRX_IDX0); - mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX); - mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0)); + mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no)); + mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no)); + mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); + mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
return 0; }
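mtk_rx_alloc() now sizes each ring by role: ring 0 keeps the MTK_DMA_SIZE/ETH_DATA_LEN geometry, while the LRO rings get MTK_HW_LRO_DMA_SIZE descriptors of MTK_MAX_LRO_RX_LENGTH bytes each, since an aggregated TCP burst needs far larger buffers. Initializing calc_idx to rx_dma_size - 1 appears to park the CPU index just behind slot 0, handing the whole freshly filled ring to the DMA engine.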
- static void mtk_rx_clean(struct mtk_eth *eth) + static void mtk_rx_clean(struct mtk_eth *eth, int ring_no) { - struct mtk_rx_ring *ring = &eth->rx_ring; + struct mtk_rx_ring *ring = &eth->rx_ring[ring_no]; int i;
if (ring->data && ring->dma) { - for (i = 0; i < MTK_DMA_SIZE; i++) { + for (i = 0; i < ring->dma_size; i++) { if (!ring->data[i]) continue; if (!ring->dma[i].rxd1) @@@ -1191,13 -1265,274 +1265,274 @@@
if (ring->dma) { dma_free_coherent(eth->dev, - MTK_DMA_SIZE * sizeof(*ring->dma), + ring->dma_size * sizeof(*ring->dma), ring->dma, ring->phys); ring->dma = NULL; } }
+ static int mtk_hwlro_rx_init(struct mtk_eth *eth) + { + int i; + u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0; + u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0; + + /* set LRO rings to auto-learn modes */ + ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE; + + /* validate LRO ring */ + ring_ctrl_dw2 |= MTK_RING_VLD; + + /* set AGE timer (unit: 20us) */ + ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H; + ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L; + + /* set max AGG timer (unit: 20us) */ + ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME; + + /* set max LRO AGG count */ + ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L; + ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H; + + for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) { + mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i)); + mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i)); + mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i)); + } + + /* IPv4 checksum update enable */ + lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN; + + /* switch priority comparison to packet count mode */ + lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE; + + /* bandwidth threshold setting */ + mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2); + + /* auto-learn score delta setting */ + mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA); + + /* set refresh timer for altering flows to 1 sec. (unit: 20us) */ + mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME, + MTK_PDMA_LRO_ALT_REFRESH_TIMER); + + /* set HW LRO mode & the max aggregation count for rx packets */ + lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff); + + /* the minimal remaining room of SDL0 in RXD for lro aggregation */ + lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL; + + /* enable HW LRO */ + lro_ctrl_dw0 |= MTK_LRO_EN; + + mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3); + mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0); + + return 0; + } + + static void mtk_hwlro_rx_uninit(struct mtk_eth *eth) + { + int i; + u32 val; + + /* relinquish lro rings, flush aggregated packets */ + mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0); + + /* wait for relinquishments done */ + for (i = 0; i < 10; i++) { + val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0); + if (val & MTK_LRO_RING_RELINQUISH_DONE) { + msleep(20); + continue; + } + } + + /* invalidate lro rings */ + for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) + mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i)); + + /* disable HW LRO */ + mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0); + } + + static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip) + { + u32 reg_val; + + reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); + + /* invalidate the IP setting */ + mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); + + mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx)); + + /* validate the IP setting */ + mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); + } + + static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx) + { + u32 reg_val; + + reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); + + /* invalidate the IP setting */ + mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); + + mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx)); + } + + static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac) + { + int cnt = 0; + int i; + + for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) { + if (mac->hwlro_ip[i]) + cnt++; + } + + return cnt; + } + + static int mtk_hwlro_add_ipaddr(struct net_device *dev, + struct ethtool_rxnfc *cmd) + { + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + int hwlro_idx; + + if ((fsp->flow_type != TCP_V4_FLOW) || + (!fsp->h_u.tcp_ip4_spec.ip4dst) || + (fsp->location > 1)) + return -EINVAL; + + mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst); + hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location; + + mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac); + + mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]); + + return 0; + } + + static int mtk_hwlro_del_ipaddr(struct net_device *dev, + struct ethtool_rxnfc *cmd) + { + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + int hwlro_idx; + + if (fsp->location > 1) + return -EINVAL; + + mac->hwlro_ip[fsp->location] = 0; + hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location; + + mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac); + + mtk_hwlro_inval_ipaddr(eth, hwlro_idx); + + return 0; + } + + static void mtk_hwlro_netdev_disable(struct net_device *dev) + { + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + int i, hwlro_idx; + + for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) { + mac->hwlro_ip[i] = 0; + hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i; + + mtk_hwlro_inval_ipaddr(eth, hwlro_idx); + } + + mac->hwlro_ip_cnt = 0; + } + + static int mtk_hwlro_get_fdir_entry(struct net_device *dev, + struct ethtool_rxnfc *cmd) + { + struct mtk_mac *mac = netdev_priv(dev); + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + + /* only tcp dst ipv4 is meaningful, others are meaningless */ + fsp->flow_type = TCP_V4_FLOW; + fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]); + fsp->m_u.tcp_ip4_spec.ip4dst = 0; + + fsp->h_u.tcp_ip4_spec.ip4src = 0; + fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff; + fsp->h_u.tcp_ip4_spec.psrc = 0; + fsp->m_u.tcp_ip4_spec.psrc = 0xffff; + fsp->h_u.tcp_ip4_spec.pdst = 0; + fsp->m_u.tcp_ip4_spec.pdst = 0xffff; + fsp->h_u.tcp_ip4_spec.tos = 0; + fsp->m_u.tcp_ip4_spec.tos = 0xff; + + return 0; + } + + static int mtk_hwlro_get_fdir_all(struct net_device *dev, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) + { + struct mtk_mac *mac = netdev_priv(dev); + int cnt = 0; + int i; + + for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) { + if (mac->hwlro_ip[i]) { + rule_locs[cnt] = i; + cnt++; + } + } + + cmd->rule_cnt = cnt; + + return 0; + } + + static netdev_features_t mtk_fix_features(struct net_device *dev, + netdev_features_t features) + { + if (!(features & NETIF_F_LRO)) { + struct mtk_mac *mac = netdev_priv(dev); + int ip_cnt = mtk_hwlro_get_ip_cnt(mac); + + if (ip_cnt) { + netdev_info(dev, "RX flow is programmed, LRO should keep on\n"); + + features |= NETIF_F_LRO; + } + } + + return features; + } + + static int mtk_set_features(struct net_device *dev, netdev_features_t features) + { + int err = 0; + + if (!((dev->features ^ features) & NETIF_F_LRO)) + return 0; + + if (!(features & NETIF_F_LRO)) + mtk_hwlro_netdev_disable(dev); + + return err; + } + /* wait for DMA to finish whatever it is doing before we start using it again */ static int mtk_dma_busy_wait(struct mtk_eth *eth) { @@@ -1218,6 -1553,7 +1553,7 @@@ static int mtk_dma_init(struct mtk_eth *eth) { int err; + u32 i;
if (mtk_dma_busy_wait(eth)) return -EBUSY; @@@ -1233,10 -1569,21 +1569,21 @@@ if (err) return err;
- err = mtk_rx_alloc(eth); + err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL); if (err) return err;
+ if (eth->hwlro) { + for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) { + err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO); + if (err) + return err; + } + err = mtk_hwlro_rx_init(eth); + if (err) + return err; + } + /* Enable random early drop and set drop threshold automatically */ mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN, MTK_QDMA_FC_THRES); @@@ -1261,7 -1608,14 +1608,14 @@@ static void mtk_dma_free(struct mtk_et eth->phy_scratch_ring = 0; } mtk_tx_clean(eth); - mtk_rx_clean(eth); + mtk_rx_clean(eth, 0); + + if (eth->hwlro) { + mtk_hwlro_rx_uninit(eth); + for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) + mtk_rx_clean(eth, i); + } + kfree(eth->scratch_head); }
@@@ -1282,7 -1636,7 +1636,7 @@@ static irqreturn_t mtk_handle_irq_rx(in
if (likely(napi_schedule_prep(&eth->rx_napi))) { __napi_schedule(&eth->rx_napi); - mtk_irq_disable(eth, MTK_RX_DONE_INT); + mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); }
return IRQ_HANDLED; @@@ -1294,7 -1648,7 +1648,7 @@@ static irqreturn_t mtk_handle_irq_tx(in
if (likely(napi_schedule_prep(&eth->tx_napi))) { __napi_schedule(&eth->tx_napi); - mtk_irq_disable(eth, MTK_TX_DONE_INT); + mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); }
return IRQ_HANDLED; @@@ -1305,11 -1659,12 +1659,12 @@@ static void mtk_poll_controller(struct { struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; - u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;
- mtk_irq_disable(eth, int_mask); + mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); + mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); mtk_handle_irq_rx(eth->irq[2], dev); - mtk_irq_enable(eth, int_mask); + mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); + mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); } #endif
@@@ -1324,11 -1679,15 +1679,15 @@@ static int mtk_start_dma(struct mtk_et }
mtk_w32(eth, - MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN | - MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS | - MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO, + MTK_TX_WB_DDONE | MTK_TX_DMA_EN | + MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO, MTK_QDMA_GLO_CFG);
+ mtk_w32(eth, + MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | + MTK_RX_BT_32DWORDS | MTK_MULTI_EN, + MTK_PDMA_GLO_CFG); + return 0; }
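This is the heart of the PDMA conversion: TX stays on the QDMA engine while RX moves to PDMA, each with its own global-config register, and MTK_MULTI_EN in MTK_PDMA_GLO_CFG enables the multiple RX rings that hardware LRO needs. The same split runs through the interrupt plumbing, which is why mtk_irq_enable()/mtk_irq_disable() now take the mask register (MTK_QDMA_INT_MASK for TX, MTK_PDMA_INT_MASK for RX) as an explicit argument.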
@@@ -1346,7 -1705,8 +1705,8 @@@ static int mtk_open(struct net_device *
napi_enable(&eth->tx_napi); napi_enable(&eth->rx_napi); - mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT); + mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); + mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); } atomic_inc(&eth->dma_refcnt);
@@@ -1358,16 -1718,15 +1718,15 @@@
static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg) { - unsigned long flags; u32 val; int i;
/* stop the dma engine */ - spin_lock_irqsave(&eth->page_lock, flags); + spin_lock_bh(&eth->page_lock); val = mtk_r32(eth, glo_cfg); mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN), glo_cfg); - spin_unlock_irqrestore(&eth->page_lock, flags); + spin_unlock_bh(&eth->page_lock);
/* wait for dma stop */ for (i = 0; i < 10; i++) { @@@ -1392,7 -1751,8 +1751,8 @@@ static int mtk_stop(struct net_device * if (!atomic_dec_and_test(&eth->dma_refcnt)) return 0;
- mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT); + mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT); + mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT); napi_disable(&eth->tx_napi); napi_disable(&eth->rx_napi);
@@@ -1403,15 -1763,44 +1763,44 @@@ return 0; }
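The switch from spin_lock_irqsave() to spin_lock_bh() in mtk_stop_dma() is presumably safe because page_lock is only ever taken from process context and softirq paths (NAPI, xmit), never from hard-IRQ context, so disabling bottom halves is the cheaper exclusion that still suffices.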
- static int __init mtk_hw_init(struct mtk_eth *eth) + static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits) { - int err, i; + regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, + reset_bits, + reset_bits); + + usleep_range(1000, 1100); + regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, + reset_bits, + ~reset_bits); + mdelay(10); + } + + static int mtk_hw_init(struct mtk_eth *eth) + { + int i, val;
- /* reset the frame engine */ - reset_control_assert(eth->rstc); - usleep_range(10, 20); - reset_control_deassert(eth->rstc); - usleep_range(10, 20); + if (test_and_set_bit(MTK_HW_INIT, &eth->state)) + return 0; + + pm_runtime_enable(eth->dev); + pm_runtime_get_sync(eth->dev); + + clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]); + clk_prepare_enable(eth->clks[MTK_CLK_ESW]); + clk_prepare_enable(eth->clks[MTK_CLK_GP1]); + clk_prepare_enable(eth->clks[MTK_CLK_GP2]); + ethsys_reset(eth, RSTCTRL_FE); + ethsys_reset(eth, RSTCTRL_PPE); + + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->mac[i]) + continue; + val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id); + val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id); + } + regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
/* Set GE2 driving and slew rate */ regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00); @@@ -1431,22 -1820,11 +1820,11 @@@ /* Enable RX VLan Offloading */ mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
- err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0, - dev_name(eth->dev), eth); - if (err) - return err; - err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0, - dev_name(eth->dev), eth); - if (err) - return err; - - err = mtk_mdio_init(eth); - if (err) - return err; - /* disable delay and normal interrupt */ mtk_w32(eth, 0, MTK_QDMA_DELAY_INT); - mtk_irq_disable(eth, ~0); + mtk_w32(eth, 0, MTK_PDMA_DELAY_INT); + mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0); + mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0); mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); mtk_w32(eth, 0, MTK_RST_GL);
@@@ -1460,9 -1838,8 +1838,8 @@@ for (i = 0; i < 2; i++) { u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
- /* setup the forward port to send frame to QDMA */ + /* setup the forward port to send frame to PDMA */ val &= ~0xffff; - val |= 0x5555;
/* Enable RX checksum */ val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN; @@@ -1474,6 -1851,22 +1851,22 @@@ return 0; }
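Note the forwarding change in mtk_hw_init(): the low bits of MTK_GDMA_FWD_CFG are now only cleared, no longer ORed with 0x5555, which per the updated comment steers received frames to the PDMA engine rather than QDMA, matching the new RX path.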
+ static int mtk_hw_deinit(struct mtk_eth *eth) + { + if (!test_and_clear_bit(MTK_HW_INIT, &eth->state)) + return 0; + + clk_disable_unprepare(eth->clks[MTK_CLK_GP2]); + clk_disable_unprepare(eth->clks[MTK_CLK_GP1]); + clk_disable_unprepare(eth->clks[MTK_CLK_ESW]); + clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]); + + pm_runtime_put_sync(eth->dev); + pm_runtime_disable(eth->dev); + + return 0; + } + static int __init mtk_init(struct net_device *dev) { struct mtk_mac *mac = netdev_priv(dev); @@@ -1501,7 -1894,11 +1894,11 @@@ static void mtk_uninit(struct net_devic struct mtk_eth *eth = mac->hw;
phy_disconnect(mac->phy_dev); - mtk_irq_disable(eth, ~0); + mtk_mdio_cleanup(eth); + mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0); + mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0); + free_irq(eth->irq[1], dev); + free_irq(eth->irq[2], dev); }
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) @@@ -1528,6 -1925,12 +1925,12 @@@ static void mtk_pending_work(struct wor
rtnl_lock();
+ dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__); + + while (test_and_set_bit_lock(MTK_RESETTING, &eth->state)) + cpu_relax(); + + dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__); /* stop all devices to make sure that dma is properly shut down */ for (i = 0; i < MTK_MAC_COUNT; i++) { if (!eth->netdev[i]) @@@ -1535,6 -1938,27 +1938,27 @@@ mtk_stop(eth->netdev[i]); __set_bit(i, &restart); } + dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__); + + /* restart underlying hardware such as power, clock, pin mux + * and the connected phy + */ + mtk_hw_deinit(eth); + + if (eth->dev->pins) + pinctrl_select_state(eth->dev->pins->p, + eth->dev->pins->default_state); + mtk_hw_init(eth); + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->mac[i] || + of_phy_is_fixed_link(eth->mac[i]->of_node)) + continue; + err = phy_init_hw(eth->mac[i]->phy_dev); + if (err) + dev_err(eth->dev, "%s: PHY init failed.\n", + eth->netdev[i]->name); + }
/* restart DMA and enable IRQs */ for (i = 0; i < MTK_MAC_COUNT; i++) { @@@ -1547,20 -1971,44 +1971,44 @@@ dev_close(eth->netdev[i]); } } + + dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__); + + clear_bit_unlock(MTK_RESETTING, &eth->state); + rtnl_unlock(); }
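mtk_pending_work() now brackets the whole reset with an MTK_RESETTING flag. test_and_set_bit_lock()/clear_bit_unlock() give the bit acquire/release semantics, so a fast path that observes the bit clear also observes the re-initialized hardware state; the fast paths never block on it, they just bail out. Distilled, the protocol scattered through this patch looks like:

	/* reset worker: exclusive ownership of the device */
	while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
		cpu_relax();
	/* ... tear down and re-init hardware ... */
	clear_bit_unlock(MTK_RESETTING, &eth->state);

	/* fast paths (rx poll, ethtool ops): opt out instead of waiting */
	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
		return -EBUSY;	/* or drop the frame, as in mtk_poll_rx() */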
- static int mtk_cleanup(struct mtk_eth *eth) + static int mtk_free_dev(struct mtk_eth *eth) { int i;
for (i = 0; i < MTK_MAC_COUNT; i++) { if (!eth->netdev[i]) continue; + free_netdev(eth->netdev[i]); + } + + return 0; + }
+ static int mtk_unreg_dev(struct mtk_eth *eth) + { + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i]) + continue; unregister_netdev(eth->netdev[i]); - free_netdev(eth->netdev[i]); } + + return 0; + } + + static int mtk_cleanup(struct mtk_eth *eth) + { + mtk_unreg_dev(eth); + mtk_free_dev(eth); cancel_work_sync(&eth->pending_work);
return 0; @@@ -1572,6 -2020,9 +2020,9 @@@ static int mtk_get_settings(struct net_ struct mtk_mac *mac = netdev_priv(dev); int err;
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) + return -EBUSY; + err = phy_read_status(mac->phy_dev); if (err) return -ENODEV; @@@ -1622,6 -2073,9 +2073,9 @@@ static int mtk_nway_reset(struct net_de { struct mtk_mac *mac = netdev_priv(dev);
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) + return -EBUSY; + return genphy_restart_aneg(mac->phy_dev); }
@@@ -1630,6 -2084,9 +2084,9 @@@ static u32 mtk_get_link(struct net_devi struct mtk_mac *mac = netdev_priv(dev); int err;
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) + return -EBUSY; + err = genphy_update_link(mac->phy_dev); if (err) return ethtool_op_get_link(dev); @@@ -1670,6 -2127,9 +2127,9 @@@ static void mtk_get_ethtool_stats(struc unsigned int start; int i;
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) + return; + if (netif_running(dev) && netif_device_present(dev)) { if (spin_trylock(&hwstats->stats_lock)) { mtk_stats_update_mac(mac); @@@ -1678,7 -2138,7 +2138,7 @@@ }
do { - data_src = (u64*)hwstats; + data_src = (u64 *)hwstats; data_dst = data; start = u64_stats_fetch_begin_irq(&hwstats->syncp);
@@@ -1687,7 -2147,63 +2147,63 @@@ } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start)); }
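The do/while here is the standard u64_stats seqcount pattern: the reader snapshots the counters and retries if a writer overlapped. For reference, the writer side this pairs with (inside mtk_stats_update_mac(); the field name is illustrative) is simply:

	u64_stats_update_begin(&hwstats->syncp);
	hwstats->rx_packets += pkts;	/* 64-bit counters updated under the seqcount */
	u64_stats_update_end(&hwstats->syncp);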
- static struct ethtool_ops mtk_ethtool_ops = { + static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) + { + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + if (dev->features & NETIF_F_LRO) { + cmd->data = MTK_MAX_RX_RING_NUM; + ret = 0; + } + break; + case ETHTOOL_GRXCLSRLCNT: + if (dev->features & NETIF_F_LRO) { + struct mtk_mac *mac = netdev_priv(dev); + + cmd->rule_cnt = mac->hwlro_ip_cnt; + ret = 0; + } + break; + case ETHTOOL_GRXCLSRULE: + if (dev->features & NETIF_F_LRO) + ret = mtk_hwlro_get_fdir_entry(dev, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + if (dev->features & NETIF_F_LRO) + ret = mtk_hwlro_get_fdir_all(dev, cmd, + rule_locs); + break; + default: + break; + } + + return ret; + } + + static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) + { + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + if (dev->features & NETIF_F_LRO) + ret = mtk_hwlro_add_ipaddr(dev, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + if (dev->features & NETIF_F_LRO) + ret = mtk_hwlro_del_ipaddr(dev, cmd); + break; + default: + break; + } + + return ret; + } + + static const struct ethtool_ops mtk_ethtool_ops = { .get_settings = mtk_get_settings, .set_settings = mtk_set_settings, .get_drvinfo = mtk_get_drvinfo, @@@ -1698,6 -2214,8 +2214,8 @@@ .get_strings = mtk_get_strings, .get_sset_count = mtk_get_sset_count, .get_ethtool_stats = mtk_get_ethtool_stats, + .get_rxnfc = mtk_get_rxnfc, + .set_rxnfc = mtk_set_rxnfc, };
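With get_rxnfc/set_rxnfc wired up, the two LRO destination IPs per MAC become manageable from userspace. The driver only accepts TCP_V4_FLOW rules with a destination IP at location 0 or 1, so usage would look something like the following (interface name and exact flag spelling are illustrative and depend on the ethtool version):

	ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.100 loc 0
	ethtool -N eth0 delete 0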
static const struct net_device_ops mtk_netdev_ops = { @@@ -1712,6 -2230,8 +2230,8 @@@ .ndo_change_mtu = eth_change_mtu, .ndo_tx_timeout = mtk_tx_timeout, .ndo_get_stats64 = mtk_get_stats64, + .ndo_fix_features = mtk_fix_features, + .ndo_set_features = mtk_set_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mtk_poll_controller, #endif @@@ -1750,6 -2270,9 +2270,9 @@@ static int mtk_add_mac(struct mtk_eth * mac->hw = eth; mac->of_node = np;
+ memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip)); + mac->hwlro_ip_cnt = 0; + mac->hw_stats = devm_kzalloc(eth->dev, sizeof(*mac->hw_stats), GFP_KERNEL); @@@ -1766,21 -2289,17 +2289,17 @@@ eth->netdev[id]->watchdog_timeo = 5 * HZ; eth->netdev[id]->netdev_ops = &mtk_netdev_ops; eth->netdev[id]->base_addr = (unsigned long)eth->base; + + eth->netdev[id]->hw_features = MTK_HW_FEATURES; + if (eth->hwlro) + eth->netdev[id]->hw_features |= NETIF_F_LRO; + eth->netdev[id]->vlan_features = MTK_HW_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); eth->netdev[id]->features |= MTK_HW_FEATURES; eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
- err = register_netdev(eth->netdev[id]); - if (err) { - dev_err(eth->dev, "error bringing up device\n"); - goto free_netdev; - } eth->netdev[id]->irq = eth->irq[0]; - netif_info(eth, probe, eth->netdev[id], - "mediatek frame engine at 0x%08lx, irq %d\n", - eth->netdev[id]->base_addr, eth->irq[0]); - return 0;
free_netdev: @@@ -1827,11 -2346,7 +2346,7 @@@ static int mtk_probe(struct platform_de return PTR_ERR(eth->pctl); }
- eth->rstc = devm_reset_control_get(&pdev->dev, "eth"); - if (IS_ERR(eth->rstc)) { - dev_err(&pdev->dev, "no eth reset found\n"); - return PTR_ERR(eth->rstc); - } + eth->hwlro = of_property_read_bool(pdev->dev.of_node, "mediatek,hwlro");
for (i = 0; i < 3; i++) { eth->irq[i] = platform_get_irq(pdev, i); @@@ -1850,11 -2365,6 +2365,6 @@@ } }
- clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]); - clk_prepare_enable(eth->clks[MTK_CLK_ESW]); - clk_prepare_enable(eth->clks[MTK_CLK_GP1]); - clk_prepare_enable(eth->clks[MTK_CLK_GP2]); - eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); INIT_WORK(&eth->pending_work, mtk_pending_work);
@@@ -1872,7 -2382,35 +2382,35 @@@
err = mtk_add_mac(eth, mac_np); if (err) - goto err_free_dev; + goto err_deinit_hw; + } + + err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0, + dev_name(eth->dev), eth); + if (err) + goto err_free_dev; + + err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0, + dev_name(eth->dev), eth); + if (err) + goto err_free_dev; + + err = mtk_mdio_init(eth); + if (err) + goto err_free_dev; + + for (i = 0; i < MTK_MAX_DEVS; i++) { + if (!eth->netdev[i]) + continue; + + err = register_netdev(eth->netdev[i]); + if (err) { + dev_err(eth->dev, "error bringing up device\n"); + goto err_deinit_mdio; + } else + netif_info(eth, probe, eth->netdev[i], + "mediatek frame engine at 0x%08lx, irq %d\n", + eth->netdev[i]->base_addr, eth->irq[0]); }
/* we run 2 devices on the same DMA ring so we need a dummy device @@@ -1888,8 -2426,13 +2426,13 @@@
return 0;
+ err_deinit_mdio: + mtk_mdio_cleanup(eth); err_free_dev: - mtk_cleanup(eth); + mtk_free_dev(eth); + err_deinit_hw: + mtk_hw_deinit(eth); + return err; }
@@@ -1905,16 -2448,11 +2448,11 @@@ static int mtk_remove(struct platform_d mtk_stop(eth->netdev[i]); }
- clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]); - clk_disable_unprepare(eth->clks[MTK_CLK_ESW]); - clk_disable_unprepare(eth->clks[MTK_CLK_GP1]); - clk_disable_unprepare(eth->clks[MTK_CLK_GP2]); + mtk_hw_deinit(eth);
netif_napi_del(&eth->tx_napi); netif_napi_del(&eth->rx_napi); mtk_cleanup(eth); - mtk_mdio_cleanup(eth); - platform_set_drvdata(pdev, NULL);
return 0; } @@@ -1923,7 -2461,6 +2461,7 @@@ const struct of_device_id of_mtk_match[ { .compatible = "mediatek,mt7623-eth" }, {}, }; +MODULE_DEVICE_TABLE(of, of_mtk_match);
static struct platform_driver mtk_driver = { .probe = mtk_probe, diff --combined drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index b247949,4927494..f75f864 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@@ -81,19 -81,12 +81,12 @@@ enum MC_ADDR_CHANGE | \ PROMISC_CHANGE)
- int esw_offloads_init(struct mlx5_eswitch *esw, int nvports); - void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports); - static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, u32 events_mask) { - int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)]; - int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)]; + int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0}; + int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0}; void *nic_vport_ctx; - int err; - - memset(out, 0, sizeof(out)); - memset(in, 0, sizeof(in));
MLX5_SET(modify_nic_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); @@@ -116,99 -109,27 +109,27 @@@ MLX5_SET(nic_vport_context, nic_vport_ctx, event_on_promisc_change, 1);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); - if (err) - goto ex; - err = mlx5_cmd_status_to_err_v2(out); - if (err) - goto ex; - return 0; - ex: - return err; + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); }
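A pattern for the rest of this file: the explicit status translation that callers used to perform through mlx5_cmd_status_to_err_v2() and mlx5_cmd_exec_check_status() is evidently folded into mlx5_cmd_exec() itself now, so the err/goto scaffolding collapses into a single tail call.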
/* E-Switch vport context HW commands */ - static int query_esw_vport_context_cmd(struct mlx5_core_dev *mdev, u32 vport, - u32 *out, int outlen) - { - u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)]; - - memset(in, 0, sizeof(in)); - - MLX5_SET(query_nic_vport_context_in, in, opcode, - MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT); - - MLX5_SET(query_esw_vport_context_in, in, vport_number, vport); - if (vport) - MLX5_SET(query_esw_vport_context_in, in, other_vport, 1); - - return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen); - } - - static int query_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport, - u16 *vlan, u8 *qos) - { - u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)]; - int err; - bool cvlan_strip; - bool cvlan_insert; - - memset(out, 0, sizeof(out)); - - *vlan = 0; - *qos = 0; - - if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) || - !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist)) - return -ENOTSUPP; - - err = query_esw_vport_context_cmd(dev, vport, out, sizeof(out)); - if (err) - goto out; - - cvlan_strip = MLX5_GET(query_esw_vport_context_out, out, - esw_vport_context.vport_cvlan_strip); - - cvlan_insert = MLX5_GET(query_esw_vport_context_out, out, - esw_vport_context.vport_cvlan_insert); - - if (cvlan_strip || cvlan_insert) { - *vlan = MLX5_GET(query_esw_vport_context_out, out, - esw_vport_context.cvlan_id); - *qos = MLX5_GET(query_esw_vport_context_out, out, - esw_vport_context.cvlan_pcp); - } - - esw_debug(dev, "Query Vport[%d] cvlan: VLAN %d qos=%d\n", - vport, *vlan, *qos); - out: - return err; - } - static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport, void *in, int inlen) { - u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)]; - - memset(out, 0, sizeof(out)); + u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
+ MLX5_SET(modify_esw_vport_context_in, in, opcode, + MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); if (vport) MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1); - - MLX5_SET(modify_esw_vport_context_in, in, opcode, - MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); - - return mlx5_cmd_exec_check_status(dev, in, inlen, - out, sizeof(out)); + return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); }
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport, u16 vlan, u8 qos, bool set) { - u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) || !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist)) @@@ -216,7 -137,6 +137,6 @@@
esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n", vport, vlan, qos, set); - if (set) { MLX5_SET(modify_esw_vport_context_in, in, esw_vport_context.vport_cvlan_strip, 1); @@@ -241,13 -161,10 +161,10 @@@ static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index, u8 *mac, u8 vlan_valid, u16 vlan) { - u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)]; - u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)]; + u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0}; u8 *in_mac_addr;
- memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); - MLX5_SET(set_l2_table_entry_in, in, opcode, MLX5_CMD_OP_SET_L2_TABLE_ENTRY); MLX5_SET(set_l2_table_entry_in, in, table_index, index); @@@ -257,23 -174,18 +174,18 @@@ in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address); ether_addr_copy(&in_mac_addr[2], mac);
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), - out, sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); }
static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index) { - u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)]; - u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0};
MLX5_SET(delete_l2_table_entry_in, in, opcode, MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY); MLX5_SET(delete_l2_table_entry_in, in, table_index, index); - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), - out, sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); }
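The other recurring cleanup here swaps memset() pairs for `= {0}` initializers. The two forms are equivalent for these command arrays; the initializer just zeroes at declaration. A standalone illustration in plain C, nothing driver-specific:

	#include <assert.h>
	#include <string.h>

	int main(void)
	{
		unsigned int a[8] = {0};	/* zeroed at declaration */
		unsigned int b[8];

		memset(b, 0, sizeof(b));	/* zeroed explicitly, as before */
		assert(memcmp(a, b, sizeof(a)) == 0);
		return 0;
	}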
static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix) @@@ -340,7 -252,7 +252,7 @@@ __esw_fdb_set_vport_rule(struct mlx5_es
spec = mlx5_vzalloc(sizeof(*spec)); if (!spec) { - pr_warn("FDB: Failed to alloc match parameters\n"); + esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n"); return NULL; } dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, @@@ -374,8 -286,8 +286,8 @@@ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 0, &dest); if (IS_ERR(flow_rule)) { - pr_warn( - "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", + esw_warn(esw->dev, + "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", dmac_v, dmac_c, vport, PTR_ERR(flow_rule)); flow_rule = NULL; } @@@ -955,7 -867,7 +867,7 @@@ static void esw_update_vport_rx_mode(st esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n", vport_num, promisc_all, promisc_mc);
- if (!vport->trusted || !vport->enabled) { + if (!vport->info.trusted || !vport->enabled) { promisc_uc = 0; promisc_mc = 0; promisc_all = 0; @@@ -1291,30 -1203,20 +1203,20 @@@ static int esw_vport_ingress_config(str struct mlx5_vport *vport) { struct mlx5_flow_spec *spec; - u8 smac[ETH_ALEN]; int err = 0; u8 *smac_v;
- if (vport->spoofchk) { - err = mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, smac); - if (err) { - esw_warn(esw->dev, - "vport[%d] configure ingress rules failed, query smac failed, err(%d)\n", - vport->vport, err); - return err; - } + if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) { + mlx5_core_warn(esw->dev, + "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n", + vport->vport); + return -EPERM;
- if (!is_valid_ether_addr(smac)) { - mlx5_core_warn(esw->dev, - "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n", - vport->vport); - return -EPERM; - } }
esw_vport_cleanup_ingress_rules(esw, vport);
- if (!vport->vlan && !vport->qos && !vport->spoofchk) { + if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) { esw_vport_disable_ingress_acl(esw, vport); return 0; } @@@ -1323,7 -1225,7 +1225,7 @@@
esw_debug(esw->dev, "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n", - vport->vport, vport->vlan, vport->qos); + vport->vport, vport->info.vlan, vport->info.qos);
spec = mlx5_vzalloc(sizeof(*spec)); if (!spec) { @@@ -1333,16 -1235,16 +1235,16 @@@ goto out; }
- if (vport->vlan || vport->qos) + if (vport->info.vlan || vport->info.qos) MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
- if (vport->spoofchk) { + if (vport->info.spoofchk) { MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16); MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0); smac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.smac_47_16); - ether_addr_copy(smac_v, smac); + ether_addr_copy(smac_v, vport->info.mac); }
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; @@@ -1352,8 -1254,9 +1254,9 @@@ 0, NULL); if (IS_ERR(vport->ingress.allow_rule)) { err = PTR_ERR(vport->ingress.allow_rule); - pr_warn("vport[%d] configure ingress allow rule, err(%d)\n", - vport->vport, err); + esw_warn(esw->dev, + "vport[%d] configure ingress allow rule, err(%d)\n", + vport->vport, err); vport->ingress.allow_rule = NULL; goto out; } @@@ -1365,8 -1268,9 +1268,9 @@@ 0, NULL); if (IS_ERR(vport->ingress.drop_rule)) { err = PTR_ERR(vport->ingress.drop_rule); - pr_warn("vport[%d] configure ingress drop rule, err(%d)\n", - vport->vport, err); + esw_warn(esw->dev, + "vport[%d] configure ingress drop rule, err(%d)\n", + vport->vport, err); vport->ingress.drop_rule = NULL; goto out; } @@@ -1386,7 -1290,7 +1290,7 @@@ static int esw_vport_egress_config(stru
esw_vport_cleanup_egress_rules(esw, vport);
- if (!vport->vlan && !vport->qos) { + if (!vport->info.vlan && !vport->info.qos) { esw_vport_disable_egress_acl(esw, vport); return 0; } @@@ -1395,7 -1299,7 +1299,7 @@@
esw_debug(esw->dev, "vport[%d] configure egress rules, vlan(%d) qos(%d)\n", - vport->vport, vport->vlan, vport->qos); + vport->vport, vport->info.vlan, vport->info.qos);
spec = mlx5_vzalloc(sizeof(*spec)); if (!spec) { @@@ -1409,7 -1313,7 +1313,7 @@@ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag); MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag); MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); - MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->vlan); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; vport->egress.allowed_vlan = @@@ -1418,8 -1322,9 +1322,9 @@@ 0, NULL); if (IS_ERR(vport->egress.allowed_vlan)) { err = PTR_ERR(vport->egress.allowed_vlan); - pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n", - vport->vport, err); + esw_warn(esw->dev, + "vport[%d] configure egress allowed vlan rule failed, err(%d)\n", + vport->vport, err); vport->egress.allowed_vlan = NULL; goto out; } @@@ -1432,8 -1337,9 +1337,9 @@@ 0, NULL); if (IS_ERR(vport->egress.drop_rule)) { err = PTR_ERR(vport->egress.drop_rule); - pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n", - vport->vport, err); + esw_warn(esw->dev, + "vport[%d] configure egress drop rule failed, err(%d)\n", + vport->vport, err); vport->egress.drop_rule = NULL; } out: @@@ -1441,6 -1347,41 +1347,41 @@@ return err; }
+ static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN]) + { + ((u8 *)node_guid)[7] = mac[0]; + ((u8 *)node_guid)[6] = mac[1]; + ((u8 *)node_guid)[5] = mac[2]; + ((u8 *)node_guid)[4] = 0xff; + ((u8 *)node_guid)[3] = 0xfe; + ((u8 *)node_guid)[2] = mac[3]; + ((u8 *)node_guid)[1] = mac[4]; + ((u8 *)node_guid)[0] = mac[5]; + } + + static void esw_apply_vport_conf(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) + { + int vport_num = vport->vport; + + if (!vport_num) + return; + + mlx5_modify_vport_admin_state(esw->dev, + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + vport_num, + vport->info.link_state); + mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, vport->info.mac); + mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, vport->info.node_guid); + modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos, + (vport->info.vlan || vport->info.qos)); + + /* Only legacy mode needs ACLs */ + if (esw->mode == SRIOV_LEGACY) { + esw_vport_ingress_config(esw, vport); + esw_vport_egress_config(esw, vport); + } + } static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, int enable_events) { @@@ -1451,23 -1392,17 +1392,17 @@@
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
- /* Only VFs need ACLs for VST and spoofchk filtering */ - if (vport_num && esw->mode == SRIOV_LEGACY) { - esw_vport_ingress_config(esw, vport); - esw_vport_egress_config(esw, vport); - } - - mlx5_modify_vport_admin_state(esw->dev, - MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, - vport_num, - MLX5_ESW_VPORT_ADMIN_STATE_AUTO); + /* Restore old vport configuration */ + esw_apply_vport_conf(esw, vport);
/* Sync with current vport context */ vport->enabled_events = enable_events; vport->enabled = true;
/* only PF is trusted by default */ - vport->trusted = (vport_num) ? false : true; + if (!vport_num) + vport->info.trusted = true; + esw_vport_change_handle_locked(vport);
esw->enabled_vports++; @@@ -1487,11 -1422,6 +1422,6 @@@ static void esw_disable_vport(struct ml vport->enabled = false;
synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC)); - - mlx5_modify_vport_admin_state(esw->dev, - MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, - vport_num, - MLX5_ESW_VPORT_ADMIN_STATE_DOWN); /* Wait for current already scheduled events to complete */ flush_workqueue(esw->work_queue); /* Disable events from this vport */ @@@ -1503,7 -1433,12 +1433,12 @@@ */ esw_vport_change_handle_locked(vport); vport->enabled_events = 0; + if (vport_num && esw->mode == SRIOV_LEGACY) { + mlx5_modify_vport_admin_state(esw->dev, + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + vport_num, + MLX5_ESW_VPORT_ADMIN_STATE_DOWN); esw_vport_disable_egress_acl(esw, vport); esw_vport_disable_ingress_acl(esw, vport); } @@@ -1554,7 -1489,6 +1489,7 @@@ int mlx5_eswitch_enable_sriov(struct ml
abort: esw_enable_vport(esw, 0, UC_ADDR_CHANGE); + esw->mode = SRIOV_NONE; return err; }
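The node_guid_gen_from_mac() helper that moved earlier in this file builds the GUID the classic EUI-64 way, splitting the MAC and inserting ff:fe in the middle: 00:11:22:33:44:55 becomes the byte sequence 00:11:22:ff:fe:33:44:55. Note that it does not flip the universal/local bit, so this is the plain EUI-64 mapping rather than the modified EUI-64 form used for IPv6 interface identifiers.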
@@@ -1590,6 -1524,25 +1525,25 @@@ void mlx5_eswitch_disable_sriov(struct esw_enable_vport(esw, 0, UC_ADDR_CHANGE); }
+ void mlx5_eswitch_attach(struct mlx5_eswitch *esw) + { + if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || + MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return; + + esw_enable_vport(esw, 0, UC_ADDR_CHANGE); + /* VF Vports will be enabled when SRIOV is enabled */ + } + + void mlx5_eswitch_detach(struct mlx5_eswitch *esw) + { + if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || + MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return; + + esw_disable_vport(esw, 0); + } + int mlx5_eswitch_init(struct mlx5_core_dev *dev) { int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); @@@ -1657,6 -1610,7 +1611,7 @@@ struct mlx5_vport *vport = &esw->vports[vport_num];
vport->vport = vport_num; + vport->info.link_state = MLX5_ESW_VPORT_ADMIN_STATE_AUTO; vport->dev = dev; INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler); @@@ -1667,8 -1621,6 +1622,6 @@@ esw->mode = SRIOV_NONE;
dev->priv.eswitch = esw; - esw_enable_vport(esw, 0, UC_ADDR_CHANGE); - /* VF Vports will be enabled when SRIOV is enabled */ return 0; abort: if (esw->work_queue) @@@ -1687,7 -1639,6 +1640,6 @@@ void mlx5_eswitch_cleanup(struct mlx5_e return;
esw_info(esw->dev, "cleanup\n"); - esw_disable_vport(esw, 0);
esw->dev->priv.eswitch = NULL; destroy_workqueue(esw->work_queue); @@@ -1720,18 -1671,6 +1672,6 @@@ void mlx5_eswitch_vport_event(struct ml (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev)) #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
- static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN]) - { - ((u8 *)node_guid)[7] = mac[0]; - ((u8 *)node_guid)[6] = mac[1]; - ((u8 *)node_guid)[5] = mac[2]; - ((u8 *)node_guid)[4] = 0xff; - ((u8 *)node_guid)[3] = 0xfe; - ((u8 *)node_guid)[2] = mac[3]; - ((u8 *)node_guid)[1] = mac[4]; - ((u8 *)node_guid)[0] = mac[5]; - } - int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, int vport, u8 mac[ETH_ALEN]) { @@@ -1744,13 -1683,15 +1684,15 @@@ if (!LEGAL_VPORT(esw, vport)) return -EINVAL;
+ mutex_lock(&esw->state_lock); evport = &esw->vports[vport];
- if (evport->spoofchk && !is_valid_ether_addr(mac)) { + if (evport->info.spoofchk && !is_valid_ether_addr(mac)) { mlx5_core_warn(esw->dev, "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n", vport); - return -EPERM; + err = -EPERM; + goto unlock; }
err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac); @@@ -1758,7 -1699,7 +1700,7 @@@ mlx5_core_warn(esw->dev, "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n", vport, err); - return err; + goto unlock; }
node_guid_gen_from_mac(&node_guid, mac); @@@ -1768,9 -1709,12 +1710,12 @@@ "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n", vport, err);
- mutex_lock(&esw->state_lock); + ether_addr_copy(evport->info.mac, mac); + evport->info.node_guid = node_guid; if (evport->enabled && esw->mode == SRIOV_LEGACY) err = esw_vport_ingress_config(esw, evport); + + unlock: mutex_unlock(&esw->state_lock); return err; } @@@ -1778,22 -1722,38 +1723,38 @@@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, int vport, int link_state) { + struct mlx5_vport *evport; + int err = 0; + if (!ESW_ALLOWED(esw)) return -EPERM; if (!LEGAL_VPORT(esw, vport)) return -EINVAL;
- return mlx5_modify_vport_admin_state(esw->dev, - MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, - vport, link_state); + mutex_lock(&esw->state_lock); + evport = &esw->vports[vport]; + + err = mlx5_modify_vport_admin_state(esw->dev, + MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, + vport, link_state); + if (err) { + mlx5_core_warn(esw->dev, + "Failed to set vport %d link state, err = %d", + vport, err); + goto unlock; + } + + evport->info.link_state = link_state; + + unlock: + mutex_unlock(&esw->state_lock); + return 0; }
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, int vport, struct ifla_vf_info *ivi) { struct mlx5_vport *evport; - u16 vlan; - u8 qos;
if (!ESW_ALLOWED(esw)) return -EPERM; @@@ -1805,14 -1765,14 +1766,14 @@@ memset(ivi, 0, sizeof(*ivi)); ivi->vf = vport - 1;
- mlx5_query_nic_vport_mac_address(esw->dev, vport, ivi->mac); - ivi->linkstate = mlx5_query_vport_admin_state(esw->dev, - MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, - vport); - query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos); - ivi->vlan = vlan; - ivi->qos = qos; - ivi->spoofchk = evport->spoofchk; + mutex_lock(&esw->state_lock); + ether_addr_copy(ivi->mac, evport->info.mac); + ivi->linkstate = evport->info.link_state; + ivi->vlan = evport->info.vlan; + ivi->qos = evport->info.qos; + ivi->spoofchk = evport->info.spoofchk; + ivi->trusted = evport->info.trusted; + mutex_unlock(&esw->state_lock);
return 0; } @@@ -1832,23 -1792,23 +1793,23 @@@ int mlx5_eswitch_set_vport_vlan(struct if (vlan || qos) set = 1;
+ mutex_lock(&esw->state_lock); evport = &esw->vports[vport];
err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set); if (err) - return err; + goto unlock;
- mutex_lock(&esw->state_lock); - evport->vlan = vlan; - evport->qos = qos; + evport->info.vlan = vlan; + evport->info.qos = qos; if (evport->enabled && esw->mode == SRIOV_LEGACY) { err = esw_vport_ingress_config(esw, evport); if (err) - goto out; + goto unlock; err = esw_vport_egress_config(esw, evport); }
- out: + unlock: mutex_unlock(&esw->state_lock); return err; } @@@ -1865,16 -1825,14 +1826,14 @@@ int mlx5_eswitch_set_vport_spoofchk(str if (!LEGAL_VPORT(esw, vport)) return -EINVAL;
- evport = &esw->vports[vport]; - mutex_lock(&esw->state_lock); - pschk = evport->spoofchk; - evport->spoofchk = spoofchk; - if (evport->enabled && esw->mode == SRIOV_LEGACY) { + evport = &esw->vports[vport]; + pschk = evport->info.spoofchk; + evport->info.spoofchk = spoofchk; + if (evport->enabled && esw->mode == SRIOV_LEGACY) err = esw_vport_ingress_config(esw, evport); - if (err) - evport->spoofchk = pschk; - } + if (err) + evport->info.spoofchk = pschk; mutex_unlock(&esw->state_lock);
return err; @@@ -1890,10 -1848,9 +1849,9 @@@ int mlx5_eswitch_set_vport_trust(struc if (!LEGAL_VPORT(esw, vport)) return -EINVAL;
- evport = &esw->vports[vport]; - mutex_lock(&esw->state_lock); - evport->trusted = setting; + evport = &esw->vports[vport]; + evport->info.trusted = setting; if (evport->enabled) esw_vport_change_handle_locked(evport); mutex_unlock(&esw->state_lock); @@@ -1906,7 -1863,7 +1864,7 @@@ int mlx5_eswitch_get_vport_stats(struc struct ifla_vf_stats *vf_stats) { int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); - u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)]; + u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0}; int err = 0; u32 *out;
@@@ -1919,8 -1876,6 +1877,6 @@@ if (!out) return -ENOMEM;
- memset(in, 0, sizeof(in)); - MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER); MLX5_SET(query_vport_counter_in, in, op_mod, 0); diff --combined drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 287ade1,7a0415e..113c323 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@@ -41,10 -41,8 +41,8 @@@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft) { - u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)]; - u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
MLX5_SET(set_flow_table_root_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); @@@ -55,30 -53,23 +53,23 @@@ MLX5_SET(set_flow_table_root_in, in, other_vport, 1); }
- memset(out, 0, sizeof(out)); - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); }
int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, u16 vport, + enum fs_flow_table_op_mod op_mod, enum fs_flow_table_type type, unsigned int level, unsigned int log_size, struct mlx5_flow_table *next_ft, unsigned int *table_id) { - u32 out[MLX5_ST_SZ_DW(create_flow_table_out)]; - u32 in[MLX5_ST_SZ_DW(create_flow_table_in)]; + u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0}; int err;
- memset(in, 0, sizeof(in)); - MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
- if (next_ft) { - MLX5_SET(create_flow_table_in, in, table_miss_mode, 1); - MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id); - } MLX5_SET(create_flow_table_in, in, table_type, type); MLX5_SET(create_flow_table_in, in, level, level); MLX5_SET(create_flow_table_in, in, log_size, log_size); @@@ -87,10 -78,23 +78,23 @@@ MLX5_SET(create_flow_table_in, in, other_vport, 1); }
- memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + switch (op_mod) { + case FS_FT_OP_MOD_NORMAL: + if (next_ft) { + MLX5_SET(create_flow_table_in, in, table_miss_mode, 1); + MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id); + } + break; + + case FS_FT_OP_MOD_LAG_DEMUX: + MLX5_SET(create_flow_table_in, in, op_mod, 0x1); + if (next_ft) + MLX5_SET(create_flow_table_in, in, lag_master_next_table_id, + next_ft->id); + break; + }
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (!err) *table_id = MLX5_GET(create_flow_table_out, out, table_id); @@@ -100,11 -104,8 +104,8 @@@ int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft) { - u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)]; - u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
MLX5_SET(destroy_flow_table_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE); @@@ -115,39 -116,49 +116,49 @@@ MLX5_SET(destroy_flow_table_in, in, other_vport, 1); }
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); }
int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, struct mlx5_flow_table *next_ft) { - u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)]; - u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
MLX5_SET(modify_flow_table_in, in, opcode, MLX5_CMD_OP_MODIFY_FLOW_TABLE); MLX5_SET(modify_flow_table_in, in, table_type, ft->type); MLX5_SET(modify_flow_table_in, in, table_id, ft->id); - if (ft->vport) { - MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport); - MLX5_SET(modify_flow_table_in, in, other_vport, 1); - } - MLX5_SET(modify_flow_table_in, in, modify_field_select, - MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID); - if (next_ft) { - MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1); - MLX5_SET(modify_flow_table_in, in, table_miss_id, next_ft->id); + + if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) { + MLX5_SET(modify_flow_table_in, in, modify_field_select, + MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID); + if (next_ft) { + MLX5_SET(modify_flow_table_in, in, + lag_master_next_table_id, next_ft->id); + } else { + MLX5_SET(modify_flow_table_in, in, + lag_master_next_table_id, 0); + } } else { - MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0); + if (ft->vport) { + MLX5_SET(modify_flow_table_in, in, vport_number, + ft->vport); + MLX5_SET(modify_flow_table_in, in, other_vport, 1); + } + MLX5_SET(modify_flow_table_in, in, modify_field_select, + MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID); + if (next_ft) { + MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1); + MLX5_SET(modify_flow_table_in, in, table_miss_id, + next_ft->id); + } else { + MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0); + } }
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); }
int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, @@@ -155,12 -166,10 +166,10 @@@ u32 *in, unsigned int *group_id) { + u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0}; int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - u32 out[MLX5_ST_SZ_DW(create_flow_group_out)]; int err;
- memset(out, 0, sizeof(out)); - MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP); MLX5_SET(create_flow_group_in, in, table_type, ft->type); @@@ -170,13 -179,10 +179,10 @@@ MLX5_SET(create_flow_group_in, in, other_vport, 1); }
- err = mlx5_cmd_exec_check_status(dev, in, - inlen, out, - sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); if (!err) *group_id = MLX5_GET(create_flow_group_out, out, group_id); - return err; }
@@@ -184,11 -190,8 +190,8 @@@ int mlx5_cmd_destroy_flow_group(struct struct mlx5_flow_table *ft, unsigned int group_id) { - u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)]; - u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
MLX5_SET(destroy_flow_group_in, in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP); @@@ -200,8 -203,7 +203,7 @@@ MLX5_SET(destroy_flow_group_in, in, other_vport, 1); }
- return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); }
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, @@@ -212,7 -214,7 +214,7 @@@ { unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct); - u32 out[MLX5_ST_SZ_DW(set_fte_out)]; + u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0}; struct mlx5_flow_rule *dst; void *in_flow_context; void *in_match_value; @@@ -290,11 -292,8 +292,8 @@@ list_size); }
- memset(out, 0, sizeof(out)); - err = mlx5_cmd_exec_check_status(dev, in, inlen, out, - sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); kvfree(in); - return err; }
@@@ -303,7 -302,7 +302,7 @@@ int mlx5_cmd_create_fte(struct mlx5_cor unsigned group_id, struct fs_fte *fte) { - return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte); + return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte); }
int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, @@@ -327,12 -326,8 +326,8 @@@ int mlx5_cmd_delete_fte(struct mlx5_cor struct mlx5_flow_table *ft, unsigned int index) { - u32 out[MLX5_ST_SZ_DW(delete_fte_out)]; - u32 in[MLX5_ST_SZ_DW(delete_fte_in)]; - int err; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY); MLX5_SET(delete_fte_in, in, table_type, ft->type); @@@ -343,74 -338,55 +338,55 @@@ MLX5_SET(delete_fte_in, in, other_vport, 1); }
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); - - return err; + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); }
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id) { - u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)]; - u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)]; + u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0}; int err;
- memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); - MLX5_SET(alloc_flow_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
- err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); - if (err) - return err; - - *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id); - - return 0; + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + if (!err) + *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id); + return err; }
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id) { - u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)]; - u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)]; - - memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); + u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};
MLX5_SET(dealloc_flow_counter_in, in, opcode, MLX5_CMD_OP_DEALLOC_FLOW_COUNTER); MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id); - - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, - sizeof(out)); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); }
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id, u64 *packets, u64 *bytes) { u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) + - MLX5_ST_SZ_BYTES(traffic_counter)]; - u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)]; + MLX5_ST_SZ_BYTES(traffic_counter)] = {0}; + u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0}; void *stats; int err = 0;
- memset(in, 0, sizeof(in)); - memset(out, 0, sizeof(out)); - MLX5_SET(query_flow_counter_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_COUNTER); MLX5_SET(query_flow_counter_in, in, op_mod, 0); MLX5_SET(query_flow_counter_in, in, flow_counter_id, id); - - err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) return err;
stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics); *packets = MLX5_GET64(traffic_counter, stats, packets); *bytes = MLX5_GET64(traffic_counter, stats, octets); - return 0; }
@@@ -425,11 -401,11 +401,11 @@@ struct mlx5_cmd_fc_bulk mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num) { struct mlx5_cmd_fc_bulk *b; - int outlen = sizeof(*b) + + int outlen = MLX5_ST_SZ_BYTES(query_flow_counter_out) + MLX5_ST_SZ_BYTES(traffic_counter) * num;
- b = kzalloc(outlen, GFP_KERNEL); + b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL); if (!b) return NULL;
@@@ -448,18 -424,14 +424,14 @@@ void mlx5_cmd_fc_bulk_free(struct mlx5_ int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b) { - u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)]; - - memset(in, 0, sizeof(in)); + u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
MLX5_SET(query_flow_counter_in, in, opcode, MLX5_CMD_OP_QUERY_FLOW_COUNTER); MLX5_SET(query_flow_counter_in, in, op_mod, 0); MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id); MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num); - - return mlx5_cmd_exec_check_status(dev, in, sizeof(in), - b->out, b->outlen); + return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen); }
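The mlx5_cmd_fc_bulk_alloc() change above also fixes the output-length bookkeeping: previously outlen was computed as sizeof(*b) plus the counter payload, conflating the driver's bookkeeping struct with the firmware's query_flow_counter_out header. Now outlen describes exactly what firmware writes, the fixed header plus num traffic_counter entries (for num = 4, MLX5_ST_SZ_BYTES(query_flow_counter_out) + 4 * MLX5_ST_SZ_BYTES(traffic_counter)), and sizeof(*b) is added only for the kzalloc().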
void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, @@@ -480,3 -452,51 +452,51 @@@ *packets = MLX5_GET64(traffic_counter, stats, packets); *bytes = MLX5_GET64(traffic_counter, stats, octets); } + + #define MAX_ENCAP_SIZE (128) + + int mlx5_cmd_alloc_encap(struct mlx5_core_dev *dev, + int header_type, + size_t size, + void *encap_header, + u32 *encap_id) + { + u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)]; + u32 in[MLX5_ST_SZ_DW(alloc_encap_header_in) + + (MAX_ENCAP_SIZE / sizeof(u32))]; + void *encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in, + encap_header); + void *header = MLX5_ADDR_OF(encap_header_in, encap_header_in, + encap_header); + int inlen = header - (void *)in + size; + int err; + + if (size > MAX_ENCAP_SIZE) + return -EINVAL; + + memset(in, 0, inlen); + MLX5_SET(alloc_encap_header_in, in, opcode, + MLX5_CMD_OP_ALLOC_ENCAP_HEADER); + MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size); + MLX5_SET(encap_header_in, encap_header_in, header_type, header_type); + memcpy(header, encap_header, size); + + memset(out, 0, sizeof(out)); + err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); + + *encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id); + return err; + } + + void mlx5_cmd_dealloc_encap(struct mlx5_core_dev *dev, u32 encap_id) + { + u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)]; + u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)]; + + memset(in, 0, sizeof(in)); + MLX5_SET(dealloc_encap_header_in, in, opcode, + MLX5_CMD_OP_DEALLOC_ENCAP_HEADER); + MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id); + + mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); + } diff --combined drivers/net/ethernet/qlogic/qed/qed_mcp.c index f776a77,7d39cb9..bdc9ba9 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@@ -54,8 -54,7 +54,7 @@@ bool qed_mcp_is_init(struct qed_hwfn *p return true; }
- void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) + void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, PUBLIC_PORT); @@@ -68,8 -67,7 +67,7 @@@ p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn)); }
- void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) + void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length); u32 tmp, i; @@@ -99,8 -97,7 +97,7 @@@ int qed_mcp_free(struct qed_hwfn *p_hwf return 0; }
- static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) + static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_info *p_info = p_hwfn->mcp_info; u32 drv_mb_offsize, mfw_mb_offsize; @@@ -143,8 -140,7 +140,7 @@@ return 0; }
- int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) + int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_info *p_info; u32 size; @@@ -165,9 -161,7 +161,7 @@@
size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL); - p_info->mfw_mb_shadow = - kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS( - p_info->mfw_mb_length), GFP_KERNEL); + p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL); if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) goto err;
@@@ -177,7 -171,6 +171,6 @@@ return 0;
err: - DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n"); qed_mcp_free(p_hwfn); return -ENOMEM; } @@@ -189,8 -182,7 +182,7 @@@ * access is achieved by setting a blocking flag, which will fail other * competing contexts to send their mailboxes. */ - static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, - u32 cmd) + static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, u32 cmd) { spin_lock_bh(&p_hwfn->mcp_info->lock);
@@@ -221,15 -213,13 +213,13 @@@ return 0; }
- static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, - u32 cmd) + static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, u32 cmd) { if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ) spin_unlock_bh(&p_hwfn->mcp_info->lock); }
- int qed_mcp_reset(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) + int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 seq = ++p_hwfn->mcp_info->drv_mb_seq; u8 delay = CHIP_MCP_RESP_ITER_US; @@@ -326,7 -316,8 +316,8 @@@ static int qed_do_mcp_cmd(struct qed_hw *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param); } else { /* FW BUG! */ - DP_ERR(p_hwfn, "MFW failed to respond!\n"); + DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n", + cmd, param); *o_mcp_resp = 0; rc = -EAGAIN; } @@@ -342,7 -333,7 +333,7 @@@ static int qed_mcp_cmd_and_union(struc
/* MCP not initialized */ if (!qed_mcp_is_init(p_hwfn)) { - DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); + DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); return -EBUSY; }
@@@ -398,9 -389,36 +389,36 @@@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn return 0; }
+ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf) + { + struct qed_mcp_mb_params mb_params; + union drv_union_data union_data; + int rc; + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = cmd; + mb_params.param = param; + mb_params.p_data_dst = &union_data; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) + return rc; + + *o_mcp_resp = mb_params.mcp_resp; + *o_mcp_param = mb_params.mcp_param; + + *o_txn_size = *o_mcp_param; + memcpy(o_buf, &union_data.raw_data, *o_txn_size); + + return 0; + } + int qed_mcp_load_req(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *p_load_code) + struct qed_ptt *p_ptt, u32 *p_load_code) { struct qed_dev *cdev = p_hwfn->cdev; struct qed_mcp_mb_params mb_params; @@@ -527,8 -545,7 +545,7 @@@ static void qed_mcp_handle_transceiver_ "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n", transceiver_state, (u32)(p_hwfn->mcp_info->port_addr + - offsetof(struct public_port, - transceiver_data))); + offsetof(struct public_port, transceiver_data)));
transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE); @@@ -540,8 -557,7 +557,7 @@@ }
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - bool b_reset) + struct qed_ptt *p_ptt, bool b_reset) { struct qed_mcp_link_state *p_link; u8 max_bw, min_bw; @@@ -557,8 -573,7 +573,7 @@@ "Received link update [0x%08x] from mfw [Addr 0x%x]\n", status, (u32)(p_hwfn->mcp_info->port_addr + - offsetof(struct public_port, - link_status))); + offsetof(struct public_port, link_status))); } else { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link indications\n"); @@@ -635,6 -650,9 +650,9 @@@ (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ? QED_LINK_PARTNER_SPEED_20G : 0; p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ? + QED_LINK_PARTNER_SPEED_25G : 0; + p_link->partner_adv_speed |= (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ? QED_LINK_PARTNER_SPEED_40G : 0; p_link->partner_adv_speed |= @@@ -722,6 -740,48 +740,48 @@@ int qed_mcp_set_link(struct qed_hwfn *p return 0; }
+ static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum MFW_DRV_MSG_TYPE type) + { + enum qed_mcp_protocol_type stats_type; + union qed_mcp_protocol_stats stats; + struct qed_mcp_mb_params mb_params; + union drv_union_data union_data; + u32 hsi_param; + + switch (type) { + case MFW_DRV_MSG_GET_LAN_STATS: + stats_type = QED_MCP_LAN_STATS; + hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN; + break; + case MFW_DRV_MSG_GET_FCOE_STATS: + stats_type = QED_MCP_FCOE_STATS; + hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE; + break; + case MFW_DRV_MSG_GET_ISCSI_STATS: + stats_type = QED_MCP_ISCSI_STATS; + hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI; + break; + case MFW_DRV_MSG_GET_RDMA_STATS: + stats_type = QED_MCP_RDMA_STATS; + hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA; + break; + default: + DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type); + return; + } + + qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats); + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_GET_STATS; + mb_params.param = hsi_param; + memcpy(&union_data, &stats, sizeof(stats)); + mb_params.p_data_src = &union_data; + qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + } + static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn, struct public_func *p_shmem_info) { @@@ -752,8 -812,7 +812,7 @@@
static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - struct public_func *p_data, - int pfid) + struct public_func *p_data, int pfid) { u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, PUBLIC_FUNC); @@@ -763,51 -822,20 +822,20 @@@
memset(p_data, 0, sizeof(*p_data));
- size = min_t(u32, sizeof(*p_data), - QED_SECTION_SIZE(mfw_path_offsize)); + size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize)); for (i = 0; i < size / sizeof(u32); i++) ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt, func_addr + (i << 2)); return size; }
- int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u8 *p_pf) - { - struct public_func shmem_info; - int i; - - /* Find first Ethernet interface in port */ - for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->cdev); - i += p_hwfn->cdev->num_ports_in_engines) { - qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, - MCP_PF_ID_BY_REL(p_hwfn, i)); - - if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE) - continue; - - if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) == - FUNC_MF_CFG_PROTOCOL_ETHERNET) { - *p_pf = (u8)i; - return 0; - } - } - - DP_NOTICE(p_hwfn, - "Failed to find on port an ethernet interface in MF_SI mode\n"); - - return -EINVAL; - } - - static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) + static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_function_info *p_info; struct public_func shmem_info; u32 resp = 0, param = 0;
- qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, - MCP_PF_ID(p_hwfn)); + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
qed_read_pf_bandwidth(p_hwfn, &shmem_info);
@@@ -867,6 -895,12 +895,12 @@@ int qed_mcp_handle_events(struct qed_hw case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); break; + case MFW_DRV_MSG_GET_LAN_STATS: + case MFW_DRV_MSG_GET_FCOE_STATS: + case MFW_DRV_MSG_GET_ISCSI_STATS: + case MFW_DRV_MSG_GET_RDMA_STATS: + qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i); + break; case MFW_DRV_MSG_BW_UPDATE: qed_mcp_update_bw(p_hwfn, p_ptt); break; @@@ -940,8 -974,7 +974,7 @@@ int qed_mcp_get_mfw_ver(struct qed_hwf return 0; }
- int qed_mcp_get_media_type(struct qed_dev *cdev, - u32 *p_media_type) + int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type) { struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; struct qed_ptt *p_ptt; @@@ -950,7 -983,7 +983,7 @@@ return -EINVAL;
if (!qed_mcp_is_init(p_hwfn)) { - DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); + DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); return -EBUSY; }
@@@ -1003,15 -1036,13 +1036,13 @@@ int qed_mcp_fill_shmem_func_info(struc struct qed_mcp_function_info *info; struct public_func shmem_info;
- qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, - MCP_PF_ID(p_hwfn)); + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); info = &p_hwfn->mcp_info->func_info;
info->pause_on_host = (shmem_info.config & FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
- if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, - &info->protocol)) { + if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) { DP_ERR(p_hwfn, "Unknown personality %08x\n", (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK)); return -EINVAL; @@@ -1072,15 -1103,13 +1103,13 @@@ struct qed_mcp_link_capabilitie return &p_hwfn->mcp_info->link_capabilities; }
- int qed_mcp_drain(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) + int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 resp = 0, param = 0; int rc;
rc = qed_mcp_cmd(p_hwfn, p_ptt, - DRV_MSG_CODE_NIG_DRAIN, 1000, - &resp, &param); + DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
/* Wait for the drain to complete before returning */ msleep(1020); @@@ -1089,8 -1118,7 +1118,7 @@@ }
int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 *p_flash_size) + struct qed_ptt *p_ptt, u32 *p_flash_size) { u32 flash_size;
@@@ -1153,8 -1181,8 +1181,8 @@@ qed_mcp_send_drv_version(struct qed_hwf p_drv_version = &union_data.drv_version; p_drv_version->version = p_ver->version;
- for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) { - val = cpu_to_be32(p_ver->name[i]); + for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) { + val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)])); *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val; }
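This hunk fixes two bugs in the driver-version string copy: the old loop advanced i in steps of four and then multiplied by four again when storing (walking past the destination), and cpu_to_be32(p_ver->name[i]) byte-swapped a single promoted char instead of a four-byte word. The second bug, illustrated with a made-up version string:

    char name[8] = "qed 8.x";                    /* hypothetical contents */
    __be32 bad  = cpu_to_be32(name[0]);          /* only 'q' survives: 00 00 00 71 */
    __be32 good = cpu_to_be32(*(u32 *)&name[0]); /* "qed " kept in order: 71 65 64 20 (LE host) */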
@@@ -1168,8 -1196,35 +1196,35 @@@ return rc; }
- int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - enum qed_led_mode mode) + int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) + { + u32 resp = 0, param = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, + &param); + if (rc) + DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + + return rc; + } + + int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) + { + u32 value, cpu_mode; + + qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff); + + value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); + value &= ~MCP_REG_CPU_MODE_SOFT_HALT; + qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value); + cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); + + return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0; + } + + int qed_mcp_set_led(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, enum qed_led_mode mode) { u32 resp = 0, param = 0, drv_mb_param; int rc; @@@ -1195,6 -1250,27 +1250,27 @@@ return rc; }
+ int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 mask_parities) + { + u32 resp = 0, param = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES, + mask_parities, &resp, &param); + + if (rc) { + DP_ERR(p_hwfn, + "MCP response failure for mask parities, aborting\n"); + } else if (resp != FW_MSG_CODE_OK) { + DP_ERR(p_hwfn, + "MCP did not acknowledge mask parity request. Old MFW?\n"); + rc = -EINVAL; + } + + return rc; + } + int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 drv_mb_param = 0, rsp, param; diff --combined drivers/net/ethernet/stmicro/stmmac/Kconfig index 54de175,c732b8c..3818c5e --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@@ -61,13 -61,13 +61,13 @@@ config DWMAC_LPC18X config DWMAC_MESON tristate "Amlogic Meson dwmac support" default ARCH_MESON - depends on OF && (ARCH_MESON || COMPILE_TEST) + depends on OF && COMMON_CLK && (ARCH_MESON || COMPILE_TEST) help Support for Ethernet controller on Amlogic Meson SoCs.
This selects the Amlogic Meson SoC glue layer support for - the stmmac device driver. This driver is used for Meson6 and - Meson8 SoCs. + the stmmac device driver. This driver is used for Meson6, + Meson8, Meson8b and GXBB SoCs.
config DWMAC_ROCKCHIP tristate "Rockchip dwmac support" @@@ -104,6 -104,18 +104,18 @@@ config DWMAC_ST device driver. This driver is used on for the STi series SOCs GMAC ethernet controller.
+ config DWMAC_STM32 + tristate "STM32 DWMAC support" + default ARCH_STM32 + depends on OF && HAS_IOMEM + select MFD_SYSCON + ---help--- + Support for Ethernet controller on STM32 SoCs. + + This selects STM32 SoC glue layer support for the stmmac + device driver. This driver is used for the STM32 series + SoCs GMAC Ethernet controller. + config DWMAC_SUNXI tristate "Allwinner GMAC support" default ARCH_SUNXI diff --combined drivers/net/ethernet/stmicro/stmmac/Makefile index f77edb9,f0c9396..5d6ece5 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@@ -9,10 -9,11 +9,11 @@@ stmmac-objs:= stmmac_main.o stmmac_etht obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o -obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o +obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o + obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o stmmac-platform-objs:= stmmac_platform.o diff --combined drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 885a5e6,6f6bbc5..7df4ff1 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@@ -145,7 -145,7 +145,7 @@@ static void dwmac1000_set_mchash(void _ numhashregs = 8; break; default: - pr_debug("STMMAC: err in setting mulitcast filter\n"); + pr_debug("STMMAC: err in setting multicast filter\n"); return; break; } @@@ -261,7 -261,7 +261,7 @@@ static void dwmac1000_pmt(struct mac_de } if (mode & WAKE_UCAST) { pr_debug("GMAC: WOL on global unicast\n"); - pmt |= global_unicast; + pmt |= power_down | global_unicast | wake_up_frame_en; }
writel(pmt, ioaddr + GMAC_PMT); diff --combined drivers/net/wireless/intel/iwlwifi/mvm/tx.c index b3a87a3,8b91544..f915024 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@@ -513,15 -513,6 +513,15 @@@ int iwl_mvm_tx_skb_non_sta(struct iwl_m int hdrlen = ieee80211_hdrlen(hdr->frame_control); int queue;
+ /* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used + * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel + * queue. STATION (HS2.0) uses the auxiliary context of the FW, + * and hence needs to be sent on the aux queue + */ + if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && + skb_info->control.vif->type == NL80211_IFTYPE_STATION) + IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue; + memcpy(&info, skb->cb, sizeof(info));
if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU)) @@@ -535,6 -526,16 +535,6 @@@ /* This holds the amsdu headers length */ skb_info->driver_data[0] = (void *)(uintptr_t)0;
- /* - * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used - * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel - * queue. STATION (HS2.0) uses the auxiliary context of the FW, - * and hence needs to be sent on the aux queue - */ - if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && - info.control.vif->type == NL80211_IFTYPE_STATION) - IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue; - queue = info.hw_queue;
/* @@@ -837,6 -838,22 +837,22 @@@ static void iwl_mvm_tx_add_stream(struc } }
+ /* Check if there are any timed-out TIDs on a given shared TXQ */ + static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id) + { + unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap; + unsigned long now = jiffies; + int tid; + + for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) { + if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] + + IWL_MVM_DQA_QUEUE_TIMEOUT, now)) + return true; + } + + return false; + } + /* * Sets the fields in the Tx cmd that are crypto related */ @@@ -939,7 -956,6 +955,6 @@@ static int iwl_mvm_tx_mpdu(struct iwl_m iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); spin_unlock(&mvmsta->lock); return 0; - }
/* If we are here - TXQ exists and needs to be re-activated */ @@@ -952,8 -968,25 +967,25 @@@ txq_id); }
- /* Keep track of the time of the last frame for this RA/TID */ - mvm->queue_info[txq_id].last_frame_time[tid] = jiffies; + if (iwl_mvm_is_dqa_supported(mvm)) { + /* Keep track of the time of the last frame for this RA/TID */ + mvm->queue_info[txq_id].last_frame_time[tid] = jiffies; + + /* + * If we have timed-out TIDs, schedule the worker that will + * reconfigure the queues and update them. + * + * Note that the mvm->queue_info_lock isn't being taken here in + * order to not serialize the TX flow. This isn't dangerous + * because scheduling mvm->add_stream_wk can't ruin the state, + * and if we DON'T schedule it due to some race condition then + * we will schedule it the next time we get here for a TX. + */ + if (unlikely(mvm->queue_info[txq_id].status == + IWL_MVM_QUEUE_SHARED && + iwl_mvm_txq_should_update(mvm, txq_id))) + schedule_work(&mvm->add_stream_wk); + }
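The new iwl_mvm_txq_should_update() above relies on the kernel's wrap-safe jiffies helpers: a TID counts as timed out once its last-frame timestamp plus IWL_MVM_DQA_QUEUE_TIMEOUT falls before now. The bare pattern, stripped of the driver specifics:

    unsigned long deadline = last_frame_time + IWL_MVM_DQA_QUEUE_TIMEOUT;
    bool expired = time_before(deadline, jiffies); /* safe across jiffies wraparound */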
IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id, tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number)); diff --combined drivers/net/xen-netback/xenbus.c index daf4c78,bacf6e0..9911b4e --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@@ -165,7 -165,7 +165,7 @@@ xenvif_write_io_ring(struct file *filp return count; }
- static int xenvif_dump_open(struct inode *inode, struct file *filp) + static int xenvif_io_ring_open(struct inode *inode, struct file *filp) { int ret; void *queue = NULL; @@@ -179,13 -179,35 +179,35 @@@
static const struct file_operations xenvif_dbg_io_ring_ops_fops = { .owner = THIS_MODULE, - .open = xenvif_dump_open, + .open = xenvif_io_ring_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = xenvif_write_io_ring, };
+ static int xenvif_read_ctrl(struct seq_file *m, void *v) + { + struct xenvif *vif = m->private; + + xenvif_dump_hash_info(vif, m); + + return 0; + } + + static int xenvif_ctrl_open(struct inode *inode, struct file *filp) + { + return single_open(filp, xenvif_read_ctrl, inode->i_private); + } + + static const struct file_operations xenvif_dbg_ctrl_ops_fops = { + .owner = THIS_MODULE, + .open = xenvif_ctrl_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + }; + static void xenvif_debugfs_addif(struct xenvif *vif) { struct dentry *pfile; @@@ -210,6 -232,17 +232,17 @@@ pr_warn("Creation of io_ring file returned %ld!\n", PTR_ERR(pfile)); } + + if (vif->ctrl_task) { + pfile = debugfs_create_file("ctrl", + S_IRUSR, + vif->xenvif_dbg_root, + vif, + &xenvif_dbg_ctrl_ops_fops); + if (IS_ERR_OR_NULL(pfile)) + pr_warn("Creation of ctrl file returned %ld!\n", + PTR_ERR(pfile)); + } } else netdev_warn(vif->dev, "Creation of vif debugfs dir returned %ld!\n", @@@ -271,11 -304,6 +304,11 @@@ static int netback_probe(struct xenbus_ be->dev = dev; dev_set_drvdata(&dev->dev, be);
+ be->state = XenbusStateInitialising; + err = xenbus_switch_state(dev, XenbusStateInitialising); + if (err) + goto fail; + sg = 1;
do { @@@ -388,6 -416,11 +421,6 @@@
be->hotplug_script = script;
- err = xenbus_switch_state(dev, XenbusStateInitWait); - if (err) - goto fail; - - be->state = XenbusStateInitWait;
/* This kicks hotplug scripts, so do it immediately. */ err = backend_create_xenvif(be); @@@ -492,20 -525,20 +525,20 @@@ static inline void backend_switch_state
/* Handle backend state transitions: * - * The backend state starts in InitWait and the following transitions are + * The backend state starts in Initialising and the following transitions are * allowed. * - * InitWait -> Connected - * - * ^ \ | - * | \ | - * | \ | - * | \ | - * | \ | - * | \ | - * | V V + * Initialising -> InitWait -> Connected + * \ + * \ ^ \ | + * \ | \ | + * \ | \ | + * \ | \ | + * \ | \ | + * \ | \ | + * V | V V * - * Closed <-> Closing + * Closed <-> Closing * * The state argument specifies the eventual state of the backend and the * function transitions to that state via the shortest path. @@@ -515,20 -548,6 +548,20 @@@ static void set_backend_state(struct ba { while (be->state != state) { switch (be->state) { + case XenbusStateInitialising: + switch (state) { + case XenbusStateInitWait: + case XenbusStateConnected: + case XenbusStateClosing: + backend_switch_state(be, XenbusStateInitWait); + break; + case XenbusStateClosed: + backend_switch_state(be, XenbusStateClosed); + break; + default: + BUG(); + } + break; case XenbusStateClosed: switch (state) { case XenbusStateInitWait: diff --combined include/net/sock.h index 8741988,c797c57..ebf75db --- a/include/net/sock.h +++ b/include/net/sock.h @@@ -1020,7 -1020,6 +1020,6 @@@ struct proto void (*unhash)(struct sock *sk); void (*rehash)(struct sock *sk); int (*get_port)(struct sock *sk, unsigned short snum); - void (*clear_sk)(struct sock *sk, int size);
/* Keeping track of sockets in use */ #ifdef CONFIG_PROC_FS @@@ -1114,6 -1113,16 +1113,16 @@@ static inline bool sk_stream_is_writeab sk_stream_memory_free(sk); }
+ static inline int sk_under_cgroup_hierarchy(struct sock *sk, + struct cgroup *ancestor) + { + #ifdef CONFIG_SOCK_CGROUP_DATA + return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), + ancestor); + #else + return -ENOTSUPP; + #endif + }
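sk_under_cgroup_hierarchy() answers "does this socket live under that cgroup subtree", returning 1, 0, or -ENOTSUPP when the kernel lacks CONFIG_SOCK_CGROUP_DATA. A purely hypothetical caller, not part of this commit, might wrap it like:

    static bool sk_in_subtree(struct sock *sk, struct cgroup *ancestor)
    {
            /* 1: descendant, 0: not, -ENOTSUPP: no socket cgroup data */
            return sk_under_cgroup_hierarchy(sk, ancestor) == 1;
    }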
static inline bool sk_has_memory_pressure(const struct sock *sk) { @@@ -1232,8 -1241,6 +1241,6 @@@ static inline int __sk_prot_rehash(stru return sk->sk_prot->hash(sk); }
- void sk_prot_clear_portaddr_nulls(struct sock *sk, int size); - /* About 10 seconds */ #define SOCK_DESTROY_TIME (10*HZ)
@@@ -1332,16 -1339,6 +1339,16 @@@ static inline void sk_mem_uncharge(stru if (!sk_has_account(sk)) return; sk->sk_forward_alloc += size; + + /* Avoid a possible overflow. + * TCP send queues can make this happen, if sk_mem_reclaim() + * is not called and more than 2 GBytes are released at once. + * + * If we reach 2 MBytes, reclaim 1 MByte right now; there is + * no need to hold that much forward allocation anyway. + */ + if (unlikely(sk->sk_forward_alloc >= 1 << 21)) + __sk_mem_reclaim(sk, 1 << 20); }
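For reference, the constants in the new guard work out as below; sk_forward_alloc is a plain int, which is why letting it grow unchecked could eventually overflow:

    /* 1 << 21 = 2,097,152 bytes (2 MiB): reclaim trigger
     * 1 << 20 = 1,048,576 bytes (1 MiB): amount handed back
     * sk_forward_alloc therefore hovers around 1-2 MiB instead of
     * drifting toward INT_MAX when sk_mem_reclaim() is never called.
     */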
static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) diff --combined kernel/events/core.c index a54f2c2,a7b8c1c..9fc3be0 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@@ -2496,11 -2496,11 +2496,11 @@@ static int __perf_event_stop(void *info return 0; }
-static int perf_event_restart(struct perf_event *event) +static int perf_event_stop(struct perf_event *event, int restart) { struct stop_event_data sd = { .event = event, - .restart = 1, + .restart = restart, }; int ret = 0;
@@@ -3549,18 -3549,10 +3549,18 @@@ static int perf_event_read(struct perf_ .group = group, .ret = 0, }; - ret = smp_call_function_single(event->oncpu, __perf_event_read, &data, 1); - /* The event must have been read from an online CPU: */ - WARN_ON_ONCE(ret); - ret = ret ? : data.ret; + /* + * Purposely ignore the smp_call_function_single() return + * value. + * + * If event->oncpu isn't a valid CPU it means the event got + * scheduled out and that will have updated the event count. + * + * Therefore, either way, we'll have an up-to-date event count + * after this. + */ + (void)smp_call_function_single(event->oncpu, __perf_event_read, &data, 1); + ret = data.ret; } else if (event->state == PERF_EVENT_STATE_INACTIVE) { struct perf_event_context *ctx = event->ctx; unsigned long flags; @@@ -4845,19 -4837,6 +4845,19 @@@ static void ring_buffer_attach(struct p spin_unlock_irqrestore(&rb->event_lock, flags); }
+ /* + * Avoid racing with perf_mmap_close(AUX): stop the event + * before swizzling the event::rb pointer; if it's getting + * unmapped, its aux_mmap_count will be 0 and it won't + * restart. See the comment in __perf_pmu_output_stop(). + * + * Data will inevitably be lost when set_output is done in + * mid-air, but then again, whoever does it like this is + * not in for the data anyway. + */ + if (has_aux(event)) + perf_event_stop(event, 0); + rcu_assign_pointer(event->rb, rb);
if (old_rb) { @@@ -6133,7 -6112,7 +6133,7 @@@ static void perf_event_addr_filters_exe raw_spin_unlock_irqrestore(&ifh->lock, flags);
if (restart) - perf_event_restart(event); + perf_event_stop(event, 1); }
void perf_event_exec(void) @@@ -6177,13 -6156,7 +6177,13 @@@ static void __perf_event_output_stop(st
/* * In case of inheritance, it will be the parent that links to the - * ring-buffer, but it will be the child that's actually using it: + * ring-buffer, but it will be the child that's actually using it. + * + * We are using event::rb to determine if the event should be stopped, + * however this may race with ring_buffer_attach() (through set_output), + * which will make us skip the event that actually needs to be stopped. + * So ring_buffer_attach() has to stop an aux event before re-assigning + * its rb pointer. */ if (rcu_dereference(parent->rb) == rb) ro->err = __perf_event_stop(&sd); @@@ -6697,7 -6670,7 +6697,7 @@@ static void __perf_addr_filters_adjust( raw_spin_unlock_irqrestore(&ifh->lock, flags);
if (restart) - perf_event_restart(event); + perf_event_stop(event, 1); }
/* @@@ -7049,7 -7022,7 +7049,7 @@@ static int __perf_event_overflow(struc irq_work_queue(&event->pending); }
- event->overflow_handler(event, data, regs); + READ_ONCE(event->overflow_handler)(event, data, regs);
if (*perf_event_fasync(event) && event->pending_kill) { event->pending_wakeup = 1; @@@ -7664,11 -7637,83 +7664,83 @@@ static void perf_event_free_filter(stru ftrace_profile_free_filter(event); }
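The READ_ONCE() added around the overflow_handler invocation earlier pairs with the WRITE_ONCE() stores in the BPF handler code that follows: once the pointer can be swapped at runtime, both sides must go through the ONCE macros so the compiler cannot tear, cache or refetch the access. The bare pattern, with made-up types:

    typedef void (*handler_fn)(int);

    struct obj {
            handler_fn handler;
    };

    static void install(struct obj *o, handler_fn h)
    {
            WRITE_ONCE(o->handler, h);             /* single, untorn store */
    }

    static void invoke(struct obj *o, int arg)
    {
            handler_fn h = READ_ONCE(o->handler);  /* read exactly once */

            h(arg);
    }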
+ #ifdef CONFIG_BPF_SYSCALL + static void bpf_overflow_handler(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs) + { + struct bpf_perf_event_data_kern ctx = { + .data = data, + .regs = regs, + }; + int ret = 0; + + preempt_disable(); + if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) + goto out; + rcu_read_lock(); + ret = BPF_PROG_RUN(event->prog, (void *)&ctx); + rcu_read_unlock(); + out: + __this_cpu_dec(bpf_prog_active); + preempt_enable(); + if (!ret) + return; + + event->orig_overflow_handler(event, data, regs); + } + + static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd) + { + struct bpf_prog *prog; + + if (event->overflow_handler_context) + /* hw breakpoint or kernel counter */ + return -EINVAL; + + if (event->prog) + return -EEXIST; + + prog = bpf_prog_get_type(prog_fd, BPF_PROG_TYPE_PERF_EVENT); + if (IS_ERR(prog)) + return PTR_ERR(prog); + + event->prog = prog; + event->orig_overflow_handler = READ_ONCE(event->overflow_handler); + WRITE_ONCE(event->overflow_handler, bpf_overflow_handler); + return 0; + } + + static void perf_event_free_bpf_handler(struct perf_event *event) + { + struct bpf_prog *prog = event->prog; + + if (!prog) + return; + + WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler); + event->prog = NULL; + bpf_prog_put(prog); + } + #else + static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd) + { + return -EOPNOTSUPP; + } + static void perf_event_free_bpf_handler(struct perf_event *event) + { + } + #endif + static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) { bool is_kprobe, is_tracepoint; struct bpf_prog *prog;
+ if (event->attr.type == PERF_TYPE_HARDWARE || + event->attr.type == PERF_TYPE_SOFTWARE) + return perf_event_set_bpf_handler(event, prog_fd); + if (event->attr.type != PERF_TYPE_TRACEPOINT) return -EINVAL;
@@@ -7709,6 -7754,8 +7781,8 @@@ static void perf_event_free_bpf_prog(st { struct bpf_prog *prog;
+ perf_event_free_bpf_handler(event); + if (!event->tp_event) return;
@@@ -7886,7 -7933,7 +7960,7 @@@ static void perf_event_addr_filters_app mmput(mm);
restart: - perf_event_restart(event); + perf_event_stop(event, 1); }
/* @@@ -9025,6 -9072,19 +9099,19 @@@ perf_event_alloc(struct perf_event_att if (!overflow_handler && parent_event) { overflow_handler = parent_event->overflow_handler; context = parent_event->overflow_handler_context; + #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING) + if (overflow_handler == bpf_overflow_handler) { + struct bpf_prog *prog = bpf_prog_inc(parent_event->prog); + + if (IS_ERR(prog)) { + err = PTR_ERR(prog); + goto err_ns; + } + event->prog = prog; + event->orig_overflow_handler = + parent_event->orig_overflow_handler; + } + #endif }
if (overflow_handler) { diff --combined net/batman-adv/routing.c index 3d19947,610f2c4..7e8dc64 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@@ -74,11 -74,23 +74,23 @@@ static void _batadv_update_route(struc if (!orig_ifinfo) return;
- rcu_read_lock(); - curr_router = rcu_dereference(orig_ifinfo->router); - if (curr_router && !kref_get_unless_zero(&curr_router->refcount)) - curr_router = NULL; - rcu_read_unlock(); + spin_lock_bh(&orig_node->neigh_list_lock); + /* curr_router used earlier may not be the current orig_ifinfo->router + * anymore because it was dereferenced outside of the neigh_list_lock + * protected region. After the new best neighbor has replaced the current + * best neighbor, the reference counter needs to decrease. Consequently, + * the code needs to ensure the curr_router variable contains a pointer + * to the replaced best neighbor. + */ + curr_router = rcu_dereference_protected(orig_ifinfo->router, true); + + /* increase refcount of new best neighbor */ + if (neigh_node) + kref_get(&neigh_node->refcount); + + rcu_assign_pointer(orig_ifinfo->router, neigh_node); + spin_unlock_bh(&orig_node->neigh_list_lock); + batadv_orig_ifinfo_put(orig_ifinfo);
/* route deleted */ if ((curr_router) && (!neigh_node)) { @@@ -100,27 -112,6 +112,6 @@@ curr_router->addr); }
- if (curr_router) - batadv_neigh_node_put(curr_router); - - spin_lock_bh(&orig_node->neigh_list_lock); - /* curr_router used earlier may not be the current orig_ifinfo->router - * anymore because it was dereferenced outside of the neigh_list_lock - * protected region. After the new best neighbor has replace the current - * best neighbor the reference counter needs to decrease. Consequently, - * the code needs to ensure the curr_router variable contains a pointer - * to the replaced best neighbor. - */ - curr_router = rcu_dereference_protected(orig_ifinfo->router, true); - - /* increase refcount of new best neighbor */ - if (neigh_node) - kref_get(&neigh_node->refcount); - - rcu_assign_pointer(orig_ifinfo->router, neigh_node); - spin_unlock_bh(&orig_node->neigh_list_lock); - batadv_orig_ifinfo_put(orig_ifinfo); - /* decrease refcount of previous best neighbor */ if (curr_router) batadv_neigh_node_put(curr_router); @@@ -470,29 -461,6 +461,29 @@@ static int batadv_check_unicast_packet( }
/** + * batadv_last_bonding_get - Get last_bonding_candidate of orig_node + * @orig_node: originator node whose last bonding candidate should be retrieved + * + * Return: last bonding candidate of router or NULL if not found + * + * The object is returned with refcounter increased by 1. + */ +static struct batadv_orig_ifinfo * +batadv_last_bonding_get(struct batadv_orig_node *orig_node) +{ + struct batadv_orig_ifinfo *last_bonding_candidate; + + spin_lock_bh(&orig_node->neigh_list_lock); + last_bonding_candidate = orig_node->last_bonding_candidate; + + if (last_bonding_candidate) + kref_get(&last_bonding_candidate->refcount); + spin_unlock_bh(&orig_node->neigh_list_lock); + + return last_bonding_candidate; +} + +/** * batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node * @orig_node: originator node whose bonding candidates should be replaced * @new_candidate: new bonding candidate or NULL @@@ -562,7 -530,7 +553,7 @@@ batadv_find_router(struct batadv_priv * * router - obviously there are no other candidates. */ rcu_read_lock(); - last_candidate = orig_node->last_bonding_candidate; + last_candidate = batadv_last_bonding_get(orig_node); if (last_candidate) last_cand_router = rcu_dereference(last_candidate->router);
@@@ -654,9 -622,6 +645,9 @@@ next batadv_orig_ifinfo_put(next_candidate); }
+ if (last_candidate) + batadv_orig_ifinfo_put(last_candidate); + return router; }
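batadv_last_bonding_get() above is the usual "take a reference under the lock" helper: the pointer read and the kref_get() both happen inside neigh_list_lock, so the candidate cannot vanish between lookup and use, and batadv_find_router() now owns a reference that the added batadv_orig_ifinfo_put() releases. The pattern in isolation, with made-up types:

    static struct item *item_get(struct parent *p)
    {
            struct item *it;

            spin_lock_bh(&p->lock);
            it = p->cur;                     /* read the pointer under the lock */
            if (it)
                    kref_get(&it->refcount); /* and pin it before unlocking */
            spin_unlock_bh(&p->lock);

            return it;                       /* caller must drop the reference */
    }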
diff --combined net/core/sock.c index fd7b41e,51a7304..038e660 --- a/net/core/sock.c +++ b/net/core/sock.c @@@ -1315,24 -1315,6 +1315,6 @@@ static void sock_copy(struct sock *nsk #endif }
- void sk_prot_clear_portaddr_nulls(struct sock *sk, int size) - { - unsigned long nulls1, nulls2; - - nulls1 = offsetof(struct sock, __sk_common.skc_node.next); - nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next); - if (nulls1 > nulls2) - swap(nulls1, nulls2); - - if (nulls1 != 0) - memset((char *)sk, 0, nulls1); - memset((char *)sk + nulls1 + sizeof(void *), 0, - nulls2 - nulls1 - sizeof(void *)); - memset((char *)sk + nulls2 + sizeof(void *), 0, - size - nulls2 - sizeof(void *)); - } - EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls); - static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority, int family) { @@@ -1344,12 -1326,8 +1326,8 @@@ sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO); if (!sk) return sk; - if (priority & __GFP_ZERO) { - if (prot->clear_sk) - prot->clear_sk(sk, prot->obj_size); - else - sk_prot_clear_nulls(sk, prot->obj_size); - } + if (priority & __GFP_ZERO) + sk_prot_clear_nulls(sk, prot->obj_size); } else sk = kmalloc(prot->obj_size, priority);
@@@ -1362,6 -1340,7 +1340,6 @@@ if (!try_module_get(prot->owner)) goto out_free_sec; sk_tx_queue_clear(sk); - cgroup_sk_alloc(&sk->sk_cgrp_data); }
return sk; @@@ -1421,7 -1400,6 +1399,7 @@@ struct sock *sk_alloc(struct net *net, sock_net_set(sk, net); atomic_set(&sk->sk_wmem_alloc, 1);
+ cgroup_sk_alloc(&sk->sk_cgrp_data); sock_update_classid(&sk->sk_cgrp_data); sock_update_netprioidx(&sk->sk_cgrp_data); } @@@ -1566,9 -1544,6 +1544,9 @@@ struct sock *sk_clone_lock(const struc newsk->sk_priority = 0; newsk->sk_incoming_cpu = raw_smp_processor_id(); atomic64_set(&newsk->sk_cookie, 0); + + cgroup_sk_alloc(&newsk->sk_cgrp_data); + /* * Before updating sk_refcnt, we must commit prior changes to memory * (Documentation/RCU/rculist_nulls.txt for details) diff --combined net/ipv4/tcp_output.c index f53d0cc,8b45794..f956cd7 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@@ -2605,8 -2605,7 +2605,8 @@@ int __tcp_retransmit_skb(struct sock *s * copying overhead: fragmentation, tunneling, mangling etc. */ if (atomic_read(&sk->sk_wmem_alloc) > - min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) + min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), + sk->sk_sndbuf)) return -EAGAIN;
if (skb_still_in_host_queue(sk, skb)) @@@ -2777,7 -2776,7 +2777,7 @@@ void tcp_xmit_retransmit_queue(struct s
max_segs = tcp_tso_autosize(sk, tcp_current_mss(sk)); tcp_for_write_queue_from(skb, sk) { - __u8 sacked = TCP_SKB_CB(skb)->sacked; + __u8 sacked; int segs;
if (skb == tcp_send_head(sk)) @@@ -2789,6 -2788,7 +2789,7 @@@ segs = tp->snd_cwnd - tcp_packets_in_flight(tp); if (segs <= 0) return; + sacked = TCP_SKB_CB(skb)->sacked; /* In case tcp_shift_skb_data() have aggregated large skbs, * we need to make sure not sending too bigs TSO packets */ diff --combined net/ipv6/ip6_vti.c index 52a2f73,cc7e058..e276dd8 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@@ -50,14 -50,14 +50,14 @@@ #include <net/net_namespace.h> #include <net/netns/generic.h>
- #define HASH_SIZE_SHIFT 5 - #define HASH_SIZE (1 << HASH_SIZE_SHIFT) + #define IP6_VTI_HASH_SIZE_SHIFT 5 + #define IP6_VTI_HASH_SIZE (1 << IP6_VTI_HASH_SIZE_SHIFT)
static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2) { u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
- return hash_32(hash, HASH_SIZE_SHIFT); + return hash_32(hash, IP6_VTI_HASH_SIZE_SHIFT); }
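The IP6_VTI_ prefix presumably avoids clashing with the generic HASH_SIZE() macro from <linux/hashtable.h>; the hashing itself is unchanged: fold each IPv6 address to 32 bits, XOR the two, and let hash_32() reduce that to a 5-bit bucket index:

    u32 h = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
    u32 bucket = hash_32(h, IP6_VTI_HASH_SIZE_SHIFT);  /* 0..31 (1 << 5 buckets) */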
static int vti6_dev_init(struct net_device *dev); @@@ -69,7 -69,7 +69,7 @@@ struct vti6_net /* the vti6 tunnel fallback device */ struct net_device *fb_tnl_dev; /* lists for storing tunnels in use */ - struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE]; + struct ip6_tnl __rcu *tnls_r_l[IP6_VTI_HASH_SIZE]; struct ip6_tnl __rcu *tnls_wc[1]; struct ip6_tnl __rcu **tnls[2]; }; @@@ -340,7 -340,6 +340,7 @@@ static int vti6_rcv_cb(struct sk_buff * struct net_device *dev; struct pcpu_sw_netstats *tstats; struct xfrm_state *x; + struct xfrm_mode *inner_mode; struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6; u32 orig_mark = skb->mark; int ret; @@@ -358,19 -357,7 +358,19 @@@ }
x = xfrm_input_state(skb); - family = x->inner_mode->afinfo->family; + + inner_mode = x->inner_mode; + + if (x->sel.family == AF_UNSPEC) { + inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol); + if (inner_mode == NULL) { + XFRM_INC_STATS(dev_net(skb->dev), + LINUX_MIB_XFRMINSTATEMODEERROR); + return -EINVAL; + } + } + + family = inner_mode->afinfo->family;
skb->mark = be32_to_cpu(t->parms.i_key); ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family); @@@ -1053,7 -1040,7 +1053,7 @@@ static void __net_exit vti6_destroy_tun struct ip6_tnl *t; LIST_HEAD(list);
- for (h = 0; h < HASH_SIZE; h++) { + for (h = 0; h < IP6_VTI_HASH_SIZE; h++) { t = rtnl_dereference(ip6n->tnls_r_l[h]); while (t) { unregister_netdevice_queue(t->dev, &list); diff --combined net/ipv6/route.c index e3a224b,4dab585..5a5aeb9 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@@ -1147,15 -1147,16 +1147,16 @@@ static struct rt6_info *ip6_pol_route_i return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags); }
- static struct dst_entry *ip6_route_input_lookup(struct net *net, - struct net_device *dev, - struct flowi6 *fl6, int flags) + struct dst_entry *ip6_route_input_lookup(struct net *net, + struct net_device *dev, + struct flowi6 *fl6, int flags) { if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG) flags |= RT6_LOOKUP_F_IFACE;
return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input); } + EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
void ip6_route_input(struct sk_buff *skb) { @@@ -1164,7 -1165,7 +1165,7 @@@ int flags = RT6_LOOKUP_F_HAS_SADDR; struct ip_tunnel_info *tun_info; struct flowi6 fl6 = { - .flowi6_iif = l3mdev_fib_oif(skb->dev), + .flowi6_iif = skb->dev->ifindex, .daddr = iph->daddr, .saddr = iph->saddr, .flowlabel = ip6_flowinfo(iph), @@@ -1188,12 -1189,15 +1189,15 @@@ static struct rt6_info *ip6_pol_route_o struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk, struct flowi6 *fl6, int flags) { - struct dst_entry *dst; bool any_src;
- dst = l3mdev_get_rt6_dst(net, fl6); - if (dst) - return dst; + if (rt6_need_strict(&fl6->daddr)) { + struct dst_entry *dst; + + dst = l3mdev_link_scope_lookup(net, fl6); + if (dst) + return dst; + }
fl6->flowi6_iif = LOOPBACK_IFINDEX;
@@@ -1604,7 -1608,9 +1608,9 @@@ static unsigned int ip6_mtu(const struc rcu_read_unlock();
out: - return min_t(unsigned int, mtu, IP6_MAX_MTU); + mtu = min_t(unsigned int, mtu, IP6_MAX_MTU); + + return mtu - lwtunnel_headroom(dst->lwtstate, mtu); }
static struct dst_entry *icmp6_dst_gc_list; @@@ -1986,18 -1992,9 +1992,18 @@@ static struct rt6_info *ip6_route_info_ if (!(gwa_type & IPV6_ADDR_UNICAST)) goto out;
- if (cfg->fc_table) + if (cfg->fc_table) { grt = ip6_nh_lookup_table(net, cfg, gw_addr);
+ if (grt) { + if (grt->rt6i_flags & RTF_GATEWAY || + (dev && dev != grt->dst.dev)) { + ip6_rt_put(grt); + grt = NULL; + } + } + } + if (!grt) grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1); @@@ -2565,8 -2562,16 +2571,16 @@@ struct rt6_info *addrconf_dst_alloc(str { u32 tb_id; struct net *net = dev_net(idev->dev); - struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, - DST_NOCOUNT); + struct net_device *dev = net->loopback_dev; + struct rt6_info *rt; + + /* use L3 Master device as loopback for host routes if device + * is enslaved and address is not link local or multicast + */ + if (!rt6_need_strict(addr)) + dev = l3mdev_master_dev_rcu(idev->dev) ? : dev; + + rt = ip6_dst_alloc(net, dev, DST_NOCOUNT); if (!rt) return ERR_PTR(-ENOMEM);
@@@ -3345,11 -3350,6 +3359,6 @@@ static int inet6_rtm_getroute(struct sk } else { fl6.flowi6_oif = oif;
- if (netif_index_is_l3_master(net, oif)) { - fl6.flowi6_flags = FLOWI_FLAG_L3MDEV_SRC | - FLOWI_FLAG_SKIP_NH_OIF; - } - rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6); }
diff --combined net/irda/af_irda.c index ccc2444,db63969..391c3cb --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@@ -832,7 -832,7 +832,7 @@@ static int irda_accept(struct socket *s struct sock *sk = sock->sk; struct irda_sock *new, *self = irda_sk(sk); struct sock *newsk; - struct sk_buff *skb; + struct sk_buff *skb = NULL; int err;
err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0); @@@ -845,9 -845,6 +845,6 @@@ if (sock->state != SS_UNCONNECTED) goto out;
- if ((sk = sock->sk) == NULL) - goto out; - err = -EOPNOTSUPP; if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) && (sk->sk_type != SOCK_DGRAM)) @@@ -900,6 -897,7 +897,6 @@@ err = -EPERM; /* value does not seem to make sense. -arnd */ if (!new->tsap) { pr_debug("%s(), dup failed!\n", __func__); - kfree_skb(skb); goto out; }
@@@ -918,6 -916,7 +915,6 @@@ /* Clean up the original one to keep it in listen state */ irttp_listen(self->tsap);
- kfree_skb(skb); sk->sk_ack_backlog--;
newsock->state = SS_CONNECTED; @@@ -925,7 -924,6 +922,7 @@@ irda_connect_response(new); err = 0; out: + kfree_skb(skb); release_sock(sk); return err; } diff --combined net/mac80211/agg-rx.c index afa9468,a5d69df..f6749dc --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@@ -261,16 -261,10 +261,16 @@@ void __ieee80211_start_rx_ba_session(st .timeout = timeout, .ssn = start_seq_num, }; - int i, ret = -EOPNOTSUPP; u16 status = WLAN_STATUS_REQUEST_DECLINED;
+ if (tid >= IEEE80211_FIRST_TSPEC_TSID) { + ht_dbg(sta->sdata, + "STA %pM requests BA session on unsupported tid %d\n", + sta->sta.addr, tid); + goto end_no_lock; + } + if (!sta->sta.ht_cap.ht_supported) { ht_dbg(sta->sdata, "STA %pM erroneously requests BA session on tid %d w/o QoS\n", @@@ -304,10 -298,13 +304,13 @@@ buf_size = IEEE80211_MAX_AMPDU_BUF;
/* make sure the size doesn't exceed the maximum supported by the hw */ - if (buf_size > local->hw.max_rx_aggregation_subframes) - buf_size = local->hw.max_rx_aggregation_subframes; + if (buf_size > sta->sta.max_rx_aggregation_subframes) + buf_size = sta->sta.max_rx_aggregation_subframes; params.buf_size = buf_size;
+ ht_dbg(sta->sdata, "AddBA Req buf_size=%d for %pM\n", + buf_size, sta->sta.addr); + /* examine state machine */ mutex_lock(&sta->ampdu_mlme.mtx);
@@@ -412,8 -409,10 +415,10 @@@ }
end: - if (status == WLAN_STATUS_SUCCESS) + if (status == WLAN_STATUS_SUCCESS) { __set_bit(tid, sta->ampdu_mlme.agg_session_valid); + __clear_bit(tid, sta->ampdu_mlme.unexpected_agg); + } mutex_unlock(&sta->ampdu_mlme.mtx);
end_no_lock: diff --combined net/mac80211/mesh_hwmp.c index faccef9,fa7d37c..b747c96 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@@ -326,22 -326,33 +326,33 @@@ static u32 airtime_link_metric_get(stru u32 tx_time, estimated_retx; u64 result;
- if (sta->mesh->fail_avg >= 100) - return MAX_METRIC; + /* Try to get rate based on HW/SW RC algorithm. + * Rate is returned in units of Kbps; correct this + * to comply with airtime calculation units. + * Round up in case we get rate < 100Kbps. + */ + rate = DIV_ROUND_UP(sta_get_expected_throughput(sta), 100); + + if (rate) { + err = 0; + } else { + if (sta->mesh->fail_avg >= 100) + return MAX_METRIC;
- sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &rinfo); - rate = cfg80211_calculate_bitrate(&rinfo); - if (WARN_ON(!rate)) - return MAX_METRIC; + sta_set_rate_info_tx(sta, &sta->tx_stats.last_rate, &rinfo); + rate = cfg80211_calculate_bitrate(&rinfo); + if (WARN_ON(!rate)) + return MAX_METRIC;
- err = (sta->mesh->fail_avg << ARITH_SHIFT) / 100; + err = (sta->mesh->fail_avg << ARITH_SHIFT) / 100; + }
/* bitrate is in units of 100 Kbps, while we need rate in units of * 1Mbps. This will be corrected on tx_time computation. */ tx_time = (device_constant + 10 * test_frame_len / rate); estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err)); - result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT) ; + result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT); return (u32)result; }
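airtime_link_metric_get() computes the 802.11s airtime metric, roughly (overhead + frame_time) / (1 - frame_error_rate), in fixed point. A worked example, assuming the ARITH_SHIFT == 8 scaling that mesh_hwmp.c applies to device_constant, test_frame_len and s_unit (those definitions sit outside this hunk):

    /* fail_avg = 25%  ->  err = (25 << 8) / 100   = 64
     * estimated_retx  = (1 << 16) / (256 - 64)    = 341   (~1.33 in Q8.8)
     * tx_time of 300us in Q8.8                    = 76800
     * result = (76800 * 341) >> 16                = 399
     *
     * so a 300us transmission at 25% failure costs ~400us of airtime.
     */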
@@@ -746,7 -757,6 +757,7 @@@ static void hwmp_perr_frame_process(str sta = next_hop_deref_protected(mpath); if (mpath->flags & MESH_PATH_ACTIVE && ether_addr_equal(ta, sta->sta.addr) && + !(mpath->flags & MESH_PATH_FIXED) && (!(mpath->flags & MESH_PATH_SN_VALID) || SN_GT(target_sn, mpath->sn) || target_sn == 0)) { mpath->flags &= ~MESH_PATH_ACTIVE; @@@ -1013,7 -1023,7 +1024,7 @@@ void mesh_path_start_discovery(struct i goto enddiscovery;
spin_lock_bh(&mpath->state_lock); - if (mpath->flags & MESH_PATH_DELETED) { + if (mpath->flags & (MESH_PATH_DELETED | MESH_PATH_FIXED)) { spin_unlock_bh(&mpath->state_lock); goto enddiscovery; } diff --combined net/mac80211/sta_info.c index aa58df8,c803e2c..011880d --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@@ -67,12 -67,10 +67,10 @@@
static const struct rhashtable_params sta_rht_params = { .nelem_hint = 3, /* start small */ - .insecure_elasticity = true, /* Disable chain-length checks. */ .automatic_shrinking = true, .head_offset = offsetof(struct sta_info, hash_node), .key_offset = offsetof(struct sta_info, addr), .key_len = ETH_ALEN, - .hashfn = sta_addr_hash, .max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE, };
@@@ -80,8 -78,8 +78,8 @@@ static int sta_info_hash_del(struct ieee80211_local *local, struct sta_info *sta) { - return rhashtable_remove_fast(&local->sta_hash, &sta->hash_node, - sta_rht_params); + return rhltable_remove(&local->sta_hash, &sta->hash_node, + sta_rht_params); }
static void __cleanup_single_sta(struct sta_info *sta) @@@ -157,19 -155,22 +155,22 @@@ static void cleanup_single_sta(struct s sta_info_free(local, sta); }
+ struct rhlist_head *sta_info_hash_lookup(struct ieee80211_local *local, + const u8 *addr) + { + return rhltable_lookup(&local->sta_hash, addr, sta_rht_params); + } + /* protected by RCU */ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata, const u8 *addr) { struct ieee80211_local *local = sdata->local; + struct rhlist_head *tmp; struct sta_info *sta; - struct rhash_head *tmp; - const struct bucket_table *tbl;
rcu_read_lock(); - tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash); - - for_each_sta_info(local, tbl, addr, sta, tmp) { + for_each_sta_info(local, addr, sta, tmp) { if (sta->sdata == sdata) { rcu_read_unlock(); /* this is safe as the caller must already hold @@@ -190,14 -191,11 +191,11 @@@ struct sta_info *sta_info_get_bss(struc const u8 *addr) { struct ieee80211_local *local = sdata->local; + struct rhlist_head *tmp; struct sta_info *sta; - struct rhash_head *tmp; - const struct bucket_table *tbl;
rcu_read_lock(); - tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash); - - for_each_sta_info(local, tbl, addr, sta, tmp) { + for_each_sta_info(local, addr, sta, tmp) { if (sta->sdata == sdata || (sta->sdata->bss && sta->sdata->bss == sdata->bss)) { rcu_read_unlock(); @@@ -263,8 -261,8 +261,8 @@@ void sta_info_free(struct ieee80211_loc static int sta_info_hash_add(struct ieee80211_local *local, struct sta_info *sta) { - return rhashtable_insert_fast(&local->sta_hash, &sta->hash_node, - sta_rht_params); + return rhltable_insert(&local->sta_hash, &sta->hash_node, + sta_rht_params); }
static void sta_deliver_ps_frames(struct work_struct *wk) @@@ -340,6 -338,9 +338,9 @@@ struct sta_info *sta_info_alloc(struct
memcpy(sta->addr, addr, ETH_ALEN); memcpy(sta->sta.addr, addr, ETH_ALEN); + sta->sta.max_rx_aggregation_subframes = + local->hw.max_rx_aggregation_subframes; + sta->local = local; sta->sdata = sdata; sta->rx_stats.last_rx = jiffies; @@@ -450,9 -451,9 +451,9 @@@ static int sta_info_insert_check(struc is_multicast_ether_addr(sta->sta.addr))) return -EINVAL;
- /* Strictly speaking this isn't necessary as we hold the mutex, but - * the rhashtable code can't really deal with that distinction. We - * do require the mutex for correctness though. + /* The RCU read lock is required by rhashtable due to + * asynchronous resize/rehash. We also require the mutex + * for correctness. */ rcu_read_lock(); lockdep_assert_held(&sdata->local->sta_mtx); @@@ -687,7 -688,7 +688,7 @@@ static void __sta_info_recalc_tim(struc }
/* No need to do anything if the driver does all */ - if (ieee80211_hw_check(&local->hw, AP_LINK_PS)) + if (!local->ops->set_tim) return;
if (sta->dead) @@@ -1040,16 -1041,11 +1041,11 @@@ static void sta_info_cleanup(unsigned l round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL)); }
- u32 sta_addr_hash(const void *key, u32 length, u32 seed) - { - return jhash(key, ETH_ALEN, seed); - } - int sta_info_init(struct ieee80211_local *local) { int err;
- err = rhashtable_init(&local->sta_hash, &sta_rht_params); + err = rhltable_init(&local->sta_hash, &sta_rht_params); if (err) return err;
@@@ -1065,7 -1061,7 +1061,7 @@@ void sta_info_stop(struct ieee80211_local *local) { del_timer_sync(&local->sta_cleanup); - rhashtable_destroy(&local->sta_hash); + rhltable_destroy(&local->sta_hash); }
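The conversion above moves the station table from rhashtable to rhltable, which chains entries sharing a key: different interfaces may hold stations with the same MAC address, which is what the removed insecure_elasticity and custom hashfn settings had been working around. A lookup sketch, assuming the same embedded hash_node member:

    struct rhlist_head *list, *tmp;
    struct sta_info *sta;

    rcu_read_lock();
    list = rhltable_lookup(&local->sta_hash, addr, sta_rht_params);
    rhl_for_each_entry_rcu(sta, tmp, list, hash_node) {
            /* visits every sta_info whose addr matches, across sdatas */
    }
    rcu_read_unlock();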
@@@ -1135,17 -1131,14 +1131,14 @@@ struct ieee80211_sta *ieee80211_find_st const u8 *localaddr) { struct ieee80211_local *local = hw_to_local(hw); + struct rhlist_head *tmp; struct sta_info *sta; - struct rhash_head *tmp; - const struct bucket_table *tbl; - - tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
/* * Just return a random station if localaddr is NULL * ... first in list. */ - for_each_sta_info(local, tbl, addr, sta, tmp) { + for_each_sta_info(local, addr, sta, tmp) { if (localaddr && !ether_addr_equal(sta->sdata->vif.addr, localaddr)) continue; @@@ -1616,6 -1609,7 +1609,6 @@@ ieee80211_sta_ps_deliver_response(struc
sta_info_recalc_tim(sta); } else { - unsigned long tids = sta->txq_buffered_tids & driver_release_tids; int tid;
/* @@@ -1647,8 -1641,7 +1640,8 @@@ for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) { struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
- if (!(tids & BIT(tid)) || txqi->tin.backlog_packets) + if (!(driver_release_tids & BIT(tid)) || + txqi->tin.backlog_packets) continue;
sta_info_recalc_tim(sta); @@@ -2279,11 -2272,7 +2272,7 @@@ void sta_set_sinfo(struct sta_info *sta if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER);
- /* check if the driver has a SW RC implementation */ - if (ref && ref->ops->get_expected_throughput) - thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv); - else - thr = drv_get_expected_throughput(local, &sta->sta); + thr = sta_get_expected_throughput(sta);
if (thr != 0) { sinfo->filled |= BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT); @@@ -2291,6 -2280,25 +2280,25 @@@ } }
+ u32 sta_get_expected_throughput(struct sta_info *sta) + { + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + struct rate_control_ref *ref = NULL; + u32 thr = 0; + + if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL)) + ref = local->rate_ctrl; + + /* check if the driver has a SW RC implementation */ + if (ref && ref->ops->get_expected_throughput) + thr = ref->ops->get_expected_throughput(sta->rate_ctrl_priv); + else + thr = drv_get_expected_throughput(local, sta); + + return thr; + } + unsigned long ieee80211_sta_last_active(struct sta_info *sta) { struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta); diff --combined net/mac80211/tx.c index 18b285e,61d302d..1ff08be --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@@ -796,36 -796,6 +796,36 @@@ static __le16 ieee80211_tx_next_seq(str return ret; }
+static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local, + struct ieee80211_vif *vif, + struct ieee80211_sta *pubsta, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_txq *txq = NULL; + + if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) || + (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE)) + return NULL; + + if (!ieee80211_is_data(hdr->frame_control)) + return NULL; + + if (pubsta) { + u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; + + txq = pubsta->txq[tid]; + } else if (vif) { + txq = vif->txq; + } + + if (!txq) + return NULL; + + return to_txq_info(txq); +} + static ieee80211_tx_result debug_noinline ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) { @@@ -883,8 -853,7 +883,8 @@@ tid = *qc & IEEE80211_QOS_CTL_TID_MASK; tx->sta->tx_stats.msdu[tid]++;
- if (!tx->sta->sta.txq[0]) + if (!ieee80211_get_txq(tx->local, info->control.vif, &tx->sta->sta, + tx->skb)) hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
return TX_CONTINUE; @@@ -1274,6 -1243,36 +1274,6 @@@ ieee80211_tx_prepare(struct ieee80211_s return TX_CONTINUE; }
-static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
-					  struct ieee80211_vif *vif,
-					  struct ieee80211_sta *pubsta,
-					  struct sk_buff *skb)
-{
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct ieee80211_txq *txq = NULL;
-
-	if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) ||
-	    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
-		return NULL;
-
-	if (!ieee80211_is_data(hdr->frame_control))
-		return NULL;
-
-	if (pubsta) {
-		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
-
-		txq = pubsta->txq[tid];
-	} else if (vif) {
-		txq = vif->txq;
-	}
-
-	if (!txq)
-		return NULL;
-
-	return to_txq_info(txq);
-}
-
 static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb)
 {
 	IEEE80211_SKB_CB(skb)->control.enqueue_time = codel_get_time();
@@@ -1344,7 -1343,7 +1344,7 @@@ static struct sk_buff *fq_tin_dequeue_f
 	local = container_of(fq, struct ieee80211_local, fq);
 	txqi = container_of(tin, struct txq_info, tin);
 	cparams = &local->cparams;
-	cstats = &local->cstats;
+	cstats = &txqi->cstats;

 	if (flow == &txqi->def_flow)
 		cvars = &txqi->def_cvars;
@@@ -1404,6 -1403,7 +1404,7 @@@ void ieee80211_txq_init(struct ieee8021
 	fq_tin_init(&txqi->tin);
 	fq_flow_init(&txqi->def_flow);
 	codel_vars_init(&txqi->def_cvars);
+	codel_stats_init(&txqi->cstats);

 	txqi->txq.vif = &sdata->vif;

@@@ -1442,7 -1442,6 +1443,6 @@@ int ieee80211_txq_setup_flows(struct ie
 		return ret;

 	codel_params_init(&local->cparams);
-	codel_stats_init(&local->cstats);
 	local->cparams.interval = MS2TIME(100);
 	local->cparams.target = MS2TIME(20);
 	local->cparams.ecn = true;
@@@ -1515,12 -1514,8 +1515,12 @@@ out
 	spin_unlock_bh(&fq->lock);

 	if (skb && skb_has_frag_list(skb) &&
-	    !ieee80211_hw_check(&local->hw, TX_FRAG_LIST))
-		skb_linearize(skb);
+	    !ieee80211_hw_check(&local->hw, TX_FRAG_LIST)) {
+		if (skb_linearize(skb)) {
+			ieee80211_free_txskb(&local->hw, skb);
+			return NULL;
+		}
+	}
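The old code ignored skb_linearize()'s return value; it can fail with -ENOMEM, leaving a frag-listed skb headed for a driver that declared no TX_FRAG_LIST support. Minimal shape of the fixed path (sketch):

    if (skb_linearize(skb)) {                       /* 0 on success */
        ieee80211_free_txskb(&local->hw, skb);      /* drop, don't hand over */
        return NULL;
    }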
 	return skb;
 }
@@@ -1648,7 -1643,7 +1648,7 @@@ static bool __ieee80211_tx(struct ieee8

 	switch (sdata->vif.type) {
 	case NL80211_IFTYPE_MONITOR:
-		if (sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE) {
+		if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
 			vif = &sdata->vif;
 			break;
 		}
@@@ -2268,15 -2263,9 +2268,9 @@@ static int ieee80211_lookup_ra_sta(stru
 	case NL80211_IFTYPE_STATION:
 		if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
 			sta = sta_info_get(sdata, skb->data);
-			if (sta) {
-				bool tdls_peer, tdls_auth;
-
-				tdls_peer = test_sta_flag(sta,
-							  WLAN_STA_TDLS_PEER);
-				tdls_auth = test_sta_flag(sta,
-							  WLAN_STA_TDLS_PEER_AUTH);
-
-				if (tdls_peer && tdls_auth) {
+			if (sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
+				if (test_sta_flag(sta,
+						  WLAN_STA_TDLS_PEER_AUTH)) {
 					*sta_out = sta;
 					return 0;
 				}
@@@ -2288,8 -2277,7 +2282,7 @@@
 				 * after a TDLS sta is removed due to being
 				 * unreachable.
 				 */
-				if (tdls_peer && !tdls_auth &&
-				    !ieee80211_is_tdls_setup(skb))
+				if (!ieee80211_is_tdls_setup(skb))
 					return -EINVAL;
 			}

@@@ -2339,7 -2327,6 +2332,6 @@@ static struct sk_buff *ieee80211_build_
 	struct mesh_path __maybe_unused *mppath = NULL, *mpath = NULL;
 	const u8 *encaps_data;
 	int encaps_len, skip_header_bytes;
-	int nh_pos, h_pos;
 	bool wme_sta = false, authorized = false;
 	bool tdls_peer;
 	bool multicast;
@@@ -2645,13 -2632,7 +2637,7 @@@
 		encaps_len = 0;
 	}

-	nh_pos = skb_network_header(skb) - skb->data;
-	h_pos = skb_transport_header(skb) - skb->data;
-
 	skb_pull(skb, skip_header_bytes);
-	nh_pos -= skip_header_bytes;
-	h_pos -= skip_header_bytes;
-
 	head_need = hdrlen + encaps_len + meshhdrlen - skb_headroom(skb);

 	/*
@@@ -2677,18 -2658,12 +2663,12 @@@
 		}
 	}

-	if (encaps_data) {
+	if (encaps_data)
 		memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
-		nh_pos += encaps_len;
-		h_pos += encaps_len;
-	}

 #ifdef CONFIG_MAC80211_MESH
-	if (meshhdrlen > 0) {
+	if (meshhdrlen > 0)
 		memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen);
-		nh_pos += meshhdrlen;
-		h_pos += meshhdrlen;
-	}
 #endif

 	if (ieee80211_is_data_qos(fc)) {
@@@ -2704,15 -2679,7 +2684,7 @@@
 	} else
 		memcpy(skb_push(skb, hdrlen), &hdr, hdrlen);

-	nh_pos += hdrlen;
-	h_pos += hdrlen;
-
-	/* Update skb pointers to various headers since this modified frame
-	 * is going to go through Linux networking code that may potentially
-	 * need things like pointer to IP header. */
 	skb_reset_mac_header(skb);
-	skb_set_network_header(skb, nh_pos);
-	skb_set_transport_header(skb, h_pos);

 	info = IEEE80211_SKB_CB(skb);
 	memset(info, 0, sizeof(*info));
@@@ -3269,7 -3236,7 +3241,7 @@@ static bool ieee80211_xmit_fast(struct

 	if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
 		*ieee80211_get_qos_ctl(hdr) = tid;
-		if (!sta->sta.txq[0])
+		if (!ieee80211_get_txq(local, &sdata->vif, &sta->sta, skb))
 			hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
 	} else {
 		info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
@@@ -4395,9 -4362,6 +4367,6 @@@ void __ieee80211_tx_skb_tid_band(struc
 	int ac = ieee802_1d_to_ac[tid & 7];

 	skb_reset_mac_header(skb);
-	skb_reset_network_header(skb);
-	skb_reset_transport_header(skb);
-
 	skb_set_queue_mapping(skb, ac);
 	skb->priority = tid;
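For reference, ieee802_1d_to_ac[tid & 7] above is mac80211's 802.1d priority to WMM access-category table. A sketch of the conventional mapping it encodes (shown for illustration only, not lifted from this diff):

    enum ac { AC_VO, AC_VI, AC_BE, AC_BK };

    static const enum ac tid_to_ac[8] = {
        AC_BE,  /* TID 0: best effort */
        AC_BK,  /* TID 1: background */
        AC_BK,  /* TID 2: background */
        AC_BE,  /* TID 3: best effort */
        AC_VI,  /* TID 4: video */
        AC_VI,  /* TID 5: video */
        AC_VO,  /* TID 6: voice */
        AC_VO,  /* TID 7: voice */
    };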
diff --combined net/netfilter/nf_conntrack_core.c
index 9934b0c,ac1db40..6570982
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@@ -72,12 -72,24 +72,24 @@@ EXPORT_SYMBOL_GPL(nf_conntrack_expect_l
 struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_hash);

+ struct conntrack_gc_work {
+ 	struct delayed_work	dwork;
+ 	u32			last_bucket;
+ 	bool			exiting;
+ };
+ 
 static __read_mostly struct kmem_cache *nf_conntrack_cachep;
 static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
- static __read_mostly seqcount_t nf_conntrack_generation;
 static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
 static __read_mostly bool nf_conntrack_locks_all;

+ #define GC_MAX_BUCKETS_DIV	64u
+ #define GC_MAX_BUCKETS		8192u
+ #define GC_INTERVAL		(5 * HZ)
+ #define GC_MAX_EVICTS		256u
+ 
+ static struct conntrack_gc_work conntrack_gc_work;
+ 
 void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
 {
 	spin_lock(lock);
@@@ -164,7 -176,7 +176,7 @@@ unsigned int nf_conntrack_htable_size _
 EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

 unsigned int nf_conntrack_max __read_mostly;
- EXPORT_SYMBOL_GPL(nf_conntrack_max);
+ seqcount_t nf_conntrack_generation __read_mostly;

 DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
@@@ -372,7 -384,6 +384,6 @@@ destroy_conntrack(struct nf_conntrack *

 	pr_debug("destroy_conntrack(%p)\n", ct);
 	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
- 	NF_CT_ASSERT(!timer_pending(&ct->timeout));

 	if (unlikely(nf_ct_is_template(ct))) {
 		nf_ct_tmpl_free(ct);
@@@ -435,35 -446,30 +446,30 @@@ bool nf_ct_delete(struct nf_conn *ct, u
 {
 	struct nf_conn_tstamp *tstamp;

+ 	if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
+ 		return false;
+ 
 	tstamp = nf_conn_tstamp_find(ct);
 	if (tstamp && tstamp->stop == 0)
 		tstamp->stop = ktime_get_real_ns();

- 	if (nf_ct_is_dying(ct))
- 		goto delete;
- 
 	if (nf_conntrack_event_report(IPCT_DESTROY, ct,
 				      portid, report) < 0) {
- 		/* destroy event was not delivered */
+ 		/* destroy event was not delivered. nf_ct_put will
+ 		 * be done by event cache worker on redelivery.
+ 		 */
 		nf_ct_delete_from_lists(ct);
 		nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
 		return false;
 	}

 	nf_conntrack_ecache_work(nf_ct_net(ct));
- 	set_bit(IPS_DYING_BIT, &ct->status);
- delete:
 	nf_ct_delete_from_lists(ct);
 	nf_ct_put(ct);
 	return true;
 }
 EXPORT_SYMBOL_GPL(nf_ct_delete);
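nf_ct_delete() is now idempotent: test_and_set_bit() is atomic, so exactly one caller observes the 0 -> 1 transition of IPS_DYING_BIT and runs the teardown. That matters once the new GC worker and the packet-path eviction below can race to delete the same entry. The idiom, in isolation:

    if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
        return false;   /* someone else is already tearing it down */
    /* ... deliver destroy event, unhash, drop the reference ... */
    return true;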
- static void death_by_timeout(unsigned long ul_conntrack)
- {
- 	nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
- }
- 
 static inline bool
 nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
 		const struct nf_conntrack_tuple *tuple,
@@@ -481,22 -487,17 +487,17 @@@
 	       net_eq(net, nf_ct_net(ct));
 }

- /* must be called with rcu read lock held */
- void nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize)
+ /* caller must hold rcu readlock and none of the nf_conntrack_locks */
+ static void nf_ct_gc_expired(struct nf_conn *ct)
 {
- 	struct hlist_nulls_head *hptr;
- 	unsigned int sequence, hsz;
+ 	if (!atomic_inc_not_zero(&ct->ct_general.use))
+ 		return;

- 	do {
- 		sequence = read_seqcount_begin(&nf_conntrack_generation);
- 		hsz  = nf_conntrack_htable_size;
- 		hptr = nf_conntrack_hash;
- 	} while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+ 	if (nf_ct_should_gc(ct))
+ 		nf_ct_kill(ct);

- 	*hash = hptr;
- 	*hsize = hsz;
+ 	nf_ct_put(ct);
 }
- EXPORT_SYMBOL_GPL(nf_conntrack_get_ht);
 /*
  * Warning :
@@@ -510,16 -511,24 +511,24 @@@ ____nf_conntrack_find(struct net *net
 	struct nf_conntrack_tuple_hash *h;
 	struct hlist_nulls_head *ct_hash;
 	struct hlist_nulls_node *n;
- 	unsigned int bucket, sequence;
+ 	unsigned int bucket, hsize;

 begin:
- 	do {
- 		sequence = read_seqcount_begin(&nf_conntrack_generation);
- 		bucket = scale_hash(hash);
- 		ct_hash = nf_conntrack_hash;
- 	} while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+ 	nf_conntrack_get_ht(&ct_hash, &hsize);
+ 	bucket = reciprocal_scale(hash, hsize);

 	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
+ 		struct nf_conn *ct;
+ 
+ 		ct = nf_ct_tuplehash_to_ctrack(h);
+ 		if (nf_ct_is_expired(ct)) {
+ 			nf_ct_gc_expired(ct);
+ 			continue;
+ 		}
+ 
+ 		if (nf_ct_is_dying(ct))
+ 			continue;
+ 
 		if (nf_ct_key_equal(h, tuple, zone, net)) {
 			NF_CT_STAT_INC_ATOMIC(net, found);
 			return h;
@@@ -618,7 -627,6 +627,6 @@@ nf_conntrack_hash_check_insert(struct n
 				    zone, net))
 			goto out;

- 	add_timer(&ct->timeout);
 	smp_wmb();
 	/* The caller holds a reference to this object */
 	atomic_set(&ct->ct_general.use, 2);
@@@ -771,8 -779,7 +779,7 @@@ __nf_conntrack_confirm(struct sk_buff *
 	/* Timer relative to confirmation time, not original
 	   setting time, otherwise we'd get timer wrap in
 	   weird delay cases. */
- 	ct->timeout.expires += jiffies;
- 	add_timer(&ct->timeout);
+ 	ct->timeout += nfct_time_stamp;
 	atomic_inc(&ct->ct_general.use);
 	ct->status |= IPS_CONFIRMED;
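This is the heart of the conntrack change in this merge: the per-entry struct timer_list is gone, and ct->timeout is now an absolute 32-bit jiffies stamp. Expiry becomes a signed comparison, and entries are reaped lazily on lookup or by the gc worker further down. A sketch of the arithmetic, assuming the 4.8-era helpers:

    #define nfct_time_stamp ((u32)(jiffies))        /* as in the patch */

    static inline bool ct_is_expired(u32 timeout)
    {
        return (s32)(timeout - nfct_time_stamp) <= 0;   /* wrap-safe */
    }

    static inline void ct_refresh(u32 *timeout, u32 extra_jiffies)
    {
        *timeout = nfct_time_stamp + extra_jiffies;
    }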
@@@ -823,29 -830,41 +830,41 @@@ nf_conntrack_tuple_taken(const struct n
 	const struct nf_conntrack_zone *zone;
 	struct nf_conntrack_tuple_hash *h;
 	struct hlist_nulls_head *ct_hash;
- 	unsigned int hash, sequence;
+ 	unsigned int hash, hsize;
 	struct hlist_nulls_node *n;
 	struct nf_conn *ct;

 	zone = nf_ct_zone(ignored_conntrack);

 	rcu_read_lock();
- 	do {
- 		sequence = read_seqcount_begin(&nf_conntrack_generation);
- 		hash = hash_conntrack(net, tuple);
- 		ct_hash = nf_conntrack_hash;
- 	} while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+ begin:
+ 	nf_conntrack_get_ht(&ct_hash, &hsize);
+ 	hash = __hash_conntrack(net, tuple, hsize);

 	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
 		ct = nf_ct_tuplehash_to_ctrack(h);
- 		if (ct != ignored_conntrack &&
- 		    nf_ct_key_equal(h, tuple, zone, net)) {
+ 
+ 		if (ct == ignored_conntrack)
+ 			continue;
+ 
+ 		if (nf_ct_is_expired(ct)) {
+ 			nf_ct_gc_expired(ct);
+ 			continue;
+ 		}
+ 
+ 		if (nf_ct_key_equal(h, tuple, zone, net)) {
 			NF_CT_STAT_INC_ATOMIC(net, found);
 			rcu_read_unlock();
 			return 1;
 		}
 		NF_CT_STAT_INC_ATOMIC(net, searched);
 	}
+ 
+ 	if (get_nulls_value(n) != hash) {
+ 		NF_CT_STAT_INC_ATOMIC(net, search_restart);
+ 		goto begin;
+ 	}
+ 
 	rcu_read_unlock();
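The restart block above is the standard nulls-list idiom: every nulls chain ends in a marker that encodes its bucket, so an RCU walker that was moved onto a different chain by a concurrent rehash ends on a marker that does not match the bucket it started from, and must retry. In isolation:

    begin:
        hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
            /* ... inspect entry ... */
        }
        if (get_nulls_value(n) != hash) {   /* ended on a foreign chain */
            NF_CT_STAT_INC_ATOMIC(net, search_restart);
            goto begin;
        }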
 	return 0;
@@@ -867,6 -886,11 +886,11 @@@ static unsigned int early_drop_list(str
 	hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
 		tmp = nf_ct_tuplehash_to_ctrack(h);

+ 		if (nf_ct_is_expired(tmp)) {
+ 			nf_ct_gc_expired(tmp);
+ 			continue;
+ 		}
+ 
 		if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
 		    !net_eq(nf_ct_net(tmp), net) ||
 		    nf_ct_is_dying(tmp))
@@@ -884,7 -908,6 +908,6 @@@
 		 */
 		if (net_eq(nf_ct_net(tmp), net) &&
 		    nf_ct_is_confirmed(tmp) &&
- 		    del_timer(&tmp->timeout) &&
 		    nf_ct_delete(tmp, 0, 0))
 			drops++;

@@@ -900,14 -923,11 +923,11 @@@ static noinline int early_drop(struct n

 	for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
 		struct hlist_nulls_head *ct_hash;
- 		unsigned hash, sequence, drops;
+ 		unsigned int hash, hsize, drops;

 		rcu_read_lock();
- 		do {
- 			sequence = read_seqcount_begin(&nf_conntrack_generation);
- 			hash = scale_hash(_hash++);
- 			ct_hash = nf_conntrack_hash;
- 		} while (read_seqcount_retry(&nf_conntrack_generation, sequence));
+ 		nf_conntrack_get_ht(&ct_hash, &hsize);
+ 		hash = reciprocal_scale(_hash++, hsize);

 		drops = early_drop_list(net, &ct_hash[hash]);
 		rcu_read_unlock();
@@@ -921,6 -941,69 +941,69 @@@
 	return false;
 }

+ static void gc_worker(struct work_struct *work)
+ {
+ 	unsigned int i, goal, buckets = 0, expired_count = 0;
+ 	unsigned long next_run = GC_INTERVAL;
+ 	unsigned int ratio, scanned = 0;
+ 	struct conntrack_gc_work *gc_work;
+ 
+ 	gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
+ 
+ 	goal = min(nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV, GC_MAX_BUCKETS);
+ 	i = gc_work->last_bucket;
+ 
+ 	do {
+ 		struct nf_conntrack_tuple_hash *h;
+ 		struct hlist_nulls_head *ct_hash;
+ 		struct hlist_nulls_node *n;
+ 		unsigned int hashsz;
+ 		struct nf_conn *tmp;
+ 
+ 		i++;
+ 		rcu_read_lock();
+ 
+ 		nf_conntrack_get_ht(&ct_hash, &hashsz);
+ 		if (i >= hashsz)
+ 			i = 0;
+ 
+ 		hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
+ 			tmp = nf_ct_tuplehash_to_ctrack(h);
+ 
+ 			scanned++;
+ 			if (nf_ct_is_expired(tmp)) {
+ 				nf_ct_gc_expired(tmp);
+ 				expired_count++;
+ 				continue;
+ 			}
+ 		}
+ 
+ 		/* could check get_nulls_value() here and restart if ct
+ 		 * was moved to another chain.  But given gc is best-effort
+ 		 * we will just continue with next hash slot.
+ 		 */
+ 		rcu_read_unlock();
+ 		cond_resched_rcu_qs();
+ 	} while (++buckets < goal &&
+ 		 expired_count < GC_MAX_EVICTS);
+ 
+ 	if (gc_work->exiting)
+ 		return;
+ 
+ 	ratio = scanned ? expired_count * 100 / scanned : 0;
+ 	if (ratio >= 90)
+ 		next_run = 0;
+ 
+ 	gc_work->last_bucket = i;
+ 	schedule_delayed_work(&gc_work->dwork, next_run);
+ }
+ 
+ static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
+ {
+ 	INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
+ 	gc_work->exiting = false;
+ }
+ 
 static struct nf_conn *
 __nf_conntrack_alloc(struct net *net,
 		     const struct nf_conntrack_zone *zone,
@@@ -957,8 -1040,6 +1040,6 @@@
 	/* save hash for reusing when confirming */
 	*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
 	ct->status = 0;
- 	/* Don't set timer yet: wait for confirmation */
- 	setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
 	write_pnet(&ct->ct_net, net);
 	memset(&ct->__nfct_init_offset[0], 0,
 	       offsetof(struct nf_conn, proto) -
@@@ -1035,9 -1116,9 +1116,9 @@@ init_conntrack(struct net *net, struct
 	if (IS_ERR(ct))
 		return (struct nf_conntrack_tuple_hash *)ct;

- 	if (tmpl && nfct_synproxy(tmpl)) {
- 		nfct_seqadj_ext_add(ct);
- 		nfct_synproxy_ext_add(ct);
+ 	if (!nf_ct_add_synproxy(ct, tmpl)) {
+ 		nf_conntrack_free(ct);
+ 		return ERR_PTR(-ENOMEM);
 	}

 	timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
@@@ -1332,7 -1413,6 +1413,6 @@@ void __nf_ct_refresh_acct(struct nf_con
 			  unsigned long extra_jiffies,
 			  int do_acct)
 {
- 	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
 	NF_CT_ASSERT(skb);

 	/* Only update if this is not a fixed timeout */
@@@ -1340,39 -1420,25 +1420,25 @@@
 		goto acct;

 	/* If not in hash table, timer will not be active yet */
- 	if (!nf_ct_is_confirmed(ct)) {
- 		ct->timeout.expires = extra_jiffies;
- 	} else {
- 		unsigned long newtime = jiffies + extra_jiffies;
- 
- 		/* Only update the timeout if the new timeout is at least
- 		   HZ jiffies from the old timeout.  Need del_timer for race
- 		   avoidance (may already be dying). */
- 		if (newtime - ct->timeout.expires >= HZ)
- 			mod_timer_pending(&ct->timeout, newtime);
- 	}
+ 	if (nf_ct_is_confirmed(ct))
+ 		extra_jiffies += nfct_time_stamp;

+ 	ct->timeout = extra_jiffies;
 acct:
 	if (do_acct)
 		nf_ct_acct_update(ct, ctinfo, skb->len);
 }
 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);

- bool __nf_ct_kill_acct(struct nf_conn *ct,
- 		       enum ip_conntrack_info ctinfo,
- 		       const struct sk_buff *skb,
- 		       int do_acct)
+ bool nf_ct_kill_acct(struct nf_conn *ct,
+ 		     enum ip_conntrack_info ctinfo,
+ 		     const struct sk_buff *skb)
 {
- 	if (do_acct)
- 		nf_ct_acct_update(ct, ctinfo, skb->len);
+ 	nf_ct_acct_update(ct, ctinfo, skb->len);

- 	if (del_timer(&ct->timeout)) {
- 		ct->timeout.function((unsigned long)ct);
- 		return true;
- 	}
- 	return false;
+ 	return nf_ct_delete(ct, 0, 0);
 }
- EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
+ EXPORT_SYMBOL_GPL(nf_ct_kill_acct);
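gc_worker() above bounds its own cost: per run it scans at most htable_size/GC_MAX_BUCKETS_DIV buckets (capped at GC_MAX_BUCKETS) and stops after GC_MAX_EVICTS evictions, then paces itself by how much garbage it found. The pacing logic, reduced to its core:

    unsigned int ratio = scanned ? expired_count * 100 / scanned : 0;
    unsigned long next_run = GC_INTERVAL;   /* normally 5 * HZ */

    if (ratio >= 90)
        next_run = 0;           /* mostly garbage: rerun immediately */

    gc_work->last_bucket = i;   /* resume where we left off */
    schedule_delayed_work(&gc_work->dwork, next_run);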
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)

@@@ -1505,11 -1571,8 +1571,8 @@@ void nf_ct_iterate_cleanup(struct net *

 	while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
 		/* Time to push up daisies... */
- 		if (del_timer(&ct->timeout))
- 			nf_ct_delete(ct, portid, report);
- 
- 		/* ... else the timer will get him soon. */

+ 		nf_ct_delete(ct, portid, report);
 		nf_ct_put(ct);
 		cond_resched();
 	}
@@@ -1545,6 -1608,7 +1608,7 @@@ static int untrack_refs(void

 void nf_conntrack_cleanup_start(void)
 {
+ 	conntrack_gc_work.exiting = true;
 	RCU_INIT_POINTER(ip_ct_attach, NULL);
 }

@@@ -1554,6 -1618,7 +1618,7 @@@ void nf_conntrack_cleanup_end(void
 	while (untrack_refs() > 0)
 		schedule();

+ 	cancel_delayed_work_sync(&conntrack_gc_work.dwork);
 	nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);

 	nf_conntrack_proto_fini();
@@@ -1828,6 -1893,10 +1893,10 @@@ int nf_conntrack_init_start(void
 	}
 	/*  - and make it look like a confirmed connection */
 	nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
+ 
+ 	conntrack_gc_work_init(&conntrack_gc_work);
+ 	schedule_delayed_work(&conntrack_gc_work.dwork, GC_INTERVAL);
+ 
 	return 0;

 err_proto:
diff --combined net/netfilter/nf_nat_core.c
index ecee105,81ae41f..bbb8f3d
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@@ -441,8 -441,7 +441,8 @@@ nf_nat_setup_info(struct nf_conn *ct
 	ct->status |= IPS_DST_NAT;
 		if (nfct_help(ct))
-			nfct_seqadj_ext_add(ct);
+			if (!nfct_seqadj_ext_add(ct))
+				return NF_DROP;
 	}

 	if (maniptype == NF_NAT_MANIP_SRC) {
@@@ -566,16 -565,10 +566,10 @@@ static int nf_nat_proto_clean(struct nf
 	 * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
 	 * will delete entry from already-freed table.
 	 */
- 	if (!del_timer(&ct->timeout))
- 		return 1;
- 
 	ct->status &= ~IPS_NAT_DONE_MASK;
- 
 	rhashtable_remove_fast(&nf_nat_bysource_table, &ct->nat_bysource,
 			       nf_nat_bysource_params);

- 	add_timer(&ct->timeout);
- 
 	/* don't delete conntrack.  Although that would make things a lot
 	 * simpler, we'd end up flushing all conntracks on nat rmmod.
 	 */
@@@ -808,7 -801,7 +802,7 @@@ nfnetlink_parse_nat_setup(struct nf_con
 	if (err < 0)
 		return err;

-	return nf_nat_setup_info(ct, &range, manip);
+	return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
 }
 #else
 static int
diff --combined net/wireless/nl80211.c
index 4809f4d,887c4c1..fd111e2
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@@ -848,13 -848,21 +848,21 @@@ nl80211_parse_connkeys(struct cfg80211_
 	struct nlattr *key;
 	struct cfg80211_cached_keys *result;
 	int rem, err, def = 0;
+ 	bool have_key = false;
+ 
+ 	nla_for_each_nested(key, keys, rem) {
+ 		have_key = true;
+ 		break;
+ 	}
+ 
+ 	if (!have_key)
+ 		return NULL;
 	result = kzalloc(sizeof(*result), GFP_KERNEL);
 	if (!result)
 		return ERR_PTR(-ENOMEM);

 	result->def = -1;
- 	result->defmgmt = -1;

 	nla_for_each_nested(key, keys, rem) {
 		memset(&parse, 0, sizeof(parse));
@@@ -866,7 -874,7 +874,7 @@@
 		err = -EINVAL;
 		if (!parse.p.key)
 			goto error;
- 		if (parse.idx < 0 || parse.idx > 4)
+ 		if (parse.idx < 0 || parse.idx > 3)
 			goto error;
 		if (parse.def) {
 			if (def)
@@@ -881,16 -889,24 +889,24 @@@
 					     parse.idx, false, NULL);
 		if (err)
 			goto error;
+ 		if (parse.p.cipher != WLAN_CIPHER_SUITE_WEP40 &&
+ 		    parse.p.cipher != WLAN_CIPHER_SUITE_WEP104) {
+ 			err = -EINVAL;
+ 			goto error;
+ 		}
 		result->params[parse.idx].cipher = parse.p.cipher;
 		result->params[parse.idx].key_len = parse.p.key_len;
 		result->params[parse.idx].key = result->data[parse.idx];
 		memcpy(result->data[parse.idx], parse.p.key, parse.p.key_len);

- 		if (parse.p.cipher == WLAN_CIPHER_SUITE_WEP40 ||
- 		    parse.p.cipher == WLAN_CIPHER_SUITE_WEP104) {
- 			if (no_ht)
- 				*no_ht = true;
- 		}
+ 		/* must be WEP key if we got here */
+ 		if (no_ht)
+ 			*no_ht = true;
+ 	}
+ 
+ 	if (result->def < 0) {
+ 		err = -EINVAL;
+ 		goto error;
 	}
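Two related fixes in nl80211_parse_connkeys(): connection keys are now rejected unless they are WEP40/WEP104, and the index check tightens from "> 4" to "> 3" — WEP has exactly four default keys, indexed 0..3, so the old off-by-one accepted index 4. As a predicate (sketch):

    static bool wep_key_idx_valid(int idx)
    {
        return idx >= 0 && idx <= 3;    /* four WEP default keys: 0..3 */
    }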
 	return result;
@@@ -2525,10 -2541,35 +2541,35 @@@ static int nl80211_dump_interface(struc
 	int if_idx = 0;
 	int wp_start = cb->args[0];
 	int if_start = cb->args[1];
+ 	int filter_wiphy = -1;
 	struct cfg80211_registered_device *rdev;
 	struct wireless_dev *wdev;

 	rtnl_lock();
+ 	if (!cb->args[2]) {
+ 		struct nl80211_dump_wiphy_state state = {
+ 			.filter_wiphy = -1,
+ 		};
+ 		int ret;
+ 
+ 		ret = nl80211_dump_wiphy_parse(skb, cb, &state);
+ 		if (ret)
+ 			return ret;
+ 
+ 		filter_wiphy = state.filter_wiphy;
+ 
+ 		/*
+ 		 * if filtering, set cb->args[2] to +1 since 0 is the default
+ 		 * value needed to determine that parsing is necessary.
+ 		 */
+ 		if (filter_wiphy >= 0)
+ 			cb->args[2] = filter_wiphy + 1;
+ 		else
+ 			cb->args[2] = -1;
+ 	} else if (cb->args[2] > 0) {
+ 		filter_wiphy = cb->args[2] - 1;
+ 	}
+ 
 	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
 		if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk)))
 			continue;
@@@ -2536,6 -2577,10 +2577,10 @@@
 			wp_idx++;
 			continue;
 		}
+ 
+ 		if (filter_wiphy >= 0 && filter_wiphy != rdev->wiphy_idx)
+ 			continue;
+ 
 		if_idx = 0;

 		list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
@@@ -2751,7 -2796,7 +2796,7 @@@ static int nl80211_new_interface(struc
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct vif_params params;
 	struct wireless_dev *wdev;
- 	struct sk_buff *msg, *event;
+ 	struct sk_buff *msg;
 	int err;
 	enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED;
 	u32 flags;
@@@ -2855,20 -2900,15 +2900,15 @@@
 		return -ENOBUFS;
 	}

- 	event = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- 	if (event) {
- 		if (nl80211_send_iface(event, 0, 0, 0,
- 				       rdev, wdev, false) < 0) {
- 			nlmsg_free(event);
- 			goto out;
- 		}
- 
- 		genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy),
- 					event, 0, NL80211_MCGRP_CONFIG,
- 					GFP_KERNEL);
- 	}
+ 	/*
+ 	 * For wdevs which have no associated netdev object (e.g. of type
+ 	 * NL80211_IFTYPE_P2P_DEVICE), emit the NEW_INTERFACE event here.
+ 	 * For all other types, the event will be generated from the
+ 	 * netdev notifier
+ 	 */
+ 	if (!wdev->netdev)
+ 		nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);

- out:
 	return genlmsg_reply(msg, info);
 }

@@@ -2876,18 -2916,10 +2916,10 @@@ static int nl80211_del_interface(struc
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct wireless_dev *wdev = info->user_ptr[1];
- 	struct sk_buff *msg;
- 	int status;

 	if (!rdev->ops->del_virtual_intf)
 		return -EOPNOTSUPP;

- 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- 	if (msg && nl80211_send_iface(msg, 0, 0, 0, rdev, wdev, true) < 0) {
- 		nlmsg_free(msg);
- 		msg = NULL;
- 	}
- 
 	/*
 	 * If we remove a wireless device without a netdev then clear
 	 * user_ptr[1] so that nl80211_post_doit won't dereference it
@@@ -2898,15 -2930,7 +2930,7 @@@
 	if (!wdev->netdev)
 		info->user_ptr[1] = NULL;

- 	status = rdev_del_virtual_intf(rdev, wdev);
- 	if (status >= 0 && msg)
- 		genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy),
- 					msg, 0, NL80211_MCGRP_CONFIG,
- 					GFP_KERNEL);
- 	else
- 		nlmsg_free(msg);
- 
- 	return status;
+ 	return rdev_del_virtual_intf(rdev, wdev);
 }

 static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info)
@@@ -5374,6 -5398,18 +5398,18 @@@ static int nl80211_check_s32(const stru
 	return 0;
 }

+ static int nl80211_check_power_mode(const struct nlattr *nla,
+ 				    enum nl80211_mesh_power_mode min,
+ 				    enum nl80211_mesh_power_mode max,
+ 				    enum nl80211_mesh_power_mode *out)
+ {
+ 	u32 val = nla_get_u32(nla);
+ 	if (val < min || val > max)
+ 		return -EINVAL;
+ 	*out = val;
+ 	return 0;
+ }
+ 
 static int nl80211_parse_mesh_config(struct genl_info *info,
 				     struct mesh_config *cfg,
 				     u32 *mask_out)
@@@ -5518,7 -5554,7 +5554,7 @@ do {
 				  NL80211_MESH_POWER_ACTIVE,
 				  NL80211_MESH_POWER_MAX,
 				  mask, NL80211_MESHCONF_POWER_MODE,
- 				  nl80211_check_u32);
+ 				  nl80211_check_power_mode);
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration,
 				  0, 65535, mask,
 				  NL80211_MESHCONF_AWAKE_WINDOW,
 				  nl80211_check_u16);
@@@ -6978,7 -7014,7 +7014,7 @@@ static int nl80211_channel_switch(struc

 		params.n_counter_offsets_presp = len / sizeof(u16);
 		if (rdev->wiphy.max_num_csa_counters &&
-		    (params.n_counter_offsets_beacon >
+		    (params.n_counter_offsets_presp >
 		     rdev->wiphy.max_num_csa_counters))
 			return -EINVAL;
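A one-identifier copy-paste fix: the bound check validated n_counter_offsets_beacon where the probe-response count was intended, so the presp array was never actually limited by max_num_csa_counters. The corrected shape, generically (illustrative types and name):

    /* each offsets array must independently respect the device limit;
     * max == 0 means the driver imposes none */
    static int check_csa_counters(unsigned int n_beacon,
                                  unsigned int n_presp, u8 max)
    {
        if (max && (n_beacon > max || n_presp > max))
            return -EINVAL;
        return 0;
    }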
@@@ -7368,7 -7404,7 +7404,7 @@@ static int nl80211_authenticate(struct
 		    (key.p.cipher != WLAN_CIPHER_SUITE_WEP104 ||
 		     key.p.key_len != WLAN_KEY_LEN_WEP104))
 			return -EINVAL;
- 		if (key.idx > 4)
+ 		if (key.idx > 3)
 			return -EINVAL;
 	} else {
 		key.p.key_len = 0;
@@@ -7773,12 -7809,13 +7809,13 @@@ static int nl80211_join_ibss(struct sk_

 	ibss.beacon_interval = 100;

- 	if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) {
+ 	if (info->attrs[NL80211_ATTR_BEACON_INTERVAL])
 		ibss.beacon_interval =
 			nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
- 		if (ibss.beacon_interval < 1 || ibss.beacon_interval > 10000)
- 			return -EINVAL;
- 	}
+ 
+ 	err = cfg80211_validate_beacon_int(rdev, ibss.beacon_interval);
+ 	if (err)
+ 		return err;
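The open-coded IBSS range check (and the mesh one further down) is replaced by cfg80211_validate_beacon_int(), giving a single validation point. The helper's body is not part of this diff; a minimal sketch of what such a validator plausibly does:

    static int validate_beacon_int_sketch(u32 beacon_int)
    {
        /* assumption: the common TU range used by both callers */
        if (beacon_int < 10 || beacon_int > 10000)
            return -EINVAL;
        return 0;
    }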
 	if (!rdev->ops->join_ibss)
 		return -EOPNOTSUPP;
@@@ -7985,6 -8022,8 +8022,8 @@@ __cfg80211_alloc_vendor_skb(struct cfg8
 	}

 	data = nla_nest_start(skb, attr);
+ 	if (!data)
+ 		goto nla_put_failure;

 	((void **)skb->cb)[0] = rdev;
 	((void **)skb->cb)[1] = hdr;
@@@ -9252,9 -9291,10 +9291,10 @@@ static int nl80211_join_mesh(struct sk_
 	if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) {
 		setup.beacon_interval =
 			nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
- 		if (setup.beacon_interval < 10 ||
- 		    setup.beacon_interval > 10000)
- 			return -EINVAL;
+ 
+ 		err = cfg80211_validate_beacon_int(rdev, setup.beacon_interval);
+ 		if (err)
+ 			return err;
 	}

 	if (info->attrs[NL80211_ATTR_DTIM_PERIOD]) {
@@@ -9413,18 -9453,27 +9453,27 @@@ static int nl80211_send_wowlan_nd(struc
 	if (!freqs)
 		return -ENOBUFS;

- 	for (i = 0; i < req->n_channels; i++)
- 		nla_put_u32(msg, i, req->channels[i]->center_freq);
+ 	for (i = 0; i < req->n_channels; i++) {
+ 		if (nla_put_u32(msg, i, req->channels[i]->center_freq))
+ 			return -ENOBUFS;
+ 	}

 	nla_nest_end(msg, freqs);

 	if (req->n_match_sets) {
 		matches = nla_nest_start(msg, NL80211_ATTR_SCHED_SCAN_MATCH);
+ 		if (!matches)
+ 			return -ENOBUFS;
+ 
 		for (i = 0; i < req->n_match_sets; i++) {
 			match = nla_nest_start(msg, i);
- 			nla_put(msg, NL80211_SCHED_SCAN_MATCH_ATTR_SSID,
- 				req->match_sets[i].ssid.ssid_len,
- 				req->match_sets[i].ssid.ssid);
+ 			if (!match)
+ 				return -ENOBUFS;
+ 
+ 			if (nla_put(msg, NL80211_SCHED_SCAN_MATCH_ATTR_SSID,
+ 				    req->match_sets[i].ssid.ssid_len,
+ 				    req->match_sets[i].ssid.ssid))
+ 				return -ENOBUFS;
 			nla_nest_end(msg, match);
 		}
 		nla_nest_end(msg, matches);
@@@ -9436,6 -9485,9 +9485,9 @@@

 	for (i = 0; i < req->n_scan_plans; i++) {
 		scan_plan = nla_nest_start(msg, i + 1);
+ 		if (!scan_plan)
+ 			return -ENOBUFS;
+ 
 		if (!scan_plan ||
 		    nla_put_u32(msg, NL80211_SCHED_SCAN_PLAN_INTERVAL,
 				req->scan_plans[i].interval) ||
@@@ -11847,6 -11899,29 +11899,29 @@@ void nl80211_notify_wiphy(struct cfg802
 				NL80211_MCGRP_CONFIG, GFP_KERNEL);
 }

+ void nl80211_notify_iface(struct cfg80211_registered_device *rdev,
+ 			  struct wireless_dev *wdev,
+ 			  enum nl80211_commands cmd)
+ {
+ 	struct sk_buff *msg;
+ 
+ 	WARN_ON(cmd != NL80211_CMD_NEW_INTERFACE &&
+ 		cmd != NL80211_CMD_DEL_INTERFACE);
+ 
+ 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ 	if (!msg)
+ 		return;
+ 
+ 	if (nl80211_send_iface(msg, 0, 0, 0, rdev, wdev,
+ 			       cmd == NL80211_CMD_DEL_INTERFACE) < 0) {
+ 		nlmsg_free(msg);
+ 		return;
+ 	}
+ 
+ 	genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
+ 				NL80211_MCGRP_CONFIG, GFP_KERNEL);
+ }
+ 
 static int nl80211_add_scan_req(struct sk_buff *msg,
 				struct cfg80211_registered_device *rdev)
 {
diff --combined net/xfrm/xfrm_state.c
index a30f898d,ba8bf51..5685da0
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@@ -28,6 -28,11 +28,11 @@@

 #include "xfrm_hash.h"

+ #define xfrm_state_deref_prot(table, net) \
+ 	rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
+ 
+ static void xfrm_state_gc_task(struct work_struct *work);
+ 
 /* Each xfrm_state may be linked to two tables:

    1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
@@@ -36,6 -41,15 +41,15 @@@
  */

 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
+ static __read_mostly seqcount_t xfrm_state_hash_generation = SEQCNT_ZERO(xfrm_state_hash_generation);
+ 
+ static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
+ static HLIST_HEAD(xfrm_state_gc_list);
+ 
+ static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
+ {
+ 	return atomic_inc_not_zero(&x->refcnt);
+ }
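xfrm_state_hold_rcu() exists because lookups now run under rcu_read_lock() instead of the state lock: an entry found in the hash may already have dropped its last reference, and a plain refcount bump would resurrect it. atomic_inc_not_zero() only takes a reference while the count is still positive. Typical caller shape (find_by_spi() is a hypothetical stand-in):

    rcu_read_lock();
    x = find_by_spi(net, spi);          /* hypothetical lookup */
    if (x && !xfrm_state_hold_rcu(x))
        x = NULL;                       /* raced with the final put */
    rcu_read_unlock();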
 static inline unsigned int xfrm_dst_hash(struct net *net,
 					 const xfrm_address_t *daddr,
@@@ -76,18 -90,18 +90,18 @@@ static void xfrm_hash_transfer(struct h
 		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
 				    x->props.reqid, x->props.family,
 				    nhashmask);
- 		hlist_add_head(&x->bydst, ndsttable+h);
+ 		hlist_add_head_rcu(&x->bydst, ndsttable + h);

 		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
 				    x->props.family,
 				    nhashmask);
- 		hlist_add_head(&x->bysrc, nsrctable+h);
+ 		hlist_add_head_rcu(&x->bysrc, nsrctable + h);

 		if (x->id.spi) {
 			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
 					    x->id.proto, x->props.family,
 					    nhashmask);
- 			hlist_add_head(&x->byspi, nspitable+h);
+ 			hlist_add_head_rcu(&x->byspi, nspitable + h);
 		}
 	}
 }
@@@ -122,25 -136,29 +136,29 @@@ static void xfrm_hash_resize(struct wor
 	}

 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ 	write_seqcount_begin(&xfrm_state_hash_generation);

 	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
+ 	odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
 	for (i = net->xfrm.state_hmask; i >= 0; i--)
- 		xfrm_hash_transfer(net->xfrm.state_bydst+i, ndst, nsrc, nspi,
- 				   nhashmask);
+ 		xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nhashmask);

- 	odst = net->xfrm.state_bydst;
- 	osrc = net->xfrm.state_bysrc;
- 	ospi = net->xfrm.state_byspi;
+ 	osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net);
+ 	ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net);
 	ohashmask = net->xfrm.state_hmask;

- 	net->xfrm.state_bydst = ndst;
- 	net->xfrm.state_bysrc = nsrc;
- 	net->xfrm.state_byspi = nspi;
+ 	rcu_assign_pointer(net->xfrm.state_bydst, ndst);
+ 	rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
+ 	rcu_assign_pointer(net->xfrm.state_byspi, nspi);
 	net->xfrm.state_hmask = nhashmask;

+ 	write_seqcount_end(&xfrm_state_hash_generation);
 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

 	osize = (ohashmask + 1) * sizeof(struct hlist_head);
+ 
+ 	synchronize_rcu();
+ 
 	xfrm_hash_free(odst, osize);
 	xfrm_hash_free(osrc, osize);
 	xfrm_hash_free(ospi, osize);
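The resize path pairs three mechanisms: the seqcount tells lockless readers a resize happened so they retry, rcu_assign_pointer() publishes the new tables with the needed write barrier, and synchronize_rcu() makes it safe to free the old tables only after all in-flight readers have drained. Condensed writer-side sketch (the lock, gen and table names are illustrative):

    spin_lock_bh(&lock);
    write_seqcount_begin(&gen);     /* readers will retry their lookups */
    /* ... re-hash entries into new tables with hlist_add_head_rcu() ... */
    rcu_assign_pointer(table, new); /* publish with write barrier */
    write_seqcount_end(&gen);
    spin_unlock_bh(&lock);

    synchronize_rcu();              /* wait out readers still on "old" */
    xfrm_hash_free(old, osize);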
 static void xfrm_state_gc_task(struct work_struct *work)
 {
- 	struct net *net = container_of(work, struct net, xfrm.state_gc_work);
 	struct xfrm_state *x;
 	struct hlist_node *tmp;
 	struct hlist_head gc_list;

 	spin_lock_bh(&xfrm_state_gc_lock);
- 	hlist_move_list(&net->xfrm.state_gc_list, &gc_list);
+ 	hlist_move_list(&xfrm_state_gc_list, &gc_list);
 	spin_unlock_bh(&xfrm_state_gc_lock);

+ 	synchronize_rcu();
+ 
 	hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
 		xfrm_state_gc_destroy(x);
 }
@@@ -501,14 -519,12 +520,12 @@@ EXPORT_SYMBOL(xfrm_state_alloc)

 void __xfrm_state_destroy(struct xfrm_state *x)
 {
- 	struct net *net = xs_net(x);
- 
 	WARN_ON(x->km.state != XFRM_STATE_DEAD);

 	spin_lock_bh(&xfrm_state_gc_lock);
- 	hlist_add_head(&x->gclist, &net->xfrm.state_gc_list);
+ 	hlist_add_head(&x->gclist, &xfrm_state_gc_list);
 	spin_unlock_bh(&xfrm_state_gc_lock);
- 	schedule_work(&net->xfrm.state_gc_work);
+ 	schedule_work(&xfrm_state_gc_work);
 }
 EXPORT_SYMBOL(__xfrm_state_destroy);

@@@ -521,10 -537,10 +538,10 @@@ int __xfrm_state_delete(struct xfrm_sta
 		x->km.state = XFRM_STATE_DEAD;
 		spin_lock(&net->xfrm.xfrm_state_lock);
 		list_del(&x->km.all);
- 		hlist_del(&x->bydst);
- 		hlist_del(&x->bysrc);
+ 		hlist_del_rcu(&x->bydst);
+ 		hlist_del_rcu(&x->bysrc);
 		if (x->id.spi)
- 			hlist_del(&x->byspi);
+ 			hlist_del_rcu(&x->byspi);
 		net->xfrm.state_num--;
 		spin_unlock(&net->xfrm.xfrm_state_lock);

@@@ -660,7 -676,7 +677,7 @@@ static struct xfrm_state *__xfrm_state_
 	unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
 	struct xfrm_state *x;

- 	hlist_for_each_entry(x, net->xfrm.state_byspi+h, byspi) {
+ 	hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
 		if (x->props.family != family ||
 		    x->id.spi != spi ||
 		    x->id.proto != proto ||
@@@ -669,7 -685,8 +686,8 @@@

 		if ((mark & x->mark.m) != x->mark.v)
 			continue;
- 		xfrm_state_hold(x);
+ 		if (!xfrm_state_hold_rcu(x))
+ 			continue;
 		return x;
 	}

@@@ -684,7 -701,7 +702,7 @@@ static struct xfrm_state *__xfrm_state_
 	unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
 	struct xfrm_state *x;

- 	hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
+ 	hlist_for_each_entry_rcu(x, net->xfrm.state_bysrc + h, bysrc) {
 		if (x->props.family != family ||
 		    x->id.proto != proto ||
 		    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
@@@ -693,7 -710,8 +711,8 @@@

 		if ((mark & x->mark.m) != x->mark.v)
 			continue;
- 		xfrm_state_hold(x);
+ 		if (!xfrm_state_hold_rcu(x))
+ 			continue;
 		return x;
 	}

@@@ -776,13 -794,16 +795,16 @@@ xfrm_state_find(const xfrm_address_t *d
 	struct xfrm_state *best = NULL;
 	u32 mark = pol->mark.v & pol->mark.m;
 	unsigned short encap_family = tmpl->encap_family;
+ 	unsigned int sequence;
 	struct km_event c;

 	to_put = NULL;

- 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ 	sequence = read_seqcount_begin(&xfrm_state_hash_generation);
+ 
+ 	rcu_read_lock();
 	h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
- 	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
+ 	hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h, bydst) {
 		if (x->props.family == encap_family &&
 		    x->props.reqid == tmpl->reqid &&
 		    (mark & x->mark.m) == x->mark.v &&
@@@ -798,7 -819,7 +820,7 @@@
 		goto found;

 	h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
- 	hlist_for_each_entry(x, net->xfrm.state_bydst+h_wildcard, bydst) {
+ 	hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h_wildcard, bydst) {
 		if (x->props.family == encap_family &&
 		    x->props.reqid == tmpl->reqid &&
 		    (mark & x->mark.m) == x->mark.v &&
@@@ -851,19 -872,21 +873,21 @@@ found
 	}

 	if (km_query(x, tmpl, pol) == 0) {
+ 		spin_lock_bh(&net->xfrm.xfrm_state_lock);
 		x->km.state = XFRM_STATE_ACQ;
 		list_add(&x->km.all, &net->xfrm.state_all);
- 		hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
+ 		hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
 		h = xfrm_src_hash(net, daddr, saddr, encap_family);
- 		hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
+ 		hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
 		if (x->id.spi) {
 			h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
- 			hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
+ 			hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
 		}
 		x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
 		tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
 		net->xfrm.state_num++;
 		xfrm_hash_grow_check(net, x->bydst.next != NULL);
+ 		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
 	} else {
 		x->km.state = XFRM_STATE_DEAD;
 		to_put = x;
@@@ -872,13 -895,26 +896,26 @@@
 	}
 }
out:
- 	if (x)
- 		xfrm_state_hold(x);
- 	else
+ 	if (x) {
+ 		if (!xfrm_state_hold_rcu(x)) {
+ 			*err = -EAGAIN;
+ 			x = NULL;
+ 		}
+ 	} else {
 		*err = acquire_in_progress ? -EAGAIN : error;
- 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+ 	}
+ 	rcu_read_unlock();
 	if (to_put)
 		xfrm_state_put(to_put);
+ 
+ 	if (read_seqcount_retry(&xfrm_state_hash_generation, sequence)) {
+ 		*err = -EAGAIN;
+ 		if (x) {
+ 			xfrm_state_put(x);
+ 			x = NULL;
+ 		}
+ 	}
+ 
 	return x;
 }
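xfrm_state_find() is the reader side of that protocol: sample the generation count, do the whole lookup under RCU, take the reference with the inc-not-zero helper, and if the generation moved, drop everything and report -EAGAIN so the caller retries. Reduced to its skeleton:

    sequence = read_seqcount_begin(&xfrm_state_hash_generation);
    rcu_read_lock();
    x = lookup_tables();                /* hypothetical helper */
    if (x && !xfrm_state_hold_rcu(x))
        x = NULL;
    rcu_read_unlock();

    if (read_seqcount_retry(&xfrm_state_hash_generation, sequence)) {
        if (x)
            xfrm_state_put(x);
        x = NULL;                       /* resize raced: caller retries */
    }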
@@@ -946,16 -982,16 +983,16 @@@ static void __xfrm_state_insert(struct

 	h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
 			  x->props.reqid, x->props.family);
- 	hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
+ 	hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);

 	h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
- 	hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
+ 	hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);

 	if (x->id.spi) {
 		h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
 				  x->props.family);

- 		hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
+ 		hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
 	}

 	tasklet_hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
@@@ -1064,9 -1100,9 +1101,9 @@@ static struct xfrm_state *__find_acq_co
 		xfrm_state_hold(x);
 		tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
 		list_add(&x->km.all, &net->xfrm.state_all);
- 		hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
+ 		hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
 		h = xfrm_src_hash(net, daddr, saddr, family);
- 		hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
+ 		hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);

 		net->xfrm.state_num++;

@@@ -1582,7 -1618,7 +1619,7 @@@ int xfrm_alloc_spi(struct xfrm_state *x
 	if (x->id.spi) {
 		spin_lock_bh(&net->xfrm.xfrm_state_lock);
 		h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
- 		hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
+ 		hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
 		spin_unlock_bh(&net->xfrm.xfrm_state_lock);

 		err = 0;
@@@ -2100,8 -2136,6 +2137,6 @@@ int __net_init xfrm_state_init(struct n

 	net->xfrm.state_num = 0;
 	INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
- 	INIT_HLIST_HEAD(&net->xfrm.state_gc_list);
- 	INIT_WORK(&net->xfrm.state_gc_work, xfrm_state_gc_task);
 	spin_lock_init(&net->xfrm.xfrm_state_lock);
 	return 0;

@@@ -2119,7 -2153,7 +2154,7 @@@ void xfrm_state_fini(struct net *net

 	flush_work(&net->xfrm.state_hash_work);
 	xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
- 	flush_work(&net->xfrm.state_gc_work);
+ 	flush_work(&xfrm_state_gc_work);

 	WARN_ON(!list_empty(&net->xfrm.state_all));