The following commit has been merged in the master branch:

commit 5db9a51f2e437f9d4248b4bb807d05ffb8108335
Merge: cd2f69e791fb100d28e287faa25ffd5cc96901e8 bd6207202db8974ca3d3183ca0d5611d45b2973c
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Wed Sep 26 10:43:27 2018 +1000
Merge remote-tracking branch 'net-next/master'
diff --combined Documentation/ABI/testing/sysfs-class-net index ec2232f6a949,e2e0fe553ad8..664a8f6a634f --- a/Documentation/ABI/testing/sysfs-class-net +++ b/Documentation/ABI/testing/sysfs-class-net @@@ -91,24 -91,6 +91,24 @@@ Description stacked (e.g: VLAN interfaces) but still have the same MAC address as their parent device.
+What: /sys/class/net/<iface>/dev_port +Date: February 2014 +KernelVersion: 3.15 +Contact: netdev@vger.kernel.org +Description: + Indicates the port number of this network device, formatted + as a decimal value. Some NICs have multiple independent ports + on the same PCI bus, device and function. This attribute allows + userspace to distinguish the respective interfaces. + + Note: some device drivers started to use 'dev_id' for this + purpose since long before 3.15 and have not adopted the new + attribute ever since. To query the port number, some tools look + exclusively at 'dev_port', while others only consult 'dev_id'. + If a network device has multiple client adapter ports as + described in the previous paragraph and does not set this + attribute to its port number, it's a kernel bug. + What: /sys/class/net/<iface>/dormant Date: March 2006 KernelVersion: 2.6.17 @@@ -135,7 -117,7 +135,7 @@@ Description full: full duplex
Note: This attribute is only valid for interfaces that implement - the ethtool get_settings method (mostly Ethernet). + the ethtool get_link_ksettings method (mostly Ethernet).
What: /sys/class/net/<iface>/flags Date: April 2005 @@@ -242,7 -224,7 +242,7 @@@ Description an integer representing the link speed in Mbits/sec.
Note: this attribute is only valid for interfaces that implement - the ethtool get_settings method (mostly Ethernet ). + the ethtool get_link_ksettings method (mostly Ethernet).
What: /sys/class/net/<iface>/tx_queue_len Date: April 2005 diff --combined MAINTAINERS index b8bbd414d1fb,7233a9ed0f5b..4dfc64747ce1 --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -840,7 -840,7 +840,7 @@@ ANALOG DEVICES INC ADGS1408 DRIVE M: Mircea Caprioru mircea.caprioru@analog.com S: Supported F: drivers/mux/adgs1408.c -F: Documentation/devicetree/bindings/mux/adgs1408.txt +F: Documentation/devicetree/bindings/mux/adi,adgs1408.txt
ANALOG DEVICES INC ADP5061 DRIVER M: Stefan Popa stefan.popa@analog.com @@@ -1175,21 -1175,18 +1175,21 @@@ T: git git://git.kernel.org/pub/scm/lin
ARM/ACTIONS SEMI ARCHITECTURE M: Andreas Färber afaerber@suse.de +R: Manivannan Sadhasivam manivannan.sadhasivam@linaro.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained N: owl F: arch/arm/mach-actions/ F: arch/arm/boot/dts/owl-* F: arch/arm64/boot/dts/actions/ +F: drivers/clk/actions/ F: drivers/clocksource/owl-* F: drivers/pinctrl/actions/* F: drivers/soc/actions/ F: include/dt-bindings/power/owl-* F: include/linux/soc/actions/ F: Documentation/devicetree/bindings/arm/actions.txt +F: Documentation/devicetree/bindings/clock/actions,s900-cmu.txt F: Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt F: Documentation/devicetree/bindings/power/actions,owl-sps.txt F: Documentation/devicetree/bindings/timer/actions,owl-timer.txt @@@ -1254,7 -1251,7 +1254,7 @@@ N: meso
ARM/Annapurna Labs ALPINE ARCHITECTURE M: Tsahee Zidenberg tsahee@annapurnalabs.com -M: Antoine Tenart antoine.tenart@free-electrons.com +M: Antoine Tenart antoine.tenart@bootlin.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-alpine/ @@@ -1706,10 -1703,9 +1706,10 @@@ S: Odd Fixe ARM/Microchip (AT91) SoC support M: Nicolas Ferre nicolas.ferre@microchip.com M: Alexandre Belloni alexandre.belloni@bootlin.com +M: Ludovic Desroches ludovic.desroches@microchip.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) W: http://www.linux4sam.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/nferre/linux-at91.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/at91/linux.git S: Supported N: at91 N: atmel @@@ -2089,12 -2085,10 +2089,12 @@@ F: include/linux/remoteproc/st_slim_rpr ARM/STM32 ARCHITECTURE M: Maxime Coquelin mcoquelin.stm32@gmail.com M: Alexandre Torgue alexandre.torgue@st.com +L: linux-stm32@st-md-mailman.stormreply.com (moderated for non-subscribers) L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/atorgue/stm32.git stm32-next N: stm32 +N: stm F: arch/arm/boot/dts/stm32* F: arch/arm/mach-stm32/ F: drivers/clocksource/armv7m_systick.c @@@ -2477,6 -2471,42 +2477,6 @@@ F: drivers/atm F: include/linux/atm* F: include/uapi/linux/atm*
-ATMEL AT91 / AT32 MCI DRIVER -M: Ludovic Desroches ludovic.desroches@microchip.com -S: Maintained -F: drivers/mmc/host/atmel-mci.c - -ATMEL AT91 SAMA5D2-Compatible Shutdown Controller -M: Nicolas Ferre nicolas.ferre@microchip.com -S: Supported -F: drivers/power/reset/at91-sama5d2_shdwc.c - -ATMEL Audio ALSA driver -M: Nicolas Ferre nicolas.ferre@microchip.com -L: alsa-devel@alsa-project.org (moderated for non-subscribers) -S: Supported -F: sound/soc/atmel - -ATMEL I2C DRIVER -M: Ludovic Desroches ludovic.desroches@microchip.com -L: linux-i2c@vger.kernel.org -S: Supported -F: drivers/i2c/busses/i2c-at91.c - -ATMEL ISI DRIVER -M: Ludovic Desroches ludovic.desroches@microchip.com -L: linux-media@vger.kernel.org -S: Supported -F: drivers/media/platform/atmel/atmel-isi.c -F: include/media/atmel-isi.h - -ATMEL LCDFB DRIVER -M: Nicolas Ferre nicolas.ferre@microchip.com -L: linux-fbdev@vger.kernel.org -S: Maintained -F: drivers/video/fbdev/atmel_lcdfb.c -F: include/video/atmel_lcdc.h - ATMEL MACB ETHERNET DRIVER M: Nicolas Ferre nicolas.ferre@microchip.com S: Supported @@@ -2489,6 -2519,43 +2489,6 @@@ S: Maintaine F: Documentation/devicetree/bindings/input/atmel,maxtouch.txt F: drivers/input/touchscreen/atmel_mxt_ts.c
-ATMEL SAMA5D2 ADC DRIVER -M: Ludovic Desroches ludovic.desroches@microchip.com -L: linux-iio@vger.kernel.org -S: Supported -F: drivers/iio/adc/at91-sama5d2_adc.c - -ATMEL SDMMC DRIVER -M: Ludovic Desroches ludovic.desroches@microchip.com -L: linux-mmc@vger.kernel.org -S: Supported -F: drivers/mmc/host/sdhci-of-at91.c - -ATMEL SPI DRIVER -M: Nicolas Ferre nicolas.ferre@microchip.com -S: Supported -F: drivers/spi/spi-atmel.* - -ATMEL SSC DRIVER -M: Nicolas Ferre nicolas.ferre@microchip.com -L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -S: Supported -F: drivers/misc/atmel-ssc.c -F: include/linux/atmel-ssc.h - -ATMEL Timer Counter (TC) AND CLOCKSOURCE DRIVERS -M: Nicolas Ferre nicolas.ferre@microchip.com -L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -S: Supported -F: drivers/misc/atmel_tclib.c -F: drivers/clocksource/tcb_clksrc.c - -ATMEL USBA UDC DRIVER -M: Nicolas Ferre nicolas.ferre@microchip.com -L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -S: Supported -F: drivers/usb/gadget/udc/atmel_usba_udc.* - ATMEL WIRELESS DRIVER M: Simon Kelley simon@thekelleys.org.uk L: linux-wireless@vger.kernel.org @@@ -2497,6 -2564,13 +2497,6 @@@ W: http://atmelwlandriver.sourceforge.n S: Maintained F: drivers/net/wireless/atmel/atmel*
-ATMEL XDMA DRIVER -M: Ludovic Desroches ludovic.desroches@microchip.com -L: linux-arm-kernel@lists.infradead.org -L: dmaengine@vger.kernel.org -S: Supported -F: drivers/dma/at_xdmac.c - ATOMIC INFRASTRUCTURE M: Will Deacon will.deacon@arm.com M: Peter Zijlstra peterz@infradead.org @@@ -4412,12 -4486,11 +4412,12 @@@ S: Maintaine F: Documentation/ F: scripts/kernel-doc X: Documentation/ABI/ +X: Documentation/acpi/ X: Documentation/devicetree/ -X: Documentation/acpi -X: Documentation/power -X: Documentation/spi -X: Documentation/media +X: Documentation/i2c/ +X: Documentation/media/ +X: Documentation/power/ +X: Documentation/spi/ T: git git://git.lwn.net/linux.git docs-next
DOCUMENTATION/ITALIAN @@@ -4432,7 -4505,6 +4432,7 @@@ L: linux-media@vger.kernel.or T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/i2c/dw9714.c +F: Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.txt
DONGWOON DW9807 LENS VOICE COIL DRIVER M: Sakari Ailus sakari.ailus@linux.intel.com @@@ -4440,7 -4512,6 +4440,7 @@@ L: linux-media@vger.kernel.or T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/i2c/dw9807.c +F: Documentation/devicetree/bindings/media/i2c/dongwoon,dw9807-vcm.txt
DOUBLETALK DRIVER M: "James R. Van Zandt" jrv@vanzandt.mv.com @@@ -4457,9 -4528,9 +4457,9 @@@ F: drivers/soc/fsl/dpi
DPAA2 ETHERNET DRIVER M: Ioana Radulescu ruxandra.radulescu@nxp.com - L: linux-kernel@vger.kernel.org + L: netdev@vger.kernel.org S: Maintained - F: drivers/staging/fsl-dpaa2/ethernet + F: drivers/net/ethernet/freescale/dpaa2
DPAA2 ETHERNET SWITCH DRIVER M: Ioana Radulescu ruxandra.radulescu@nxp.com @@@ -5276,14 -5347,6 +5276,14 @@@ L: linux-edac@vger.kernel.or S: Maintained F: drivers/edac/ti_edac.c
+EDAC-QCOM +M: Channagoud Kadabi ckadabi@codeaurora.org +M: Venkata Narendra Kumar Gutta vnkgutta@codeaurora.org +L: linux-arm-msm@vger.kernel.org +L: linux-edac@vger.kernel.org +S: Maintained +F: drivers/edac/qcom_edac.c + EDIROL UA-101/UA-1000 DRIVER M: Clemens Ladisch clemens@ladisch.de L: alsa-devel@alsa-project.org (moderated for non-subscribers) @@@ -5451,7 -5514,7 +5451,7 @@@ W: http://ext4.wiki.kernel.or Q: http://patchwork.ozlabs.org/project/linux-ext4/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4.git S: Maintained -F: Documentation/filesystems/ext4.txt +F: Documentation/filesystems/ext4/ext4.rst F: fs/ext4/
Extended Verification Module (EVM) @@@ -7285,7 -7348,7 +7285,7 @@@ F: Documentation/networking/ixgb.tx F: Documentation/networking/ixgbe.txt F: Documentation/networking/ixgbevf.txt F: Documentation/networking/i40e.txt - F: Documentation/networking/i40evf.txt + F: Documentation/networking/iavf.txt F: Documentation/networking/ice.txt F: drivers/net/ethernet/intel/ F: drivers/net/ethernet/intel/*/ @@@ -8120,6 -8183,15 +8120,15 @@@ S: Maintaine F: net/l3mdev F: include/net/l3mdev.h
+ LANTIQ / INTEL Ethernet drivers + M: Hauke Mehrtens hauke@hauke-m.de + L: netdev@vger.kernel.org + S: Maintained + F: net/dsa/tag_gswip.c + F: drivers/net/ethernet/lantiq_xrx200.c + F: drivers/net/dsa/lantiq_pce.h + F: drivers/net/dsa/intel_gswip.c + LANTIQ MIPS ARCHITECTURE M: John Crispin john@phrozen.org L: linux-mips@linux-mips.org @@@ -8732,7 -8804,7 +8741,7 @@@ S: Maintaine F: drivers/net/phy/marvell10g.c
MARVELL MVNETA ETHERNET DRIVER -M: Thomas Petazzoni thomas.petazzoni@free-electrons.com +M: Thomas Petazzoni thomas.petazzoni@bootlin.com L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/marvell/mvneta.* @@@ -8929,10 -9001,11 +8938,10 @@@ F: drivers/media/dvb-frontends/cxd2880/ F: drivers/media/spi/cxd2880*
MEDIA DRIVERS FOR DIGITAL DEVICES PCIE DEVICES -M: Daniel Scheller d.scheller.oss@gmail.com L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git -S: Maintained +S: Orphan F: drivers/media/pci/ddbridge/*
MEDIA DRIVERS FOR FREESCALE IMX @@@ -8947,13 -9020,6 +8956,13 @@@ F: drivers/staging/media/imx F: include/linux/imx-media.h F: include/media/imx.h
+MEDIA DRIVER FOR FREESCALE IMX PXP +M: Philipp Zabel p.zabel@pengutronix.de +L: linux-media@vger.kernel.org +T: git git://linuxtv.org/media_tree.git +S: Maintained +F: drivers/media/platform/imx-pxp.[ch] + MEDIA DRIVERS FOR HELENE M: Abylay Ospan aospan@netup.ru L: linux-media@vger.kernel.org @@@ -8984,10 -9050,11 +8993,10 @@@ S: Supporte F: drivers/media/dvb-frontends/lnbh25*
MEDIA DRIVERS FOR MXL5XX TUNER DEMODULATORS -M: Daniel Scheller d.scheller.oss@gmail.com L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git -S: Maintained +S: Orphan F: drivers/media/dvb-frontends/mxl5xx*
MEDIA DRIVERS FOR NETUP PCI UNIVERSAL DVB devices @@@ -9030,7 -9097,7 +9039,7 @@@ F: drivers/media/platform/rcar-fcp. F: include/media/rcar-fcp.h
MEDIA DRIVERS FOR RENESAS - FDP1 -M: Kieran Bingham kieran@bingham.xyz +M: Kieran Bingham kieran.bingham+renesas@ideasonboard.com L: linux-media@vger.kernel.org L: linux-renesas-soc@vger.kernel.org T: git git://linuxtv.org/media_tree.git @@@ -9050,7 -9117,6 +9059,7 @@@ F: drivers/media/platform/rcar-vin
MEDIA DRIVERS FOR RENESAS - VSP1 M: Laurent Pinchart laurent.pinchart@ideasonboard.com +M: Kieran Bingham kieran.bingham+renesas@ideasonboard.com L: linux-media@vger.kernel.org L: linux-renesas-soc@vger.kernel.org T: git git://linuxtv.org/media_tree.git @@@ -9059,17 -9125,19 +9068,17 @@@ F: Documentation/devicetree/bindings/me F: drivers/media/platform/vsp1/
MEDIA DRIVERS FOR ST STV0910 DEMODULATOR ICs -M: Daniel Scheller d.scheller.oss@gmail.com L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git -S: Maintained +S: Orphan F: drivers/media/dvb-frontends/stv0910*
MEDIA DRIVERS FOR ST STV6111 TUNER ICs -M: Daniel Scheller d.scheller.oss@gmail.com L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git -S: Maintained +S: Orphan F: drivers/media/dvb-frontends/stv6111*
MEDIA DRIVERS FOR STM32 - DCMI @@@ -9455,19 -9523,13 +9464,19 @@@ T: git git://git.monstr.eu/linux-2.6-mi S: Supported F: arch/microblaze/
-MICROCHIP / ATMEL AT91 SERIAL DRIVER +MICROCHIP AT91 SERIAL DRIVER M: Richard Genoud richard.genoud@gmail.com S: Maintained F: drivers/tty/serial/atmel_serial.c F: drivers/tty/serial/atmel_serial.h
-MICROCHIP / ATMEL DMA DRIVER +MICROCHIP AUDIO ASOC DRIVERS +M: Codrin Ciubotariu codrin.ciubotariu@microchip.com +L: alsa-devel@alsa-project.org (moderated for non-subscribers) +S: Supported +F: sound/soc/atmel + +MICROCHIP DMA DRIVER M: Ludovic Desroches ludovic.desroches@microchip.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: dmaengine@vger.kernel.org @@@ -9475,35 -9537,27 +9484,35 @@@ S: Supporte F: drivers/dma/at_hdmac.c F: drivers/dma/at_hdmac_regs.h F: include/linux/platform_data/dma-atmel.h +F: Documentation/devicetree/bindings/dma/atmel-dma.txt +F: include/dt-bindings/dma/at91.h
-MICROCHIP / ATMEL ECC DRIVER +MICROCHIP ECC DRIVER M: Tudor Ambarus tudor.ambarus@microchip.com L: linux-crypto@vger.kernel.org S: Maintained F: drivers/crypto/atmel-ecc.*
-MICROCHIP / ATMEL ISC DRIVER -M: Songjun Wu songjun.wu@microchip.com +MICROCHIP I2C DRIVER +M: Ludovic Desroches ludovic.desroches@microchip.com +L: linux-i2c@vger.kernel.org +S: Supported +F: drivers/i2c/busses/i2c-at91.c + +MICROCHIP ISC DRIVER +M: Eugen Hristev eugen.hristev@microchip.com L: linux-media@vger.kernel.org S: Supported F: drivers/media/platform/atmel/atmel-isc.c F: drivers/media/platform/atmel/atmel-isc-regs.h F: devicetree/bindings/media/atmel-isc.txt
-MICROCHIP / ATMEL NAND DRIVER -M: Josh Wu rainyfeeling@outlook.com -L: linux-mtd@lists.infradead.org +MICROCHIP ISI DRIVER +M: Eugen Hristev eugen.hristev@microchip.com +L: linux-media@vger.kernel.org S: Supported -F: drivers/mtd/nand/raw/atmel/* -F: Documentation/devicetree/bindings/mtd/atmel-nand.txt +F: drivers/media/platform/atmel/atmel-isi.c +F: include/media/atmel-isi.h
MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER M: Woojung Huh Woojung.Huh@microchip.com @@@ -9522,72 -9576,6 +9531,72 @@@ L: netdev@vger.kernel.or S: Maintained F: drivers/net/ethernet/microchip/lan743x_*
+MICROCHIP LCDFB DRIVER +M: Nicolas Ferre nicolas.ferre@microchip.com +L: linux-fbdev@vger.kernel.org +S: Maintained +F: drivers/video/fbdev/atmel_lcdfb.c +F: include/video/atmel_lcdc.h + +MICROCHIP MMC/SD/SDIO MCI DRIVER +M: Ludovic Desroches ludovic.desroches@microchip.com +S: Maintained +F: drivers/mmc/host/atmel-mci.c + +MICROCHIP NAND DRIVER +M: Tudor Ambarus tudor.ambarus@microchip.com +L: linux-mtd@lists.infradead.org +S: Supported +F: drivers/mtd/nand/raw/atmel/* +F: Documentation/devicetree/bindings/mtd/atmel-nand.txt + +MICROCHIP PWM DRIVER +M: Claudiu Beznea claudiu.beznea@microchip.com +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +L: linux-pwm@vger.kernel.org +S: Supported +F: drivers/pwm/pwm-atmel.c +F: Documentation/devicetree/bindings/pwm/atmel-pwm.txt + +MICROCHIP SAMA5D2-COMPATIBLE ADC DRIVER +M: Ludovic Desroches ludovic.desroches@microchip.com +M: Eugen Hristev eugen.hristev@microchip.com +L: linux-iio@vger.kernel.org +S: Supported +F: drivers/iio/adc/at91-sama5d2_adc.c +F: Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt +F: include/dt-bindings/iio/adc/at91-sama5d2_adc.h + +MICROCHIP SAMA5D2-COMPATIBLE SHUTDOWN CONTROLLER +M: Nicolas Ferre nicolas.ferre@microchip.com +S: Supported +F: drivers/power/reset/at91-sama5d2_shdwc.c + +MICROCHIP SPI DRIVER +M: Nicolas Ferre nicolas.ferre@microchip.com +S: Supported +F: drivers/spi/spi-atmel.* + +MICROCHIP SSC DRIVER +M: Nicolas Ferre nicolas.ferre@microchip.com +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Supported +F: drivers/misc/atmel-ssc.c +F: include/linux/atmel-ssc.h + +MICROCHIP TIMER COUNTER (TC) AND CLOCKSOURCE DRIVERS +M: Nicolas Ferre nicolas.ferre@microchip.com +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Supported +F: drivers/misc/atmel_tclib.c +F: drivers/clocksource/tcb_clksrc.c + +MICROCHIP USBA UDC DRIVER +M: Cristian Birsan cristian.birsan@microchip.com +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Supported +F: drivers/usb/gadget/udc/atmel_usba_udc.* + MICROCHIP USB251XB DRIVER M: Richard Leitner richard.leitner@skidata.com L: linux-usb@vger.kernel.org @@@ -9595,13 -9583,6 +9604,13 @@@ S: Maintaine F: drivers/usb/misc/usb251xb.c F: Documentation/devicetree/bindings/usb/usb251xb.txt
+MICROCHIP XDMA DRIVER +M: Ludovic Desroches ludovic.desroches@microchip.com +L: linux-arm-kernel@lists.infradead.org +L: dmaengine@vger.kernel.org +S: Supported +F: drivers/dma/at_xdmac.c + MICROSEMI MIPS SOCS M: Alexandre Belloni alexandre.belloni@bootlin.com L: linux-mips@linux-mips.org @@@ -9726,19 -9707,6 +9735,19 @@@ S: Maintaine F: arch/arm/boot/dts/mmp* F: arch/arm/mach-mmp/
+MMU GATHER AND TLB INVALIDATION +M: Will Deacon will.deacon@arm.com +M: "Aneesh Kumar K.V" aneesh.kumar@linux.vnet.ibm.com +M: Andrew Morton akpm@linux-foundation.org +M: Nick Piggin npiggin@gmail.com +M: Peter Zijlstra peterz@infradead.org +L: linux-arch@vger.kernel.org +L: linux-mm@kvack.org +S: Maintained +F: arch/*/include/asm/tlb.h +F: include/asm-generic/tlb.h +F: mm/mmu_gather.c + MN88472 MEDIA DRIVER M: Antti Palosaari crope@iki.fi L: linux-media@vger.kernel.org @@@ -9757,6 -9725,13 +9766,6 @@@ Q: http://patchwork.linuxtv.org/project S: Maintained F: drivers/media/dvb-frontends/mn88473*
-PCI DRIVER FOR MOBIVEIL PCIE IP -M: Subrahmanya Lingappa l.subrahmanya@mobiveil.co.in -L: linux-pci@vger.kernel.org -S: Supported -F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt -F: drivers/pci/controller/pcie-mobiveil.c - MODULE SUPPORT M: Jessica Yu jeyu@kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next @@@ -11078,7 -11053,7 +11087,7 @@@ S: Maintaine F: drivers/firmware/pcdp.*
PCI DRIVER FOR AARDVARK (Marvell Armada 3700) -M: Thomas Petazzoni thomas.petazzoni@free-electrons.com +M: Thomas Petazzoni thomas.petazzoni@bootlin.com L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@@ -11110,7 -11085,7 +11119,7 @@@ F: Documentation/devicetree/bindings/pc F: drivers/pci/controller/pci-versatile.c
PCI DRIVER FOR ARMADA 8K -M: Thomas Petazzoni thomas.petazzoni@free-electrons.com +M: Thomas Petazzoni thomas.petazzoni@bootlin.com L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org S: Maintained @@@ -11171,15 -11146,8 +11180,15 @@@ F: include/uapi/linux/switchtec_ioctl. F: include/linux/switchtec.h F: drivers/ntb/hw/mscc/
+PCI DRIVER FOR MOBIVEIL PCIE IP +M: Subrahmanya Lingappa l.subrahmanya@mobiveil.co.in +L: linux-pci@vger.kernel.org +S: Supported +F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt +F: drivers/pci/controller/pcie-mobiveil.c + PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support) -M: Thomas Petazzoni thomas.petazzoni@free-electrons.com +M: Thomas Petazzoni thomas.petazzoni@bootlin.com M: Jason Cooper jason@lakedaemon.net L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@@ -11244,14 -11212,8 +11253,14 @@@ F: tools/pci
PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC M: Russell Currey ruscur@russell.cc +M: Sam Bobroff sbobroff@linux.ibm.com +M: Oliver O'Halloran oohall@gmail.com L: linuxppc-dev@lists.ozlabs.org S: Supported +F: Documentation/PCI/pci-error-recovery.txt +F: drivers/pci/pcie/aer.c +F: drivers/pci/pcie/dpc.c +F: drivers/pci/pcie/err.c F: Documentation/powerpc/eeh-pci-error-recovery.txt F: arch/powerpc/kernel/eeh*.c F: arch/powerpc/platforms/*/eeh*.c @@@ -12307,7 -12269,6 +12316,7 @@@ F: Documentation/networking/rds.tx
RDT - RESOURCE ALLOCATION M: Fenghua Yu fenghua.yu@intel.com +M: Reinette Chatre reinette.chatre@intel.com L: linux-kernel@vger.kernel.org S: Supported F: arch/x86/kernel/cpu/intel_rdt* @@@ -13057,12 -13018,6 +13066,12 @@@ L: linux-mmc@vger.kernel.or S: Maintained F: drivers/mmc/host/sdhci-pci-dwc-mshc.c
+SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) MICROCHIP DRIVER +M: Ludovic Desroches ludovic.desroches@microchip.com +L: linux-mmc@vger.kernel.org +S: Supported +F: drivers/mmc/host/sdhci-of-at91.c + SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) SAMSUNG DRIVER M: Ben Dooks ben-linux@fluff.org M: Jaehoon Chung jh80.chung@samsung.com @@@ -13503,8 -13458,9 +13512,8 @@@ F: drivers/i2c/busses/i2c-synquacer. F: Documentation/devicetree/bindings/i2c/i2c-synquacer.txt
SOCIONEXT UNIPHIER SOUND DRIVER -M: Katsuhiro Suzuki suzuki.katsuhiro@socionext.com L: alsa-devel@alsa-project.org (moderated for non-subscribers) -S: Maintained +S: Orphan F: sound/soc/uniphier/
SOEKRIS NET48XX LED SUPPORT @@@ -15966,7 -15922,6 +15975,7 @@@ F: net/x25 X86 ARCHITECTURE (32-BIT AND 64-BIT) M: Thomas Gleixner tglx@linutronix.de M: Ingo Molnar mingo@redhat.com +M: Borislav Petkov bp@alien8.de R: "H. Peter Anvin" hpa@zytor.com M: x86@kernel.org L: linux-kernel@vger.kernel.org @@@ -15995,15 -15950,6 +16004,15 @@@ M: Borislav Petkov <bp@alien8.de S: Maintained F: arch/x86/kernel/cpu/microcode/*
+X86 MM +M: Dave Hansen dave.hansen@linux.intel.com +M: Andy Lutomirski luto@kernel.org +M: Peter Zijlstra peterz@infradead.org +L: linux-kernel@vger.kernel.org +T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/mm +S: Maintained +F: arch/x86/mm/ + X86 PLATFORM DRIVERS M: Darren Hart dvhart@infradead.org M: Andy Shevchenko andy@infradead.org diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index fcc2328bb0d9,16f64c68193d..40093d88353f --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@@ -3536,6 -3536,16 +3536,16 @@@ static void bnx2x_drv_info_iscsi_stat(s */ static void bnx2x_config_mf_bw(struct bnx2x *bp) { + /* Workaround for MFW bug. + * MFW is not supposed to generate BW attention in + * single function mode. + */ + if (!IS_MF(bp)) { + DP(BNX2X_MSG_MCP, + "Ignoring MF BW config in single function mode\n"); + return; + } + if (bp->link_vars.link_up) { bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX); bnx2x_link_sync_notify(bp); @@@ -12894,6 -12904,19 +12904,6 @@@ static int bnx2x_ioctl(struct net_devic } }
-#ifdef CONFIG_NET_POLL_CONTROLLER -static void poll_bnx2x(struct net_device *dev) -{ - struct bnx2x *bp = netdev_priv(dev); - int i; - - for_each_eth_queue(bp, i) { - struct bnx2x_fastpath *fp = &bp->fp[i]; - napi_schedule(&bnx2x_fp(bp, fp->index, napi)); - } -} -#endif - static int bnx2x_validate_addr(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); @@@ -13100,11 -13123,15 +13110,12 @@@ static const struct net_device_ops bnx2 .ndo_tx_timeout = bnx2x_tx_timeout, .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = poll_bnx2x, -#endif .ndo_setup_tc = __bnx2x_setup_tc, #ifdef CONFIG_BNX2X_SRIOV .ndo_set_vf_mac = bnx2x_set_vf_mac, .ndo_set_vf_vlan = bnx2x_set_vf_vlan, .ndo_get_vf_config = bnx2x_get_vf_config, + .ndo_set_vf_spoofchk = bnx2x_set_vf_spoofchk, #endif #ifdef NETDEV_FCOE_WWNN .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, diff --combined drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index e1594c9df4c6,f4ba9b3f8819..749f63beddd8 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@@ -75,23 -75,17 +75,23 @@@ static int bnxt_tc_parse_redir(struct b return 0; }
-static void bnxt_tc_parse_vlan(struct bnxt *bp, - struct bnxt_tc_actions *actions, - const struct tc_action *tc_act) +static int bnxt_tc_parse_vlan(struct bnxt *bp, + struct bnxt_tc_actions *actions, + const struct tc_action *tc_act) { - if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) { + switch (tcf_vlan_action(tc_act)) { + case TCA_VLAN_ACT_POP: actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN; - } else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) { + break; + case TCA_VLAN_ACT_PUSH: actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN; actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act)); actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act); + break; + default: + return -EOPNOTSUPP; } + return 0; }
static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, @@@ -140,9 -134,7 +140,9 @@@ static int bnxt_tc_parse_actions(struc
/* Push/pop VLAN */ if (is_tcf_vlan(tc_act)) { - bnxt_tc_parse_vlan(bp, actions, tc_act); + rc = bnxt_tc_parse_vlan(bp, actions, tc_act); + if (rc) + return rc; continue; }
@@@ -189,7 -181,6 +189,6 @@@ static int bnxt_tc_parse_flow(struct bn struct bnxt_tc_flow *flow) { struct flow_dissector *dissector = tc_flow_cmd->dissector; - u16 addr_type = 0;
/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */ if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 || @@@ -199,13 -190,6 +198,6 @@@ return -EOPNOTSUPP; }
- if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_dissector_key_control *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL); - - addr_type = key->addr_type; - } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_dissector_key_basic *key = GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC); @@@ -301,13 -285,6 +293,6 @@@ flow->l4_mask.icmp.code = mask->code; }
- if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { - struct flow_dissector_key_control *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL); - - addr_type = key->addr_type; - } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { struct flow_dissector_key_ipv4_addrs *key = GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS); diff --combined drivers/net/ethernet/ibm/emac/core.c index 129f4e9f38da,5107c9450a19..760b2ad8e295 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@@ -423,7 -423,7 +423,7 @@@ static void emac_hash_mc(struct emac_in { const int regs = EMAC_XAHT_REGS(dev); u32 *gaht_base = emac_gaht_base(dev); - u32 gaht_temp[regs]; + u32 gaht_temp[EMAC_XAHT_MAX_REGS]; struct netdev_hw_addr *ha; int i;
@@@ -1409,7 -1409,7 +1409,7 @@@ static inline u16 emac_tx_csum(struct e return 0; }
- static inline int emac_xmit_finish(struct emac_instance *dev, int len) + static inline netdev_tx_t emac_xmit_finish(struct emac_instance *dev, int len) { struct emac_regs __iomem *p = dev->emacp; struct net_device *ndev = dev->ndev; @@@ -1436,7 -1436,7 +1436,7 @@@ }
/* Tx lock BH */ - static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev) + static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct emac_instance *dev = netdev_priv(ndev); unsigned int len = skb->len; @@@ -1494,7 -1494,8 +1494,8 @@@ static inline int emac_xmit_split(struc }
/* Tx lock BH disabled (SG version for TAH equipped EMACs) */ - static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev) + static netdev_tx_t + emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev) { struct emac_instance *dev = netdev_priv(ndev); int nr_frags = skb_shinfo(skb)->nr_frags; @@@ -2677,17 -2678,12 +2678,17 @@@ static int emac_init_phy(struct emac_in if (of_phy_is_fixed_link(np)) { int res = emac_dt_mdio_probe(dev);
- if (!res) { - res = of_phy_register_fixed_link(np); - if (res) - mdiobus_unregister(dev->mii_bus); + if (res) + return res; + + res = of_phy_register_fixed_link(np); + dev->phy_dev = of_phy_find_device(np); + if (res || !dev->phy_dev) { + mdiobus_unregister(dev->mii_bus); + return res ? res : -EINVAL; } - return res; + emac_adjust_link(dev->ndev); + put_device(&dev->phy_dev->mdio.dev); } return 0; } @@@ -2969,6 -2965,10 +2970,10 @@@ static int emac_init_config(struct emac dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT; }
+ /* This should never happen */ + if (WARN_ON(EMAC_XAHT_REGS(dev) > EMAC_XAHT_MAX_REGS)) + return -ENXIO; + DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE); DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige); DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige); diff --combined drivers/net/ethernet/intel/ice/ice_main.c index 3f047bb43348,d54e63785ff0..4f5fe6af6dac --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@@ -7,7 -7,7 +7,7 @@@
#include "ice.h"
- #define DRV_VERSION "ice-0.7.0-k" + #define DRV_VERSION "0.7.1-k" #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" const char ice_drv_ver[] = DRV_VERSION; static const char ice_driver_string[] = DRV_SUMMARY; @@@ -15,7 -15,7 +15,7 @@@ static const char ice_copyright[] = "Co
MODULE_AUTHOR("Intel Corporation, linux.nics@intel.com"); MODULE_DESCRIPTION(DRV_SUMMARY); - MODULE_LICENSE("GPL"); + MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION);
static int debug = -1; @@@ -32,10 -32,86 +32,86 @@@ static const struct net_device_ops ice_ static void ice_pf_dis_all_vsi(struct ice_pf *pf); static void ice_rebuild(struct ice_pf *pf); static int ice_vsi_release(struct ice_vsi *vsi); + static void ice_vsi_release_all(struct ice_pf *pf); static void ice_update_vsi_stats(struct ice_vsi *vsi); static void ice_update_pf_stats(struct ice_pf *pf);
/** + * ice_get_tx_pending - returns number of Tx descriptors not processed + * @ring: the ring of descriptors + */ + static u32 ice_get_tx_pending(struct ice_ring *ring) + { + u32 head, tail; + + head = ring->next_to_clean; + tail = readl(ring->tail); + + if (head != tail) + return (head < tail) ? + tail - head : (tail + ring->count - head); + return 0; + } + + /** + * ice_check_for_hang_subtask - check for and recover hung queues + * @pf: pointer to PF struct + */ + static void ice_check_for_hang_subtask(struct ice_pf *pf) + { + struct ice_vsi *vsi = NULL; + unsigned int i; + u32 v, v_idx; + int packets; + + ice_for_each_vsi(pf, v) + if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) { + vsi = pf->vsi[v]; + break; + } + + if (!vsi || test_bit(__ICE_DOWN, vsi->state)) + return; + + if (!(vsi->netdev && netif_carrier_ok(vsi->netdev))) + return; + + for (i = 0; i < vsi->num_txq; i++) { + struct ice_ring *tx_ring = vsi->tx_rings[i]; + + if (tx_ring && tx_ring->desc) { + int itr = ICE_ITR_NONE; + + /* If packet counter has not changed the queue is + * likely stalled, so force an interrupt for this + * queue. + * + * prev_pkt would be negative if there was no + * pending work. + */ + packets = tx_ring->stats.pkts & INT_MAX; + if (tx_ring->tx_stats.prev_pkt == packets) { + /* Trigger sw interrupt to revive the queue */ + v_idx = tx_ring->q_vector->v_idx; + wr32(&vsi->back->hw, + GLINT_DYN_CTL(vsi->base_vector + v_idx), + (itr << GLINT_DYN_CTL_ITR_INDX_S) | + GLINT_DYN_CTL_SWINT_TRIG_M | + GLINT_DYN_CTL_INTENA_MSK_M); + continue; + } + + /* Memory barrier between read of packet count and call + * to ice_get_tx_pending() + */ + smp_rmb(); + tx_ring->tx_stats.prev_pkt = + ice_get_tx_pending(tx_ring) ? packets : -1; + } + } + } + + /** * ice_get_free_slot - get the next non-NULL location index in array * @array: array to search * @size: size of the array @@@ -274,6 -350,63 +350,63 @@@ static bool ice_vsi_fltr_changed(struc }
/** + * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI + * @vsi: VSI to enable or disable VLAN pruning on + * @ena: set to true to enable VLAN pruning and false to disable it + * + * returns 0 if VSI is updated, negative otherwise + */ + static int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena) + { + struct ice_vsi_ctx *ctxt; + struct device *dev; + int status; + + if (!vsi) + return -EINVAL; + + dev = &vsi->back->pdev->dev; + ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) + return -ENOMEM; + + ctxt->info = vsi->info; + + if (ena) { + ctxt->info.sec_flags |= + ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S; + ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + } else { + ctxt->info.sec_flags &= + ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); + ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + } + + ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID | + ICE_AQ_VSI_PROP_SW_VALID); + ctxt->vsi_num = vsi->vsi_num; + status = ice_aq_update_vsi(&vsi->back->hw, ctxt, NULL); + if (status) { + netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI %d failed, err = %d, aq_err = %d\n", + ena ? "Ena" : "Dis", vsi->vsi_num, status, + vsi->back->hw.adminq.sq_last_status); + goto err_out; + } + + vsi->info.sec_flags = ctxt->info.sec_flags; + vsi->info.sw_flags2 = ctxt->info.sw_flags2; + + devm_kfree(dev, ctxt); + return 0; + + err_out: + devm_kfree(dev, ctxt); + return -EIO; + } + + /** * ice_vsi_sync_fltr - Update the VSI filter list to the HW * @vsi: ptr to the VSI * @@@ -456,23 -589,13 +589,13 @@@ static voi ice_prepare_for_reset(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; - u32 v; - - ice_for_each_vsi(pf, v) - if (pf->vsi[v]) - ice_remove_vsi_fltr(hw, pf->vsi[v]->vsi_num); - - dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
/* disable the VSIs and their queues that are not already DOWN */ - /* pf_dis_all_vsi modifies netdev structures -rtnl_lock needed */ ice_pf_dis_all_vsi(pf);
- ice_for_each_vsi(pf, v) - if (pf->vsi[v]) - pf->vsi[v]->vsi_num = 0; - ice_shutdown_all_ctrlq(hw); + + set_bit(__ICE_PREPARED_FOR_RESET, pf->state); }
/** @@@ -490,26 -613,32 +613,32 @@@ static void ice_do_reset(struct ice_pf WARN_ON(in_interrupt());
/* PFR is a bit of a special case because it doesn't result in an OICR - * interrupt. So for PFR, we prepare for reset, issue the reset and - * rebuild sequentially. + * interrupt. Set pending bit here which otherwise gets set in the + * OICR handler. */ - if (reset_type == ICE_RESET_PFR) { + if (reset_type == ICE_RESET_PFR) set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); - ice_prepare_for_reset(pf); - } + + ice_prepare_for_reset(pf);
/* trigger the reset */ if (ice_reset(hw, reset_type)) { dev_err(dev, "reset %d failed\n", reset_type); set_bit(__ICE_RESET_FAILED, pf->state); clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); return; }
+ /* PFR is a bit of a special case because it doesn't result in an OICR + * interrupt. So for PFR, rebuild after the reset and clear the reset- + * associated state bits. + */ if (reset_type == ICE_RESET_PFR) { pf->pfr_count++; ice_rebuild(pf); clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); } }
@@@ -519,48 -648,57 +648,57 @@@ */ static void ice_reset_subtask(struct ice_pf *pf) { - enum ice_reset_req reset_type; - - rtnl_lock(); + enum ice_reset_req reset_type = ICE_RESET_INVAL;
/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an - * OICR interrupt. The OICR handler (ice_misc_intr) determines what - * type of reset happened and sets __ICE_RESET_RECOVERY_PENDING bit in - * pf->state. So if reset/recovery is pending (as indicated by this bit) - * we do a rebuild and return. + * OICR interrupt. The OICR handler (ice_misc_intr) determines what type + * of reset is pending and sets bits in pf->state indicating the reset + * type and __ICE_RESET_RECOVERY_PENDING. So, if the latter bit is set + * prepare for pending reset if not already (for PF software-initiated + * global resets the software should already be prepared for it as + * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated + * by firmware or software on other PFs, that bit is not set so prepare + * for the reset now), poll for reset done, rebuild and return. */ if (ice_is_reset_recovery_pending(pf->state)) { clear_bit(__ICE_GLOBR_RECV, pf->state); clear_bit(__ICE_CORER_RECV, pf->state); - ice_prepare_for_reset(pf); + if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) + ice_prepare_for_reset(pf);
/* make sure we are ready to rebuild */ - if (ice_check_reset(&pf->hw)) + if (ice_check_reset(&pf->hw)) { set_bit(__ICE_RESET_FAILED, pf->state); - else + } else { + /* done with reset. start rebuild */ + pf->hw.reset_ongoing = false; ice_rebuild(pf); - clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); - goto unlock; + /* clear bit to resume normal operations, but + * ICE_NEEDS_RESTART bit is set incase rebuild failed + */ + clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); + } + + return; }
/* No pending resets to finish processing. Check for new resets */ + if (test_and_clear_bit(__ICE_PFR_REQ, pf->state)) + reset_type = ICE_RESET_PFR; + if (test_and_clear_bit(__ICE_CORER_REQ, pf->state)) + reset_type = ICE_RESET_CORER; if (test_and_clear_bit(__ICE_GLOBR_REQ, pf->state)) reset_type = ICE_RESET_GLOBR; - else if (test_and_clear_bit(__ICE_CORER_REQ, pf->state)) - reset_type = ICE_RESET_CORER; - else if (test_and_clear_bit(__ICE_PFR_REQ, pf->state)) - reset_type = ICE_RESET_PFR; - else - goto unlock; + /* If no valid reset type requested just return */ + if (reset_type == ICE_RESET_INVAL) + return;
- /* reset if not already down or resetting */ + /* reset if not already down or busy */ if (!test_bit(__ICE_DOWN, pf->state) && !test_bit(__ICE_CFG_BUSY, pf->state)) { ice_do_reset(pf, reset_type); } - - unlock: - rtnl_unlock(); }
/** @@@ -903,6 -1041,9 +1041,9 @@@ static int __ice_clean_ctrlq(struct ice dev_err(&pf->pdev->dev, "Could not handle link event\n"); break; + case ice_aqc_opc_fw_logging: + ice_output_fw_log(hw, &event.desc, event.msg_buf); + break; default: dev_dbg(&pf->pdev->dev, "%s Receive Queue unknown event 0x%04x ignored\n", @@@ -966,8 -1107,9 +1107,9 @@@ static void ice_clean_adminq_subtask(st */ static void ice_service_task_schedule(struct ice_pf *pf) { - if (!test_bit(__ICE_DOWN, pf->state) && - !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state)) + if (!test_bit(__ICE_SERVICE_DIS, pf->state) && + !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) && + !test_bit(__ICE_NEEDS_RESTART, pf->state)) queue_work(ice_wq, &pf->serv_task); }
@@@ -985,6 -1127,22 +1127,22 @@@ static void ice_service_task_complete(s }
/** + * ice_service_task_stop - stop service task and cancel works + * @pf: board private structure + */ + static void ice_service_task_stop(struct ice_pf *pf) + { + set_bit(__ICE_SERVICE_DIS, pf->state); + + if (pf->serv_tmr.function) + del_timer_sync(&pf->serv_tmr); + if (pf->serv_task.func) + cancel_work_sync(&pf->serv_task); + + clear_bit(__ICE_SERVICE_SCHED, pf->state); + } + + /** * ice_service_timer - timer callback to schedule service task * @t: pointer to timer_list */ @@@ -997,6 -1155,114 +1155,114 @@@ static void ice_service_timer(struct ti }
/** + * ice_handle_mdd_event - handle malicious driver detect event + * @pf: pointer to the PF structure + * + * Called from service task. OICR interrupt handler indicates MDD event + */ + static void ice_handle_mdd_event(struct ice_pf *pf) + { + struct ice_hw *hw = &pf->hw; + bool mdd_detected = false; + u32 reg; + + if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state)) + return; + + /* find what triggered the MDD event */ + reg = rd32(hw, GL_MDET_TX_PQM); + if (reg & GL_MDET_TX_PQM_VALID_M) { + u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> + GL_MDET_TX_PQM_PF_NUM_S; + u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >> + GL_MDET_TX_PQM_VF_NUM_S; + u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> + GL_MDET_TX_PQM_MAL_TYPE_S; + u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >> + GL_MDET_TX_PQM_QNUM_S); + + if (netif_msg_tx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", + event, queue, pf_num, vf_num); + wr32(hw, GL_MDET_TX_PQM, 0xffffffff); + mdd_detected = true; + } + + reg = rd32(hw, GL_MDET_TX_TCLAN); + if (reg & GL_MDET_TX_TCLAN_VALID_M) { + u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> + GL_MDET_TX_TCLAN_PF_NUM_S; + u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >> + GL_MDET_TX_TCLAN_VF_NUM_S; + u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> + GL_MDET_TX_TCLAN_MAL_TYPE_S; + u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >> + GL_MDET_TX_TCLAN_QNUM_S); + + if (netif_msg_rx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", + event, queue, pf_num, vf_num); + wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); + mdd_detected = true; + } + + reg = rd32(hw, GL_MDET_RX); + if (reg & GL_MDET_RX_VALID_M) { + u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >> + GL_MDET_RX_PF_NUM_S; + u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >> + GL_MDET_RX_VF_NUM_S; + u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >> + GL_MDET_RX_MAL_TYPE_S; + u16 queue = ((reg & GL_MDET_RX_QNUM_M) >> + GL_MDET_RX_QNUM_S); + + if (netif_msg_rx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", + event, queue, pf_num, vf_num); + wr32(hw, GL_MDET_RX, 0xffffffff); + mdd_detected = true; + } + + if (mdd_detected) { + bool pf_mdd_detected = false; + + reg = rd32(hw, PF_MDET_TX_PQM); + if (reg & PF_MDET_TX_PQM_VALID_M) { + wr32(hw, PF_MDET_TX_PQM, 0xFFFF); + dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); + pf_mdd_detected = true; + } + + reg = rd32(hw, PF_MDET_TX_TCLAN); + if (reg & PF_MDET_TX_TCLAN_VALID_M) { + wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF); + dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); + pf_mdd_detected = true; + } + + reg = rd32(hw, PF_MDET_RX); + if (reg & PF_MDET_RX_VALID_M) { + wr32(hw, PF_MDET_RX, 0xFFFF); + dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); + pf_mdd_detected = true; + } + /* Queue belongs to the PF initiate a reset */ + if (pf_mdd_detected) { + set_bit(__ICE_NEEDS_RESTART, pf->state); + ice_service_task_schedule(pf); + } + } + + /* re-enable MDD interrupt cause */ + clear_bit(__ICE_MDD_EVENT_PENDING, pf->state); + reg = rd32(hw, PFINT_OICR_ENA); + reg |= PFINT_OICR_MAL_DETECT_M; + wr32(hw, PFINT_OICR_ENA, reg); + ice_flush(hw); + } + + /** * ice_service_task - manage and run subtasks * @work: pointer to work_struct contained by the PF struct */ @@@ -1010,14 -1276,17 +1276,17 @@@ static void ice_service_task(struct wor /* process reset requests first */ ice_reset_subtask(pf);
- /* bail if a reset/recovery cycle is pending */ + /* bail if a reset/recovery cycle is pending or rebuild failed */ if (ice_is_reset_recovery_pending(pf->state) || - test_bit(__ICE_SUSPENDED, pf->state)) { + test_bit(__ICE_SUSPENDED, pf->state) || + test_bit(__ICE_NEEDS_RESTART, pf->state)) { ice_service_task_complete(pf); return; }
+ ice_check_for_hang_subtask(pf); ice_sync_fltr_subtask(pf); + ice_handle_mdd_event(pf); ice_watchdog_subtask(pf); ice_clean_adminq_subtask(pf);
@@@ -1029,6 -1298,7 +1298,7 @@@ * schedule the service task now. */ if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || + test_bit(__ICE_MDD_EVENT_PENDING, pf->state) || test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) mod_timer(&pf->serv_tmr, jiffies); } @@@ -1157,7 -1427,7 +1427,7 @@@ static void ice_vsi_delete(struct ice_v
memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));
- status = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL); + status = ice_free_vsi(&pf->hw, vsi->idx, &ctxt, false, NULL); if (status) dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n", vsi->vsi_num); @@@ -1420,13 -1690,13 +1690,13 @@@ static void ice_set_rss_vsi_ctx(struct }
/** - * ice_vsi_add - Create a new VSI or fetch preallocated VSI + * ice_vsi_init - Create and initialize a VSI * @vsi: the VSI being configured * * This initializes a VSI context depending on the VSI type to be added and * passes it down to the add_vsi aq command to create a new VSI. */ - static int ice_vsi_add(struct ice_vsi *vsi) + static int ice_vsi_init(struct ice_vsi *vsi) { struct ice_vsi_ctx ctxt = { 0 }; struct ice_pf *pf = vsi->back; @@@ -1453,13 -1723,17 +1723,17 @@@ ctxt.info.sw_id = vsi->port_info->sw_id; ice_vsi_setup_q_map(vsi, &ctxt);
- ret = ice_aq_add_vsi(hw, &ctxt, NULL); + ret = ice_add_vsi(hw, vsi->idx, &ctxt, NULL); if (ret) { - dev_err(&vsi->back->pdev->dev, - "Add VSI AQ call failed, err %d\n", ret); + dev_err(&pf->pdev->dev, + "Add VSI failed, err %d\n", ret); return -EIO; } + + /* keep context for update VSI operations */ vsi->info = ctxt.info; + + /* record VSI number returned */ vsi->vsi_num = ctxt.vsi_num;
return ret; @@@ -1735,8 -2009,14 +2009,14 @@@ static irqreturn_t ice_misc_intr(int __ oicr = rd32(hw, PFINT_OICR); ena_mask = rd32(hw, PFINT_OICR_ENA);
+ if (oicr & PFINT_OICR_MAL_DETECT_M) { + ena_mask &= ~PFINT_OICR_MAL_DETECT_M; + set_bit(__ICE_MDD_EVENT_PENDING, pf->state); + } + if (oicr & PFINT_OICR_GRST_M) { u32 reset; + /* we have a reset warning */ ena_mask &= ~PFINT_OICR_GRST_M; reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >> @@@ -1754,7 -2034,8 +2034,8 @@@ * We also make note of which reset happened so that peer * devices/drivers can be informed. */ - if (!test_bit(__ICE_RESET_RECOVERY_PENDING, pf->state)) { + if (!test_and_set_bit(__ICE_RESET_RECOVERY_PENDING, + pf->state)) { if (reset == ICE_RESET_CORER) set_bit(__ICE_CORER_RECV, pf->state); else if (reset == ICE_RESET_GLOBR) @@@ -1762,7 -2043,20 +2043,20 @@@ else set_bit(__ICE_EMPR_RECV, pf->state);
- set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + /* There are couple of different bits at play here. + * hw->reset_ongoing indicates whether the hardware is + * in reset. This is set to true when a reset interrupt + * is received and set back to false after the driver + * has determined that the hardware is out of reset. + * + * __ICE_RESET_RECOVERY_PENDING in pf->state indicates + * that a post reset rebuild is required before the + * driver is operational again. This is set above. + * + * As this is the start of the reset/rebuild cycle, set + * both to indicate that. + */ + hw->reset_ongoing = true; } }
@@@ -2635,14 -2929,12 +2929,12 @@@ ice_vsi_cfg_rss_exit }
/** - * ice_vsi_reinit_setup - return resource and reallocate resource for a VSI - * @vsi: pointer to the ice_vsi - * - * This reallocates the VSIs queue resources + * ice_vsi_rebuild - Rebuild VSI after reset + * @vsi: vsi to be rebuild * * Returns 0 on success and negative value on failure */ - static int ice_vsi_reinit_setup(struct ice_vsi *vsi) + static int ice_vsi_rebuild(struct ice_vsi *vsi) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; int ret, i; @@@ -2658,7 -2950,7 +2950,7 @@@ ice_vsi_set_num_qs(vsi);
/* Initialize VSI struct elements and create VSI in FW */ - ret = ice_vsi_add(vsi); + ret = ice_vsi_init(vsi); if (ret < 0) goto err_vsi;
@@@ -2668,19 -2960,7 +2960,7 @@@
switch (vsi->type) { case ICE_VSI_PF: - if (!vsi->netdev) { - ret = ice_cfg_netdev(vsi); - if (ret) - goto err_rings; - - ret = register_netdev(vsi->netdev); - if (ret) - goto err_rings; - - netif_carrier_off(vsi->netdev); - netif_tx_stop_all_queues(vsi->netdev); - } - + /* fall through */ ret = ice_vsi_alloc_q_vectors(vsi); if (ret) goto err_rings; @@@ -2732,21 -3012,23 +3012,23 @@@ err_vsi /** * ice_vsi_setup - Set up a VSI by a given type * @pf: board private structure - * @type: VSI type * @pi: pointer to the port_info instance + * @type: VSI type + * @vf_id: defines VF id to which this VSI connects. This field is meant to be + * used only for ICE_VSI_VF VSI type. For other VSI types, should + * fill-in ICE_INVAL_VFID as input. * * This allocates the sw VSI structure and its queue resources. * - * Returns pointer to the successfully allocated and configure VSI sw struct on - * success, otherwise returns NULL on failure. + * Returns pointer to the successfully allocated and configured VSI sw struct on + * success, NULL on failure. */ static struct ice_vsi * - ice_vsi_setup(struct ice_pf *pf, enum ice_vsi_type type, - struct ice_port_info *pi) + ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, + enum ice_vsi_type type, u16 __always_unused vf_id) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct device *dev = &pf->pdev->dev; - struct ice_vsi_ctx ctxt = { 0 }; struct ice_vsi *vsi; int ret, i;
@@@ -2769,12 -3051,10 +3051,10 @@@ ice_vsi_set_rss_params(vsi);
/* create the VSI */ - ret = ice_vsi_add(vsi); + ret = ice_vsi_init(vsi); if (ret) goto err_vsi;
- ctxt.vsi_num = vsi->vsi_num; - switch (vsi->type) { case ICE_VSI_PF: ret = ice_cfg_netdev(vsi); @@@ -2843,10 -3123,7 +3123,7 @@@ err_register_netdev vsi->netdev = NULL; } err_cfg_netdev: - ret = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL); - if (ret) - dev_err(&vsi->back->pdev->dev, - "Free VSI AQ call failed, err %d\n", ret); + ice_vsi_delete(vsi); err_vsi: ice_vsi_put_qs(vsi); err_get_qs: @@@ -2858,6 -3135,20 +3135,20 @@@ }
/** + * ice_pf_vsi_setup - Set up a PF VSI + * @pf: board private structure + * @pi: pointer to the port_info instance + * + * Returns pointer to the successfully allocated VSI sw struct on success, + * otherwise returns NULL on failure. + */ + static struct ice_vsi * + ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) + { + return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID); + } + + /** * ice_vsi_add_vlan - Add vsi membership for given vlan * @vsi: the vsi being configured * @vid: vlan id to be added @@@ -2908,7 -3199,7 +3199,7 @@@ static int ice_vlan_rx_add_vid(struct n { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; - int ret = 0; + int ret;
if (vid >= VLAN_N_VID) { netdev_err(netdev, "VLAN id requested %d is out of range %d\n", @@@ -2919,6 -3210,13 +3210,13 @@@ if (vsi->info.pvid) return -EINVAL;
+ /* Enable VLAN pruning when VLAN 0 is added */ + if (unlikely(!vid)) { + ret = ice_cfg_vlan_pruning(vsi, true); + if (ret) + return ret; + } + /* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is * needed to continue allowing all untagged packets since VLAN prune * list is applied to all packets by the switch @@@ -2935,16 -3233,19 +3233,19 @@@ * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN * @vsi: the VSI being configured * @vid: VLAN id to be removed + * + * Returns 0 on success and negative on failure */ - static void ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) + static int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) { struct ice_fltr_list_entry *list; struct ice_pf *pf = vsi->back; LIST_HEAD(tmp_add_list); + int status = 0;
list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); if (!list) - return; + return -ENOMEM;
list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; list->fltr_info.fwd_id.vsi_id = vsi->vsi_num; @@@ -2956,11 -3257,14 +3257,14 @@@ INIT_LIST_HEAD(&list->list_entry); list_add(&list->list_entry, &tmp_add_list);
- if (ice_remove_vlan(&pf->hw, &tmp_add_list)) + if (ice_remove_vlan(&pf->hw, &tmp_add_list)) { dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n", vid, vsi->vsi_num); + status = -EIO; + }
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); + return status; }
/** @@@ -2976,19 -3280,25 +3280,25 @@@ static int ice_vlan_rx_kill_vid(struct { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; + int status;
if (vsi->info.pvid) return -EINVAL;
- /* return code is ignored as there is nothing a user - * can do about failure to remove and a log message was - * already printed from the other function + /* Make sure ice_vsi_kill_vlan is successful before updating VLAN + * information */ - ice_vsi_kill_vlan(vsi, vid); + status = ice_vsi_kill_vlan(vsi, vid); + if (status) + return status;
clear_bit(vid, vsi->active_vlans);
- return 0; + /* Disable VLAN pruning when VLAN 0 is removed */ + if (unlikely(!vid)) + status = ice_cfg_vlan_pruning(vsi, false); + + return status; }
/** @@@ -3004,50 -3314,48 +3314,48 @@@ static int ice_setup_pf_sw(struct ice_p struct ice_vsi *vsi; int status = 0;
- if (!ice_is_reset_recovery_pending(pf->state)) { - vsi = ice_vsi_setup(pf, ICE_VSI_PF, pf->hw.port_info); - if (!vsi) { - status = -ENOMEM; - goto error_exit; - } - } else { - vsi = pf->vsi[0]; - status = ice_vsi_reinit_setup(vsi); - if (status < 0) - return -EIO; + if (ice_is_reset_recovery_pending(pf->state)) + return -EBUSY; + + vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); + if (!vsi) { + status = -ENOMEM; + goto unroll_vsi_setup; }
- /* tmp_add_list contains a list of MAC addresses for which MAC - * filters need to be programmed. Add the VSI's unicast MAC to - * this list + /* To add a MAC filter, first add the MAC to a list and then + * pass the list to ice_add_mac. */ + + /* Add a unicast MAC filter so the VSI can get its packets */ status = ice_add_mac_to_list(vsi, &tmp_add_list, vsi->port_info->mac.perm_addr); if (status) - goto error_exit; + goto unroll_vsi_setup;
/* VSI needs to receive broadcast traffic, so add the broadcast - * MAC address to the list. + * MAC address to the list as well. */ eth_broadcast_addr(broadcast); status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast); if (status) - goto error_exit; + goto free_mac_list;
/* program MAC filters for entries in tmp_add_list */ status = ice_add_mac(&pf->hw, &tmp_add_list); if (status) { dev_err(&pf->pdev->dev, "Could not add MAC filters\n"); status = -ENOMEM; - goto error_exit; + goto free_mac_list; }
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); return status;
- error_exit: + free_mac_list: ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+ unroll_vsi_setup: if (vsi) { ice_vsi_free_q_vectors(vsi); if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED) @@@ -3097,10 -3405,7 +3405,7 @@@ static void ice_determine_q_usage(struc */ static void ice_deinit_pf(struct ice_pf *pf) { - if (pf->serv_tmr.function) - del_timer_sync(&pf->serv_tmr); - if (pf->serv_task.func) - cancel_work_sync(&pf->serv_task); + ice_service_task_stop(pf); mutex_destroy(&pf->sw_mutex); mutex_destroy(&pf->avail_q_mutex); } @@@ -3307,6 -3612,8 +3612,8 @@@ static int ice_probe(struct pci_dev *pd pf->pdev = pdev; pci_set_drvdata(pdev, pf); set_bit(__ICE_DOWN, pf->state); + /* Disable service task until DOWN bit is cleared */ + set_bit(__ICE_SERVICE_DIS, pf->state);
hw = &pf->hw; hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; @@@ -3364,6 -3671,9 +3671,9 @@@ goto err_init_interrupt_unroll; }
+ /* Driver is mostly up */ + clear_bit(__ICE_DOWN, pf->state); + /* In case of MSIX we are going to setup the misc vector right here * to handle admin queue events etc. In case of legacy and MSI * the misc functionality and queue processing is combined in @@@ -3386,7 -3696,11 +3696,11 @@@ goto err_msix_misc_unroll; }
- pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; + if (hw->evb_veb) + pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; + else + pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; + pf->first_sw->pf = pf;
/* record the sw_id available for later use */ @@@ -3399,8 -3713,7 +3713,7 @@@ goto err_alloc_sw_unroll; }
- /* Driver is mostly up */ - clear_bit(__ICE_DOWN, pf->state); + clear_bit(__ICE_SERVICE_DIS, pf->state);
/* since everything is good, start the service timer */ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); @@@ -3414,6 -3727,7 +3727,7 @@@ return 0;
err_alloc_sw_unroll: + set_bit(__ICE_SERVICE_DIS, pf->state); set_bit(__ICE_DOWN, pf->state); devm_kfree(&pf->pdev->dev, pf->first_sw); err_msix_misc_unroll: @@@ -3436,24 -3750,14 +3750,14 @@@ err_exit_unroll static void ice_remove(struct pci_dev *pdev) { struct ice_pf *pf = pci_get_drvdata(pdev); - int i = 0; - int err;
if (!pf) return;
set_bit(__ICE_DOWN, pf->state); + ice_service_task_stop(pf);
- for (i = 0; i < pf->num_alloc_vsi; i++) { - if (!pf->vsi[i]) - continue; - - err = ice_vsi_release(pf->vsi[i]); - if (err) - dev_dbg(&pf->pdev->dev, "Failed to release VSI index %d (err %d)\n", - i, err); - } - + ice_vsi_release_all(pf); ice_free_irq_msix_misc(pf); ice_clear_interrupt_scheme(pf); ice_deinit_pf(pf); @@@ -3500,7 -3804,7 +3804,7 @@@ static int __init ice_module_init(void pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver); pr_info("%s\n", ice_copyright);
- ice_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, KBUILD_MODNAME); + ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); if (!ice_wq) { pr_err("Failed to create workqueue\n"); return -ENOMEM; @@@ -4185,7 -4489,14 +4489,14 @@@ static int ice_vsi_stop_tx_rings(struc } status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids, NULL); - if (status) { + /* if the disable queue command was exercised during an active reset + * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as + * the reset operation disables queues at the hardware level anyway. + */ + if (status == ICE_ERR_RESET_ONGOING) { + dev_dbg(&pf->pdev->dev, + "Reset in progress. LAN Tx queues already disabled\n"); + } else if (status) { dev_err(&pf->pdev->dev, "Failed to disable LAN Tx queues, error: %d\n", status); @@@ -4806,6 -5117,30 +5117,6 @@@ void ice_get_stats64(struct net_device stats->rx_length_errors = vsi_stats->rx_length_errors; }
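The ice_module_init() change above trades the ordered workqueue (strictly one item at a time, in queueing order) for a regular concurrency-managed one, keeping WQ_MEM_RECLAIM so the queue still has a rescuer for memory-reclaim situations. For reference, the two allocation forms as used by the old and new lines:

        /* ordered: at most one work item runs at a time */
        ice_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, KBUILD_MODNAME);

        /* regular: concurrency-managed, 0 selects the default max_active */
        ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);

        if (!ice_wq)
                return -ENOMEM;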
-#ifdef CONFIG_NET_POLL_CONTROLLER -/** - * ice_netpoll - polling "interrupt" handler - * @netdev: network interface device structure - * - * Used by netconsole to send skbs without having to re-enable interrupts. - * This is not called in the normal interrupt path. - */ -static void ice_netpoll(struct net_device *netdev) -{ - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_vsi *vsi = np->vsi; - struct ice_pf *pf = vsi->back; - int i; - - if (test_bit(__ICE_DOWN, vsi->state) || - !test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - return; - - for (i = 0; i < vsi->num_q_vectors; i++) - ice_msix_clean_rings(0, vsi->q_vectors[i]); -} -#endif /* CONFIG_NET_POLL_CONTROLLER */ - /** * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI * @vsi: VSI having NAPI disabled @@@ -5056,8 -5391,14 +5367,14 @@@ static int ice_vsi_release(struct ice_v if (!vsi->back) return -ENODEV; pf = vsi->back; - - if (vsi->netdev) { + /* do not unregister and free netdevs while driver is in the reset + * recovery pending state. Since reset/rebuild happens through PF + * service task workqueue, its not a good idea to unregister netdev + * that is associated to the PF that is running the work queue items + * currently. This is done to avoid check_flush_dependency() warning + * on this wq + */ + if (vsi->netdev && !ice_is_reset_recovery_pending(pf->state)) { unregister_netdev(vsi->netdev); free_netdev(vsi->netdev); vsi->netdev = NULL; @@@ -5083,12 -5424,40 +5400,40 @@@ pf->q_left_tx += vsi->alloc_txq; pf->q_left_rx += vsi->alloc_rxq;
- ice_vsi_clear(vsi); + /* retain SW VSI data structure since it is needed to unregister and + * free VSI netdev when PF is not in reset recovery pending state, + * for example, during rmmod. + */ + if (!ice_is_reset_recovery_pending(pf->state)) + ice_vsi_clear(vsi);
return 0; }
/** + * ice_vsi_release_all - Delete all VSIs + * @pf: PF from which all VSIs are being removed + */ + static void ice_vsi_release_all(struct ice_pf *pf) + { + int err, i; + + if (!pf->vsi) + return; + + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (!pf->vsi[i]) + continue; + + err = ice_vsi_release(pf->vsi[i]); + if (err) + dev_dbg(&pf->pdev->dev, + "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", + i, err, pf->vsi[i]->vsi_num); + } + } + + /** * ice_dis_vsi - pause a VSI * @vsi: the VSI being paused */ @@@ -5100,27 -5469,31 +5445,31 @@@ static void ice_dis_vsi(struct ice_vsi set_bit(__ICE_NEEDS_RESTART, vsi->state);
if (vsi->netdev && netif_running(vsi->netdev) && - vsi->type == ICE_VSI_PF) + vsi->type == ICE_VSI_PF) { + rtnl_lock(); vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); - - ice_vsi_close(vsi); + rtnl_unlock(); + } else { + ice_vsi_close(vsi); + } }
/** * ice_ena_vsi - resume a VSI * @vsi: the VSI being resumed */ - static void ice_ena_vsi(struct ice_vsi *vsi) + static int ice_ena_vsi(struct ice_vsi *vsi) { - if (!test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state)) - return; + int err = 0; + + if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state)) + if (vsi->netdev && netif_running(vsi->netdev)) { + rtnl_lock(); + err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev); + rtnl_unlock(); + }
- if (vsi->netdev && netif_running(vsi->netdev)) - vsi->netdev->netdev_ops->ndo_open(vsi->netdev); - else if (ice_vsi_open(vsi)) - /* this clears the DOWN bit */ - dev_dbg(&vsi->back->pdev->dev, "Failed open VSI 0x%04X on switch 0x%04X\n", - vsi->vsi_num, vsi->vsw->sw_id); + return err; }
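ice_dis_vsi()/ice_ena_vsi() now call the netdev's ndo_stop/ndo_open callbacks directly from the reset path, so they have to take the RTNL lock themselves, something dev_open()/dev_close() would otherwise do for them. A minimal sketch of that rule (hypothetical helper, not driver code):

        /* illustrative only: calling an ndo_open outside the dev_open() path */
        static int example_reopen(struct net_device *netdev)
        {
                int err = 0;

                if (netif_running(netdev)) {
                        rtnl_lock();
                        err = netdev->netdev_ops->ndo_open(netdev);
                        rtnl_unlock();
                }
                return err;
        }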
/** @@@ -5140,13 -5513,47 +5489,47 @@@ static void ice_pf_dis_all_vsi(struct i * ice_pf_ena_all_vsi - Resume all VSIs on a PF * @pf: the PF */ - static void ice_pf_ena_all_vsi(struct ice_pf *pf) + static int ice_pf_ena_all_vsi(struct ice_pf *pf) { int v;
ice_for_each_vsi(pf, v) if (pf->vsi[v]) - ice_ena_vsi(pf->vsi[v]); + if (ice_ena_vsi(pf->vsi[v])) + return -EIO; + + return 0; + } + + /** + * ice_vsi_rebuild_all - rebuild all VSIs in pf + * @pf: the PF + */ + static int ice_vsi_rebuild_all(struct ice_pf *pf) + { + int i; + + /* loop through pf->vsi array and reinit the VSI if found */ + for (i = 0; i < pf->num_alloc_vsi; i++) { + int err; + + if (!pf->vsi[i]) + continue; + + err = ice_vsi_rebuild(pf->vsi[i]); + if (err) { + dev_err(&pf->pdev->dev, + "VSI at index %d rebuild failed\n", + pf->vsi[i]->idx); + return err; + } + + dev_info(&pf->pdev->dev, + "VSI at index %d rebuilt. vsi_num = 0x%x\n", + pf->vsi[i]->idx, pf->vsi[i]->vsi_num); + } + + return 0; }
/** @@@ -5168,13 -5575,13 +5551,13 @@@ static void ice_rebuild(struct ice_pf * ret = ice_init_all_ctrlq(hw); if (ret) { dev_err(dev, "control queues init failed %d\n", ret); - goto fail_reset; + goto err_init_ctrlq; }
ret = ice_clear_pf_cfg(hw); if (ret) { dev_err(dev, "clear PF configuration failed %d\n", ret); - goto fail_reset; + goto err_init_ctrlq; }
ice_clear_pxe_mode(hw); @@@ -5182,14 -5589,24 +5565,24 @@@ ret = ice_get_caps(hw); if (ret) { dev_err(dev, "ice_get_caps failed %d\n", ret); - goto fail_reset; + goto err_init_ctrlq; }
- /* basic nic switch setup */ - err = ice_setup_pf_sw(pf); + err = ice_sched_init_port(hw->port_info); + if (err) + goto err_sched_init_port; + + err = ice_vsi_rebuild_all(pf); if (err) { - dev_err(dev, "ice_setup_pf_sw failed\n"); - goto fail_reset; + dev_err(dev, "ice_vsi_rebuild_all failed\n"); + goto err_vsi_rebuild; + } + + ret = ice_replay_all_fltr(&pf->hw); + if (ret) { + dev_err(&pf->pdev->dev, + "error replaying switch filter rules\n"); + goto err_vsi_rebuild; }
/* start misc vector */ @@@ -5197,20 -5614,35 +5590,35 @@@ err = ice_req_irq_msix_misc(pf); if (err) { dev_err(dev, "misc vector setup failed: %d\n", err); - goto fail_reset; + goto err_vsi_rebuild; } }
/* restart the VSIs that were rebuilt and running before the reset */ - ice_pf_ena_all_vsi(pf); + err = ice_pf_ena_all_vsi(pf); + if (err) { + dev_err(&pf->pdev->dev, "error enabling VSIs\n"); + /* no need to disable VSIs in tear down path in ice_rebuild() + * since its already taken care in ice_vsi_open() + */ + goto err_vsi_rebuild; + }
+ /* if we get here, reset flow is successful */ + clear_bit(__ICE_RESET_FAILED, pf->state); return;
- fail_reset: + err_vsi_rebuild: + ice_vsi_release_all(pf); + err_sched_init_port: + ice_sched_cleanup_all(hw); + err_init_ctrlq: ice_shutdown_all_ctrlq(hw); set_bit(__ICE_RESET_FAILED, pf->state); clear_recovery: - set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + /* set this bit in PF state to control service task scheduling */ + set_bit(__ICE_NEEDS_RESTART, pf->state); + dev_err(dev, "Rebuild failed, unload and reload driver\n"); }
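The rebuild failure handling above now unwinds in stages: err_vsi_rebuild releases the VSIs, err_sched_init_port cleans up the scheduler, err_init_ctrlq shuts down the control queues, and clear_recovery merely flags that a driver reload is required. This is the usual kernel label-ladder idiom, where each goto target undoes everything set up before the failing step. A self-contained toy version of the idiom (plain userspace C, hypothetical resources):

        #include <stdio.h>
        #include <stdlib.h>

        static int setup_two_stages(void)
        {
                void *a, *b;
                int err;

                a = malloc(32);
                if (!a) {
                        err = -1;
                        goto err_a;             /* nothing to undo yet */
                }

                b = malloc(64);
                if (!b) {
                        err = -2;
                        goto err_b;             /* must also undo stage 'a' */
                }

                /* success path: a real driver would keep both resources */
                free(b);
                free(a);
                return 0;

        err_b:
                free(a);
        err_a:
                return err;
        }

        int main(void)
        {
                printf("setup returned %d\n", setup_two_stages());
                return 0;
        }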
/** @@@ -5366,6 -5798,232 +5774,232 @@@ int ice_get_rss(struct ice_vsi *vsi, u }
/** + * ice_bridge_getlink - Get the hardware bridge mode + * @skb: skb buff + * @pid: process id + * @seq: RTNL message seq + * @dev: the netdev being configured + * @filter_mask: filter mask passed in + * @nlflags: netlink flags passed in + * + * Return the bridge mode (VEB/VEPA) + */ + static int + ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, u32 filter_mask, int nlflags) + { + struct ice_netdev_priv *np = netdev_priv(dev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + u16 bmode; + + bmode = pf->first_sw->bridge_mode; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags, + filter_mask, NULL); + } + + /** + * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA) + * @vsi: Pointer to VSI structure + * @bmode: Hardware bridge mode (VEB/VEPA) + * + * Returns 0 on success, negative on failure + */ + static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) + { + struct device *dev = &vsi->back->pdev->dev; + struct ice_aqc_vsi_props *vsi_props; + struct ice_hw *hw = &vsi->back->hw; + struct ice_vsi_ctx ctxt = { 0 }; + enum ice_status status; + + vsi_props = &vsi->info; + ctxt.info = vsi->info; + + if (bmode == BRIDGE_MODE_VEB) + /* change from VEPA to VEB mode */ + ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; + else + /* change from VEB to VEPA mode */ + ctxt.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; + ctxt.vsi_num = vsi->vsi_num; + ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); + status = ice_aq_update_vsi(hw, &ctxt, NULL); + if (status) { + dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n", + bmode, status, hw->adminq.sq_last_status); + return -EIO; + } + /* Update sw flags for book keeping */ + vsi_props->sw_flags = ctxt.info.sw_flags; + + return 0; + } + + /** + * ice_bridge_setlink - Set the hardware bridge mode + * @dev: the netdev being configured + * @nlh: RTNL message + * @flags: bridge setlink flags + * + * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is + * hooked up to. Iterates through the PF VSI list and sets the loopback mode (if + * not already set for all VSIs connected to this switch. And also update the + * unicast switch filter rules for the corresponding switch of the netdev. 
+ */ + static int + ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, + u16 __always_unused flags) + { + struct ice_netdev_priv *np = netdev_priv(dev); + struct ice_pf *pf = np->vsi->back; + struct nlattr *attr, *br_spec; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + struct ice_sw *pf_sw; + int rem, v, err = 0; + + pf_sw = pf->first_sw; + /* find the attribute in the netlink message */ + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + mode = nla_get_u16(attr); + if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) + return -EINVAL; + /* Continue if bridge mode is not being flipped */ + if (mode == pf_sw->bridge_mode) + continue; + /* Iterates through the PF VSI list and update the loopback + * mode of the VSI + */ + ice_for_each_vsi(pf, v) { + if (!pf->vsi[v]) + continue; + err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); + if (err) + return err; + } + + hw->evb_veb = (mode == BRIDGE_MODE_VEB); + /* Update the unicast switch filter rules for the corresponding + * switch of the netdev + */ + status = ice_update_sw_rule_bridge_mode(hw); + if (status) { + netdev_err(dev, "update SW_RULE for bridge mode failed, = %d err %d aq_err %d\n", + mode, status, hw->adminq.sq_last_status); + /* revert hw->evb_veb */ + hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); + return -EIO; + } + + pf_sw->bridge_mode = mode; + } + + return 0; + } + + /** + * ice_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + */ + static void ice_tx_timeout(struct net_device *netdev) + { + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_ring *tx_ring = NULL; + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + u32 head, val = 0, i; + int hung_queue = -1; + + pf->tx_timeout_count++; + + /* find the stopped queue the same way the stack does */ + for (i = 0; i < netdev->num_tx_queues; i++) { + struct netdev_queue *q; + unsigned long trans_start; + + q = netdev_get_tx_queue(netdev, i); + trans_start = q->trans_start; + if (netif_xmit_stopped(q) && + time_after(jiffies, + (trans_start + netdev->watchdog_timeo))) { + hung_queue = i; + break; + } + } + + if (i == netdev->num_tx_queues) { + netdev_info(netdev, "tx_timeout: no netdev hung queue found\n"); + } else { + /* now that we have an index, find the tx_ring struct */ + for (i = 0; i < vsi->num_txq; i++) { + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { + if (hung_queue == + vsi->tx_rings[i]->q_index) { + tx_ring = vsi->tx_rings[i]; + break; + } + } + } + } + + /* Reset recovery level if enough time has elapsed after last timeout. + * Also ensure no new reset action happens before next timeout period. 
+ */ + if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) + pf->tx_timeout_recovery_level = 1; + else if (time_before(jiffies, (pf->tx_timeout_last_recovery + + netdev->watchdog_timeo))) + return; + + if (tx_ring) { + head = tx_ring->next_to_clean; + /* Read interrupt register */ + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) + val = rd32(&pf->hw, + GLINT_DYN_CTL(tx_ring->q_vector->v_idx + + tx_ring->vsi->base_vector - 1)); + + netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n", + vsi->vsi_num, hung_queue, tx_ring->next_to_clean, + head, tx_ring->next_to_use, + readl(tx_ring->tail), val); + } + + pf->tx_timeout_last_recovery = jiffies; + netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n", + pf->tx_timeout_recovery_level, hung_queue); + + switch (pf->tx_timeout_recovery_level) { + case 1: + set_bit(__ICE_PFR_REQ, pf->state); + break; + case 2: + set_bit(__ICE_CORER_REQ, pf->state); + break; + case 3: + set_bit(__ICE_GLOBR_REQ, pf->state); + break; + default: + netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n"); + set_bit(__ICE_DOWN, pf->state); + set_bit(__ICE_NEEDS_RESTART, vsi->state); + set_bit(__ICE_SERVICE_DIS, pf->state); + break; + } + + ice_service_task_schedule(pf); + pf->tx_timeout_recovery_level++; + } + + /** * ice_open - Called when a network interface becomes active * @netdev: network interface device structure * @@@ -5383,6 -6041,11 +6017,11 @@@ static int ice_open(struct net_device * struct ice_vsi *vsi = np->vsi; int err;
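The new ice_tx_timeout() handler above escalates on repeated timeouts: level 1 requests a PF reset, level 2 a core reset, level 3 a global reset, and anything beyond that marks the device down and disables the service task; the level drops back to 1 once 20 seconds have passed since the last recovery. A tiny standalone model of that escalation policy (names and the timing window are illustrative only):

        #include <stdio.h>

        enum reset_req { PF_RESET, CORE_RESET, GLOBAL_RESET, GIVE_UP };

        static enum reset_req pick_recovery(int level)
        {
                switch (level) {
                case 1: return PF_RESET;        /* mildest: this function only */
                case 2: return CORE_RESET;      /* reset the device core */
                case 3: return GLOBAL_RESET;    /* last resort before giving up */
                default: return GIVE_UP;        /* unrecoverable, needs reload */
                }
        }

        int main(void)
        {
                int level;

                for (level = 1; level <= 4; level++)
                        printf("timeout at level %d -> action %d\n",
                               level, pick_recovery(level));
                return 0;
        }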
+ if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) { + netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); + return -EIO; + } + netif_carrier_off(netdev);
err = ice_vsi_open(vsi); @@@ -5473,9 -6136,15 +6112,12 @@@ static const struct net_device_ops ice_ .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = ice_change_mtu, .ndo_get_stats64 = ice_get_stats64, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ice_netpoll, -#endif /* CONFIG_NET_POLL_CONTROLLER */ .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, .ndo_set_features = ice_set_features, + .ndo_bridge_getlink = ice_bridge_getlink, + .ndo_bridge_setlink = ice_bridge_setlink, .ndo_fdb_add = ice_fdb_add, .ndo_fdb_del = ice_fdb_del, + .ndo_tx_timeout = ice_tx_timeout, }; diff --combined drivers/net/ethernet/intel/igb/igb_main.c index 0796cef96fa3,c18e79112cad..0d29df8accd8 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@@ -205,6 -205,10 +205,6 @@@ static struct notifier_block dca_notifi .priority = 0 }; #endif -#ifdef CONFIG_NET_POLL_CONTROLLER -/* for netdump / net console */ -static void igb_netpoll(struct net_device *); -#endif #ifdef CONFIG_PCI_IOV static unsigned int max_vfs; module_param(max_vfs, uint, 0); @@@ -239,7 -243,7 +239,7 @@@ static struct pci_driver igb_driver =
MODULE_AUTHOR("Intel Corporation, e1000-devel@lists.sourceforge.net"); MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); - MODULE_LICENSE("GPL"); + MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) @@@ -2877,6 -2881,9 +2877,6 @@@ static const struct net_device_ops igb_ .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, .ndo_set_vf_trust = igb_ndo_set_vf_trust, .ndo_get_vf_config = igb_ndo_get_vf_config, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = igb_netpoll, -#endif .ndo_fix_features = igb_fix_features, .ndo_set_features = igb_set_features, .ndo_fdb_add = igb_ndo_fdb_add, @@@ -9046,6 -9053,29 +9046,6 @@@ static int igb_pci_sriov_configure(stru return 0; }
-#ifdef CONFIG_NET_POLL_CONTROLLER -/* Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. - */ -static void igb_netpoll(struct net_device *netdev) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct igb_q_vector *q_vector; - int i; - - for (i = 0; i < adapter->num_q_vectors; i++) { - q_vector = adapter->q_vector[i]; - if (adapter->flags & IGB_FLAG_HAS_MSIX) - wr32(E1000_EIMC, q_vector->eims_value); - else - igb_irq_disable(adapter); - napi_schedule(&q_vector->napi); - } -} -#endif /* CONFIG_NET_POLL_CONTROLLER */ - /** * igb_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device diff --combined drivers/net/ethernet/intel/ixgb/ixgb_main.c index 7722153c4ac2,4bb89587cd6a..1d4d1686909a --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@@ -81,6 -81,11 +81,6 @@@ static int ixgb_vlan_rx_kill_vid(struc __be16 proto, u16 vid); static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
-#ifdef CONFIG_NET_POLL_CONTROLLER -/* for netdump / net console */ -static void ixgb_netpoll(struct net_device *dev); -#endif - static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, enum pci_channel_state state); static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev); @@@ -102,7 -107,7 +102,7 @@@ static struct pci_driver ixgb_driver =
MODULE_AUTHOR("Intel Corporation, linux.nics@intel.com"); MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver"); - MODULE_LICENSE("GPL"); + MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) @@@ -343,6 -348,9 +343,6 @@@ static const struct net_device_ops ixgb .ndo_tx_timeout = ixgb_tx_timeout, .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ixgb_netpoll, -#endif .ndo_fix_features = ixgb_fix_features, .ndo_set_features = ixgb_set_features, }; @@@ -2187,6 -2195,23 +2187,6 @@@ ixgb_restore_vlan(struct ixgb_adapter * ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); }
-#ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. - */ - -static void ixgb_netpoll(struct net_device *dev) -{ - struct ixgb_adapter *adapter = netdev_priv(dev); - - disable_irq(adapter->pdev->irq); - ixgb_intr(adapter->pdev->irq, dev); - enable_irq(adapter->pdev->irq); -} -#endif - /** * ixgb_io_error_detected - called when PCI error is detected * @pdev: pointer to pci device with error diff --combined drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index f27d73a7bf16,27a8546c88b2..140e87a10ff5 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@@ -159,7 -159,7 +159,7 @@@ MODULE_PARM_DESC(debug, "Debug level (0
MODULE_AUTHOR("Intel Corporation, linux.nics@intel.com"); MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); - MODULE_LICENSE("GPL"); + MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION);
static struct workqueue_struct *ixgbe_wq; @@@ -7775,6 -7775,33 +7775,33 @@@ static void ixgbe_reset_subtask(struct }
/** + * ixgbe_check_fw_error - Check firmware for errors + * @adapter: the adapter private structure + * + * Check firmware errors in register FWSM + */ + static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; + u32 fwsm; + + /* read fwsm.ext_err_ind register and log errors */ + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); + + if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK || + !(fwsm & IXGBE_FWSM_FW_VAL_BIT)) + e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n", + fwsm); + + if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) { + e_dev_err("Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); + return true; + } + + return false; + } + + /** * ixgbe_service_task - manages and runs subtasks * @work: pointer to work_struct containing our data **/ @@@ -7792,6 -7819,15 +7819,15 @@@ static void ixgbe_service_task(struct w ixgbe_service_event_complete(adapter); return; } + if (ixgbe_check_fw_error(adapter)) { + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + rtnl_lock(); + unregister_netdev(adapter->netdev); + rtnl_unlock(); + } + ixgbe_service_event_complete(adapter); + return; + } if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) { rtnl_lock(); adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; @@@ -8768,6 -8804,28 +8804,6 @@@ static int ixgbe_del_sanmac_netdev(stru return err; }
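ixgbe_check_fw_error() above warns when the FWSM word carries an error indication or is missing the firmware-valid bit, and returns true only when the MAC ops report firmware recovery mode, at which point the service task unregisters the netdev under RTNL. A compact standalone model of that kind of status-word test (the masks here are illustrative placeholders, not the real IXGBE_FWSM_* layout):

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        #define EXT_ERR_IND_MASK 0x01f80000u    /* placeholder mask */
        #define FW_VAL_BIT       0x00008000u    /* placeholder bit */

        static bool fw_looks_bad(uint32_t fwsm)
        {
                /* fault if an error code is latched or the valid bit is gone */
                return (fwsm & EXT_ERR_IND_MASK) || !(fwsm & FW_VAL_BIT);
        }

        int main(void)
        {
                uint32_t sample = 0x00008000u;  /* valid bit set, no error code */

                printf("fw_looks_bad = %d\n", fw_looks_bad(sample));
                return 0;
        }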
-#ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. - */ -static void ixgbe_netpoll(struct net_device *netdev) -{ - struct ixgbe_adapter *adapter = netdev_priv(netdev); - int i; - - /* if interface is down do nothing */ - if (test_bit(__IXGBE_DOWN, &adapter->state)) - return; - - /* loop through and schedule all active queues */ - for (i = 0; i < adapter->num_q_vectors; i++) - ixgbe_msix_clean_rings(0, adapter->q_vector[i]); -} - -#endif - static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats, struct ixgbe_ring *ring) { @@@ -10229,6 -10287,9 +10265,6 @@@ static const struct net_device_ops ixgb .ndo_get_vf_config = ixgbe_ndo_get_vf_config, .ndo_get_stats64 = ixgbe_get_stats64, .ndo_setup_tc = __ixgbe_setup_tc, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ixgbe_netpoll, -#endif #ifdef IXGBE_FCOE .ndo_select_queue = ixgbe_select_queue, .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, @@@ -10691,6 -10752,11 +10727,11 @@@ skip_sriov if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) netdev->features |= NETIF_F_LRO;
+ if (ixgbe_check_fw_error(adapter)) { + err = -EIO; + goto err_sw_init; + } + /* make sure the EEPROM is good */ if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { e_dev_err("The EEPROM Checksum Is Not Valid\n"); diff --combined drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 5a228582423b,ba6562e8cd73..98707ee11d72 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@@ -40,7 -40,7 +40,7 @@@ static const char ixgbevf_driver_string #define DRV_VERSION "4.1.0-k" const char ixgbevf_driver_version[] = DRV_VERSION; static char ixgbevf_copyright[] = - "Copyright (c) 2009 - 2015 Intel Corporation."; + "Copyright (c) 2009 - 2018 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_82599_vf] = &ixgbevf_82599_vf_info, @@@ -79,7 -79,7 +79,7 @@@ MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tb
MODULE_AUTHOR("Intel Corporation, linux.nics@intel.com"); MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver"); - MODULE_LICENSE("GPL"); + MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) @@@ -268,7 -268,7 +268,7 @@@ static bool ixgbevf_clean_tx_irq(struc struct ixgbevf_adapter *adapter = q_vector->adapter; struct ixgbevf_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; - unsigned int total_bytes = 0, total_packets = 0; + unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0; unsigned int budget = tx_ring->count / 2; unsigned int i = tx_ring->next_to_clean;
@@@ -299,6 -299,8 +299,8 @@@ /* update the statistics for this packet */ total_bytes += tx_buffer->bytecount; total_packets += tx_buffer->gso_segs; + if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC) + total_ipsec++;
/* free the skb */ if (ring_is_xdp(tx_ring)) @@@ -361,6 -363,7 +363,7 @@@ u64_stats_update_end(&tx_ring->syncp); q_vector->tx.total_bytes += total_bytes; q_vector->tx.total_packets += total_packets; + adapter->tx_ipsec += total_ipsec;
if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) { struct ixgbe_hw *hw = &adapter->hw; @@@ -516,6 -519,9 +519,9 @@@ static void ixgbevf_process_skb_fields( __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); }
+ if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP)) + ixgbevf_ipsec_rx(rx_ring, rx_desc, skb); + skb->protocol = eth_type_trans(skb, rx_ring->netdev); }
@@@ -1012,7 -1018,7 +1018,7 @@@ static int ixgbevf_xmit_xdp_ring(struc context_desc = IXGBEVF_TX_CTXTDESC(ring, 0); context_desc->vlan_macip_lens = cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT); - context_desc->seqnum_seed = 0; + context_desc->fceof_saidx = 0; context_desc->type_tucmd_mlhl = cpu_to_le32(IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT); @@@ -2200,6 -2206,7 +2206,7 @@@ static void ixgbevf_configure(struct ix ixgbevf_set_rx_mode(adapter->netdev);
ixgbevf_restore_vlan(adapter); + ixgbevf_ipsec_restore(adapter);
ixgbevf_configure_tx(adapter); ixgbevf_configure_rx(adapter); @@@ -2246,7 -2253,8 +2253,8 @@@ static void ixgbevf_init_last_counter_s static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - int api[] = { ixgbe_mbox_api_13, + int api[] = { ixgbe_mbox_api_14, + ixgbe_mbox_api_13, ixgbe_mbox_api_12, ixgbe_mbox_api_11, ixgbe_mbox_api_10, @@@ -2605,6 -2613,7 +2613,7 @@@ static void ixgbevf_set_num_queues(stru case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: if (adapter->xdp_prog && hw->mac.max_tx_queues == rss) rss = rss > 3 ? 2 : 1; @@@ -3700,8 -3709,8 +3709,8 @@@ static void ixgbevf_queue_reset_subtask }
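ixgbevf_negotiate_api() keeps its candidate list ordered newest-first, so supporting mailbox API 1.4 is just a new leading entry; the VF walks the list until the PF acknowledges one of the versions. A standalone toy of that walk (the acceptance check stands in for the real mailbox exchange):

        #include <stdio.h>

        static int pf_accepts(int api)
        {
                return api <= 12;       /* pretend the PF only speaks up to 1.2 */
        }

        int main(void)
        {
                /* newest first, mirroring the driver's ordering */
                static const int api[] = { 14, 13, 12, 11, 10, -1 };
                int i = 0, chosen = -1;

                while (api[i] != -1) {
                        if (pf_accepts(api[i])) {
                                chosen = api[i];
                                break;
                        }
                        i++;
                }
                printf("negotiated mailbox API: %d\n", chosen);
                return 0;
        }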
static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, - u32 vlan_macip_lens, u32 type_tucmd, - u32 mss_l4len_idx) + u32 vlan_macip_lens, u32 fceof_saidx, + u32 type_tucmd, u32 mss_l4len_idx) { struct ixgbe_adv_tx_context_desc *context_desc; u16 i = tx_ring->next_to_use; @@@ -3715,14 -3724,15 +3724,15 @@@ type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); - context_desc->seqnum_seed = 0; + context_desc->fceof_saidx = cpu_to_le32(fceof_saidx); context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); }
static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *first, - u8 *hdr_len) + u8 *hdr_len, + struct ixgbevf_ipsec_tx_data *itd) { u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; struct sk_buff *skb = first->skb; @@@ -3736,6 -3746,7 +3746,7 @@@ unsigned char *hdr; } l4; u32 paylen, l4_offset; + u32 fceof_saidx = 0; int err;
if (skb->ip_summed != CHECKSUM_PARTIAL) @@@ -3761,13 -3772,15 +3772,15 @@@ if (ip.v4->version == 4) { unsigned char *csum_start = skb_checksum_start(skb); unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + int len = csum_start - trans_start;
/* IP header will have to cancel out any data that - * is not a part of the outer IP header + * is not a part of the outer IP header, so set to + * a reverse csum if needed, else init check to 0. */ - ip.v4->check = csum_fold(csum_partial(trans_start, - csum_start - trans_start, - 0)); + ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ? + csum_fold(csum_partial(trans_start, + len, 0)) : 0; type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
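For GSO_PARTIAL packets the TSO path now seeds ip->check with a folded ones'-complement sum over the bytes between the inner transport header and the checksum start, so the hardware's later header checksum cancels them out; for ordinary TSO it simply zeroes the field. csum_partial()/csum_fold() are the kernel primitives behind this; a small standalone model of the sum-and-fold step (byte handling simplified, not the kernel API):

        #include <stdint.h>
        #include <stdio.h>

        /* 16-bit ones'-complement sum over a byte range (cf. csum_partial) */
        static uint32_t csum_add(uint32_t sum, const uint8_t *buf, int len)
        {
                int i;

                for (i = 0; i + 1 < len; i += 2)
                        sum += ((uint32_t)buf[i] << 8) | buf[i + 1];
                if (len & 1)
                        sum += (uint32_t)buf[len - 1] << 8;
                return sum;
        }

        /* fold the carries and invert (cf. csum_fold) */
        static uint16_t csum_fold16(uint32_t sum)
        {
                while (sum >> 16)
                        sum = (sum & 0xffff) + (sum >> 16);
                return (uint16_t)~sum;
        }

        int main(void)
        {
                const uint8_t bytes[4] = { 0x12, 0x34, 0x56, 0x78 };

                printf("folded sum: 0x%04x\n", csum_fold16(csum_add(0, bytes, 4)));
                return 0;
        }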
ip.v4->tot_len = 0; @@@ -3799,13 -3812,16 +3812,16 @@@ mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
+ fceof_saidx |= itd->pfsa; + type_tucmd |= itd->flags | itd->trailer_len; + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ vlan_macip_lens = l4.hdr - ip.hdr; vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
- ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, - type_tucmd, mss_l4len_idx); + ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, + mss_l4len_idx);
return 1; } @@@ -3820,10 -3836,12 +3836,12 @@@ static inline bool ixgbevf_ipv6_csum_is }
static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, - struct ixgbevf_tx_buffer *first) + struct ixgbevf_tx_buffer *first, + struct ixgbevf_ipsec_tx_data *itd) { struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; + u32 fceof_saidx = 0; u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) @@@ -3849,6 -3867,10 +3867,10 @@@ skb_checksum_help(skb); goto no_csum; } + + if (first->protocol == htons(ETH_P_IP)) + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + /* update TX checksum flag */ first->tx_flags |= IXGBE_TX_FLAGS_CSUM; vlan_macip_lens = skb_checksum_start_offset(skb) - @@@ -3858,7 -3880,11 +3880,11 @@@ no_csum vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
- ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0); + fceof_saidx |= itd->pfsa; + type_tucmd |= itd->flags | itd->trailer_len; + + ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, + fceof_saidx, type_tucmd, 0); }
static __le32 ixgbevf_tx_cmd_type(u32 tx_flags) @@@ -3892,8 -3918,12 +3918,12 @@@ static void ixgbevf_tx_olinfo_status(un if (tx_flags & IXGBE_TX_FLAGS_IPV4) olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
- /* use index 1 context for TSO/FSO/FCOE */ - if (tx_flags & IXGBE_TX_FLAGS_TSO) + /* enable IPsec */ + if (tx_flags & IXGBE_TX_FLAGS_IPSEC) + olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IPSEC); + + /* use index 1 context for TSO/FSO/FCOE/IPSEC */ + if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_IPSEC)) olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
/* Check Context must be set if Tx switch is enabled, which it @@@ -4075,6 -4105,7 +4105,7 @@@ static int ixgbevf_xmit_frame_ring(stru int tso; u32 tx_flags = 0; u16 count = TXD_USE_COUNT(skb_headlen(skb)); + struct ixgbevf_ipsec_tx_data ipsec_tx = { 0 }; #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD unsigned short f; #endif @@@ -4119,11 -4150,15 +4150,15 @@@ first->tx_flags = tx_flags; first->protocol = vlan_get_protocol(skb);
- tso = ixgbevf_tso(tx_ring, first, &hdr_len); + #ifdef CONFIG_XFRM_OFFLOAD + if (skb->sp && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx)) + goto out_drop; + #endif + tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx); if (tso < 0) goto out_drop; else if (!tso) - ixgbevf_tx_csum(tx_ring, first); + ixgbevf_tx_csum(tx_ring, first, &ipsec_tx);
ixgbevf_tx_map(tx_ring, first, hdr_len);
@@@ -4233,6 -4268,24 +4268,6 @@@ static int ixgbevf_change_mtu(struct ne return 0; }
-#ifdef CONFIG_NET_POLL_CONTROLLER -/* Polling 'interrupt' - used by things like netconsole to send skbs - * without having to re-enable interrupts. It's not called while - * the interrupt routine is executing. - */ -static void ixgbevf_netpoll(struct net_device *netdev) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - int i; - - /* if interface is down do nothing */ - if (test_bit(__IXGBEVF_DOWN, &adapter->state)) - return; - for (i = 0; i < adapter->num_rx_queues; i++) - ixgbevf_msix_clean_rings(0, adapter->q_vector[i]); -} -#endif /* CONFIG_NET_POLL_CONTROLLER */ - static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *netdev = pci_get_drvdata(pdev); @@@ -4464,6 -4517,9 +4499,6 @@@ static const struct net_device_ops ixgb .ndo_tx_timeout = ixgbevf_tx_timeout, .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ixgbevf_netpoll, -#endif .ndo_features_check = ixgbevf_features_check, .ndo_bpf = ixgbevf_xdp, }; @@@ -4613,6 -4669,7 +4648,7 @@@ static int ixgbevf_probe(struct pci_de case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); break; @@@ -4648,6 -4705,7 +4684,7 @@@
pci_set_drvdata(pdev, netdev); netif_carrier_off(netdev); + ixgbevf_init_ipsec_offload(adapter);
ixgbevf_init_last_counter_stats(adapter);
@@@ -4714,6 -4772,7 +4751,7 @@@ static void ixgbevf_remove(struct pci_d if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev);
+ ixgbevf_stop_ipsec_offload(adapter); ixgbevf_clear_interrupt_scheme(adapter); ixgbevf_reset_interrupt_capability(adapter);
diff --combined drivers/net/ethernet/marvell/mvneta.c index b4ed7d394d07,59ed63102e14..a66f8f8552e4 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@@ -1890,8 -1890,8 +1890,8 @@@ static void mvneta_rxq_drop_pkts(struc if (!data || !(rx_desc->buf_phys_addr)) continue;
- dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, - MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); + dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr, + PAGE_SIZE, DMA_FROM_DEVICE); __free_page(data); } } @@@ -2008,8 -2008,8 +2008,8 @@@ static int mvneta_rx_swbm(struct napi_s skb_add_rx_frag(rxq->skb, frag_num, page, frag_offset, frag_size, PAGE_SIZE); - dma_unmap_single(dev->dev.parent, phys_addr, - PAGE_SIZE, DMA_FROM_DEVICE); + dma_unmap_page(dev->dev.parent, phys_addr, + PAGE_SIZE, DMA_FROM_DEVICE); rxq->left_size -= frag_size; } } else { @@@ -2039,8 -2039,9 +2039,8 @@@ frag_offset, frag_size, PAGE_SIZE);
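The mvneta RX buffers are whole pages, so the unmap calls above are switched to dma_unmap_page() with PAGE_SIZE to mirror how the buffers were mapped. A minimal sketch of the matched map/unmap pairing (kernel-style fragment, hypothetical variables, error path abbreviated):

        /* map a full page for device-to-CPU DMA ... */
        dma_addr_t pa = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

        if (dma_mapping_error(dev, pa))
                goto err;               /* the page never reached the device */

        /* ... and later undo it with the matching call and the same size */
        dma_unmap_page(dev, pa, PAGE_SIZE, DMA_FROM_DEVICE);
        __free_page(page);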
- dma_unmap_single(dev->dev.parent, phys_addr, - PAGE_SIZE, - DMA_FROM_DEVICE); + dma_unmap_page(dev->dev.parent, phys_addr, + PAGE_SIZE, DMA_FROM_DEVICE);
rxq->left_size -= frag_size; } @@@ -2064,10 -2065,7 +2064,7 @@@ /* Linux processing */ rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
- if (dev->features & NETIF_F_GRO) - napi_gro_receive(napi, rxq->skb); - else - netif_receive_skb(rxq->skb); + napi_gro_receive(napi, rxq->skb);
/* clean incomplete skb pointer in queue */ rxq->skb = NULL; @@@ -2395,7 -2393,7 +2392,7 @@@ error }
/* Main tx processing */ - static int mvneta_tx(struct sk_buff *skb, struct net_device *dev) + static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) { struct mvneta_port *pp = netdev_priv(dev); u16 txq_id = skb_get_queue_mapping(skb); @@@ -2509,12 -2507,13 +2506,13 @@@ static void mvneta_tx_done_gbe(struct m { struct mvneta_tx_queue *txq; struct netdev_queue *nq; + int cpu = smp_processor_id();
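mvneta_tx() here, and mvpp2_tx() further down, switch their return type from int to netdev_tx_t so they match the ndo_start_xmit prototype. A minimal conforming transmit handler looks roughly like this (a hedged sketch, not the driver's logic):

        static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
        {
                bool ring_full = false;         /* placeholder for the real check */

                if (ring_full)
                        return NETDEV_TX_BUSY;  /* the stack requeues and retries */

                /* ... fill a descriptor and kick the hardware here ... */
                dev_kfree_skb_any(skb);         /* placeholder: pretend it was sent */
                return NETDEV_TX_OK;
        }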
while (cause_tx_done) { txq = mvneta_tx_done_policy(pp, cause_tx_done);
nq = netdev_get_tx_queue(pp->dev, txq->id); - __netif_tx_lock(nq, smp_processor_id()); + __netif_tx_lock(nq, cpu);
if (txq->count) mvneta_txq_done(pp, txq); @@@ -3792,9 -3791,6 +3790,6 @@@ static int mvneta_open(struct net_devic goto err_free_online_hp; }
- /* In default link is down */ - netif_carrier_off(pp->dev); - ret = mvneta_mdio_probe(pp); if (ret < 0) { netdev_err(dev, "cannot probe MDIO bus\n"); @@@ -4597,7 -4593,8 +4592,8 @@@ static int mvneta_probe(struct platform } }
- dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO; + dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_RXCSUM; dev->hw_features |= dev->features; dev->vlan_features |= dev->features; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; diff --combined drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 38cc01beea79,c2ed71788e4f..2373cd41a625 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@@ -82,13 -82,19 +82,19 @@@ u32 mvpp2_read(struct mvpp2 *priv, u32 return readl(priv->swth_base[0] + offset); }
- u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) + static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset) { return readl_relaxed(priv->swth_base[0] + offset); } + + static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu) + { + return cpu % priv->nthreads; + } + /* These accessors should be used to access: * - * - per-CPU registers, where each CPU has its own copy of the + * - per-thread registers, where each thread has its own copy of the * register. * * MVPP2_BM_VIRT_ALLOC_REG @@@ -104,8 -110,8 +110,8 @@@ * MVPP2_TXQ_SENT_REG * MVPP2_RXQ_NUM_REG * - * - global registers that must be accessed through a specific CPU - * window, because they are related to an access to a per-CPU + * - global registers that must be accessed through a specific thread + * window, because they are related to an access to a per-thread * register * * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG) @@@ -122,28 -128,28 +128,28 @@@ * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG) */ - void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, + static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread, u32 offset, u32 data) { - writel(data, priv->swth_base[cpu] + offset); + writel(data, priv->swth_base[thread] + offset); }
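The mvpp2 rework renames the per-CPU register accessors to per-thread ones and adds mvpp2_cpu_to_thread(), which folds an arbitrary CPU number onto the number of register windows (threads) the unit actually exposes. A standalone model of that folding (plain C; the modulo mapping mirrors the helper added above):

        #include <stdio.h>

        /* fold an arbitrary CPU id onto the available register windows */
        static unsigned int cpu_to_thread(unsigned int cpu, unsigned int nthreads)
        {
                return cpu % nthreads;
        }

        int main(void)
        {
                unsigned int nthreads = 9;      /* e.g. windows exposed by the unit */
                unsigned int cpu;

                for (cpu = 0; cpu < 16; cpu++)
                        printf("cpu %2u -> thread %u\n",
                               cpu, cpu_to_thread(cpu, nthreads));
                return 0;
        }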
- u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, + static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread, u32 offset) { - return readl(priv->swth_base[cpu] + offset); + return readl(priv->swth_base[thread] + offset); }
- void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, + static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread, u32 offset, u32 data) { - writel_relaxed(data, priv->swth_base[cpu] + offset); + writel_relaxed(data, priv->swth_base[thread] + offset); }
- static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu, + static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread, u32 offset) { - return readl_relaxed(priv->swth_base[cpu] + offset); + return readl_relaxed(priv->swth_base[thread] + offset); }
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, @@@ -385,17 -391,17 +391,17 @@@ static void mvpp2_bm_bufs_get_addrs(str dma_addr_t *dma_addr, phys_addr_t *phys_addr) { - int cpu = get_cpu(); + unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
- *dma_addr = mvpp2_percpu_read(priv, cpu, + *dma_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); - *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG); + *phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);
if (priv->hw_version == MVPP22) { u32 val; u32 dma_addr_highbits, phys_addr_highbits;
- val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC); + val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC); dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK); phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >> MVPP22_BM_ADDR_HIGH_VIRT_SHIFT; @@@ -626,7 -632,11 +632,11 @@@ static inline void mvpp2_bm_pool_put(st dma_addr_t buf_dma_addr, phys_addr_t buf_phys_addr) { - int cpu = get_cpu(); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + unsigned long flags = 0; + + if (test_bit(thread, &port->priv->lock_map)) + spin_lock_irqsave(&port->bm_lock[thread], flags);
if (port->priv->hw_version == MVPP22) { u32 val = 0; @@@ -640,7 -650,7 +650,7 @@@ << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) & MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
- mvpp2_percpu_write_relaxed(port->priv, cpu, + mvpp2_thread_write_relaxed(port->priv, thread, MVPP22_BM_ADDR_HIGH_RLS_REG, val); }
@@@ -649,11 -659,14 +659,14 @@@ * descriptor. Instead of storing the virtual address, we * store the physical address */ - mvpp2_percpu_write_relaxed(port->priv, cpu, + mvpp2_thread_write_relaxed(port->priv, thread, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); - mvpp2_percpu_write_relaxed(port->priv, cpu, + mvpp2_thread_write_relaxed(port->priv, thread, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
+ if (test_bit(thread, &port->priv->lock_map)) + spin_unlock_irqrestore(&port->bm_lock[thread], flags); + put_cpu(); }
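When more CPUs exist than register windows, several CPUs share a thread, so the buffer-manager release path above takes a per-thread spinlock, but only for threads flagged in lock_map; the common one-CPU-per-window case stays lock-free. A minimal sketch of that conditional locking pattern (kernel-style fragment, structure fields as in the hunk, surrounding code omitted):

        unsigned long flags = 0;
        unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());

        if (test_bit(thread, &port->priv->lock_map))
                spin_lock_irqsave(&port->bm_lock[thread], flags);

        /* ... per-thread register writes for the buffer release ... */

        if (test_bit(thread, &port->priv->lock_map))
                spin_unlock_irqrestore(&port->bm_lock[thread], flags);
        put_cpu();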
@@@ -886,7 -899,7 +899,7 @@@ static inline void mvpp2_qvec_interrupt MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask)); }
- /* Mask the current CPU's Rx/Tx interrupts + /* Mask the current thread's Rx/Tx interrupts * Called by on_each_cpu(), guaranteed to run with migration disabled, * using smp_processor_id() is OK. */ @@@ -894,11 -907,16 +907,16 @@@ static void mvpp2_interrupts_mask(void { struct mvpp2_port *port = arg;
- mvpp2_percpu_write(port->priv, smp_processor_id(), + /* If the thread isn't used, don't do anything */ + if (smp_processor_id() > port->priv->nthreads) + return; + + mvpp2_thread_write(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_ISR_RX_TX_MASK_REG(port->id), 0); }
- /* Unmask the current CPU's Rx/Tx interrupts. + /* Unmask the current thread's Rx/Tx interrupts. * Called by on_each_cpu(), guaranteed to run with migration disabled, * using smp_processor_id() is OK. */ @@@ -907,12 -925,17 +925,17 @@@ static void mvpp2_interrupts_unmask(voi struct mvpp2_port *port = arg; u32 val;
+ /* If the thread isn't used, don't do anything */ + if (smp_processor_id() > port->priv->nthreads) + return; + val = MVPP2_CAUSE_MISC_SUM_MASK | - MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); if (port->has_tx_irqs) val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
- mvpp2_percpu_write(port->priv, smp_processor_id(), + mvpp2_thread_write(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_ISR_RX_TX_MASK_REG(port->id), val); }
@@@ -928,7 -951,7 +951,7 @@@ mvpp2_shared_interrupt_mask_unmask(stru if (mask) val = 0; else - val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);
for (i = 0; i < port->nqvecs; i++) { struct mvpp2_queue_vector *v = port->qvecs + i; @@@ -936,7 -959,7 +959,7 @@@ if (v->type != MVPP2_QUEUE_VECTOR_SHARED) continue;
- mvpp2_percpu_write(port->priv, v->sw_thread_id, + mvpp2_thread_write(port->priv, v->sw_thread_id, MVPP2_ISR_RX_TX_MASK_REG(port->id), val); } } @@@ -1425,6 -1448,9 +1448,9 @@@ static void mvpp2_defaults_set(struct m tx_port_num); mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
+ /* Set TXQ scheduling to Round-Robin */ + mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0); + /* Close bandwidth for all queues */ for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) { ptxq = mvpp2_txq_phys(port->id, queue); @@@ -1624,7 -1650,8 +1650,8 @@@ mvpp2_txq_next_desc_get(struct mvpp2_tx static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) { /* aggregated access - relevant TXQ number is written in TX desc */ - mvpp2_percpu_write(port->priv, smp_processor_id(), + mvpp2_thread_write(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_AGGR_TXQ_UPDATE_REG, pending); }
@@@ -1634,14 -1661,15 +1661,15 @@@ * Called only from mvpp2_tx(), so migration is disabled, using * smp_processor_id() is OK. */ - static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv, + static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port, struct mvpp2_tx_queue *aggr_txq, int num) { if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) { /* Update number of occupied aggregated Tx descriptors */ - int cpu = smp_processor_id(); - u32 val = mvpp2_read_relaxed(priv, - MVPP2_AGGR_TXQ_STATUS_REG(cpu)); + unsigned int thread = + mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + u32 val = mvpp2_read_relaxed(port->priv, + MVPP2_AGGR_TXQ_STATUS_REG(thread));
aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
@@@ -1657,16 -1685,17 +1685,17 @@@ * only by mvpp2_tx(), so migration is disabled, using * smp_processor_id() is OK. */ - static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv, + static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, int num) { + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + struct mvpp2 *priv = port->priv; u32 val; - int cpu = smp_processor_id();
val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; - mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val); + mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);
- val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG); + val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);
return val & MVPP2_TXQ_RSVD_RSLT_MASK; } @@@ -1674,12 -1703,13 +1703,13 @@@ /* Check if there are enough reserved descriptors for transmission. * If not, request chunk of reserved descriptors and check again. */ - static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv, + static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, struct mvpp2_txq_pcpu *txq_pcpu, int num) { - int req, cpu, desc_count; + int req, desc_count; + unsigned int thread;
if (txq_pcpu->reserved_num >= num) return 0; @@@ -1690,10 -1720,10 +1720,10 @@@
desc_count = 0; /* Compute total of used descriptors */ - for_each_present_cpu(cpu) { + for (thread = 0; thread < port->priv->nthreads; thread++) { struct mvpp2_txq_pcpu *txq_pcpu_aux;
- txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu); + txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread); desc_count += txq_pcpu_aux->count; desc_count += txq_pcpu_aux->reserved_num; } @@@ -1702,10 -1732,10 +1732,10 @@@ desc_count += req;
if (desc_count > - (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK))) + (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK))) return -ENOMEM;
- txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req); + txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);
/* OK, the descriptor could have been updated: check again. */ if (txq_pcpu->reserved_num < num) @@@ -1759,7 -1789,7 +1789,7 @@@ static u32 mvpp2_txq_desc_csum(int l3_o
/* Get number of sent descriptors and decrement counter. * The number of sent descriptors is returned. - * Per-CPU access + * Per-thread access * * Called only from mvpp2_txq_done(), called from mvpp2_tx() * (migration disabled) and from the TX completion tasklet (migration @@@ -1771,7 -1801,8 +1801,8 @@@ static inline int mvpp2_txq_sent_desc_p u32 val;
/* Reading status reg resets transmitted descriptor counter */ - val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(), + val = mvpp2_thread_read_relaxed(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_TXQ_SENT_REG(txq->id));
return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> @@@ -1786,10 -1817,15 +1817,15 @@@ static void mvpp2_txq_sent_counter_clea struct mvpp2_port *port = arg; int queue;
+ /* If the thread isn't used, don't do anything */ + if (smp_processor_id() > port->priv->nthreads) + return; + for (queue = 0; queue < port->ntxqs; queue++) { int id = port->txqs[queue]->id;
- mvpp2_percpu_read(port->priv, smp_processor_id(), + mvpp2_thread_read(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_TXQ_SENT_REG(id)); } } @@@ -1849,13 -1885,13 +1885,13 @@@ static void mvpp2_txp_max_tx_size_set(s static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { - int cpu = get_cpu(); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG, rxq->pkts_coal);
put_cpu(); @@@ -1865,15 -1901,15 +1901,15 @@@ static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) { - int cpu = get_cpu(); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); u32 val;
if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
put_cpu(); } @@@ -1974,7 -2010,7 +2010,7 @@@ static void mvpp2_txq_done(struct mvpp2 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); int tx_done;
- if (txq_pcpu->cpu != smp_processor_id()) + if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id())) netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
tx_done = mvpp2_txq_sent_desc_proc(port, txq); @@@ -1990,7 -2026,7 +2026,7 @@@ }
static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, - int cpu) + unsigned int thread) { struct mvpp2_tx_queue *txq; struct mvpp2_txq_pcpu *txq_pcpu; @@@ -2001,7 -2037,7 +2037,7 @@@ if (!txq) break;
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
if (txq_pcpu->count) { mvpp2_txq_done(port, txq, txq_pcpu); @@@ -2017,8 -2053,8 +2053,8 @@@
/* Allocate and initialize descriptors for aggr TXQ */ static int mvpp2_aggr_txq_init(struct platform_device *pdev, - struct mvpp2_tx_queue *aggr_txq, int cpu, - struct mvpp2 *priv) + struct mvpp2_tx_queue *aggr_txq, + unsigned int thread, struct mvpp2 *priv) { u32 txq_dma;
@@@ -2033,7 -2069,7 +2069,7 @@@
/* Aggr TXQ no reset WA */ aggr_txq->next_desc_to_proc = mvpp2_read(priv, - MVPP2_AGGR_TXQ_INDEX_REG(cpu)); + MVPP2_AGGR_TXQ_INDEX_REG(thread));
/* Set Tx descriptors queue starting address indirect * access @@@ -2044,8 -2080,8 +2080,8 @@@ txq_dma = aggr_txq->descs_dma >> MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
- mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma); - mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma); + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread), MVPP2_AGGR_TXQ_SIZE);
return 0; @@@ -2056,8 -2092,8 +2092,8 @@@ static int mvpp2_rxq_init(struct mvpp2_ struct mvpp2_rx_queue *rxq)
{ + unsigned int thread; u32 rxq_dma; - int cpu;
rxq->size = port->rx_ring_size;
@@@ -2074,15 -2110,15 +2110,15 @@@ mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
/* Set Rx descriptors queue starting address - indirect access */ - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); if (port->priv->hw_version == MVPP21) rxq_dma = rxq->descs_dma; else rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0); put_cpu();
/* Set Offset */ @@@ -2127,7 -2163,7 +2163,7 @@@ static void mvpp2_rxq_drop_pkts(struct static void mvpp2_rxq_deinit(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { - int cpu; + unsigned int thread;
mvpp2_rxq_drop_pkts(port, rxq);
@@@ -2146,10 -2182,10 +2182,10 @@@ * free descriptor number */ mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0); put_cpu(); }
@@@ -2158,7 -2194,8 +2194,8 @@@ static int mvpp2_txq_init(struct mvpp2_ struct mvpp2_tx_queue *txq) { u32 val; - int cpu, desc, desc_per_txq, tx_port_num; + unsigned int thread; + int desc, desc_per_txq, tx_port_num; struct mvpp2_txq_pcpu *txq_pcpu;
txq->size = port->tx_ring_size; @@@ -2173,18 -2210,18 +2210,18 @@@ txq->last_desc = txq->size - 1;
/* Set Tx descriptors queue starting address - indirect access */ - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, txq->size & MVPP2_TXQ_DESC_SIZE_MASK); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG, + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG, txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); - val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG); + val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG); val &= ~MVPP2_TXQ_PENDING_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);
/* Calculate base address in prefetch buffer. We reserve 16 descriptors * for each existing TXQ. @@@ -2195,7 -2232,7 +2232,7 @@@ desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + (txq->log_id * desc_per_txq);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); put_cpu(); @@@ -2214,8 -2251,8 +2251,8 @@@ mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), val);
- for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + for (thread = 0; thread < port->priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); txq_pcpu->size = txq->size; txq_pcpu->buffs = kmalloc_array(txq_pcpu->size, sizeof(*txq_pcpu->buffs), @@@ -2249,10 -2286,10 +2286,10 @@@ static void mvpp2_txq_deinit(struct mvp struct mvpp2_tx_queue *txq) { struct mvpp2_txq_pcpu *txq_pcpu; - int cpu; + unsigned int thread;
- for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + for (thread = 0; thread < port->priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); kfree(txq_pcpu->buffs);
if (txq_pcpu->tso_headers) @@@ -2278,10 -2315,10 +2315,10 @@@ mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
/* Set Tx descriptors queue starting address and size */ - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0); put_cpu(); }
@@@ -2289,14 -2326,14 +2326,14 @@@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) { struct mvpp2_txq_pcpu *txq_pcpu; - int delay, pending, cpu; + int delay, pending; + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); u32 val;
- cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG); val |= MVPP2_TXQ_DRAIN_EN_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
/* The napi queue has been stopped so wait for all packets * to be transmitted. @@@ -2312,17 -2349,17 +2349,17 @@@ mdelay(1); delay++;
- pending = mvpp2_percpu_read(port->priv, cpu, + pending = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG); pending &= MVPP2_TXQ_PENDING_MASK; } while (pending);
val &= ~MVPP2_TXQ_DRAIN_EN_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); put_cpu();
- for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + for (thread = 0; thread < port->priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
/* Release all packets */ mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); @@@ -2389,13 -2426,17 +2426,17 @@@ err_cleanup static int mvpp2_setup_txqs(struct mvpp2_port *port) { struct mvpp2_tx_queue *txq; - int queue, err; + int queue, err, cpu;
for (queue = 0; queue < port->ntxqs; queue++) { txq = port->txqs[queue]; err = mvpp2_txq_init(port, txq); if (err) goto err_cleanup; + + /* Assign this queue to a CPU */ + cpu = queue % num_present_cpus(); + netif_set_xps_queue(port->dev, cpumask_of(cpu), queue); }
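Each Tx queue is now bound to a CPU through XPS right after it is initialized, so transmissions issued on a given CPU are steered to the queue assigned to it. Worked example of the queue % num_present_cpus() mapping with four present CPUs and eight Tx queues:

	/*
	 * queue 0 -> CPU 0     queue 4 -> CPU 0
	 * queue 1 -> CPU 1     queue 5 -> CPU 1
	 * queue 2 -> CPU 2     queue 6 -> CPU 2
	 * queue 3 -> CPU 3     queue 7 -> CPU 3
	 */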
if (port->has_tx_irqs) { @@@ -2503,16 -2544,20 +2544,20 @@@ static void mvpp2_tx_proc_cb(unsigned l { struct net_device *dev = (struct net_device *)data; struct mvpp2_port *port = netdev_priv(dev); - struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); + struct mvpp2_port_pcpu *port_pcpu; unsigned int tx_todo, cause;
+ port_pcpu = per_cpu_ptr(port->pcpu, + mvpp2_cpu_to_thread(port->priv, smp_processor_id())); + if (!netif_running(dev)) return; port_pcpu->timer_scheduled = false;
/* Process all the Tx queues */ cause = (1 << port->ntxqs) - 1; - tx_todo = mvpp2_tx_done(port, cause, smp_processor_id()); + tx_todo = mvpp2_tx_done(port, cause, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
/* Set the timer in case not all the packets were processed */ if (tx_todo) @@@ -2728,7 -2773,8 +2773,8 @@@ static inline voi tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, struct mvpp2_tx_desc *desc) { - struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
dma_addr_t buf_dma_addr = mvpp2_txdesc_dma_addr_get(port, desc); @@@ -2745,7 -2791,8 +2791,8 @@@ static int mvpp2_tx_frag_process(struc struct mvpp2_tx_queue *aggr_txq, struct mvpp2_tx_queue *txq) { - struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); struct mvpp2_tx_desc *tx_desc; int i; dma_addr_t buf_dma_addr; @@@ -2864,9 -2911,8 +2911,8 @@@ static int mvpp2_tx_tso(struct sk_buff int i, len, descs = 0;
/* Check number of available descriptors */ - if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, - tso_count_descs(skb)) || - mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu, + if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) || + mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, tso_count_descs(skb))) return 0;
@@@ -2906,21 -2952,28 +2952,28 @@@ release }
/* Main tx processing */ - static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) + static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); struct mvpp2_tx_queue *txq, *aggr_txq; struct mvpp2_txq_pcpu *txq_pcpu; struct mvpp2_tx_desc *tx_desc; dma_addr_t buf_dma_addr; + unsigned long flags = 0; + unsigned int thread; int frags = 0; u16 txq_id; u32 tx_cmd;
+ thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + txq_id = skb_get_queue_mapping(skb); txq = port->txqs[txq_id]; - txq_pcpu = this_cpu_ptr(txq->pcpu); - aggr_txq = &port->priv->aggr_txqs[smp_processor_id()]; + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); + aggr_txq = &port->priv->aggr_txqs[thread]; + + if (test_bit(thread, &port->priv->lock_map)) + spin_lock_irqsave(&port->tx_lock[thread], flags);
if (skb_is_gso(skb)) { frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu); @@@ -2929,9 -2982,8 +2982,8 @@@ frags = skb_shinfo(skb)->nr_frags + 1;
/* Check number of available descriptors */ - if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) || - mvpp2_txq_reserved_desc_num_proc(port->priv, txq, - txq_pcpu, frags)) { + if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) || + mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) { frags = 0; goto out; } @@@ -2973,7 -3025,7 +3025,7 @@@
out: if (frags > 0) { - struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); + struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread); struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
txq_pcpu->reserved_num -= frags; @@@ -3003,11 -3055,14 +3055,14 @@@ /* Set the timer in case not all frags were processed */ if (!port->has_tx_irqs && txq_pcpu->count <= frags && txq_pcpu->count > 0) { - struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); + struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
mvpp2_timer_set(port_pcpu); }
+ if (test_bit(thread, &port->priv->lock_map)) + spin_unlock_irqrestore(&port->tx_lock[thread], flags); + return NETDEV_TX_OK; }
@@@ -3027,7 -3082,7 +3082,7 @@@ static int mvpp2_poll(struct napi_struc int rx_done = 0; struct mvpp2_port *port = netdev_priv(napi->dev); struct mvpp2_queue_vector *qv; - int cpu = smp_processor_id(); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
qv = container_of(napi, struct mvpp2_queue_vector, napi);
@@@ -3041,7 -3096,7 +3096,7 @@@ * * Each CPU has its own Rx/Tx cause register */ - cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id, + cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id, MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; @@@ -3050,21 -3105,20 +3105,22 @@@
/* Clear the cause register */ mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); - mvpp2_percpu_write(port->priv, cpu, + mvpp2_thread_write(port->priv, thread, MVPP2_ISR_RX_TX_CAUSE_REG(port->id), cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); }
- cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; - if (cause_tx) { - cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; - mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); + if (port->has_tx_irqs) { + cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; + if (cause_tx) { + cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; + mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); + } }
/* Process RX packets */ - cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + cause_rx = cause_rx_tx & + MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); cause_rx <<= qv->first_rxq; cause_rx |= qv->pending_cause_rx; while (cause_rx && budget > 0) { @@@ -3139,14 -3193,13 +3195,13 @@@ static void mvpp2_start_dev(struct mvpp for (i = 0; i < port->nqvecs; i++) napi_enable(&port->qvecs[i].napi);
- /* Enable interrupts on all CPUs */ + /* Enable interrupts on all threads */ mvpp2_interrupts_enable(port);
if (port->priv->hw_version == MVPP22) mvpp22_mode_reconfigure(port);
if (port->phylink) { - netif_carrier_off(port->dev); phylink_start(port->phylink); } else { /* Phylink isn't used as of now for ACPI, so the MAC has to be @@@ -3169,7 -3222,7 +3224,7 @@@ static void mvpp2_stop_dev(struct mvpp2 { int i;
- /* Disable interrupts on all CPUs */ + /* Disable interrupts on all threads */ mvpp2_interrupts_disable(port);
for (i = 0; i < port->nqvecs; i++) @@@ -3249,9 -3302,18 +3304,18 @@@ static int mvpp2_irqs_init(struct mvpp2 if (err) goto err;
- if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) - irq_set_affinity_hint(qv->irq, - cpumask_of(qv->sw_thread_id)); + if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { + unsigned long mask = 0; + unsigned int cpu; + + for_each_present_cpu(cpu) { + if (mvpp2_cpu_to_thread(port->priv, cpu) == + qv->sw_thread_id) + mask |= BIT(cpu); + } + + irq_set_affinity_hint(qv->irq, to_cpumask(&mask)); + } }
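Because several CPUs can now share one software thread, the affinity hint for a private queue vector is built as a mask of every present CPU that maps to qv->sw_thread_id rather than a single CPU: for illustration, if CPUs 1 and 5 both mapped to thread 1, that vector's hint would become the mask {CPU1, CPU5} instead of cpumask_of(1).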
return 0; @@@ -3395,11 -3457,11 +3459,11 @@@ static int mvpp2_stop(struct net_devic { struct mvpp2_port *port = netdev_priv(dev); struct mvpp2_port_pcpu *port_pcpu; - int cpu; + unsigned int thread;
mvpp2_stop_dev(port);
- /* Mask interrupts on all CPUs */ + /* Mask interrupts on all threads */ on_each_cpu(mvpp2_interrupts_mask, port, 1); mvpp2_shared_interrupt_mask_unmask(port, true);
@@@ -3410,8 -3472,8 +3474,8 @@@
mvpp2_irqs_deinit(port); if (!port->has_tx_irqs) { - for_each_present_cpu(cpu) { - port_pcpu = per_cpu_ptr(port->pcpu, cpu); + for (thread = 0; thread < port->priv->nthreads; thread++) { + port_pcpu = per_cpu_ptr(port->pcpu, thread);
hrtimer_cancel(&port_pcpu->tx_done_timer); port_pcpu->timer_scheduled = false; @@@ -3556,7 -3618,7 +3620,7 @@@ mvpp2_get_stats64(struct net_device *de { struct mvpp2_port *port = netdev_priv(dev); unsigned int start; - int cpu; + unsigned int cpu;
for_each_possible_cpu(cpu) { struct mvpp2_pcpu_stats *cpu_stats; @@@ -3983,12 -4045,18 +4047,18 @@@ static int mvpp2_simple_queue_vectors_i static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, struct device_node *port_node) { + struct mvpp2 *priv = port->priv; struct mvpp2_queue_vector *v; int i, ret;
- port->nqvecs = num_possible_cpus(); - if (queue_mode == MVPP2_QDIST_SINGLE_MODE) - port->nqvecs += 1; + switch (queue_mode) { + case MVPP2_QDIST_SINGLE_MODE: + port->nqvecs = priv->nthreads + 1; + break; + case MVPP2_QDIST_MULTI_MODE: + port->nqvecs = priv->nthreads; + break; + }
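The number of queue vectors is now derived from the number of software threads rather than from num_possible_cpus(). As a worked example, with priv->nthreads == 4: MVPP2_QDIST_MULTI_MODE yields 4 vectors (one per thread, each owning its own Rx queues), while MVPP2_QDIST_SINGLE_MODE yields 5 (one per-thread Tx-done vector plus one shared Rx vector).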
for (i = 0; i < port->nqvecs; i++) { char irqname[16]; @@@ -4000,7 -4068,10 +4070,10 @@@ v->sw_thread_id = i; v->sw_thread_mask = BIT(i);
- snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); + if (port->flags & MVPP2_F_DT_COMPAT) + snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); + else + snprintf(irqname, sizeof(irqname), "hif%d", i);
if (queue_mode == MVPP2_QDIST_MULTI_MODE) { v->first_rxq = i * MVPP2_DEFAULT_RXQ; @@@ -4010,7 -4081,9 +4083,9 @@@ v->first_rxq = 0; v->nrxqs = port->nrxqs; v->type = MVPP2_QUEUE_VECTOR_SHARED; - strncpy(irqname, "rx-shared", sizeof(irqname)); + + if (port->flags & MVPP2_F_DT_COMPAT) + strncpy(irqname, "rx-shared", sizeof(irqname)); }
if (port_node) @@@ -4087,7 -4160,8 +4162,8 @@@ static int mvpp2_port_init(struct mvpp2 struct device *dev = port->dev->dev.parent; struct mvpp2 *priv = port->priv; struct mvpp2_txq_pcpu *txq_pcpu; - int queue, cpu, err; + unsigned int thread; + int queue, err;
/* Checks for hardware constraints */ if (port->first_rxq + port->nrxqs > @@@ -4131,9 -4205,9 +4207,9 @@@ txq->id = queue_phy_id; txq->log_id = queue; txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; - for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); - txq_pcpu->cpu = cpu; + for (thread = 0; thread < priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); + txq_pcpu->thread = thread; }
port->txqs[queue] = txq; @@@ -4206,24 -4280,51 +4282,51 @@@ err_free_percpu return err; }
- /* Checks if the port DT description has the TX interrupts - * described. On PPv2.1, there are no such interrupts. On PPv2.2, - * there are available, but we need to keep support for old DTs. + static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node, + unsigned long *flags) + { + char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2", + "tx-cpu3" }; + int i; + + for (i = 0; i < 5; i++) + if (of_property_match_string(port_node, "interrupt-names", + irqs[i]) < 0) + return false; + + *flags |= MVPP2_F_DT_COMPAT; + return true; + } + + /* Checks if the port dt description has the required Tx interrupts: + * - PPv2.1: there are no such interrupts. + * - PPv2.2: + * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3] + * - The new ones have: "hifX" with X in [0..8] + * + * All those variants are supported to keep the backward compatibility. */ - static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv, - struct device_node *port_node) + static bool mvpp2_port_has_irqs(struct mvpp2 *priv, + struct device_node *port_node, + unsigned long *flags) { - char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", - "tx-cpu2", "tx-cpu3" }; - int ret, i; + char name[5]; + int i; + + /* ACPI */ + if (!port_node) + return true;
if (priv->hw_version == MVPP21) return false;
- for (i = 0; i < 5; i++) { - ret = of_property_match_string(port_node, "interrupt-names", - irqs[i]); - if (ret < 0) + if (mvpp22_port_has_legacy_tx_irqs(port_node, flags)) + return true; + + for (i = 0; i < MVPP2_MAX_THREADS; i++) { + snprintf(name, 5, "hif%d", i); + if (of_property_match_string(port_node, "interrupt-names", + name) < 0) return false; }
@@@ -4600,23 -4701,21 +4703,21 @@@ static int mvpp2_port_probe(struct plat struct resource *res; struct phylink *phylink; char *mac_from = ""; - unsigned int ntxqs, nrxqs; + unsigned int ntxqs, nrxqs, thread; + unsigned long flags = 0; bool has_tx_irqs; u32 id; int features; int phy_mode; - int err, i, cpu; + int err, i;
- if (port_node) { - has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node); - } else { - has_tx_irqs = true; - queue_mode = MVPP2_QDIST_MULTI_MODE; + has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags); + if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) { + dev_err(&pdev->dev, + "not enough IRQs to support multi queue mode\n"); + return -EINVAL; }
- if (!has_tx_irqs) - queue_mode = MVPP2_QDIST_SINGLE_MODE; - ntxqs = MVPP2_MAX_TXQ; if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE) nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus(); @@@ -4664,6 -4763,7 +4765,7 @@@ port->nrxqs = nrxqs; port->priv = priv; port->has_tx_irqs = has_tx_irqs; + port->flags = flags;
err = mvpp2_queue_vectors_init(port, port_node); if (err) @@@ -4760,8 -4860,8 +4862,8 @@@ }
if (!port->has_tx_irqs) { - for_each_present_cpu(cpu) { - port_pcpu = per_cpu_ptr(port->pcpu, cpu); + for (thread = 0; thread < priv->nthreads; thread++) { + port_pcpu = per_cpu_ptr(port->pcpu, thread);
hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); @@@ -5045,13 -5145,13 +5147,13 @@@ static int mvpp2_init(struct platform_d }
/* Allocate and initialize aggregated TXQs */ - priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(), + priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS, sizeof(*priv->aggr_txqs), GFP_KERNEL); if (!priv->aggr_txqs) return -ENOMEM;
- for_each_present_cpu(i) { + for (i = 0; i < MVPP2_MAX_THREADS; i++) { priv->aggr_txqs[i].id = i; priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv); @@@ -5098,7 -5198,7 +5200,7 @@@ static int mvpp2_probe(struct platform_ struct mvpp2 *priv; struct resource *res; void __iomem *base; - int i; + int i, shared; int err;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); @@@ -5163,6 -5263,15 +5265,15 @@@
mvpp2_setup_bm_pool();
+ + priv->nthreads = min_t(unsigned int, num_present_cpus(), + MVPP2_MAX_THREADS); + + shared = num_present_cpus() - priv->nthreads; + if (shared > 0) + bitmap_fill(&priv->lock_map, + min_t(int, shared, MVPP2_MAX_THREADS)); + for (i = 0; i < MVPP2_MAX_THREADS; i++) { u32 addr_space_sz;
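The probe path caps the number of software threads at MVPP2_MAX_THREADS; when there are more present CPUs than threads, the surplus CPUs have to share register windows, and the corresponding bits set in priv->lock_map make the xmit path take port->tx_lock[thread] (see the spin_lock_irqsave/unlock pair added in mvpp2_tx() above). Symbolically:

	/*
	 *   nthreads = min(num_present_cpus(), MVPP2_MAX_THREADS);
	 *   shared   = num_present_cpus() - nthreads;
	 *
	 * shared == 0: every CPU owns a private window, lock_map stays empty
	 *              and mvpp2_tx() runs lock-free as before.
	 * shared  > 0: CPUs beyond nthreads reuse existing windows, so the
	 *              threads flagged in lock_map serialise their TX path
	 *              with the per-thread spinlock.
	 */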
@@@ -5337,7 -5446,7 +5448,7 @@@ static int mvpp2_remove(struct platform mvpp2_bm_pool_destroy(pdev, priv, bm_pool); }
- for_each_present_cpu(i) { + for (i = 0; i < MVPP2_MAX_THREADS; i++) { struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
dma_free_coherent(&pdev->dev, diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 54118b77dc1f,d14c4051edd8..5955b4d844cc --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@@ -3049,8 -3049,8 +3049,8 @@@ static int mlx5e_alloc_drop_cq(struct m return mlx5e_alloc_cq_common(mdev, param, cq); }
- static int mlx5e_open_drop_rq(struct mlx5e_priv *priv, - struct mlx5e_rq *drop_rq) + int mlx5e_open_drop_rq(struct mlx5e_priv *priv, + struct mlx5e_rq *drop_rq) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_cq_param cq_param = {}; @@@ -3094,7 -3094,7 +3094,7 @@@ err_free_cq return err; }
- static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq) + void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq) { mlx5e_destroy_rq(drop_rq); mlx5e_free_rq(drop_rq); @@@ -4315,6 -4315,22 +4315,6 @@@ static int mlx5e_xdp(struct net_device } }
-#ifdef CONFIG_NET_POLL_CONTROLLER -/* Fake "interrupt" called by netpoll (eg netconsole) to send skbs without - * reenabling interrupts. - */ -static void mlx5e_netpoll(struct net_device *dev) -{ - struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5e_channels *chs = &priv->channels; - - int i; - - for (i = 0; i < chs->num; i++) - napi_schedule(&chs->c[i]->napi); -} -#endif - static const struct net_device_ops mlx5e_netdev_ops = { .ndo_open = mlx5e_open, .ndo_stop = mlx5e_close, @@@ -4340,6 -4356,9 +4340,6 @@@ #ifdef CONFIG_MLX5_EN_ARFS .ndo_rx_flow_steer = mlx5e_rx_flow_steer, #endif -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = mlx5e_netpoll, -#endif #ifdef CONFIG_MLX5_ESWITCH /* SRIOV E-Switch NDOs */ .ndo_set_vf_mac = mlx5e_set_vf_mac, @@@ -4707,7 -4726,7 +4707,7 @@@ static void mlx5e_build_nic_netdev(stru mlx5e_tls_build_netdev(priv); }
- static void mlx5e_create_q_counters(struct mlx5e_priv *priv) + void mlx5e_create_q_counters(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; int err; @@@ -4725,7 -4744,7 +4725,7 @@@ } }
- static void mlx5e_destroy_q_counters(struct mlx5e_priv *priv) + void mlx5e_destroy_q_counters(struct mlx5e_priv *priv) { if (priv->q_counter) mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter); @@@ -4764,9 -4783,17 +4764,17 @@@ static int mlx5e_init_nic_rx(struct mlx struct mlx5_core_dev *mdev = priv->mdev; int err;
+ mlx5e_create_q_counters(priv); + + err = mlx5e_open_drop_rq(priv, &priv->drop_rq); + if (err) { + mlx5_core_err(mdev, "open drop rq failed, %d\n", err); + goto err_destroy_q_counters; + } + err = mlx5e_create_indirect_rqt(priv); if (err) - return err; + goto err_close_drop_rq;
err = mlx5e_create_direct_rqts(priv); if (err) @@@ -4802,6 -4829,10 +4810,10 @@@ err_destroy_direct_rqts mlx5e_destroy_direct_rqts(priv); err_destroy_indirect_rqts: mlx5e_destroy_rqt(priv, &priv->indir_rqt); + err_close_drop_rq: + mlx5e_close_drop_rq(&priv->drop_rq); + err_destroy_q_counters: + mlx5e_destroy_q_counters(priv); return err; }
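With this change the drop RQ and the queue counters are created and destroyed by the profile's Rx init/cleanup callbacks instead of mlx5e_attach_netdev()/mlx5e_detach_netdev(). A sketch of the resulting ordering (summary only, not literal code):

	/*
	 *   profile->init_rx():
	 *       mlx5e_create_q_counters()
	 *       mlx5e_open_drop_rq()
	 *       create indirect/direct RQTs, TIRs, flow steering, ...
	 *
	 *   profile->cleanup_rx():
	 *       ... tear down TIRs/RQTs ...
	 *       mlx5e_close_drop_rq()
	 *       mlx5e_destroy_q_counters()
	 *
	 * attach/detach no longer touch the drop RQ or queue counters
	 * directly; failures inside init_rx unwind in reverse order.
	 */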
@@@ -4813,6 -4844,8 +4825,8 @@@ static void mlx5e_cleanup_nic_rx(struc mlx5e_destroy_indirect_tirs(priv); mlx5e_destroy_direct_rqts(priv); mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5e_close_drop_rq(&priv->drop_rq); + mlx5e_destroy_q_counters(priv); }
static int mlx5e_init_nic_tx(struct mlx5e_priv *priv) @@@ -4956,7 -4989,6 +4970,6 @@@ err_cleanup_nic
int mlx5e_attach_netdev(struct mlx5e_priv *priv) { - struct mlx5_core_dev *mdev = priv->mdev; const struct mlx5e_profile *profile; int err;
@@@ -4967,28 -4999,16 +4980,16 @@@ if (err) goto out;
- mlx5e_create_q_counters(priv); - - err = mlx5e_open_drop_rq(priv, &priv->drop_rq); - if (err) { - mlx5_core_err(mdev, "open drop rq failed, %d\n", err); - goto err_destroy_q_counters; - } - err = profile->init_rx(priv); if (err) - goto err_close_drop_rq; + goto err_cleanup_tx;
if (profile->enable) profile->enable(priv);
return 0;
- err_close_drop_rq: - mlx5e_close_drop_rq(&priv->drop_rq); - - err_destroy_q_counters: - mlx5e_destroy_q_counters(priv); + err_cleanup_tx: profile->cleanup_tx(priv);
out: @@@ -5006,8 -5026,6 +5007,6 @@@ void mlx5e_detach_netdev(struct mlx5e_p flush_workqueue(priv->wq);
profile->cleanup_rx(priv); - mlx5e_close_drop_rq(&priv->drop_rq); - mlx5e_destroy_q_counters(priv); profile->cleanup_tx(priv); cancel_delayed_work_sync(&priv->update_stats_work); } diff --combined drivers/net/ethernet/mellanox/mlxsw/spectrum.c index b492152c8881,33a024274a1f..88c33a8474eb --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@@ -44,8 -44,8 +44,8 @@@ #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
#define MLXSW_SP1_FWREV_MAJOR 13 -#define MLXSW_SP1_FWREV_MINOR 1702 -#define MLXSW_SP1_FWREV_SUBMINOR 6 +#define MLXSW_SP1_FWREV_MINOR 1703 +#define MLXSW_SP1_FWREV_SUBMINOR 4 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { @@@ -331,7 -331,10 +331,10 @@@ static int mlxsw_sp_fw_rev_validate(str return -EINVAL; } if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) == - MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor)) + MLXSW_SP_FWREV_MINOR_TO_BRANCH(req_rev->minor) && + (rev->minor > req_rev->minor || + (rev->minor == req_rev->minor && + rev->subminor >= req_rev->subminor))) return 0;
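The firmware check above no longer accepts any revision on the required branch; the flashed firmware must also be at least as new as the required minor/subminor. A compact restatement of the accept condition, as a sketch (struct and macro names follow the driver, error reporting omitted):

	/* Sketch: firmware is acceptable when the branch matches and the
	 * revision is not older than the required one.
	 */
	static bool mlxsw_sp_fw_rev_ok(const struct mlxsw_fw_rev *rev,
				       const struct mlxsw_fw_rev *req)
	{
		if (MLXSW_SP_FWREV_MINOR_TO_BRANCH(rev->minor) !=
		    MLXSW_SP_FWREV_MINOR_TO_BRANCH(req->minor))
			return false;

		return rev->minor > req->minor ||
		       (rev->minor == req->minor &&
			rev->subminor >= req->subminor);
	}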
dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver\n", @@@ -2804,6 -2807,13 +2807,13 @@@ static int mlxsw_sp_port_ets_init(struc MLXSW_REG_QEEC_MAS_DIS); if (err) return err; + + err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_TC, + i + 8, i, + MLXSW_REG_QEEC_MAS_DIS); + if (err) + return err; }
/* Map all priorities to traffic class 0. */ diff --combined drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 8ed38fd5a852,6dab45c1f21e..d05e37fcc1b2 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@@ -2091,10 -2091,10 +2091,10 @@@ static void nfp_ctrl_poll(unsigned lon { struct nfp_net_r_vector *r_vec = (void *)arg;
- spin_lock_bh(&r_vec->lock); + spin_lock(&r_vec->lock); nfp_net_tx_complete(r_vec->tx_ring, 0); __nfp_ctrl_tx_queued(r_vec); - spin_unlock_bh(&r_vec->lock); + spin_unlock(&r_vec->lock);
nfp_ctrl_rx(r_vec);
@@@ -2180,9 -2180,13 +2180,13 @@@ nfp_net_tx_ring_alloc(struct nfp_net_d
tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds)); tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size, - &tx_ring->dma, GFP_KERNEL); - if (!tx_ring->txds) + &tx_ring->dma, + GFP_KERNEL | __GFP_NOWARN); + if (!tx_ring->txds) { + netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", + tx_ring->cnt); goto err_alloc; + }
tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs), GFP_KERNEL); @@@ -2334,9 -2338,13 +2338,13 @@@ nfp_net_rx_ring_alloc(struct nfp_net_d rx_ring->cnt = dp->rxd_cnt; rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds)); rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size, - &rx_ring->dma, GFP_KERNEL); - if (!rx_ring->rxds) + &rx_ring->dma, + GFP_KERNEL | __GFP_NOWARN); + if (!rx_ring->rxds) { + netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n", + rx_ring->cnt); goto err_alloc; + }
rx_ring->rxbufs = kvcalloc(rx_ring->cnt, sizeof(*rx_ring->rxbufs), GFP_KERNEL); @@@ -3146,12 -3154,28 +3154,13 @@@ nfp_net_vlan_rx_kill_vid(struct net_dev return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL); }
-#ifdef CONFIG_NET_POLL_CONTROLLER -static void nfp_net_netpoll(struct net_device *netdev) -{ - struct nfp_net *nn = netdev_priv(netdev); - int i; - - /* nfp_net's NAPIs are statically allocated so even if there is a race - * with reconfig path this will simply try to schedule some disabled - * NAPI instances. - */ - for (i = 0; i < nn->dp.num_stack_tx_rings; i++) - napi_schedule_irqoff(&nn->r_vecs[i].napi); -} -#endif - static void nfp_net_stat64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct nfp_net *nn = netdev_priv(netdev); int r;
+ /* Collect software stats */ for (r = 0; r < nn->max_r_vecs; r++) { struct nfp_net_r_vector *r_vec = &nn->r_vecs[r]; u64 data[3]; @@@ -3177,6 -3201,14 +3186,14 @@@ stats->tx_bytes += data[1]; stats->tx_errors += data[2]; } + + /* Add in device stats */ + stats->multicast += nn_readq(nn, NFP_NET_CFG_STATS_RX_MC_FRAMES); + stats->rx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_RX_DISCARDS); + stats->rx_errors += nn_readq(nn, NFP_NET_CFG_STATS_RX_ERRORS); + + stats->tx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_TX_DISCARDS); + stats->tx_errors += nn_readq(nn, NFP_NET_CFG_STATS_TX_ERRORS); }
static int nfp_net_set_features(struct net_device *netdev, @@@ -3504,6 -3536,9 +3521,6 @@@ const struct net_device_ops nfp_net_net .ndo_get_stats64 = nfp_net_stat64, .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = nfp_net_netpoll, -#endif .ndo_set_vf_mac = nfp_app_set_vf_mac, .ndo_set_vf_vlan = nfp_app_set_vf_vlan, .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, @@@ -3744,15 -3779,18 +3761,18 @@@ static void nfp_net_netdev_init(struct } if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) netdev->hw_features |= NETIF_F_RXHASH; - if (nn->cap & NFP_NET_CFG_CTRL_VXLAN && - nn->cap & NFP_NET_CFG_CTRL_NVGRE) { + if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) { if (nn->cap & NFP_NET_CFG_CTRL_LSO) - netdev->hw_features |= NETIF_F_GSO_GRE | - NETIF_F_GSO_UDP_TUNNEL; - nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE; - - netdev->hw_enc_features = netdev->hw_features; + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN; } + if (nn->cap & NFP_NET_CFG_CTRL_NVGRE) { + if (nn->cap & NFP_NET_CFG_CTRL_LSO) + netdev->hw_features |= NETIF_F_GSO_GRE; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_NVGRE; + } + if (nn->cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE)) + netdev->hw_enc_features = netdev->hw_features;
netdev->vlan_features = netdev->hw_features;
diff --combined drivers/net/ethernet/qlogic/qed/qed_dcbx.c index f5459de6d60a,6ce9a762cfc0..8e8fa823d611 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@@ -190,8 -190,10 +190,8 @@@ qed_dcbx_dp_protocol(struct qed_hwfn *p
static void qed_dcbx_set_params(struct qed_dcbx_results *p_data, - struct qed_hw_info *p_info, - bool enable, - u8 prio, - u8 tc, + struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool enable, u8 prio, u8 tc, enum dcbx_protocol_type type, enum qed_pci_personality personality) { @@@ -204,30 -206,19 +204,30 @@@ else p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
+ /* Do not add vlan tag 0 when DCB is enabled and port in UFP/OV mode */ + if ((test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits) || + test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits))) + p_data->arr[type].dont_add_vlan0 = true; + /* QM reconf data */ - if (p_info->personality == personality) - qed_hw_info_set_offload_tc(p_info, tc); + if (p_hwfn->hw_info.personality == personality) + qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc); + + /* Configure dcbx vlan priority in doorbell block for roce EDPM */ + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && + type == DCBX_PROTOCOL_ROCE) { + qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1); + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1); + } }
/* Update app protocol data and hw_info fields with the TLV info */ static void qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, - struct qed_hwfn *p_hwfn, - bool enable, - u8 prio, u8 tc, enum dcbx_protocol_type type) + struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool enable, u8 prio, u8 tc, + enum dcbx_protocol_type type) { - struct qed_hw_info *p_info = &p_hwfn->hw_info; enum qed_pci_personality personality; enum dcbx_protocol_type id; int i; @@@ -240,7 -231,7 +240,7 @@@
personality = qed_dcbx_app_update[i].personality;
- qed_dcbx_set_params(p_data, p_info, enable, + qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable, prio, tc, type, personality); } } @@@ -262,8 -253,9 +262,9 @@@ qed_dcbx_get_app_protocol_type(struct q *type = DCBX_PROTOCOL_ROCE_V2; } else { *type = DCBX_MAX_PROTOCOL_TYPE; - DP_ERR(p_hwfn, "No action required, App TLV entry = 0x%x\n", - app_prio_bitmap); + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "No action required, App TLV entry = 0x%x\n", + app_prio_bitmap); return false; }
@@@ -274,7 -266,7 +275,7 @@@ * reconfiguring QM. Get protocol specific data for PF update ramrod command. */ static int -qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, +qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_dcbx_results *p_data, struct dcbx_app_priority_entry *p_tbl, u32 pri_tc_tbl, int count, u8 dcbx_version) @@@ -318,7 -310,7 +319,7 @@@ enable = true; }
- qed_dcbx_update_app_info(p_data, p_hwfn, enable, + qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, priority, tc, type); } } @@@ -340,7 -332,7 +341,7 @@@ continue;
enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version; - qed_dcbx_update_app_info(p_data, p_hwfn, enable, + qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, priority, tc, type); }
@@@ -350,8 -342,7 +351,8 @@@ /* Parse app TLV's to update TC information in hw_info structure for * reconfiguring QM. Get protocol specific data for PF update ramrod command. */ -static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) +static int +qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct dcbx_app_priority_feature *p_app; struct dcbx_app_priority_entry *p_tbl; @@@ -375,7 -366,7 +376,7 @@@ p_info = &p_hwfn->hw_info; num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
- rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, + rc = qed_dcbx_process_tlv(p_hwfn, p_ptt, &data, p_tbl, pri_tc_tbl, num_entries, dcbx_version); if (rc) return rc; @@@ -901,7 -892,7 +902,7 @@@ qed_dcbx_mib_update_event(struct qed_hw return rc;
if (type == QED_DCBX_OPERATIONAL_MIB) { - rc = qed_dcbx_process_mib_info(p_hwfn); + rc = qed_dcbx_process_mib_info(p_hwfn, p_ptt); if (!rc) { /* reconfigure tcs of QM queues according * to negotiation results @@@ -964,7 -955,6 +965,7 @@@ static void qed_dcbx_update_protocol_da p_data->dcb_enable_flag = p_src->arr[type].enable; p_data->dcb_priority = p_src->arr[type].priority; p_data->dcb_tc = p_src->arr[type].tc; + p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0; }
/* Set pf update ramrod command params */ diff --combined drivers/net/ethernet/qlogic/qed/qed_dev.c index 97f073fd3725,128eb63ca54a..0fbeafeef7a0 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@@ -144,6 -144,12 +144,12 @@@ static void qed_qm_info_free(struct qed qm_info->wfq_data = NULL; }
+ static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn) + { + kfree(p_hwfn->dbg_user_info); + p_hwfn->dbg_user_info = NULL; + } + void qed_resc_free(struct qed_dev *cdev) { int i; @@@ -183,6 -189,7 +189,7 @@@ qed_l2_free(p_hwfn); qed_dmae_info_free(p_hwfn); qed_dcbx_info_free(p_hwfn); + qed_dbg_user_data_free(p_hwfn); } }
@@@ -1083,6 -1090,10 +1090,10 @@@ int qed_resc_alloc(struct qed_dev *cdev rc = qed_dcbx_info_alloc(p_hwfn); if (rc) goto alloc_err; + + rc = qed_dbg_alloc_user_data(p_hwfn); + if (rc) + goto alloc_err; }
cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL); @@@ -1706,7 -1717,7 +1717,7 @@@ static int qed_vf_start(struct qed_hwf int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) { struct qed_load_req_params load_req_params; - u32 load_code, param, drv_mb_param; + u32 load_code, resp, param, drv_mb_param; bool b_default_mtu = true; struct qed_hwfn *p_hwfn; int rc = 0, mfw_rc, i; @@@ -1852,19 -1863,6 +1863,19 @@@
if (IS_PF(cdev)) { p_hwfn = QED_LEADING_HWFN(cdev); + + /* Get pre-negotiated values for stag, bandwidth etc. */ + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n"); + drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET; + rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_GET_OEM_UPDATES, + drv_mb_param, &resp, ¶m); + if (rc) + DP_NOTICE(p_hwfn, + "Failed to send GET_OEM_UPDATES attention request\n"); + drv_mb_param = STORM_FW_VERSION; rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, diff --combined drivers/net/ethernet/qlogic/qed/qed_hsi.h index 9b3ef00e5782,21ec8091a24a..d4d08383c753 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@@ -274,7 -274,8 +274,8 @@@ struct core_rx_start_ramrod_data u8 mf_si_mcast_accept_all; struct core_rx_action_on_error action_on_error; u8 gsi_offload_flag; - u8 reserved[6]; + u8 wipe_inner_vlan_pri_en; + u8 reserved[5]; };
/* Ramrod data for rx queue stop ramrod */ @@@ -351,7 -352,8 +352,8 @@@ struct core_tx_start_ramrod_data __le16 pbl_size; __le16 qm_pq_id; u8 gsi_offload_flag; - u8 resrved[3]; + u8 vport_id; + u8 resrved[2]; };
/* Ramrod data for tx queue stop ramrod */ @@@ -914,6 -916,16 +916,16 @@@ struct eth_rx_rate_limit __le16 reserved1; };
+ /* Update RSS indirection table entry command */ + struct eth_tstorm_rss_update_data { + u8 valid; + u8 vport_id; + u8 ind_table_index; + u8 reserved; + __le16 ind_table_value; + __le16 reserved1; + }; + struct eth_ustorm_per_pf_stat { struct regpair rcv_lb_ucast_bytes; struct regpair rcv_lb_mcast_bytes; @@@ -1241,6 -1253,10 +1253,10 @@@ struct rl_update_ramrod_data u8 rl_id_first; u8 rl_id_last; u8 rl_dc_qcn_flg; + u8 dcqcn_reset_alpha_on_idle; + u8 rl_bc_stage_th; + u8 rl_timer_stage_th; + u8 reserved1; __le32 rl_bc_rate; __le16 rl_max_rate; __le16 rl_r_ai; @@@ -1249,7 -1265,7 +1265,7 @@@ __le32 dcqcn_k_us; __le32 dcqcn_timeuot_us; __le32 qcn_timeuot_us; - __le32 reserved[2]; + __le32 reserved2; };
/* Slowpath Element (SPQE) */ @@@ -3322,6 -3338,25 +3338,25 @@@ enum dbg_status qed_dbg_read_attn(struc enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, struct dbg_attn_block_result *results);
+ /******************************* Data Types **********************************/ + + struct mcp_trace_format { + u32 data; + #define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff + #define MCP_TRACE_FORMAT_MODULE_SHIFT 0 + #define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000 + #define MCP_TRACE_FORMAT_LEVEL_SHIFT 16 + #define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000 + #define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18 + #define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000 + #define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20 + #define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000 + #define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22 + #define MCP_TRACE_FORMAT_LEN_MASK 0xff000000 + #define MCP_TRACE_FORMAT_LEN_SHIFT 24 + char *format_str; + }; + /******************************** Constants **********************************/
#define MAX_NAME_LEN 16 @@@ -3337,6 -3372,13 +3372,13 @@@ enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr);
/** + * @brief qed_dbg_alloc_user_data - Allocates user debug data. + * + * @param p_hwfn - HW device data + */ + enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn); + + /** * @brief qed_dbg_get_status_str - Returns a string for the specified status. * * @param status - a debug status code. @@@ -3381,8 -3423,7 +3423,7 @@@ enum dbg_status qed_print_idle_chk_resu u32 *num_warnings);
/** - * @brief qed_dbg_mcp_trace_set_meta_data - Sets a pointer to the MCP Trace - * meta data. + * @brief qed_dbg_mcp_trace_set_meta_data - Sets the MCP Trace meta data. * * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to * no NVRAM access). @@@ -3390,7 -3431,8 +3431,8 @@@ * @param data - pointer to MCP Trace meta data * @param size - size of MCP Trace meta data in dwords */ - void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size); + void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn, + const u32 *meta_buf);
/** * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size @@@ -3425,19 -3467,45 +3467,45 @@@ enum dbg_status qed_print_mcp_trace_res char *results_buf);
/** + * @brief qed_print_mcp_trace_results_cont - Prints MCP Trace results, and + * keeps the MCP trace meta data allocated, to support continuous MCP Trace + * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should + * be called to free the meta data. + * + * @param p_hwfn - HW device data + * @param dump_buf - mcp trace dump buffer, starting from the header. + * @param results_buf - buffer for printing the mcp trace results. + * + * @return error if the parsing fails, ok otherwise. + */ + enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + char *results_buf); + + /** * @brief print_mcp_trace_line - Prints MCP Trace results for a single line * + * @param p_hwfn - HW device data * @param dump_buf - mcp trace dump buffer, starting from the header. * @param num_dumped_bytes - number of bytes that were dumped. * @param results_buf - buffer for printing the mcp trace results. * * @return error if the parsing fails, ok otherwise. */ - enum dbg_status qed_print_mcp_trace_line(u8 *dump_buf, + enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, + u8 *dump_buf, u32 num_dumped_bytes, char *results_buf);
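The two helpers documented above are meant to be used together for continuous parsing: qed_print_mcp_trace_results_cont() can be called repeatedly on successive dump chunks because it keeps the MCP Trace meta data cached, and qed_mcp_trace_free_meta_data() releases that cache once parsing stops. A hedged usage sketch (buffer sizing and most error handling omitted; DBG_STATUS_OK is assumed to be the success value of enum dbg_status):

	static void mcp_trace_parse_chunks(struct qed_hwfn *p_hwfn,
					   u32 *dump_buf, char *results_buf,
					   int nr_chunks)
	{
		int i;

		for (i = 0; i < nr_chunks; i++) {
			/* Meta data stays allocated between calls */
			if (qed_print_mcp_trace_results_cont(p_hwfn, dump_buf,
							     results_buf) !=
			    DBG_STATUS_OK)
				break;
			/* ... consume results_buf, fetch the next chunk ... */
		}

		/* Continuous parsing is over: drop the cached meta data */
		qed_mcp_trace_free_meta_data(p_hwfn);
	}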
/** + * @brief mcp_trace_free_meta_data - Frees the MCP Trace meta data. + * Should be called after continuous MCP Trace parsing. + * + * @param p_hwfn - HW device data + */ + void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn); + + /** * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size * for reg_fifo results (in bytes). * @@@ -4303,154 -4371,161 +4371,161 @@@ void qed_set_rdma_error_level(struct qe (IRO[29].base + ((pf_id) * IRO[29].m1)) #define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
+ /* RSS indirection table entry update command per PF offset in TSTORM PF BAR0. + * Use eth_tstorm_rss_update_data for update. + */ + #define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) \ + (IRO[30].base + ((pf_id) * IRO[30].m1)) + #define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[30].size) + /* Xstorm queue zone */ #define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ - (IRO[30].base + ((queue_id) * IRO[30].m1)) - #define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size) + (IRO[31].base + ((queue_id) * IRO[31].m1)) + #define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[31].size)
/* Ystorm cqe producer */ #define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ - (IRO[31].base + ((rss_id) * IRO[31].m1)) - #define YSTORM_TOE_CQ_PROD_SIZE (IRO[31].size) + (IRO[32].base + ((rss_id) * IRO[32].m1)) + #define YSTORM_TOE_CQ_PROD_SIZE (IRO[32].size)
/* Ustorm cqe producer */ #define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ - (IRO[32].base + ((rss_id) * IRO[32].m1)) - #define USTORM_TOE_CQ_PROD_SIZE (IRO[32].size) + (IRO[33].base + ((rss_id) * IRO[33].m1)) + #define USTORM_TOE_CQ_PROD_SIZE (IRO[33].size)
/* Ustorm grq producer */ #define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ - (IRO[33].base + ((pf_id) * IRO[33].m1)) - #define USTORM_TOE_GRQ_PROD_SIZE (IRO[33].size) + (IRO[34].base + ((pf_id) * IRO[34].m1)) + #define USTORM_TOE_GRQ_PROD_SIZE (IRO[34].size)
/* Tstorm cmdq-cons of given command queue-id */ #define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \ - (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1)) - #define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size) + (IRO[35].base + ((cmdq_queue_id) * IRO[35].m1)) + #define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[35].size)
/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID, * BDqueue-id. */ #define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ - (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2)) - #define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size) + (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2)) + #define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */ #define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ - (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2)) - #define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size) + (IRO[37].base + ((func_id) * IRO[37].m1) + ((bdq_id) * IRO[37].m2)) + #define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[37].size)
/* Tstorm iSCSI RX stats */ #define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[37].base + ((pf_id) * IRO[37].m1)) - #define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size) + (IRO[38].base + ((pf_id) * IRO[38].m1)) + #define TSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
/* Mstorm iSCSI RX stats */ #define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[38].base + ((pf_id) * IRO[38].m1)) - #define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size) + (IRO[39].base + ((pf_id) * IRO[39].m1)) + #define MSTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
/* Ustorm iSCSI RX stats */ #define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[39].base + ((pf_id) * IRO[39].m1)) - #define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size) + (IRO[40].base + ((pf_id) * IRO[40].m1)) + #define USTORM_ISCSI_RX_STATS_SIZE (IRO[40].size)
/* Xstorm iSCSI TX stats */ #define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[40].base + ((pf_id) * IRO[40].m1)) - #define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size) + (IRO[41].base + ((pf_id) * IRO[41].m1)) + #define XSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
/* Ystorm iSCSI TX stats */ #define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[41].base + ((pf_id) * IRO[41].m1)) - #define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size) + (IRO[42].base + ((pf_id) * IRO[42].m1)) + #define YSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
/* Pstorm iSCSI TX stats */ #define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[42].base + ((pf_id) * IRO[42].m1)) - #define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size) + (IRO[43].base + ((pf_id) * IRO[43].m1)) + #define PSTORM_ISCSI_TX_STATS_SIZE (IRO[43].size)
/* Tstorm FCoE RX stats */ #define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ - (IRO[43].base + ((pf_id) * IRO[43].m1)) - #define TSTORM_FCOE_RX_STATS_SIZE (IRO[43].size) + (IRO[44].base + ((pf_id) * IRO[44].m1)) + #define TSTORM_FCOE_RX_STATS_SIZE (IRO[44].size)
/* Pstorm FCoE TX stats */ #define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ - (IRO[44].base + ((pf_id) * IRO[44].m1)) - #define PSTORM_FCOE_TX_STATS_SIZE (IRO[44].size) + (IRO[45].base + ((pf_id) * IRO[45].m1)) + #define PSTORM_FCOE_TX_STATS_SIZE (IRO[45].size)
/* Pstorm RDMA queue statistics */ #define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1)) - #define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size) + (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) + #define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
/* Tstorm RDMA queue statistics */ #define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) - #define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) + (IRO[47].base + ((rdma_stat_counter_id) * IRO[47].m1)) + #define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[47].size)
/* Xstorm error level for assert */ #define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[47].base + ((pf_id) * IRO[47].m1)) - #define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[47].size) + (IRO[48].base + ((pf_id) * IRO[48].m1)) + #define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size)
/* Ystorm error level for assert */ #define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[48].base + ((pf_id) * IRO[48].m1)) - #define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size) + (IRO[49].base + ((pf_id) * IRO[49].m1)) + #define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size)
/* Pstorm error level for assert */ #define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[49].base + ((pf_id) * IRO[49].m1)) - #define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size) + (IRO[50].base + ((pf_id) * IRO[50].m1)) + #define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size)
/* Tstorm error level for assert */ #define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[50].base + ((pf_id) * IRO[50].m1)) - #define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size) + (IRO[51].base + ((pf_id) * IRO[51].m1)) + #define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size)
/* Mstorm error level for assert */ #define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[51].base + ((pf_id) * IRO[51].m1)) - #define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size) + (IRO[52].base + ((pf_id) * IRO[52].m1)) + #define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size)
/* Ustorm error level for assert */ #define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[52].base + ((pf_id) * IRO[52].m1)) - #define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size) + (IRO[53].base + ((pf_id) * IRO[53].m1)) + #define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[53].size)
/* Xstorm iWARP rxmit stats */ #define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \ - (IRO[53].base + ((pf_id) * IRO[53].m1)) - #define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[53].size) + (IRO[54].base + ((pf_id) * IRO[54].m1)) + #define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[54].size)
/* Tstorm RoCE Event Statistics */ #define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \ - (IRO[54].base + ((roce_pf_id) * IRO[54].m1)) - #define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[54].size) + (IRO[55].base + ((roce_pf_id) * IRO[55].m1)) + #define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[55].size)
/* DCQCN Received Statistics */ #define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \ - (IRO[55].base + ((roce_pf_id) * IRO[55].m1)) - #define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[55].size) + (IRO[56].base + ((roce_pf_id) * IRO[56].m1)) + #define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[56].size)
/* RoCE Error Statistics */ #define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \ - (IRO[56].base + ((roce_pf_id) * IRO[56].m1)) - #define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[56].size) + (IRO[57].base + ((roce_pf_id) * IRO[57].m1)) + #define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[57].size)
/* DCQCN Sent Statistics */ #define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \ - (IRO[57].base + ((roce_pf_id) * IRO[57].m1)) - #define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[57].size) + (IRO[58].base + ((roce_pf_id) * IRO[58].m1)) + #define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[58].size)
/* RoCE CQEs Statistics */ #define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \ - (IRO[58].base + ((roce_pf_id) * IRO[58].m1)) - #define USTORM_ROCE_CQE_STATS_SIZE (IRO[58].size) + (IRO[59].base + ((roce_pf_id) * IRO[59].m1)) + #define USTORM_ROCE_CQE_STATS_SIZE (IRO[59].size)
- static const struct iro iro_arr[59] = { + static const struct iro iro_arr[60] = { {0x0, 0x0, 0x0, 0x0, 0x8}, {0x4cb8, 0x88, 0x0, 0x0, 0x88}, {0x6530, 0x20, 0x0, 0x0, 0x20}, @@@ -4461,14 -4536,14 +4536,14 @@@ {0x84, 0x8, 0x0, 0x0, 0x2}, {0x4c48, 0x0, 0x0, 0x0, 0x78}, {0x3e38, 0x0, 0x0, 0x0, 0x78}, - {0x2b78, 0x0, 0x0, 0x0, 0x78}, + {0x3ef8, 0x0, 0x0, 0x0, 0x78}, {0x4c40, 0x0, 0x0, 0x0, 0x78}, {0x4998, 0x0, 0x0, 0x0, 0x78}, {0x7f50, 0x0, 0x0, 0x0, 0x78}, {0xa28, 0x8, 0x0, 0x0, 0x8}, {0x6210, 0x10, 0x0, 0x0, 0x10}, {0xb820, 0x30, 0x0, 0x0, 0x30}, - {0x96c0, 0x30, 0x0, 0x0, 0x30}, + {0xa990, 0x30, 0x0, 0x0, 0x30}, {0x4b68, 0x80, 0x0, 0x0, 0x40}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, {0x53a8, 0x80, 0x4, 0x0, 0x4}, @@@ -4476,11 -4551,12 +4551,12 @@@ {0x4ba8, 0x80, 0x0, 0x0, 0x20}, {0x8158, 0x40, 0x0, 0x0, 0x30}, {0xe770, 0x60, 0x0, 0x0, 0x60}, - {0x2d10, 0x80, 0x0, 0x0, 0x38}, - {0xf2b8, 0x78, 0x0, 0x0, 0x78}, + {0x4090, 0x80, 0x0, 0x0, 0x38}, + {0xfea8, 0x78, 0x0, 0x0, 0x78}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, {0xaf20, 0x0, 0x0, 0x0, 0xf0}, {0xb010, 0x8, 0x0, 0x0, 0x8}, + {0xc00, 0x8, 0x0, 0x0, 0x8}, {0x1f8, 0x8, 0x0, 0x0, 0x8}, {0xac0, 0x8, 0x0, 0x0, 0x8}, {0x2578, 0x8, 0x0, 0x0, 0x8}, @@@ -4492,23 -4568,23 +4568,23 @@@ {0x12908, 0x18, 0x0, 0x0, 0x10}, {0x11aa8, 0x40, 0x0, 0x0, 0x18}, {0xa588, 0x50, 0x0, 0x0, 0x20}, - {0x8700, 0x40, 0x0, 0x0, 0x28}, - {0x10300, 0x18, 0x0, 0x0, 0x10}, + {0x8f00, 0x40, 0x0, 0x0, 0x28}, + {0x10e30, 0x18, 0x0, 0x0, 0x10}, {0xde48, 0x48, 0x0, 0x0, 0x38}, - {0x10768, 0x20, 0x0, 0x0, 0x20}, - {0x2d48, 0x80, 0x0, 0x0, 0x10}, + {0x11298, 0x20, 0x0, 0x0, 0x20}, + {0x40c8, 0x80, 0x0, 0x0, 0x10}, {0x5048, 0x10, 0x0, 0x0, 0x10}, {0xc748, 0x8, 0x0, 0x0, 0x1}, - {0xa128, 0x8, 0x0, 0x0, 0x1}, - {0x10f00, 0x8, 0x0, 0x0, 0x1}, + {0xa928, 0x8, 0x0, 0x0, 0x1}, + {0x11a30, 0x8, 0x0, 0x0, 0x1}, {0xf030, 0x8, 0x0, 0x0, 0x1}, {0x13028, 0x8, 0x0, 0x0, 0x1}, {0x12c58, 0x8, 0x0, 0x0, 0x1}, {0xc9b8, 0x30, 0x0, 0x0, 0x10}, {0xed90, 0x28, 0x0, 0x0, 0x28}, - {0xa520, 0x18, 0x0, 0x0, 0x18}, - {0xa6a0, 0x8, 0x0, 0x0, 0x8}, - {0x13108, 0x8, 0x0, 0x0, 0x8}, + {0xad20, 0x18, 0x0, 0x0, 0x18}, + {0xaea0, 0x8, 0x0, 0x0, 0x8}, + {0x13c38, 0x8, 0x0, 0x0, 0x8}, {0x13c50, 0x18, 0x0, 0x0, 0x18}, };
@@@ -5661,6 -5737,14 +5737,14 @@@ enum eth_filter_type MAX_ETH_FILTER_TYPE };
+ /* inner to inner vlan priority translation configurations */ + struct eth_in_to_in_pri_map_cfg { + u8 inner_vlan_pri_remap_en; + u8 reserved[7]; + u8 non_rdma_in_to_in_pri_map[8]; + u8 rdma_in_to_in_pri_map[8]; + }; + /* Eth IPv4 Fragment Type */ enum eth_ipv4_frag_type { ETH_IPV4_NOT_FRAG, @@@ -6018,6 -6102,14 +6102,14 @@@ struct tx_queue_update_ramrod_data struct regpair reserved1[5]; };
+ /* Inner to Inner VLAN priority map update mode */ + enum update_in_to_in_pri_map_mode_enum { + ETH_IN_TO_IN_PRI_MAP_UPDATE_DISABLED, + ETH_IN_TO_IN_PRI_MAP_UPDATE_NON_RDMA_TBL, + ETH_IN_TO_IN_PRI_MAP_UPDATE_RDMA_TBL, + MAX_UPDATE_IN_TO_IN_PRI_MAP_MODE_ENUM + }; + /* Ramrod data for vport update ramrod */ struct vport_filter_update_ramrod_data { struct eth_filter_cmd_header filter_cmd_hdr; @@@ -6048,7 -6140,8 +6140,8 @@@ struct vport_start_ramrod_data u8 zero_placement_offset; u8 ctl_frame_mac_check_en; u8 ctl_frame_ethtype_check_en; - u8 reserved[1]; + u8 wipe_inner_vlan_pri_en; + struct eth_in_to_in_pri_map_cfg in_to_in_vlan_pri_map_cfg; };
/* Ramrod data for vport stop ramrod */ @@@ -6100,7 -6193,9 +6193,9 @@@ struct vport_update_ramrod_data_cmn u8 update_ctl_frame_checks_en_flg; u8 ctl_frame_mac_check_en; u8 ctl_frame_ethtype_check_en; - u8 reserved[15]; + u8 update_in_to_in_pri_map_mode; + u8 in_to_in_pri_map[8]; + u8 reserved[6]; };
struct vport_update_ramrod_mcast { @@@ -6929,11 -7024,6 +7024,6 @@@ struct mstorm_rdma_task_st_ctx struct regpair temp[4]; };
- /* The roce task context of Ustorm */ - struct ustorm_rdma_task_st_ctx { - struct regpair temp[2]; - }; - struct e4_ustorm_rdma_task_ag_ctx { u8 reserved; u8 state; @@@ -7007,8 -7097,6 +7097,6 @@@ struct e4_rdma_task_context struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context; struct mstorm_rdma_task_st_ctx mstorm_st_context; struct rdif_task_context rdif_context; - struct ustorm_rdma_task_st_ctx ustorm_st_context; - struct regpair ustorm_st_padding[2]; struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context; };
@@@ -7388,7 -7476,7 +7476,7 @@@ struct e4_ustorm_rdma_conn_ag_ctx #define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 #define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; - u8 byte3; + u8 nvmf_only; __le16 conn_dpi; __le16 word1; __le32 cq_cons; @@@ -7831,7 -7919,12 +7919,12 @@@ struct roce_create_qp_req_ramrod_data struct regpair qp_handle_for_cqe; struct regpair qp_handle_for_async; u8 stats_counter_id; - u8 reserved3[7]; + u8 reserved3[6]; + u8 flags2; + #define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_MASK 0x1 + #define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_SHIFT 0 + #define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x7F + #define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 1 __le16 regular_latency_phy_queue; __le16 dpi; }; @@@ -7954,6 -8047,7 +8047,7 @@@ enum roce_event_opcode ROCE_EVENT_DESTROY_QP, ROCE_EVENT_CREATE_UD_QP, ROCE_EVENT_DESTROY_UD_QP, + ROCE_EVENT_FUNC_UPDATE, MAX_ROCE_EVENT_OPCODE };
@@@ -7962,7 -8056,13 +8056,13 @@@ struct roce_init_func_params u8 ll2_queue_id; u8 cnp_vlan_priority; u8 cnp_dscp; - u8 reserved; + u8 flags; + #define ROCE_INIT_FUNC_PARAMS_DCQCN_NP_EN_MASK 0x1 + #define ROCE_INIT_FUNC_PARAMS_DCQCN_NP_EN_SHIFT 0 + #define ROCE_INIT_FUNC_PARAMS_DCQCN_RP_EN_MASK 0x1 + #define ROCE_INIT_FUNC_PARAMS_DCQCN_RP_EN_SHIFT 1 + #define ROCE_INIT_FUNC_PARAMS_RESERVED0_MASK 0x3F + #define ROCE_INIT_FUNC_PARAMS_RESERVED0_SHIFT 2 __le32 cnp_send_timeout; __le16 rl_offset; u8 rl_count_log; @@@ -8109,9 -8209,24 +8209,24 @@@ enum roce_ramrod_cmd_id ROCE_RAMROD_DESTROY_QP, ROCE_RAMROD_CREATE_UD_QP, ROCE_RAMROD_DESTROY_UD_QP, + ROCE_RAMROD_FUNC_UPDATE, MAX_ROCE_RAMROD_CMD_ID };
+ /* RoCE func init ramrod data */ + struct roce_update_func_params { + u8 cnp_vlan_priority; + u8 cnp_dscp; + __le16 flags; + #define ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_MASK 0x1 + #define ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_SHIFT 0 + #define ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_MASK 0x1 + #define ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_SHIFT 1 + #define ROCE_UPDATE_FUNC_PARAMS_RESERVED0_MASK 0x3FFF + #define ROCE_UPDATE_FUNC_PARAMS_RESERVED0_SHIFT 2 + __le32 cnp_send_timeout; + }; + struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part { u8 reserved0; u8 state; @@@ -12414,7 -12529,6 +12529,7 @@@ struct public_drv_mb #define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000 #define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000 #define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000 +#define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 #define DRV_MSG_CODE_NIG_DRAIN 0x30000000 @@@ -12542,9 -12656,6 +12657,9 @@@ #define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1 #define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1 +#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0 + #define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 #define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 #define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 diff --combined drivers/net/ethernet/realtek/r8169.c index ab30aaeac6d3,f882be49f493..ed8ffd498c88 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@@ -77,8 -77,6 +77,6 @@@ static const int multicast_filter_limi #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
- #define RTL8169_TX_TIMEOUT (6*HZ) - /* write/read MMIO register */ #define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) #define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg)) @@@ -1354,7 -1352,8 +1352,8 @@@ static void rtl_irq_enable_all(struct r static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) { rtl_irq_disable(tp); - rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow); + rtl_ack_events(tp, 0xffff); + /* PCI commit */ RTL_R8(tp, ChipCmd); }
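rtl8169_irq_mask_and_ack() now acknowledges every event bit rather than only the NAPI and slow events, and the read of ChipCmd that follows is the usual MMIO flush: reading back any register on the same function forces the posted acknowledge write out to the device (the "PCI commit" the new comment refers to) before the function returns.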
@@@ -4048,38 -4047,17 +4047,26 @@@ static void rtl8169_init_phy(struct net rtl_hw_phy_config(dev);
if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { - netif_dbg(tp, drv, dev, - "Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); - RTL_W8(tp, 0x82, 0x01); - } - - pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); - - if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); - - if (tp->mac_version == RTL_GIGA_MAC_VER_02) { netif_dbg(tp, drv, dev, "Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); RTL_W8(tp, 0x82, 0x01); - netif_dbg(tp, drv, dev, - "Set PHY Reg 0x0bh = 0x00h\n"); - rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0 }
/* We may have called phy_speed_down before */ phy_speed_up(dev->phydev);
genphy_soft_reset(dev->phydev); + + /* It was reported that chip version 33 ends up with 10MBit/Half on a + * 1GBit link after resuming from S3. For whatever reason the PHY on + * this chip doesn't properly start a renegotiation when soft-reset. + * Explicitly requesting a renegotiation fixes this. + */ + if (tp->mac_version == RTL_GIGA_MAC_VER_33 && + dev->phydev->autoneg == AUTONEG_ENABLE) + phy_restart_aneg(dev->phydev); }
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) @@@ -7368,11 -7346,9 +7355,9 @@@ static int rtl_init_one(struct pci_dev
tp->cp_cmd = RTL_R16(tp, CPlusCmd);
- if ((sizeof(dma_addr_t) > 4) && - (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) && - tp->mac_version >= RTL_GIGA_MAC_VER_18)) && - !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && - !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (sizeof(dma_addr_t) > 4 && (use_dac == 1 || (use_dac == -1 && + tp->mac_version >= RTL_GIGA_MAC_VER_18)) && + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
/* CPlusCmd Dual Access Cycle is only needed for non-PCIe */ if (!pci_is_pcie(pdev)) @@@ -7388,14 -7364,12 +7373,12 @@@
rtl_init_rxcfg(tp);
- rtl_irq_disable(tp); + rtl8169_irq_mask_and_ack(tp);
rtl_hw_initialize(tp);
rtl_hw_reset(tp);
- rtl_ack_events(tp, 0xffff); - pci_set_master(pdev);
rtl_init_mdio_ops(tp); @@@ -7435,7 -7409,6 +7418,6 @@@ dev->dev_addr[i] = RTL_R8(tp, MAC0 + i);
dev->ethtool_ops = &rtl8169_ethtool_ops; - dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);
diff --combined drivers/net/ethernet/renesas/ravb.h index 9b6bf557a2f5,b2b18036380e..1c6e4df94f01 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@@ -428,7 -428,6 +428,7 @@@ enum EIS_BIT EIS_CULF1 = 0x00000080, EIS_TFFF = 0x00000100, EIS_QFS = 0x00010000, + EIS_RESERVED = (GENMASK(31, 17) | GENMASK(15, 11)), };
/* RIC0 */ @@@ -473,7 -472,6 +473,7 @@@ enum RIS0_BIT RIS0_FRF15 = 0x00008000, RIS0_FRF16 = 0x00010000, RIS0_FRF17 = 0x00020000, + RIS0_RESERVED = GENMASK(31, 18), };
/* RIC1 */ @@@ -530,7 -528,6 +530,7 @@@ enum RIS2_BIT RIS2_QFF16 = 0x00010000, RIS2_QFF17 = 0x00020000, RIS2_RFFF = 0x80000000, + RIS2_RESERVED = GENMASK(30, 18), };
/* TIC */ @@@ -547,7 -544,6 +547,7 @@@ enum TIS_BIT TIS_FTF1 = 0x00000002, /* Undocumented? */ TIS_TFUF = 0x00000100, TIS_TFWF = 0x00000200, + TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4)) };
/* ISS */ @@@ -621,7 -617,6 +621,7 @@@ enum GIC_BIT enum GIS_BIT { GIS_PTCF = 0x00000001, /* Undocumented? */ GIS_PTMF = 0x00000004, + GIS_RESERVED = GENMASK(15, 10), };
/* GIE (R-Car Gen3 only) */ @@@ -959,7 -954,10 +959,10 @@@ enum RAVB_QUEUE #define RX_QUEUE_OFFSET 4 #define NUM_RX_QUEUE 2 #define NUM_TX_QUEUE 2 - #define NUM_TX_DESC 2 /* TX descriptors per packet */ + + /* TX descriptors per packet */ + #define NUM_TX_DESC_GEN2 2 + #define NUM_TX_DESC_GEN3 1
struct ravb_tstamp_skb { struct list_head list; @@@ -1038,6 -1036,7 +1041,7 @@@ struct ravb_private unsigned no_avb_link:1; unsigned avb_link_active_low:1; unsigned wol_enabled:1; + int num_tx_desc; /* TX descriptors per packet */ };
static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg) diff --combined drivers/net/ethernet/renesas/ravb_main.c index d6f753925352,61d125750db9..defed0d0c51d --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@@ -182,6 -182,7 +182,7 @@@ static int ravb_tx_free(struct net_devi { struct ravb_private *priv = netdev_priv(ndev); struct net_device_stats *stats = &priv->stats[q]; + int num_tx_desc = priv->num_tx_desc; struct ravb_tx_desc *desc; int free_num = 0; int entry; @@@ -191,7 -192,7 +192,7 @@@ bool txed;
entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * - NUM_TX_DESC); + num_tx_desc); desc = &priv->tx_ring[q][entry]; txed = desc->die_dt == DT_FEMPTY; if (free_txed_only && !txed) @@@ -200,12 -201,12 +201,12 @@@ dma_rmb(); size = le16_to_cpu(desc->ds_tagl) & TX_DS; /* Free the original skb. */ - if (priv->tx_skb[q][entry / NUM_TX_DESC]) { + if (priv->tx_skb[q][entry / num_tx_desc]) { dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), size, DMA_TO_DEVICE); /* Last packet descriptor? */ - if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) { - entry /= NUM_TX_DESC; + if (entry % num_tx_desc == num_tx_desc - 1) { + entry /= num_tx_desc; dev_kfree_skb_any(priv->tx_skb[q][entry]); priv->tx_skb[q][entry] = NULL; if (txed) @@@ -224,6 -225,7 +225,7 @@@ static void ravb_ring_free(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); + int num_tx_desc = priv->num_tx_desc; int ring_size; int i;
@@@ -249,7 -251,7 +251,7 @@@ ravb_tx_free(ndev, q, false);
ring_size = sizeof(struct ravb_tx_desc) * - (priv->num_tx_ring[q] * NUM_TX_DESC + 1); + (priv->num_tx_ring[q] * num_tx_desc + 1); dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q], priv->tx_desc_dma[q]); priv->tx_ring[q] = NULL; @@@ -278,12 -280,13 +280,13 @@@ static void ravb_ring_format(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); + int num_tx_desc = priv->num_tx_desc; struct ravb_ex_rx_desc *rx_desc; struct ravb_tx_desc *tx_desc; struct ravb_desc *desc; int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] * - NUM_TX_DESC; + num_tx_desc; dma_addr_t dma_addr; int i;
@@@ -318,8 -321,10 +321,10 @@@ for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q]; i++, tx_desc++) { tx_desc->die_dt = DT_EEMPTY; - tx_desc++; - tx_desc->die_dt = DT_EEMPTY; + if (num_tx_desc > 1) { + tx_desc++; + tx_desc->die_dt = DT_EEMPTY; + } } tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); tx_desc->die_dt = DT_LINKFIX; /* type */ @@@ -339,6 -344,7 +344,7 @@@ static int ravb_ring_init(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); + int num_tx_desc = priv->num_tx_desc; struct sk_buff *skb; int ring_size; int i; @@@ -362,11 -368,13 +368,13 @@@ priv->rx_skb[q][i] = skb; }
- /* Allocate rings for the aligned buffers */ - priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] + - DPTR_ALIGN - 1, GFP_KERNEL); - if (!priv->tx_align[q]) - goto error; + if (num_tx_desc > 1) { + /* Allocate rings for the aligned buffers */ + priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] + + DPTR_ALIGN - 1, GFP_KERNEL); + if (!priv->tx_align[q]) + goto error; + }
/* Allocate all RX descriptors. */ ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); @@@ -380,7 -388,7 +388,7 @@@
/* Allocate all TX descriptors. */ ring_size = sizeof(struct ravb_tx_desc) * - (priv->num_tx_ring[q] * NUM_TX_DESC + 1); + (priv->num_tx_ring[q] * num_tx_desc + 1); priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, &priv->tx_desc_dma[q], GFP_KERNEL); @@@ -739,11 -747,10 +747,11 @@@ static void ravb_error_interrupt(struc u32 eis, ris2;
eis = ravb_read(ndev, EIS); - ravb_write(ndev, ~EIS_QFS, EIS); + ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS); if (eis & EIS_QFS) { ris2 = ravb_read(ndev, RIS2); - ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2); + ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED), + RIS2);
/* Receive Descriptor Empty int */ if (ris2 & RIS2_QFF0) @@@ -796,7 -803,7 +804,7 @@@ static bool ravb_timestamp_interrupt(st u32 tis = ravb_read(ndev, TIS);
if (tis & TIS_TFUF) { - ravb_write(ndev, ~TIS_TFUF, TIS); + ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS); ravb_get_tx_tstamp(ndev); return true; } @@@ -931,7 -938,7 +939,7 @@@ static int ravb_poll(struct napi_struc /* Processing RX Descriptor Ring */ if (ris0 & mask) { /* Clear RX interrupt */ - ravb_write(ndev, ~mask, RIS0); + ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0); if (ravb_rx(ndev, &quota, q)) goto out; } @@@ -939,7 -946,7 +947,7 @@@ if (tis & mask) { spin_lock_irqsave(&priv->lock, flags); /* Clear TX interrupt */ - ravb_write(ndev, ~mask, TIS); + ravb_write(ndev, ~(mask | TIS_RESERVED), TIS); ravb_tx_free(ndev, q, true); netif_wake_subqueue(ndev, q); mmiowb(); @@@ -1074,8 -1081,11 +1082,11 @@@ static int ravb_phy_init(struct net_dev netdev_info(ndev, "limited PHY to 100Mbit/s\n"); }
- /* 10BASE is not supported */ - phydev->supported &= ~PHY_10BT_FEATURES; + /* 10BASE, Pause and Asym Pause are not supported */ + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
phy_attached_info(phydev);
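phy_remove_link_mode() used above replaces open-coded phydev->supported &= ~SUPPORTED_* masking. A short sketch of the helper for a generic phylib consumer, with illustrative modes only:

#include <linux/phy.h>

/* Drop link modes the MAC cannot handle; the helper also keeps the
 * advertised set consistent with the supported one. */
static void example_trim_phy_modes(struct phy_device *phydev)
{
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
}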
@@@ -1485,6 -1495,7 +1496,7 @@@ static void ravb_tx_timeout_work(struc static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + int num_tx_desc = priv->num_tx_desc; u16 q = skb_get_queue_mapping(skb); struct ravb_tstamp_skb *ts_skb; struct ravb_tx_desc *desc; @@@ -1496,7 -1507,7 +1508,7 @@@
spin_lock_irqsave(&priv->lock, flags); if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) * - NUM_TX_DESC) { + num_tx_desc) { netif_err(priv, tx_queued, ndev, "still transmitting with the full ring!\n"); netif_stop_subqueue(ndev, q); @@@ -1507,41 -1518,55 +1519,55 @@@ if (skb_put_padto(skb, ETH_ZLEN)) goto exit;
- entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC); - priv->tx_skb[q][entry / NUM_TX_DESC] = skb; - - buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + - entry / NUM_TX_DESC * DPTR_ALIGN; - len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; - /* Zero length DMA descriptors are problematic as they seem to - * terminate DMA transfers. Avoid them by simply using a length of - * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN. - * - * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of - * data by the call to skb_put_padto() above this is safe with - * respect to both the length of the first DMA descriptor (len) - * overflowing the available data and the length of the second DMA - * descriptor (skb->len - len) being negative. - */ - if (len == 0) - len = DPTR_ALIGN; + entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc); + priv->tx_skb[q][entry / num_tx_desc] = skb; + + if (num_tx_desc > 1) { + buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + + entry / num_tx_desc * DPTR_ALIGN; + len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; + + /* Zero length DMA descriptors are problematic as they seem + * to terminate DMA transfers. Avoid them by simply using a + * length of DPTR_ALIGN (4) when skb data is aligned to + * DPTR_ALIGN. + * + * As skb is guaranteed to have at least ETH_ZLEN (60) + * bytes of data by the call to skb_put_padto() above this + * is safe with respect to both the length of the first DMA + * descriptor (len) overflowing the available data and the + * length of the second DMA descriptor (skb->len - len) + * being negative. + */ + if (len == 0) + len = DPTR_ALIGN;
- memcpy(buffer, skb->data, len); - dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE); - if (dma_mapping_error(ndev->dev.parent, dma_addr)) - goto drop; + memcpy(buffer, skb->data, len); + dma_addr = dma_map_single(ndev->dev.parent, buffer, len, + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto drop;
- desc = &priv->tx_ring[q][entry]; - desc->ds_tagl = cpu_to_le16(len); - desc->dptr = cpu_to_le32(dma_addr); + desc = &priv->tx_ring[q][entry]; + desc->ds_tagl = cpu_to_le16(len); + desc->dptr = cpu_to_le32(dma_addr);
- buffer = skb->data + len; - len = skb->len - len; - dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE); - if (dma_mapping_error(ndev->dev.parent, dma_addr)) - goto unmap; + buffer = skb->data + len; + len = skb->len - len; + dma_addr = dma_map_single(ndev->dev.parent, buffer, len, + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto unmap;
- desc++; + desc++; + } else { + desc = &priv->tx_ring[q][entry]; + len = skb->len; + dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto drop; + } desc->ds_tagl = cpu_to_le16(len); desc->dptr = cpu_to_le32(dma_addr);
@@@ -1549,9 -1574,11 +1575,11 @@@ if (q == RAVB_NC) { ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC); if (!ts_skb) { - desc--; - dma_unmap_single(ndev->dev.parent, dma_addr, len, - DMA_TO_DEVICE); + if (num_tx_desc > 1) { + desc--; + dma_unmap_single(ndev->dev.parent, dma_addr, + len, DMA_TO_DEVICE); + } goto unmap; } ts_skb->skb = skb; @@@ -1568,15 -1595,18 +1596,18 @@@ skb_tx_timestamp(skb); /* Descriptor type must be set after all the above writes */ dma_wmb(); - desc->die_dt = DT_FEND; - desc--; - desc->die_dt = DT_FSTART; - + if (num_tx_desc > 1) { + desc->die_dt = DT_FEND; + desc--; + desc->die_dt = DT_FSTART; + } else { + desc->die_dt = DT_FSINGLE; + } ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
- priv->cur_tx[q] += NUM_TX_DESC; + priv->cur_tx[q] += num_tx_desc; if (priv->cur_tx[q] - priv->dirty_tx[q] > - (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && + (priv->num_tx_ring[q] - 1) * num_tx_desc && !ravb_tx_free(ndev, q, true)) netif_stop_subqueue(ndev, q);
@@@ -1590,7 -1620,7 +1621,7 @@@ unmap le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE); drop: dev_kfree_skb_any(skb); - priv->tx_skb[q][entry / NUM_TX_DESC] = NULL; + priv->tx_skb[q][entry / num_tx_desc] = NULL; goto exit; }
@@@ -2076,6 -2106,9 +2107,9 @@@ static int ravb_probe(struct platform_d ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); ndev->min_mtu = ETH_MIN_MTU;
+ priv->num_tx_desc = chip_id == RCAR_GEN2 ? + NUM_TX_DESC_GEN2 : NUM_TX_DESC_GEN3; + /* Set function */ ndev->netdev_ops = &ravb_netdev_ops; ndev->ethtool_ops = &ravb_ethtool_ops; diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 75896d6ba6e2,3715a0a4af3c..076a8be18d67 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@@ -148,14 -148,12 +148,14 @@@ static void stmmac_verify_args(void static void stmmac_disable_all_queues(struct stmmac_priv *priv) { u32 rx_queues_cnt = priv->plat->rx_queues_to_use; + u32 tx_queues_cnt = priv->plat->tx_queues_to_use; + u32 maxq = max(rx_queues_cnt, tx_queues_cnt); u32 queue;
- for (queue = 0; queue < rx_queues_cnt; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + for (queue = 0; queue < maxq; queue++) { + struct stmmac_channel *ch = &priv->channel[queue];
- napi_disable(&rx_q->napi); + napi_disable(&ch->napi); } }
@@@ -166,14 -164,12 +166,14 @@@ static void stmmac_enable_all_queues(struct stmmac_priv *priv) { u32 rx_queues_cnt = priv->plat->rx_queues_to_use; + u32 tx_queues_cnt = priv->plat->tx_queues_to_use; + u32 maxq = max(rx_queues_cnt, tx_queues_cnt); u32 queue;
- for (queue = 0; queue < rx_queues_cnt; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + for (queue = 0; queue < maxq; queue++) { + struct stmmac_channel *ch = &priv->channel[queue];
- napi_enable(&rx_q->napi); + napi_enable(&ch->napi); } }
@@@ -991,17 -987,20 +991,20 @@@ static int stmmac_init_phy(struct net_d if ((interface == PHY_INTERFACE_MODE_MII) || (interface == PHY_INTERFACE_MODE_RMII) || (max_speed < 1000 && max_speed > 0)) - phydev->advertising &= ~(SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full); + phy_set_max_speed(phydev, SPEED_100);
/* * Half-duplex mode not supported with multiqueue * half-duplex can only works with single queue */ - if (tx_cnt > 1) - phydev->supported &= ~(SUPPORTED_1000baseT_Half | - SUPPORTED_100baseT_Half | - SUPPORTED_10baseT_Half); + if (tx_cnt > 1) { + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_100baseT_Half_BIT); + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + }
/* * Broken HW is sometimes missing the pull-up resistor on the @@@ -1847,18 -1846,18 +1850,18 @@@ static void stmmac_dma_operation_mode(s * @queue: TX queue index * Description: it reclaims the transmit resources after transmission completes. */ -static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) +static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; unsigned int bytes_compl = 0, pkts_compl = 0; - unsigned int entry; + unsigned int entry, count = 0;
- netif_tx_lock(priv->dev); + __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
priv->xstats.tx_clean++;
entry = tx_q->dirty_tx; - while (entry != tx_q->cur_tx) { + while ((entry != tx_q->cur_tx) && (count < budget)) { struct sk_buff *skb = tx_q->tx_skbuff[entry]; struct dma_desc *p; int status; @@@ -1874,8 -1873,6 +1877,8 @@@ if (unlikely(status & tx_dma_own)) break;
+ count++; + /* Make sure descriptor fields are read after reading * the own bit. */ @@@ -1943,10 -1940,7 +1946,10 @@@ stmmac_enable_eee_mode(priv); mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); } - netif_tx_unlock(priv->dev); + + __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); + + return count; }
/** @@@ -2029,33 -2023,6 +2032,33 @@@ static bool stmmac_safety_feat_interrup return false; }
+static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan) +{ + int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, + &priv->xstats, chan); + struct stmmac_channel *ch = &priv->channel[chan]; + bool needs_work = false; + + if ((status & handle_rx) && ch->has_rx) { + needs_work = true; + } else { + status &= ~handle_rx; + } + + if ((status & handle_tx) && ch->has_tx) { + needs_work = true; + } else { + status &= ~handle_tx; + } + + if (needs_work && napi_schedule_prep(&ch->napi)) { + stmmac_disable_dma_irq(priv, priv->ioaddr, chan); + __napi_schedule(&ch->napi); + } + + return status; +} + /** * stmmac_dma_interrupt - DMA ISR * @priv: driver private structure @@@ -2070,14 -2037,57 +2073,14 @@@ static void stmmac_dma_interrupt(struc u32 channels_to_check = tx_channel_count > rx_channel_count ? tx_channel_count : rx_channel_count; u32 chan; - bool poll_scheduled = false; int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
/* Make sure we never check beyond our status buffer. */ if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) channels_to_check = ARRAY_SIZE(status);
- /* Each DMA channel can be used for rx and tx simultaneously, yet - * napi_struct is embedded in struct stmmac_rx_queue rather than in a - * stmmac_channel struct. - * Because of this, stmmac_poll currently checks (and possibly wakes) - * all tx queues rather than just a single tx queue. - */ for (chan = 0; chan < channels_to_check; chan++) - status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr, - &priv->xstats, chan); - - for (chan = 0; chan < rx_channel_count; chan++) { - if (likely(status[chan] & handle_rx)) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; - - if (likely(napi_schedule_prep(&rx_q->napi))) { - stmmac_disable_dma_irq(priv, priv->ioaddr, chan); - __napi_schedule(&rx_q->napi); - poll_scheduled = true; - } - } - } - - /* If we scheduled poll, we already know that tx queues will be checked. - * If we didn't schedule poll, see if any DMA channel (used by tx) has a - * completed transmission, if so, call stmmac_poll (once). - */ - if (!poll_scheduled) { - for (chan = 0; chan < tx_channel_count; chan++) { - if (status[chan] & handle_tx) { - /* It doesn't matter what rx queue we choose - * here. We use 0 since it always exists. - */ - struct stmmac_rx_queue *rx_q = - &priv->rx_queue[0]; - - if (likely(napi_schedule_prep(&rx_q->napi))) { - stmmac_disable_dma_irq(priv, - priv->ioaddr, chan); - __napi_schedule(&rx_q->napi); - } - break; - } - } - } + status[chan] = stmmac_napi_check(priv, chan);
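stmmac_napi_check() above uses the standard two-step NAPI scheduling handshake. A generic sketch of that pattern, assuming nothing stmmac-specific (the interrupt-masking helper is a placeholder, not a real callback):

#include <linux/netdevice.h>

static void example_mask_device_irq(void)
{
	/* placeholder: mask the device's RX/TX interrupt source here */
}

/* napi_schedule_prep() atomically claims the NAPI instance, so the
 * device interrupt is masked exactly once before __napi_schedule()
 * queues the poll routine. */
static void example_schedule_napi(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		example_mask_device_irq();
		__napi_schedule(napi);
	}
}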
for (chan = 0; chan < tx_channel_count; chan++) { if (unlikely(status[chan] & tx_hard_error_bump_tc)) { @@@ -2213,7 -2223,8 +2216,7 @@@ static int stmmac_init_dma_engine(struc stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, tx_q->dma_tx_phy, chan);
- tx_q->tx_tail_addr = tx_q->dma_tx_phy + - (DMA_TX_SIZE * sizeof(struct dma_desc)); + tx_q->tx_tail_addr = tx_q->dma_tx_phy; stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, chan); } @@@ -2225,13 -2236,6 +2228,13 @@@ return ret; }
+static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + + mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer)); +} + /** * stmmac_tx_timer - mitigation sw timer for tx. * @data: data pointer @@@ -2240,14 -2244,13 +2243,14 @@@ */ static void stmmac_tx_timer(struct timer_list *t) { - struct stmmac_priv *priv = from_timer(priv, t, txtimer); - u32 tx_queues_count = priv->plat->tx_queues_to_use; - u32 queue; + struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer); + struct stmmac_priv *priv = tx_q->priv_data; + struct stmmac_channel *ch; + + ch = &priv->channel[tx_q->queue_index];
- /* let's scan all the tx queues */ - for (queue = 0; queue < tx_queues_count; queue++) - stmmac_tx_clean(priv, queue); + if (likely(napi_schedule_prep(&ch->napi))) + __napi_schedule(&ch->napi); }
/** @@@ -2260,17 -2263,11 +2263,17 @@@ */ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) { + u32 tx_channel_count = priv->plat->tx_queues_to_use; + u32 chan; + priv->tx_coal_frames = STMMAC_TX_FRAMES; priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; - timer_setup(&priv->txtimer, stmmac_tx_timer, 0); - priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer); - add_timer(&priv->txtimer); + + for (chan = 0; chan < tx_channel_count; chan++) { + struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; + + timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0); + } }
static void stmmac_set_rings_length(struct stmmac_priv *priv) @@@ -2598,7 -2595,6 +2601,7 @@@ static void stmmac_hw_teardown(struct n static int stmmac_open(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + u32 chan; int ret;
stmmac_check_ether_addr(priv); @@@ -2695,9 -2691,7 +2698,9 @@@ irq_error if (dev->phydev) phy_stop(dev->phydev);
- del_timer_sync(&priv->txtimer); + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + del_timer_sync(&priv->tx_queue[chan].txtimer); + stmmac_hw_teardown(dev); init_error: free_dma_desc_resources(priv); @@@ -2717,7 -2711,6 +2720,7 @@@ dma_desc_error static int stmmac_release(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + u32 chan;
if (priv->eee_enabled) del_timer_sync(&priv->eee_ctrl_timer); @@@ -2732,8 -2725,7 +2735,8 @@@
stmmac_disable_all_queues(priv);
- del_timer_sync(&priv->txtimer); + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + del_timer_sync(&priv->tx_queue[chan].txtimer);
/* Free the IRQ lines */ free_irq(dev->irq, dev); @@@ -2947,13 -2939,14 +2950,13 @@@ static netdev_tx_t stmmac_tso_xmit(stru priv->xstats.tx_tso_nfrags += nfrags;
/* Manage tx mitigation */ - priv->tx_count_frames += nfrags + 1; - if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { - mod_timer(&priv->txtimer, - STMMAC_COAL_TIMER(priv->tx_coal_timer)); - } else { - priv->tx_count_frames = 0; + tx_q->tx_count_frames += nfrags + 1; + if (priv->tx_coal_frames <= tx_q->tx_count_frames) { stmmac_set_tx_ic(priv, desc); priv->xstats.tx_set_ic_bit++; + tx_q->tx_count_frames = 0; + } else { + stmmac_tx_timer_arm(priv, queue); }
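The TX mitigation above now keeps the frame counter per queue: a completion interrupt is requested every tx_coal_frames descriptors, otherwise the per-queue timer is armed. A hedged sketch of that decision, with illustrative names:

struct example_coal_state {
	unsigned int count_frames;
};

/* Returns true when the descriptor should request a completion
 * interrupt; otherwise the caller (re)arms the per-queue TX timer. */
static bool example_want_tx_irq(struct example_coal_state *s,
				unsigned int nfrags,
				unsigned int coal_frames)
{
	s->count_frames += nfrags + 1;
	if (s->count_frames < coal_frames)
		return false;

	s->count_frames = 0;
	return true;
}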
skb_tx_timestamp(skb); @@@ -3002,7 -2995,6 +3005,7 @@@
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
return NETDEV_TX_OK; @@@ -3157,13 -3149,14 +3160,13 @@@ static netdev_tx_t stmmac_xmit(struct s * This approach takes care about the fragments: desc is the first * element in case of no SG. */ - priv->tx_count_frames += nfrags + 1; - if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { - mod_timer(&priv->txtimer, - STMMAC_COAL_TIMER(priv->tx_coal_timer)); - } else { - priv->tx_count_frames = 0; + tx_q->tx_count_frames += nfrags + 1; + if (priv->tx_coal_frames <= tx_q->tx_count_frames) { stmmac_set_tx_ic(priv, desc); priv->xstats.tx_set_ic_bit++; + tx_q->tx_count_frames = 0; + } else { + stmmac_tx_timer_arm(priv, queue); }
skb_tx_timestamp(skb); @@@ -3209,8 -3202,6 +3212,8 @@@ netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
stmmac_enable_dma_transmission(priv, priv->ioaddr); + + tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
return NETDEV_TX_OK; @@@ -3331,7 -3322,6 +3334,7 @@@ static inline void stmmac_rx_refill(str static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) { struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + struct stmmac_channel *ch = &priv->channel[queue]; unsigned int entry = rx_q->cur_rx; int coe = priv->hw->rx_csum; unsigned int next_entry; @@@ -3504,7 -3494,7 +3507,7 @@@ else skb->ip_summed = CHECKSUM_UNNECESSARY;
- napi_gro_receive(&rx_q->napi, skb); + napi_gro_receive(&ch->napi, skb);
priv->dev->stats.rx_packets++; priv->dev->stats.rx_bytes += frame_len; @@@ -3527,33 -3517,27 +3530,33 @@@ * Description : * To look at the incoming frames and clear the tx resources. */ -static int stmmac_poll(struct napi_struct *napi, int budget) +static int stmmac_napi_poll(struct napi_struct *napi, int budget) { - struct stmmac_rx_queue *rx_q = - container_of(napi, struct stmmac_rx_queue, napi); - struct stmmac_priv *priv = rx_q->priv_data; - u32 tx_count = priv->plat->tx_queues_to_use; - u32 chan = rx_q->queue_index; - int work_done = 0; - u32 queue; + struct stmmac_channel *ch = + container_of(napi, struct stmmac_channel, napi); + struct stmmac_priv *priv = ch->priv_data; + int work_done = 0, work_rem = budget; + u32 chan = ch->index;
priv->xstats.napi_poll++;
- /* check all the queues */ - for (queue = 0; queue < tx_count; queue++) - stmmac_tx_clean(priv, queue); + if (ch->has_tx) { + int done = stmmac_tx_clean(priv, work_rem, chan);
- work_done = stmmac_rx(priv, budget, rx_q->queue_index); - if (work_done < budget) { - napi_complete_done(napi, work_done); - stmmac_enable_dma_irq(priv, priv->ioaddr, chan); + work_done += done; + work_rem -= done; + } + + if (ch->has_rx) { + int done = stmmac_rx(priv, work_rem, chan); + + work_done += done; + work_rem -= done; } + + if (work_done < budget && napi_complete_done(napi, work_done)) + stmmac_enable_dma_irq(priv, priv->ioaddr, chan); + return work_done; }
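stmmac_napi_poll() above lets TX completion and RX share one budget on a channel and only re-enables the DMA interrupt after napi_complete_done() succeeds. A generic sketch of that shape, with placeholder work functions standing in for the driver's own clean/enable routines:

#include <linux/netdevice.h>

static int example_clean_tx(struct napi_struct *napi, int budget)
{
	return 0;	/* placeholder: reclaim completed TX descriptors */
}

static int example_clean_rx(struct napi_struct *napi, int budget)
{
	return 0;	/* placeholder: process received frames, up to budget */
}

static void example_unmask_device_irq(struct napi_struct *napi)
{
	/* placeholder: re-enable the channel's DMA interrupt */
}

static int example_poll(struct napi_struct *napi, int budget)
{
	int work = 0;

	/* TX completion and RX share the budget on this channel. */
	work += example_clean_tx(napi, budget - work);
	work += example_clean_rx(napi, budget - work);

	/* Only re-enable the device interrupt once NAPI is really done. */
	if (work < budget && napi_complete_done(napi, work))
		example_unmask_device_irq(napi);

	return work;
}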
@@@ -4217,8 -4201,8 +4220,8 @@@ int stmmac_dvr_probe(struct device *dev { struct net_device *ndev = NULL; struct stmmac_priv *priv; + u32 queue, maxq; int ret = 0; - u32 queue;
ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv), MTL_MAX_TX_QUEUES, @@@ -4341,22 -4325,11 +4344,22 @@@ "Enable RX Mitigation via HW Watchdog Timer\n"); }
- for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + /* Setup channels NAPI */ + maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
- netif_napi_add(ndev, &rx_q->napi, stmmac_poll, - (8 * priv->plat->rx_queues_to_use)); + for (queue = 0; queue < maxq; queue++) { + struct stmmac_channel *ch = &priv->channel[queue]; + + ch->priv_data = priv; + ch->index = queue; + + if (queue < priv->plat->rx_queues_to_use) + ch->has_rx = true; + if (queue < priv->plat->tx_queues_to_use) + ch->has_tx = true; + + netif_napi_add(ndev, &ch->napi, stmmac_napi_poll, + NAPI_POLL_WEIGHT); }
mutex_init(&priv->lock); @@@ -4402,10 -4375,10 +4405,10 @@@ error_netdev_register priv->hw->pcs != STMMAC_PCS_RTBI) stmmac_mdio_unregister(ndev); error_mdio_register: - for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + for (queue = 0; queue < maxq; queue++) { + struct stmmac_channel *ch = &priv->channel[queue];
- netif_napi_del(&rx_q->napi); + netif_napi_del(&ch->napi); } error_hw_init: destroy_workqueue(priv->wq); diff --combined drivers/net/tun.c index e2648b5a3861,2a2cd35853b7..3eb88b7147f0 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@@ -113,7 -113,6 +113,6 @@@ do { } while (0) #endif
- #define TUN_HEADROOM 256 #define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
/* TUN device flags */ @@@ -869,6 -868,9 +868,9 @@@ static int tun_attach(struct tun_struc tun_napi_init(tun, tfile, napi); }
+ if (rtnl_dereference(tun->xdp_prog)) + sock_set_flag(&tfile->sk, SOCK_XDP); + tun_set_real_num_queues(tun);
/* device is allowed to go away first, so no need to hold extra @@@ -1153,6 -1155,43 +1155,6 @@@ static netdev_features_t tun_net_fix_fe
return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void tun_poll_controller(struct net_device *dev) -{ - /* - * Tun only receives frames when: - * 1) the char device endpoint gets data from user space - * 2) the tun socket gets a sendmsg call from user space - * If NAPI is not enabled, since both of those are synchronous - * operations, we are guaranteed never to have pending data when we poll - * for it so there is nothing to do here but return. - * We need this though so netpoll recognizes us as an interface that - * supports polling, which enables bridge devices in virt setups to - * still use netconsole - * If NAPI is enabled, however, we need to schedule polling for all - * queues unless we are using napi_gro_frags(), which we call in - * process context and not in NAPI context. - */ - struct tun_struct *tun = netdev_priv(dev); - - if (tun->flags & IFF_NAPI) { - struct tun_file *tfile; - int i; - - if (tun_napi_frags_enabled(tun)) - return; - - rcu_read_lock(); - for (i = 0; i < tun->numqueues; i++) { - tfile = rcu_dereference(tun->tfiles[i]); - if (tfile->napi_enabled) - napi_schedule(&tfile->napi); - } - rcu_read_unlock(); - } - return; -} -#endif
static void tun_set_headroom(struct net_device *dev, int new_hr) { @@@ -1204,13 -1243,29 +1206,29 @@@ static int tun_xdp_set(struct net_devic struct netlink_ext_ack *extack) { struct tun_struct *tun = netdev_priv(dev); + struct tun_file *tfile; struct bpf_prog *old_prog; + int i;
old_prog = rtnl_dereference(tun->xdp_prog); rcu_assign_pointer(tun->xdp_prog, prog); if (old_prog) bpf_prog_put(old_prog);
+ for (i = 0; i < tun->numqueues; i++) { + tfile = rtnl_dereference(tun->tfiles[i]); + if (prog) + sock_set_flag(&tfile->sk, SOCK_XDP); + else + sock_reset_flag(&tfile->sk, SOCK_XDP); + } + list_for_each_entry(tfile, &tun->disabled, next) { + if (prog) + sock_set_flag(&tfile->sk, SOCK_XDP); + else + sock_reset_flag(&tfile->sk, SOCK_XDP); + } + return 0; }
@@@ -1246,6 -1301,9 +1264,6 @@@ static const struct net_device_ops tun_ .ndo_start_xmit = tun_net_xmit, .ndo_fix_features = tun_net_fix_features, .ndo_select_queue = tun_select_queue, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = tun_poll_controller, -#endif .ndo_set_rx_headroom = tun_set_headroom, .ndo_get_stats64 = tun_net_get_stats64, }; @@@ -1325,6 -1383,9 +1343,6 @@@ static const struct net_device_ops tap_ .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_select_queue = tun_select_queue, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = tun_poll_controller, -#endif .ndo_features_check = passthru_features_check, .ndo_set_rx_headroom = tun_set_headroom, .ndo_get_stats64 = tun_net_get_stats64, @@@ -1574,6 -1635,55 +1592,55 @@@ static bool tun_can_build_skb(struct tu return true; }
+ static struct sk_buff *__tun_build_skb(struct page_frag *alloc_frag, char *buf, + int buflen, int len, int pad) + { + struct sk_buff *skb = build_skb(buf, buflen); + + if (!skb) + return ERR_PTR(-ENOMEM); + + skb_reserve(skb, pad); + skb_put(skb, len); + + get_page(alloc_frag->page); + alloc_frag->offset += buflen; + + return skb; + } + + static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog, + struct xdp_buff *xdp, u32 act) + { + int err; + + switch (act) { + case XDP_REDIRECT: + err = xdp_do_redirect(tun->dev, xdp, xdp_prog); + if (err) + return err; + break; + case XDP_TX: + err = tun_xdp_tx(tun->dev, xdp); + if (err < 0) + return err; + break; + case XDP_PASS: + break; + default: + bpf_warn_invalid_xdp_action(act); + /* fall through */ + case XDP_ABORTED: + trace_xdp_exception(tun->dev, xdp_prog, act); + /* fall through */ + case XDP_DROP: + this_cpu_inc(tun->pcpu_stats->rx_dropped); + break; + } + + return act; + } + static struct sk_buff *tun_build_skb(struct tun_struct *tun, struct tun_file *tfile, struct iov_iter *from, @@@ -1581,18 -1691,17 +1648,17 @@@ int len, int *skb_xdp) { struct page_frag *alloc_frag = ¤t->task_frag; - struct sk_buff *skb; struct bpf_prog *xdp_prog; int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - unsigned int delta = 0; char *buf; size_t copied; - int err, pad = TUN_RX_PAD; + int pad = TUN_RX_PAD; + int err = 0;
rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) - pad += TUN_HEADROOM; + pad += XDP_PACKET_HEADROOM; buflen += SKB_DATA_ALIGN(len + pad); rcu_read_unlock();
@@@ -1611,17 -1720,18 +1677,18 @@@ * of xdp_prog above, this should be rare and for simplicity * we do XDP on skb in case the headroom is not enough. */ - if (hdr->gso_type || !xdp_prog) + if (hdr->gso_type || !xdp_prog) { *skb_xdp = 1; - else - *skb_xdp = 0; + return __tun_build_skb(alloc_frag, buf, buflen, len, pad); + } + + *skb_xdp = 0;
local_bh_disable(); rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); - if (xdp_prog && !*skb_xdp) { + if (xdp_prog) { struct xdp_buff xdp; - void *orig_data; u32 act;
xdp.data_hard_start = buf; @@@ -1629,66 -1739,33 +1696,33 @@@ xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + len; xdp.rxq = &tfile->xdp_rxq; - orig_data = xdp.data; - act = bpf_prog_run_xdp(xdp_prog, &xdp);
- switch (act) { - case XDP_REDIRECT: - get_page(alloc_frag->page); - alloc_frag->offset += buflen; - err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); - xdp_do_flush_map(); - if (err) - goto err_redirect; - rcu_read_unlock(); - local_bh_enable(); - return NULL; - case XDP_TX: + act = bpf_prog_run_xdp(xdp_prog, &xdp); + if (act == XDP_REDIRECT || act == XDP_TX) { get_page(alloc_frag->page); alloc_frag->offset += buflen; - if (tun_xdp_tx(tun->dev, &xdp) < 0) - goto err_redirect; - rcu_read_unlock(); - local_bh_enable(); - return NULL; - case XDP_PASS: - delta = orig_data - xdp.data; - len = xdp.data_end - xdp.data; - break; - default: - bpf_warn_invalid_xdp_action(act); - /* fall through */ - case XDP_ABORTED: - trace_xdp_exception(tun->dev, xdp_prog, act); - /* fall through */ - case XDP_DROP: - goto err_xdp; } - } + err = tun_xdp_act(tun, xdp_prog, &xdp, act); + if (err < 0) + goto err_xdp; + if (err == XDP_REDIRECT) + xdp_do_flush_map(); + if (err != XDP_PASS) + goto out;
- skb = build_skb(buf, buflen); - if (!skb) { - rcu_read_unlock(); - local_bh_enable(); - return ERR_PTR(-ENOMEM); + pad = xdp.data - xdp.data_hard_start; + len = xdp.data_end - xdp.data; } - - skb_reserve(skb, pad - delta); - skb_put(skb, len); - get_page(alloc_frag->page); - alloc_frag->offset += buflen; - rcu_read_unlock(); local_bh_enable();
- return skb; + return __tun_build_skb(alloc_frag, buf, buflen, len, pad);
- err_redirect: - put_page(alloc_frag->page); err_xdp: + put_page(alloc_frag->page); + out: rcu_read_unlock(); local_bh_enable(); - this_cpu_inc(tun->pcpu_stats->rx_dropped); return NULL; }
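tun_xdp_act() above centralizes the XDP verdict handling that used to be open-coded in tun_build_skb(). A hedged sketch of that kind of verdict switch for a generic driver RX path (example_xmit_xdp() is a placeholder for the driver's own XDP transmit hook, and the return-value convention is illustrative):

#include <linux/filter.h>
#include <linux/netdevice.h>
#include <trace/events/xdp.h>

static int example_xmit_xdp(struct net_device *dev, struct xdp_buff *xdp)
{
	return 0;	/* placeholder: queue the frame for transmission */
}

/* Returns 0 when the caller should keep building an skb (XDP_PASS),
 * 1 when the buffer was consumed (redirect/TX), -1 when it must be
 * dropped and counted by the caller. */
static int example_handle_xdp(struct net_device *dev, struct bpf_prog *prog,
			      struct xdp_buff *xdp)
{
	u32 act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
		return 0;
	case XDP_REDIRECT:
		if (xdp_do_redirect(dev, xdp, prog) < 0)
			return -1;	/* redirect failed, drop */
		return 1;
	case XDP_TX:
		if (example_xmit_xdp(dev, xdp) < 0)
			return -1;
		return 1;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(dev, prog, act);
		/* fall through */
	case XDP_DROP:
		return -1;
	}
}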
@@@ -2349,18 -2426,133 +2383,133 @@@ static void tun_sock_write_space(struc kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); }
+ static int tun_xdp_one(struct tun_struct *tun, + struct tun_file *tfile, + struct xdp_buff *xdp, int *flush) + { + struct tun_xdp_hdr *hdr = xdp->data_hard_start; + struct virtio_net_hdr *gso = &hdr->gso; + struct tun_pcpu_stats *stats; + struct bpf_prog *xdp_prog; + struct sk_buff *skb = NULL; + u32 rxhash = 0, act; + int buflen = hdr->buflen; + int err = 0; + bool skb_xdp = false; + + xdp_prog = rcu_dereference(tun->xdp_prog); + if (xdp_prog) { + if (gso->gso_type) { + skb_xdp = true; + goto build; + } + xdp_set_data_meta_invalid(xdp); + xdp->rxq = &tfile->xdp_rxq; + + act = bpf_prog_run_xdp(xdp_prog, xdp); + err = tun_xdp_act(tun, xdp_prog, xdp, act); + if (err < 0) { + put_page(virt_to_head_page(xdp->data)); + return err; + } + + switch (err) { + case XDP_REDIRECT: + *flush = true; + /* fall through */ + case XDP_TX: + return 0; + case XDP_PASS: + break; + default: + put_page(virt_to_head_page(xdp->data)); + return 0; + } + } + + build: + skb = build_skb(xdp->data_hard_start, buflen); + if (!skb) { + err = -ENOMEM; + goto out; + } + + skb_reserve(skb, xdp->data - xdp->data_hard_start); + skb_put(skb, xdp->data_end - xdp->data); + + if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) { + this_cpu_inc(tun->pcpu_stats->rx_frame_errors); + kfree_skb(skb); + err = -EINVAL; + goto out; + } + + skb->protocol = eth_type_trans(skb, tun->dev); + skb_reset_network_header(skb); + skb_probe_transport_header(skb, 0); + + if (skb_xdp) { + err = do_xdp_generic(xdp_prog, skb); + if (err != XDP_PASS) + goto out; + } + + if (!rcu_dereference(tun->steering_prog)) + rxhash = __skb_get_hash_symmetric(skb); + + netif_receive_skb(skb); + + stats = get_cpu_ptr(tun->pcpu_stats); + u64_stats_update_begin(&stats->syncp); + stats->rx_packets++; + stats->rx_bytes += skb->len; + u64_stats_update_end(&stats->syncp); + put_cpu_ptr(stats); + + if (rxhash) + tun_flow_update(tun, rxhash, tfile); + + out: + return err; + } + static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) { - int ret; + int ret, i; struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun = tun_get(tfile); + struct tun_msg_ctl *ctl = m->msg_control; + struct xdp_buff *xdp;
if (!tun) return -EBADFD;
- ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, + if (ctl && (ctl->type == TUN_MSG_PTR)) { + int n = ctl->num; + int flush = 0; + + local_bh_disable(); + rcu_read_lock(); + + for (i = 0; i < n; i++) { + xdp = &((struct xdp_buff *)ctl->ptr)[i]; + tun_xdp_one(tun, tfile, xdp, &flush); + } + + if (flush) + xdp_do_flush_map(); + + rcu_read_unlock(); + local_bh_enable(); + + ret = total_len; + goto out; + } + + ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter, m->msg_flags & MSG_DONTWAIT, m->msg_flags & MSG_MORE); + out: tun_put(tun); return ret; } diff --combined include/linux/mlx5/driver.h index 4321962d9b7c,ed73b51f6697..ad252b0a7acc --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@@ -163,7 -163,10 +163,7 @@@ enum mlx5_dcbx_oper_mode };
enum mlx5_dct_atomic_mode { - MLX5_ATOMIC_MODE_DCT_OFF = 20, - MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF, - MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF, - MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF, + MLX5_ATOMIC_MODE_DCT_CX = 2, };
enum { @@@ -580,10 -583,11 +580,11 @@@ struct mlx5_irq_info };
struct mlx5_fc_stats { - struct rb_root counters; - struct list_head addlist; - /* protect addlist add/splice operations */ - spinlock_t addlist_lock; + spinlock_t counters_idr_lock; /* protects counters_idr */ + struct idr counters_idr; + struct list_head counters; + struct llist_head addlist; + struct llist_head dellist;
struct workqueue_struct *wq; struct delayed_work work; @@@ -801,7 -805,7 +802,7 @@@ struct mlx5_pps };
struct mlx5_clock { - rwlock_t lock; + seqlock_t lock; struct cyclecounter cycles; struct timecounter tc; struct hwtstamp_config hwtstamp_config; diff --combined net/batman-adv/soft-interface.c index 626ddca332db,2c7d95727f90..5db5a0a4c959 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@@ -574,20 -574,15 +574,20 @@@ int batadv_softif_create_vlan(struct ba struct batadv_softif_vlan *vlan; int err;
+ spin_lock_bh(&bat_priv->softif_vlan_list_lock); + vlan = batadv_softif_vlan_get(bat_priv, vid); if (vlan) { batadv_softif_vlan_put(vlan); + spin_unlock_bh(&bat_priv->softif_vlan_list_lock); return -EEXIST; }
vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC); - if (!vlan) + if (!vlan) { + spin_unlock_bh(&bat_priv->softif_vlan_list_lock); return -ENOMEM; + }
vlan->bat_priv = bat_priv; vlan->vid = vid; @@@ -595,23 -590,17 +595,23 @@@
atomic_set(&vlan->ap_isolation, 0);
+ kref_get(&vlan->refcount); + hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list); + spin_unlock_bh(&bat_priv->softif_vlan_list_lock); + + /* batadv_sysfs_add_vlan cannot be in the spinlock section due to the + * sleeping behavior of the sysfs functions and the fs_reclaim lock + */ err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan); if (err) { - kfree(vlan); + /* ref for the function */ + batadv_softif_vlan_put(vlan); + + /* ref for the list */ + batadv_softif_vlan_put(vlan); return err; }
- spin_lock_bh(&bat_priv->softif_vlan_list_lock); - kref_get(&vlan->refcount); - hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list); - spin_unlock_bh(&bat_priv->softif_vlan_list_lock); - /* add a new TT local entry. This one will be marked with the NOPURGE * flag */ @@@ -844,7 -833,6 +844,6 @@@ static int batadv_softif_init_late(stru atomic_set(&bat_priv->frag_seqno, random_seqno);
bat_priv->primary_if = NULL; - bat_priv->num_ifaces = 0;
batadv_nc_init_bat_priv(bat_priv);
@@@ -1062,6 -1050,7 +1061,7 @@@ static void batadv_softif_init_early(st dev->needs_free_netdev = true; dev->priv_destructor = batadv_softif_free; dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL; + dev->features |= NETIF_F_LLTX; dev->priv_flags |= IFF_NO_QUEUE;
/* can't call min_mtu, because the needed variables diff --combined net/core/ethtool.c index 234a0ec2e932,9d4e56d97080..96afc55aa61e --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@@ -539,47 -539,17 +539,17 @@@ struct ethtool_link_usettings } link_modes; };
- /* Internal kernel helper to query a device ethtool_link_settings. - * - * Backward compatibility note: for compatibility with legacy drivers - * that implement only the ethtool_cmd API, this has to work with both - * drivers implementing get_link_ksettings API and drivers - * implementing get_settings API. When drivers implement get_settings - * and report ethtool_cmd deprecated fields - * (transceiver/maxrxpkt/maxtxpkt), these fields are silently ignored - * because the resulting struct ethtool_link_settings does not report them. - */ + /* Internal kernel helper to query a device ethtool_link_settings. */ int __ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *link_ksettings) { - int err; - struct ethtool_cmd cmd; - ASSERT_RTNL();
- if (dev->ethtool_ops->get_link_ksettings) { - memset(link_ksettings, 0, sizeof(*link_ksettings)); - return dev->ethtool_ops->get_link_ksettings(dev, - link_ksettings); - } - - /* driver doesn't support %ethtool_link_ksettings API. revert to - * legacy %ethtool_cmd API, unless it's not supported either. - * TODO: remove when ethtool_ops::get_settings disappears internally - */ - if (!dev->ethtool_ops->get_settings) + if (!dev->ethtool_ops->get_link_ksettings) return -EOPNOTSUPP;
- memset(&cmd, 0, sizeof(cmd)); - cmd.cmd = ETHTOOL_GSET; - err = dev->ethtool_ops->get_settings(dev, &cmd); - if (err < 0) - return err; - - /* we ignore deprecated fields transceiver/maxrxpkt/maxtxpkt - */ - convert_legacy_settings_to_link_ksettings(link_ksettings, &cmd); - return err; + memset(link_ksettings, 0, sizeof(*link_ksettings)); + return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings); } EXPORT_SYMBOL(__ethtool_get_link_ksettings);
@@@ -635,16 -605,7 +605,7 @@@ store_link_ksettings_for_user(void __us return 0; }
- /* Query device for its ethtool_link_settings. - * - * Backward compatibility note: this function must fail when driver - * does not implement ethtool::get_link_ksettings, even if legacy - * ethtool_ops::get_settings is implemented. This tells new versions - * of ethtool that they should use the legacy API %ETHTOOL_GSET for - * this driver, so that they can correctly access the ethtool_cmd - * deprecated fields (transceiver/maxrxpkt/maxtxpkt), until no driver - * implements ethtool_ops::get_settings anymore. - */ + /* Query device for its ethtool_link_settings. */ static int ethtool_get_link_ksettings(struct net_device *dev, void __user *useraddr) { @@@ -652,7 -613,6 +613,6 @@@ struct ethtool_link_ksettings link_ksettings;
ASSERT_RTNL(); - if (!dev->ethtool_ops->get_link_ksettings) return -EOPNOTSUPP;
@@@ -699,16 -659,7 +659,7 @@@ return store_link_ksettings_for_user(useraddr, &link_ksettings); }
- /* Update device ethtool_link_settings. - * - * Backward compatibility note: this function must fail when driver - * does not implement ethtool::set_link_ksettings, even if legacy - * ethtool_ops::set_settings is implemented. This tells new versions - * of ethtool that they should use the legacy API %ETHTOOL_SSET for - * this driver, so that they can correctly update the ethtool_cmd - * deprecated fields (transceiver/maxrxpkt/maxtxpkt), until no driver - * implements ethtool_ops::get_settings anymore. - */ + /* Update device ethtool_link_settings. */ static int ethtool_set_link_ksettings(struct net_device *dev, void __user *useraddr) { @@@ -746,51 -697,31 +697,31 @@@
/* Query device for its ethtool_cmd settings. * - * Backward compatibility note: for compatibility with legacy ethtool, - * this has to work with both drivers implementing get_link_ksettings - * API and drivers implementing get_settings API. When drivers - * implement get_link_ksettings and report higher link mode bits, a - * kernel warning is logged once (with name of 1st driver/device) to - * recommend user to upgrade ethtool, but the command is successful - * (only the lower link mode bits reported back to user). + * Backward compatibility note: for compatibility with legacy ethtool, this is + * now implemented via get_link_ksettings. When driver reports higher link mode + * bits, a kernel warning is logged once (with name of 1st driver/device) to + * recommend user to upgrade ethtool, but the command is successful (only the + * lower link mode bits reported back to user). Deprecated fields from + * ethtool_cmd (transceiver/maxrxpkt/maxtxpkt) are always set to zero. */ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) { + struct ethtool_link_ksettings link_ksettings; struct ethtool_cmd cmd; + int err;
ASSERT_RTNL(); + if (!dev->ethtool_ops->get_link_ksettings) + return -EOPNOTSUPP;
- if (dev->ethtool_ops->get_link_ksettings) { - /* First, use link_ksettings API if it is supported */ - int err; - struct ethtool_link_ksettings link_ksettings; - - memset(&link_ksettings, 0, sizeof(link_ksettings)); - err = dev->ethtool_ops->get_link_ksettings(dev, - &link_ksettings); - if (err < 0) - return err; - convert_link_ksettings_to_legacy_settings(&cmd, - &link_ksettings); - - /* send a sensible cmd tag back to user */ - cmd.cmd = ETHTOOL_GSET; - } else { - /* driver doesn't support %ethtool_link_ksettings - * API. revert to legacy %ethtool_cmd API, unless it's - * not supported either. - */ - int err; - - if (!dev->ethtool_ops->get_settings) - return -EOPNOTSUPP; + memset(&link_ksettings, 0, sizeof(link_ksettings)); + err = dev->ethtool_ops->get_link_ksettings(dev, &link_ksettings); + if (err < 0) + return err; + convert_link_ksettings_to_legacy_settings(&cmd, &link_ksettings);
- memset(&cmd, 0, sizeof(cmd)); - cmd.cmd = ETHTOOL_GSET; - err = dev->ethtool_ops->get_settings(dev, &cmd); - if (err < 0) - return err; - } + /* send a sensible cmd tag back to user */ + cmd.cmd = ETHTOOL_GSET;
if (copy_to_user(useraddr, &cmd, sizeof(cmd))) return -EFAULT; @@@ -800,48 -731,29 +731,29 @@@
/* Update device link settings with given ethtool_cmd. * - * Backward compatibility note: for compatibility with legacy ethtool, - * this has to work with both drivers implementing set_link_ksettings - * API and drivers implementing set_settings API. When drivers - * implement set_link_ksettings and user's request updates deprecated - * ethtool_cmd fields (transceiver/maxrxpkt/maxtxpkt), a kernel - * warning is logged once (with name of 1st driver/device) to - * recommend user to upgrade ethtool, and the request is rejected. + * Backward compatibility note: for compatibility with legacy ethtool, this is + * now always implemented via set_link_settings. When user's request updates + * deprecated ethtool_cmd fields (transceiver/maxrxpkt/maxtxpkt), a kernel + * warning is logged once (with name of 1st driver/device) to recommend user to + * upgrade ethtool, and the request is rejected. */ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr) { + struct ethtool_link_ksettings link_ksettings; struct ethtool_cmd cmd;
ASSERT_RTNL();
if (copy_from_user(&cmd, useraddr, sizeof(cmd))) return -EFAULT; - - /* first, try new %ethtool_link_ksettings API. */ - if (dev->ethtool_ops->set_link_ksettings) { - struct ethtool_link_ksettings link_ksettings; - - if (!convert_legacy_settings_to_link_ksettings(&link_ksettings, - &cmd)) - return -EINVAL; - - link_ksettings.base.cmd = ETHTOOL_SLINKSETTINGS; - link_ksettings.base.link_mode_masks_nwords - = __ETHTOOL_LINK_MODE_MASK_NU32; - return dev->ethtool_ops->set_link_ksettings(dev, - &link_ksettings); - } - - /* legacy %ethtool_cmd API */ - - /* TODO: return -EOPNOTSUPP when ethtool_ops::get_settings - * disappears internally - */ - - if (!dev->ethtool_ops->set_settings) + if (!dev->ethtool_ops->set_link_ksettings) return -EOPNOTSUPP;
- return dev->ethtool_ops->set_settings(dev, &cmd); + if (!convert_legacy_settings_to_link_ksettings(&link_ksettings, &cmd)) + return -EINVAL; + link_ksettings.base.link_mode_masks_nwords = + __ETHTOOL_LINK_MODE_MASK_NU32; + return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings); }
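With the legacy get_settings/set_settings fallbacks removed, the ETHTOOL_GSET/ETHTOOL_SSET ioctls are now serviced purely through the ksettings ops. A minimal sketch of a driver-side get_link_ksettings implementation (values are illustrative, not from any real device):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int example_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);

	cmd->base.port = PORT_TP;
	cmd->base.autoneg = AUTONEG_ENABLE;
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	return 0;
}

/* Assigned to ndev->ethtool_ops at probe time in a real driver. */
static const struct ethtool_ops example_ethtool_ops = {
	.get_link_ksettings = example_get_link_ksettings,
};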
static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, @@@ -2624,7 -2536,6 +2536,7 @@@ int dev_ethtool(struct net *net, struc case ETHTOOL_GPHYSTATS: case ETHTOOL_GTSO: case ETHTOOL_GPERMADDR: + case ETHTOOL_GUFO: case ETHTOOL_GGSO: case ETHTOOL_GGRO: case ETHTOOL_GFLAGS: diff --combined net/ipv6/addrconf.c index c63ccce6425f,bfe3ec7ecb14..a9a317322388 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@@ -997,6 -997,7 +997,7 @@@ ipv6_add_addr(struct inet6_dev *idev, s if (addr_type == IPV6_ADDR_ANY || addr_type & IPV6_ADDR_MULTICAST || (!(idev->dev->flags & IFF_LOOPBACK) && + !netif_is_l3_master(idev->dev) && addr_type & IPV6_ADDR_LOOPBACK)) return ERR_PTR(-EADDRNOTAVAIL);
@@@ -4201,6 -4202,7 +4202,6 @@@ static struct inet6_ifaddr *if6_get_fir p++; continue; } - state->offset++; return ifa; }
@@@ -4224,12 -4226,13 +4225,12 @@@ static struct inet6_ifaddr *if6_get_nex return ifa; }
+ state->offset = 0; while (++state->bucket < IN6_ADDR_HSIZE) { - state->offset = 0; hlist_for_each_entry_rcu(ifa, &inet6_addr_lst[state->bucket], addr_lst) { if (!net_eq(dev_net(ifa->idev->dev), net)) continue; - state->offset++; return ifa; } } @@@ -4489,6 -4492,7 +4490,7 @@@ static const struct nla_policy ifa_ipv6 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) }, [IFA_FLAGS] = { .len = sizeof(u32) }, [IFA_RT_PRIORITY] = { .len = sizeof(u32) }, + [IFA_TARGET_NETNSID] = { .type = NLA_S32 }, };
static int @@@ -4791,19 -4795,32 +4793,32 @@@ static inline int inet6_ifaddr_msgsize( + nla_total_size(4) /* IFA_RT_PRIORITY */; }
+ struct inet6_fill_args { + u32 portid; + u32 seq; + int event; + unsigned int flags; + int netnsid; + }; + static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa, - u32 portid, u32 seq, int event, unsigned int flags) + struct inet6_fill_args *args) { struct nlmsghdr *nlh; u32 preferred, valid;
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags); + nlh = nlmsg_put(skb, args->portid, args->seq, args->event, + sizeof(struct ifaddrmsg), args->flags); if (!nlh) return -EMSGSIZE;
put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope), ifa->idev->dev->ifindex);
+ if (args->netnsid >= 0 && + nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) + goto error; + if (!((ifa->flags&IFA_F_PERMANENT) && (ifa->prefered_lft == INFINITY_LIFE_TIME))) { preferred = ifa->prefered_lft; @@@ -4853,7 -4870,7 +4868,7 @@@ error }
static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca, - u32 portid, u32 seq, int event, u16 flags) + struct inet6_fill_args *args) { struct nlmsghdr *nlh; u8 scope = RT_SCOPE_UNIVERSE; @@@ -4862,10 -4879,15 +4877,15 @@@ if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE) scope = RT_SCOPE_SITE;
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags); + nlh = nlmsg_put(skb, args->portid, args->seq, args->event, + sizeof(struct ifaddrmsg), args->flags); if (!nlh) return -EMSGSIZE;
+ if (args->netnsid >= 0 && + nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) + return -EMSGSIZE; + put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex); if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 || put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp, @@@ -4879,7 -4901,7 +4899,7 @@@ }
static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca, - u32 portid, u32 seq, int event, unsigned int flags) + struct inet6_fill_args *args) { struct net_device *dev = fib6_info_nh_dev(ifaca->aca_rt); int ifindex = dev ? dev->ifindex : 1; @@@ -4889,10 -4911,15 +4909,15 @@@ if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE) scope = RT_SCOPE_SITE;
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags); + nlh = nlmsg_put(skb, args->portid, args->seq, args->event, + sizeof(struct ifaddrmsg), args->flags); if (!nlh) return -EMSGSIZE;
+ if (args->netnsid >= 0 && + nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) + return -EMSGSIZE; + put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex); if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 || put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp, @@@ -4914,8 -4941,14 +4939,14 @@@ enum addr_type_t /* called with rcu_read_lock() */ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb, struct netlink_callback *cb, enum addr_type_t type, - int s_ip_idx, int *p_ip_idx) + int s_ip_idx, int *p_ip_idx, int netnsid) { + struct inet6_fill_args fillargs = { + .portid = NETLINK_CB(cb->skb).portid, + .seq = cb->nlh->nlmsg_seq, + .flags = NLM_F_MULTI, + .netnsid = netnsid, + }; struct ifmcaddr6 *ifmca; struct ifacaddr6 *ifaca; int err = 1; @@@ -4925,16 -4958,13 +4956,13 @@@ switch (type) { case UNICAST_ADDR: { struct inet6_ifaddr *ifa; + fillargs.event = RTM_NEWADDR;
/* unicast address incl. temp addr */ list_for_each_entry(ifa, &idev->addr_list, if_list) { if (++ip_idx < s_ip_idx) continue; - err = inet6_fill_ifaddr(skb, ifa, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - RTM_NEWADDR, - NLM_F_MULTI); + err = inet6_fill_ifaddr(skb, ifa, &fillargs); if (err < 0) break; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); @@@ -4942,31 -4972,26 +4970,26 @@@ break; } case MULTICAST_ADDR: + fillargs.event = RTM_GETMULTICAST; + /* multicast address */ for (ifmca = idev->mc_list; ifmca; ifmca = ifmca->next, ip_idx++) { if (ip_idx < s_ip_idx) continue; - err = inet6_fill_ifmcaddr(skb, ifmca, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - RTM_GETMULTICAST, - NLM_F_MULTI); + err = inet6_fill_ifmcaddr(skb, ifmca, &fillargs); if (err < 0) break; } break; case ANYCAST_ADDR: + fillargs.event = RTM_GETANYCAST; /* anycast address */ for (ifaca = idev->ac_list; ifaca; ifaca = ifaca->aca_next, ip_idx++) { if (ip_idx < s_ip_idx) continue; - err = inet6_fill_ifacaddr(skb, ifaca, - NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, - RTM_GETANYCAST, - NLM_F_MULTI); + err = inet6_fill_ifacaddr(skb, ifaca, &fillargs); if (err < 0) break; } @@@ -4983,6 -5008,9 +5006,9 @@@ static int inet6_dump_addr(struct sk_bu enum addr_type_t type) { struct net *net = sock_net(skb->sk); + struct nlattr *tb[IFA_MAX+1]; + struct net *tgt_net = net; + int netnsid = -1; int h, s_h; int idx, ip_idx; int s_idx, s_ip_idx; @@@ -4994,11 -5022,22 +5020,22 @@@ s_idx = idx = cb->args[1]; s_ip_idx = ip_idx = cb->args[2];
+ if (nlmsg_parse(cb->nlh, sizeof(struct ifaddrmsg), tb, IFA_MAX, + ifa_ipv6_policy, NULL) >= 0) { + if (tb[IFA_TARGET_NETNSID]) { + netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]); + + tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid); + if (IS_ERR(tgt_net)) + return PTR_ERR(tgt_net); + } + } + rcu_read_lock(); - cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq; + cb->seq = atomic_read(&tgt_net->ipv6.dev_addr_genid) ^ tgt_net->dev_base_seq; for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { idx = 0; - head = &net->dev_index_head[h]; + head = &tgt_net->dev_index_head[h]; hlist_for_each_entry_rcu(dev, head, index_hlist) { if (idx < s_idx) goto cont; @@@ -5010,7 -5049,7 +5047,7 @@@ goto cont;
if (in6_dump_addrs(idev, skb, cb, type, - s_ip_idx, &ip_idx) < 0) + s_ip_idx, &ip_idx, netnsid) < 0) goto done; cont: idx++; @@@ -5021,6 -5060,8 +5058,8 @@@ done cb->args[0] = h; cb->args[1] = idx; cb->args[2] = ip_idx; + if (netnsid >= 0) + put_net(tgt_net);
return skb->len; } @@@ -5051,6 -5092,14 +5090,14 @@@ static int inet6_rtm_getaddr(struct sk_ struct netlink_ext_ack *extack) { struct net *net = sock_net(in_skb->sk); + struct inet6_fill_args fillargs = { + .portid = NETLINK_CB(in_skb).portid, + .seq = nlh->nlmsg_seq, + .event = RTM_NEWADDR, + .flags = 0, + .netnsid = -1, + }; + struct net *tgt_net = net; struct ifaddrmsg *ifm; struct nlattr *tb[IFA_MAX+1]; struct in6_addr *addr = NULL, *peer; @@@ -5064,15 -5113,24 +5111,24 @@@ if (err < 0) return err;
+ if (tb[IFA_TARGET_NETNSID]) { + fillargs.netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]); + + tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(in_skb).sk, + fillargs.netnsid); + if (IS_ERR(tgt_net)) + return PTR_ERR(tgt_net); + } + addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer); if (!addr) return -EINVAL;
ifm = nlmsg_data(nlh); if (ifm->ifa_index) - dev = dev_get_by_index(net, ifm->ifa_index); + dev = dev_get_by_index(tgt_net, ifm->ifa_index);
- ifa = ipv6_get_ifaddr(net, addr, dev, 1); + ifa = ipv6_get_ifaddr(tgt_net, addr, dev, 1); if (!ifa) { err = -EADDRNOTAVAIL; goto errout; @@@ -5084,20 -5142,22 +5140,22 @@@ goto errout_ifa; }
- err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).portid, - nlh->nlmsg_seq, RTM_NEWADDR, 0); + err = inet6_fill_ifaddr(skb, ifa, &fillargs); if (err < 0) { /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout_ifa; } - err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); + err = rtnl_unicast(skb, tgt_net, NETLINK_CB(in_skb).portid); errout_ifa: in6_ifa_put(ifa); errout: if (dev) dev_put(dev); + if (fillargs.netnsid >= 0) + put_net(tgt_net); + return err; }
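The IFA_TARGET_NETNSID handling added above follows a common pattern: resolve the target namespace from the netlink attribute, perform the lookup there, and drop the reference afterwards. A hedged sketch of that resolution step (the helper name is illustrative):

#include <linux/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/sock.h>

/* Returns the namespace to operate in. When the attribute is present,
 * rtnl_get_net_ns_capable() takes a reference that the caller must
 * release with put_net() (or it returns an ERR_PTR); otherwise the
 * caller's own namespace is returned without an extra reference. */
static struct net *example_target_net(struct sk_buff *in_skb,
				      const struct nlattr *netnsid_attr)
{
	if (!netnsid_attr)
		return sock_net(in_skb->sk);

	return rtnl_get_net_ns_capable(NETLINK_CB(in_skb).sk,
				       nla_get_s32(netnsid_attr));
}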
@@@ -5105,13 -5165,20 +5163,20 @@@ static void inet6_ifa_notify(int event { struct sk_buff *skb; struct net *net = dev_net(ifa->idev->dev); + struct inet6_fill_args fillargs = { + .portid = 0, + .seq = 0, + .event = event, + .flags = 0, + .netnsid = -1, + }; int err = -ENOBUFS;
skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC); if (!skb) goto errout;
- err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0); + err = inet6_fill_ifaddr(skb, ifa, &fillargs); if (err < 0) { /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */ WARN_ON(err == -EMSGSIZE); diff --combined net/ipv6/route.c index 826b14de7dbb,938db8ae2316..d28f83e01593 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@@ -364,14 -364,11 +364,14 @@@ EXPORT_SYMBOL(ip6_dst_alloc)
static void ip6_dst_destroy(struct dst_entry *dst) { + struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst); struct rt6_info *rt = (struct rt6_info *)dst; struct fib6_info *from; struct inet6_dev *idev;
- dst_destroy_metrics_generic(dst); + if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt)) + kfree(p); + rt6_uncached_list_del(rt);
idev = rt->rt6i_idev; @@@ -979,10 -976,6 +979,10 @@@ static void rt6_set_from(struct rt6_inf rt->rt6i_flags &= ~RTF_EXPIRES; rcu_assign_pointer(rt->from, from); dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true); + if (from->fib6_metrics != &dst_default_metrics) { + rt->dst._metrics |= DST_METRICS_REFCOUNTED; + refcount_inc(&from->fib6_metrics->refcnt); + } }
/* Caller must already hold reference to @ort */ @@@ -1000,7 -993,6 +1000,6 @@@ static void ip6_rt_copy_init(struct rt6 #ifdef CONFIG_IPV6_SUBTREES rt->rt6i_src = ort->fib6_src; #endif - rt->rt6i_prefsrc = ort->fib6_prefsrc; }
static struct fib6_node* fib6_backtrack(struct fib6_node *fn, @@@ -1454,11 -1446,6 +1453,6 @@@ static int rt6_insert_exception(struct if (ort->fib6_src.plen) src_key = &nrt->rt6i_src.addr; #endif - - /* Update rt6i_prefsrc as it could be changed - * in rt6_remove_prefsrc() - */ - nrt->rt6i_prefsrc = ort->fib6_prefsrc; /* rt6_mtu_change() might lower mtu on ort. * Only insert this exception route if its mtu * is less than ort's mtu value. @@@ -1640,25 -1627,6 +1634,6 @@@ static void rt6_update_exception_stamp_ rcu_read_unlock(); }
- static void rt6_exceptions_remove_prefsrc(struct fib6_info *rt) - { - struct rt6_exception_bucket *bucket; - struct rt6_exception *rt6_ex; - int i; - - bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, - lockdep_is_held(&rt6_exception_lock)); - - if (bucket) { - for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) { - hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) { - rt6_ex->rt6i->rt6i_prefsrc.plen = 0; - } - bucket++; - } - } - } - static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev, struct rt6_info *rt, int mtu) { @@@ -2103,7 -2071,8 +2078,8 @@@ struct dst_entry *ip6_route_output_flag { bool any_src;
- if (rt6_need_strict(&fl6->daddr)) { + if (ipv6_addr_type(&fl6->daddr) & + (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) { struct dst_entry *dst;
dst = l3mdev_link_scope_lookup(net, fl6); @@@ -3142,8 -3111,6 +3118,6 @@@ install_route rt->fib6_nh.nh_dev = dev; rt->fib6_table = table;
- cfg->fc_nlinfo.nl_net = dev_net(dev); - if (idev) in6_dev_put(idev);
@@@ -3800,8 -3767,6 +3774,6 @@@ static int fib6_remove_prefsrc(struct f spin_lock_bh(&rt6_exception_lock); /* remove prefsrc entry */ rt->fib6_prefsrc.plen = 0; - /* need to update cache as well */ - rt6_exceptions_remove_prefsrc(rt); spin_unlock_bh(&rt6_exception_lock); } return 0; diff --combined net/mac80211/tx.c index 25ba24bef8f5,c42bfa1dcd2c..e0ccee23fbcd --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@@ -214,7 -214,6 +214,7 @@@ ieee80211_tx_h_dynamic_ps(struct ieee80 { struct ieee80211_local *local = tx->local; struct ieee80211_if_managed *ifmgd; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
/* driver doesn't support power save */ if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS)) @@@ -243,9 -242,6 +243,9 @@@ if (tx->sdata->vif.type != NL80211_IFTYPE_STATION) return TX_CONTINUE;
+ if (unlikely(info->flags & IEEE80211_TX_INTFL_OFFCHAN_TX_OK)) + return TX_CONTINUE; + ifmgd = &tx->sdata->u.mgd;
/* @@@ -1253,10 -1249,18 +1253,18 @@@ static struct txq_info *ieee80211_get_t (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE)) return NULL;
- if (!ieee80211_is_data_present(hdr->frame_control)) - return NULL; - - if (sta) { + if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) { + if ((!ieee80211_is_mgmt(hdr->frame_control) || + ieee80211_is_bufferable_mmpdu(hdr->frame_control) || + vif->type == NL80211_IFTYPE_STATION) && + sta && sta->uploaded) { + /* + * This will be NULL if the driver didn't set the + * opt-in hardware flag. + */ + txq = sta->sta.txq[IEEE80211_NUM_TIDS]; + } + } else if (sta) { u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
if (!sta->uploaded) @@@ -1444,16 -1448,33 +1452,33 @@@ void ieee80211_txq_init(struct ieee8021
txqi->txq.vif = &sdata->vif;
- if (sta) { - txqi->txq.sta = &sta->sta; - sta->sta.txq[tid] = &txqi->txq; - txqi->txq.tid = tid; - txqi->txq.ac = ieee80211_ac_from_tid(tid); - } else { + if (!sta) { sdata->vif.txq = &txqi->txq; txqi->txq.tid = 0; txqi->txq.ac = IEEE80211_AC_BE; + + return; } + + if (tid == IEEE80211_NUM_TIDS) { + if (sdata->vif.type == NL80211_IFTYPE_STATION) { + /* Drivers need to opt in to the management MPDU TXQ */ + if (!ieee80211_hw_check(&sdata->local->hw, + STA_MMPDU_TXQ)) + return; + } else if (!ieee80211_hw_check(&sdata->local->hw, + BUFF_MMPDU_TXQ)) { + /* Drivers need to opt in to the bufferable MMPDU TXQ */ + return; + } + txqi->txq.ac = IEEE80211_AC_VO; + } else { + txqi->txq.ac = ieee80211_ac_from_tid(tid); + } + + txqi->txq.sta = &sta->sta; + txqi->txq.tid = tid; + sta->sta.txq[tid] = &txqi->txq; }
void ieee80211_txq_purge(struct ieee80211_local *local, @@@ -1894,7 -1915,7 +1919,7 @@@ static bool ieee80211_tx(struct ieee802 sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
if (invoke_tx_handlers_early(&tx)) - return false; + return true;
if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb)) return true; @@@ -2955,6 -2976,10 +2980,10 @@@ void ieee80211_check_fast_xmit(struct s if (!(build.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) goto out;
+ /* Key is being removed */ + if (build.key->flags & KEY_FLAG_TAINTED) + goto out; + switch (build.key->conf.cipher) { case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: @@@ -3200,6 -3225,10 +3229,10 @@@ static bool ieee80211_amsdu_aggregate(s max_amsdu_len = min_t(int, max_amsdu_len, sta->sta.max_rc_amsdu_len);
+ if (sta->sta.max_tid_amsdu_len[tid]) + max_amsdu_len = min_t(int, max_amsdu_len, + sta->sta.max_tid_amsdu_len[tid]); + spin_lock_bh(&fq->lock);
/* TODO: Ideally aggregation should be done on dequeue to remain @@@ -3232,6 -3261,9 +3265,9 @@@ if (max_frags && nfrags > max_frags) goto out;
+ if (!drv_can_aggregate_in_amsdu(local, head, skb)) + goto out; + if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head)) goto out;
@@@ -3476,13 -3508,19 +3512,19 @@@ struct sk_buff *ieee80211_tx_dequeue(st struct ieee80211_tx_info *info; struct ieee80211_tx_data tx; ieee80211_tx_result r; - struct ieee80211_vif *vif; + struct ieee80211_vif *vif = txq->vif;
spin_lock_bh(&fq->lock);
- if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags)) + if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags) || + test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags)) goto out;
+ if (vif->txqs_stopped[ieee80211_ac_from_tid(txq->tid)]) { + set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags); + goto out; + } + /* Make sure fragments stay together. */ skb = __skb_dequeue(&txqi->frags); if (skb) @@@ -3577,6 -3615,7 +3619,7 @@@ begin }
IEEE80211_SKB_CB(skb)->control.vif = vif; + out: spin_unlock_bh(&fq->lock);
@@@ -3605,13 -3644,7 +3648,7 @@@ void __ieee80211_subif_start_xmit(struc if (!IS_ERR_OR_NULL(sta)) { struct ieee80211_fast_tx *fast_tx;
- /* We need a bit of data queued to build aggregates properly, so - * instruct the TCP stack to allow more than a single ms of data - * to be queued in the stack. The value is a bit-shift of 1 - * second, so 8 is ~4ms of queued data. Only affects local TCP - * sockets. - */ - sk_pacing_shift_update(skb->sk, 8); + sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
fast_tx = rcu_dereference(sta->fast_tx);
diff --combined net/wireless/reg.c index 765dedb12361,56be68a27bb9..5ad5b9f98e8f --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@@ -847,22 -847,36 +847,36 @@@ static bool valid_regdb(const u8 *data return true; }
- static void set_wmm_rule(struct ieee80211_reg_rule *rrule, - struct fwdb_wmm_rule *wmm) - { - struct ieee80211_wmm_rule *rule = &rrule->wmm_rule; - unsigned int i; + static void set_wmm_rule(const struct fwdb_header *db, + const struct fwdb_country *country, + const struct fwdb_rule *rule, + struct ieee80211_reg_rule *rrule) + { + struct ieee80211_wmm_rule *wmm_rule = &rrule->wmm_rule; + struct fwdb_wmm_rule *wmm; + unsigned int i, wmm_ptr; + + wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2; + wmm = (void *)((u8 *)db + wmm_ptr); + + if (!valid_wmm(wmm)) { + pr_err("Invalid regulatory WMM rule %u-%u in domain %c%c\n", + be32_to_cpu(rule->start), be32_to_cpu(rule->end), + country->alpha2[0], country->alpha2[1]); + return; + }
for (i = 0; i < IEEE80211_NUM_ACS; i++) { - rule->client[i].cw_min = + wmm_rule->client[i].cw_min = ecw2cw((wmm->client[i].ecw & 0xf0) >> 4); - rule->client[i].cw_max = ecw2cw(wmm->client[i].ecw & 0x0f); - rule->client[i].aifsn = wmm->client[i].aifsn; - rule->client[i].cot = 1000 * be16_to_cpu(wmm->client[i].cot); - rule->ap[i].cw_min = ecw2cw((wmm->ap[i].ecw & 0xf0) >> 4); - rule->ap[i].cw_max = ecw2cw(wmm->ap[i].ecw & 0x0f); - rule->ap[i].aifsn = wmm->ap[i].aifsn; - rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot); + wmm_rule->client[i].cw_max = ecw2cw(wmm->client[i].ecw & 0x0f); + wmm_rule->client[i].aifsn = wmm->client[i].aifsn; + wmm_rule->client[i].cot = + 1000 * be16_to_cpu(wmm->client[i].cot); + wmm_rule->ap[i].cw_min = ecw2cw((wmm->ap[i].ecw & 0xf0) >> 4); + wmm_rule->ap[i].cw_max = ecw2cw(wmm->ap[i].ecw & 0x0f); + wmm_rule->ap[i].aifsn = wmm->ap[i].aifsn; + wmm_rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot); }
rrule->has_wmm = true; @@@ -870,7 -884,7 +884,7 @@@
static int __regdb_query_wmm(const struct fwdb_header *db, const struct fwdb_country *country, int freq, - struct ieee80211_reg_rule *rule) + struct ieee80211_reg_rule *rrule) { unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; struct fwdb_collection *coll = (void *)((u8 *)db + ptr); @@@ -879,18 -893,14 +893,14 @@@ for (i = 0; i < coll->n_rules; i++) { __be16 *rules_ptr = (void *)((u8 *)coll + ALIGN(coll->len, 2)); unsigned int rule_ptr = be16_to_cpu(rules_ptr[i]) << 2; - struct fwdb_rule *rrule = (void *)((u8 *)db + rule_ptr); - struct fwdb_wmm_rule *wmm; - unsigned int wmm_ptr; + struct fwdb_rule *rule = (void *)((u8 *)db + rule_ptr);
- if (rrule->len < offsetofend(struct fwdb_rule, wmm_ptr)) + if (rule->len < offsetofend(struct fwdb_rule, wmm_ptr)) continue;
- if (freq >= KHZ_TO_MHZ(be32_to_cpu(rrule->start)) && - freq <= KHZ_TO_MHZ(be32_to_cpu(rrule->end))) { - wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2; - wmm = (void *)((u8 *)db + wmm_ptr); - set_wmm_rule(rule, wmm); + if (freq >= KHZ_TO_MHZ(be32_to_cpu(rule->start)) && + freq <= KHZ_TO_MHZ(be32_to_cpu(rule->end))) { + set_wmm_rule(db, country, rule, rrule); return 0; } } @@@ -972,12 -982,8 +982,8 @@@ static int regdb_query_country(const st if (rule->len >= offsetofend(struct fwdb_rule, cac_timeout)) rrule->dfs_cac_ms = 1000 * be16_to_cpu(rule->cac_timeout); - if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) { - u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2; - struct fwdb_wmm_rule *wmm = (void *)((u8 *)db + wmm_ptr); - - set_wmm_rule(rrule, wmm); - } + if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) + set_wmm_rule(db, country, rule, rrule); }
return reg_schedule_apply(regdom); @@@ -2867,7 -2873,6 +2873,7 @@@ static int regulatory_hint_core(const c request->alpha2[0] = alpha2[0]; request->alpha2[1] = alpha2[1]; request->initiator = NL80211_REGDOM_SET_BY_CORE; + request->wiphy_idx = WIPHY_IDX_INVALID;
queue_regulatory_request(request);
@@@ -3185,13 -3190,59 +3191,59 @@@ static void restore_regulatory_settings schedule_work(&reg_work); }
+ static bool is_wiphy_all_set_reg_flag(enum ieee80211_regulatory_flags flag) + { + struct cfg80211_registered_device *rdev; + struct wireless_dev *wdev; + + list_for_each_entry(rdev, &cfg80211_rdev_list, list) { + list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { + wdev_lock(wdev); + if (!(wdev->wiphy->regulatory_flags & flag)) { + wdev_unlock(wdev); + return false; + } + wdev_unlock(wdev); + } + } + + return true; + } + void regulatory_hint_disconnect(void) { + /* Restore of regulatory settings is not required when wiphy(s) + * ignore IE from connected access point but clearance of beacon hints + * is required when wiphy(s) supports beacon hints. + */ + if (is_wiphy_all_set_reg_flag(REGULATORY_COUNTRY_IE_IGNORE)) { + struct reg_beacon *reg_beacon, *btmp; + + if (is_wiphy_all_set_reg_flag(REGULATORY_DISABLE_BEACON_HINTS)) + return; + + spin_lock_bh(&reg_pending_beacons_lock); + list_for_each_entry_safe(reg_beacon, btmp, + &reg_pending_beacons, list) { + list_del(&reg_beacon->list); + kfree(reg_beacon); + } + spin_unlock_bh(&reg_pending_beacons_lock); + + list_for_each_entry_safe(reg_beacon, btmp, + &reg_beacon_list, list) { + list_del(&reg_beacon->list); + kfree(reg_beacon); + } + + return; + } + pr_debug("All devices are disconnected, going to restore regulatory settings\n"); restore_regulatory_settings(false); }
- static bool freq_is_chan_12_13_14(u16 freq) + static bool freq_is_chan_12_13_14(u32 freq) { if (freq == ieee80211_channel_to_frequency(12, NL80211_BAND_2GHZ) || freq == ieee80211_channel_to_frequency(13, NL80211_BAND_2GHZ) || diff --combined net/xfrm/xfrm_output.c index 261995d37ced,2d42cb0c94b8..4ae87c5ce2e3 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@@ -100,10 -100,6 +100,10 @@@ static int xfrm_output_one(struct sk_bu spin_unlock_bh(&x->lock);
skb_dst_force(skb); + if (!skb_dst(skb)) { + XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR); + goto error_nolock; + }
if (xfrm_offload(skb)) { x->type_offload->encap(x, skb); @@@ -193,7 -189,7 +193,7 @@@ static int xfrm_output_gso(struct net * struct sk_buff *nskb = segs->next; int err;
- segs->next = NULL; + skb_mark_not_on_list(segs); err = xfrm_output2(net, sk, segs);
if (unlikely(err)) {