The following commit has been merged in the master branch:

commit 476a063be586184af23825a64525816a97e4896b
Merge: db7633920da31cab07e958cf8eaae06a50e67861 5678cb3c96eeea907f5319a3644bbada26508e58
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Fri Sep 21 10:24:09 2018 +1000

    Merge remote-tracking branch 'net-next/master'
diff --combined Documentation/ABI/testing/sysfs-class-net
index ec2232f6a949,e2e0fe553ad8..664a8f6a634f
--- a/Documentation/ABI/testing/sysfs-class-net
+++ b/Documentation/ABI/testing/sysfs-class-net
@@@ -91,24 -91,6 +91,24 @@@ Description
 		stacked (e.g: VLAN interfaces) but still have the same MAC
 		address as their parent device.
+What:		/sys/class/net/<iface>/dev_port
+Date:		February 2014
+KernelVersion:	3.15
+Contact:	netdev@vger.kernel.org
+Description:
+		Indicates the port number of this network device, formatted
+		as a decimal value. Some NICs have multiple independent ports
+		on the same PCI bus, device and function. This attribute allows
+		userspace to distinguish the respective interfaces.
+
+		Note: some device drivers started to use 'dev_id' for this
+		purpose since long before 3.15 and have not adopted the new
+		attribute ever since. To query the port number, some tools look
+		exclusively at 'dev_port', while others only consult 'dev_id'.
+		If a network device has multiple client adapter ports as
+		described in the previous paragraph and does not set this
+		attribute to its port number, it's a kernel bug.
+
 What:		/sys/class/net/<iface>/dormant
 Date:		March 2006
 KernelVersion:	2.6.17
@@@ -135,7 -117,7 +135,7 @@@ Description
 		full: full duplex
 		Note: This attribute is only valid for interfaces that implement
-		the ethtool get_settings method (mostly Ethernet).
+		the ethtool get_link_ksettings method (mostly Ethernet).
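[Editorial aside, not part of the patch: the 'dev_port'/'dev_id' lookup order
described in the new ABI entry above is something tools must implement
themselves. A minimal userspace sketch, assuming an example interface name
of "eth0":]

#include <stdio.h>

/* Read one integer-valued sysfs attribute; the %li conversion accepts
 * both the decimal 'dev_port' format and the 0x-prefixed 'dev_id' one.
 */
static int read_sysfs_long(const char *path, long *val)
{
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	if (fscanf(f, "%li", val) != 1) {
		fclose(f);
		return -1;
	}
	fclose(f);
	return 0;
}

int main(void)
{
	long port;

	/* Prefer 'dev_port'; fall back to 'dev_id' for older drivers. */
	if (read_sysfs_long("/sys/class/net/eth0/dev_port", &port) &&
	    read_sysfs_long("/sys/class/net/eth0/dev_id", &port))
		return 1;
	printf("port: %ld\n", port);
	return 0;
}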
What:		/sys/class/net/<iface>/flags
Date:		April 2005
@@@ -242,7 -224,7 +242,7 @@@ Description
 		an integer representing the link speed in Mbits/sec.
 		Note: this attribute is only valid for interfaces that implement
-		the ethtool get_settings method (mostly Ethernet ).
+		the ethtool get_link_ksettings method (mostly Ethernet).
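[Editorial aside, not part of the patch: a companion sketch reading 'speed'
and 'duplex'; as both notes above say, the values are only meaningful when
the driver implements the ethtool get_link_ksettings method, and "eth0" is
again just an example name.]

#include <stdio.h>

int main(void)
{
	char duplex[16] = "unknown";
	long speed = -1;
	FILE *f;

	f = fopen("/sys/class/net/eth0/speed", "r");
	if (f) {
		if (fscanf(f, "%ld", &speed) != 1)
			speed = -1;	/* no get_link_ksettings support */
		fclose(f);
	}
	f = fopen("/sys/class/net/eth0/duplex", "r");
	if (f) {
		if (fscanf(f, "%15s", duplex) != 1)
			duplex[0] = '\0';
		fclose(f);
	}
	printf("speed: %ld Mb/s, duplex: %s\n", speed, duplex);
	return 0;
}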
What:		/sys/class/net/<iface>/tx_queue_len
Date:		April 2005

diff --combined MAINTAINERS
index 37c38eb55c6f,7233a9ed0f5b..3621d097d2fe
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -840,7 -840,7 +840,7 @@@ ANALOG DEVICES INC ADGS1408 DRIVE
M:	Mircea Caprioru <mircea.caprioru@analog.com>
S:	Supported
F:	drivers/mux/adgs1408.c
-F:	Documentation/devicetree/bindings/mux/adgs1408.txt
+F:	Documentation/devicetree/bindings/mux/adi,adgs1408.txt
ANALOG DEVICES INC ADP5061 DRIVER
M:	Stefan Popa <stefan.popa@analog.com>
@@@ -1175,21 -1175,18 +1175,21 @@@ T:	git git://git.kernel.org/pub/scm/lin
ARM/ACTIONS SEMI ARCHITECTURE
M:	Andreas Färber <afaerber@suse.de>
+R:	Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
N:	owl
F:	arch/arm/mach-actions/
F:	arch/arm/boot/dts/owl-*
F:	arch/arm64/boot/dts/actions/
+F:	drivers/clk/actions/
F:	drivers/clocksource/owl-*
F:	drivers/pinctrl/actions/*
F:	drivers/soc/actions/
F:	include/dt-bindings/power/owl-*
F:	include/linux/soc/actions/
F:	Documentation/devicetree/bindings/arm/actions.txt
+F:	Documentation/devicetree/bindings/clock/actions,s900-cmu.txt
F:	Documentation/devicetree/bindings/pinctrl/actions,s900-pinctrl.txt
F:	Documentation/devicetree/bindings/power/actions,owl-sps.txt
F:	Documentation/devicetree/bindings/timer/actions,owl-timer.txt
@@@ -1706,10 -1703,9 +1706,10 @@@ S:	Odd Fixe
ARM/Microchip (AT91) SoC support
M:	Nicolas Ferre <nicolas.ferre@microchip.com>
M:	Alexandre Belloni <alexandre.belloni@bootlin.com>
+M:	Ludovic Desroches <ludovic.desroches@microchip.com>
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W:	http://www.linux4sam.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/nferre/linux-at91.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/at91/linux.git
S:	Supported
N:	at91
N:	atmel
@@@ -2475,6 -2471,42 +2475,6 @@@ F:	drivers/atm
F:	include/linux/atm*
F:	include/uapi/linux/atm*
-ATMEL AT91 / AT32 MCI DRIVER
-M:	Ludovic Desroches <ludovic.desroches@microchip.com>
-S:	Maintained
-F:	drivers/mmc/host/atmel-mci.c
-
-ATMEL AT91 SAMA5D2-Compatible Shutdown Controller
-M:	Nicolas Ferre <nicolas.ferre@microchip.com>
-S:	Supported
-F:	drivers/power/reset/at91-sama5d2_shdwc.c
-
-ATMEL Audio ALSA driver
-M:	Nicolas Ferre <nicolas.ferre@microchip.com>
-L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
-S:	Supported
-F:	sound/soc/atmel
-
-ATMEL I2C DRIVER
-M:	Ludovic Desroches <ludovic.desroches@microchip.com>
-L:	linux-i2c@vger.kernel.org
-S:	Supported
-F:	drivers/i2c/busses/i2c-at91.c
-
-ATMEL ISI DRIVER
-M:	Ludovic Desroches <ludovic.desroches@microchip.com>
-L:	linux-media@vger.kernel.org
-S:	Supported
-F:	drivers/media/platform/atmel/atmel-isi.c
-F:	include/media/atmel-isi.h
-
-ATMEL LCDFB DRIVER
-M:	Nicolas Ferre <nicolas.ferre@microchip.com>
-L:	linux-fbdev@vger.kernel.org
-S:	Maintained
-F:	drivers/video/fbdev/atmel_lcdfb.c
-F:	include/video/atmel_lcdc.h
-
ATMEL MACB ETHERNET DRIVER
M:	Nicolas Ferre <nicolas.ferre@microchip.com>
S:	Supported
@@@ -2487,6 -2519,43 +2487,6 @@@ S:	Maintaine
F:	Documentation/devicetree/bindings/input/atmel,maxtouch.txt
F:	drivers/input/touchscreen/atmel_mxt_ts.c
-ATMEL SAMA5D2 ADC DRIVER
-M:	Ludovic Desroches <ludovic.desroches@microchip.com>
-L:	linux-iio@vger.kernel.org
-S:	Supported
-F:	drivers/iio/adc/at91-sama5d2_adc.c
-
-ATMEL SDMMC DRIVER
-M:	Ludovic Desroches <ludovic.desroches@microchip.com>
-L:	linux-mmc@vger.kernel.org
-S:	Supported
-F:	drivers/mmc/host/sdhci-of-at91.c
-
-ATMEL SPI DRIVER
-M:	Nicolas Ferre <nicolas.ferre@microchip.com>
-S:	Supported
-F:	drivers/spi/spi-atmel.*
-
-ATMEL SSC DRIVER
-M:	Nicolas Ferre <nicolas.ferre@microchip.com>
-L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:	Supported
-F:	drivers/misc/atmel-ssc.c
-F:	include/linux/atmel-ssc.h
-
-ATMEL Timer Counter (TC) AND CLOCKSOURCE DRIVERS
-M:	Nicolas Ferre <nicolas.ferre@microchip.com>
-L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:	Supported
-F:	drivers/misc/atmel_tclib.c
-F:	drivers/clocksource/tcb_clksrc.c
-
-ATMEL USBA UDC DRIVER
-M:	Nicolas Ferre <nicolas.ferre@microchip.com>
-L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:	Supported
-F:	drivers/usb/gadget/udc/atmel_usba_udc.*
-
ATMEL WIRELESS DRIVER
M:	Simon Kelley <simon@thekelleys.org.uk>
L:	linux-wireless@vger.kernel.org
@@@ -2495,6 -2564,13 +2495,6 @@@ W:	http://atmelwlandriver.sourceforge.n
S:	Maintained
F:	drivers/net/wireless/atmel/atmel*
-ATMEL XDMA DRIVER
-M:	Ludovic Desroches <ludovic.desroches@microchip.com>
-L:	linux-arm-kernel@lists.infradead.org
-L:	dmaengine@vger.kernel.org
-S:	Supported
-F:	drivers/dma/at_xdmac.c
-
ATOMIC INFRASTRUCTURE
M:	Will Deacon <will.deacon@arm.com>
M:	Peter Zijlstra <peterz@infradead.org>
@@@ -4410,12 -4486,11 +4410,12 @@@ S:	Maintaine
F:	Documentation/
F:	scripts/kernel-doc
X:	Documentation/ABI/
+X:	Documentation/acpi/
X:	Documentation/devicetree/
-X:	Documentation/acpi
-X:	Documentation/power
-X:	Documentation/spi
-X:	Documentation/media
+X:	Documentation/i2c/
+X:	Documentation/media/
+X:	Documentation/power/
+X:	Documentation/spi/
T:	git git://git.lwn.net/linux.git docs-next
DOCUMENTATION/ITALIAN
@@@ -4430,7 -4505,6 +4430,7 @@@ L:	linux-media@vger.kernel.or
T:	git git://linuxtv.org/media_tree.git
S:	Maintained
F:	drivers/media/i2c/dw9714.c
+F:	Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.txt
DONGWOON DW9807 LENS VOICE COIL DRIVER
M:	Sakari Ailus <sakari.ailus@linux.intel.com>
@@@ -4438,7 -4512,6 +4438,7 @@@ L:	linux-media@vger.kernel.or
T:	git git://linuxtv.org/media_tree.git
S:	Maintained
F:	drivers/media/i2c/dw9807.c
+F:	Documentation/devicetree/bindings/media/i2c/dongwoon,dw9807-vcm.txt
DOUBLETALK DRIVER
M:	"James R. Van Zandt" <jrv@vanzandt.mv.com>
@@@ -4455,9 -4528,9 +4455,9 @@@ F:	drivers/soc/fsl/dpi
DPAA2 ETHERNET DRIVER
M:	Ioana Radulescu <ruxandra.radulescu@nxp.com>
- L:	linux-kernel@vger.kernel.org
+ L:	netdev@vger.kernel.org
S:	Maintained
- F:	drivers/staging/fsl-dpaa2/ethernet
+ F:	drivers/net/ethernet/freescale/dpaa2
DPAA2 ETHERNET SWITCH DRIVER
M:	Ioana Radulescu <ruxandra.radulescu@nxp.com>
@@@ -5441,7 -5514,7 +5441,7 @@@ W:	http://ext4.wiki.kernel.or
Q:	http://patchwork.ozlabs.org/project/linux-ext4/list/
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4.git
S:	Maintained
-F:	Documentation/filesystems/ext4.txt
+F:	Documentation/filesystems/ext4/ext4.rst
F:	fs/ext4/
Extended Verification Module (EVM)
@@@ -7275,7 -7348,7 +7275,7 @@@ F:	Documentation/networking/ixgb.tx
F:	Documentation/networking/ixgbe.txt
F:	Documentation/networking/ixgbevf.txt
F:	Documentation/networking/i40e.txt
- F:	Documentation/networking/i40evf.txt
+ F:	Documentation/networking/iavf.txt
F:	Documentation/networking/ice.txt
F:	drivers/net/ethernet/intel/
F:	drivers/net/ethernet/intel/*/
@@@ -8110,6 -8183,15 +8110,15 @@@ S:	Maintaine
F:	net/l3mdev
F:	include/net/l3mdev.h
+ LANTIQ / INTEL Ethernet drivers
+ M:	Hauke Mehrtens <hauke@hauke-m.de>
+ L:	netdev@vger.kernel.org
+ S:	Maintained
+ F:	net/dsa/tag_gswip.c
+ F:	drivers/net/ethernet/lantiq_xrx200.c
+ F:	drivers/net/dsa/lantiq_pce.h
+ F:	drivers/net/dsa/intel_gswip.c
+
LANTIQ MIPS ARCHITECTURE
M:	John Crispin <john@phrozen.org>
L:	linux-mips@linux-mips.org
@@@ -8919,10 -9001,11 +8928,10 @@@
F:	drivers/media/dvb-frontends/cxd2880/
F:	drivers/media/spi/cxd2880*
MEDIA DRIVERS FOR DIGITAL DEVICES PCIE DEVICES
-M:	Daniel Scheller <d.scheller.oss@gmail.com>
L:	linux-media@vger.kernel.org
W:	https://linuxtv.org
T:	git git://linuxtv.org/media_tree.git
-S:	Maintained
+S:	Orphan
F:	drivers/media/pci/ddbridge/*
MEDIA DRIVERS FOR FREESCALE IMX
@@@ -8937,13 -9020,6 +8946,13 @@@ F:	drivers/staging/media/imx
F:	include/linux/imx-media.h
F:	include/media/imx.h
+MEDIA DRIVER FOR FREESCALE IMX PXP
+M:	Philipp Zabel <p.zabel@pengutronix.de>
+L:	linux-media@vger.kernel.org
+T:	git git://linuxtv.org/media_tree.git
+S:	Maintained
+F:	drivers/media/platform/imx-pxp.[ch]
+
MEDIA DRIVERS FOR HELENE
M:	Abylay Ospan <aospan@netup.ru>
L:	linux-media@vger.kernel.org
@@@ -8974,10 -9050,11 +8983,10 @@@ S:	Supporte
F:	drivers/media/dvb-frontends/lnbh25*
MEDIA DRIVERS FOR MXL5XX TUNER DEMODULATORS
-M:	Daniel Scheller <d.scheller.oss@gmail.com>
L:	linux-media@vger.kernel.org
W:	https://linuxtv.org
T:	git git://linuxtv.org/media_tree.git
-S:	Maintained
+S:	Orphan
F:	drivers/media/dvb-frontends/mxl5xx*
MEDIA DRIVERS FOR NETUP PCI UNIVERSAL DVB devices
@@@ -9020,7 -9097,7 +9029,7 @@@ F:	drivers/media/platform/rcar-fcp.
F:	include/media/rcar-fcp.h
MEDIA DRIVERS FOR RENESAS - FDP1
-M:	Kieran Bingham <kieran@bingham.xyz>
+M:	Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
L:	linux-media@vger.kernel.org
L:	linux-renesas-soc@vger.kernel.org
T:	git git://linuxtv.org/media_tree.git
@@@ -9040,7 -9117,6 +9049,7 @@@ F:	drivers/media/platform/rcar-vin
MEDIA DRIVERS FOR RENESAS - VSP1
M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+M:	Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
L:	linux-media@vger.kernel.org
L:	linux-renesas-soc@vger.kernel.org
T:	git git://linuxtv.org/media_tree.git
@@@ -9049,17 -9125,19 +9058,17 @@@ F:	Documentation/devicetree/bindings/me
F:	drivers/media/platform/vsp1/
MEDIA DRIVERS FOR ST STV0910 DEMODULATOR ICs
-M:	Daniel Scheller <d.scheller.oss@gmail.com>
L:	linux-media@vger.kernel.org
W:	https://linuxtv.org
T:	git git://linuxtv.org/media_tree.git
-S:	Maintained
+S:	Orphan
F:	drivers/media/dvb-frontends/stv0910*
MEDIA DRIVERS FOR ST STV6111 TUNER ICs
-M:	Daniel Scheller <d.scheller.oss@gmail.com>
L:	linux-media@vger.kernel.org
W:	https://linuxtv.org
T:	git git://linuxtv.org/media_tree.git
-S:	Maintained
+S:	Orphan
F:	drivers/media/dvb-frontends/stv6111*
MEDIA DRIVERS FOR STM32 - DCMI
@@@ -9445,19 -9523,13 +9454,19 @@@ T:	git git://git.monstr.eu/linux-2.6-mi
S:	Supported
F:	arch/microblaze/
-MICROCHIP / ATMEL AT91 SERIAL DRIVER
+MICROCHIP AT91 SERIAL DRIVER
M:	Richard Genoud <richard.genoud@gmail.com>
S:	Maintained
F:	drivers/tty/serial/atmel_serial.c
F:	drivers/tty/serial/atmel_serial.h
-MICROCHIP / ATMEL DMA DRIVER
+MICROCHIP AUDIO ASOC DRIVERS
+M:	Codrin Ciubotariu <codrin.ciubotariu@microchip.com>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+S:	Supported
+F:	sound/soc/atmel
+
+MICROCHIP DMA DRIVER
M:	Ludovic Desroches <ludovic.desroches@microchip.com>
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L:	dmaengine@vger.kernel.org
@@@ -9465,35 -9537,27 +9474,35 @@@ S:	Supporte
F:	drivers/dma/at_hdmac.c
F:	drivers/dma/at_hdmac_regs.h
F:	include/linux/platform_data/dma-atmel.h
+F:	Documentation/devicetree/bindings/dma/atmel-dma.txt
+F:	include/dt-bindings/dma/at91.h
-MICROCHIP / ATMEL ECC DRIVER
+MICROCHIP ECC DRIVER
M:	Tudor Ambarus <tudor.ambarus@microchip.com>
L:	linux-crypto@vger.kernel.org
S:	Maintained
F:	drivers/crypto/atmel-ecc.*
-MICROCHIP / ATMEL ISC DRIVER
-M:	Songjun Wu <songjun.wu@microchip.com>
+MICROCHIP I2C DRIVER
+M:	Ludovic Desroches <ludovic.desroches@microchip.com>
+L:	linux-i2c@vger.kernel.org
+S:	Supported
+F:	drivers/i2c/busses/i2c-at91.c
+
+MICROCHIP ISC DRIVER
+M:	Eugen Hristev <eugen.hristev@microchip.com>
L:	linux-media@vger.kernel.org
S:	Supported
F:	drivers/media/platform/atmel/atmel-isc.c
F:	drivers/media/platform/atmel/atmel-isc-regs.h
F:	devicetree/bindings/media/atmel-isc.txt
-MICROCHIP / ATMEL NAND DRIVER
-M:	Josh Wu <rainyfeeling@outlook.com>
-L:	linux-mtd@lists.infradead.org
+MICROCHIP ISI DRIVER
+M:	Eugen Hristev <eugen.hristev@microchip.com>
+L:	linux-media@vger.kernel.org
S:	Supported
-F:	drivers/mtd/nand/raw/atmel/*
-F:	Documentation/devicetree/bindings/mtd/atmel-nand.txt
+F:	drivers/media/platform/atmel/atmel-isi.c
+F:	include/media/atmel-isi.h
MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
M:	Woojung Huh <Woojung.Huh@microchip.com>
@@@ -9512,72 -9576,6 +9521,72 @@@ L:	netdev@vger.kernel.or
S:	Maintained
F:	drivers/net/ethernet/microchip/lan743x_*
+MICROCHIP LCDFB DRIVER
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
+L:	linux-fbdev@vger.kernel.org
+S:	Maintained
+F:	drivers/video/fbdev/atmel_lcdfb.c
+F:	include/video/atmel_lcdc.h
+
+MICROCHIP MMC/SD/SDIO MCI DRIVER
+M:	Ludovic Desroches <ludovic.desroches@microchip.com>
+S:	Maintained
+F:	drivers/mmc/host/atmel-mci.c
+
+MICROCHIP NAND DRIVER
+M:	Tudor Ambarus <tudor.ambarus@microchip.com>
+L:	linux-mtd@lists.infradead.org
+S:	Supported
+F:	drivers/mtd/nand/raw/atmel/*
+F:	Documentation/devicetree/bindings/mtd/atmel-nand.txt
+
+MICROCHIP PWM DRIVER
+M:	Claudiu Beznea <claudiu.beznea@microchip.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:	linux-pwm@vger.kernel.org
+S:	Supported
+F:	drivers/pwm/pwm-atmel.c
+F:	Documentation/devicetree/bindings/pwm/atmel-pwm.txt
+
+MICROCHIP SAMA5D2-COMPATIBLE ADC DRIVER
+M:	Ludovic Desroches <ludovic.desroches@microchip.com>
+M:	Eugen Hristev <eugen.hristev@microchip.com>
+L:	linux-iio@vger.kernel.org
+S:	Supported
+F:	drivers/iio/adc/at91-sama5d2_adc.c
+F:	Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt
+F:	include/dt-bindings/iio/adc/at91-sama5d2_adc.h
+
+MICROCHIP SAMA5D2-COMPATIBLE SHUTDOWN CONTROLLER
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
+S:	Supported
+F:	drivers/power/reset/at91-sama5d2_shdwc.c
+
+MICROCHIP SPI DRIVER
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
+S:	Supported
+F:	drivers/spi/spi-atmel.*
+
+MICROCHIP SSC DRIVER
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Supported
+F:	drivers/misc/atmel-ssc.c
+F:	include/linux/atmel-ssc.h
+
+MICROCHIP TIMER COUNTER (TC) AND CLOCKSOURCE DRIVERS
+M:	Nicolas Ferre <nicolas.ferre@microchip.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Supported
+F:	drivers/misc/atmel_tclib.c
+F:	drivers/clocksource/tcb_clksrc.c
+
+MICROCHIP USBA UDC DRIVER
+M:	Cristian Birsan <cristian.birsan@microchip.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Supported
+F:	drivers/usb/gadget/udc/atmel_usba_udc.*
+
MICROCHIP USB251XB DRIVER
M:	Richard Leitner <richard.leitner@skidata.com>
L:	linux-usb@vger.kernel.org
@@@ -9585,13 -9583,6 +9594,13 @@@ S:	Maintaine
F:	drivers/usb/misc/usb251xb.c
F:	Documentation/devicetree/bindings/usb/usb251xb.txt
+MICROCHIP XDMA DRIVER
+M:	Ludovic Desroches <ludovic.desroches@microchip.com>
+L:	linux-arm-kernel@lists.infradead.org
+L:	dmaengine@vger.kernel.org
+S:	Supported
+F:	drivers/dma/at_xdmac.c
+
MICROSEMI MIPS SOCS
M:	Alexandre Belloni <alexandre.belloni@bootlin.com>
L:	linux-mips@linux-mips.org
@@@ -9716,19 -9707,6 +9725,19 @@@ S:	Maintaine
F:	arch/arm/boot/dts/mmp*
F:	arch/arm/mach-mmp/
+MMU GATHER AND TLB INVALIDATION
+M:	Will Deacon <will.deacon@arm.com>
+M:	"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+M:	Andrew Morton <akpm@linux-foundation.org>
+M:	Nick Piggin <npiggin@gmail.com>
+M:	Peter Zijlstra <peterz@infradead.org>
+L:	linux-arch@vger.kernel.org
+L:	linux-mm@kvack.org
+S:	Maintained
+F:	arch/*/include/asm/tlb.h
+F:	include/asm-generic/tlb.h
+F:	mm/mmu_gather.c
+
MN88472 MEDIA DRIVER
M:	Antti Palosaari <crope@iki.fi>
L:	linux-media@vger.kernel.org
@@@ -9747,6 -9725,13 +9756,6 @@@ Q:	http://patchwork.linuxtv.org/project
S:	Maintained
F:	drivers/media/dvb-frontends/mn88473*
-PCI DRIVER FOR MOBIVEIL PCIE IP
-M:	Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
-L:	linux-pci@vger.kernel.org
-S:	Supported
-F:	Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
-F:	drivers/pci/controller/pcie-mobiveil.c
-
MODULE SUPPORT
M:	Jessica Yu <jeyu@kernel.org>
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
@@@ -11161,13 -11146,6 +11170,13 @@@ F:	include/uapi/linux/switchtec_ioctl.
F:	include/linux/switchtec.h
F:	drivers/ntb/hw/mscc/
+PCI DRIVER FOR MOBIVEIL PCIE IP
+M:	Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+L:	linux-pci@vger.kernel.org
+S:	Supported
+F:	Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
+F:	drivers/pci/controller/pcie-mobiveil.c
+
PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
M:	Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
M:	Jason Cooper <jason@lakedaemon.net>
@@@ -11234,14 -11212,8 +11243,14 @@@ F:	tools/pci
PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC
M:	Russell Currey <ruscur@russell.cc>
+M:	Sam Bobroff <sbobroff@linux.ibm.com>
+M:	Oliver O'Halloran <oohall@gmail.com>
L:	linuxppc-dev@lists.ozlabs.org
S:	Supported
+F:	Documentation/PCI/pci-error-recovery.txt
+F:	drivers/pci/pcie/aer.c
+F:	drivers/pci/pcie/dpc.c
+F:	drivers/pci/pcie/err.c
F:	Documentation/powerpc/eeh-pci-error-recovery.txt
F:	arch/powerpc/kernel/eeh*.c
F:	arch/powerpc/platforms/*/eeh*.c
@@@ -13046,12 -13018,6 +13055,12 @@@ L:	linux-mmc@vger.kernel.or
S:	Maintained
F:	drivers/mmc/host/sdhci-pci-dwc-mshc.c
+SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) MICROCHIP DRIVER
+M:	Ludovic Desroches <ludovic.desroches@microchip.com>
+L:	linux-mmc@vger.kernel.org
+S:	Supported
+F:	drivers/mmc/host/sdhci-of-at91.c
+
SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) SAMSUNG DRIVER
M:	Ben Dooks <ben-linux@fluff.org>
M:	Jaehoon Chung <jh80.chung@samsung.com>
@@@ -13492,8 -13458,9 +13501,8 @@@ F:	drivers/i2c/busses/i2c-synquacer.
F:	Documentation/devicetree/bindings/i2c/i2c-synquacer.txt
SOCIONEXT UNIPHIER SOUND DRIVER
-M:	Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com>
L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
-S:	Maintained
+S:	Orphan
F:	sound/soc/uniphier/
SOEKRIS NET48XX LED SUPPORT

diff --combined drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index e1594c9df4c6,f4ba9b3f8819..749f63beddd8
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@@ -75,23 -75,17 +75,23 @@@ static int bnxt_tc_parse_redir(struct b
 	return 0;
 }
-static void bnxt_tc_parse_vlan(struct bnxt *bp,
-			       struct bnxt_tc_actions *actions,
-			       const struct tc_action *tc_act)
+static int bnxt_tc_parse_vlan(struct bnxt *bp,
+			      struct bnxt_tc_actions *actions,
+			      const struct tc_action *tc_act)
 {
-	if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
+	switch (tcf_vlan_action(tc_act)) {
+	case TCA_VLAN_ACT_POP:
 		actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
-	} else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
+		break;
+	case TCA_VLAN_ACT_PUSH:
 		actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
 		actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
 		actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
+		break;
+	default:
+		return -EOPNOTSUPP;
 	}
+	return 0;
 }
static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
@@@ -140,9 -134,7 +140,9 @@@ static int bnxt_tc_parse_actions(struc
 		/* Push/pop VLAN */
 		if (is_tcf_vlan(tc_act)) {
-			bnxt_tc_parse_vlan(bp, actions, tc_act);
+			rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
+			if (rc)
+				return rc;
 			continue;
 		}
@@@ -189,7 -181,6 +189,6 @@@ static int bnxt_tc_parse_flow(struct bn
 				  struct bnxt_tc_flow *flow)
 {
 	struct flow_dissector *dissector = tc_flow_cmd->dissector;
- 	u16 addr_type = 0;
 	/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
 	if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
@@@ -199,13 -190,6 +198,6 @@@
 		return -EOPNOTSUPP;
 	}
- 	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
- 		struct flow_dissector_key_control *key =
- 			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL);
-
- 		addr_type = key->addr_type;
- 	}
-
 	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
 		struct flow_dissector_key_basic *key =
 			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
@@@ -301,13 -285,6 +293,6 @@@
 		flow->l4_mask.icmp.code = mask->code;
 	}
- 	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
- 		struct flow_dissector_key_control *key =
- 			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL);
-
- 		addr_type = key->addr_type;
- 	}
-
 	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
 		struct flow_dissector_key_ipv4_addrs *key =
 			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);

diff --combined drivers/net/ethernet/ibm/emac/core.c
index 129f4e9f38da,5107c9450a19..760b2ad8e295
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@@ -423,7 -423,7 +423,7 @@@ static void emac_hash_mc(struct emac_in
 	const int regs = EMAC_XAHT_REGS(dev);
 	u32 *gaht_base = emac_gaht_base(dev);
- 	u32 gaht_temp[regs];
+ 	u32 gaht_temp[EMAC_XAHT_MAX_REGS];
 	struct netdev_hw_addr *ha;
 	int i;
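[Editorial aside, not part of the patch: the gaht_temp change above, paired
with the EMAC_XAHT_MAX_REGS bounds check added to emac_init_config() further
down, follows the kernel-wide removal of variable-length arrays: size the
scratch buffer for the worst case and verify the runtime count against it.
A userspace analog of the pattern, with an assumed bound of 16:]

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define XAHT_MAX_REGS 16	/* assumed worst case, standing in for EMAC_XAHT_MAX_REGS */

/* Fill 'nregs' hash registers via a fixed-size scratch buffer rather
 * than a VLA; the driver performs the equivalent bounds check once at
 * init time with WARN_ON().
 */
static void hash_regs_build(uint32_t *out, unsigned int nregs)
{
	uint32_t tmp[XAHT_MAX_REGS];	/* fixed bound, no VLA */

	assert(nregs <= XAHT_MAX_REGS);
	memset(tmp, 0, nregs * sizeof(tmp[0]));
	/* ... set the relevant hash bits in tmp here ... */
	memcpy(out, tmp, nregs * sizeof(tmp[0]));
}

int main(void)
{
	uint32_t regs[XAHT_MAX_REGS];

	hash_regs_build(regs, 8);
	return 0;
}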
@@@ -1409,7 -1409,7 +1409,7 @@@ static inline u16 emac_tx_csum(struct e
 	return 0;
 }
- static inline int emac_xmit_finish(struct emac_instance *dev, int len)
+ static inline netdev_tx_t emac_xmit_finish(struct emac_instance *dev, int len)
 {
 	struct emac_regs __iomem *p = dev->emacp;
 	struct net_device *ndev = dev->ndev;
@@@ -1436,7 -1436,7 +1436,7 @@@
 }
 /* Tx lock BH */
- static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct emac_instance *dev = netdev_priv(ndev);
 	unsigned int len = skb->len;
@@@ -1494,7 -1494,8 +1494,8 @@@ static inline int emac_xmit_split(struc
 }
 /* Tx lock BH disabled (SG version for TAH equipped EMACs) */
- static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
+ static netdev_tx_t
+ emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct emac_instance *dev = netdev_priv(ndev);
 	int nr_frags = skb_shinfo(skb)->nr_frags;
@@@ -2677,17 -2678,12 +2678,17 @@@ static int emac_init_phy(struct emac_in
 	if (of_phy_is_fixed_link(np)) {
 		int res = emac_dt_mdio_probe(dev);
-		if (!res) {
-			res = of_phy_register_fixed_link(np);
-			if (res)
-				mdiobus_unregister(dev->mii_bus);
+		if (res)
+			return res;
+
+		res = of_phy_register_fixed_link(np);
+		dev->phy_dev = of_phy_find_device(np);
+		if (res || !dev->phy_dev) {
+			mdiobus_unregister(dev->mii_bus);
+			return res ? res : -EINVAL;
 		}
-		return res;
+		emac_adjust_link(dev->ndev);
+		put_device(&dev->phy_dev->mdio.dev);
 	}
 	return 0;
 }
@@@ -2969,6 -2965,10 +2970,10 @@@ static int emac_init_config(struct emac
 		dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT;
 	}
+ 	/* This should never happen */
+ 	if (WARN_ON(EMAC_XAHT_REGS(dev) > EMAC_XAHT_MAX_REGS))
+ 		return -ENXIO;
+
 	DBG(dev, "features     : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE);
 	DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige);
 	DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige);

diff --combined drivers/net/ethernet/marvell/mvneta.c
index 2db9708f2e24,59ed63102e14..e7d7ad9a19e3
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@@ -2008,8 -2008,8 +2008,8 @@@ static int mvneta_rx_swbm(struct napi_s
 				skb_add_rx_frag(rxq->skb, frag_num, page,
 						frag_offset, frag_size,
 						PAGE_SIZE);
-				dma_unmap_single(dev->dev.parent, phys_addr,
-						 PAGE_SIZE, DMA_FROM_DEVICE);
+				dma_unmap_page(dev->dev.parent, phys_addr,
+					       PAGE_SIZE, DMA_FROM_DEVICE);
 				rxq->left_size -= frag_size;
 			}
 		} else {
@@@ -2065,10 -2065,7 +2065,7 @@@
 		/* Linux processing */
 		rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
-		if (dev->features & NETIF_F_GRO)
-			napi_gro_receive(napi, rxq->skb);
-		else
-			netif_receive_skb(rxq->skb);
+		napi_gro_receive(napi, rxq->skb);
 		/* clean uncomplete skb pointer in queue */
 		rxq->skb = NULL;
@@@ -2396,7 -2393,7 +2393,7 @@@ error
 }
 /* Main tx processing */
- static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
+ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mvneta_port *pp = netdev_priv(dev);
 	u16 txq_id = skb_get_queue_mapping(skb);
@@@ -2510,12 -2507,13 +2507,13 @@@ static void mvneta_tx_done_gbe(struct m
 {
 	struct mvneta_tx_queue *txq;
 	struct netdev_queue *nq;
+ 	int cpu = smp_processor_id();
 	while (cause_tx_done) {
 		txq = mvneta_tx_done_policy(pp, cause_tx_done);
 		nq = netdev_get_tx_queue(pp->dev, txq->id);
-		__netif_tx_lock(nq, smp_processor_id());
+		__netif_tx_lock(nq, cpu);
 		if (txq->count)
 			mvneta_txq_done(pp, txq);
@@@ -3793,9 -3791,6 +3791,6 @@@ static int mvneta_open(struct net_devic
 		goto err_free_online_hp;
 	}
-	/* In default link is down */
-	netif_carrier_off(pp->dev);
-
 	ret = mvneta_mdio_probe(pp);
 	if (ret < 0) {
 		netdev_err(dev, "cannot probe MDIO bus\n");
@@@ -4598,7 -4593,8 +4593,8 @@@ static int mvneta_probe(struct platform
 		}
 	}
-	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO;
+	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+			NETIF_F_TSO | NETIF_F_RXCSUM;
 	dev->hw_features |= dev->features;
 	dev->vlan_features |= dev->features;
 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

diff --combined drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 38cc01beea79,d30ccc515bb7..7fbb31635ccc
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@@ -82,13 -82,19 +82,19 @@@ u32 mvpp2_read(struct mvpp2 *priv, u32 
 	return readl(priv->swth_base[0] + offset);
 }
- u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
+ static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
 {
 	return readl_relaxed(priv->swth_base[0] + offset);
 }
+
+ static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
+ {
+ 	return cpu % priv->nthreads;
+ }
+
 /* These accessors should be used to access:
  *
- * - per-CPU registers, where each CPU has its own copy of the
+ * - per-thread registers, where each thread has its own copy of the
  *   register.
  *
  * MVPP2_BM_VIRT_ALLOC_REG
- u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, + static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread, u32 offset) { - return readl(priv->swth_base[cpu] + offset); + return readl(priv->swth_base[thread] + offset); }
- void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu, + static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread, u32 offset, u32 data) { - writel_relaxed(data, priv->swth_base[cpu] + offset); + writel_relaxed(data, priv->swth_base[thread] + offset); }
- static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu, + static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread, u32 offset) { - return readl_relaxed(priv->swth_base[cpu] + offset); + return readl_relaxed(priv->swth_base[thread] + offset); }
static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, @@@ -385,17 -391,17 +391,17 @@@ static void mvpp2_bm_bufs_get_addrs(str dma_addr_t *dma_addr, phys_addr_t *phys_addr) { - int cpu = get_cpu(); + unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
- *dma_addr = mvpp2_percpu_read(priv, cpu, + *dma_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); - *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG); + *phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);
if (priv->hw_version == MVPP22) { u32 val; u32 dma_addr_highbits, phys_addr_highbits;
- val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC); + val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC); dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK); phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >> MVPP22_BM_ADDR_HIGH_VIRT_SHIFT; @@@ -626,7 -632,11 +632,11 @@@ static inline void mvpp2_bm_pool_put(st dma_addr_t buf_dma_addr, phys_addr_t buf_phys_addr) { - int cpu = get_cpu(); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + unsigned long flags = 0; + + if (test_bit(thread, &port->priv->lock_map)) + spin_lock_irqsave(&port->bm_lock[thread], flags);
if (port->priv->hw_version == MVPP22) { u32 val = 0; @@@ -640,7 -650,7 +650,7 @@@ << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) & MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
- mvpp2_percpu_write_relaxed(port->priv, cpu, + mvpp2_thread_write_relaxed(port->priv, thread, MVPP22_BM_ADDR_HIGH_RLS_REG, val); }
@@@ -649,11 -659,14 +659,14 @@@ * descriptor. Instead of storing the virtual address, we * store the physical address */ - mvpp2_percpu_write_relaxed(port->priv, cpu, + mvpp2_thread_write_relaxed(port->priv, thread, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); - mvpp2_percpu_write_relaxed(port->priv, cpu, + mvpp2_thread_write_relaxed(port->priv, thread, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
+ if (test_bit(thread, &port->priv->lock_map)) + spin_unlock_irqrestore(&port->bm_lock[thread], flags); + put_cpu(); }
@@@ -886,7 -899,7 +899,7 @@@ static inline void mvpp2_qvec_interrupt MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask)); }
- /* Mask the current CPU's Rx/Tx interrupts
+ /* Mask the current thread's Rx/Tx interrupts
  * Called by on_each_cpu(), guaranteed to run with migration disabled,
  * using smp_processor_id() is OK.
  */
@@@ -894,11 -907,16 +907,16 @@@ static void mvpp2_interrupts_mask(void
 {
 	struct mvpp2_port *port = arg;
- mvpp2_percpu_write(port->priv, smp_processor_id(), + /* If the thread isn't used, don't do anything */ + if (smp_processor_id() > port->priv->nthreads) + return; + + mvpp2_thread_write(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_ISR_RX_TX_MASK_REG(port->id), 0); }
- /* Unmask the current CPU's Rx/Tx interrupts.
+ /* Unmask the current thread's Rx/Tx interrupts.
  * Called by on_each_cpu(), guaranteed to run with migration disabled,
  * using smp_processor_id() is OK.
  */
@@@ -907,12 -925,17 +925,17 @@@ static void mvpp2_interrupts_unmask(voi
 {
 	struct mvpp2_port *port = arg;
 	u32 val;
+ /* If the thread isn't used, don't do anything */ + if (smp_processor_id() > port->priv->nthreads) + return; + val = MVPP2_CAUSE_MISC_SUM_MASK | - MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); if (port->has_tx_irqs) val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
- mvpp2_percpu_write(port->priv, smp_processor_id(), + mvpp2_thread_write(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_ISR_RX_TX_MASK_REG(port->id), val); }
@@@ -928,7 -951,7 +951,7 @@@ mvpp2_shared_interrupt_mask_unmask(stru if (mask) val = 0; else - val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);
for (i = 0; i < port->nqvecs; i++) { struct mvpp2_queue_vector *v = port->qvecs + i; @@@ -936,7 -959,7 +959,7 @@@ if (v->type != MVPP2_QUEUE_VECTOR_SHARED) continue;
- mvpp2_percpu_write(port->priv, v->sw_thread_id, + mvpp2_thread_write(port->priv, v->sw_thread_id, MVPP2_ISR_RX_TX_MASK_REG(port->id), val); } } @@@ -1624,7 -1647,8 +1647,8 @@@ mvpp2_txq_next_desc_get(struct mvpp2_tx static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) { /* aggregated access - relevant TXQ number is written in TX desc */ - mvpp2_percpu_write(port->priv, smp_processor_id(), + mvpp2_thread_write(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_AGGR_TXQ_UPDATE_REG, pending); }
@@@ -1634,14 -1658,15 +1658,15 @@@ * Called only from mvpp2_tx(), so migration is disabled, using * smp_processor_id() is OK. */ - static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv, + static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port, struct mvpp2_tx_queue *aggr_txq, int num) { if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) { /* Update number of occupied aggregated Tx descriptors */ - int cpu = smp_processor_id(); - u32 val = mvpp2_read_relaxed(priv, - MVPP2_AGGR_TXQ_STATUS_REG(cpu)); + unsigned int thread = + mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + u32 val = mvpp2_read_relaxed(port->priv, + MVPP2_AGGR_TXQ_STATUS_REG(thread));
aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
@@@ -1657,16 -1682,17 +1682,17 @@@ * only by mvpp2_tx(), so migration is disabled, using * smp_processor_id() is OK. */ - static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv, + static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, int num) { + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + struct mvpp2 *priv = port->priv; u32 val; - int cpu = smp_processor_id();
val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; - mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val); + mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);
- val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG); + val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);
return val & MVPP2_TXQ_RSVD_RSLT_MASK; } @@@ -1674,12 -1700,13 +1700,13 @@@ /* Check if there are enough reserved descriptors for transmission. * If not, request chunk of reserved descriptors and check again. */ - static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv, + static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, struct mvpp2_txq_pcpu *txq_pcpu, int num) { - int req, cpu, desc_count; + int req, desc_count; + unsigned int thread;
if (txq_pcpu->reserved_num >= num) return 0; @@@ -1690,10 -1717,10 +1717,10 @@@
desc_count = 0; /* Compute total of used descriptors */ - for_each_present_cpu(cpu) { + for (thread = 0; thread < port->priv->nthreads; thread++) { struct mvpp2_txq_pcpu *txq_pcpu_aux;
- txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu); + txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread); desc_count += txq_pcpu_aux->count; desc_count += txq_pcpu_aux->reserved_num; } @@@ -1702,10 -1729,10 +1729,10 @@@ desc_count += req;
if (desc_count > - (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK))) + (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK))) return -ENOMEM;
- txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req); + txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);
/* OK, the descriptor could have been updated: check again. */ if (txq_pcpu->reserved_num < num) @@@ -1759,7 -1786,7 +1786,7 @@@ static u32 mvpp2_txq_desc_csum(int l3_o
 /* Get number of sent descriptors and decrement counter.
  * The number of sent descriptors is returned.
- * Per-CPU access
+ * Per-thread access
  *
  * Called only from mvpp2_txq_done(), called from mvpp2_tx()
  * (migration disabled) and from the TX completion tasklet (migration
@@@ -1771,7 -1798,8 +1798,8 @@@ static inline int mvpp2_txq_sent_desc_p
 	u32 val;
/* Reading status reg resets transmitted descriptor counter */ - val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(), + val = mvpp2_thread_read_relaxed(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_TXQ_SENT_REG(txq->id));
return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> @@@ -1786,10 -1814,15 +1814,15 @@@ static void mvpp2_txq_sent_counter_clea struct mvpp2_port *port = arg; int queue;
+ /* If the thread isn't used, don't do anything */ + if (smp_processor_id() > port->priv->nthreads) + return; + for (queue = 0; queue < port->ntxqs; queue++) { int id = port->txqs[queue]->id;
- mvpp2_percpu_read(port->priv, smp_processor_id(), + mvpp2_thread_read(port->priv, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()), MVPP2_TXQ_SENT_REG(id)); } } @@@ -1849,13 -1882,13 +1882,13 @@@ static void mvpp2_txp_max_tx_size_set(s static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { - int cpu = get_cpu(); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
- mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG, rxq->pkts_coal);
put_cpu(); @@@ -1865,15 -1898,15 +1898,15 @@@ static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) { - int cpu = get_cpu(); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); u32 val;
if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
put_cpu(); } @@@ -1974,7 -2007,7 +2007,7 @@@ static void mvpp2_txq_done(struct mvpp2 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); int tx_done;
- if (txq_pcpu->cpu != smp_processor_id()) + if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id())) netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
tx_done = mvpp2_txq_sent_desc_proc(port, txq); @@@ -1990,7 -2023,7 +2023,7 @@@ }
static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, - int cpu) + unsigned int thread) { struct mvpp2_tx_queue *txq; struct mvpp2_txq_pcpu *txq_pcpu; @@@ -2001,7 -2034,7 +2034,7 @@@ if (!txq) break;
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
if (txq_pcpu->count) { mvpp2_txq_done(port, txq, txq_pcpu); @@@ -2017,8 -2050,8 +2050,8 @@@
/* Allocate and initialize descriptors for aggr TXQ */ static int mvpp2_aggr_txq_init(struct platform_device *pdev, - struct mvpp2_tx_queue *aggr_txq, int cpu, - struct mvpp2 *priv) + struct mvpp2_tx_queue *aggr_txq, + unsigned int thread, struct mvpp2 *priv) { u32 txq_dma;
@@@ -2033,7 -2066,7 +2066,7 @@@
/* Aggr TXQ no reset WA */ aggr_txq->next_desc_to_proc = mvpp2_read(priv, - MVPP2_AGGR_TXQ_INDEX_REG(cpu)); + MVPP2_AGGR_TXQ_INDEX_REG(thread));
/* Set Tx descriptors queue starting address indirect * access @@@ -2044,8 -2077,8 +2077,8 @@@ txq_dma = aggr_txq->descs_dma >> MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
- mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma); - mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma); + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread), MVPP2_AGGR_TXQ_SIZE);
return 0; @@@ -2056,8 -2089,8 +2089,8 @@@ static int mvpp2_rxq_init(struct mvpp2_ struct mvpp2_rx_queue *rxq)
{ + unsigned int thread; u32 rxq_dma; - int cpu;
rxq->size = port->rx_ring_size;
@@@ -2074,15 -2107,15 +2107,15 @@@ mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
/* Set Rx descriptors queue starting address - indirect access */ - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); if (port->priv->hw_version == MVPP21) rxq_dma = rxq->descs_dma; else rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0); put_cpu();
/* Set Offset */ @@@ -2127,7 -2160,7 +2160,7 @@@ static void mvpp2_rxq_drop_pkts(struct static void mvpp2_rxq_deinit(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { - int cpu; + unsigned int thread;
mvpp2_rxq_drop_pkts(port, rxq);
@@@ -2146,10 -2179,10 +2179,10 @@@ * free descriptor number */ mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); - mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0); put_cpu(); }
@@@ -2158,7 -2191,8 +2191,8 @@@ static int mvpp2_txq_init(struct mvpp2_ struct mvpp2_tx_queue *txq) { u32 val; - int cpu, desc, desc_per_txq, tx_port_num; + unsigned int thread; + int desc, desc_per_txq, tx_port_num; struct mvpp2_txq_pcpu *txq_pcpu;
txq->size = port->tx_ring_size; @@@ -2173,18 -2207,18 +2207,18 @@@ txq->last_desc = txq->size - 1;
/* Set Tx descriptors queue starting address - indirect access */ - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, txq->size & MVPP2_TXQ_DESC_SIZE_MASK); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG, + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG, txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); - val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG); + val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG); val &= ~MVPP2_TXQ_PENDING_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);
/* Calculate base address in prefetch buffer. We reserve 16 descriptors * for each existing TXQ. @@@ -2195,7 -2229,7 +2229,7 @@@ desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + (txq->log_id * desc_per_txq);
- mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); put_cpu(); @@@ -2214,8 -2248,8 +2248,8 @@@ mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), val);
- for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + for (thread = 0; thread < port->priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); txq_pcpu->size = txq->size; txq_pcpu->buffs = kmalloc_array(txq_pcpu->size, sizeof(*txq_pcpu->buffs), @@@ -2249,10 -2283,10 +2283,10 @@@ static void mvpp2_txq_deinit(struct mvp struct mvpp2_tx_queue *txq) { struct mvpp2_txq_pcpu *txq_pcpu; - int cpu; + unsigned int thread;
- for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + for (thread = 0; thread < port->priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); kfree(txq_pcpu->buffs);
if (txq_pcpu->tso_headers) @@@ -2278,10 -2312,10 +2312,10 @@@ mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
/* Set Tx descriptors queue starting address and size */ - cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); + thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0); put_cpu(); }
@@@ -2289,14 -2323,14 +2323,14 @@@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) { struct mvpp2_txq_pcpu *txq_pcpu; - int delay, pending, cpu; + int delay, pending; + unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu()); u32 val;
- cpu = get_cpu(); - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); - val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id); + val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG); val |= MVPP2_TXQ_DRAIN_EN_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
/* The napi queue has been stopped so wait for all packets * to be transmitted. @@@ -2312,17 -2346,17 +2346,17 @@@ mdelay(1); delay++;
- pending = mvpp2_percpu_read(port->priv, cpu, + pending = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG); pending &= MVPP2_TXQ_PENDING_MASK; } while (pending);
val &= ~MVPP2_TXQ_DRAIN_EN_MASK; - mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val); put_cpu();
- for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); + for (thread = 0; thread < port->priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
/* Release all packets */ mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); @@@ -2503,16 -2537,20 +2537,20 @@@ static void mvpp2_tx_proc_cb(unsigned l { struct net_device *dev = (struct net_device *)data; struct mvpp2_port *port = netdev_priv(dev); - struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); + struct mvpp2_port_pcpu *port_pcpu; unsigned int tx_todo, cause;
+ port_pcpu = per_cpu_ptr(port->pcpu, + mvpp2_cpu_to_thread(port->priv, smp_processor_id())); + if (!netif_running(dev)) return; port_pcpu->timer_scheduled = false;
/* Process all the Tx queues */ cause = (1 << port->ntxqs) - 1; - tx_todo = mvpp2_tx_done(port, cause, smp_processor_id()); + tx_todo = mvpp2_tx_done(port, cause, + mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
/* Set the timer in case not all the packets were processed */ if (tx_todo) @@@ -2728,7 -2766,8 +2766,8 @@@ static inline voi tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, struct mvpp2_tx_desc *desc) { - struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
dma_addr_t buf_dma_addr = mvpp2_txdesc_dma_addr_get(port, desc); @@@ -2745,7 -2784,8 +2784,8 @@@ static int mvpp2_tx_frag_process(struc struct mvpp2_tx_queue *aggr_txq, struct mvpp2_tx_queue *txq) { - struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread); struct mvpp2_tx_desc *tx_desc; int i; dma_addr_t buf_dma_addr; @@@ -2864,9 -2904,8 +2904,8 @@@ static int mvpp2_tx_tso(struct sk_buff int i, len, descs = 0;
/* Check number of available descriptors */ - if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, - tso_count_descs(skb)) || - mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu, + if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) || + mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, tso_count_descs(skb))) return 0;
@@@ -2906,21 -2945,28 +2945,28 @@@ release }
/* Main tx processing */ - static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) + static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); struct mvpp2_tx_queue *txq, *aggr_txq; struct mvpp2_txq_pcpu *txq_pcpu; struct mvpp2_tx_desc *tx_desc; dma_addr_t buf_dma_addr; + unsigned long flags = 0; + unsigned int thread; int frags = 0; u16 txq_id; u32 tx_cmd;
+ thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + txq_id = skb_get_queue_mapping(skb); txq = port->txqs[txq_id]; - txq_pcpu = this_cpu_ptr(txq->pcpu); - aggr_txq = &port->priv->aggr_txqs[smp_processor_id()]; + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); + aggr_txq = &port->priv->aggr_txqs[thread]; + + if (test_bit(thread, &port->priv->lock_map)) + spin_lock_irqsave(&port->tx_lock[thread], flags);
if (skb_is_gso(skb)) { frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu); @@@ -2929,9 -2975,8 +2975,8 @@@ frags = skb_shinfo(skb)->nr_frags + 1;
/* Check number of available descriptors */ - if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) || - mvpp2_txq_reserved_desc_num_proc(port->priv, txq, - txq_pcpu, frags)) { + if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) || + mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) { frags = 0; goto out; } @@@ -2973,7 -3018,7 +3018,7 @@@
out: if (frags > 0) { - struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); + struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread); struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
txq_pcpu->reserved_num -= frags; @@@ -3003,11 -3048,14 +3048,14 @@@ /* Set the timer in case not all frags were processed */ if (!port->has_tx_irqs && txq_pcpu->count <= frags && txq_pcpu->count > 0) { - struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); + struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
mvpp2_timer_set(port_pcpu); }
+ if (test_bit(thread, &port->priv->lock_map)) + spin_unlock_irqrestore(&port->tx_lock[thread], flags); + return NETDEV_TX_OK; }
@@@ -3027,7 -3075,7 +3075,7 @@@ static int mvpp2_poll(struct napi_struc int rx_done = 0; struct mvpp2_port *port = netdev_priv(napi->dev); struct mvpp2_queue_vector *qv; - int cpu = smp_processor_id(); + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
qv = container_of(napi, struct mvpp2_queue_vector, napi);
@@@ -3041,7 -3089,7 +3089,7 @@@ * * Each CPU has its own Rx/Tx cause register */ - cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id, + cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id, MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; @@@ -3050,21 -3098,20 +3098,22 @@@
/* Clear the cause register */ mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0); - mvpp2_percpu_write(port->priv, cpu, + mvpp2_thread_write(port->priv, thread, MVPP2_ISR_RX_TX_CAUSE_REG(port->id), cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); }
- cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; - if (cause_tx) { - cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; - mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); + if (port->has_tx_irqs) { + cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; + if (cause_tx) { + cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; + mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); + } }
/* Process RX packets */ - cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + cause_rx = cause_rx_tx & + MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version); cause_rx <<= qv->first_rxq; cause_rx |= qv->pending_cause_rx; while (cause_rx && budget > 0) { @@@ -3139,14 -3186,13 +3188,13 @@@ static void mvpp2_start_dev(struct mvpp for (i = 0; i < port->nqvecs; i++) napi_enable(&port->qvecs[i].napi);
-	/* Enable interrupts on all CPUs */
+	/* Enable interrupts on all threads */
 	mvpp2_interrupts_enable(port);
if (port->priv->hw_version == MVPP22) mvpp22_mode_reconfigure(port);
 	if (port->phylink) {
-		netif_carrier_off(port->dev);
 		phylink_start(port->phylink);
 	} else {
 		/* Phylink isn't used as of now for ACPI, so the MAC has to be
@@@ -3169,7 -3215,7 +3217,7 @@@ static void mvpp2_stop_dev(struct mvpp2
 {
 	int i;
-	/* Disable interrupts on all CPUs */
+	/* Disable interrupts on all threads */
 	mvpp2_interrupts_disable(port);
for (i = 0; i < port->nqvecs; i++) @@@ -3249,9 -3295,18 +3297,18 @@@ static int mvpp2_irqs_init(struct mvpp2 if (err) goto err;
- if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) - irq_set_affinity_hint(qv->irq, - cpumask_of(qv->sw_thread_id)); + if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) { + unsigned long mask = 0; + unsigned int cpu; + + for_each_present_cpu(cpu) { + if (mvpp2_cpu_to_thread(port->priv, cpu) == + qv->sw_thread_id) + mask |= BIT(cpu); + } + + irq_set_affinity_hint(qv->irq, to_cpumask(&mask)); + } }
return 0; @@@ -3395,11 -3450,11 +3452,11 @@@ static int mvpp2_stop(struct net_devic { struct mvpp2_port *port = netdev_priv(dev); struct mvpp2_port_pcpu *port_pcpu; - int cpu; + unsigned int thread;
mvpp2_stop_dev(port);
-	/* Mask interrupts on all CPUs */
+	/* Mask interrupts on all threads */
 	on_each_cpu(mvpp2_interrupts_mask, port, 1);
 	mvpp2_shared_interrupt_mask_unmask(port, true);
@@@ -3410,8 -3465,8 +3467,8 @@@
mvpp2_irqs_deinit(port); if (!port->has_tx_irqs) { - for_each_present_cpu(cpu) { - port_pcpu = per_cpu_ptr(port->pcpu, cpu); + for (thread = 0; thread < port->priv->nthreads; thread++) { + port_pcpu = per_cpu_ptr(port->pcpu, thread);
hrtimer_cancel(&port_pcpu->tx_done_timer); port_pcpu->timer_scheduled = false; @@@ -3556,7 -3611,7 +3613,7 @@@ mvpp2_get_stats64(struct net_device *de { struct mvpp2_port *port = netdev_priv(dev); unsigned int start; - int cpu; + unsigned int cpu;
for_each_possible_cpu(cpu) { struct mvpp2_pcpu_stats *cpu_stats; @@@ -3983,12 -4038,18 +4040,18 @@@ static int mvpp2_simple_queue_vectors_i static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, struct device_node *port_node) { + struct mvpp2 *priv = port->priv; struct mvpp2_queue_vector *v; int i, ret;
- port->nqvecs = num_possible_cpus(); - if (queue_mode == MVPP2_QDIST_SINGLE_MODE) - port->nqvecs += 1; + switch (queue_mode) { + case MVPP2_QDIST_SINGLE_MODE: + port->nqvecs = priv->nthreads + 1; + break; + case MVPP2_QDIST_MULTI_MODE: + port->nqvecs = priv->nthreads; + break; + }
for (i = 0; i < port->nqvecs; i++) { char irqname[16]; @@@ -4000,7 -4061,10 +4063,10 @@@ v->sw_thread_id = i; v->sw_thread_mask = BIT(i);
- snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); + if (port->flags & MVPP2_F_DT_COMPAT) + snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); + else + snprintf(irqname, sizeof(irqname), "hif%d", i);
if (queue_mode == MVPP2_QDIST_MULTI_MODE) { v->first_rxq = i * MVPP2_DEFAULT_RXQ; @@@ -4010,7 -4074,9 +4076,9 @@@ v->first_rxq = 0; v->nrxqs = port->nrxqs; v->type = MVPP2_QUEUE_VECTOR_SHARED; - strncpy(irqname, "rx-shared", sizeof(irqname)); + + if (port->flags & MVPP2_F_DT_COMPAT) + strncpy(irqname, "rx-shared", sizeof(irqname)); }
if (port_node) @@@ -4087,7 -4153,8 +4155,8 @@@ static int mvpp2_port_init(struct mvpp2 struct device *dev = port->dev->dev.parent; struct mvpp2 *priv = port->priv; struct mvpp2_txq_pcpu *txq_pcpu; - int queue, cpu, err; + unsigned int thread; + int queue, err;
/* Checks for hardware constraints */ if (port->first_rxq + port->nrxqs > @@@ -4131,9 -4198,9 +4200,9 @@@ txq->id = queue_phy_id; txq->log_id = queue; txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; - for_each_present_cpu(cpu) { - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); - txq_pcpu->cpu = cpu; + for (thread = 0; thread < priv->nthreads; thread++) { + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); + txq_pcpu->thread = thread; }
port->txqs[queue] = txq; @@@ -4206,24 -4273,51 +4275,51 @@@ err_free_percpu return err; }
- /* Checks if the port DT description has the TX interrupts - * described. On PPv2.1, there are no such interrupts. On PPv2.2, - * there are available, but we need to keep support for old DTs. + static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node, + unsigned long *flags) + { + char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2", + "tx-cpu3" }; + int i; + + for (i = 0; i < 5; i++) + if (of_property_match_string(port_node, "interrupt-names", + irqs[i]) < 0) + return false; + + *flags |= MVPP2_F_DT_COMPAT; + return true; + } + + /* Checks if the port dt description has the required Tx interrupts: + * - PPv2.1: there are no such interrupts. + * - PPv2.2: + * - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3] + * - The new ones have: "hifX" with X in [0..8] + * + * All those variants are supported to keep the backward compatibility. */ - static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv, - struct device_node *port_node) + static bool mvpp2_port_has_irqs(struct mvpp2 *priv, + struct device_node *port_node, + unsigned long *flags) { - char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", - "tx-cpu2", "tx-cpu3" }; - int ret, i; + char name[5]; + int i; + + /* ACPI */ + if (!port_node) + return true;
if (priv->hw_version == MVPP21) return false;
- for (i = 0; i < 5; i++) { - ret = of_property_match_string(port_node, "interrupt-names", - irqs[i]); - if (ret < 0) + if (mvpp22_port_has_legacy_tx_irqs(port_node, flags)) + return true; + + for (i = 0; i < MVPP2_MAX_THREADS; i++) { + snprintf(name, 5, "hif%d", i); + if (of_property_match_string(port_node, "interrupt-names", + name) < 0) return false; }
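Note: the probe logic above accepts two interrupt-names schemes: the legacy per-CPU names from old device trees, which also set MVPP2_F_DT_COMPAT, and the newer per-thread "hifX" names. A standalone sketch of the same two-pass matching, where match_string() stands in for of_property_match_string() and nine hif entries are assumed:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for of_property_match_string(): index or -1. */
static int match_string(const char *const *names, int n, const char *s)
{
        int i;

        for (i = 0; i < n; i++)
                if (!strcmp(names[i], s))
                        return i;
        return -1;
}

static bool has_irqs(const char *const *names, int n, bool *dt_compat)
{
        static const char *const legacy[] = { "rx-shared", "tx-cpu0",
                "tx-cpu1", "tx-cpu2", "tx-cpu3" };
        char name[8];
        int i;

        /* Old DTs: all five legacy names must be present. */
        for (i = 0; i < 5 && match_string(names, n, legacy[i]) >= 0; i++)
                ;
        if (i == 5) {
                *dt_compat = true;      /* mirrors MVPP2_F_DT_COMPAT */
                return true;
        }

        /* New DTs: one "hifX" name per thread (nine assumed here). */
        for (i = 0; i < 9; i++) {
                snprintf(name, sizeof(name), "hif%d", i);
                if (match_string(names, n, name) < 0)
                        return false;
        }
        return true;
}

int main(void)
{
        const char *const dt[] = { "hif0", "hif1", "hif2", "hif3",
                "hif4", "hif5", "hif6", "hif7", "hif8" };
        bool compat = false;

        printf("matched: %d, legacy: %d\n", has_irqs(dt, 9, &compat), compat);
        return 0;
}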
@@@ -4600,23 -4694,21 +4696,21 @@@ static int mvpp2_port_probe(struct plat struct resource *res; struct phylink *phylink; char *mac_from = ""; - unsigned int ntxqs, nrxqs; + unsigned int ntxqs, nrxqs, thread; + unsigned long flags = 0; bool has_tx_irqs; u32 id; int features; int phy_mode; - int err, i, cpu; + int err, i;
- if (port_node) { - has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node); - } else { - has_tx_irqs = true; - queue_mode = MVPP2_QDIST_MULTI_MODE; + has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags); + if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) { + dev_err(&pdev->dev, + "not enough IRQs to support multi queue mode\n"); + return -EINVAL; }
- if (!has_tx_irqs) - queue_mode = MVPP2_QDIST_SINGLE_MODE; - ntxqs = MVPP2_MAX_TXQ; if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE) nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus(); @@@ -4664,6 -4756,7 +4758,7 @@@ port->nrxqs = nrxqs; port->priv = priv; port->has_tx_irqs = has_tx_irqs; + port->flags = flags;
err = mvpp2_queue_vectors_init(port, port_node); if (err) @@@ -4760,8 -4853,8 +4855,8 @@@ }
if (!port->has_tx_irqs) { - for_each_present_cpu(cpu) { - port_pcpu = per_cpu_ptr(port->pcpu, cpu); + for (thread = 0; thread < priv->nthreads; thread++) { + port_pcpu = per_cpu_ptr(port->pcpu, thread);
hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); @@@ -5045,13 -5138,13 +5140,13 @@@ static int mvpp2_init(struct platform_d }
/* Allocate and initialize aggregated TXQs */ - priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(), + priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS, sizeof(*priv->aggr_txqs), GFP_KERNEL); if (!priv->aggr_txqs) return -ENOMEM;
- for_each_present_cpu(i) { + for (i = 0; i < MVPP2_MAX_THREADS; i++) { priv->aggr_txqs[i].id = i; priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv); @@@ -5098,7 -5191,7 +5193,7 @@@ static int mvpp2_probe(struct platform_ struct mvpp2 *priv; struct resource *res; void __iomem *base; - int i; + int i, shared; int err;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); @@@ -5163,6 -5256,15 +5258,15 @@@
mvpp2_setup_bm_pool();
+ + priv->nthreads = min_t(unsigned int, num_present_cpus(), + MVPP2_MAX_THREADS); + + shared = num_present_cpus() - priv->nthreads; + if (shared > 0) + bitmap_fill(&priv->lock_map, + min_t(int, shared, MVPP2_MAX_THREADS)); + for (i = 0; i < MVPP2_MAX_THREADS; i++) { u32 addr_space_sz;
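Note: the hunk above caps the number of software threads at the hardware limit and marks the threads that excess CPUs will have to share, so those paths can take a lock. A standalone sketch of the same arithmetic, with the MVPP2_MAX_THREADS value assumed:

#include <stdio.h>

#define MVPP2_MAX_THREADS 9     /* hardware limit, value assumed */

int main(void)
{
        unsigned int present_cpus = 16;
        unsigned long lock_map = 0;
        unsigned int nthreads;
        int shared, bits;

        /* One software thread per CPU, capped by the hardware. */
        nthreads = present_cpus < MVPP2_MAX_THREADS ?
                   present_cpus : MVPP2_MAX_THREADS;

        /* CPUs beyond the cap must share threads, so each shared
         * thread gets a bit in lock_map (bitmap_fill() equivalent)
         * and its TX path takes a lock. */
        shared = (int)(present_cpus - nthreads);
        if (shared > 0) {
                bits = shared < MVPP2_MAX_THREADS ?
                       shared : MVPP2_MAX_THREADS;
                lock_map = (1UL << bits) - 1;
        }

        printf("nthreads=%u lock_map=0x%lx\n", nthreads, lock_map);
        return 0;
}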
@@@ -5337,7 -5439,7 +5441,7 @@@ static int mvpp2_remove(struct platform mvpp2_bm_pool_destroy(pdev, priv, bm_pool); }
- for_each_present_cpu(i) { + for (i = 0; i < MVPP2_MAX_THREADS; i++) { struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
dma_free_coherent(&pdev->dev, diff --combined drivers/net/ethernet/mellanox/mlxsw/spectrum.c index b492152c8881,1fc20263b15b..8cbb52945572 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@@ -44,8 -44,8 +44,8 @@@ #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
#define MLXSW_SP1_FWREV_MAJOR 13 -#define MLXSW_SP1_FWREV_MINOR 1702 -#define MLXSW_SP1_FWREV_SUBMINOR 6 +#define MLXSW_SP1_FWREV_MINOR 1703 +#define MLXSW_SP1_FWREV_SUBMINOR 4 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { @@@ -2804,6 -2804,13 +2804,13 @@@ static int mlxsw_sp_port_ets_init(struc MLXSW_REG_QEEC_MAS_DIS); if (err) return err; + + err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_TC, + i + 8, i, + MLXSW_REG_QEEC_MAS_DIS); + if (err) + return err; }
/* Map all priorities to traffic class 0. */ diff --combined drivers/net/ethernet/qlogic/qed/qed_dcbx.c index f5459de6d60a,6ce9a762cfc0..8e8fa823d611 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@@ -190,8 -190,10 +190,8 @@@ qed_dcbx_dp_protocol(struct qed_hwfn *p
static void qed_dcbx_set_params(struct qed_dcbx_results *p_data, - struct qed_hw_info *p_info, - bool enable, - u8 prio, - u8 tc, + struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool enable, u8 prio, u8 tc, enum dcbx_protocol_type type, enum qed_pci_personality personality) { @@@ -204,30 -206,19 +204,30 @@@ else p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
+ /* Do not add vlan tag 0 when DCB is enabled and port in UFP/OV mode */ + if ((test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits) || + test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits))) + p_data->arr[type].dont_add_vlan0 = true; + /* QM reconf data */ - if (p_info->personality == personality) - qed_hw_info_set_offload_tc(p_info, tc); + if (p_hwfn->hw_info.personality == personality) + qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc); + + /* Configure dcbx vlan priority in doorbell block for roce EDPM */ + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && + type == DCBX_PROTOCOL_ROCE) { + qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1); + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1); + } }
/* Update app protocol data and hw_info fields with the TLV info */ static void qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, - struct qed_hwfn *p_hwfn, - bool enable, - u8 prio, u8 tc, enum dcbx_protocol_type type) + struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool enable, u8 prio, u8 tc, + enum dcbx_protocol_type type) { - struct qed_hw_info *p_info = &p_hwfn->hw_info; enum qed_pci_personality personality; enum dcbx_protocol_type id; int i; @@@ -240,7 -231,7 +240,7 @@@
personality = qed_dcbx_app_update[i].personality;
- qed_dcbx_set_params(p_data, p_info, enable, + qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable, prio, tc, type, personality); } } @@@ -262,8 -253,9 +262,9 @@@ qed_dcbx_get_app_protocol_type(struct q *type = DCBX_PROTOCOL_ROCE_V2; } else { *type = DCBX_MAX_PROTOCOL_TYPE; - DP_ERR(p_hwfn, "No action required, App TLV entry = 0x%x\n", - app_prio_bitmap); + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "No action required, App TLV entry = 0x%x\n", + app_prio_bitmap); return false; }
@@@ -274,7 -266,7 +275,7 @@@ * reconfiguring QM. Get protocol specific data for PF update ramrod command. */ static int -qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, +qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_dcbx_results *p_data, struct dcbx_app_priority_entry *p_tbl, u32 pri_tc_tbl, int count, u8 dcbx_version) @@@ -318,7 -310,7 +319,7 @@@ enable = true; }
- qed_dcbx_update_app_info(p_data, p_hwfn, enable, + qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, priority, tc, type); } } @@@ -340,7 -332,7 +341,7 @@@ continue;
enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version; - qed_dcbx_update_app_info(p_data, p_hwfn, enable, + qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable, priority, tc, type); }
@@@ -350,8 -342,7 +351,8 @@@ /* Parse app TLV's to update TC information in hw_info structure for * reconfiguring QM. Get protocol specific data for PF update ramrod command. */ -static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) +static int +qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct dcbx_app_priority_feature *p_app; struct dcbx_app_priority_entry *p_tbl; @@@ -375,7 -366,7 +376,7 @@@ p_info = &p_hwfn->hw_info; num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
- rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, + rc = qed_dcbx_process_tlv(p_hwfn, p_ptt, &data, p_tbl, pri_tc_tbl, num_entries, dcbx_version); if (rc) return rc; @@@ -901,7 -892,7 +902,7 @@@ qed_dcbx_mib_update_event(struct qed_hw return rc;
if (type == QED_DCBX_OPERATIONAL_MIB) { - rc = qed_dcbx_process_mib_info(p_hwfn); + rc = qed_dcbx_process_mib_info(p_hwfn, p_ptt); if (!rc) { /* reconfigure tcs of QM queues according * to negotiation results @@@ -964,7 -955,6 +965,7 @@@ static void qed_dcbx_update_protocol_da p_data->dcb_enable_flag = p_src->arr[type].enable; p_data->dcb_priority = p_src->arr[type].priority; p_data->dcb_tc = p_src->arr[type].tc; + p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0; }
/* Set pf update ramrod command params */ diff --combined drivers/net/ethernet/qlogic/qed/qed_dev.c index 97f073fd3725,128eb63ca54a..0fbeafeef7a0 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@@ -144,6 -144,12 +144,12 @@@ static void qed_qm_info_free(struct qed qm_info->wfq_data = NULL; }
+ static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn) + { + kfree(p_hwfn->dbg_user_info); + p_hwfn->dbg_user_info = NULL; + } + void qed_resc_free(struct qed_dev *cdev) { int i; @@@ -183,6 -189,7 +189,7 @@@ qed_l2_free(p_hwfn); qed_dmae_info_free(p_hwfn); qed_dcbx_info_free(p_hwfn); + qed_dbg_user_data_free(p_hwfn); } }
@@@ -1083,6 -1090,10 +1090,10 @@@ int qed_resc_alloc(struct qed_dev *cdev rc = qed_dcbx_info_alloc(p_hwfn); if (rc) goto alloc_err; + + rc = qed_dbg_alloc_user_data(p_hwfn); + if (rc) + goto alloc_err; }
cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL); @@@ -1706,7 -1717,7 +1717,7 @@@ static int qed_vf_start(struct qed_hwf int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) { struct qed_load_req_params load_req_params; - u32 load_code, param, drv_mb_param; + u32 load_code, resp, param, drv_mb_param; bool b_default_mtu = true; struct qed_hwfn *p_hwfn; int rc = 0, mfw_rc, i; @@@ -1852,19 -1863,6 +1863,19 @@@
if (IS_PF(cdev)) { p_hwfn = QED_LEADING_HWFN(cdev); + + /* Get pre-negotiated values for stag, bandwidth etc. */ + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n"); + drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET; + rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_GET_OEM_UPDATES, + drv_mb_param, &resp, &param); + if (rc) + DP_NOTICE(p_hwfn, + "Failed to send GET_OEM_UPDATES attention request\n"); + drv_mb_param = STORM_FW_VERSION; rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, diff --combined drivers/net/ethernet/qlogic/qed/qed_hsi.h index 9b3ef00e5782,21ec8091a24a..d4d08383c753 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@@ -274,7 -274,8 +274,8 @@@ struct core_rx_start_ramrod_data u8 mf_si_mcast_accept_all; struct core_rx_action_on_error action_on_error; u8 gsi_offload_flag; - u8 reserved[6]; + u8 wipe_inner_vlan_pri_en; + u8 reserved[5]; };
/* Ramrod data for rx queue stop ramrod */ @@@ -351,7 -352,8 +352,8 @@@ struct core_tx_start_ramrod_data __le16 pbl_size; __le16 qm_pq_id; u8 gsi_offload_flag; - u8 resrved[3]; + u8 vport_id; + u8 resrved[2]; };
/* Ramrod data for tx queue stop ramrod */ @@@ -914,6 -916,16 +916,16 @@@ struct eth_rx_rate_limit __le16 reserved1; };
+ /* Update RSS indirection table entry command */ + struct eth_tstorm_rss_update_data { + u8 valid; + u8 vport_id; + u8 ind_table_index; + u8 reserved; + __le16 ind_table_value; + __le16 reserved1; + }; + struct eth_ustorm_per_pf_stat { struct regpair rcv_lb_ucast_bytes; struct regpair rcv_lb_mcast_bytes; @@@ -1241,6 -1253,10 +1253,10 @@@ struct rl_update_ramrod_data u8 rl_id_first; u8 rl_id_last; u8 rl_dc_qcn_flg; + u8 dcqcn_reset_alpha_on_idle; + u8 rl_bc_stage_th; + u8 rl_timer_stage_th; + u8 reserved1; __le32 rl_bc_rate; __le16 rl_max_rate; __le16 rl_r_ai; @@@ -1249,7 -1265,7 +1265,7 @@@ __le32 dcqcn_k_us; __le32 dcqcn_timeuot_us; __le32 qcn_timeuot_us; - __le32 reserved[2]; + __le32 reserved2; };
/* Slowpath Element (SPQE) */ @@@ -3322,6 -3338,25 +3338,25 @@@ enum dbg_status qed_dbg_read_attn(struc enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, struct dbg_attn_block_result *results);
+ /******************************* Data Types **********************************/ + + struct mcp_trace_format { + u32 data; + #define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff + #define MCP_TRACE_FORMAT_MODULE_SHIFT 0 + #define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000 + #define MCP_TRACE_FORMAT_LEVEL_SHIFT 16 + #define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000 + #define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18 + #define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000 + #define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20 + #define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000 + #define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22 + #define MCP_TRACE_FORMAT_LEN_MASK 0xff000000 + #define MCP_TRACE_FORMAT_LEN_SHIFT 24 + char *format_str; + }; + /******************************** Constants **********************************/
#define MAX_NAME_LEN 16 @@@ -3337,6 -3372,13 +3372,13 @@@ enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr);
/** + * @brief qed_dbg_alloc_user_data - Allocates user debug data. + * + * @param p_hwfn - HW device data + */ + enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn); + + /** * @brief qed_dbg_get_status_str - Returns a string for the specified status. * * @param status - a debug status code. @@@ -3381,8 -3423,7 +3423,7 @@@ enum dbg_status qed_print_idle_chk_resu u32 *num_warnings);
/** - * @brief qed_dbg_mcp_trace_set_meta_data - Sets a pointer to the MCP Trace - * meta data. + * @brief qed_dbg_mcp_trace_set_meta_data - Sets the MCP Trace meta data. * * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to * no NVRAM access). @@@ -3390,7 -3431,8 +3431,8 @@@ * @param data - pointer to MCP Trace meta data * @param size - size of MCP Trace meta data in dwords */ - void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size); + void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn, + const u32 *meta_buf);
/** * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size @@@ -3425,19 -3467,45 +3467,45 @@@ enum dbg_status qed_print_mcp_trace_res char *results_buf);
/** + * @brief qed_print_mcp_trace_results_cont - Prints MCP Trace results, and + * keeps the MCP trace meta data allocated, to support continuous MCP Trace + * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should + * be called to free the meta data. + * + * @param p_hwfn - HW device data + * @param dump_buf - mcp trace dump buffer, starting from the header. + * @param results_buf - buffer for printing the mcp trace results. + * + * @return error if the parsing fails, ok otherwise. + */ + enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + char *results_buf); + + /** * @brief print_mcp_trace_line - Prints MCP Trace results for a single line * + * @param p_hwfn - HW device data * @param dump_buf - mcp trace dump buffer, starting from the header. * @param num_dumped_bytes - number of bytes that were dumped. * @param results_buf - buffer for printing the mcp trace results. * * @return error if the parsing fails, ok otherwise. */ - enum dbg_status qed_print_mcp_trace_line(u8 *dump_buf, + enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, + u8 *dump_buf, u32 num_dumped_bytes, char *results_buf);
/** + * @brief mcp_trace_free_meta_data - Frees the MCP Trace meta data. + * Should be called after continuous MCP Trace parsing. + * + * @param p_hwfn - HW device data + */ + void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn); + + /** * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size * for reg_fifo results (in bytes). * @@@ -4303,154 -4371,161 +4371,161 @@@ void qed_set_rdma_error_level(struct qe (IRO[29].base + ((pf_id) * IRO[29].m1)) #define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
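Note: the kernel-doc above implies a three-step lifecycle for continuous MCP trace parsing. A hypothetical caller, not part of the patch, might look like this (DBG_STATUS_OK is assumed to be the success code):

/* Allocate the user debug data once, parse successive trace windows,
 * then free the meta data when continuous parsing ends. */
static enum dbg_status example_mcp_trace_loop(struct qed_hwfn *p_hwfn,
                                              u32 *dump_buf,
                                              char *results_buf,
                                              int windows)
{
        enum dbg_status rc;
        int i;

        rc = qed_dbg_alloc_user_data(p_hwfn);
        if (rc != DBG_STATUS_OK)
                return rc;

        for (i = 0; i < windows; i++) {
                /* Meta data stays allocated between calls. */
                rc = qed_print_mcp_trace_results_cont(p_hwfn, dump_buf,
                                                      results_buf);
                if (rc != DBG_STATUS_OK)
                        break;
        }

        /* Mandatory once continuous parsing ends. */
        qed_mcp_trace_free_meta_data(p_hwfn);
        return rc;
}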
+ /* RSS indirection table entry update command per PF offset in TSTORM PF BAR0. + * Use eth_tstorm_rss_update_data for update. + */ + #define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) \ + (IRO[30].base + ((pf_id) * IRO[30].m1)) + #define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[30].size) + /* Xstorm queue zone */ #define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ - (IRO[30].base + ((queue_id) * IRO[30].m1)) - #define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size) + (IRO[31].base + ((queue_id) * IRO[31].m1)) + #define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[31].size)
/* Ystorm cqe producer */ #define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ - (IRO[31].base + ((rss_id) * IRO[31].m1)) - #define YSTORM_TOE_CQ_PROD_SIZE (IRO[31].size) + (IRO[32].base + ((rss_id) * IRO[32].m1)) + #define YSTORM_TOE_CQ_PROD_SIZE (IRO[32].size)
/* Ustorm cqe producer */ #define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ - (IRO[32].base + ((rss_id) * IRO[32].m1)) - #define USTORM_TOE_CQ_PROD_SIZE (IRO[32].size) + (IRO[33].base + ((rss_id) * IRO[33].m1)) + #define USTORM_TOE_CQ_PROD_SIZE (IRO[33].size)
/* Ustorm grq producer */ #define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ - (IRO[33].base + ((pf_id) * IRO[33].m1)) - #define USTORM_TOE_GRQ_PROD_SIZE (IRO[33].size) + (IRO[34].base + ((pf_id) * IRO[34].m1)) + #define USTORM_TOE_GRQ_PROD_SIZE (IRO[34].size)
/* Tstorm cmdq-cons of given command queue-id */ #define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \ - (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1)) - #define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size) + (IRO[35].base + ((cmdq_queue_id) * IRO[35].m1)) + #define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[35].size)
/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID, * BDqueue-id. */ #define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ - (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2)) - #define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size) + (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2)) + #define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */ #define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ - (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2)) - #define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size) + (IRO[37].base + ((func_id) * IRO[37].m1) + ((bdq_id) * IRO[37].m2)) + #define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[37].size)
/* Tstorm iSCSI RX stats */ #define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[37].base + ((pf_id) * IRO[37].m1)) - #define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size) + (IRO[38].base + ((pf_id) * IRO[38].m1)) + #define TSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
/* Mstorm iSCSI RX stats */ #define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[38].base + ((pf_id) * IRO[38].m1)) - #define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size) + (IRO[39].base + ((pf_id) * IRO[39].m1)) + #define MSTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
/* Ustorm iSCSI RX stats */ #define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[39].base + ((pf_id) * IRO[39].m1)) - #define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size) + (IRO[40].base + ((pf_id) * IRO[40].m1)) + #define USTORM_ISCSI_RX_STATS_SIZE (IRO[40].size)
/* Xstorm iSCSI TX stats */ #define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[40].base + ((pf_id) * IRO[40].m1)) - #define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size) + (IRO[41].base + ((pf_id) * IRO[41].m1)) + #define XSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
/* Ystorm iSCSI TX stats */ #define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[41].base + ((pf_id) * IRO[41].m1)) - #define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size) + (IRO[42].base + ((pf_id) * IRO[42].m1)) + #define YSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
/* Pstorm iSCSI TX stats */ #define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[42].base + ((pf_id) * IRO[42].m1)) - #define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size) + (IRO[43].base + ((pf_id) * IRO[43].m1)) + #define PSTORM_ISCSI_TX_STATS_SIZE (IRO[43].size)
/* Tstorm FCoE RX stats */ #define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ - (IRO[43].base + ((pf_id) * IRO[43].m1)) - #define TSTORM_FCOE_RX_STATS_SIZE (IRO[43].size) + (IRO[44].base + ((pf_id) * IRO[44].m1)) + #define TSTORM_FCOE_RX_STATS_SIZE (IRO[44].size)
/* Pstorm FCoE TX stats */ #define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ - (IRO[44].base + ((pf_id) * IRO[44].m1)) - #define PSTORM_FCOE_TX_STATS_SIZE (IRO[44].size) + (IRO[45].base + ((pf_id) * IRO[45].m1)) + #define PSTORM_FCOE_TX_STATS_SIZE (IRO[45].size)
/* Pstorm RDMA queue statistics */ #define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1)) - #define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size) + (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) + #define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
/* Tstorm RDMA queue statistics */ #define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) - #define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) + (IRO[47].base + ((rdma_stat_counter_id) * IRO[47].m1)) + #define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[47].size)
/* Xstorm error level for assert */ #define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[47].base + ((pf_id) * IRO[47].m1)) - #define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[47].size) + (IRO[48].base + ((pf_id) * IRO[48].m1)) + #define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size)
/* Ystorm error level for assert */ #define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[48].base + ((pf_id) * IRO[48].m1)) - #define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size) + (IRO[49].base + ((pf_id) * IRO[49].m1)) + #define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size)
/* Pstorm error level for assert */ #define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[49].base + ((pf_id) * IRO[49].m1)) - #define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size) + (IRO[50].base + ((pf_id) * IRO[50].m1)) + #define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size)
/* Tstorm error level for assert */ #define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[50].base + ((pf_id) * IRO[50].m1)) - #define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size) + (IRO[51].base + ((pf_id) * IRO[51].m1)) + #define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size)
/* Mstorm error level for assert */ #define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[51].base + ((pf_id) * IRO[51].m1)) - #define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size) + (IRO[52].base + ((pf_id) * IRO[52].m1)) + #define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size)
/* Ustorm error level for assert */ #define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[52].base + ((pf_id) * IRO[52].m1)) - #define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size) + (IRO[53].base + ((pf_id) * IRO[53].m1)) + #define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[53].size)
/* Xstorm iWARP rxmit stats */ #define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \ - (IRO[53].base + ((pf_id) * IRO[53].m1)) - #define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[53].size) + (IRO[54].base + ((pf_id) * IRO[54].m1)) + #define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[54].size)
/* Tstorm RoCE Event Statistics */ #define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \ - (IRO[54].base + ((roce_pf_id) * IRO[54].m1)) - #define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[54].size) + (IRO[55].base + ((roce_pf_id) * IRO[55].m1)) + #define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[55].size)
/* DCQCN Received Statistics */ #define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \ - (IRO[55].base + ((roce_pf_id) * IRO[55].m1)) - #define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[55].size) + (IRO[56].base + ((roce_pf_id) * IRO[56].m1)) + #define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[56].size)
/* RoCE Error Statistics */ #define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \ - (IRO[56].base + ((roce_pf_id) * IRO[56].m1)) - #define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[56].size) + (IRO[57].base + ((roce_pf_id) * IRO[57].m1)) + #define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[57].size)
/* DCQCN Sent Statistics */ #define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \ - (IRO[57].base + ((roce_pf_id) * IRO[57].m1)) - #define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[57].size) + (IRO[58].base + ((roce_pf_id) * IRO[58].m1)) + #define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[58].size)
/* RoCE CQEs Statistics */ #define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \ - (IRO[58].base + ((roce_pf_id) * IRO[58].m1)) - #define USTORM_ROCE_CQE_STATS_SIZE (IRO[58].size) + (IRO[59].base + ((roce_pf_id) * IRO[59].m1)) + #define USTORM_ROCE_CQE_STATS_SIZE (IRO[59].size)
- static const struct iro iro_arr[59] = { + static const struct iro iro_arr[60] = { {0x0, 0x0, 0x0, 0x0, 0x8}, {0x4cb8, 0x88, 0x0, 0x0, 0x88}, {0x6530, 0x20, 0x0, 0x0, 0x20}, @@@ -4461,14 -4536,14 +4536,14 @@@ {0x84, 0x8, 0x0, 0x0, 0x2}, {0x4c48, 0x0, 0x0, 0x0, 0x78}, {0x3e38, 0x0, 0x0, 0x0, 0x78}, - {0x2b78, 0x0, 0x0, 0x0, 0x78}, + {0x3ef8, 0x0, 0x0, 0x0, 0x78}, {0x4c40, 0x0, 0x0, 0x0, 0x78}, {0x4998, 0x0, 0x0, 0x0, 0x78}, {0x7f50, 0x0, 0x0, 0x0, 0x78}, {0xa28, 0x8, 0x0, 0x0, 0x8}, {0x6210, 0x10, 0x0, 0x0, 0x10}, {0xb820, 0x30, 0x0, 0x0, 0x30}, - {0x96c0, 0x30, 0x0, 0x0, 0x30}, + {0xa990, 0x30, 0x0, 0x0, 0x30}, {0x4b68, 0x80, 0x0, 0x0, 0x40}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, {0x53a8, 0x80, 0x4, 0x0, 0x4}, @@@ -4476,11 -4551,12 +4551,12 @@@ {0x4ba8, 0x80, 0x0, 0x0, 0x20}, {0x8158, 0x40, 0x0, 0x0, 0x30}, {0xe770, 0x60, 0x0, 0x0, 0x60}, - {0x2d10, 0x80, 0x0, 0x0, 0x38}, - {0xf2b8, 0x78, 0x0, 0x0, 0x78}, + {0x4090, 0x80, 0x0, 0x0, 0x38}, + {0xfea8, 0x78, 0x0, 0x0, 0x78}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, {0xaf20, 0x0, 0x0, 0x0, 0xf0}, {0xb010, 0x8, 0x0, 0x0, 0x8}, + {0xc00, 0x8, 0x0, 0x0, 0x8}, {0x1f8, 0x8, 0x0, 0x0, 0x8}, {0xac0, 0x8, 0x0, 0x0, 0x8}, {0x2578, 0x8, 0x0, 0x0, 0x8}, @@@ -4492,23 -4568,23 +4568,23 @@@ {0x12908, 0x18, 0x0, 0x0, 0x10}, {0x11aa8, 0x40, 0x0, 0x0, 0x18}, {0xa588, 0x50, 0x0, 0x0, 0x20}, - {0x8700, 0x40, 0x0, 0x0, 0x28}, - {0x10300, 0x18, 0x0, 0x0, 0x10}, + {0x8f00, 0x40, 0x0, 0x0, 0x28}, + {0x10e30, 0x18, 0x0, 0x0, 0x10}, {0xde48, 0x48, 0x0, 0x0, 0x38}, - {0x10768, 0x20, 0x0, 0x0, 0x20}, - {0x2d48, 0x80, 0x0, 0x0, 0x10}, + {0x11298, 0x20, 0x0, 0x0, 0x20}, + {0x40c8, 0x80, 0x0, 0x0, 0x10}, {0x5048, 0x10, 0x0, 0x0, 0x10}, {0xc748, 0x8, 0x0, 0x0, 0x1}, - {0xa128, 0x8, 0x0, 0x0, 0x1}, - {0x10f00, 0x8, 0x0, 0x0, 0x1}, + {0xa928, 0x8, 0x0, 0x0, 0x1}, + {0x11a30, 0x8, 0x0, 0x0, 0x1}, {0xf030, 0x8, 0x0, 0x0, 0x1}, {0x13028, 0x8, 0x0, 0x0, 0x1}, {0x12c58, 0x8, 0x0, 0x0, 0x1}, {0xc9b8, 0x30, 0x0, 0x0, 0x10}, {0xed90, 0x28, 0x0, 0x0, 0x28}, - {0xa520, 0x18, 0x0, 0x0, 0x18}, - {0xa6a0, 0x8, 0x0, 0x0, 0x8}, - {0x13108, 0x8, 0x0, 0x0, 0x8}, + {0xad20, 0x18, 0x0, 0x0, 0x18}, + {0xaea0, 0x8, 0x0, 0x0, 0x8}, + {0x13c38, 0x8, 0x0, 0x0, 0x8}, {0x13c50, 0x18, 0x0, 0x0, 0x18}, };
@@@ -5661,6 -5737,14 +5737,14 @@@ enum eth_filter_type MAX_ETH_FILTER_TYPE };
+ /* inner to inner vlan priority translation configurations */ + struct eth_in_to_in_pri_map_cfg { + u8 inner_vlan_pri_remap_en; + u8 reserved[7]; + u8 non_rdma_in_to_in_pri_map[8]; + u8 rdma_in_to_in_pri_map[8]; + }; + /* Eth IPv4 Fragment Type */ enum eth_ipv4_frag_type { ETH_IPV4_NOT_FRAG, @@@ -6018,6 -6102,14 +6102,14 @@@ struct tx_queue_update_ramrod_data struct regpair reserved1[5]; };
+ /* Inner to Inner VLAN priority map update mode */ + enum update_in_to_in_pri_map_mode_enum { + ETH_IN_TO_IN_PRI_MAP_UPDATE_DISABLED, + ETH_IN_TO_IN_PRI_MAP_UPDATE_NON_RDMA_TBL, + ETH_IN_TO_IN_PRI_MAP_UPDATE_RDMA_TBL, + MAX_UPDATE_IN_TO_IN_PRI_MAP_MODE_ENUM + }; + /* Ramrod data for vport update ramrod */ struct vport_filter_update_ramrod_data { struct eth_filter_cmd_header filter_cmd_hdr; @@@ -6048,7 -6140,8 +6140,8 @@@ struct vport_start_ramrod_data u8 zero_placement_offset; u8 ctl_frame_mac_check_en; u8 ctl_frame_ethtype_check_en; - u8 reserved[1]; + u8 wipe_inner_vlan_pri_en; + struct eth_in_to_in_pri_map_cfg in_to_in_vlan_pri_map_cfg; };
/* Ramrod data for vport stop ramrod */ @@@ -6100,7 -6193,9 +6193,9 @@@ struct vport_update_ramrod_data_cmn u8 update_ctl_frame_checks_en_flg; u8 ctl_frame_mac_check_en; u8 ctl_frame_ethtype_check_en; - u8 reserved[15]; + u8 update_in_to_in_pri_map_mode; + u8 in_to_in_pri_map[8]; + u8 reserved[6]; };
struct vport_update_ramrod_mcast { @@@ -6929,11 -7024,6 +7024,6 @@@ struct mstorm_rdma_task_st_ctx struct regpair temp[4]; };
- /* The roce task context of Ustorm */ - struct ustorm_rdma_task_st_ctx { - struct regpair temp[2]; - }; - struct e4_ustorm_rdma_task_ag_ctx { u8 reserved; u8 state; @@@ -7007,8 -7097,6 +7097,6 @@@ struct e4_rdma_task_context struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context; struct mstorm_rdma_task_st_ctx mstorm_st_context; struct rdif_task_context rdif_context; - struct ustorm_rdma_task_st_ctx ustorm_st_context; - struct regpair ustorm_st_padding[2]; struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context; };
@@@ -7388,7 -7476,7 +7476,7 @@@ struct e4_ustorm_rdma_conn_ag_ctx #define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 #define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; - u8 byte3; + u8 nvmf_only; __le16 conn_dpi; __le16 word1; __le32 cq_cons; @@@ -7831,7 -7919,12 +7919,12 @@@ struct roce_create_qp_req_ramrod_data struct regpair qp_handle_for_cqe; struct regpair qp_handle_for_async; u8 stats_counter_id; - u8 reserved3[7]; + u8 reserved3[6]; + u8 flags2; + #define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_MASK 0x1 + #define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_SHIFT 0 + #define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x7F + #define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 1 __le16 regular_latency_phy_queue; __le16 dpi; }; @@@ -7954,6 -8047,7 +8047,7 @@@ enum roce_event_opcode ROCE_EVENT_DESTROY_QP, ROCE_EVENT_CREATE_UD_QP, ROCE_EVENT_DESTROY_UD_QP, + ROCE_EVENT_FUNC_UPDATE, MAX_ROCE_EVENT_OPCODE };
@@@ -7962,7 -8056,13 +8056,13 @@@ struct roce_init_func_params u8 ll2_queue_id; u8 cnp_vlan_priority; u8 cnp_dscp; - u8 reserved; + u8 flags; + #define ROCE_INIT_FUNC_PARAMS_DCQCN_NP_EN_MASK 0x1 + #define ROCE_INIT_FUNC_PARAMS_DCQCN_NP_EN_SHIFT 0 + #define ROCE_INIT_FUNC_PARAMS_DCQCN_RP_EN_MASK 0x1 + #define ROCE_INIT_FUNC_PARAMS_DCQCN_RP_EN_SHIFT 1 + #define ROCE_INIT_FUNC_PARAMS_RESERVED0_MASK 0x3F + #define ROCE_INIT_FUNC_PARAMS_RESERVED0_SHIFT 2 __le32 cnp_send_timeout; __le16 rl_offset; u8 rl_count_log; @@@ -8109,9 -8209,24 +8209,24 @@@ enum roce_ramrod_cmd_id ROCE_RAMROD_DESTROY_QP, ROCE_RAMROD_CREATE_UD_QP, ROCE_RAMROD_DESTROY_UD_QP, + ROCE_RAMROD_FUNC_UPDATE, MAX_ROCE_RAMROD_CMD_ID };
+ /* RoCE func init ramrod data */ + struct roce_update_func_params { + u8 cnp_vlan_priority; + u8 cnp_dscp; + __le16 flags; + #define ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_MASK 0x1 + #define ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_SHIFT 0 + #define ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_MASK 0x1 + #define ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_SHIFT 1 + #define ROCE_UPDATE_FUNC_PARAMS_RESERVED0_MASK 0x3FFF + #define ROCE_UPDATE_FUNC_PARAMS_RESERVED0_SHIFT 2 + __le32 cnp_send_timeout; + }; + struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part { u8 reserved0; u8 state; @@@ -12414,7 -12529,6 +12529,7 @@@ struct public_drv_mb #define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000 #define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000 #define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000 +#define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 #define DRV_MSG_CODE_NIG_DRAIN 0x30000000 @@@ -12542,9 -12656,6 +12657,9 @@@ #define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1 #define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1 +#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0 + #define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 #define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 #define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 diff --combined drivers/net/ethernet/renesas/ravb.h index 9b6bf557a2f5,b2b18036380e..1c6e4df94f01 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@@ -428,7 -428,6 +428,7 @@@ enum EIS_BIT EIS_CULF1 = 0x00000080, EIS_TFFF = 0x00000100, EIS_QFS = 0x00010000, + EIS_RESERVED = (GENMASK(31, 17) | GENMASK(15, 11)), };
/* RIC0 */ @@@ -473,7 -472,6 +473,7 @@@ enum RIS0_BIT RIS0_FRF15 = 0x00008000, RIS0_FRF16 = 0x00010000, RIS0_FRF17 = 0x00020000, + RIS0_RESERVED = GENMASK(31, 18), };
/* RIC1 */ @@@ -530,7 -528,6 +530,7 @@@ enum RIS2_BIT RIS2_QFF16 = 0x00010000, RIS2_QFF17 = 0x00020000, RIS2_RFFF = 0x80000000, + RIS2_RESERVED = GENMASK(30, 18), };
/* TIC */ @@@ -547,7 -544,6 +547,7 @@@ enum TIS_BIT TIS_FTF1 = 0x00000002, /* Undocumented? */ TIS_TFUF = 0x00000100, TIS_TFWF = 0x00000200, + TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4)) };
/* ISS */ @@@ -621,7 -617,6 +621,7 @@@ enum GIC_BIT enum GIS_BIT { GIS_PTCF = 0x00000001, /* Undocumented? */ GIS_PTMF = 0x00000004, + GIS_RESERVED = GENMASK(15, 10), };
/* GIE (R-Car Gen3 only) */ @@@ -959,7 -954,10 +959,10 @@@ enum RAVB_QUEUE #define RX_QUEUE_OFFSET 4 #define NUM_RX_QUEUE 2 #define NUM_TX_QUEUE 2 - #define NUM_TX_DESC 2 /* TX descriptors per packet */ + + /* TX descriptors per packet */ + #define NUM_TX_DESC_GEN2 2 + #define NUM_TX_DESC_GEN3 1
struct ravb_tstamp_skb { struct list_head list; @@@ -1038,6 -1036,7 +1041,7 @@@ struct ravb_private unsigned no_avb_link:1; unsigned avb_link_active_low:1; unsigned wol_enabled:1; + int num_tx_desc; /* TX descriptors per packet */ };
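Note: with the descriptor count per packet now a runtime value, all ring index math scales by num_tx_desc: the ring holds num_tx_ring * num_tx_desc slots and the owning skb slot is entry / num_tx_desc. A standalone sketch of that math with the Gen2 value (ring length invented):

#include <stdio.h>

int main(void)
{
        /* Gen2 packets take two descriptors (aligned bounce buffer
         * plus remainder); Gen3 takes one. Descriptor counts from the
         * patch, ring length invented. */
        const unsigned int num_tx_desc = 2;     /* NUM_TX_DESC_GEN2 */
        const unsigned int num_tx_ring = 64;
        unsigned int cur_tx = 130;              /* running counter */

        /* Same index math as ravb_start_xmit()/ravb_tx_free(). */
        unsigned int entry = cur_tx % (num_tx_ring * num_tx_desc);
        unsigned int skb_slot = entry / num_tx_desc;
        int last = entry % num_tx_desc == num_tx_desc - 1;

        printf("entry=%u skb_slot=%u last-of-packet=%d\n",
               entry, skb_slot, last);
        return 0;
}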
static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg) diff --combined drivers/net/ethernet/renesas/ravb_main.c index d6f753925352,f7c92d48d0dd..eda6ceee2535 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@@ -182,6 -182,7 +182,7 @@@ static int ravb_tx_free(struct net_devi { struct ravb_private *priv = netdev_priv(ndev); struct net_device_stats *stats = &priv->stats[q]; + int num_tx_desc = priv->num_tx_desc; struct ravb_tx_desc *desc; int free_num = 0; int entry; @@@ -191,7 -192,7 +192,7 @@@ bool txed;
entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * - NUM_TX_DESC); + num_tx_desc); desc = &priv->tx_ring[q][entry]; txed = desc->die_dt == DT_FEMPTY; if (free_txed_only && !txed) @@@ -200,12 -201,12 +201,12 @@@ dma_rmb(); size = le16_to_cpu(desc->ds_tagl) & TX_DS; /* Free the original skb. */ - if (priv->tx_skb[q][entry / NUM_TX_DESC]) { + if (priv->tx_skb[q][entry / num_tx_desc]) { dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), size, DMA_TO_DEVICE); /* Last packet descriptor? */ - if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) { - entry /= NUM_TX_DESC; + if (entry % num_tx_desc == num_tx_desc - 1) { + entry /= num_tx_desc; dev_kfree_skb_any(priv->tx_skb[q][entry]); priv->tx_skb[q][entry] = NULL; if (txed) @@@ -224,6 -225,7 +225,7 @@@ static void ravb_ring_free(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); + int num_tx_desc = priv->num_tx_desc; int ring_size; int i;
@@@ -249,7 -251,7 +251,7 @@@ ravb_tx_free(ndev, q, false);
ring_size = sizeof(struct ravb_tx_desc) * - (priv->num_tx_ring[q] * NUM_TX_DESC + 1); + (priv->num_tx_ring[q] * num_tx_desc + 1); dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q], priv->tx_desc_dma[q]); priv->tx_ring[q] = NULL; @@@ -278,12 -280,13 +280,13 @@@ static void ravb_ring_format(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); + int num_tx_desc = priv->num_tx_desc; struct ravb_ex_rx_desc *rx_desc; struct ravb_tx_desc *tx_desc; struct ravb_desc *desc; int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] * - NUM_TX_DESC; + num_tx_desc; dma_addr_t dma_addr; int i;
@@@ -318,8 -321,10 +321,10 @@@ for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q]; i++, tx_desc++) { tx_desc->die_dt = DT_EEMPTY; - tx_desc++; - tx_desc->die_dt = DT_EEMPTY; + if (num_tx_desc > 1) { + tx_desc++; + tx_desc->die_dt = DT_EEMPTY; + } } tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); tx_desc->die_dt = DT_LINKFIX; /* type */ @@@ -339,6 -344,7 +344,7 @@@ static int ravb_ring_init(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); + int num_tx_desc = priv->num_tx_desc; struct sk_buff *skb; int ring_size; int i; @@@ -362,11 -368,13 +368,13 @@@ priv->rx_skb[q][i] = skb; }
- /* Allocate rings for the aligned buffers */ - priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] + - DPTR_ALIGN - 1, GFP_KERNEL); - if (!priv->tx_align[q]) - goto error; + if (num_tx_desc > 1) { + /* Allocate rings for the aligned buffers */ + priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] + + DPTR_ALIGN - 1, GFP_KERNEL); + if (!priv->tx_align[q]) + goto error; + }
/* Allocate all RX descriptors. */ ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); @@@ -380,7 -388,7 +388,7 @@@
/* Allocate all TX descriptors. */ ring_size = sizeof(struct ravb_tx_desc) * - (priv->num_tx_ring[q] * NUM_TX_DESC + 1); + (priv->num_tx_ring[q] * num_tx_desc + 1); priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, &priv->tx_desc_dma[q], GFP_KERNEL); @@@ -739,11 -747,10 +747,11 @@@ static void ravb_error_interrupt(struc u32 eis, ris2;
eis = ravb_read(ndev, EIS); - ravb_write(ndev, ~EIS_QFS, EIS); + ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS); if (eis & EIS_QFS) { ris2 = ravb_read(ndev, RIS2); - ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2); + ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED), + RIS2);
/* Receive Descriptor Empty int */ if (ris2 & RIS2_QFF0) @@@ -796,7 -803,7 +804,7 @@@ static bool ravb_timestamp_interrupt(st u32 tis = ravb_read(ndev, TIS);
if (tis & TIS_TFUF) { - ravb_write(ndev, ~TIS_TFUF, TIS); + ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS); ravb_get_tx_tstamp(ndev); return true; } @@@ -931,7 -938,7 +939,7 @@@ static int ravb_poll(struct napi_struc /* Processing RX Descriptor Ring */ if (ris0 & mask) { /* Clear RX interrupt */ - ravb_write(ndev, ~mask, RIS0); + ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0); if (ravb_rx(ndev, &quota, q)) goto out; } @@@ -939,7 -946,7 +947,7 @@@ if (tis & mask) { spin_lock_irqsave(&priv->lock, flags); /* Clear TX interrupt */ - ravb_write(ndev, ~mask, TIS); + ravb_write(ndev, ~(mask | TIS_RESERVED), TIS); ravb_tx_free(ndev, q, true); netif_wake_subqueue(ndev, q); mmiowb(); @@@ -1075,7 -1082,8 +1083,8 @@@ static int ravb_phy_init(struct net_dev }
/* 10BASE is not supported */ - phydev->supported &= ~PHY_10BT_FEATURES; + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
phy_attached_info(phydev);
@@@ -1485,6 -1493,7 +1494,7 @@@ static void ravb_tx_timeout_work(struc static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + int num_tx_desc = priv->num_tx_desc; u16 q = skb_get_queue_mapping(skb); struct ravb_tstamp_skb *ts_skb; struct ravb_tx_desc *desc; @@@ -1496,7 -1505,7 +1506,7 @@@
spin_lock_irqsave(&priv->lock, flags); if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) * - NUM_TX_DESC) { + num_tx_desc) { netif_err(priv, tx_queued, ndev, "still transmitting with the full ring!\n"); netif_stop_subqueue(ndev, q); @@@ -1507,41 -1516,55 +1517,55 @@@ if (skb_put_padto(skb, ETH_ZLEN)) goto exit;
- entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC); - priv->tx_skb[q][entry / NUM_TX_DESC] = skb; - - buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + - entry / NUM_TX_DESC * DPTR_ALIGN; - len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; - /* Zero length DMA descriptors are problematic as they seem to - * terminate DMA transfers. Avoid them by simply using a length of - * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN. - * - * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of - * data by the call to skb_put_padto() above this is safe with - * respect to both the length of the first DMA descriptor (len) - * overflowing the available data and the length of the second DMA - * descriptor (skb->len - len) being negative. - */ - if (len == 0) - len = DPTR_ALIGN; + entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc); + priv->tx_skb[q][entry / num_tx_desc] = skb; + + if (num_tx_desc > 1) { + buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + + entry / num_tx_desc * DPTR_ALIGN; + len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; + + /* Zero length DMA descriptors are problematic as they seem + * to terminate DMA transfers. Avoid them by simply using a + * length of DPTR_ALIGN (4) when skb data is aligned to + * DPTR_ALIGN. + * + * As skb is guaranteed to have at least ETH_ZLEN (60) + * bytes of data by the call to skb_put_padto() above this + * is safe with respect to both the length of the first DMA + * descriptor (len) overflowing the available data and the + * length of the second DMA descriptor (skb->len - len) + * being negative. + */ + if (len == 0) + len = DPTR_ALIGN;
- memcpy(buffer, skb->data, len); - dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE); - if (dma_mapping_error(ndev->dev.parent, dma_addr)) - goto drop; + memcpy(buffer, skb->data, len); + dma_addr = dma_map_single(ndev->dev.parent, buffer, len, + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto drop;
- desc = &priv->tx_ring[q][entry]; - desc->ds_tagl = cpu_to_le16(len); - desc->dptr = cpu_to_le32(dma_addr); + desc = &priv->tx_ring[q][entry]; + desc->ds_tagl = cpu_to_le16(len); + desc->dptr = cpu_to_le32(dma_addr);
- buffer = skb->data + len; - len = skb->len - len; - dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE); - if (dma_mapping_error(ndev->dev.parent, dma_addr)) - goto unmap; + buffer = skb->data + len; + len = skb->len - len; + dma_addr = dma_map_single(ndev->dev.parent, buffer, len, + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto unmap;
- desc++; + desc++; + } else { + desc = &priv->tx_ring[q][entry]; + len = skb->len; + dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + goto drop; + } desc->ds_tagl = cpu_to_le16(len); desc->dptr = cpu_to_le32(dma_addr);
@@@ -1549,9 -1572,11 +1573,11 @@@ if (q == RAVB_NC) { ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC); if (!ts_skb) { - desc--; - dma_unmap_single(ndev->dev.parent, dma_addr, len, - DMA_TO_DEVICE); + if (num_tx_desc > 1) { + desc--; + dma_unmap_single(ndev->dev.parent, dma_addr, + len, DMA_TO_DEVICE); + } goto unmap; } ts_skb->skb = skb; @@@ -1568,15 -1593,18 +1594,18 @@@ skb_tx_timestamp(skb); /* Descriptor type must be set after all the above writes */ dma_wmb(); - desc->die_dt = DT_FEND; - desc--; - desc->die_dt = DT_FSTART; - + if (num_tx_desc > 1) { + desc->die_dt = DT_FEND; + desc--; + desc->die_dt = DT_FSTART; + } else { + desc->die_dt = DT_FSINGLE; + } ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
- priv->cur_tx[q] += NUM_TX_DESC; + priv->cur_tx[q] += num_tx_desc; if (priv->cur_tx[q] - priv->dirty_tx[q] > - (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && + (priv->num_tx_ring[q] - 1) * num_tx_desc && !ravb_tx_free(ndev, q, true)) netif_stop_subqueue(ndev, q);
@@@ -1590,7 -1618,7 +1619,7 @@@ unmap le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE); drop: dev_kfree_skb_any(skb); - priv->tx_skb[q][entry / NUM_TX_DESC] = NULL; + priv->tx_skb[q][entry / num_tx_desc] = NULL; goto exit; }
@@@ -2076,6 -2104,9 +2105,9 @@@ static int ravb_probe(struct platform_d ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); ndev->min_mtu = ETH_MIN_MTU;
+ priv->num_tx_desc = chip_id == RCAR_GEN2 ? + NUM_TX_DESC_GEN2 : NUM_TX_DESC_GEN3; + /* Set function */ ndev->netdev_ops = &ravb_netdev_ops; ndev->ethtool_ops = &ravb_ethtool_ops; diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 75896d6ba6e2,3715a0a4af3c..076a8be18d67 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@@ -148,14 -148,12 +148,14 @@@ static void stmmac_verify_args(void static void stmmac_disable_all_queues(struct stmmac_priv *priv) { u32 rx_queues_cnt = priv->plat->rx_queues_to_use; + u32 tx_queues_cnt = priv->plat->tx_queues_to_use; + u32 maxq = max(rx_queues_cnt, tx_queues_cnt); u32 queue;
- for (queue = 0; queue < rx_queues_cnt; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + for (queue = 0; queue < maxq; queue++) { + struct stmmac_channel *ch = &priv->channel[queue];
- napi_disable(&rx_q->napi); + napi_disable(&ch->napi); } }
@@@ -166,14 -164,12 +166,14 @@@ static void stmmac_enable_all_queues(struct stmmac_priv *priv) { u32 rx_queues_cnt = priv->plat->rx_queues_to_use; + u32 tx_queues_cnt = priv->plat->tx_queues_to_use; + u32 maxq = max(rx_queues_cnt, tx_queues_cnt); u32 queue;
- for (queue = 0; queue < rx_queues_cnt; queue++) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; + for (queue = 0; queue < maxq; queue++) { + struct stmmac_channel *ch = &priv->channel[queue];
- napi_enable(&rx_q->napi); + napi_enable(&ch->napi); } }
@@@ -991,17 -987,20 +991,20 @@@ static int stmmac_init_phy(struct net_d if ((interface == PHY_INTERFACE_MODE_MII) || (interface == PHY_INTERFACE_MODE_RMII) || (max_speed < 1000 && max_speed > 0)) - phydev->advertising &= ~(SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full); + phy_set_max_speed(phydev, SPEED_100);
/* * Half-duplex mode not supported with multiqueue * half-duplex can only works with single queue */ - if (tx_cnt > 1) - phydev->supported &= ~(SUPPORTED_1000baseT_Half | - SUPPORTED_100baseT_Half | - SUPPORTED_10baseT_Half); + if (tx_cnt > 1) { + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_100baseT_Half_BIT); + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + }
/* * Broken HW is sometimes missing the pull-up resistor on the @@@ -1847,18 -1846,18 +1850,18 @@@ static void stmmac_dma_operation_mode(s * @queue: TX queue index * Description: it reclaims the transmit resources after transmission completes. */ -static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) +static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) { struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; unsigned int bytes_compl = 0, pkts_compl = 0; - unsigned int entry; + unsigned int entry, count = 0;
- netif_tx_lock(priv->dev); + __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
priv->xstats.tx_clean++;
entry = tx_q->dirty_tx; - while (entry != tx_q->cur_tx) { + while ((entry != tx_q->cur_tx) && (count < budget)) { struct sk_buff *skb = tx_q->tx_skbuff[entry]; struct dma_desc *p; int status; @@@ -1874,8 -1873,6 +1877,8 @@@ if (unlikely(status & tx_dma_own)) break;
+ count++; + /* Make sure descriptor fields are read after reading * the own bit. */ @@@ -1943,10 -1940,7 +1946,10 @@@ stmmac_enable_eee_mode(priv); mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); } - netif_tx_unlock(priv->dev); + + __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); + + return count; }
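Note: stmmac_tx_clean() above now takes a budget and reports how many descriptors it reclaimed, so TX work is bounded inside NAPI the same way RX work is. A standalone sketch of the bounded reclaim loop (ring state invented):

#include <stdio.h>

/* Simulated ring: 1 = descriptor still owned by DMA, 0 = done. */
static const int dma_own[8] = { 0, 0, 0, 0, 0, 1, 1, 1 };

static int tx_clean(unsigned int *dirty, unsigned int cur, int budget)
{
        int count = 0;

        /* Stop at the budget even if work remains, so one busy TX
         * queue cannot monopolize the shared NAPI poll. */
        while (*dirty != cur && count < budget) {
                if (dma_own[*dirty % 8])        /* not transmitted yet */
                        break;
                (*dirty)++;
                count++;
        }
        return count;
}

int main(void)
{
        unsigned int dirty = 0, cur = 5;

        printf("cleaned %d\n", tx_clean(&dirty, cur, 2));  /* budget hit */
        printf("cleaned %d\n", tx_clean(&dirty, cur, 64)); /* drains rest */
        return 0;
}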
/** @@@ -2029,33 -2023,6 +2032,33 @@@ static bool stmmac_safety_feat_interrup return false; }
+static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan) +{ + int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, + &priv->xstats, chan); + struct stmmac_channel *ch = &priv->channel[chan]; + bool needs_work = false; + + if ((status & handle_rx) && ch->has_rx) { + needs_work = true; + } else { + status &= ~handle_rx; + } + + if ((status & handle_tx) && ch->has_tx) { + needs_work = true; + } else { + status &= ~handle_tx; + } + + if (needs_work && napi_schedule_prep(&ch->napi)) { + stmmac_disable_dma_irq(priv, priv->ioaddr, chan); + __napi_schedule(&ch->napi); + } + + return status; +} + /** * stmmac_dma_interrupt - DMA ISR * @priv: driver private structure @@@ -2070,14 -2037,57 +2073,14 @@@ static void stmmac_dma_interrupt(struc u32 channels_to_check = tx_channel_count > rx_channel_count ? tx_channel_count : rx_channel_count; u32 chan; - bool poll_scheduled = false; int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
/* Make sure we never check beyond our status buffer. */ if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status))) channels_to_check = ARRAY_SIZE(status);
- /* Each DMA channel can be used for rx and tx simultaneously, yet - * napi_struct is embedded in struct stmmac_rx_queue rather than in a - * stmmac_channel struct. - * Because of this, stmmac_poll currently checks (and possibly wakes) - * all tx queues rather than just a single tx queue. - */ for (chan = 0; chan < channels_to_check; chan++) - status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr, - &priv->xstats, chan); - - for (chan = 0; chan < rx_channel_count; chan++) { - if (likely(status[chan] & handle_rx)) { - struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; - - if (likely(napi_schedule_prep(&rx_q->napi))) { - stmmac_disable_dma_irq(priv, priv->ioaddr, chan); - __napi_schedule(&rx_q->napi); - poll_scheduled = true; - } - } - } - - /* If we scheduled poll, we already know that tx queues will be checked. - * If we didn't schedule poll, see if any DMA channel (used by tx) has a - * completed transmission, if so, call stmmac_poll (once). - */ - if (!poll_scheduled) { - for (chan = 0; chan < tx_channel_count; chan++) { - if (status[chan] & handle_tx) { - /* It doesn't matter what rx queue we choose - * here. We use 0 since it always exists. - */ - struct stmmac_rx_queue *rx_q = - &priv->rx_queue[0]; - - if (likely(napi_schedule_prep(&rx_q->napi))) { - stmmac_disable_dma_irq(priv, - priv->ioaddr, chan); - __napi_schedule(&rx_q->napi); - } - break; - } - } - } + status[chan] = stmmac_napi_check(priv, chan);
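Note: stmmac_napi_check() above folds RX and TX status into one per-channel decision: bits the channel does not service are cleared, and anything left schedules that channel's NAPI. A standalone sketch of the gist of that masking (flag values invented):

#include <stdio.h>

#define HANDLE_RX 0x1
#define HANDLE_TX 0x2

/* Keep only the status bits this channel services; a non-zero result
 * means "disable the DMA IRQ and __napi_schedule() this channel". */
static int napi_check(int status, int has_rx, int has_tx)
{
        if (!has_rx)
                status &= ~HANDLE_RX;
        if (!has_tx)
                status &= ~HANDLE_TX;
        return status;
}

int main(void)
{
        /* A TX-only channel ignores a stray RX bit. */
        printf("0x%x\n", napi_check(HANDLE_RX | HANDLE_TX, 0, 1));
        return 0;
}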
for (chan = 0; chan < tx_channel_count; chan++) { if (unlikely(status[chan] & tx_hard_error_bump_tc)) { @@@ -2213,7 -2223,8 +2216,7 @@@ static int stmmac_init_dma_engine(struc stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, tx_q->dma_tx_phy, chan);
- tx_q->tx_tail_addr = tx_q->dma_tx_phy + - (DMA_TX_SIZE * sizeof(struct dma_desc)); + tx_q->tx_tail_addr = tx_q->dma_tx_phy; stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, chan); } @@@ -2225,13 -2236,6 +2228,13 @@@ return ret; }
+static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) +{ + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; + + mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer)); +} + /** * stmmac_tx_timer - mitigation sw timer for tx. * @data: data pointer @@@ -2240,14 -2244,13 +2243,14 @@@ */ static void stmmac_tx_timer(struct timer_list *t) { - struct stmmac_priv *priv = from_timer(priv, t, txtimer); - u32 tx_queues_count = priv->plat->tx_queues_to_use; - u32 queue; + struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer); + struct stmmac_priv *priv = tx_q->priv_data; + struct stmmac_channel *ch; + + ch = &priv->channel[tx_q->queue_index];
- /* let's scan all the tx queues */ - for (queue = 0; queue < tx_queues_count; queue++) - stmmac_tx_clean(priv, queue); + if (likely(napi_schedule_prep(&ch->napi))) + __napi_schedule(&ch->napi); }
/** @@@ -2260,17 -2263,11 +2263,17 @@@ */ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) { + u32 tx_channel_count = priv->plat->tx_queues_to_use; + u32 chan; + priv->tx_coal_frames = STMMAC_TX_FRAMES; priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; - timer_setup(&priv->txtimer, stmmac_tx_timer, 0); - priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer); - add_timer(&priv->txtimer); + + for (chan = 0; chan < tx_channel_count; chan++) { + struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; + + timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0); + } }
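Note: the coalescing timer above moves from one global priv->txtimer to one timer per TX queue, created up front and armed lazily by stmmac_tx_timer_arm() only when frames are left without an interrupt bit. A sketch of that per-queue lifecycle using the same kernel timer API, with the struct trimmed to the relevant fields and the interval invented:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

struct txq {
        struct timer_list txtimer;
        int index;
};

static void txq_timer_fn(struct timer_list *t)
{
        struct txq *q = from_timer(q, t, txtimer);

        /* The patch schedules the owning channel's NAPI here rather
         * than cleaning inline. */
        pr_debug("txq %d coalesce timer expired\n", q->index);
}

static void txq_coal_init(struct txq *q)        /* open path */
{
        timer_setup(&q->txtimer, txq_timer_fn, 0);
}

static void txq_coal_arm(struct txq *q)         /* xmit path */
{
        mod_timer(&q->txtimer, jiffies + usecs_to_jiffies(1000));
}

static void txq_coal_stop(struct txq *q)        /* release path */
{
        del_timer_sync(&q->txtimer);
}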
static void stmmac_set_rings_length(struct stmmac_priv *priv) @@@ -2598,7 -2595,6 +2601,7 @@@ static void stmmac_hw_teardown(struct n static int stmmac_open(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + u32 chan; int ret;
stmmac_check_ether_addr(priv); @@@ -2695,9 -2691,7 +2698,9 @@@ irq_error if (dev->phydev) phy_stop(dev->phydev);
- del_timer_sync(&priv->txtimer); + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + del_timer_sync(&priv->tx_queue[chan].txtimer); + stmmac_hw_teardown(dev); init_error: free_dma_desc_resources(priv); @@@ -2717,7 -2711,6 +2720,7 @@@ dma_desc_error static int stmmac_release(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); + u32 chan;
if (priv->eee_enabled) del_timer_sync(&priv->eee_ctrl_timer); @@@ -2732,8 -2725,7 +2735,8 @@@
stmmac_disable_all_queues(priv);
- del_timer_sync(&priv->txtimer); + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) + del_timer_sync(&priv->tx_queue[chan].txtimer);
/* Free the IRQ lines */ free_irq(dev->irq, dev); @@@ -2947,13 -2939,14 +2950,13 @@@ static netdev_tx_t stmmac_tso_xmit(stru priv->xstats.tx_tso_nfrags += nfrags;
/* Manage tx mitigation */ - priv->tx_count_frames += nfrags + 1; - if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { - mod_timer(&priv->txtimer, - STMMAC_COAL_TIMER(priv->tx_coal_timer)); - } else { - priv->tx_count_frames = 0; + tx_q->tx_count_frames += nfrags + 1; + if (priv->tx_coal_frames <= tx_q->tx_count_frames) { stmmac_set_tx_ic(priv, desc); priv->xstats.tx_set_ic_bit++; + tx_q->tx_count_frames = 0; + } else { + stmmac_tx_timer_arm(priv, queue); }
skb_tx_timestamp(skb); @@@ -3002,7 -2995,6 +3005,7 @@@
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc)); stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
     return NETDEV_TX_OK;
@@@ -3157,13 -3149,14 +3160,13 @@@ static netdev_tx_t stmmac_xmit(struct s
      * This approach takes care about the fragments: desc is the first
      * element in case of no SG.
      */
-    priv->tx_count_frames += nfrags + 1;
-    if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
-        mod_timer(&priv->txtimer,
-                  STMMAC_COAL_TIMER(priv->tx_coal_timer));
-    } else {
-        priv->tx_count_frames = 0;
+    tx_q->tx_count_frames += nfrags + 1;
+    if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
         stmmac_set_tx_ic(priv, desc);
         priv->xstats.tx_set_ic_bit++;
+        tx_q->tx_count_frames = 0;
+    } else {
+        stmmac_tx_timer_arm(priv, queue);
     }

     skb_tx_timestamp(skb);
@@@ -3209,8 -3202,6 +3212,8 @@@
     netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

     stmmac_enable_dma_transmission(priv, priv->ioaddr);
+
+    tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
     stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);

     return NETDEV_TX_OK;
@@@ -3331,7 -3322,6 +3334,7 @@@ static inline void stmmac_rx_refill(str
 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 {
     struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+    struct stmmac_channel *ch = &priv->channel[queue];
     unsigned int entry = rx_q->cur_rx;
     int coe = priv->hw->rx_csum;
     unsigned int next_entry;
@@@ -3504,7 -3494,7 +3507,7 @@@
             else
                 skb->ip_summed = CHECKSUM_UNNECESSARY;

-            napi_gro_receive(&rx_q->napi, skb);
+            napi_gro_receive(&ch->napi, skb);

             priv->dev->stats.rx_packets++;
             priv->dev->stats.rx_bytes += frame_len;
@@@ -3527,33 -3517,27 +3530,33 @@@
  * Description :
  * To look at the incoming frames and clear the tx resources.
  */
-static int stmmac_poll(struct napi_struct *napi, int budget)
+static int stmmac_napi_poll(struct napi_struct *napi, int budget)
 {
-    struct stmmac_rx_queue *rx_q =
-        container_of(napi, struct stmmac_rx_queue, napi);
-    struct stmmac_priv *priv = rx_q->priv_data;
-    u32 tx_count = priv->plat->tx_queues_to_use;
-    u32 chan = rx_q->queue_index;
-    int work_done = 0;
-    u32 queue;
+    struct stmmac_channel *ch =
+        container_of(napi, struct stmmac_channel, napi);
+    struct stmmac_priv *priv = ch->priv_data;
+    int work_done = 0, work_rem = budget;
+    u32 chan = ch->index;

     priv->xstats.napi_poll++;

-    /* check all the queues */
-    for (queue = 0; queue < tx_count; queue++)
-        stmmac_tx_clean(priv, queue);
+    if (ch->has_tx) {
+        int done = stmmac_tx_clean(priv, work_rem, chan);

-    work_done = stmmac_rx(priv, budget, rx_q->queue_index);
-    if (work_done < budget) {
-        napi_complete_done(napi, work_done);
-        stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+        work_done += done;
+        work_rem -= done;
+    }
+
+    if (ch->has_rx) {
+        int done = stmmac_rx(priv, work_rem, chan);
+
+        work_done += done;
+        work_rem -= done;
     }
+
+    if (work_done < budget && napi_complete_done(napi, work_done))
+        stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+
     return work_done;
 }
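stmmac_napi_poll() now serves a single channel and splits the NAPI budget between TX completion and RX, completing NAPI and re-enabling the DMA interrupt only when the combined work stayed under budget. A standalone model of that control flow; fake_tx_clean() and fake_rx() are stand-ins for the real cleaners, not kernel APIs:

    #include <stdio.h>

    static int fake_tx_clean(int budget) { return budget < 5 ? budget : 5; }
    static int fake_rx(int budget)       { return budget < 20 ? budget : 20; }

    int main(void)
    {
        int budget = 64, work_done = 0, work_rem = budget;
        int done;

        done = fake_tx_clean(work_rem);   /* ch->has_tx path */
        work_done += done;
        work_rem  -= done;

        done = fake_rx(work_rem);         /* ch->has_rx path */
        work_done += done;
        work_rem  -= done;

        if (work_done < budget)
            printf("complete NAPI, re-enable IRQ (work_done=%d)\n", work_done);
        else
            printf("budget exhausted, stay in polling mode\n");
        return 0;
    }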
@@@ -4217,8 -4201,8 +4220,8 @@@ int stmmac_dvr_probe(struct device *dev
 {
     struct net_device *ndev = NULL;
     struct stmmac_priv *priv;
+    u32 queue, maxq;
     int ret = 0;
-    u32 queue;

     ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
                               MTL_MAX_TX_QUEUES,
@@@ -4341,22 -4325,11 +4344,22 @@@
             "Enable RX Mitigation via HW Watchdog Timer\n");
     }

-    for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
-        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+    /* Setup channels NAPI */
+    maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

-        netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
-                       (8 * priv->plat->rx_queues_to_use));
+    for (queue = 0; queue < maxq; queue++) {
+        struct stmmac_channel *ch = &priv->channel[queue];
+
+        ch->priv_data = priv;
+        ch->index = queue;
+
+        if (queue < priv->plat->rx_queues_to_use)
+            ch->has_rx = true;
+        if (queue < priv->plat->tx_queues_to_use)
+            ch->has_tx = true;
+
+        netif_napi_add(ndev, &ch->napi, stmmac_napi_poll,
+                       NAPI_POLL_WEIGHT);
     }
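The probe path above sizes the channel array by max(rx, tx) queue counts and flags each channel for the directions it actually serves, registering NAPI with the generic NAPI_POLL_WEIGHT instead of the old rx-scaled weight. A small model of the has_rx/has_tx assignment; MAXQ and the queue counts are invented for the example:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAXQ 8

    int main(void)
    {
        unsigned int rx = 2, tx = 4;
        unsigned int maxq = rx > tx ? rx : tx;
        bool has_rx[MAXQ] = { false }, has_tx[MAXQ] = { false };

        for (unsigned int q = 0; q < maxq; q++) {
            has_rx[q] = q < rx;
            has_tx[q] = q < tx;
            printf("channel %u: rx=%d tx=%d\n", q, has_rx[q], has_tx[q]);
        }
        return 0;
    }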
     mutex_init(&priv->lock);
@@@ -4402,10 -4375,10 +4405,10 @@@ error_netdev_register
         priv->hw->pcs != STMMAC_PCS_RTBI)
         stmmac_mdio_unregister(ndev);
 error_mdio_register:
-    for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
-        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+    for (queue = 0; queue < maxq; queue++) {
+        struct stmmac_channel *ch = &priv->channel[queue];

-        netif_napi_del(&rx_q->napi);
+        netif_napi_del(&ch->napi);
     }
 error_hw_init:
     destroy_workqueue(priv->wq);
diff --combined include/linux/mlx5/driver.h
index 4321962d9b7c,ed73b51f6697..ad252b0a7acc
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@@ -163,7 -163,10 +163,7 @@@ enum mlx5_dcbx_oper_mode
 };

 enum mlx5_dct_atomic_mode {
-    MLX5_ATOMIC_MODE_DCT_OFF = 20,
-    MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF,
-    MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF,
-    MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF,
+    MLX5_ATOMIC_MODE_DCT_CX = 2,
 };

 enum {
@@@ -580,10 -583,11 +580,11 @@@ struct mlx5_irq_info
 };

 struct mlx5_fc_stats {
-    struct rb_root counters;
-    struct list_head addlist;
-    /* protect addlist add/splice operations */
-    spinlock_t addlist_lock;
+    spinlock_t counters_idr_lock; /* protects counters_idr */
+    struct idr counters_idr;
+    struct list_head counters;
+    struct llist_head addlist;
+    struct llist_head dellist;

     struct workqueue_struct *wq;
     struct delayed_work work;
@@@ -801,7 -805,7 +802,7 @@@ struct mlx5_pps
 };
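In struct mlx5_fc_stats, the rbtree plus a spinlock-protected add list give way to an IDR for lookup and two lock-free llists for pending adds and deletes. The sketch below models the llist half in userspace with C11 atomics: producers push nodes with a CAS loop (as llist_add() does) and the worker detaches the whole chain in one exchange (as llist_del_all() does). Types and names here are illustrative, not the mlx5 definitions:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node { struct node *next; int id; };

    static _Atomic(struct node *) addlist;

    static void push(struct node *n)          /* ~ llist_add() */
    {
        n->next = atomic_load(&addlist);
        while (!atomic_compare_exchange_weak(&addlist, &n->next, n))
            ;
    }

    static struct node *take_all(void)        /* ~ llist_del_all() */
    {
        return atomic_exchange(&addlist, NULL);
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));
            n->id = i;
            push(n);
        }
        for (struct node *n = take_all(); n; ) {
            struct node *next = n->next;
            printf("processing counter %d\n", n->id);
            free(n);
            n = next;
        }
        return 0;
    }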
 struct mlx5_clock {
-    rwlock_t lock;
+    seqlock_t lock;
     struct cyclecounter cycles;
     struct timecounter tc;
     struct hwtstamp_config hwtstamp_config;
diff --combined net/batman-adv/soft-interface.c
index 626ddca332db,2c7d95727f90..5db5a0a4c959
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@@ -574,20 -574,15 +574,20 @@@ int batadv_softif_create_vlan(struct ba
     struct batadv_softif_vlan *vlan;
     int err;

+    spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+
     vlan = batadv_softif_vlan_get(bat_priv, vid);
     if (vlan) {
         batadv_softif_vlan_put(vlan);
+        spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
         return -EEXIST;
     }

     vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
-    if (!vlan)
+    if (!vlan) {
+        spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
         return -ENOMEM;
+    }

     vlan->bat_priv = bat_priv;
     vlan->vid = vid;
@@@ -595,23 -590,17 +595,23 @@@

     atomic_set(&vlan->ap_isolation, 0);

+    kref_get(&vlan->refcount);
+    hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
+    spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
+    /* batadv_sysfs_add_vlan cannot be in the spinlock section due to the
+     * sleeping behavior of the sysfs functions and the fs_reclaim lock
+     */
     err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
     if (err) {
-        kfree(vlan);
+        /* ref for the function */
+        batadv_softif_vlan_put(vlan);
+
+        /* ref for the list */
+        batadv_softif_vlan_put(vlan);
         return err;
     }
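The reworked batadv_softif_create_vlan() takes the list lock before the lookup, publishes the vlan while still holding it, and only then calls into sysfs (which can sleep). Note the reference discipline in the error path: one put for the function's own reference and one for the list's. A toy model of that two-reference pattern, with a plain counter standing in for kref:

    #include <stdio.h>
    #include <stdlib.h>

    struct vlan { int refcount; };

    static void vlan_put(struct vlan *v)
    {
        if (--v->refcount == 0) {
            printf("freeing vlan\n");
            free(v);
        }
    }

    int main(void)
    {
        struct vlan *v = malloc(sizeof(*v));
        v->refcount = 1;          /* caller's reference */

        v->refcount++;            /* kref_get() for the list insertion */

        int sysfs_err = -1;       /* pretend batadv_sysfs_add_vlan() failed */
        if (sysfs_err) {
            vlan_put(v);          /* drop the function's reference */
            vlan_put(v);          /* drop the list's reference -> freed */
        }
        return 0;
    }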
-    spin_lock_bh(&bat_priv->softif_vlan_list_lock);
-    kref_get(&vlan->refcount);
-    hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
-    spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-
     /* add a new TT local entry. This one will be marked with the NOPURGE
      * flag
      */
@@@ -844,7 -833,6 +844,6 @@@ static int batadv_softif_init_late(stru
     atomic_set(&bat_priv->frag_seqno, random_seqno);

     bat_priv->primary_if = NULL;
-    bat_priv->num_ifaces = 0;

     batadv_nc_init_bat_priv(bat_priv);

@@@ -1062,6 -1050,7 +1061,7 @@@ static void batadv_softif_init_early(st
     dev->needs_free_netdev = true;
     dev->priv_destructor = batadv_softif_free;
     dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL;
+    dev->features |= NETIF_F_LLTX;
     dev->priv_flags |= IFF_NO_QUEUE;

     /* can't call min_mtu, because the needed variables
diff --combined net/ipv6/route.c
index 826b14de7dbb,938db8ae2316..d28f83e01593
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@@ -364,14 -364,11 +364,14 @@@ EXPORT_SYMBOL(ip6_dst_alloc)

 static void ip6_dst_destroy(struct dst_entry *dst)
 {
+    struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
     struct rt6_info *rt = (struct rt6_info *)dst;
     struct fib6_info *from;
     struct inet6_dev *idev;

-    dst_destroy_metrics_generic(dst);
+    if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
+        kfree(p);
+
     rt6_uncached_list_del(rt);

     idev = rt->rt6i_idev;
@@@ -979,10 -976,6 +979,10 @@@ static void rt6_set_from(struct rt6_inf
     rt->rt6i_flags &= ~RTF_EXPIRES;
     rcu_assign_pointer(rt->from, from);
     dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
+    if (from->fib6_metrics != &dst_default_metrics) {
+        rt->dst._metrics |= DST_METRICS_REFCOUNTED;
+        refcount_inc(&from->fib6_metrics->refcnt);
+    }
 }
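The ipv6 hunks above replace dst_destroy_metrics_generic() with an explicit rule: metrics blocks other than dst_default_metrics are reference-counted, each rt6_info sharing them takes a reference in rt6_set_from(), and the block is freed only when the last user drops it. A simplified userspace model of that lifetime rule (plain int instead of refcount_t; all names illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct dst_metrics { int refcnt; };

    static struct dst_metrics dst_default_metrics;   /* static, never freed */

    static void metrics_put(struct dst_metrics *p)
    {
        if (p != &dst_default_metrics && --p->refcnt == 0) {
            printf("freeing shared metrics\n");
            free(p);
        }
    }

    int main(void)
    {
        struct dst_metrics *m = malloc(sizeof(*m));
        m->refcnt = 1;            /* owned by the fib6 entry */

        m->refcnt++;              /* rt6_set_from(): route takes a ref */

        metrics_put(m);           /* route destroyed */
        metrics_put(m);           /* fib entry destroyed -> freed */
        metrics_put(&dst_default_metrics);   /* no-op by design */
        return 0;
    }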
 /* Caller must already hold reference to @ort */
@@@ -1000,7 -993,6 +1000,6 @@@ static void ip6_rt_copy_init(struct rt6
 #ifdef CONFIG_IPV6_SUBTREES
     rt->rt6i_src = ort->fib6_src;
 #endif
-    rt->rt6i_prefsrc = ort->fib6_prefsrc;
 }

 static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
@@@ -1454,11 -1446,6 +1453,6 @@@ static int rt6_insert_exception(struct
     if (ort->fib6_src.plen)
         src_key = &nrt->rt6i_src.addr;
 #endif
-
-    /* Update rt6i_prefsrc as it could be changed
-     * in rt6_remove_prefsrc()
-     */
-    nrt->rt6i_prefsrc = ort->fib6_prefsrc;
     /* rt6_mtu_change() might lower mtu on ort.
      * Only insert this exception route if its mtu
      * is less than ort's mtu value.
@@@ -1640,25 -1627,6 +1634,6 @@@ static void rt6_update_exception_stamp_
     rcu_read_unlock();
 }

-static void rt6_exceptions_remove_prefsrc(struct fib6_info *rt)
-{
-    struct rt6_exception_bucket *bucket;
-    struct rt6_exception *rt6_ex;
-    int i;
-
-    bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
-                                       lockdep_is_held(&rt6_exception_lock));
-
-    if (bucket) {
-        for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
-            hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
-                rt6_ex->rt6i->rt6i_prefsrc.plen = 0;
-            }
-            bucket++;
-        }
-    }
-}
-
 static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
                                          struct rt6_info *rt, int mtu)
 {
@@@ -2103,7 -2071,8 +2078,8 @@@ struct dst_entry *ip6_route_output_flag
 {
     bool any_src;

-    if (rt6_need_strict(&fl6->daddr)) {
+    if (ipv6_addr_type(&fl6->daddr) &
+        (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
         struct dst_entry *dst;

         dst = l3mdev_link_scope_lookup(net, fl6);
@@@ -3142,8 -3111,6 +3118,6 @@@ install_route
     rt->fib6_nh.nh_dev = dev;
     rt->fib6_table = table;

-    cfg->fc_nlinfo.nl_net = dev_net(dev);
-
     if (idev)
         in6_dev_put(idev);

@@@ -3800,8 -3767,6 +3774,6 @@@ static int fib6_remove_prefsrc(struct f
         spin_lock_bh(&rt6_exception_lock);
         /* remove prefsrc entry */
         rt->fib6_prefsrc.plen = 0;
-        /* need to update cache as well */
-        rt6_exceptions_remove_prefsrc(rt);
         spin_unlock_bh(&rt6_exception_lock);
     }
     return 0;
diff --combined net/mac80211/tx.c
index 25ba24bef8f5,c42bfa1dcd2c..e0ccee23fbcd
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@@ -214,7 -214,6 +214,7 @@@ ieee80211_tx_h_dynamic_ps(struct ieee80
 {
     struct ieee80211_local *local = tx->local;
     struct ieee80211_if_managed *ifmgd;
+    struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);

     /* driver doesn't support power save */
     if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS))
@@@ -243,9 -242,6 +243,9 @@@
     if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
         return TX_CONTINUE;

+    if (unlikely(info->flags & IEEE80211_TX_INTFL_OFFCHAN_TX_OK))
+        return TX_CONTINUE;
+
     ifmgd = &tx->sdata->u.mgd;

     /*
@@@ -1253,10 -1249,18 +1253,18 @@@ static struct txq_info *ieee80211_get_t
         (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
         return NULL;

-    if (!ieee80211_is_data_present(hdr->frame_control))
-        return NULL;
-
-    if (sta) {
+    if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) {
+        if ((!ieee80211_is_mgmt(hdr->frame_control) ||
+             ieee80211_is_bufferable_mmpdu(hdr->frame_control) ||
+             vif->type == NL80211_IFTYPE_STATION) &&
+            sta && sta->uploaded) {
+            /*
+             * This will be NULL if the driver didn't set the
+             * opt-in hardware flag.
+             */
+            txq = sta->sta.txq[IEEE80211_NUM_TIDS];
+        }
+    } else if (sta) {
         u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

         if (!sta->uploaded)
@@@ -1444,16 -1448,33 +1452,33 @@@ void ieee80211_txq_init(struct ieee8021

     txqi->txq.vif = &sdata->vif;

-    if (sta) {
-        txqi->txq.sta = &sta->sta;
-        sta->sta.txq[tid] = &txqi->txq;
-        txqi->txq.tid = tid;
-        txqi->txq.ac = ieee80211_ac_from_tid(tid);
-    } else {
+    if (!sta) {
         sdata->vif.txq = &txqi->txq;
         txqi->txq.tid = 0;
         txqi->txq.ac = IEEE80211_AC_BE;
+
+        return;
     }
+
+    if (tid == IEEE80211_NUM_TIDS) {
+        if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+            /* Drivers need to opt in to the management MPDU TXQ */
+            if (!ieee80211_hw_check(&sdata->local->hw,
+                                    STA_MMPDU_TXQ))
+                return;
+        } else if (!ieee80211_hw_check(&sdata->local->hw,
+                                       BUFF_MMPDU_TXQ)) {
+            /* Drivers need to opt in to the bufferable MMPDU TXQ */
+            return;
+        }
+        txqi->txq.ac = IEEE80211_AC_VO;
+    } else {
+        txqi->txq.ac = ieee80211_ac_from_tid(tid);
+    }
+
+    txqi->txq.sta = &sta->sta;
+    txqi->txq.tid = tid;
+    sta->sta.txq[tid] = &txqi->txq;
 }
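ieee80211_txq_init() now also handles a per-station management TXQ at index IEEE80211_NUM_TIDS, which exists only when the driver opts in (the STA_MMPDU_TXQ or BUFF_MMPDU_TXQ hardware flags above) and is pinned to the voice AC, while ordinary TIDs keep the usual 802.1d mapping. A sketch of the resulting TID-to-AC policy; the table is the standard 802.11 TID-to-AC mapping, everything else here is illustrative:

    #include <stdio.h>

    #define NUM_TIDS 16
    enum ac { AC_VO, AC_VI, AC_BE, AC_BK };

    static enum ac ac_from_tid(int tid)
    {
        static const enum ac tid_to_ac[8] = {
            AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO,
        };
        return tid_to_ac[tid & 7];
    }

    int main(void)
    {
        for (int tid = 0; tid <= NUM_TIDS; tid++) {
            /* the extra queue at NUM_TIDS carries management frames */
            enum ac ac = (tid == NUM_TIDS) ? AC_VO : ac_from_tid(tid);
            printf("tid %2d -> AC %d\n", tid, ac);
        }
        return 0;
    }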
 void ieee80211_txq_purge(struct ieee80211_local *local,
@@@ -1894,7 -1915,7 +1919,7 @@@ static bool ieee80211_tx(struct ieee802
         sdata->vif.hw_queue[skb_get_queue_mapping(skb)];

     if (invoke_tx_handlers_early(&tx))
-        return false;
+        return true;

     if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
         return true;
@@@ -2955,6 -2976,10 +2980,10 @@@ void ieee80211_check_fast_xmit(struct s
         if (!(build.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
             goto out;

+        /* Key is being removed */
+        if (build.key->flags & KEY_FLAG_TAINTED)
+            goto out;
+
         switch (build.key->conf.cipher) {
         case WLAN_CIPHER_SUITE_CCMP:
         case WLAN_CIPHER_SUITE_CCMP_256:
@@@ -3200,6 -3225,10 +3229,10 @@@ static bool ieee80211_amsdu_aggregate(s
     max_amsdu_len = min_t(int, max_amsdu_len,
                           sta->sta.max_rc_amsdu_len);

+    if (sta->sta.max_tid_amsdu_len[tid])
+        max_amsdu_len = min_t(int, max_amsdu_len,
+                              sta->sta.max_tid_amsdu_len[tid]);
+
     spin_lock_bh(&fq->lock);

     /* TODO: Ideally aggregation should be done on dequeue to remain
@@@ -3232,6 -3261,9 +3265,9 @@@
     if (max_frags && nfrags > max_frags)
         goto out;

+    if (!drv_can_aggregate_in_amsdu(local, head, skb))
+        goto out;
+
     if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
         goto out;

@@@ -3476,13 -3508,19 +3512,19 @@@ struct sk_buff *ieee80211_tx_dequeue(st
     struct ieee80211_tx_info *info;
     struct ieee80211_tx_data tx;
     ieee80211_tx_result r;
-    struct ieee80211_vif *vif;
+    struct ieee80211_vif *vif = txq->vif;

     spin_lock_bh(&fq->lock);

-    if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags))
+    if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ||
+        test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags))
         goto out;

+    if (vif->txqs_stopped[ieee80211_ac_from_tid(txq->tid)]) {
+        set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags);
+        goto out;
+    }
+
     /* Make sure fragments stay together. */
     skb = __skb_dequeue(&txqi->frags);
     if (skb)
@@@ -3577,6 -3615,7 +3619,7 @@@ begin
     }

     IEEE80211_SKB_CB(skb)->control.vif = vif;
+
 out:
     spin_unlock_bh(&fq->lock);

@@@ -3605,13 -3644,7 +3648,7 @@@ void __ieee80211_subif_start_xmit(struc
     if (!IS_ERR_OR_NULL(sta)) {
         struct ieee80211_fast_tx *fast_tx;

-        /* We need a bit of data queued to build aggregates properly, so
-         * instruct the TCP stack to allow more than a single ms of data
-         * to be queued in the stack. The value is a bit-shift of 1
-         * second, so 8 is ~4ms of queued data. Only affects local TCP
-         * sockets.
-         */
-        sk_pacing_shift_update(skb->sk, 8);
+        sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
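Per the removed comment, the pacing shift limits a socket's locally queued TCP data to roughly (1 second >> shift) worth at the pacing rate, so the old hardcoded 8 meant about 4 ms; the change turns that into a per-hardware knob, tx_sk_pacing_shift. A quick check of what different shifts allow:

    #include <stdio.h>

    int main(void)
    {
        for (int shift = 6; shift <= 10; shift++)
            printf("shift %2d -> ~%.2f ms of queued data\n",
                   shift, 1000.0 / (1 << shift));
        return 0;
    }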
         fast_tx = rcu_dereference(sta->fast_tx);

diff --combined net/wireless/reg.c
index 765dedb12361,56be68a27bb9..5ad5b9f98e8f
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@@ -847,22 -847,36 +847,36 @@@ static bool valid_regdb(const u8 *data
     return true;
 }

-static void set_wmm_rule(struct ieee80211_reg_rule *rrule,
-                         struct fwdb_wmm_rule *wmm)
-{
-    struct ieee80211_wmm_rule *rule = &rrule->wmm_rule;
-    unsigned int i;
+static void set_wmm_rule(const struct fwdb_header *db,
+                         const struct fwdb_country *country,
+                         const struct fwdb_rule *rule,
+                         struct ieee80211_reg_rule *rrule)
+{
+    struct ieee80211_wmm_rule *wmm_rule = &rrule->wmm_rule;
+    struct fwdb_wmm_rule *wmm;
+    unsigned int i, wmm_ptr;
+
+    wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2;
+    wmm = (void *)((u8 *)db + wmm_ptr);
+
+    if (!valid_wmm(wmm)) {
+        pr_err("Invalid regulatory WMM rule %u-%u in domain %c%c\n",
+               be32_to_cpu(rule->start), be32_to_cpu(rule->end),
+               country->alpha2[0], country->alpha2[1]);
+        return;
+    }

     for (i = 0; i < IEEE80211_NUM_ACS; i++) {
-        rule->client[i].cw_min =
+        wmm_rule->client[i].cw_min =
             ecw2cw((wmm->client[i].ecw & 0xf0) >> 4);
-        rule->client[i].cw_max = ecw2cw(wmm->client[i].ecw & 0x0f);
-        rule->client[i].aifsn = wmm->client[i].aifsn;
-        rule->client[i].cot = 1000 * be16_to_cpu(wmm->client[i].cot);
-        rule->ap[i].cw_min = ecw2cw((wmm->ap[i].ecw & 0xf0) >> 4);
-        rule->ap[i].cw_max = ecw2cw(wmm->ap[i].ecw & 0x0f);
-        rule->ap[i].aifsn = wmm->ap[i].aifsn;
-        rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot);
+        wmm_rule->client[i].cw_max = ecw2cw(wmm->client[i].ecw & 0x0f);
+        wmm_rule->client[i].aifsn = wmm->client[i].aifsn;
+        wmm_rule->client[i].cot =
+            1000 * be16_to_cpu(wmm->client[i].cot);
+        wmm_rule->ap[i].cw_min = ecw2cw((wmm->ap[i].ecw & 0xf0) >> 4);
+        wmm_rule->ap[i].cw_max = ecw2cw(wmm->ap[i].ecw & 0x0f);
+        wmm_rule->ap[i].aifsn = wmm->ap[i].aifsn;
+        wmm_rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot);
     }

     rrule->has_wmm = true;
@@@ -870,7 -884,7 +884,7 @@@

 static int __regdb_query_wmm(const struct fwdb_header *db,
                              const struct fwdb_country *country, int freq,
-                             struct ieee80211_reg_rule *rule)
+                             struct ieee80211_reg_rule *rrule)
 {
     unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2;
     struct fwdb_collection *coll = (void *)((u8 *)db + ptr);
@@@ -879,18 -893,14 +893,14 @@@
     for (i = 0; i < coll->n_rules; i++) {
         __be16 *rules_ptr = (void *)((u8 *)coll + ALIGN(coll->len, 2));
         unsigned int rule_ptr = be16_to_cpu(rules_ptr[i]) << 2;
-        struct fwdb_rule *rrule = (void *)((u8 *)db + rule_ptr);
-        struct fwdb_wmm_rule *wmm;
-        unsigned int wmm_ptr;
+        struct fwdb_rule *rule = (void *)((u8 *)db + rule_ptr);

-        if (rrule->len < offsetofend(struct fwdb_rule, wmm_ptr))
+        if (rule->len < offsetofend(struct fwdb_rule, wmm_ptr))
             continue;

-        if (freq >= KHZ_TO_MHZ(be32_to_cpu(rrule->start)) &&
-            freq <= KHZ_TO_MHZ(be32_to_cpu(rrule->end))) {
-            wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2;
-            wmm = (void *)((u8 *)db + wmm_ptr);
-            set_wmm_rule(rule, wmm);
+        if (freq >= KHZ_TO_MHZ(be32_to_cpu(rule->start)) &&
+            freq <= KHZ_TO_MHZ(be32_to_cpu(rule->end))) {
+            set_wmm_rule(db, country, rule, rrule);
             return 0;
         }
     }
@@@ -972,12 -982,8 +982,8 @@@ static int regdb_query_country(const st
         if (rule->len >= offsetofend(struct fwdb_rule, cac_timeout))
             rrule->dfs_cac_ms =
                 1000 * be16_to_cpu(rule->cac_timeout);
-        if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) {
-            u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2;
-            struct fwdb_wmm_rule *wmm = (void *)((u8 *)db + wmm_ptr);
-
-            set_wmm_rule(rrule, wmm);
-        }
+        if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr))
+            set_wmm_rule(db, country, rule, rrule);
     }
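set_wmm_rule() now resolves and validates the WMM pointer itself, and its copy loop decodes exponent-coded contention windows: each nibble of the ecw byte is an exponent e with cw = 2^e - 1, the high nibble feeding cw_min and the low nibble cw_max, exactly as in the hunk above. A standalone check of that arithmetic (the sample ecw byte is made up):

    #include <stdio.h>

    static int ecw2cw(int ecw)
    {
        return (1 << ecw) - 1;
    }

    int main(void)
    {
        unsigned char ecw = 0x4a;   /* high nibble 4 -> cw_min, low nibble 10 -> cw_max */

        printf("cw_min=%d cw_max=%d\n",
               ecw2cw((ecw & 0xf0) >> 4),   /* 2^4  - 1 = 15   */
               ecw2cw(ecw & 0x0f));         /* 2^10 - 1 = 1023 */
        return 0;
    }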
     return reg_schedule_apply(regdom);
@@@ -2867,7 -2873,6 +2873,7 @@@ static int regulatory_hint_core(const c
     request->alpha2[0] = alpha2[0];
     request->alpha2[1] = alpha2[1];
     request->initiator = NL80211_REGDOM_SET_BY_CORE;
+    request->wiphy_idx = WIPHY_IDX_INVALID;

     queue_regulatory_request(request);

@@@ -3185,13 -3190,59 +3191,59 @@@ static void restore_regulatory_settings
     schedule_work(&reg_work);
 }

+static bool is_wiphy_all_set_reg_flag(enum ieee80211_regulatory_flags flag)
+{
+    struct cfg80211_registered_device *rdev;
+    struct wireless_dev *wdev;
+
+    list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+        list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
+            wdev_lock(wdev);
+            if (!(wdev->wiphy->regulatory_flags & flag)) {
+                wdev_unlock(wdev);
+                return false;
+            }
+            wdev_unlock(wdev);
+        }
+    }
+
+    return true;
+}
+
 void regulatory_hint_disconnect(void)
 {
+    /* Restoring regulatory settings is not required when all wiphys
+     * ignore the country IE from the connected access point, but beacon
+     * hints must still be cleared when beacon hints are supported.
+     */
+    if (is_wiphy_all_set_reg_flag(REGULATORY_COUNTRY_IE_IGNORE)) {
+        struct reg_beacon *reg_beacon, *btmp;
+
+        if (is_wiphy_all_set_reg_flag(REGULATORY_DISABLE_BEACON_HINTS))
+            return;
+
+        spin_lock_bh(&reg_pending_beacons_lock);
+        list_for_each_entry_safe(reg_beacon, btmp,
+                                 &reg_pending_beacons, list) {
+            list_del(&reg_beacon->list);
+            kfree(reg_beacon);
+        }
+        spin_unlock_bh(&reg_pending_beacons_lock);
+
+        list_for_each_entry_safe(reg_beacon, btmp,
+                                 &reg_beacon_list, list) {
+            list_del(&reg_beacon->list);
+            kfree(reg_beacon);
+        }
+
+        return;
+    }
+
     pr_debug("All devices are disconnected, going to restore regulatory settings\n");
     restore_regulatory_settings(false);
 }
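regulatory_hint_disconnect() now short-circuits: if every registered wiphy ignores the country IE there is nothing to restore, and the pending plus active beacon hints are flushed only when at least one wiphy still honours them. A compact model of that all-devices predicate, with flag bitmasks standing in for regulatory_flags:

    #include <stdbool.h>
    #include <stdio.h>

    #define COUNTRY_IE_IGNORE    0x1
    #define DISABLE_BEACON_HINTS 0x2

    static bool all_set(const unsigned int *flags, int n, unsigned int flag)
    {
        for (int i = 0; i < n; i++)
            if (!(flags[i] & flag))
                return false;
        return true;
    }

    int main(void)
    {
        unsigned int wiphys[] = { COUNTRY_IE_IGNORE,
                                  COUNTRY_IE_IGNORE | DISABLE_BEACON_HINTS };
        int n = 2;

        if (all_set(wiphys, n, COUNTRY_IE_IGNORE)) {
            if (all_set(wiphys, n, DISABLE_BEACON_HINTS))
                printf("nothing to do\n");
            else
                printf("clear beacon hints only\n");
        } else {
            printf("restore regulatory settings\n");
        }
        return 0;
    }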
-static bool freq_is_chan_12_13_14(u16 freq)
+static bool freq_is_chan_12_13_14(u32 freq)
 {
     if (freq == ieee80211_channel_to_frequency(12, NL80211_BAND_2GHZ) ||
         freq == ieee80211_channel_to_frequency(13, NL80211_BAND_2GHZ) ||
diff --combined net/xfrm/xfrm_output.c
index 261995d37ced,2d42cb0c94b8..4ae87c5ce2e3
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@@ -100,10 -100,6 +100,10 @@@ static int xfrm_output_one(struct sk_bu
     spin_unlock_bh(&x->lock);

     skb_dst_force(skb);
+    if (!skb_dst(skb)) {
+        XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
+        goto error_nolock;
+    }
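The xfrm hunk accounts for skb_dst_force() no longer guaranteeing a referenced dst: when the entry is already being released, the skb's dst ends up cleared, so the output path must re-check skb_dst() and fail with an error-counter bump instead of dereferencing a stale pointer later. A toy model of that check (stand-in types, not the kernel API):

    #include <stdio.h>

    struct dst { int refcnt; };

    /* Returns the dst only if a reference could be taken, else NULL. */
    static struct dst *dst_force(struct dst *d)
    {
        if (d && d->refcnt > 0) {
            d->refcnt++;
            return d;
        }
        return NULL;
    }

    int main(void)
    {
        struct dst dying = { .refcnt = 0 };   /* already being released */
        struct dst *d = dst_force(&dying);

        if (!d) {
            printf("count XfrmOutError and drop the packet\n");
            return 1;
        }
        return 0;
    }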
     if (xfrm_offload(skb)) {
         x->type_offload->encap(x, skb);
@@@ -193,7 -189,7 +193,7 @@@ static int xfrm_output_gso(struct net *
         struct sk_buff *nskb = segs->next;
         int err;

-        segs->next = NULL;
+        skb_mark_not_on_list(segs);
         err = xfrm_output2(net, sk, segs);

         if (unlikely(err)) {