The following commit has been merged in the master branch:

commit cf145c57ea23578117cb546cabb993f6cf200192
Merge: db4dd038f4da0b35d7ee07efc987760bc6965edf bb709987f1043e23fce907cddedde5d8e495e76b
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Thu May 12 11:09:01 2022 +1000
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
diff --combined Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml index 000000000000,f5a531738d93..76199a67d628 mode 000000,100644..100644 --- a/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml +++ b/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml @@@ -1,0 -1,137 +1,138 @@@ + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + # Copyright (c) 2020, Silicon Laboratories, Inc. + %YAML 1.2 + --- + + $id: http://devicetree.org/schemas/net/wireless/silabs,wfx.yaml# + $schema: http://devicetree.org/meta-schemas/core.yaml# + + title: Silicon Labs WFxxx devicetree bindings + + maintainers: + - Jérôme Pouiller jerome.pouiller@silabs.com + + description: > + Support for the Wi-Fi chip WFxxx from Silicon Labs. Currently, the only device + from the WFxxx series is the WF200, described here: + https://www.silabs.com/documents/public/data-sheets/wf200-datasheet.pdf + + The WF200 can be connected via SPI or via SDIO. + + For SDIO: + + Declaring the WFxxx chip in the device tree is mandatory (for most SDIO devices, + the VID/PID alone is sufficient). + + It is recommended to declare a mmc-pwrseq on the SDIO host above the WFx. Without + it, you may encounter issues during reboot. The mmc-pwrseq should be + compatible with mmc-pwrseq-simple. Please consult + Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.yaml for more + information. + + For SPI: + + In addition to the properties below, please consult + Documentation/devicetree/bindings/spi/spi-controller.yaml for optional SPI-related + properties. + + properties: + compatible: + items: + - enum: ++ - prt,prtt1c-wfm200 # Protonic PRTT1C Board + - silabs,brd4001a # WGM160P Evaluation Board + - silabs,brd8022a # WF200 Evaluation Board + - silabs,brd8023a # WFM200 Evaluation Board + - const: silabs,wf200 # Chip alone without antenna + + reg: + description: + When used on the SDIO bus, <reg> must be set to 1. When used on the SPI bus, it is + the chip select address of the device, as defined in the SPI device + bindings. + maxItems: 1 + + spi-max-frequency: true + + interrupts: + description: The interrupt line. Should be IRQ_TYPE_EDGE_RISING. When SPI is + used, this property is required. When SDIO is used, the "in-band" + interrupt provided by the SDIO bus is used unless an interrupt is defined + in the Device Tree. + maxItems: 1 + + reset-gpios: + description: (SPI only) Phandle of the gpio that will be used to reset the chip + during probe. Without this property, you may encounter issues with warm + boot. + + For SDIO, the reset gpio should be declared using a mmc-pwrseq. + maxItems: 1 + + wakeup-gpios: + description: Phandle of the gpio that will be used to wake up the chip. Without this + property, the driver will disable most power-saving features. + maxItems: 1 + + silabs,antenna-config-file: + $ref: /schemas/types.yaml#/definitions/string + description: Use an alternative file for antenna configuration (aka + "Platform Data Set" in Silabs jargon). The default depends on the "compatible" + string. For "silabs,wf200", the default is 'wf200.pds'.
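For illustration only (this is an editorial sketch, not part of the merged patch; the .pds file name and the GPIO/IRQ numbers are assumptions), a SPI-attached WF200 that overrides the antenna configuration could be described like this:

    wifi@0 {
            compatible = "silabs,brd8022a", "silabs,wf200";
            reg = <0>;                                    /* chip select 0 */
            interrupts-extended = <&gpio 16 IRQ_TYPE_EDGE_RISING>;
            reset-gpios = <&gpio 13 GPIO_ACTIVE_LOW>;
            spi-max-frequency = <42000000>;
            silabs,antenna-config-file = "brd8022a.pds";  /* hypothetical file name */
    };
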
+ + local-mac-address: true + + mac-address: true + + additionalProperties: false + + required: + - compatible + - reg + + examples: + - | + #include <dt-bindings/gpio/gpio.h> + #include <dt-bindings/interrupt-controller/irq.h> + + spi { + #address-cells = <1>; + #size-cells = <0>; + + wifi@0 { + compatible = "silabs,brd8022a", "silabs,wf200"; + pinctrl-names = "default"; + pinctrl-0 = <&wfx_irq &wfx_gpios>; + reg = <0>; + interrupts-extended = <&gpio 16 IRQ_TYPE_EDGE_RISING>; + wakeup-gpios = <&gpio 12 GPIO_ACTIVE_HIGH>; + reset-gpios = <&gpio 13 GPIO_ACTIVE_LOW>; + spi-max-frequency = <42000000>; + }; + }; + + - | + #include <dt-bindings/gpio/gpio.h> + #include <dt-bindings/interrupt-controller/irq.h> + + wfx_pwrseq: wfx_pwrseq { + compatible = "mmc-pwrseq-simple"; + pinctrl-names = "default"; + pinctrl-0 = <&wfx_reset>; + reset-gpios = <&gpio 13 GPIO_ACTIVE_LOW>; + }; + + mmc { + mmc-pwrseq = <&wfx_pwrseq>; + #address-cells = <1>; + #size-cells = <0>; + + wifi@1 { + compatible = "silabs,brd8022a", "silabs,wf200"; + pinctrl-names = "default"; + pinctrl-0 = <&wfx_wakeup>; + reg = <1>; + wakeup-gpios = <&gpio 12 GPIO_ACTIVE_HIGH>; + }; + }; + ... diff --combined Documentation/devicetree/bindings/vendor-prefixes.yaml index 0206bb14c1df,e12a75e10456..43f50d0ccf95 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@@ -283,6 -283,8 +283,8 @@@ patternProperties description: Shenzen Chuangsiqi Technology Co.,Ltd. "^ctera,.*": description: CTERA Networks Intl. + "^ctu,.*": + description: Czech Technical University in Prague "^cubietech,.*": description: Cubietech, Ltd. "^cui,.*": @@@ -1193,8 -1195,6 +1195,8 @@@ description: StorLink Semiconductors, Inc. "^storm,.*": description: Storm Semiconductor, Inc. + "^storopack,.*": + description: Storopack "^summit,.*": description: Summit microelectronics "^sunchip,.*": diff --combined MAINTAINERS index 22ce332e43f1,de6a26a01166..3fd28a22516f --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -1044,6 -1044,7 +1044,6 @@@ F: arch/arm64/boot/dts/amd/amd-seattle- F: drivers/net/ethernet/amd/xgbe/
AMD SENSOR FUSION HUB DRIVER -M: Nehal Shah nehal-bakulchandra.shah@amd.com M: Basavaraj Natikar basavaraj.natikar@amd.com L: linux-input@vger.kernel.org S: Maintained @@@ -1446,7 -1447,6 +1446,7 @@@ F: drivers/media/i2c/aptina-pll.
AQUACOMPUTER D5 NEXT PUMP SENSOR DRIVER M: Aleksa Savic savicaleksa83@gmail.com +M: Jack Doan me@jackdoan.com L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/aquacomputer_d5next.rst @@@ -1526,7 -1526,10 +1526,7 @@@ F: Documentation/devicetree/bindings/mt F: arch/arm/boot/dts/arm-realview-* F: arch/arm/boot/dts/integrator* F: arch/arm/boot/dts/versatile* -F: arch/arm/mach-integrator/ -F: arch/arm/mach-realview/ F: arch/arm/mach-versatile/ -F: arch/arm/plat-versatile/ F: drivers/bus/arm-integrator-lm.c F: drivers/clk/versatile/ F: drivers/i2c/busses/i2c-versatile.c @@@ -1834,9 -1837,7 +1834,9 @@@ F: Documentation/devicetree/bindings/ar F: Documentation/devicetree/bindings/clock/apple,nco.yaml F: Documentation/devicetree/bindings/i2c/apple,i2c.yaml F: Documentation/devicetree/bindings/interrupt-controller/apple,* +F: Documentation/devicetree/bindings/iommu/apple,sart.yaml F: Documentation/devicetree/bindings/mailbox/apple,mailbox.yaml +F: Documentation/devicetree/bindings/nvme/apple,nvme-ans.yaml F: Documentation/devicetree/bindings/pci/apple,pcie.yaml F: Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml F: Documentation/devicetree/bindings/power/apple* @@@ -1847,14 -1848,12 +1847,14 @@@ F: drivers/i2c/busses/i2c-pasemi-core. F: drivers/i2c/busses/i2c-pasemi-platform.c F: drivers/irqchip/irq-apple-aic.c F: drivers/mailbox/apple-mailbox.c +F: drivers/nvme/host/apple.c F: drivers/pinctrl/pinctrl-apple-gpio.c F: drivers/soc/apple/* F: drivers/watchdog/apple_wdt.c F: include/dt-bindings/interrupt-controller/apple-aic.h F: include/dt-bindings/pinctrl/apple.h F: include/linux/apple-mailbox.h +F: include/linux/soc/apple/*
ARM/ARTPEC MACHINE SUPPORT M: Jesper Nilsson jesper.nilsson@axis.com @@@ -3572,9 -3571,8 +3572,9 @@@ M: Andy Gospodarek <andy@greyhouse.net L: netdev@vger.kernel.org S: Supported W: http://sourceforge.net/projects/bonding/ +F: Documentation/networking/bonding.rst F: drivers/net/bonding/ -F: include/net/bonding.h +F: include/net/bond* F: include/uapi/linux/if_bonding.h
BOSCH SENSORTEC BMA400 ACCELEROMETER IIO DRIVER @@@ -3744,23 -3742,9 +3744,23 @@@ F: drivers/net/dsa/bcm_sf2 F: include/linux/dsa/brcm.h F: include/linux/platform_data/b53.h
+BROADCOM BCMBCA ARM ARCHITECTURE +M: William Zhang william.zhang@broadcom.com +M: Anand Gore anand.gore@broadcom.com +M: Kursad Oney kursad.oney@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +T: git git://github.com/broadcom/stblinux.git +F: Documentation/devicetree/bindings/arm/bcm/brcm,bcmbca.yaml +F: arch/arm/boot/dts/bcm47622.dtsi +F: arch/arm/boot/dts/bcm947622.dts +N: bcmbca +N: bcm[9]?47622 + BROADCOM BCM2711/BCM2835 ARM ARCHITECTURE M: Nicolas Saenz Julienne nsaenz@kernel.org -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@@ -3770,13 -3754,12 +3770,13 @@@ F: drivers/pci/controller/pcie-brcmstb. F: drivers/staging/vc04_services N: bcm2711 N: bcm283* +N: raspberrypi
BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE M: Florian Fainelli f.fainelli@gmail.com M: Ray Jui rjui@broadcom.com M: Scott Branden sbranden@broadcom.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com S: Maintained T: git git://github.com/broadcom/mach-bcm F: arch/arm/mach-bcm/ @@@ -3796,7 -3779,7 +3796,7 @@@ F: arch/mips/include/asm/mach-bcm47xx/
BROADCOM BCM4908 ETHERNET DRIVER M: Rafał Miłecki rafal@milecki.pl -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: netdev@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml @@@ -3805,7 -3788,7 +3805,7 @@@ F: drivers/net/ethernet/broadcom/unimac
BROADCOM BCM4908 PINMUX DRIVER M: Rafał Miłecki rafal@milecki.pl -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-gpio@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pinctrl/brcm,bcm4908-pinctrl.yaml @@@ -3815,7 -3798,7 +3815,7 @@@ BROADCOM BCM5301X ARM ARCHITECTUR M: Florian Fainelli f.fainelli@gmail.com M: Hauke Mehrtens hauke@hauke-m.de M: Rafał Miłecki zajec5@gmail.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/boot/dts/bcm470* @@@ -3826,7 -3809,7 +3826,7 @@@ F: arch/arm/mach-bcm/bcm_5301x. BROADCOM BCM53573 ARM ARCHITECTURE M: Florian Fainelli f.fainelli@gmail.com M: Rafał Miłecki rafal@milecki.pl -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/boot/dts/bcm47189* @@@ -3834,7 -3817,7 +3834,7 @@@ F: arch/arm/boot/dts/bcm53573
BROADCOM BCM63XX ARM ARCHITECTURE M: Florian Fainelli f.fainelli@gmail.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained T: git git://github.com/broadcom/stblinux.git @@@ -3848,7 -3831,7 +3848,7 @@@ F: drivers/usb/gadget/udc/bcm63xx_udc.
BROADCOM BCM7XXX ARM ARCHITECTURE M: Florian Fainelli f.fainelli@gmail.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained T: git git://github.com/broadcom/stblinux.git @@@ -3866,21 -3849,21 +3866,21 @@@ N: bcm712 BROADCOM BDC DRIVER M: Al Cooper alcooperx@gmail.com L: linux-usb@vger.kernel.org -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com S: Maintained F: Documentation/devicetree/bindings/usb/brcm,bdc.yaml F: drivers/usb/gadget/udc/bdc/
BROADCOM BMIPS CPUFREQ DRIVER M: Markus Mayer mmayer@broadcom.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-pm@vger.kernel.org S: Maintained F: drivers/cpufreq/bmips-cpufreq.c
BROADCOM BMIPS MIPS ARCHITECTURE M: Florian Fainelli f.fainelli@gmail.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-mips@vger.kernel.org S: Maintained T: git git://github.com/broadcom/stblinux.git @@@ -3948,14 -3931,14 +3948,14 @@@ F: drivers/net/wireless/broadcom/brcm80 BROADCOM BRCMSTB GPIO DRIVER M: Doug Berger opendmb@gmail.com M: Florian Fainelli f.fainelli@gmail.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com S: Supported F: Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.yaml F: drivers/gpio/gpio-brcmstb.c
BROADCOM BRCMSTB I2C DRIVER M: Kamal Dasu kdasu.kdev@gmail.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-i2c@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/i2c/brcm,brcmstb-i2c.yaml @@@ -3963,7 -3946,7 +3963,7 @@@ F: drivers/i2c/busses/i2c-brcmstb.
BROADCOM BRCMSTB UART DRIVER M: Al Cooper alcooperx@gmail.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-serial@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml @@@ -3971,7 -3954,7 +3971,7 @@@ F: drivers/tty/serial/8250/8250_bcm7271
BROADCOM BRCMSTB USB EHCI DRIVER M: Al Cooper alcooperx@gmail.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-usb@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/usb/brcm,bcm7445-ehci.yaml @@@ -3979,7 -3962,7 +3979,7 @@@ F: drivers/usb/host/ehci-brcm.
BROADCOM BRCMSTB USB PIN MAP DRIVER M: Al Cooper alcooperx@gmail.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-usb@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/usb/brcm,usb-pinmap.yaml @@@ -3987,14 -3970,14 +3987,14 @@@ F: drivers/usb/misc/brcmstb-usb-pinmap.
BROADCOM BRCMSTB USB2 and USB3 PHY DRIVER M: Al Cooper alcooperx@gmail.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-kernel@vger.kernel.org S: Maintained F: drivers/phy/broadcom/phy-brcm-usb*
BROADCOM ETHERNET PHY DRIVERS M: Florian Fainelli f.fainelli@gmail.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: netdev@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/net/broadcom-bcm87xx.txt @@@ -4005,7 -3988,7 +4005,7 @@@ F: include/linux/brcmphy. BROADCOM GENET ETHERNET DRIVER M: Doug Berger opendmb@gmail.com M: Florian Fainelli f.fainelli@gmail.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: netdev@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/net/brcm,bcmgenet.yaml @@@ -4019,7 -4002,7 +4019,7 @@@ F: include/linux/platform_data/mdio-bcm BROADCOM IPROC ARM ARCHITECTURE M: Ray Jui rjui@broadcom.com M: Scott Branden sbranden@broadcom.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained T: git git://github.com/broadcom/stblinux.git @@@ -4047,7 -4030,7 +4047,7 @@@ N: stingra
BROADCOM IPROC GBIT ETHERNET DRIVER M: Rafał Miłecki rafal@milecki.pl -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: netdev@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/net/brcm,amac.yaml @@@ -4056,7 -4039,7 +4056,7 @@@ F: drivers/net/ethernet/broadcom/unimac
BROADCOM KONA GPIO DRIVER M: Ray Jui rjui@broadcom.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com S: Supported F: Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt F: drivers/gpio/gpio-bcm-kona.c @@@ -4089,7 -4072,7 +4089,7 @@@ F: drivers/firmware/broadcom/ BROADCOM PMB (POWER MANAGEMENT BUS) DRIVER M: Rafał Miłecki rafal@milecki.pl M: Florian Fainelli f.fainelli@gmail.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-pm@vger.kernel.org S: Maintained T: git git://github.com/broadcom/stblinux.git @@@ -4105,7 -4088,7 +4105,7 @@@ F: include/linux/bcma
BROADCOM SPI DRIVER M: Kamal Dasu kdasu.kdev@gmail.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com S: Maintained F: Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.yaml F: drivers/spi/spi-bcm-qspi.* @@@ -4114,7 -4097,7 +4114,7 @@@ F: drivers/spi/spi-iproc-qspi.
BROADCOM STB AVS CPUFREQ DRIVER M: Markus Mayer mmayer@broadcom.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-pm@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt @@@ -4122,7 -4105,7 +4122,7 @@@ F: drivers/cpufreq/brcmstb
BROADCOM STB AVS TMON DRIVER M: Markus Mayer mmayer@broadcom.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-pm@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/thermal/brcm,avs-tmon.yaml @@@ -4130,7 -4113,7 +4130,7 @@@ F: drivers/thermal/broadcom/brcmstb
BROADCOM STB DPFE DRIVER M: Markus Mayer mmayer@broadcom.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/memory-controllers/brcm,dpfe-cpu.yaml @@@ -4139,7 -4122,7 +4139,7 @@@ F: drivers/memory/brcmstb_dpfe. BROADCOM STB NAND FLASH DRIVER M: Brian Norris computersforpeace@gmail.com M: Kamal Dasu kdasu.kdev@gmail.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-mtd@lists.infradead.org S: Maintained F: drivers/mtd/nand/raw/brcmnand/ @@@ -4149,7 -4132,7 +4149,7 @@@ BROADCOM STB PCIE DRIVE M: Jim Quinlan jim2101024@gmail.com M: Nicolas Saenz Julienne nsaenz@kernel.org M: Florian Fainelli f.fainelli@gmail.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-pci@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml @@@ -4157,7 -4140,7 +4157,7 @@@ F: drivers/pci/controller/pcie-brcmstb.
BROADCOM SYSTEMPORT ETHERNET DRIVER M: Florian Fainelli f.fainelli@gmail.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/broadcom/bcmsysport.* @@@ -4174,7 -4157,7 +4174,7 @@@ F: drivers/net/ethernet/broadcom/tg3.
BROADCOM VK DRIVER M: Scott Branden scott.branden@broadcom.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com S: Supported F: drivers/misc/bcm-vk/ F: include/uapi/linux/misc/bcm_vk.h @@@ -5067,12 -5050,6 +5067,6 @@@ S: Maintaine F: Documentation/hwmon/corsair-psu.rst F: drivers/hwmon/corsair-psu.c
- COSA/SRP SYNC SERIAL DRIVER - M: Jan "Yenya" Kasprzak kas@fi.muni.cz - S: Maintained - W: http://www.fi.muni.cz/~kas/cosa/ - F: drivers/net/wan/cosa* - COUNTER SUBSYSTEM M: William Breathitt Gray vilhelm.gray@gmail.com L: linux-iio@vger.kernel.org @@@ -5254,6 -5231,14 +5248,14 @@@ T: git git://linuxtv.org/media_tree.gi F: Documentation/devicetree/bindings/media/allwinner,sun6i-a31-csi.yaml F: drivers/media/platform/sunxi/sun6i-csi/
+ CTU CAN FD DRIVER + M: Pavel Pisa pisa@cmp.felk.cvut.cz + M: Ondrej Ille ondrej.ille@gmail.com + L: linux-can@vger.kernel.org + S: Maintained + F: Documentation/devicetree/bindings/net/can/ctu,ctucanfd.yaml + F: drivers/net/can/ctucanfd/ + CW1200 WLAN driver M: Solomon Peachy pizza@shaftnet.org S: Maintained @@@ -5456,7 -5441,6 +5458,7 @@@ F: net/ax25/sysctl_net_ax25.
DATA ACCESS MONITOR M: SeongJae Park sj@kernel.org +L: damon@lists.linux.dev L: linux-mm@kvack.org S: Maintained F: Documentation/ABI/testing/sysfs-kernel-mm-damon @@@ -5935,7 -5919,7 +5937,7 @@@ R: Benjamin Gaignard <benjamin.gaignard R: Liam Mark lmark@codeaurora.org R: Laura Abbott labbott@redhat.com R: Brian Starkey Brian.Starkey@arm.com -R: John Stultz john.stultz@linaro.org +R: John Stultz jstultz@google.com L: linux-media@vger.kernel.org L: dri-devel@lists.freedesktop.org L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers) @@@ -6010,12 -5994,6 +6012,12 @@@ L: linux-doc@vger.kernel.or S: Maintained F: Documentation/translations/it_IT
+DOCUMENTATION/JAPANESE +R: Akira Yokosawa akiyks@gmail.com +L: linux-doc@vger.kernel.org +S: Maintained +F: Documentation/translations/ja_JP + DONGWOON DW9714 LENS VOICE COIL DRIVER M: Sakari Ailus sakari.ailus@linux.intel.com L: linux-media@vger.kernel.org @@@ -6611,7 -6589,7 +6613,7 @@@ F: drivers/gpu/drm/gma500 DRM DRIVERS FOR HISILICON M: Xinliang Liu xinliang.liu@linaro.org M: Tian Tao tiantao6@hisilicon.com -R: John Stultz john.stultz@linaro.org +R: John Stultz jstultz@google.com R: Xinwei Kong kong.kongxinwei@hisilicon.com R: Chen Feng puck.chen@hisilicon.com L: dri-devel@lists.freedesktop.org @@@ -7523,7 -7501,7 +7525,7 @@@ F: Documentation/hwmon/f71805f.rs F: drivers/hwmon/f71805f.c
FADDR2LINE -M: Josh Poimboeuf jpoimboe@redhat.com +M: Josh Poimboeuf jpoimboe@kernel.org S: Maintained F: scripts/faddr2line
@@@ -8136,7 -8114,7 +8138,7 @@@ M: Ingo Molnar <mingo@redhat.com R: Peter Zijlstra peterz@infradead.org R: Darren Hart dvhart@infradead.org R: Davidlohr Bueso dave@stgolabs.net -R: André Almeida andrealmeid@collabora.com +R: André Almeida andrealmeid@igalia.com L: linux-kernel@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core @@@ -8409,7 -8387,7 +8411,7 @@@ M: Linus Walleij <linus.walleij@linaro. M: Bartosz Golaszewski brgl@bgdev.pl L: linux-gpio@vger.kernel.org S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git F: Documentation/ABI/obsolete/sysfs-gpio F: Documentation/ABI/testing/gpio-cdev F: Documentation/admin-guide/gpio/ @@@ -8580,6 -8558,17 +8582,6 @@@ L: linux-efi@vger.kernel.or S: Maintained F: block/partitions/efi.*
-H8/300 ARCHITECTURE -M: Yoshinori Sato ysato@users.sourceforge.jp -L: uclinux-h8-devel@lists.sourceforge.jp (moderated for non-subscribers) -S: Maintained -W: http://uclinux-h8.sourceforge.jp -T: git git://git.sourceforge.jp/gitroot/uclinux-h8/linux.git -F: arch/h8300/ -F: drivers/clk/h8300/ -F: drivers/clocksource/h8300_*.c -F: drivers/irqchip/irq-renesas-h8*.c - HABANALABS PCI DRIVER M: Oded Gabbay ogabbay@kernel.org S: Supported @@@ -8765,14 -8754,6 +8767,14 @@@ F: drivers/hid/hid-sensor- F: drivers/iio/*/hid-* F: include/linux/hid-sensor-*
+HID WACOM DRIVER +M: Ping Cheng ping.cheng@wacom.com +M: Jason Gerecke jason.gerecke@wacom.com +L: linux-input@vger.kernel.org +S: Maintained +F: drivers/hid/wacom.h +F: drivers/hid/wacom_* + HIGH-RESOLUTION TIMERS, CLOCKEVENTS M: Thomas Gleixner tglx@linutronix.de L: linux-kernel@vger.kernel.org @@@ -8788,7 -8769,6 +8790,6 @@@ F: kernel/time/timer_*. HIGH-SPEED SCC DRIVER FOR AX.25 L: linux-hams@vger.kernel.org S: Orphan - F: drivers/net/hamradio/dmascc.c F: drivers/net/hamradio/scc.c
HIGHPOINT ROCKETRAID 3xxx RAID DRIVER @@@ -8869,7 -8849,7 +8870,7 @@@ F: Documentation/devicetree/bindings/ne F: drivers/net/ethernet/hisilicon/
HIKEY960 ONBOARD USB GPIO HUB DRIVER -M: John Stultz john.stultz@linaro.org +M: John Stultz jstultz@google.com L: linux-kernel@vger.kernel.org S: Maintained F: drivers/misc/hisi_hikey_usb.c @@@ -10152,7 -10132,7 +10153,7 @@@ S: Supporte F: drivers/net/wireless/intel/iwlegacy/
INTEL WIRELESS WIFI LINK (iwlwifi) -M: Luca Coelho luciano.coelho@intel.com +M: Gregory Greenman gregory.greenman@intel.com L: linux-wireless@vger.kernel.org S: Supported W: https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi @@@ -11369,7 -11349,7 +11370,7 @@@ F: drivers/mmc/host/litex_mmc. N: litex
LIVE PATCHING -M: Josh Poimboeuf jpoimboe@redhat.com +M: Josh Poimboeuf jpoimboe@kernel.org M: Jiri Kosina jikos@kernel.org M: Miroslav Benes mbenes@suse.cz M: Petr Mladek pmladek@suse.com @@@ -11852,6 -11832,13 +11853,13 @@@ S: Supporte F: Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt F: drivers/mmc/host/sdhci-xenon*
+ MARVELL OCTEON ENDPOINT DRIVER + M: Veerasenareddy Burru vburru@marvell.com + M: Abhijit Ayarekar aayarekar@marvell.com + L: netdev@vger.kernel.org + S: Supported + F: drivers/net/ethernet/marvell/octeon_ep + MATROX FRAMEBUFFER DRIVER L: linux-fbdev@vger.kernel.org S: Orphan @@@ -12501,6 -12488,17 +12509,17 @@@ S: Maintaine F: drivers/net/dsa/mt7530.* F: net/dsa/tag_mtk.c
+ MEDIATEK T7XX 5G WWAN MODEM DRIVER + M: Chandrashekar Devegowda chandrashekar.devegowda@intel.com + M: Intel Corporation linuxwwan@intel.com + R: Chiranjeevi Rapolu chiranjeevi.rapolu@linux.intel.com + R: Liu Haijun haijun.liu@mediatek.com + R: M Chetan Kumar m.chetan.kumar@linux.intel.com + R: Ricardo Martinez ricardo.martinez@linux.intel.com + L: netdev@vger.kernel.org + S: Supported + F: drivers/net/wwan/t7xx/ + MEDIATEK USB3 DRD IP DRIVER M: Chunfeng Yun chunfeng.yun@mediatek.com L: linux-usb@vger.kernel.org @@@ -12930,6 -12928,13 +12949,13 @@@ F: drivers/net/dsa/microchip/ F: include/linux/platform_data/microchip-ksz.h F: net/dsa/tag_ksz.c
+ MICROCHIP LAN87xx/LAN937x T1 PHY DRIVER + M: Arun Ramadoss arun.ramadoss@microchip.com + R: UNGLinuxDriver@microchip.com + L: netdev@vger.kernel.org + S: Maintained + F: drivers/net/phy/microchip_t1.c + MICROCHIP LAN743X ETHERNET DRIVER M: Bryan Whitehead bryan.whitehead@microchip.com M: UNGLinuxDriver@microchip.com @@@ -13555,21 -13560,12 +13581,21 @@@ M: Samuel Mendoza-Jonas <sam@mendozajon S: Maintained F: net/ncsi/
-NCT6775 HARDWARE MONITOR DRIVER +NCT6775 HARDWARE MONITOR DRIVER - CORE & PLATFORM DRIVER M: Guenter Roeck linux@roeck-us.net L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/nct6775.rst -F: drivers/hwmon/nct6775.c +F: drivers/hwmon/nct6775-core.c +F: drivers/hwmon/nct6775-platform.c +F: drivers/hwmon/nct6775.h + +NCT6775 HARDWARE MONITOR DRIVER - I2C DRIVER +M: Zev Weiss zev@bewilderbeest.net +L: linux-hwmon@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/hwmon/nuvoton,nct6775.yaml +F: drivers/hwmon/nct6775-i2c.c
NETDEVSIM M: Jakub Kicinski kuba@kernel.org @@@ -14254,7 -14250,7 +14280,7 @@@ F: lib/objagg. F: lib/test_objagg.c
OBJTOOL -M: Josh Poimboeuf jpoimboe@redhat.com +M: Josh Poimboeuf jpoimboe@kernel.org M: Peter Zijlstra peterz@infradead.org S: Supported F: tools/objtool/ @@@ -14402,6 -14398,7 +14428,6 @@@ F: arch/arm/*omap*/*pm F: drivers/cpufreq/omap-cpufreq.c
OMAP POWERDOMAIN SOC ADAPTATION LAYER SUPPORT -M: Rajendra Nayak rnayak@codeaurora.org M: Paul Walmsley paul@pwsan.com L: linux-omap@vger.kernel.org S: Maintained @@@ -14427,7 -14424,6 +14453,7 @@@ F: arch/arm/boot/dts/am335x-nano.dt
OMAP1 SUPPORT M: Aaro Koskinen aaro.koskinen@iki.fi +M: Janusz Krzysztofik jmkrzyszt@gmail.com M: Tony Lindgren tony@atomide.com L: linux-omap@vger.kernel.org S: Maintained @@@ -15894,9 -15890,7 +15920,9 @@@ F: kernel/printk PRINTK INDEXING R: Chris Down chris@chrisdown.name S: Maintained +F: Documentation/core-api/printk-index.rst F: kernel/printk/index.c +K: printk_index
PROC FILESYSTEM L: linux-kernel@vger.kernel.org @@@ -16011,6 -16005,12 +16037,12 @@@ T: git git://linuxtv.org/media_tree.gi F: Documentation/admin-guide/media/pulse8-cec.rst F: drivers/media/cec/usb/pulse8/
+ PURELIFI PLFXLC DRIVER + M: Srinivasan Raju srini.raju@purelifi.com + L: linux-wireless@vger.kernel.org + S: Supported + F: drivers/net/wireless/purelifi/plfxlc/ + PVRUSB2 VIDEO4LINUX DRIVER M: Mike Isely isely@pobox.com L: pvrusb2@isely.net (subscribers-only) @@@ -17690,7 -17690,7 +17722,7 @@@ K: \bTIF_SECCOMP\
SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) Broadcom BRCMSTB DRIVER M: Al Cooper alcooperx@gmail.com -R: Broadcom Kernel Team bcm-kernel-feedback-list@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-mmc@vger.kernel.org S: Maintained F: drivers/mmc/host/sdhci-brcmstb* @@@ -18021,8 -18021,8 +18053,8 @@@ F: drivers/platform/x86/touchscreen_dmi SILICON LABS WIRELESS DRIVERS (for WFxxx series) M: Jérôme Pouiller jerome.pouiller@silabs.com S: Supported - F: Documentation/devicetree/bindings/staging/net/wireless/silabs,wfx.yaml - F: drivers/staging/wfx/ + F: Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml + F: drivers/net/wireless/silabs/wfx/
SILICON MOTION SM712 FRAME BUFFER DRIVER M: Sudip Mukherjee sudipm.mukherjee@gmail.com @@@ -18824,7 -18824,7 +18856,7 @@@ F: include/dt-bindings/reset/starfive-j
STATIC BRANCH/CALL M: Peter Zijlstra peterz@infradead.org -M: Josh Poimboeuf jpoimboe@redhat.com +M: Josh Poimboeuf jpoimboe@kernel.org M: Jason Baron jbaron@akamai.com R: Steven Rostedt rostedt@goodmis.org R: Ard Biesheuvel ardb@kernel.org @@@ -18908,6 -18908,14 +18940,14 @@@ L: netdev@vger.kernel.or S: Maintained F: drivers/net/ethernet/dlink/sundance.c
+ SUNPLUS ETHERNET DRIVER + M: Wells Lu wellslutw@gmail.com + L: netdev@vger.kernel.org + S: Maintained + W: https://sunplus.atlassian.net/wiki/spaces/doc/overview + F: Documentation/devicetree/bindings/net/sunplus,sp7021-emac.yaml + F: drivers/net/ethernet/sunplus/ + SUNPLUS OCOTP DRIVER M: Vincent Shih vincent.sunplus@gmail.com S: Maintained @@@ -19573,7 -19581,6 +19613,7 @@@ F: drivers/thermal F: include/linux/cpu_cooling.h F: include/linux/thermal.h F: include/uapi/linux/thermal.h +F: tools/lib/thermal/ F: tools/thermal/
THERMAL DRIVER FOR AMLOGIC SOCS @@@ -19826,7 -19833,7 +19866,7 @@@ F: drivers/net/wireless/ti F: include/linux/wl12xx.h
TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER -M: John Stultz john.stultz@linaro.org +M: John Stultz jstultz@google.com M: Thomas Gleixner tglx@linutronix.de R: Stephen Boyd sboyd@kernel.org L: linux-kernel@vger.kernel.org @@@ -19882,7 -19889,6 +19922,7 @@@ TMP401 HARDWARE MONITOR DRIVE M: Guenter Roeck linux@roeck-us.net L: linux-hwmon@vger.kernel.org S: Maintained +F: Documentation/devicetree/bindings/hwmon/ti,tmp401.yaml F: Documentation/hwmon/tmp401.rst F: drivers/hwmon/tmp401.c
@@@ -20566,6 -20572,7 +20606,6 @@@ F: drivers/usb/host/uhci
USB VIDEO CLASS M: Laurent Pinchart laurent.pinchart@ideasonboard.com -L: linux-uvc-devel@lists.sourceforge.net (subscribers-only) L: linux-media@vger.kernel.org S: Maintained W: http://www.ideasonboard.org/uvc/ @@@ -21477,7 -21484,7 +21517,7 @@@ F: arch/x86/kernel/apic/x2apic_uv_x. F: arch/x86/platform/uv/
X86 STACK UNWINDING -M: Josh Poimboeuf jpoimboe@redhat.com +M: Josh Poimboeuf jpoimboe@kernel.org M: Peter Zijlstra peterz@infradead.org S: Supported F: arch/x86/include/asm/unwind*.h @@@ -21676,7 -21683,7 +21716,7 @@@ M: Appana Durga Kedareswara rao <appana R: Naga Sureshkumar Relli naga.sureshkumar.relli@xilinx.com L: linux-can@vger.kernel.org S: Maintained - F: Documentation/devicetree/bindings/net/can/xilinx_can.txt + F: Documentation/devicetree/bindings/net/can/xilinx,can.yaml F: drivers/net/can/xilinx_can.c
XILINX GPIO DRIVER diff --combined arch/arm/boot/dts/aspeed-g6.dtsi index 892ca0753e1f,6aa1fd5c9359..765738716cac --- a/arch/arm/boot/dts/aspeed-g6.dtsi +++ b/arch/arm/boot/dts/aspeed-g6.dtsi @@@ -107,21 -107,18 +107,21 @@@ reg = < 0 >; compatible = "jedec,spi-nor"; spi-max-frequency = <50000000>; + spi-rx-bus-width = <2>; status = "disabled"; }; flash@1 { reg = < 1 >; compatible = "jedec,spi-nor"; spi-max-frequency = <50000000>; + spi-rx-bus-width = <2>; status = "disabled"; }; flash@2 { reg = < 2 >; compatible = "jedec,spi-nor"; spi-max-frequency = <50000000>; + spi-rx-bus-width = <2>; status = "disabled"; }; }; @@@ -138,14 -135,12 +138,14 @@@ reg = < 0 >; compatible = "jedec,spi-nor"; spi-max-frequency = <50000000>; + spi-rx-bus-width = <2>; status = "disabled"; }; flash@1 { reg = < 1 >; compatible = "jedec,spi-nor"; spi-max-frequency = <50000000>; + spi-rx-bus-width = <2>; status = "disabled"; }; }; @@@ -162,21 -157,18 +162,21 @@@ reg = < 0 >; compatible = "jedec,spi-nor"; spi-max-frequency = <50000000>; + spi-rx-bus-width = <2>; status = "disabled"; }; flash@1 { reg = < 1 >; compatible = "jedec,spi-nor"; spi-max-frequency = <50000000>; + spi-rx-bus-width = <2>; status = "disabled"; }; flash@2 { reg = < 2 >; compatible = "jedec,spi-nor"; spi-max-frequency = <50000000>; + spi-rx-bus-width = <2>; status = "disabled"; }; }; @@@ -189,6 -181,7 +189,7 @@@ status = "disabled"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_mdio1_default>; + resets = <&syscon ASPEED_RESET_MII>; };
mdio1: mdio@1e650008 { @@@ -199,6 -192,7 +200,7 @@@ status = "disabled"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_mdio2_default>; + resets = <&syscon ASPEED_RESET_MII>; };
mdio2: mdio@1e650010 { @@@ -209,6 -203,7 +211,7 @@@ status = "disabled"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_mdio3_default>; + resets = <&syscon ASPEED_RESET_MII>; };
mdio3: mdio@1e650018 { @@@ -219,6 -214,7 +222,7 @@@ status = "disabled"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_mdio4_default>; + resets = <&syscon ASPEED_RESET_MII>; };
mac0: ftgmac@1e660000 { @@@ -359,17 -355,6 +363,17 @@@ quality = <100>; };
+ gfx: display@1e6e6000 { + compatible = "aspeed,ast2600-gfx", "syscon"; + reg = <0x1e6e6000 0x1000>; + reg-io-width = <4>; + clocks = <&syscon ASPEED_CLK_GATE_D1CLK>; + resets = <&syscon ASPEED_RESET_GRAPHICS>; + syscon = <&syscon>; + status = "disabled"; + interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>; + }; + xdma: xdma@1e6e7000 { compatible = "aspeed,ast2600-xdma"; reg = <0x1e6e7000 0x100>; @@@ -408,16 -393,6 +412,16 @@@ reg = <0x1e6f2000 0x1000>; };
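As a minimal sketch (an editorial assumption, not taken from this patch), a board .dts built on aspeed-g6.dtsi could enable the newly added display controller like this:

    &gfx {
            /* Enable the AST2600 SoC display controller declared above.
             * Boards typically also provide a reserved-memory region for
             * the framebuffer (not shown here).
             */
            status = "okay";
    };
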
+ video: video@1e700000 { + compatible = "aspeed,ast2600-video-engine"; + reg = <0x1e700000 0x1000>; + clocks = <&syscon ASPEED_CLK_GATE_VCLK>, + <&syscon ASPEED_CLK_GATE_ECLK>; + clock-names = "vclk", "eclk"; + interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>; + status = "disabled"; + }; + gpio0: gpio@1e780000 { #gpio-cells = <2>; gpio-controller; diff --combined drivers/infiniband/hw/mlx5/main.c index e2d88027a9a7,61a3b767262f..b68fddeac0f1 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@@ -41,7 -41,6 +41,7 @@@ #include "wr.h" #include "restrack.h" #include "counters.h" +#include "umr.h" #include <rdma/uverbs_std_types.h> #include <rdma/uverbs_ioctl.h> #include <rdma/mlx5_user_ioctl_verbs.h> @@@ -855,13 -854,13 +855,13 @@@ static int mlx5_ib_query_device(struct IB_DEVICE_MEM_WINDOW_TYPE_2B; props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); /* We support 'Gappy' memory registration too */ - props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG; + props->kernel_cap_flags |= IBK_SG_GAPS_REG; } /* IB_WR_REG_MR always requires changing the entity size with UMR */ if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; if (MLX5_CAP_GEN(mdev, sho)) { - props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER; + props->kernel_cap_flags |= IBK_INTEGRITY_HANDOVER; /* At this stage no support for signature handover */ props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 | IB_PROT_T10DIF_TYPE_2 | @@@ -870,7 -869,7 +870,7 @@@ IB_GUARD_T10DIF_CSUM; } if (MLX5_CAP_GEN(mdev, block_lb_mc)) - props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; + props->kernel_cap_flags |= IBK_BLOCK_MULTICAST_LOOPBACK;
if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) { if (MLX5_CAP_ETH(mdev, csum_cap)) { @@@ -917,7 -916,7 +917,7 @@@
if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) { props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; - props->device_cap_flags |= IB_DEVICE_UD_TSO; + props->kernel_cap_flags |= IBK_UD_TSO; }
if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) && @@@ -993,7 -992,7 +993,7 @@@
if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { if (dev->odp_caps.general_caps & IB_ODP_SUPPORT) - props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING; + props->kernel_cap_flags |= IBK_ON_DEMAND_PAGING; props->odp_caps = dev->odp_caps; if (!uhw) { /* ODP for kernel QPs is not implemented for receive @@@ -1014,8 -1013,11 +1014,8 @@@ } }
- if (MLX5_CAP_GEN(mdev, cd)) - props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL; - if (mlx5_core_is_vf(mdev)) - props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION; + props->kernel_cap_flags |= IBK_VIRTUAL_FUNCTION;
if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET && raw_support) { @@@ -2989,6 -2991,7 +2989,7 @@@ static int mlx5_eth_lag_init(struct mlx }
dev->flow_db->lag_demux_ft = ft; + dev->lag_ports = mlx5_lag_get_num_ports(mdev); dev->lag_active = true; return 0;
@@@ -4005,7 -4008,12 +4006,7 @@@ static void mlx5_ib_stage_pre_ib_reg_um if (err) mlx5_ib_warn(dev, "mr cache cleanup failed\n");
- if (dev->umrc.qp) - ib_destroy_qp(dev->umrc.qp); - if (dev->umrc.cq) - ib_free_cq(dev->umrc.cq); - if (dev->umrc.pd) - ib_dealloc_pd(dev->umrc.pd); + mlx5r_umr_resource_cleanup(dev); }
static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev) @@@ -4013,19 -4021,112 +4014,19 @@@ ib_unregister_device(&dev->ib_dev); }
-enum { - MAX_UMR_WR = 128, -}; - static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev) { - struct ib_qp_init_attr *init_attr = NULL; - struct ib_qp_attr *attr = NULL; - struct ib_pd *pd; - struct ib_cq *cq; - struct ib_qp *qp; int ret;
- attr = kzalloc(sizeof(*attr), GFP_KERNEL); - init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL); - if (!attr || !init_attr) { - ret = -ENOMEM; - goto error_0; - } - - pd = ib_alloc_pd(&dev->ib_dev, 0); - if (IS_ERR(pd)) { - mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n"); - ret = PTR_ERR(pd); - goto error_0; - } - - cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ); - if (IS_ERR(cq)) { - mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n"); - ret = PTR_ERR(cq); - goto error_2; - } - - init_attr->send_cq = cq; - init_attr->recv_cq = cq; - init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; - init_attr->cap.max_send_wr = MAX_UMR_WR; - init_attr->cap.max_send_sge = 1; - init_attr->qp_type = MLX5_IB_QPT_REG_UMR; - init_attr->port_num = 1; - qp = ib_create_qp(pd, init_attr); - if (IS_ERR(qp)) { - mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n"); - ret = PTR_ERR(qp); - goto error_3; - } - - attr->qp_state = IB_QPS_INIT; - attr->port_num = 1; - ret = ib_modify_qp(qp, attr, - IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT); - if (ret) { - mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n"); - goto error_4; - } - - memset(attr, 0, sizeof(*attr)); - attr->qp_state = IB_QPS_RTR; - attr->path_mtu = IB_MTU_256; - - ret = ib_modify_qp(qp, attr, IB_QP_STATE); - if (ret) { - mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n"); - goto error_4; - } - - memset(attr, 0, sizeof(*attr)); - attr->qp_state = IB_QPS_RTS; - ret = ib_modify_qp(qp, attr, IB_QP_STATE); - if (ret) { - mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n"); - goto error_4; - } - - dev->umrc.qp = qp; - dev->umrc.cq = cq; - dev->umrc.pd = pd; + ret = mlx5r_umr_resource_init(dev); + if (ret) + return ret;
- sema_init(&dev->umrc.sem, MAX_UMR_WR); ret = mlx5_mr_cache_init(dev); if (ret) { mlx5_ib_warn(dev, "mr cache init failed %d\n", ret); - goto error_4; + mlx5r_umr_resource_cleanup(dev); } - - kfree(attr); - kfree(init_attr); - - return 0; - -error_4: - ib_destroy_qp(qp); - dev->umrc.qp = NULL; - -error_3: - ib_free_cq(cq); - dev->umrc.cq = NULL; - -error_2: - ib_dealloc_pd(pd); - dev->umrc.pd = NULL; - -error_0: - kfree(attr); - kfree(init_attr); return ret; }
diff --combined drivers/infiniband/hw/mlx5/mlx5_ib.h index df2b566ad73d,8b3c83c0b70a..998b67509a53 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@@ -291,9 -291,16 +291,9 @@@ struct mlx5_ib_flow_db };
/* Use macros here so that don't have to duplicate - * enum ib_send_flags and enum ib_qp_type for low-level driver + * enum ib_qp_type for low-level driver */
-#define MLX5_IB_SEND_UMR_ENABLE_MR (IB_SEND_RESERVED_START << 0) -#define MLX5_IB_SEND_UMR_DISABLE_MR (IB_SEND_RESERVED_START << 1) -#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 2) -#define MLX5_IB_SEND_UMR_UPDATE_XLT (IB_SEND_RESERVED_START << 3) -#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (IB_SEND_RESERVED_START << 4) -#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS IB_SEND_RESERVED_END - #define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1 /* * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI @@@ -304,6 -311,9 +304,6 @@@ #define MLX5_IB_QPT_DCT IB_QPT_RESERVED4 #define MLX5_IB_WR_UMR IB_WR_RESERVED1
-#define MLX5_IB_UMR_OCTOWORD 16 -#define MLX5_IB_UMR_XLT_ALIGNMENT 64 - #define MLX5_IB_UPD_XLT_ZAP BIT(0) #define MLX5_IB_UPD_XLT_ENABLE BIT(1) #define MLX5_IB_UPD_XLT_ATOMIC BIT(2) @@@ -529,6 -539,24 +529,6 @@@ struct mlx5_ib_cq_buf int nent; };
-struct mlx5_umr_wr { - struct ib_send_wr wr; - u64 virt_addr; - u64 offset; - struct ib_pd *pd; - unsigned int page_shift; - unsigned int xlt_size; - u64 length; - int access_flags; - u32 mkey; - u8 ignore_free_state:1; -}; - -static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr) -{ - return container_of(wr, struct mlx5_umr_wr, wr); -} - enum mlx5_ib_cq_pr_flags { MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD = 1 << 0, MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS = 1 << 1, @@@ -1103,6 -1131,7 +1103,7 @@@ struct mlx5_ib_dev struct xarray sig_mrs; struct mlx5_port_caps port_caps[MLX5_MAX_PORTS]; u16 pkey_table_len; + u8 lag_ports; };
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) @@@ -1262,6 -1291,9 +1263,6 @@@ int mlx5_ib_advise_mr(struct ib_pd *pd struct uverbs_attr_bundle *attrs); int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata); int mlx5_ib_dealloc_mw(struct ib_mw *mw); -int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages, - int page_shift, int flags); -int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags); struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd, int access_flags); void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr); @@@ -1440,6 -1472,9 +1441,6 @@@ static inline int is_qp1(enum ib_qp_typ return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI; }
-#define MLX5_MAX_UMR_SHIFT 16 -#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT) - static inline u32 check_cq_create_flags(u32 flags) { /* @@@ -1511,6 -1546,59 +1512,6 @@@ int bfregn_to_uar_index(struct mlx5_ib_ struct mlx5_bfreg_info *bfregi, u32 bfregn, bool dyn_bfreg);
-static inline bool mlx5_ib_can_load_pas_with_umr(struct mlx5_ib_dev *dev, - size_t length) -{ - /* - * umr_check_mkey_mask() rejects MLX5_MKEY_MASK_PAGE_SIZE which is - * always set if MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (aka - * MLX5_IB_UPD_XLT_ADDR and MLX5_IB_UPD_XLT_ENABLE) is set. Thus, a mkey - * can never be enabled without this capability. Simplify this weird - * quirky hardware by just saying it can't use PAS lists with UMR at - * all. - */ - if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) - return false; - - /* - * length is the size of the MR in bytes when mlx5_ib_update_xlt() is - * used. - */ - if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) && - length >= MLX5_MAX_UMR_PAGES * PAGE_SIZE) - return false; - return true; -} - -/* - * true if an existing MR can be reconfigured to new access_flags using UMR. - * Older HW cannot use UMR to update certain elements of the MKC. See - * umr_check_mkey_mask(), get_umr_update_access_mask() and umr_check_mkey_mask() - */ -static inline bool mlx5_ib_can_reconfig_with_umr(struct mlx5_ib_dev *dev, - unsigned int current_access_flags, - unsigned int target_access_flags) -{ - unsigned int diffs = current_access_flags ^ target_access_flags; - - if ((diffs & IB_ACCESS_REMOTE_ATOMIC) && - MLX5_CAP_GEN(dev->mdev, atomic) && - MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled)) - return false; - - if ((diffs & IB_ACCESS_RELAXED_ORDERING) && - MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) && - !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr)) - return false; - - if ((diffs & IB_ACCESS_RELAXED_ORDERING) && - MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) && - !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr)) - return false; - - return true; -} - static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mkey *mmkey) { diff --combined drivers/infiniband/hw/mlx5/qp.c index d2f243d3c4e2,fb8669c02546..40d9410ec303 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@@ -40,7 -40,6 +40,7 @@@ #include "ib_rep.h" #include "counters.h" #include "cmd.h" +#include "umr.h" #include "qp.h" #include "wr.h"
@@@ -3908,7 -3907,7 +3908,7 @@@ static unsigned int get_tx_affinity_rr( tx_port_affinity = &dev->port[port_num].roce.tx_port_affinity;
return (unsigned int)atomic_add_return(1, tx_port_affinity) % - MLX5_MAX_PORTS + 1; + (dev->lag_active ? dev->lag_ports : MLX5_CAP_GEN(dev->mdev, num_lag_ports)) + 1; }
static bool qp_supports_affinity(struct mlx5_ib_qp *qp) diff --combined drivers/net/dsa/ocelot/felix.c index faccfb3f0158,f5c9d695408a..a23781d9a15c --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@@ -42,44 -42,6 +42,6 @@@ static struct net_device *felix_classif } }
- /* We are called before felix_npi_port_init(), so ocelot->npi is -1. */ - static int felix_migrate_fdbs_to_npi_port(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid, - struct dsa_db db) - { - struct net_device *bridge_dev = felix_classify_db(db); - struct ocelot *ocelot = ds->priv; - int cpu = ocelot->num_phys_ports; - int err; - - err = ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev); - if (err) - return err; - - return ocelot_fdb_add(ocelot, cpu, addr, vid, bridge_dev); - } - - static int felix_migrate_mdbs_to_npi_port(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid, - struct dsa_db db) - { - struct net_device *bridge_dev = felix_classify_db(db); - struct switchdev_obj_port_mdb mdb; - struct ocelot *ocelot = ds->priv; - int cpu = ocelot->num_phys_ports; - int err; - - memset(&mdb, 0, sizeof(mdb)); - ether_addr_copy(mdb.addr, addr); - mdb.vid = vid; - - err = ocelot_port_mdb_del(ocelot, port, &mdb, bridge_dev); - if (err) - return err; - - return ocelot_port_mdb_add(ocelot, cpu, &mdb, bridge_dev); - } - static void felix_migrate_pgid_bit(struct dsa_switch *ds, int from, int to, int pgid) { @@@ -117,49 -79,6 +79,6 @@@ felix_migrate_flood_to_tag_8021q_port(s felix_migrate_pgid_bit(ds, ocelot->num_phys_ports, port, PGID_BC); }
- /* ocelot->npi was already set to -1 by felix_npi_port_deinit, so - * ocelot_fdb_add() will not redirect FDB entries towards the - * CPU port module here, which is what we want. - */ - static int - felix_migrate_fdbs_to_tag_8021q_port(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid, - struct dsa_db db) - { - struct net_device *bridge_dev = felix_classify_db(db); - struct ocelot *ocelot = ds->priv; - int cpu = ocelot->num_phys_ports; - int err; - - err = ocelot_fdb_del(ocelot, cpu, addr, vid, bridge_dev); - if (err) - return err; - - return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev); - } - - static int - felix_migrate_mdbs_to_tag_8021q_port(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid, - struct dsa_db db) - { - struct net_device *bridge_dev = felix_classify_db(db); - struct switchdev_obj_port_mdb mdb; - struct ocelot *ocelot = ds->priv; - int cpu = ocelot->num_phys_ports; - int err; - - memset(&mdb, 0, sizeof(mdb)); - ether_addr_copy(mdb.addr, addr); - mdb.vid = vid; - - err = ocelot_port_mdb_del(ocelot, cpu, &mdb, bridge_dev); - if (err) - return err; - - return ocelot_port_mdb_add(ocelot, port, &mdb, bridge_dev); - } - /* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that * the tagger can perform RX source port identification. */ @@@ -403,7 -322,6 +322,7 @@@ static int felix_update_trapping_destin { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); + struct ocelot_vcap_block *block_vcap_is2; struct ocelot_vcap_filter *trap; enum ocelot_mask_mode mask_mode; unsigned long port_mask; @@@ -423,13 -341,9 +342,13 @@@ /* We are sure that "cpu" was found, otherwise * dsa_tree_setup_default_cpu() would have failed earlier. */ + block_vcap_is2 = &ocelot->block[VCAP_IS2];
/* Make sure all traps are set up for that destination */ - list_for_each_entry(trap, &ocelot->traps, trap_list) { + list_for_each_entry(trap, &block_vcap_is2->rules, list) { + if (!trap->is_trap) + continue; + /* Figure out the current trapping destination */ if (using_tag_8021q) { /* Redirect to the tag_8021q CPU port. If timestamps @@@ -493,14 -407,11 +412,11 @@@ static int felix_setup_tag_8021q(struc if (err) return err;
- err = dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_tag_8021q_port); + err = ocelot_migrate_mdbs(ocelot, BIT(ocelot->num_phys_ports), + BIT(cpu)); if (err) goto out_tag_8021q_unregister;
- err = dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_tag_8021q_port); - if (err) - goto out_migrate_fdbs; - felix_migrate_flood_to_tag_8021q_port(ds, cpu);
err = felix_update_trapping_destinations(ds, true); @@@ -520,9 -431,7 +436,7 @@@
out_migrate_flood: felix_migrate_flood_to_npi_port(ds, cpu); - dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_npi_port); - out_migrate_fdbs: - dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_npi_port); + ocelot_migrate_mdbs(ocelot, BIT(cpu), BIT(ocelot->num_phys_ports)); out_tag_8021q_unregister: dsa_tag_8021q_unregister(ds); return err; @@@ -602,24 -511,16 +516,16 @@@ static int felix_setup_tag_npi(struct d struct ocelot *ocelot = ds->priv; int err;
- err = dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_npi_port); + err = ocelot_migrate_mdbs(ocelot, BIT(cpu), + BIT(ocelot->num_phys_ports)); if (err) return err;
- err = dsa_port_walk_mdbs(ds, cpu, felix_migrate_mdbs_to_npi_port); - if (err) - goto out_migrate_fdbs; - felix_migrate_flood_to_npi_port(ds, cpu);
felix_npi_port_init(ocelot, cpu);
return 0; - - out_migrate_fdbs: - dsa_port_walk_fdbs(ds, cpu, felix_migrate_fdbs_to_tag_8021q_port); - - return err; }
static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu) @@@ -1107,6 -1008,7 +1013,7 @@@ static const u32 felix_phy_match_table[ [PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII, [PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII, [PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII, + [PHY_INTERFACE_MODE_1000BASEX] = OCELOT_PORT_MODE_1000BASEX, [PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX, };
@@@ -1202,7 -1104,6 +1109,6 @@@ static int felix_init_structs(struct fe
ocelot->map = felix->info->map; ocelot->stats_layout = felix->info->stats_layout; - ocelot->num_stats = felix->info->num_stats; ocelot->num_mact_rows = felix->info->num_mact_rows; ocelot->vcap = felix->info->vcap; ocelot->vcap_pol.base = felix->info->vcap_pol_base; diff --combined drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 8201ce7adb77,ea740210803f..25129e723b57 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@@ -7,15 -7,37 +7,37 @@@
/* File aq_ring.c: Definition of functions for Rx/Tx rings. */
- #include "aq_ring.h" #include "aq_nic.h" #include "aq_hw.h" #include "aq_hw_utils.h" #include "aq_ptp.h" + #include "aq_vec.h" + #include "aq_main.h"
+ #include <net/xdp.h> + #include <linux/filter.h> + #include <linux/bpf_trace.h> #include <linux/netdevice.h> #include <linux/etherdevice.h>
+ static void aq_get_rxpages_xdp(struct aq_ring_buff_s *buff, + struct xdp_buff *xdp) + { + struct skb_shared_info *sinfo; + int i; + + if (xdp_buff_has_frags(xdp)) { + sinfo = xdp_get_shared_info_from_buff(xdp); + + for (i = 0; i < sinfo->nr_frags; i++) { + skb_frag_t *frag = &sinfo->frags[i]; + + page_ref_inc(skb_frag_page(frag)); + } + } + page_ref_inc(buff->rxdata.page); + } + static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev) { unsigned int len = PAGE_SIZE << rxpage->order; @@@ -27,9 -49,10 +49,10 @@@ rxpage->page = NULL; }
- static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order, - struct device *dev) + static int aq_alloc_rxpages(struct aq_rxpage *rxpage, struct aq_ring_s *rx_ring) { + struct device *dev = aq_nic_get_dev(rx_ring->aq_nic); + unsigned int order = rx_ring->page_order; struct page *page; int ret = -ENOMEM; dma_addr_t daddr; @@@ -47,7 -70,7 +70,7 @@@ rxpage->page = page; rxpage->daddr = daddr; rxpage->order = order; - rxpage->pg_off = 0; + rxpage->pg_off = rx_ring->page_offset;
return 0;
@@@ -58,21 -81,26 +81,26 @@@ err_exit return ret; }
- static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf, - int order) + static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf) { + unsigned int order = self->page_order; + u16 page_offset = self->page_offset; + u16 frame_max = self->frame_max; + u16 tail_size = self->tail_size; int ret;
if (rxbuf->rxdata.page) { /* One means ring is the only user and can reuse */ if (page_ref_count(rxbuf->rxdata.page) > 1) { /* Try reuse buffer */ - rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX; - if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <= - (PAGE_SIZE << order)) { + rxbuf->rxdata.pg_off += frame_max + page_offset + + tail_size; + if (rxbuf->rxdata.pg_off + frame_max + tail_size <= + (PAGE_SIZE << order)) { u64_stats_update_begin(&self->stats.rx.syncp); self->stats.rx.pg_flips++; u64_stats_update_end(&self->stats.rx.syncp); + } else { /* Buffer exhausted. We have other users and * should release this page and realloc @@@ -84,7 -112,7 +112,7 @@@ u64_stats_update_end(&self->stats.rx.syncp); } } else { - rxbuf->rxdata.pg_off = 0; + rxbuf->rxdata.pg_off = page_offset; u64_stats_update_begin(&self->stats.rx.syncp); self->stats.rx.pg_reuses++; u64_stats_update_end(&self->stats.rx.syncp); @@@ -92,8 -120,7 +120,7 @@@ }
if (!rxbuf->rxdata.page) { - ret = aq_get_rxpage(&rxbuf->rxdata, order, - aq_nic_get_dev(self->aq_nic)); + ret = aq_alloc_rxpages(&rxbuf->rxdata, self); if (ret) { u64_stats_update_begin(&self->stats.rx.syncp); self->stats.rx.alloc_fails++; @@@ -117,6 -144,7 +144,7 @@@ static struct aq_ring_s *aq_ring_alloc( err = -ENOMEM; goto err_exit; } + self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic), self->size * self->dx_size, &self->dx_ring_pa, GFP_KERNEL); @@@ -172,11 -200,22 +200,22 @@@ struct aq_ring_s *aq_ring_rx_alloc(stru self->idx = idx; self->size = aq_nic_cfg->rxds; self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size; - self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE + - (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1; - - if (aq_nic_cfg->rxpageorder > self->page_order) - self->page_order = aq_nic_cfg->rxpageorder; + self->xdp_prog = aq_nic->xdp_prog; + self->frame_max = AQ_CFG_RX_FRAME_MAX; + + /* Only order-2 is allowed if XDP is enabled */ + if (READ_ONCE(self->xdp_prog)) { + self->page_offset = AQ_XDP_HEADROOM; + self->page_order = AQ_CFG_XDP_PAGEORDER; + self->tail_size = AQ_XDP_TAILROOM; + } else { + self->page_offset = 0; + self->page_order = fls(self->frame_max / PAGE_SIZE + + (self->frame_max % PAGE_SIZE ? 1 : 0)) - 1; + if (aq_nic_cfg->rxpageorder > self->page_order) + self->page_order = aq_nic_cfg->rxpageorder; + self->tail_size = 0; + }
self = aq_ring_alloc(self, aq_nic); if (!self) { @@@ -298,15 -337,26 +337,26 @@@ bool aq_ring_tx_clean(struct aq_ring_s } }
- if (unlikely(buff->is_eop && buff->skb)) { + if (likely(!buff->is_eop)) + goto out; + + if (buff->skb) { u64_stats_update_begin(&self->stats.tx.syncp); ++self->stats.tx.packets; self->stats.tx.bytes += buff->skb->len; u64_stats_update_end(&self->stats.tx.syncp); - dev_kfree_skb_any(buff->skb); - buff->skb = NULL; + } else if (buff->xdpf) { + u64_stats_update_begin(&self->stats.tx.syncp); + ++self->stats.tx.packets; + self->stats.tx.bytes += xdp_get_frame_len(buff->xdpf); + u64_stats_update_end(&self->stats.tx.syncp); + xdp_return_frame_rx_napi(buff->xdpf); } + + out: + buff->skb = NULL; + buff->xdpf = NULL; buff->pa = 0U; buff->eop_index = 0xffffU; self->sw_head = aq_ring_next_dx(self, self->sw_head); @@@ -339,13 -389,162 +389,161 @@@ static void aq_rx_checksum(struct aq_ri __skb_incr_checksum_unnecessary(skb); }
- #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) - int aq_ring_rx_clean(struct aq_ring_s *self, - struct napi_struct *napi, - int *work_done, - int budget) + int aq_xdp_xmit(struct net_device *dev, int num_frames, + struct xdp_frame **frames, u32 flags) + { + struct aq_nic_s *aq_nic = netdev_priv(dev); + unsigned int vec, i, drop = 0; + int cpu = smp_processor_id(); + struct aq_nic_cfg_s *aq_cfg; + struct aq_ring_s *ring; + + aq_cfg = aq_nic_get_cfg(aq_nic); + vec = cpu % aq_cfg->vecs; + ring = aq_nic->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(aq_cfg, 0, vec)]; + + for (i = 0; i < num_frames; i++) { + struct xdp_frame *xdpf = frames[i]; + + if (aq_nic_xmit_xdpf(aq_nic, ring, xdpf) == NETDEV_TX_BUSY) + drop++; + } + + return num_frames - drop; + } + + static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic, + struct xdp_buff *xdp, + struct aq_ring_s *rx_ring, + struct aq_ring_buff_s *buff) + { + int result = NETDEV_TX_BUSY; + struct aq_ring_s *tx_ring; + struct xdp_frame *xdpf; + struct bpf_prog *prog; + u32 act = XDP_ABORTED; + struct sk_buff *skb; + + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.packets; + rx_ring->stats.rx.bytes += xdp_get_buff_len(xdp); + u64_stats_update_end(&rx_ring->stats.rx.syncp); + + prog = READ_ONCE(rx_ring->xdp_prog); + if (!prog) + goto pass; + + prefetchw(xdp->data_hard_start); /* xdp_frame write */ + + /* single buffer XDP program, but packet is multi buffer, aborted */ + if (xdp_buff_has_frags(xdp) && !prog->aux->xdp_has_frags) + goto out_aborted; + + act = bpf_prog_run_xdp(prog, xdp); + switch (act) { + case XDP_PASS: + pass: + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) + goto out_aborted; + skb = xdp_build_skb_from_frame(xdpf, aq_nic->ndev); + if (!skb) + goto out_aborted; + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.xdp_pass; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + aq_get_rxpages_xdp(buff, xdp); + return skb; + case XDP_TX: + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) + goto out_aborted; + tx_ring = aq_nic->aq_ring_tx[rx_ring->idx]; + result = aq_nic_xmit_xdpf(aq_nic, tx_ring, xdpf); + if (result == NETDEV_TX_BUSY) + goto out_aborted; + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.xdp_tx; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + aq_get_rxpages_xdp(buff, xdp); + break; + case XDP_REDIRECT: + if (xdp_do_redirect(aq_nic->ndev, xdp, prog) < 0) + goto out_aborted; + xdp_do_flush(); + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.xdp_redirect; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + aq_get_rxpages_xdp(buff, xdp); + break; + default: + fallthrough; + case XDP_ABORTED: + out_aborted: + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.xdp_aborted; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + trace_xdp_exception(aq_nic->ndev, prog, act); + bpf_warn_invalid_xdp_action(aq_nic->ndev, prog, act); + break; + case XDP_DROP: + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.xdp_drop; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + break; + } + + return ERR_PTR(-result); + } + + static bool aq_add_rx_fragment(struct device *dev, + struct aq_ring_s *ring, + struct aq_ring_buff_s *buff, + struct xdp_buff *xdp) + { + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + struct aq_ring_buff_s *buff_ = buff; + + memset(sinfo, 0, sizeof(*sinfo)); + do { + skb_frag_t *frag; + + if (unlikely(sinfo->nr_frags >= 
MAX_SKB_FRAGS)) + return true; + + frag = &sinfo->frags[sinfo->nr_frags++]; + buff_ = &ring->buff_ring[buff_->next]; + dma_sync_single_range_for_cpu(dev, + buff_->rxdata.daddr, + buff_->rxdata.pg_off, + buff_->len, + DMA_FROM_DEVICE); + skb_frag_off_set(frag, buff_->rxdata.pg_off); + skb_frag_size_set(frag, buff_->len); + sinfo->xdp_frags_size += buff_->len; + __skb_frag_set_page(frag, buff_->rxdata.page); + + buff_->is_cleaned = 1; + + buff->is_ip_cso &= buff_->is_ip_cso; + buff->is_udp_cso &= buff_->is_udp_cso; + buff->is_tcp_cso &= buff_->is_tcp_cso; + buff->is_cso_err |= buff_->is_cso_err; + + if (page_is_pfmemalloc(buff_->rxdata.page)) + xdp_buff_set_frag_pfmemalloc(xdp); + + } while (!buff_->is_eop); + + xdp_buff_set_frags_flag(xdp); + + return false; + } + + static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi, + int *work_done, int budget) { struct net_device *ndev = aq_nic_get_ndev(self->aq_nic); - bool is_rsc_completed = true; int err = 0;
for (; (self->sw_head != self->hw_head) && budget; @@@ -363,17 -562,12 +561,17 @@@ continue;
if (!buff->is_eop) { + unsigned int frag_cnt = 0U; buff_ = buff; do { + bool is_rsc_completed = true; + if (buff_->next >= self->size) { err = -EIO; goto err_exit; } + + frag_cnt++; next_ = buff_->next, buff_ = &self->buff_ring[next_]; is_rsc_completed = @@@ -381,17 -575,18 +579,17 @@@ next_, self->hw_head);
- if (unlikely(!is_rsc_completed)) - break; + if (unlikely(!is_rsc_completed) || + frag_cnt > MAX_SKB_FRAGS) { + err = 0; + goto err_exit; + }
buff->is_error |= buff_->is_error; buff->is_cso_err |= buff_->is_cso_err;
} while (!buff_->is_eop);
- if (!is_rsc_completed) { - err = 0; - goto err_exit; - } if (buff->is_error || (buff->is_lro && buff->is_cso_err)) { buff_ = buff; @@@ -449,15 -644,16 +647,15 @@@ ALIGN(hdr_len, sizeof(long)));
if (buff->len - hdr_len > 0) { - skb_add_rx_frag(skb, 0, buff->rxdata.page, + skb_add_rx_frag(skb, i++, buff->rxdata.page, buff->rxdata.pg_off + hdr_len, buff->len - hdr_len, - AQ_CFG_RX_FRAME_MAX); + self->frame_max); page_ref_inc(buff->rxdata.page); }
if (!buff->is_eop) { buff_ = buff; - i = 1U; do { next_ = buff_->next; buff_ = &self->buff_ring[next_]; @@@ -471,7 -667,7 +669,7 @@@ buff_->rxdata.page, buff_->rxdata.pg_off, buff_->len, - AQ_CFG_RX_FRAME_MAX); + self->frame_max); page_ref_inc(buff_->rxdata.page); buff_->is_cleaned = 1;
@@@ -512,6 -708,149 +710,149 @@@ err_exit return err; }
+ static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring, + struct napi_struct *napi, int *work_done, + int budget) + { + int frame_sz = rx_ring->page_offset + rx_ring->frame_max + + rx_ring->tail_size; + struct aq_nic_s *aq_nic = rx_ring->aq_nic; + bool is_rsc_completed = true; + struct device *dev; + int err = 0; + + dev = aq_nic_get_dev(aq_nic); + for (; (rx_ring->sw_head != rx_ring->hw_head) && budget; + rx_ring->sw_head = aq_ring_next_dx(rx_ring, rx_ring->sw_head), + --budget, ++(*work_done)) { + struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head]; + bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring); + struct aq_ring_buff_s *buff_ = NULL; + struct sk_buff *skb = NULL; + unsigned int next_ = 0U; + struct xdp_buff xdp; + void *hard_start; + + if (buff->is_cleaned) + continue; + + if (!buff->is_eop) { + buff_ = buff; + do { + if (buff_->next >= rx_ring->size) { + err = -EIO; + goto err_exit; + } + next_ = buff_->next; + buff_ = &rx_ring->buff_ring[next_]; + is_rsc_completed = + aq_ring_dx_in_range(rx_ring->sw_head, + next_, + rx_ring->hw_head); + + if (unlikely(!is_rsc_completed)) + break; + + buff->is_error |= buff_->is_error; + buff->is_cso_err |= buff_->is_cso_err; + } while (!buff_->is_eop); + + if (!is_rsc_completed) { + err = 0; + goto err_exit; + } + if (buff->is_error || + (buff->is_lro && buff->is_cso_err)) { + buff_ = buff; + do { + if (buff_->next >= rx_ring->size) { + err = -EIO; + goto err_exit; + } + next_ = buff_->next; + buff_ = &rx_ring->buff_ring[next_]; + + buff_->is_cleaned = true; + } while (!buff_->is_eop); + + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.errors; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + continue; + } + } + + if (buff->is_error) { + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.errors; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + continue; + } + + dma_sync_single_range_for_cpu(dev, + buff->rxdata.daddr, + buff->rxdata.pg_off, + buff->len, DMA_FROM_DEVICE); + hard_start = page_address(buff->rxdata.page) + + buff->rxdata.pg_off - rx_ring->page_offset; + + if (is_ptp_ring) + buff->len -= + aq_ptp_extract_ts(rx_ring->aq_nic, skb, + aq_buf_vaddr(&buff->rxdata), + buff->len); + + xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); + xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset, + buff->len, false); + if (!buff->is_eop) { + if (aq_add_rx_fragment(dev, rx_ring, buff, &xdp)) { + u64_stats_update_begin(&rx_ring->stats.rx.syncp); + ++rx_ring->stats.rx.packets; + rx_ring->stats.rx.bytes += xdp_get_buff_len(&xdp); + ++rx_ring->stats.rx.xdp_aborted; + u64_stats_update_end(&rx_ring->stats.rx.syncp); + continue; + } + } + + skb = aq_xdp_run_prog(aq_nic, &xdp, rx_ring, buff); + if (IS_ERR(skb) || !skb) + continue; + + if (buff->is_vlan) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + buff->vlan_rx_tag); + + aq_rx_checksum(rx_ring, buff, skb); + + skb_set_hash(skb, buff->rss_hash, + buff->is_hash_l4 ? PKT_HASH_TYPE_L4 : + PKT_HASH_TYPE_NONE); + /* Send all PTP traffic to 0 queue */ + skb_record_rx_queue(skb, + is_ptp_ring ? 
0 + : AQ_NIC_RING2QMAP(rx_ring->aq_nic, + rx_ring->idx)); + + napi_gro_receive(napi, skb); + } + + err_exit: + return err; + } + + int aq_ring_rx_clean(struct aq_ring_s *self, + struct napi_struct *napi, + int *work_done, + int budget) + { + if (static_branch_unlikely(&aq_xdp_locking_key)) + return __aq_ring_xdp_clean(self, napi, work_done, budget); + else + return __aq_ring_rx_clean(self, napi, work_done, budget); + } + void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic) { #if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) @@@ -531,7 -870,6 +872,6 @@@
int aq_ring_rx_fill(struct aq_ring_s *self) { - unsigned int page_order = self->page_order; struct aq_ring_buff_s *buff = NULL; int err = 0; int i = 0; @@@ -545,9 -883,9 +885,9 @@@ buff = &self->buff_ring[self->sw_tail];
buff->flags = 0U; - buff->len = AQ_CFG_RX_FRAME_MAX; + buff->len = self->frame_max;
- err = aq_get_rxpages(self, buff, page_order); + err = aq_get_rxpages(self, buff); if (err) goto err_exit;
@@@ -602,6 -940,15 +942,15 @@@ unsigned int aq_ring_fill_stats_data(st data[++count] = self->stats.rx.alloc_fails; data[++count] = self->stats.rx.skb_alloc_fails; data[++count] = self->stats.rx.polls; + data[++count] = self->stats.rx.pg_flips; + data[++count] = self->stats.rx.pg_reuses; + data[++count] = self->stats.rx.pg_losts; + data[++count] = self->stats.rx.xdp_aborted; + data[++count] = self->stats.rx.xdp_drop; + data[++count] = self->stats.rx.xdp_pass; + data[++count] = self->stats.rx.xdp_tx; + data[++count] = self->stats.rx.xdp_invalid; + data[++count] = self->stats.rx.xdp_redirect; } while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start)); } else { /* This data should mimic aq_ethtool_queue_tx_stat_names structure */ diff --combined drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 15ede7285fb5,878a53abec33..54e70f07b573 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@@ -766,7 -766,7 +766,7 @@@ int hw_atl_b0_hw_ring_rx_init(struct aq hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_rdm_rx_desc_data_buff_size_set(self, - AQ_CFG_RX_FRAME_MAX / 1024U, + aq_ring->frame_max / 1024U, aq_ring->idx);
hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); @@@ -889,13 -889,6 +889,13 @@@ int hw_atl_b0_hw_ring_tx_head_update(st err = -ENXIO; goto err_exit; } + + /* Validate that the new hw_head_ is reasonable. */ + if (hw_head_ >= ring->size) { + err = -ENXIO; + goto err_exit; + } + ring->hw_head = hw_head_; err = aq_hw_err_from_flags(self);
@@@ -976,15 -969,15 +976,15 @@@ int hw_atl_b0_hw_ring_rx_receive(struc rxd_wb->status); if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) { buff->len = rxd_wb->pkt_len % - AQ_CFG_RX_FRAME_MAX; + ring->frame_max; buff->len = buff->len ? - buff->len : AQ_CFG_RX_FRAME_MAX; + buff->len : ring->frame_max; buff->next = 0U; buff->is_eop = 1U; } else { buff->len = - rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ? - AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len; + rxd_wb->pkt_len > ring->frame_max ? + ring->frame_max : rxd_wb->pkt_len;
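The descriptor-size math in this hunk only swaps the constant for ring->frame_max: non-EOP descriptors are clamped to a full frame_max buffer, and on the EOP descriptor the hardware-reported pkt_len is reduced modulo frame_max, with zero meaning another full buffer. A small userspace sketch of that split, using a hypothetical 5000-byte packet and 2048-byte buffers:

#include <stdio.h>

int main(void)
{
	const unsigned int frame_max = 2048;	/* per-descriptor buffer size */
	const unsigned int pkt_len = 5000;	/* hypothetical total packet length */
	unsigned int left = pkt_len;
	unsigned int eop;

	/* Non-EOP descriptors carry a full frame_max buffer. */
	while (left > frame_max) {
		printf("descriptor: %u bytes\n", frame_max);
		left -= frame_max;
	}

	/* EOP descriptor: pkt_len % frame_max, with 0 meaning a full buffer,
	 * mirroring the expression in hw_atl_b0_hw_ring_rx_receive(). */
	eop = pkt_len % frame_max;
	if (!eop)
		eop = frame_max;
	printf("EOP descriptor: %u bytes\n", eop);
	return 0;
}

With these numbers the packet is spread as 2048 + 2048 + 904 bytes across three descriptors.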
if (buff->is_lro) { /* LRO */ diff --combined drivers/net/ethernet/dec/tulip/tulip_core.c index 0040dcaab945,434d8bf0e8f9..825e81f5fd22 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@@ -1399,10 -1399,8 +1399,10 @@@ static int tulip_init_one(struct pci_de
/* alloc_etherdev ensures aligned and zeroed private structures */ dev = alloc_etherdev (sizeof (*tp)); - if (!dev) + if (!dev) { + pci_disable_device(pdev); return -ENOMEM; + }
SET_NETDEV_DEV(dev, &pdev->dev); if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) { @@@ -1691,7 -1689,7 +1691,7 @@@ dev->netdev_ops = &tulip_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; #ifdef CONFIG_TULIP_NAPI - netif_napi_add(dev, &tp->napi, tulip_poll, 16); + netif_napi_add_weight(dev, &tp->napi, tulip_poll, 16); #endif dev->ethtool_ops = &ops;
@@@ -1787,7 -1785,6 +1787,7 @@@ err_out_free_res
err_out_free_netdev: free_netdev (dev); + pci_disable_device(pdev); return -ENODEV; }
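Both new pci_disable_device() calls close the same leak: once tulip_init_one() has enabled the PCI device, every failure exit has to disable it again, including the early alloc_etherdev() failure and the err_out_free_netdev unwind. The shape of that pattern, sketched with a hypothetical "example" driver rather than tulip's actual probe (a compile-style sketch, not meant to run standalone):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

struct example_priv {
	int dummy;
};

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	dev = alloc_etherdev(sizeof(struct example_priv));
	if (!dev) {
		err = -ENOMEM;
		goto err_disable;	/* this exit used to leak the enable */
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = register_netdev(dev);
	if (err)
		goto err_free;

	return 0;

err_free:
	free_netdev(dev);
err_disable:
	pci_disable_device(pdev);
	return err;
}

Each label undoes exactly the acquisitions made before the jump, in reverse order, which is what the added calls restore for tulip.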
diff --combined drivers/net/ethernet/intel/i40e/i40e_main.c index 98871f014994,358c2edc118d..332a608dbaa6 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@@ -77,6 -77,7 +77,7 @@@ static const struct pci_device_id i40e_ {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0}, @@@ -785,6 -786,7 +786,7 @@@ static void i40e_update_vsi_stats(struc unsigned int start; u64 tx_linearize; u64 tx_force_wb; + u64 tx_stopped; u64 rx_p, rx_b; u64 tx_p, tx_b; u16 q; @@@ -804,6 -806,7 +806,7 @@@ rx_b = rx_p = 0; tx_b = tx_p = 0; tx_restart = tx_busy = tx_linearize = tx_force_wb = 0; + tx_stopped = 0; rx_page = 0; rx_buf = 0; rx_reuse = 0; @@@ -828,6 -831,7 +831,7 @@@ tx_busy += p->tx_stats.tx_busy; tx_linearize += p->tx_stats.tx_linearize; tx_force_wb += p->tx_stats.tx_force_wb; + tx_stopped += p->tx_stats.tx_stopped;
/* locate Rx ring */ p = READ_ONCE(vsi->rx_rings[q]); @@@ -872,6 -876,7 +876,7 @@@ vsi->tx_busy = tx_busy; vsi->tx_linearize = tx_linearize; vsi->tx_force_wb = tx_force_wb; + vsi->tx_stopped = tx_stopped; vsi->rx_page_failed = rx_page; vsi->rx_buf_failed = rx_buf; vsi->rx_page_reuse = rx_reuse; @@@ -7549,43 -7554,42 +7554,43 @@@ static void i40e_free_macvlan_channels( static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev, struct i40e_fwd_adapter *fwd) { + struct i40e_channel *ch = NULL, *ch_tmp, *iter; int ret = 0, num_tc = 1, i, aq_err; - struct i40e_channel *ch, *ch_tmp; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw;
- if (list_empty(&vsi->macvlan_list)) - return -EINVAL; - /* Go through the list and find an available channel */ - list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { - if (!i40e_is_channel_macvlan(ch)) { - ch->fwd = fwd; + list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) { + if (!i40e_is_channel_macvlan(iter)) { + iter->fwd = fwd; /* record configuration for macvlan interface in vdev */ for (i = 0; i < num_tc; i++) netdev_bind_sb_channel_queue(vsi->netdev, vdev, i, - ch->num_queue_pairs, - ch->base_queue); - for (i = 0; i < ch->num_queue_pairs; i++) { + iter->num_queue_pairs, + iter->base_queue); + for (i = 0; i < iter->num_queue_pairs; i++) { struct i40e_ring *tx_ring, *rx_ring; u16 pf_q;
- pf_q = ch->base_queue + i; + pf_q = iter->base_queue + i;
/* Get to TX ring ptr */ tx_ring = vsi->tx_rings[pf_q]; - tx_ring->ch = ch; + tx_ring->ch = iter;
/* Get the RX ring ptr */ rx_ring = vsi->rx_rings[pf_q]; - rx_ring->ch = ch; + rx_ring->ch = iter; } + ch = iter; break; } }
+ if (!ch) + return -EINVAL; + /* Guarantee all rings are updated before we update the * MAC address filter. */ @@@ -13437,8 -13441,7 +13442,7 @@@ static int i40e_config_netdev(struct i4 np->vsi = vsi;
hw_enc_features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | + NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_SOFT_FEATURES | NETIF_F_TSO | @@@ -13469,6 -13472,23 +13473,23 @@@ /* record features VLANs can make use of */ netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
+ #define I40E_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPXIP4 | \ + NETIF_F_GSO_IPXIP6 | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | + I40E_GSO_PARTIAL_FEATURES; + + netdev->mpls_features |= NETIF_F_SG; + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->mpls_features |= NETIF_F_TSO; + netdev->mpls_features |= NETIF_F_TSO6; + netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES; + /* enable macvlan offloads */ netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
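The i40e_fwd_ring_up() rework earlier in this i40e_main.c diff illustrates the list-iterator hardening pattern: the list_for_each_entry_safe() cursor (iter) never escapes the loop, a separate ch is assigned only when a free channel is actually found, and the caller tests ch against NULL instead of relying on where the cursor stopped. A runnable userspace model of that find-or-fail shape, with invented types and values:

#include <stddef.h>
#include <stdio.h>

struct channel {
	int id;
	int in_use;
};

static struct channel *find_free_channel(struct channel *chans, size_t n)
{
	struct channel *found = NULL;

	for (size_t i = 0; i < n; i++) {
		struct channel *iter = &chans[i];	/* loop-local cursor */

		if (!iter->in_use) {
			found = iter;			/* publish the hit ... */
			break;				/* ... and stop */
		}
	}

	return found;					/* NULL means no free channel */
}

int main(void)
{
	struct channel chans[] = { { 0, 1 }, { 1, 1 }, { 2, 0 } };
	struct channel *ch = find_free_channel(chans, 3);

	if (!ch) {
		fprintf(stderr, "no free channel\n");
		return 1;
	}
	printf("using channel %d\n", ch->id);
	return 0;
}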
diff --combined drivers/net/ethernet/intel/ice/ice.h index a895e3a8e988,2fbb3d737dfd..60453b3b8d23 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@@ -540,7 -540,6 +540,7 @@@ struct ice_pf struct mutex avail_q_mutex; /* protects access to avail_[rx|tx]qs */ struct mutex sw_mutex; /* lock for protecting VSI alloc flow */ struct mutex tc_mutex; /* lock to protect TC changes */ + struct mutex adev_mutex; /* lock to protect aux device access */ u32 msg_enable; struct ice_ptp ptp; struct tty_driver *ice_gnss_tty_driver; @@@ -758,6 -757,21 +758,21 @@@ static inline struct ice_vsi *ice_get_c return pf->vsi[pf->ctrl_vsi_idx]; }
+ /** + * ice_find_vsi - Find the VSI from VSI ID + * @pf: The PF pointer to search in + * @vsi_num: The VSI ID to search for + */ + static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num) + { + int i; + + ice_for_each_vsi(pf, i) + if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num) + return pf->vsi[i]; + return NULL; + } + /** * ice_is_switchdev_running - check if switchdev is configured * @pf: pointer to PF structure diff --combined drivers/net/ethernet/intel/ice/ice_idc.c index 3e3b2ed4cd5d,7e941264ae21..895c32bcc8b5 --- a/drivers/net/ethernet/intel/ice/ice_idc.c +++ b/drivers/net/ethernet/intel/ice/ice_idc.c @@@ -37,34 -37,16 +37,19 @@@ void ice_send_event_to_aux(struct ice_p if (WARN_ON_ONCE(!in_task())) return;
+ mutex_lock(&pf->adev_mutex); if (!pf->adev) - return; + goto finish;
device_lock(&pf->adev->dev); iadrv = ice_get_auxiliary_drv(pf); if (iadrv && iadrv->event_handler) iadrv->event_handler(pf, event); device_unlock(&pf->adev->dev); +finish: + mutex_unlock(&pf->adev_mutex); }
- /** - * ice_find_vsi - Find the VSI from VSI ID - * @pf: The PF pointer to search in - * @vsi_num: The VSI ID to search for - */ - static struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num) - { - int i; - - ice_for_each_vsi(pf, i) - if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num) - return pf->vsi[i]; - return NULL; - } - /** * ice_add_rdma_qset - Add Leaf Node for RDMA Qset * @pf: PF struct @@@ -293,6 -275,7 +278,6 @@@ int ice_plug_aux_dev(struct ice_pf *pf return -ENOMEM;
adev = &iadev->adev; - pf->adev = adev; iadev->pf = pf;
adev->id = pf->aux_idx; @@@ -302,20 -285,18 +287,20 @@@
ret = auxiliary_device_init(adev); if (ret) { - pf->adev = NULL; kfree(iadev); return ret; }
ret = auxiliary_device_add(adev); if (ret) { - pf->adev = NULL; auxiliary_device_uninit(adev); return ret; }
+ mutex_lock(&pf->adev_mutex); + pf->adev = adev; + mutex_unlock(&pf->adev_mutex); + return 0; }
@@@ -324,17 -305,12 +309,17 @@@ */ void ice_unplug_aux_dev(struct ice_pf *pf) { - if (!pf->adev) - return; + struct auxiliary_device *adev;
- auxiliary_device_delete(pf->adev); - auxiliary_device_uninit(pf->adev); + mutex_lock(&pf->adev_mutex); + adev = pf->adev; pf->adev = NULL; + mutex_unlock(&pf->adev_mutex); + + if (adev) { + auxiliary_device_delete(adev); + auxiliary_device_uninit(adev); + } }
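Taken together, the ice_idc.c changes follow one pattern: pf->adev is published only after auxiliary_device_add() succeeds, readers such as ice_send_event_to_aux() check it under adev_mutex, and ice_unplug_aux_dev() detaches the pointer under the lock but destroys the device only after dropping it, so adev_mutex is never held across the auxiliary device's remove path. A runnable userspace model of that publish/hide ordering, using pthreads and invented "child" names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct child {
	int id;
};

static pthread_mutex_t child_lock = PTHREAD_MUTEX_INITIALIZER;
static struct child *child;		/* NULL while unplugged */

static void plug(int id)
{
	struct child *c = malloc(sizeof(*c));

	if (!c)
		return;
	c->id = id;			/* finish setup first ... */

	pthread_mutex_lock(&child_lock);
	child = c;			/* ... then publish */
	pthread_mutex_unlock(&child_lock);
}

static void unplug(void)
{
	struct child *c;

	pthread_mutex_lock(&child_lock);
	c = child;			/* grab and hide under the lock */
	child = NULL;
	pthread_mutex_unlock(&child_lock);

	free(c);			/* teardown outside the lock */
}

static void notify(int event)
{
	pthread_mutex_lock(&child_lock);
	if (child)
		printf("child %d got event %d\n", child->id, event);
	pthread_mutex_unlock(&child_lock);
}

int main(void)
{
	notify(1);			/* no child yet: skipped */
	plug(42);
	notify(2);
	unplug();
	notify(3);			/* child gone: skipped again */
	return 0;
}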
/** diff --combined drivers/net/ethernet/intel/ice/ice_main.c index 949669fed7d6,867908f94661..4a5d4d971161 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@@ -296,6 -296,20 +296,20 @@@ static int ice_clear_promisc(struct ice return status; }
+ /** + * ice_get_devlink_port - Get devlink port from netdev + * @netdev: the netdevice structure + */ + static struct devlink_port *ice_get_devlink_port(struct net_device *netdev) + { + struct ice_pf *pf = ice_netdev_to_pf(netdev); + + if (!ice_is_switchdev_running(pf)) + return NULL; + + return &pf->devlink_port; + } + /** * ice_vsi_sync_fltr - Update the VSI filter list to the HW * @vsi: ptr to the VSI @@@ -3336,7 -3350,9 +3350,9 @@@ static void ice_set_netdev_features(str vlano_features | tso_features;
/* add support for HW_CSUM on packets with MPLS header */ - netdev->mpls_features = NETIF_F_HW_CSUM; + netdev->mpls_features = NETIF_F_HW_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6;
/* enable features */ netdev->features |= netdev->hw_features; @@@ -3769,7 -3785,6 +3785,7 @@@ u16 ice_get_avail_rxq_count(struct ice_ static void ice_deinit_pf(struct ice_pf *pf) { ice_service_task_stop(pf); + mutex_destroy(&pf->adev_mutex); mutex_destroy(&pf->sw_mutex); mutex_destroy(&pf->tc_mutex); mutex_destroy(&pf->avail_q_mutex); @@@ -3848,7 -3863,6 +3864,7 @@@ static int ice_init_pf(struct ice_pf *p
mutex_init(&pf->sw_mutex); mutex_init(&pf->tc_mutex); + mutex_init(&pf->adev_mutex);
INIT_HLIST_HEAD(&pf->aq_wait_list); spin_lock_init(&pf->aq_wait_lock); @@@ -5674,11 -5688,12 +5690,12 @@@ ice_fdb_add(struct ndmsg *ndm, struct n * @dev: the net device pointer * @addr: the MAC address entry being added * @vid: VLAN ID + * @extack: netlink extended ack */ static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, - __always_unused u16 vid) + __always_unused u16 vid, struct netlink_ext_ack *extack) { int err;
@@@ -8926,4 -8941,5 +8943,5 @@@ static const struct net_device_ops ice_ .ndo_bpf = ice_xdp, .ndo_xdp_xmit = ice_xdp_xmit, .ndo_xsk_wakeup = ice_xsk_wakeup, + .ndo_get_devlink_port = ice_get_devlink_port, }; diff --combined drivers/net/ethernet/intel/ice/ice_virtchnl.c index 2889e050a4c9,b47577a2841a..1d9b84c3937a --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@@ -514,24 -514,6 +514,6 @@@ static void ice_vc_reset_vf_msg(struct ice_reset_vf(vf, 0); }
- /** - * ice_find_vsi_from_id - * @pf: the PF structure to search for the VSI - * @id: ID of the VSI it is searching for - * - * searches for the VSI with the given ID - */ - static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id) - { - int i; - - ice_for_each_vsi(pf, i) - if (pf->vsi[i] && pf->vsi[i]->vsi_num == id) - return pf->vsi[i]; - - return NULL; - } - /** * ice_vc_isvalid_vsi_id * @vf: pointer to the VF info @@@ -544,7 -526,7 +526,7 @@@ bool ice_vc_isvalid_vsi_id(struct ice_v struct ice_pf *pf = vf->pf; struct ice_vsi *vsi;
- vsi = ice_find_vsi_from_id(pf, vsi_id); + vsi = ice_find_vsi(pf, vsi_id);
return (vsi && (vsi->vf == vf)); } @@@ -559,7 -541,7 +541,7 @@@ */ static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid) { - struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id); + struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id); /* allocated Tx and Rx queues should be always equal for VF VSI */ return (vsi && (qid < vsi->alloc_txq)); } @@@ -1307,52 -1289,13 +1289,52 @@@ error_param NULL, 0); }
+/** + * ice_vf_vsi_dis_single_txq - disable a single Tx queue + * @vf: VF to disable queue for + * @vsi: VSI for the VF + * @q_id: VF relative (0-based) queue ID + * + * Attempt to disable the Tx queue passed in. If the Tx queue was successfully + * disabled then clear q_id bit in the enabled queues bitmap and return + * success. Otherwise return error. + */ +static int +ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id) +{ + struct ice_txq_meta txq_meta = { 0 }; + struct ice_tx_ring *ring; + int err; + + if (!test_bit(q_id, vf->txq_ena)) + dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n", + q_id, vsi->vsi_num); + + ring = vsi->tx_rings[q_id]; + if (!ring) + return -EINVAL; + + ice_fill_txq_meta(vsi, ring, &txq_meta); + + err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta); + if (err) { + dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n", + q_id, vsi->vsi_num); + return err; + } + + /* Clear enabled queues flag */ + clear_bit(q_id, vf->txq_ena); + + return 0; +} + /** * ice_vc_dis_qs_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer * - * called from the VF to disable all or specific - * queue(s) + * called from the VF to disable all or specific queue(s) */ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) { @@@ -1389,15 -1332,30 +1371,15 @@@ q_map = vqs->tx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { - struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id]; - struct ice_txq_meta txq_meta = { 0 }; - if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; }
- if (!test_bit(vf_q_id, vf->txq_ena)) - dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n", - vf_q_id, vsi->vsi_num); - - ice_fill_txq_meta(vsi, ring, &txq_meta); - - if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, - ring, &txq_meta)) { - dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n", - vf_q_id, vsi->vsi_num); + if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } - - /* Clear enabled queues flag */ - clear_bit(vf_q_id, vf->txq_ena); } }
@@@ -1646,14 -1604,6 +1628,14 @@@ static int ice_vc_cfg_qs_msg(struct ice if (qpi->txq.ring_len > 0) { vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr; vsi->tx_rings[i]->count = qpi->txq.ring_len; + + /* Disable any existing queue first */ + if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + + /* Configure a queue with the requested settings */ if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; @@@ -2392,6 -2342,11 +2374,11 @@@ static int ice_vc_ena_vlan_stripping(st }
vsi = ice_get_vf_vsi(vf); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto error_param; + } + if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q)) v_ret = VIRTCHNL_STATUS_ERR_PARAM;
diff --combined drivers/net/ethernet/mscc/ocelot.c index 20ceac81a2c2,9336f3b00c6e..5f81938c58a9 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@@ -1622,7 -1622,7 +1622,7 @@@ int ocelot_trap_add(struct ocelot *ocel trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; trap->action.port_mask = 0; trap->take_ts = take_ts; - list_add_tail(&trap->trap_list, &ocelot->traps); + trap->is_trap = true; new = true; }
@@@ -1634,8 -1634,10 +1634,8 @@@ err = ocelot_vcap_filter_replace(ocelot, trap); if (err) { trap->ingress_port_mask &= ~BIT(port); - if (!trap->ingress_port_mask) { - list_del(&trap->trap_list); + if (!trap->ingress_port_mask) kfree(trap); - } return err; }
@@@ -1655,8 -1657,11 +1655,8 @@@ int ocelot_trap_del(struct ocelot *ocel return 0;
trap->ingress_port_mask &= ~BIT(port); - if (!trap->ingress_port_mask) { - list_del(&trap->trap_list); - + if (!trap->ingress_port_mask) return ocelot_vcap_filter_del(ocelot, trap); - }
return ocelot_vcap_filter_replace(ocelot, trap); } @@@ -2605,6 -2610,67 +2605,67 @@@ static void ocelot_setup_logical_port_i } }
+ static int ocelot_migrate_mc(struct ocelot *ocelot, struct ocelot_multicast *mc, + unsigned long from_mask, unsigned long to_mask) + { + unsigned char addr[ETH_ALEN]; + struct ocelot_pgid *pgid; + u16 vid = mc->vid; + + dev_dbg(ocelot->dev, + "Migrating multicast %pM vid %d from port mask 0x%lx to 0x%lx\n", + mc->addr, mc->vid, from_mask, to_mask); + + /* First clean up the current port mask from hardware, because + * we'll be modifying it. + */ + ocelot_pgid_free(ocelot, mc->pgid); + ocelot_encode_ports_to_mdb(addr, mc); + ocelot_mact_forget(ocelot, addr, vid); + + mc->ports &= ~from_mask; + mc->ports |= to_mask; + + pgid = ocelot_mdb_get_pgid(ocelot, mc); + if (IS_ERR(pgid)) { + dev_err(ocelot->dev, + "Cannot allocate PGID for mdb %pM vid %d\n", + mc->addr, mc->vid); + devm_kfree(ocelot->dev, mc); + return PTR_ERR(pgid); + } + mc->pgid = pgid; + + ocelot_encode_ports_to_mdb(addr, mc); + + if (mc->entry_type != ENTRYTYPE_MACv4 && + mc->entry_type != ENTRYTYPE_MACv6) + ocelot_write_rix(ocelot, pgid->ports, ANA_PGID_PGID, + pgid->index); + + return ocelot_mact_learn(ocelot, pgid->index, addr, vid, + mc->entry_type); + } + + int ocelot_migrate_mdbs(struct ocelot *ocelot, unsigned long from_mask, + unsigned long to_mask) + { + struct ocelot_multicast *mc; + int err; + + list_for_each_entry(mc, &ocelot->multicast, list) { + if (!(mc->ports & from_mask)) + continue; + + err = ocelot_migrate_mc(ocelot, mc, from_mask, to_mask); + if (err) + return err; + } + + return 0; + } + EXPORT_SYMBOL_GPL(ocelot_migrate_mdbs); + /* Documentation for PORTID_VAL says: * Logical port number for front port. If port is not a member of a LLAG, * then PORTID must be set to the physical port number. @@@ -3223,6 -3289,7 +3284,7 @@@ static void ocelot_detect_features(stru
int ocelot_init(struct ocelot *ocelot) { + const struct ocelot_stat_layout *stat; char queue_name[32]; int i, ret; u32 port; @@@ -3235,6 -3302,10 +3297,10 @@@ } }
+ ocelot->num_stats = 0; + for_each_stat(ocelot, stat) + ocelot->num_stats++; + ocelot->stats = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports * ocelot->num_stats, sizeof(u64), GFP_KERNEL); diff --combined drivers/net/ethernet/mscc/ocelot_vcap.c index eeb4cc07dd16,cdbe29f2ddc7..73cdec5ca6a3 --- a/drivers/net/ethernet/mscc/ocelot_vcap.c +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@@ -374,6 -374,7 +374,6 @@@ static void is2_entry_set(struct ocelo OCELOT_VCAP_BIT_0); vcap_key_set(vcap, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0, ~filter->ingress_port_mask); - vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_FIRST, OCELOT_VCAP_BIT_ANY); vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_HOST_MATCH, OCELOT_VCAP_BIT_ANY); vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L2_MC, filter->dmac_mc); @@@ -671,12 -672,10 +671,10 @@@ static void is1_entry_set(struct ocelo { const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS1]; struct ocelot_vcap_key_vlan *tag = &filter->vlan; - struct ocelot_vcap_u64 payload; struct vcap_data data; int row = ix / 2; u32 type;
- memset(&payload, 0, sizeof(payload)); memset(&data, 0, sizeof(data));
/* Read row */ @@@ -812,11 -811,9 +810,9 @@@ static void es0_entry_set(struct ocelo { const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0]; struct ocelot_vcap_key_vlan *tag = &filter->vlan; - struct ocelot_vcap_u64 payload; struct vcap_data data; int row = ix;
- memset(&payload, 0, sizeof(payload)); memset(&data, 0, sizeof(data));
/* Read row */ @@@ -917,7 -914,7 +913,7 @@@ int ocelot_vcap_policer_add(struct ocel if (!tmp) return -ENOMEM;
- ret = qos_policer_conf_set(ocelot, 0, pol_ix, &pp); + ret = qos_policer_conf_set(ocelot, pol_ix, &pp); if (ret) { kfree(tmp); return ret; @@@ -948,7 -945,7 +944,7 @@@ int ocelot_vcap_policer_del(struct ocel
if (z) { pp.mode = MSCC_QOS_RATE_MODE_DISABLED; - return qos_policer_conf_set(ocelot, 0, pol_ix, &pp); + return qos_policer_conf_set(ocelot, pol_ix, &pp); }
return 0; @@@ -996,8 -993,8 +992,8 @@@ static int ocelot_vcap_filter_add_to_bl struct ocelot_vcap_filter *filter, struct netlink_ext_ack *extack) { + struct list_head *pos = &block->rules; struct ocelot_vcap_filter *tmp; - struct list_head *pos, *n; int ret;
ret = ocelot_vcap_filter_add_aux_resources(ocelot, filter, extack); @@@ -1006,17 -1003,13 +1002,13 @@@
block->count++;
- if (list_empty(&block->rules)) { - list_add(&filter->list, &block->rules); - return 0; - } - - list_for_each_safe(pos, n, &block->rules) { - tmp = list_entry(pos, struct ocelot_vcap_filter, list); - if (filter->prio < tmp->prio) + list_for_each_entry(tmp, &block->rules, list) { + if (filter->prio < tmp->prio) { + pos = &tmp->list; break; + } } - list_add(&filter->list, pos->prev); + list_add_tail(&filter->list, pos);
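The rewritten insertion loop keeps block->rules sorted by priority without the old empty-list special case: pos starts at the list head, the walk stops at the first entry with a strictly higher prio, and list_add_tail() links the new filter right before it, or at the tail when nothing outranks it. The same logic in a runnable userspace sketch over a plain singly linked list, with made-up priorities:

#include <stdio.h>
#include <stdlib.h>

struct rule {
	int prio;
	struct rule *next;
};

static void insert_by_prio(struct rule **head, struct rule *new)
{
	struct rule **pos = head;

	/* Stop at the first entry with a strictly higher priority, so equal
	 * priorities keep their insertion order, as in the ocelot code. */
	while (*pos && (*pos)->prio <= new->prio)
		pos = &(*pos)->next;

	new->next = *pos;
	*pos = new;
}

int main(void)
{
	static const int prios[] = { 30, 10, 20, 10 };
	struct rule *head = NULL;

	for (unsigned int i = 0; i < sizeof(prios) / sizeof(prios[0]); i++) {
		struct rule *r = malloc(sizeof(*r));

		if (!r)
			return EXIT_FAILURE;
		r->prio = prios[i];
		insert_by_prio(&head, r);
	}

	while (head) {
		struct rule *next = head->next;

		printf("prio %d\n", head->prio);
		free(head);
		head = next;
	}
	return 0;
}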
return 0; } @@@ -1216,8 -1209,6 +1208,8 @@@ int ocelot_vcap_filter_add(struct ocelo struct ocelot_vcap_filter *tmp;
tmp = ocelot_vcap_block_find_filter_by_index(block, i); + /* Read back the filter's counters before moving it */ + vcap_entry_get(ocelot, i - 1, tmp); vcap_entry_set(ocelot, i, tmp); }
@@@ -1251,11 -1242,7 +1243,11 @@@ int ocelot_vcap_filter_del(struct ocelo struct ocelot_vcap_filter del_filter; int i, index;
+ /* Need to inherit the block_id so that vcap_entry_set() + * does not get confused and knows where to install it. + */ memset(&del_filter, 0, sizeof(del_filter)); + del_filter.block_id = filter->block_id;
/* Gets index of the filter */ index = ocelot_vcap_block_get_filter_index(block, filter); @@@ -1270,8 -1257,6 +1262,8 @@@ struct ocelot_vcap_filter *tmp;
tmp = ocelot_vcap_block_find_filter_by_index(block, i); + /* Read back the filter's counters before moving it */ + vcap_entry_get(ocelot, i + 1, tmp); vcap_entry_set(ocelot, i, tmp); }
@@@ -1409,22 -1394,18 +1401,18 @@@ static void ocelot_vcap_detect_constant
int ocelot_vcap_init(struct ocelot *ocelot) { - int i; + struct qos_policer_conf cpu_drop = { + .mode = MSCC_QOS_RATE_MODE_DATA, + }; + int ret, i;
/* Create a policer that will drop the frames for the cpu. * This policer will be used as action in the acl rules to drop * frames. */ - ocelot_write_gix(ocelot, 0x299, ANA_POL_MODE_CFG, - OCELOT_POLICER_DISCARD); - ocelot_write_gix(ocelot, 0x1, ANA_POL_PIR_CFG, - OCELOT_POLICER_DISCARD); - ocelot_write_gix(ocelot, 0x3fffff, ANA_POL_PIR_STATE, - OCELOT_POLICER_DISCARD); - ocelot_write_gix(ocelot, 0x0, ANA_POL_CIR_CFG, - OCELOT_POLICER_DISCARD); - ocelot_write_gix(ocelot, 0x3fffff, ANA_POL_CIR_STATE, - OCELOT_POLICER_DISCARD); + ret = qos_policer_conf_set(ocelot, OCELOT_POLICER_DISCARD, &cpu_drop); + if (ret) + return ret;
for (i = 0; i < OCELOT_NUM_VCAP_BLOCKS; i++) { struct ocelot_vcap_block *block = &ocelot->block[i]; diff --combined drivers/net/ethernet/sfc/efx_channels.c index 40df910aa140,3f28f9861dfa..79df636d6df8 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@@ -51,28 -51,7 +51,7 @@@ MODULE_PARM_DESC(irq_adapt_high_thresh */ static int napi_weight = 64;
- /*************** - * Housekeeping - ***************/ - - int efx_channel_dummy_op_int(struct efx_channel *channel) - { - return 0; - } - - void efx_channel_dummy_op_void(struct efx_channel *channel) - { - } - - static const struct efx_channel_type efx_default_channel_type = { - .pre_probe = efx_channel_dummy_op_int, - .post_remove = efx_channel_dummy_op_void, - .get_name = efx_get_channel_name, - .copy = efx_copy_channel, - .want_txqs = efx_default_channel_want_txqs, - .keep_eventq = false, - .want_pio = true, - }; + static const struct efx_channel_type efx_default_channel_type;
/************* * INTERRUPTS @@@ -619,6 -598,7 +598,6 @@@ void efx_fini_channels(struct efx_nic * /* Allocate and initialise a channel structure, copying parameters * (but not resources) from an old channel structure. */ -static struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel) { struct efx_rx_queue *rx_queue; @@@ -696,7 -676,8 +675,8 @@@ fail return rc; }
- void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len) + static void efx_get_channel_name(struct efx_channel *channel, char *buf, + size_t len) { struct efx_nic *efx = channel->efx; const char *type; @@@ -867,9 -848,7 +847,9 @@@ static void efx_set_xdp_channels(struc
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) { - struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; + struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel, + *ptp_channel = efx_ptp_channel(efx); + struct efx_ptp_data *ptp_data = efx->ptp_data; unsigned int i, next_buffer_table = 0; u32 old_rxq_entries, old_txq_entries; int rc, rc2; @@@ -940,7 -919,6 +920,7 @@@
efx_set_xdp_channels(efx); out: + efx->ptp_data = NULL; /* Destroy unused channel structures */ for (i = 0; i < efx->n_channels; i++) { channel = other_channel[i]; @@@ -951,7 -929,6 +931,7 @@@ } }
+ efx->ptp_data = ptp_data; rc2 = efx_soft_enable_interrupts(efx); if (rc2) { rc = rc ? rc : rc2; @@@ -970,7 -947,6 +950,7 @@@ rollback efx->txq_entries = old_txq_entries; for (i = 0; i < efx->n_channels; i++) swap(efx->channel[i], other_channel[i]); + efx_ptp_update_channel(efx, ptp_channel); goto out; }
@@@ -1009,7 -985,7 +989,7 @@@ int efx_set_channels(struct efx_nic *ef return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); }
- bool efx_default_channel_want_txqs(struct efx_channel *channel) + static bool efx_default_channel_want_txqs(struct efx_channel *channel) { return channel->channel - channel->efx->tx_channel_offset < channel->efx->n_tx_channels; @@@ -1340,8 -1316,8 +1320,8 @@@ void efx_init_napi_channel(struct efx_c struct efx_nic *efx = channel->efx;
channel->napi_dev = efx->net_dev; - netif_napi_add(channel->napi_dev, &channel->napi_str, - efx_poll, napi_weight); + netif_napi_add_weight(channel->napi_dev, &channel->napi_str, efx_poll, + napi_weight); }
void efx_init_napi(struct efx_nic *efx) @@@ -1367,3 -1343,26 +1347,26 @@@ void efx_fini_napi(struct efx_nic *efx efx_for_each_channel(channel, efx) efx_fini_napi_channel(channel); } + + /*************** + * Housekeeping + ***************/ + + static int efx_channel_dummy_op_int(struct efx_channel *channel) + { + return 0; + } + + void efx_channel_dummy_op_void(struct efx_channel *channel) + { + } + + static const struct efx_channel_type efx_default_channel_type = { + .pre_probe = efx_channel_dummy_op_int, + .post_remove = efx_channel_dummy_op_void, + .get_name = efx_get_channel_name, + .copy = efx_copy_channel, + .want_txqs = efx_default_channel_want_txqs, + .keep_eventq = false, + .want_pio = true, + }; diff --combined drivers/net/ethernet/sfc/efx_channels.h index d77ec1f77fb1,64abb99a56b8..46b702648721 --- a/drivers/net/ethernet/sfc/efx_channels.h +++ b/drivers/net/ethernet/sfc/efx_channels.h @@@ -32,16 -32,13 +32,14 @@@ void efx_fini_eventq(struct efx_channe void efx_remove_eventq(struct efx_channel *channel);
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries); - void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len); void efx_set_channel_names(struct efx_nic *efx); int efx_init_channels(struct efx_nic *efx); int efx_probe_channels(struct efx_nic *efx); int efx_set_channels(struct efx_nic *efx); - bool efx_default_channel_want_txqs(struct efx_channel *channel); void efx_remove_channel(struct efx_channel *channel); void efx_remove_channels(struct efx_nic *efx); void efx_fini_channels(struct efx_nic *efx); +struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel); void efx_start_channels(struct efx_nic *efx); void efx_stop_channels(struct efx_nic *efx);
@@@ -50,7 -47,6 +48,6 @@@ void efx_init_napi(struct efx_nic *efx) void efx_fini_napi_channel(struct efx_channel *channel); void efx_fini_napi(struct efx_nic *efx);
- int efx_channel_dummy_op_int(struct efx_channel *channel); void efx_channel_dummy_op_void(struct efx_channel *channel);
#endif diff --combined drivers/net/phy/micrel.c index cd9aa353b653,685a0ab5453c..c34a93403d1e --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@@ -32,6 -32,7 +32,7 @@@ #include <linux/ptp_clock.h> #include <linux/ptp_classify.h> #include <linux/net_tstamp.h> + #include <linux/gpio/consumer.h>
/* Operation Mode Strap Override */ #define MII_KSZPHY_OMSO 0x16 @@@ -70,6 -71,27 +71,27 @@@ #define KSZ8081_LMD_SHORT_INDICATOR BIT(12) #define KSZ8081_LMD_DELTA_TIME_MASK GENMASK(8, 0)
+ #define KSZ9x31_LMD 0x12 + #define KSZ9x31_LMD_VCT_EN BIT(15) + #define KSZ9x31_LMD_VCT_DIS_TX BIT(14) + #define KSZ9x31_LMD_VCT_PAIR(n) (((n) & 0x3) << 12) + #define KSZ9x31_LMD_VCT_SEL_RESULT 0 + #define KSZ9x31_LMD_VCT_SEL_THRES_HI BIT(10) + #define KSZ9x31_LMD_VCT_SEL_THRES_LO BIT(11) + #define KSZ9x31_LMD_VCT_SEL_MASK GENMASK(11, 10) + #define KSZ9x31_LMD_VCT_ST_NORMAL 0 + #define KSZ9x31_LMD_VCT_ST_OPEN 1 + #define KSZ9x31_LMD_VCT_ST_SHORT 2 + #define KSZ9x31_LMD_VCT_ST_FAIL 3 + #define KSZ9x31_LMD_VCT_ST_MASK GENMASK(9, 8) + #define KSZ9x31_LMD_VCT_DATA_REFLECTED_INVALID BIT(7) + #define KSZ9x31_LMD_VCT_DATA_SIG_WAIT_TOO_LONG BIT(6) + #define KSZ9x31_LMD_VCT_DATA_MASK100 BIT(5) + #define KSZ9x31_LMD_VCT_DATA_NLP_FLP BIT(4) + #define KSZ9x31_LMD_VCT_DATA_LO_PULSE_MASK GENMASK(3, 2) + #define KSZ9x31_LMD_VCT_DATA_HI_PULSE_MASK GENMASK(1, 0) + #define KSZ9x31_LMD_VCT_DATA_MASK GENMASK(7, 0) + /* Lan8814 general Interrupt control/status reg in GPHY specific block. */ #define LAN8814_INTC 0x18 #define LAN8814_INTS 0x1B @@@ -280,6 -302,7 +302,7 @@@ struct kszphy_priv struct kszphy_ptp_priv ptp_priv; const struct kszphy_type *type; int led_mode; + u16 vct_ctrl1000; bool rmii_ref_clk_sel; bool rmii_ref_clk_sel_val; u64 stats[ARRAY_SIZE(kszphy_hw_stats)]; @@@ -1326,6 -1349,199 +1349,199 @@@ static int ksz9031_read_status(struct p return 0; }
+ static int ksz9x31_cable_test_start(struct phy_device *phydev) + { + struct kszphy_priv *priv = phydev->priv; + int ret; + + /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic + * Prior to running the cable diagnostics, Auto-negotiation should + * be disabled, full duplex set and the link speed set to 1000Mbps + * via the Basic Control Register. + */ + ret = phy_modify(phydev, MII_BMCR, + BMCR_SPEED1000 | BMCR_FULLDPLX | + BMCR_ANENABLE | BMCR_SPEED100, + BMCR_SPEED1000 | BMCR_FULLDPLX); + if (ret) + return ret; + + /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic + * The Master-Slave configuration should be set to Slave by writing + * a value of 0x1000 to the Auto-Negotiation Master Slave Control + * Register. + */ + ret = phy_read(phydev, MII_CTRL1000); + if (ret < 0) + return ret; + + /* Cache these bits, they need to be restored once LinkMD finishes. */ + priv->vct_ctrl1000 = ret & (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER); + ret &= ~(CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER); + ret |= CTL1000_ENABLE_MASTER; + + return phy_write(phydev, MII_CTRL1000, ret); + } + + static int ksz9x31_cable_test_result_trans(u16 status) + { + switch (FIELD_GET(KSZ9x31_LMD_VCT_ST_MASK, status)) { + case KSZ9x31_LMD_VCT_ST_NORMAL: + return ETHTOOL_A_CABLE_RESULT_CODE_OK; + case KSZ9x31_LMD_VCT_ST_OPEN: + return ETHTOOL_A_CABLE_RESULT_CODE_OPEN; + case KSZ9x31_LMD_VCT_ST_SHORT: + return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT; + case KSZ9x31_LMD_VCT_ST_FAIL: + fallthrough; + default: + return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC; + } + } + + static bool ksz9x31_cable_test_failed(u16 status) + { + int stat = FIELD_GET(KSZ9x31_LMD_VCT_ST_MASK, status); + + return stat == KSZ9x31_LMD_VCT_ST_FAIL; + } + + static bool ksz9x31_cable_test_fault_length_valid(u16 status) + { + switch (FIELD_GET(KSZ9x31_LMD_VCT_ST_MASK, status)) { + case KSZ9x31_LMD_VCT_ST_OPEN: + fallthrough; + case KSZ9x31_LMD_VCT_ST_SHORT: + return true; + } + return false; + } + + static int ksz9x31_cable_test_fault_length(struct phy_device *phydev, u16 stat) + { + int dt = FIELD_GET(KSZ9x31_LMD_VCT_DATA_MASK, stat); + + /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic + * + * distance to fault = (VCT_DATA - 22) * 4 / cable propagation velocity + */ + if ((phydev->phy_id & MICREL_PHY_ID_MASK) == PHY_ID_KSZ9131) + dt = clamp(dt - 22, 0, 255); + + return (dt * 400) / 10; + } + + static int ksz9x31_cable_test_wait_for_completion(struct phy_device *phydev) + { + int val, ret; + + ret = phy_read_poll_timeout(phydev, KSZ9x31_LMD, val, + !(val & KSZ9x31_LMD_VCT_EN), + 30000, 100000, true); + + return ret < 0 ? ret : 0; + } + + static int ksz9x31_cable_test_get_pair(int pair) + { + static const int ethtool_pair[] = { + ETHTOOL_A_CABLE_PAIR_A, + ETHTOOL_A_CABLE_PAIR_B, + ETHTOOL_A_CABLE_PAIR_C, + ETHTOOL_A_CABLE_PAIR_D, + }; + + return ethtool_pair[pair]; + } + + static int ksz9x31_cable_test_one_pair(struct phy_device *phydev, int pair) + { + int ret, val; + + /* KSZ9131RNX, DS00002841B-page 38, 4.14 LinkMD (R) Cable Diagnostic + * To test each individual cable pair, set the cable pair in the Cable + * Diagnostics Test Pair (VCT_PAIR[1:0]) field of the LinkMD Cable + * Diagnostic Register, along with setting the Cable Diagnostics Test + * Enable (VCT_EN) bit. The Cable Diagnostics Test Enable (VCT_EN) bit + * will self clear when the test is concluded. 
+ */ + ret = phy_write(phydev, KSZ9x31_LMD, + KSZ9x31_LMD_VCT_EN | KSZ9x31_LMD_VCT_PAIR(pair)); + if (ret) + return ret; + + ret = ksz9x31_cable_test_wait_for_completion(phydev); + if (ret) + return ret; + + val = phy_read(phydev, KSZ9x31_LMD); + if (val < 0) + return val; + + if (ksz9x31_cable_test_failed(val)) + return -EAGAIN; + + ret = ethnl_cable_test_result(phydev, + ksz9x31_cable_test_get_pair(pair), + ksz9x31_cable_test_result_trans(val)); + if (ret) + return ret; + + if (!ksz9x31_cable_test_fault_length_valid(val)) + return 0; + + return ethnl_cable_test_fault_length(phydev, + ksz9x31_cable_test_get_pair(pair), + ksz9x31_cable_test_fault_length(phydev, val)); + } + + static int ksz9x31_cable_test_get_status(struct phy_device *phydev, + bool *finished) + { + struct kszphy_priv *priv = phydev->priv; + unsigned long pair_mask = 0xf; + int retries = 20; + int pair, ret, rv; + + *finished = false; + + /* Try harder if link partner is active */ + while (pair_mask && retries--) { + for_each_set_bit(pair, &pair_mask, 4) { + ret = ksz9x31_cable_test_one_pair(phydev, pair); + if (ret == -EAGAIN) + continue; + if (ret < 0) + return ret; + clear_bit(pair, &pair_mask); + } + /* If link partner is in autonegotiation mode it will send 2ms + * of FLPs with at least 6ms of silence. + * Add 2ms sleep to have better chances to hit this silence. + */ + if (pair_mask) + usleep_range(2000, 3000); + } + + /* Report remaining unfinished pair result as unknown. */ + for_each_set_bit(pair, &pair_mask, 4) { + ret = ethnl_cable_test_result(phydev, + ksz9x31_cable_test_get_pair(pair), + ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC); + } + + *finished = true; + + /* Restore cached bits from before LinkMD got started. */ + rv = phy_modify(phydev, MII_CTRL1000, + CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER, + priv->vct_ctrl1000); + if (rv) + return rv; + + return ret; + } + static int ksz8873mll_config_aneg(struct phy_device *phydev) { return 0; @@@ -1743,7 -1959,7 +1959,7 @@@ static int ksz886x_cable_test_get_statu
static int lanphy_read_page_reg(struct phy_device *phydev, int page, u32 addr) { - u32 data; + int data;
phy_lock_mdio_bus(phydev); __phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page); @@@ -2444,7 -2660,8 +2660,7 @@@ static int lan8804_config_init(struct p
static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev) { - u16 tsu_irq_status; - int irq_status; + int irq_status, tsu_irq_status;
irq_status = phy_read(phydev, LAN8814_INTS); if (irq_status > 0 && (irq_status & LAN8814_INT_LINK)) @@@ -2513,6 -2730,10 +2729,10 @@@ static void lan8814_ptp_init(struct phy struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv; u32 temp;
+ if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) || + !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) + return; + lanphy_write_page_reg(phydev, 5, TSU_HARD_RESET, TSU_HARD_RESET_);
temp = lanphy_read_page_reg(phydev, 5, PTP_TX_MOD); @@@ -2551,6 -2772,10 +2771,10 @@@ static int lan8814_ptp_probe_once(struc { struct lan8814_shared_priv *shared = phydev->shared->priv;
+ if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) || + !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) + return 0; + /* Initialise shared lock for clock*/ mutex_init(&shared->shared_lock);
@@@ -2613,6 -2838,21 +2837,21 @@@ static int lan8814_config_init(struct p return 0; }
+ static int lan8814_release_coma_mode(struct phy_device *phydev) + { + struct gpio_desc *gpiod; + + gpiod = devm_gpiod_get_optional(&phydev->mdio.dev, "coma-mode", + GPIOD_OUT_HIGH_OPEN_DRAIN); + if (IS_ERR(gpiod)) + return PTR_ERR(gpiod); + + gpiod_set_consumer_name(gpiod, "LAN8814 coma mode"); + gpiod_set_value_cansleep(gpiod, 0); + + return 0; + } + static int lan8814_probe(struct phy_device *phydev) { struct kszphy_priv *priv; @@@ -2627,10 -2867,6 +2866,6 @@@
phydev->priv = priv;
- if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) || - !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) - return 0; - /* Strap-in value for PHY address, below register read gives starting * phy address value */ @@@ -2639,6 -2875,10 +2874,10 @@@ addr, sizeof(struct lan8814_shared_priv));
if (phy_package_init_once(phydev)) { + err = lan8814_release_coma_mode(phydev); + if (err) + return err; + err = lan8814_ptp_probe_once(phydev); if (err) return err; @@@ -2656,7 -2896,6 +2895,7 @@@ static struct phy_driver ksphy_driver[ .name = "Micrel KS8737", /* PHY_BASIC_FEATURES */ .driver_data = &ks8737_type, + .probe = kszphy_probe, .config_init = kszphy_config_init, .config_intr = kszphy_config_intr, .handle_interrupt = kszphy_handle_interrupt, @@@ -2782,8 -3021,8 +3021,8 @@@ .config_init = ksz8061_config_init, .config_intr = kszphy_config_intr, .handle_interrupt = kszphy_handle_interrupt, - .suspend = kszphy_suspend, - .resume = kszphy_resume, + .suspend = genphy_suspend, + .resume = genphy_resume, }, { .phy_id = PHY_ID_KSZ9021, .phy_id_mask = 0x000ffffe, @@@ -2806,6 -3045,7 +3045,7 @@@ .phy_id = PHY_ID_KSZ9031, .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ9031 Gigabit PHY", + .flags = PHY_POLL_CABLE_TEST, .driver_data = &ksz9021_type, .probe = kszphy_probe, .get_features = ksz9031_get_features, @@@ -2819,6 -3059,8 +3059,8 @@@ .get_stats = kszphy_get_stats, .suspend = kszphy_suspend, .resume = kszphy_resume, + .cable_test_start = ksz9x31_cable_test_start, + .cable_test_get_status = ksz9x31_cable_test_get_status, }, { .phy_id = PHY_ID_LAN8814, .phy_id_mask = MICREL_PHY_ID_MASK, @@@ -2853,6 -3095,7 +3095,7 @@@ .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Microchip KSZ9131 Gigabit PHY", /* PHY_GBIT_FEATURES */ + .flags = PHY_POLL_CABLE_TEST, .driver_data = &ksz9021_type, .probe = kszphy_probe, .config_init = ksz9131_config_init, @@@ -2863,6 -3106,8 +3106,8 @@@ .get_stats = kszphy_get_stats, .suspend = kszphy_suspend, .resume = kszphy_resume, + .cable_test_start = ksz9x31_cable_test_start, + .cable_test_get_status = ksz9x31_cable_test_get_status, }, { .phy_id = PHY_ID_KSZ8873MLL, .phy_id_mask = MICREL_PHY_ID_MASK, diff --combined drivers/net/phy/phy.c index f122026c4682,9034c6a8e18f..ef62f357b76d --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@@ -295,20 -295,20 +295,20 @@@ int phy_mii_ioctl(struct phy_device *ph if (mdio_phy_id_is_c45(mii_data->phy_id)) { prtad = mdio_phy_id_prtad(mii_data->phy_id); devad = mdio_phy_id_devad(mii_data->phy_id); - devad = mdiobus_c45_addr(devad, mii_data->reg_num); + mii_data->val_out = mdiobus_c45_read( + phydev->mdio.bus, prtad, devad, + mii_data->reg_num); } else { - prtad = mii_data->phy_id; - devad = mii_data->reg_num; + mii_data->val_out = mdiobus_read( + phydev->mdio.bus, mii_data->phy_id, + mii_data->reg_num); } - mii_data->val_out = mdiobus_read(phydev->mdio.bus, prtad, - devad); return 0;
case SIOCSMIIREG: if (mdio_phy_id_is_c45(mii_data->phy_id)) { prtad = mdio_phy_id_prtad(mii_data->phy_id); devad = mdio_phy_id_devad(mii_data->phy_id); - devad = mdiobus_c45_addr(devad, mii_data->reg_num); } else { prtad = mii_data->phy_id; devad = mii_data->reg_num; @@@ -351,7 -351,11 +351,11 @@@ } }
- mdiobus_write(phydev->mdio.bus, prtad, devad, val); + if (mdio_phy_id_is_c45(mii_data->phy_id)) + mdiobus_c45_write(phydev->mdio.bus, prtad, devad, + mii_data->reg_num, val); + else + mdiobus_write(phydev->mdio.bus, prtad, devad, val);
if (prtad == phydev->mdio.addr && devad == MII_BMCR && @@@ -970,13 -974,8 +974,13 @@@ static irqreturn_t phy_interrupt(int ir { struct phy_device *phydev = phy_dat; struct phy_driver *drv = phydev->drv; + irqreturn_t ret;
- return drv->handle_interrupt(phydev); + mutex_lock(&phydev->lock); + ret = drv->handle_interrupt(phydev); + mutex_unlock(&phydev->lock); + + return ret; }
/** diff --combined drivers/net/wireless/mac80211_hwsim.c index e9ec63e0e395,afdf48550588..2f746eb64507 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@@ -2189,7 -2189,7 +2189,7 @@@ mac80211_hwsim_sta_rc_update(struct iee u32 bw = U32_MAX; enum nl80211_chan_width confbw = NL80211_CHAN_WIDTH_20_NOHT;
- switch (sta->bandwidth) { + switch (sta->deflink.bandwidth) { #define C(_bw) case IEEE80211_STA_RX_BW_##_bw: bw = _bw; break C(20); C(40); @@@ -2202,19 -2202,16 +2202,19 @@@ if (!data->use_chanctx) { confbw = data->bw; } else { - struct ieee80211_chanctx_conf *chanctx_conf = - rcu_dereference(vif->chanctx_conf); + struct ieee80211_chanctx_conf *chanctx_conf; + + rcu_read_lock(); + chanctx_conf = rcu_dereference(vif->chanctx_conf);
if (!WARN_ON(!chanctx_conf)) confbw = chanctx_conf->def.width; + rcu_read_unlock(); }
WARN(bw > hwsim_get_chanwidth(confbw), "intf %pM: bad STA %pM bandwidth %d MHz (%d) > channel config %d MHz (%d)\n", - vif->addr, sta->addr, bw, sta->bandwidth, + vif->addr, sta->addr, bw, sta->deflink.bandwidth, hwsim_get_chanwidth(data->bw), data->bw); }
@@@ -2478,13 -2475,11 +2478,13 @@@ static void hw_scan_work(struct work_st if (req->ie_len) skb_put_data(probe, req->ie, req->ie_len);
+ rcu_read_lock(); if (!ieee80211_tx_prepare_skb(hwsim->hw, hwsim->hw_scan_vif, probe, hwsim->tmp_chan->band, NULL)) { + rcu_read_unlock(); kfree_skb(probe); continue; } @@@ -2492,7 -2487,6 +2492,7 @@@ local_bh_disable(); mac80211_hwsim_tx_frame(hwsim->hw, probe, hwsim->tmp_chan); + rcu_read_unlock(); local_bh_enable(); } } diff --combined net/core/skbuff.c index c90c74de90d5,15f7b6f99a8f..bd16e158b366 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@@ -80,7 -80,6 +80,6 @@@ #include <linux/user_namespace.h> #include <linux/indirect_call_wrapper.h>
- #include "datagram.h" #include "sock_destructor.h"
struct kmem_cache *skbuff_head_cache __ro_after_init; @@@ -204,7 -203,7 +203,7 @@@ static void __build_skb_around(struct s skb_set_end_offset(skb, size); skb->mac_header = (typeof(skb->mac_header))~0U; skb->transport_header = (typeof(skb->transport_header))~0U; - + skb->alloc_cpu = raw_smp_processor_id(); /* make sure we initialize shinfo sequentially */ shinfo = skb_shinfo(skb); memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); @@@ -1037,6 -1036,7 +1036,7 @@@ static void __copy_skb_header(struct sk #ifdef CONFIG_NET_RX_BUSY_POLL CHECK_SKB_FIELD(napi_id); #endif + CHECK_SKB_FIELD(alloc_cpu); #ifdef CONFIG_XPS CHECK_SKB_FIELD(sender_cpu); #endif @@@ -1165,7 -1165,7 +1165,7 @@@ void mm_unaccount_pinned_pages(struct m } EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
- struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size) + static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size) { struct ubuf_info *uarg; struct sk_buff *skb; @@@ -1196,7 -1196,6 +1196,6 @@@
return uarg; } - EXPORT_SYMBOL_GPL(msg_zerocopy_alloc);
static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg) { @@@ -1339,18 -1338,11 +1338,11 @@@ void msg_zerocopy_put_abort(struct ubuf } EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort);
- int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len) - { - return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len); - } - EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram); - int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, struct msghdr *msg, int len, struct ubuf_info *uarg) { struct ubuf_info *orig_uarg = skb_zcopy(skb); - struct iov_iter orig_iter = msg->msg_iter; int err, orig_len = skb->len;
/* An skb can only point to one uarg. This edge case happens when @@@ -1364,7 -1356,7 +1356,7 @@@ struct sock *save_sk = skb->sk;
/* Streams do not free skb on error. Reset to prev state. */ - msg->msg_iter = orig_iter; + iov_iter_revert(&msg->msg_iter, skb->len - orig_len); skb->sk = sk; ___pskb_trim(skb, orig_len); skb->sk = save_sk; @@@ -3897,7 -3889,7 +3889,7 @@@ struct sk_buff *skb_segment_list(struc unsigned int delta_len = 0; struct sk_buff *tail = NULL; struct sk_buff *nskb, *tmp; - int err; + int len_diff, err;
skb_push(skb, -skb_network_offset(skb) + offset);
@@@ -3937,11 -3929,9 +3929,11 @@@ skb_push(nskb, -skb_network_offset(nskb) + offset);
skb_release_head_state(nskb); + len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb); __copy_skb_header(nskb, skb);
skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); + nskb->transport_header += len_diff; skb_copy_from_linear_data_offset(skb, -tnl_hlen, nskb->data - tnl_hlen, offset + tnl_hlen); @@@ -5603,7 -5593,7 +5595,7 @@@ err_free } EXPORT_SYMBOL(skb_vlan_untag);
- int skb_ensure_writable(struct sk_buff *skb, int write_len) + int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len) { if (!pskb_may_pull(skb, write_len)) return -ENOMEM; @@@ -6488,3 -6478,51 +6480,51 @@@ free_now } EXPORT_SYMBOL(__skb_ext_put); #endif /* CONFIG_SKB_EXTENSIONS */ + + /** + * skb_attempt_defer_free - queue skb for remote freeing + * @skb: buffer + * + * Put @skb in a per-cpu list, using the cpu which + * allocated the skb/pages to reduce false sharing + * and memory zone spinlock contention. + */ + void skb_attempt_defer_free(struct sk_buff *skb) + { + int cpu = skb->alloc_cpu; + struct softnet_data *sd; + unsigned long flags; + bool kick; + + if (WARN_ON_ONCE(cpu >= nr_cpu_ids) || + !cpu_online(cpu) || + cpu == raw_smp_processor_id()) { + __kfree_skb(skb); + return; + } + + sd = &per_cpu(softnet_data, cpu); + /* We do not send an IPI or any signal. + * Remote cpu will eventually call skb_defer_free_flush() + */ + spin_lock_irqsave(&sd->defer_lock, flags); + skb->next = sd->defer_list; + /* Paired with READ_ONCE() in skb_defer_free_flush() */ + WRITE_ONCE(sd->defer_list, skb); + sd->defer_count++; + + /* kick every time queue length reaches 128. + * This should avoid blocking in smp_call_function_single_async(). + * This condition should hardly be bit under normal conditions, + * unless cpu suddenly stopped to receive NIC interrupts. + */ + kick = sd->defer_count == 128; + + spin_unlock_irqrestore(&sd->defer_lock, flags); + + /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU + * if we are unlucky enough (this seems very unlikely). + */ + if (unlikely(kick)) + smp_call_function_single_async(cpu, &sd->defer_csd); + } diff --combined net/dsa/port.c index bdccb613285d,48e5a309ca5c..ecf0395cbddd --- a/net/dsa/port.c +++ b/net/dsa/port.c @@@ -242,6 -242,59 +242,59 @@@ void dsa_port_disable(struct dsa_port * rtnl_unlock(); }
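[Editor's aside] Returning to the net/core/skbuff.c change above for a moment: skb_attempt_defer_free() hands a buffer back to the CPU that allocated it and only signals that CPU once a batch has built up. The sketch below models that scheme in plain userspace C (pthreads only, compile with -pthread); the structures, the 128-entry batch and the fake kick_cpu() are illustrative stand-ins, not the kernel implementation.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NCPU		2
#define KICK_BATCH	128	/* same threshold the patch uses */

struct buf {
	struct buf *next;
	int alloc_cpu;			/* analogous to the new skb->alloc_cpu */
};

struct defer_queue {
	pthread_mutex_t lock;
	struct buf *head;
	unsigned int count;
};

static struct defer_queue queues[NCPU];

static void kick_cpu(int cpu)
{
	/* stand-in for smp_call_function_single_async() */
	printf("kick cpu %d to flush its defer list\n", cpu);
}

static void attempt_defer_free(struct buf *b, int this_cpu)
{
	int cpu = b->alloc_cpu;
	struct defer_queue *q = &queues[cpu];
	int kick;

	if (cpu == this_cpu) {		/* allocating CPU: just free it */
		free(b);
		return;
	}
	pthread_mutex_lock(&q->lock);
	b->next = q->head;
	q->head = b;
	kick = (++q->count == KICK_BATCH);
	pthread_mutex_unlock(&q->lock);
	if (kick)
		kick_cpu(cpu);
}

int main(void)
{
	int i;

	for (i = 0; i < NCPU; i++)
		pthread_mutex_init(&queues[i].lock, NULL);

	for (i = 0; i < 200; i++) {
		struct buf *b = malloc(sizeof(*b));

		b->alloc_cpu = 0;		/* pretend CPU 0 allocated it... */
		attempt_defer_free(b, 1);	/* ...and CPU 1 wants to free it */
	}

	/* the "remote" CPU eventually flushes its own list */
	while (queues[0].head) {
		struct buf *b = queues[0].head;

		queues[0].head = b->next;
		free(b);
	}
	return 0;
}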
+ static void dsa_port_reset_vlan_filtering(struct dsa_port *dp, + struct dsa_bridge bridge) + { + struct netlink_ext_ack extack = {0}; + bool change_vlan_filtering = false; + struct dsa_switch *ds = dp->ds; + bool vlan_filtering; + int err; + + if (ds->needs_standalone_vlan_filtering && + !br_vlan_enabled(bridge.dev)) { + change_vlan_filtering = true; + vlan_filtering = true; + } else if (!ds->needs_standalone_vlan_filtering && + br_vlan_enabled(bridge.dev)) { + change_vlan_filtering = true; + vlan_filtering = false; + } + + /* If the bridge was vlan_filtering, the bridge core doesn't trigger an + * event for changing vlan_filtering setting upon slave ports leaving + * it. That is a good thing, because that lets us handle it and also + * handle the case where the switch's vlan_filtering setting is global + * (not per port). When that happens, the correct moment to trigger the + * vlan_filtering callback is only when the last port leaves the last + * VLAN-aware bridge. + */ + if (change_vlan_filtering && ds->vlan_filtering_is_global) { + dsa_switch_for_each_port(dp, ds) { + struct net_device *br = dsa_port_bridge_dev_get(dp); + + if (br && br_vlan_enabled(br)) { + change_vlan_filtering = false; + break; + } + } + } + + if (!change_vlan_filtering) + return; + + err = dsa_port_vlan_filtering(dp, vlan_filtering, &extack); + if (extack._msg) { + dev_err(ds->dev, "port %d: %s\n", dp->index, + extack._msg); + } + if (err && err != -EOPNOTSUPP) { + dev_err(ds->dev, + "port %d failed to reset VLAN filtering to %d: %pe\n", + dp->index, vlan_filtering, ERR_PTR(err)); + } + } + static int dsa_port_inherit_brport_flags(struct dsa_port *dp, struct netlink_ext_ack *extack) { @@@ -313,7 -366,8 +366,8 @@@ static int dsa_port_switchdev_sync_attr return 0; }
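[Editor's aside] The comment block in dsa_port_reset_vlan_filtering() above packs a lot of reasoning into a few lines: when the switch's VLAN filtering knob is global rather than per-port, it may only be flipped back to the standalone setting once no port at all is left in a VLAN-aware bridge. A toy, self-contained restatement of that check (all names invented, not the DSA data structures):

#include <stdbool.h>
#include <stdio.h>

struct port {
	bool in_bridge;
	bool bridge_vlan_aware;
};

/* With a switch-global VLAN filtering setting, standalone filtering may
 * only be restored once no port is left in a VLAN-aware bridge. */
static bool can_reset_vlan_filtering(const struct port *ports, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (ports[i].in_bridge && ports[i].bridge_vlan_aware)
			return false;
	return true;
}

int main(void)
{
	struct port ports[] = {
		{ .in_bridge = false },
		{ .in_bridge = true, .bridge_vlan_aware = true },
	};

	printf("reset now? %s\n",
	       can_reset_vlan_filtering(ports, 2) ? "yes" : "no");
	return 0;
}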
- static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp) + static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp, + struct dsa_bridge bridge) { /* Configure the port for standalone mode (no address learning, * flood everything). @@@ -333,7 -387,7 +387,7 @@@ */ dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);
- /* VLAN filtering is handled by dsa_switch_bridge_leave */ + dsa_port_reset_vlan_filtering(dp, bridge);
/* Ageing time may be global to the switch chip, so don't change it * here because we have no good reason (or value) to change it to. @@@ -405,9 -459,7 +459,7 @@@ int dsa_port_bridge_join(struct dsa_por struct netlink_ext_ack *extack) { struct dsa_notifier_bridge_info info = { - .tree_index = dp->ds->dst->index, - .sw_index = dp->ds->index, - .port = dp->index, + .dp = dp, .extack = extack, }; struct net_device *dev = dp->slave; @@@ -451,7 -503,6 +503,7 @@@ out_rollback_unoffload switchdev_bridge_port_unoffload(brport_dev, dp, &dsa_slave_switchdev_notifier, &dsa_slave_switchdev_blocking_notifier); + dsa_flush_workqueue(); out_rollback_unbridge: dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info); out_rollback: @@@ -477,9 -528,7 +529,7 @@@ void dsa_port_pre_bridge_leave(struct d void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br) { struct dsa_notifier_bridge_info info = { - .tree_index = dp->ds->dst->index, - .sw_index = dp->ds->index, - .port = dp->index, + .dp = dp, }; int err;
@@@ -502,15 -551,14 +552,14 @@@ "port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n", dp->index, ERR_PTR(err));
- dsa_port_switchdev_unsync_attrs(dp); + dsa_port_switchdev_unsync_attrs(dp, info.bridge); }
int dsa_port_lag_change(struct dsa_port *dp, struct netdev_lag_lower_state_info *linfo) { struct dsa_notifier_lag_info info = { - .sw_index = dp->ds->index, - .port = dp->index, + .dp = dp, }; bool tx_enabled;
@@@ -579,8 -627,7 +628,7 @@@ int dsa_port_lag_join(struct dsa_port * struct netlink_ext_ack *extack) { struct dsa_notifier_lag_info info = { - .sw_index = dp->ds->index, - .port = dp->index, + .dp = dp, .info = uinfo, }; struct net_device *bridge_dev; @@@ -625,8 -672,7 +673,7 @@@ void dsa_port_lag_leave(struct dsa_por { struct net_device *br = dsa_port_bridge_dev_get(dp); struct dsa_notifier_lag_info info = { - .sw_index = dp->ds->index, - .port = dp->index, + .dp = dp, }; int err;
@@@ -884,13 -930,10 +931,10 @@@ int dsa_port_vlan_msti(struct dsa_port return ds->ops->vlan_msti_set(ds, *dp->bridge, msti); }
- int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu, - bool targeted_match) + int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu) { struct dsa_notifier_mtu_info info = { - .sw_index = dp->ds->index, - .targeted_match = targeted_match, - .port = dp->index, + .dp = dp, .mtu = new_mtu, };
@@@ -901,8 -944,7 +945,7 @@@ int dsa_port_fdb_add(struct dsa_port *d u16 vid) { struct dsa_notifier_fdb_info info = { - .sw_index = dp->ds->index, - .port = dp->index, + .dp = dp, .addr = addr, .vid = vid, .db = { @@@ -925,8 -967,7 +968,7 @@@ int dsa_port_fdb_del(struct dsa_port *d u16 vid) { struct dsa_notifier_fdb_info info = { - .sw_index = dp->ds->index, - .port = dp->index, + .dp = dp, .addr = addr, .vid = vid, .db = { @@@ -946,8 -987,7 +988,7 @@@ static int dsa_port_host_fdb_add(struc struct dsa_db db) { struct dsa_notifier_fdb_info info = { - .sw_index = dp->ds->index, - .port = dp->index, + .dp = dp, .addr = addr, .vid = vid, .db = db, @@@ -998,8 -1038,7 +1039,7 @@@ static int dsa_port_host_fdb_del(struc struct dsa_db db) { struct dsa_notifier_fdb_info info = { - .sw_index = dp->ds->index, - .port = dp->index, + .dp = dp, .addr = addr, .vid = vid, .db = db, @@@ -1094,8 -1133,7 +1134,7 @@@ int dsa_port_mdb_add(const struct dsa_p const struct switchdev_obj_port_mdb *mdb) { struct dsa_notifier_mdb_info info = { - .sw_index = dp->ds->index, - .port = dp->index, + .dp = dp, .mdb = mdb, .db = { .type = DSA_DB_BRIDGE, @@@ -1113,8 -1151,7 +1152,7 @@@ int dsa_port_mdb_del(const struct dsa_p const struct switchdev_obj_port_mdb *mdb) { struct dsa_notifier_mdb_info info = { - .sw_index = dp->ds->index, - .port = dp->index, + .dp = dp, .mdb = mdb, .db = { .type = DSA_DB_BRIDGE, @@@ -1133,8 -1170,7 +1171,7 @@@ static int dsa_port_host_mdb_add(const struct dsa_db db) { struct dsa_notifier_mdb_info info = { - .sw_index = dp->ds->index, - .port = dp->index, + .dp = dp, .mdb = mdb, .db = db, }; @@@ -1178,8 -1214,7 +1215,7 @@@ static int dsa_port_host_mdb_del(const struct dsa_db db) { struct dsa_notifier_mdb_info info = { - .sw_index = dp->ds->index, - .port = dp->index, + .dp = dp, .mdb = mdb, .db = db, }; @@@ -1223,8 -1258,7 +1259,7 @@@ int dsa_port_vlan_add(struct dsa_port * struct netlink_ext_ack *extack) { struct dsa_notifier_vlan_info info = { - .sw_index = dp->ds->index, - .port = dp->index, + .dp = dp, .vlan = vlan, .extack = extack, }; @@@ -1236,8 -1270,7 +1271,7 @@@ int dsa_port_vlan_del(struct dsa_port * const struct switchdev_obj_port_vlan *vlan) { struct dsa_notifier_vlan_info info = { - .sw_index = dp->ds->index, - .port = dp->index, + .dp = dp, .vlan = vlan, };
@@@ -1249,8 -1282,7 +1283,7 @@@ int dsa_port_host_vlan_add(struct dsa_p struct netlink_ext_ack *extack) { struct dsa_notifier_vlan_info info = { - .sw_index = dp->ds->index, - .port = dp->index, + .dp = dp, .vlan = vlan, .extack = extack, }; @@@ -1270,8 -1302,7 +1303,7 @@@ int dsa_port_host_vlan_del(struct dsa_p const struct switchdev_obj_port_vlan *vlan) { struct dsa_notifier_vlan_info info = { - .sw_index = dp->ds->index, - .port = dp->index, + .dp = dp, .vlan = vlan, }; struct dsa_port *cpu_dp = dp->cpu_dp; @@@ -1692,9 -1723,7 +1724,7 @@@ void dsa_port_hsr_leave(struct dsa_por int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast) { struct dsa_notifier_tag_8021q_vlan_info info = { - .tree_index = dp->ds->dst->index, - .sw_index = dp->ds->index, - .port = dp->index, + .dp = dp, .vid = vid, };
@@@ -1707,9 -1736,7 +1737,7 @@@ void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast) { struct dsa_notifier_tag_8021q_vlan_info info = { - .tree_index = dp->ds->dst->index, - .sw_index = dp->ds->index, - .port = dp->index, + .dp = dp, .vid = vid, }; int err; diff --combined net/ipv4/ping.c index aa9a11b20d18,5f8cad2978b3..1a43ca73f94d --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@@ -305,7 -305,6 +305,7 @@@ static int ping_check_bind_addr(struct struct net *net = sock_net(sk); if (sk->sk_family == AF_INET) { struct sockaddr_in *addr = (struct sockaddr_in *) uaddr; + u32 tb_id = RT_TABLE_LOCAL; int chk_addr_ret;
if (addr_len < sizeof(*addr)) @@@ -319,8 -318,7 +319,8 @@@ pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n", sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
- chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr); + tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id; + chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
if (!inet_addr_valid_or_nonlocal(net, inet_sk(sk), addr->sin_addr.s_addr, @@@ -357,14 -355,6 +357,14 @@@ return -ENODEV; } } + + if (!dev && sk->sk_bound_dev_if) { + dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if); + if (!dev) { + rcu_read_unlock(); + return -ENODEV; + } + } has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev, scoped); rcu_read_unlock(); @@@ -600,7 -590,7 +600,7 @@@ EXPORT_SYMBOL_GPL(ping_err) int ping_getfrag(void *from, char *to, int offset, int fraglen, int odd, struct sk_buff *skb) { - struct pingfakehdr *pfh = (struct pingfakehdr *)from; + struct pingfakehdr *pfh = from;
if (offset == 0) { fraglen -= sizeof(struct icmphdr); @@@ -854,8 -844,8 +854,8 @@@ do_confirm goto out; }
- int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, - int flags, int *addr_len) + int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, + int *addr_len) { struct inet_sock *isk = inet_sk(sk); int family = sk->sk_family; @@@ -871,7 -861,7 +871,7 @@@ if (flags & MSG_ERRQUEUE) return inet_recv_error(sk, msg, len, addr_len);
- skb = skb_recv_datagram(sk, flags, noblock, &err); + skb = skb_recv_datagram(sk, flags, &err); if (!skb) goto out;
@@@ -944,16 -934,24 +944,24 @@@ out } EXPORT_SYMBOL_GPL(ping_recvmsg);
- int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) + static enum skb_drop_reason __ping_queue_rcv_skb(struct sock *sk, + struct sk_buff *skb) { + enum skb_drop_reason reason; + pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n", inet_sk(sk), inet_sk(sk)->inet_num, skb); - if (sock_queue_rcv_skb(sk, skb) < 0) { - kfree_skb(skb); + if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) { + kfree_skb_reason(skb, reason); pr_debug("ping_queue_rcv_skb -> failed\n"); - return -1; + return reason; } - return 0; + return SKB_NOT_DROPPED_YET; + } + + int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) + { + return __ping_queue_rcv_skb(sk, skb) ? -1 : 0; } EXPORT_SYMBOL_GPL(ping_queue_rcv_skb);
@@@ -962,12 -960,12 +970,12 @@@ * All we need to do is get the socket. */
- bool ping_rcv(struct sk_buff *skb) + enum skb_drop_reason ping_rcv(struct sk_buff *skb) { + enum skb_drop_reason reason = SKB_DROP_REASON_NO_SOCKET; struct sock *sk; struct net *net = dev_net(skb->dev); struct icmphdr *icmph = icmp_hdr(skb); - bool rc = false;
/* We assume the packet has already been checked by icmp_rcv */
@@@ -982,15 -980,17 +990,17 @@@ struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
pr_debug("rcv on socket %p\n", sk); - if (skb2 && !ping_queue_rcv_skb(sk, skb2)) - rc = true; + if (skb2) + reason = __ping_queue_rcv_skb(sk, skb2); + else + reason = SKB_DROP_REASON_NOMEM; sock_put(sk); }
- if (!rc) + if (reason) pr_debug("no socket, dropping\n");
- return rc; + return reason; } EXPORT_SYMBOL_GPL(ping_rcv);
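[Editor's aside] The ping.c rework above follows a pattern that recurs throughout the drop-reason conversion: the internal helper reports why a packet was dropped via enum skb_drop_reason (where 0 means "not dropped"), while the exported wrapper keeps the legacy 0/-1 return convention for existing callers. A compact standalone sketch of that split, with invented names:

#include <stdio.h>

enum drop_reason {
	NOT_DROPPED_YET = 0,	/* success must stay 0 so 'if (reason)' works */
	DROP_NO_SOCKET,
	DROP_NOMEM,
};

static enum drop_reason __queue_packet(int have_socket, int have_memory)
{
	if (!have_socket)
		return DROP_NO_SOCKET;
	if (!have_memory)
		return DROP_NOMEM;
	return NOT_DROPPED_YET;
}

/* legacy wrapper: callers only care about success vs. failure */
static int queue_packet(int have_socket, int have_memory)
{
	return __queue_packet(have_socket, have_memory) ? -1 : 0;
}

int main(void)
{
	printf("queued: %d, reason when no socket: %d\n",
	       queue_packet(1, 1), __queue_packet(0, 1));
	return 0;
}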
diff --combined net/ipv4/route.c index 57abd27e842c,ffbe2e4f8c89..444d4a2a422d --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@@ -503,28 -503,29 +503,29 @@@ static void ip_rt_fix_tos(struct flowi __u8 tos = RT_FL_TOS(fl4);
fl4->flowi4_tos = tos & IPTOS_RT_MASK; - fl4->flowi4_scope = tos & RTO_ONLINK ? - RT_SCOPE_LINK : RT_SCOPE_UNIVERSE; + if (tos & RTO_ONLINK) + fl4->flowi4_scope = RT_SCOPE_LINK; }
static void __build_flow_key(const struct net *net, struct flowi4 *fl4, - const struct sock *sk, - const struct iphdr *iph, - int oif, u8 tos, - u8 prot, u32 mark, int flow_flags) + const struct sock *sk, const struct iphdr *iph, + int oif, __u8 tos, u8 prot, u32 mark, + int flow_flags) { + __u8 scope = RT_SCOPE_UNIVERSE; + if (sk) { const struct inet_sock *inet = inet_sk(sk);
oif = sk->sk_bound_dev_if; mark = sk->sk_mark; - tos = RT_CONN_FLAGS(sk); + tos = ip_sock_rt_tos(sk); + scope = ip_sock_rt_scope(sk); prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol; } - flowi4_init_output(fl4, oif, mark, tos, - RT_SCOPE_UNIVERSE, prot, - flow_flags, - iph->daddr, iph->saddr, 0, 0, + + flowi4_init_output(fl4, oif, mark, tos & IPTOS_RT_MASK, scope, + prot, flow_flags, iph->daddr, iph->saddr, 0, 0, sock_net_uid(net, sk)); }
@@@ -534,9 -535,9 +535,9 @@@ static void build_skb_flow_key(struct f const struct net *net = dev_net(skb->dev); const struct iphdr *iph = ip_hdr(skb); int oif = skb->dev->ifindex; - u8 tos = RT_TOS(iph->tos); u8 prot = iph->protocol; u32 mark = skb->mark; + __u8 tos = iph->tos;
__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0); } @@@ -552,7 -553,8 +553,8 @@@ static void build_sk_flow_key(struct fl if (inet_opt && inet_opt->opt.srr) daddr = inet_opt->opt.faddr; flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, - RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, + ip_sock_rt_tos(sk) & IPTOS_RT_MASK, + ip_sock_rt_scope(sk), inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, inet_sk_flowi_flags(sk), daddr, inet->inet_saddr, 0, 0, sk->sk_uid); @@@ -825,14 -827,13 +827,13 @@@ static void ip_do_redirect(struct dst_e const struct iphdr *iph = (const struct iphdr *) skb->data; struct net *net = dev_net(skb->dev); int oif = skb->dev->ifindex; - u8 tos = RT_TOS(iph->tos); u8 prot = iph->protocol; u32 mark = skb->mark; + __u8 tos = iph->tos;
rt = (struct rtable *) dst;
__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0); - ip_rt_fix_tos(&fl4); __ip_do_redirect(rt, skb, &fl4, true); }
@@@ -945,6 -946,7 +946,7 @@@ static int ip_error(struct sk_buff *skb struct inet_peer *peer; unsigned long now; struct net *net; + SKB_DR(reason); bool send; int code;
@@@ -964,10 -966,12 +966,12 @@@ if (!IN_DEV_FORWARD(in_dev)) { switch (rt->dst.error) { case EHOSTUNREACH: + SKB_DR_SET(reason, IP_INADDRERRORS); __IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS); break;
case ENETUNREACH: + SKB_DR_SET(reason, IP_INNOROUTES); __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES); break; } @@@ -983,6 -987,7 +987,7 @@@ break; case ENETUNREACH: code = ICMP_NET_UNREACH; + SKB_DR_SET(reason, IP_INNOROUTES); __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES); break; case EACCES: @@@ -1009,7 -1014,7 +1014,7 @@@ if (send) icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
- out: kfree_skb(skb); + out: kfree_skb_reason(skb, reason); return 0; }
@@@ -1057,7 -1062,6 +1062,6 @@@ static void ip_rt_update_pmtu(struct ds struct flowi4 fl4;
ip_rt_build_flow_key(&fl4, sk, skb); - ip_rt_fix_tos(&fl4);
/* Don't make lookup fail for bridged encapsulations */ if (skb && netif_is_any_bridge_port(skb->dev)) @@@ -1074,8 -1078,8 +1078,8 @@@ void ipv4_update_pmtu(struct sk_buff *s struct rtable *rt; u32 mark = IP4_REPLY_MARK(net, skb->mark);
- __build_flow_key(net, &fl4, NULL, iph, oif, - RT_TOS(iph->tos), protocol, mark, 0); + __build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, mark, + 0); rt = __ip_route_output_key(net, &fl4); if (!IS_ERR(rt)) { __ip_rt_update_pmtu(rt, &fl4, mtu); @@@ -1132,8 -1136,6 +1136,6 @@@ void ipv4_sk_update_pmtu(struct sk_buf goto out;
new = true; - } else { - ip_rt_fix_tos(&fl4); }
__ip_rt_update_pmtu((struct rtable *)xfrm_dst_path(&rt->dst), &fl4, mtu); @@@ -1165,8 -1167,7 +1167,7 @@@ void ipv4_redirect(struct sk_buff *skb struct flowi4 fl4; struct rtable *rt;
- __build_flow_key(net, &fl4, NULL, iph, oif, - RT_TOS(iph->tos), protocol, 0, 0); + __build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, 0, 0); rt = __ip_route_output_key(net, &fl4); if (!IS_ERR(rt)) { __ip_do_redirect(rt, skb, &fl4, false); @@@ -1753,7 -1754,6 +1754,7 @@@ static int ip_route_input_mc(struct sk_ #endif RT_CACHE_STAT_INC(in_slow_mc);
+ skb_dst_drop(skb); skb_dst_set(skb, &rth->dst); return 0; } @@@ -3395,7 -3395,7 +3396,7 @@@ static int inet_rtm_getroute(struct sk_ fri.tb_id = table_id; fri.dst = res.prefix; fri.dst_len = res.prefixlen; - fri.tos = fl4.flowi4_tos; + fri.dscp = inet_dsfield_to_dscp(fl4.flowi4_tos); fri.type = rt->rt_type; fri.offload = 0; fri.trap = 0; @@@ -3408,7 -3408,7 +3409,7 @@@
if (fa->fa_slen == slen && fa->tb_id == fri.tb_id && - fa->fa_dscp == inet_dsfield_to_dscp(fri.tos) && + fa->fa_dscp == fri.dscp && fa->fa_info == res.fi && fa->fa_type == fri.type) { fri.offload = READ_ONCE(fa->offload); diff --combined net/mac80211/mlme.c index dc8aec1a5d3d,b857915881e0..07a96f7c5dc3 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@@ -3342,7 -3342,7 +3342,7 @@@ static bool ieee80211_twt_req_supported if (!(elems->ext_capab[9] & WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT)) return false;
- return sta->sta.he_cap.he_cap_elem.mac_cap_info[0] & + return sta->sta.deflink.he_cap.he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_RES; }
@@@ -3369,7 -3369,7 +3369,7 @@@ static bool ieee80211_twt_bcast_support ieee80211_vif_type_p2p(&sdata->vif));
return bss_conf->he_support && - (sta->sta.he_cap.he_cap_elem.mac_cap_info[2] & + (sta->sta.deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BCAST_TWT) && own_he_cap && (own_he_cap->he_cap_elem.mac_cap_info[2] & @@@ -3587,7 -3587,7 +3587,7 @@@ static bool ieee80211_assoc_success(str elems->he_6ghz_capa, sta);
- bss_conf->he_support = sta->sta.he_cap.has_he; + bss_conf->he_support = sta->sta.deflink.he_cap.has_he; if (elems->rsnx && elems->rsnx_len && (elems->rsnx[0] & WLAN_RSNX_CAPA_PROTECTED_TWT) && wiphy_ext_feature_isset(local->hw.wiphy, @@@ -3607,7 -3607,7 +3607,7 @@@ elems->eht_cap_len, sta);
- bss_conf->eht_support = sta->sta.eht_cap.has_eht; + bss_conf->eht_support = sta->sta.deflink.eht_cap.has_eht; } else { bss_conf->eht_support = false; } @@@ -3657,12 -3657,6 +3657,12 @@@ cbss->transmitted_bss->bssid); bss_conf->bssid_indicator = cbss->max_bssid_indicator; bss_conf->bssid_index = cbss->bssid_index; + } else { + bss_conf->nontransmitted = false; + memset(bss_conf->transmitter_bssid, 0, + sizeof(bss_conf->transmitter_bssid)); + bss_conf->bssid_indicator = 0; + bss_conf->bssid_index = 0; }
/* @@@ -3684,7 -3678,7 +3684,7 @@@ nss = *elems->opmode_notif & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK; nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT; nss += 1; - sta->sta.rx_nss = nss; + sta->sta.deflink.rx_nss = nss; }
rate_control_rate_init(sta); @@@ -4842,9 -4836,9 +4842,9 @@@ static void ieee80211_sta_conn_mon_time if (!sta) return;
- timeout = sta->status_stats.last_ack; - if (time_before(sta->status_stats.last_ack, sta->rx_stats.last_rx)) - timeout = sta->rx_stats.last_rx; + timeout = sta->deflink.status_stats.last_ack; + if (time_before(sta->deflink.status_stats.last_ack, sta->deflink.rx_stats.last_rx)) + timeout = sta->deflink.rx_stats.last_rx; timeout += IEEE80211_CONNECTION_IDLE_TIME;
/* If timeout is after now, then update timer to fire at @@@ -5644,7 -5638,7 +5644,7 @@@ static int ieee80211_prep_connection(st }
if (rates) - new_sta->sta.supp_rates[cbss->channel->band] = rates; + new_sta->sta.deflink.supp_rates[cbss->channel->band] = rates; else sdata_info(sdata, "No rates found, keeping mandatory only\n"); diff --combined net/mac80211/rx.c index 88d797fa82ff,959a36fd658b..3c08ae04ddbc --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@@ -221,7 -221,7 +221,7 @@@ static void __ieee80211_queue_skb_to_if skb_queue_tail(&sdata->skb_queue, skb); ieee80211_queue_work(&sdata->local->hw, &sdata->work); if (sta) - sta->rx_stats.packets++; + sta->deflink.rx_stats.packets++; }
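[Editor's aside] Most of the mac80211 churn in this merge (mlme.c above, rx.c below) is mechanical: per-link station state such as bandwidth, HT/HE capabilities and rx statistics moves out of struct ieee80211_sta itself into a link sub-structure, with deflink acting as the default (and currently only) link, in preparation for multi-link operation. A toy layout showing why the accessors change from sta->x to sta->deflink.x (names simplified, not the real structures):

#include <stdio.h>

struct link_sta {
	int bandwidth;			/* per-link state */
	unsigned long rx_packets;
};

struct sta {
	unsigned char addr[6];		/* per-station state stays here */
	struct link_sta deflink;	/* default/first link, embedded */
	struct link_sta *link[16];	/* later: one pointer per active link */
};

int main(void)
{
	struct sta s = { .deflink = { .bandwidth = 80 } };

	s.link[0] = &s.deflink;		/* link 0 simply aliases deflink */
	s.deflink.rx_packets++;

	printf("bw=%d MHz, rx=%lu\n",
	       s.link[0]->bandwidth, s.link[0]->rx_packets);
	return 0;
}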
static void ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata, @@@ -1405,7 -1405,8 +1405,7 @@@ static void ieee80211_rx_reorder_ampdu( goto dont_reorder;
/* not part of a BA session */ - if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK && - ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL) + if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_NOACK) goto dont_reorder;
/* new, potentially un-ordered, ampdu frame - process it */ @@@ -1464,7 -1465,7 +1464,7 @@@ ieee80211_rx_h_check_dup(struct ieee802 if (unlikely(ieee80211_has_retry(hdr->frame_control) && rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) { I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount); - rx->sta->rx_stats.num_duplicates++; + rx->sta->deflink.rx_stats.num_duplicates++; return RX_DROP_UNUSABLE; } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) { rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl; @@@ -1760,46 -1761,47 +1760,47 @@@ ieee80211_rx_h_sta_process(struct ieee8 NL80211_IFTYPE_ADHOC); if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) && test_sta_flag(sta, WLAN_STA_AUTHORIZED)) { - sta->rx_stats.last_rx = jiffies; + sta->deflink.rx_stats.last_rx = jiffies; if (ieee80211_is_data(hdr->frame_control) && !is_multicast_ether_addr(hdr->addr1)) - sta->rx_stats.last_rate = + sta->deflink.rx_stats.last_rate = sta_stats_encode_rate(status); } } else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) { - sta->rx_stats.last_rx = jiffies; + sta->deflink.rx_stats.last_rx = jiffies; } else if (!ieee80211_is_s1g_beacon(hdr->frame_control) && !is_multicast_ether_addr(hdr->addr1)) { /* * Mesh beacons will update last_rx when if they are found to * match the current local configuration when processed. */ - sta->rx_stats.last_rx = jiffies; + sta->deflink.rx_stats.last_rx = jiffies; if (ieee80211_is_data(hdr->frame_control)) - sta->rx_stats.last_rate = sta_stats_encode_rate(status); + sta->deflink.rx_stats.last_rate = sta_stats_encode_rate(status); }
- sta->rx_stats.fragments++; + sta->deflink.rx_stats.fragments++;
- u64_stats_update_begin(&rx->sta->rx_stats.syncp); - sta->rx_stats.bytes += rx->skb->len; - u64_stats_update_end(&rx->sta->rx_stats.syncp); + u64_stats_update_begin(&rx->sta->deflink.rx_stats.syncp); + sta->deflink.rx_stats.bytes += rx->skb->len; + u64_stats_update_end(&rx->sta->deflink.rx_stats.syncp);
if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { - sta->rx_stats.last_signal = status->signal; - ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal); + sta->deflink.rx_stats.last_signal = status->signal; + ewma_signal_add(&sta->deflink.rx_stats_avg.signal, + -status->signal); }
if (status->chains) { - sta->rx_stats.chains = status->chains; + sta->deflink.rx_stats.chains = status->chains; for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) { int signal = status->chain_signal[i];
if (!(status->chains & BIT(i))) continue;
- sta->rx_stats.chain_signal_last[i] = signal; - ewma_signal_add(&sta->rx_stats_avg.chain_signal[i], + sta->deflink.rx_stats.chain_signal_last[i] = signal; + ewma_signal_add(&sta->deflink.rx_stats_avg.chain_signal[i], -signal); } } @@@ -1860,7 -1862,7 +1861,7 @@@ * Update counter and free packet here to avoid * counting this as a dropped packed. */ - sta->rx_stats.packets++; + sta->deflink.rx_stats.packets++; dev_kfree_skb(rx->skb); return RX_QUEUED; } @@@ -1892,11 -1894,11 +1893,11 @@@ ieee80211_rx_get_bigtk(struct ieee80211 }
if (rx->sta) - key = rcu_dereference(rx->sta->gtk[idx]); + key = rcu_dereference(rx->sta->deflink.gtk[idx]); if (!key) key = rcu_dereference(sdata->keys[idx]); if (!key && rx->sta) - key = rcu_dereference(rx->sta->gtk[idx2]); + key = rcu_dereference(rx->sta->deflink.gtk[idx2]); if (!key) key = rcu_dereference(sdata->keys[idx2]);
@@@ -2011,7 -2013,7 +2012,7 @@@ ieee80211_rx_h_decrypt(struct ieee80211 test_sta_flag(rx->sta, WLAN_STA_MFP)) return RX_DROP_MONITOR;
- rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]); + rx->key = rcu_dereference(rx->sta->deflink.gtk[mmie_keyidx]); } if (!rx->key) rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); @@@ -2034,7 -2036,7 +2035,7 @@@ } else { if (rx->sta) { for (i = 0; i < NUM_DEFAULT_KEYS; i++) { - key = rcu_dereference(rx->sta->gtk[i]); + key = rcu_dereference(rx->sta->deflink.gtk[i]); if (key) break; } @@@ -2071,7 -2073,7 +2072,7 @@@
/* check per-station GTK first, if multicast packet */ if (is_multicast_ether_addr(hdr->addr1) && rx->sta) - rx->key = rcu_dereference(rx->sta->gtk[keyidx]); + rx->key = rcu_dereference(rx->sta->deflink.gtk[keyidx]);
/* if not found, try default key */ if (!rx->key) { @@@ -2397,7 -2399,7 +2398,7 @@@ ieee80211_rx_h_defragment(struct ieee80 out: ieee80211_led_rx(rx->local); if (rx->sta) - rx->sta->rx_stats.packets++; + rx->sta->deflink.rx_stats.packets++; return RX_CONTINUE; }
@@@ -2644,9 -2646,9 +2645,9 @@@ ieee80211_deliver_skb(struct ieee80211_ * for non-QoS-data frames. Here we know it's a data * frame, so count MSDUs. */ - u64_stats_update_begin(&rx->sta->rx_stats.syncp); - rx->sta->rx_stats.msdu[rx->seqno_idx]++; - u64_stats_update_end(&rx->sta->rx_stats.syncp); + u64_stats_update_begin(&rx->sta->deflink.rx_stats.syncp); + rx->sta->deflink.rx_stats.msdu[rx->seqno_idx]++; + u64_stats_update_end(&rx->sta->deflink.rx_stats.syncp); }
if ((sdata->vif.type == NL80211_IFTYPE_AP || @@@ -3177,6 -3179,49 +3178,49 @@@ static void ieee80211_process_sa_query_ ieee80211_tx_skb(sdata, skb); }
+ static void + ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx) + { + struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; + const struct element *ie; + size_t baselen; + + if (!wiphy_ext_feature_isset(rx->local->hw.wiphy, + NL80211_EXT_FEATURE_BSS_COLOR)) + return; + + if (ieee80211_hw_check(&rx->local->hw, DETECTS_COLOR_COLLISION)) + return; + + if (rx->sdata->vif.csa_active) + return; + + baselen = mgmt->u.beacon.variable - rx->skb->data; + if (baselen > rx->skb->len) + return; + + ie = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, + mgmt->u.beacon.variable, + rx->skb->len - baselen); + if (ie && ie->datalen >= sizeof(struct ieee80211_he_operation) && + ie->datalen >= ieee80211_he_oper_size(ie->data + 1)) { + struct ieee80211_bss_conf *bss_conf = &rx->sdata->vif.bss_conf; + const struct ieee80211_he_operation *he_oper; + u8 color; + + he_oper = (void *)(ie->data + 1); + if (le32_get_bits(he_oper->he_oper_params, + IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED)) + return; + + color = le32_get_bits(he_oper->he_oper_params, + IEEE80211_HE_OPERATION_BSS_COLOR_MASK); + if (color == bss_conf->he_bss_color.color) + ieeee80211_obss_color_collision_notify(&rx->sdata->vif, + BIT_ULL(color)); + } + } + static ieee80211_rx_result debug_noinline ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) { @@@ -3202,6 -3247,9 +3246,9 @@@ !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { int sig = 0;
+ /* sw bss color collision detection */ + ieee80211_rx_check_bss_color_collision(rx); + if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) && !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) sig = status->signal; @@@ -3295,7 -3343,7 +3342,7 @@@ ieee80211_rx_h_action(struct ieee80211_ switch (mgmt->u.action.category) { case WLAN_CATEGORY_HT: /* reject HT action frames from stations not supporting HT */ - if (!rx->sta->sta.ht_cap.ht_supported) + if (!rx->sta->sta.deflink.ht_cap.ht_supported) goto invalid;
if (sdata->vif.type != NL80211_IFTYPE_STATION && @@@ -3359,7 -3407,7 +3406,7 @@@ struct sta_opmode_info sta_opmode = {};
/* If it doesn't support 40 MHz it can't change ... */ - if (!(rx->sta->sta.ht_cap.cap & + if (!(rx->sta->sta.deflink.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) goto handled;
@@@ -3369,13 -3417,13 +3416,13 @@@ max_bw = ieee80211_sta_cap_rx_bw(rx->sta);
/* set cur_max_bandwidth and recalc sta bw */ - rx->sta->cur_max_bandwidth = max_bw; + rx->sta->deflink.cur_max_bandwidth = max_bw; new_bw = ieee80211_sta_cur_vht_bw(rx->sta);
- if (rx->sta->sta.bandwidth == new_bw) + if (rx->sta->sta.deflink.bandwidth == new_bw) goto handled;
- rx->sta->sta.bandwidth = new_bw; + rx->sta->sta.deflink.bandwidth = new_bw; sband = rx->local->hw.wiphy->bands[status->band]; sta_opmode.bw = ieee80211_sta_rx_bw_to_chan_width(rx->sta); @@@ -3572,7 -3620,7 +3619,7 @@@
handled: if (rx->sta) - rx->sta->rx_stats.packets++; + rx->sta->deflink.rx_stats.packets++; dev_kfree_skb(rx->skb); return RX_QUEUED;
@@@ -3606,7 -3654,7 +3653,7 @@@ ieee80211_rx_h_userspace_mgmt(struct ie ieee80211_rx_status_to_khz(status), sig, rx->skb->data, rx->skb->len, 0)) { if (rx->sta) - rx->sta->rx_stats.packets++; + rx->sta->deflink.rx_stats.packets++; dev_kfree_skb(rx->skb); return RX_QUEUED; } @@@ -3644,7 -3692,7 +3691,7 @@@ ieee80211_rx_h_action_post_userspace(st
handled: if (rx->sta) - rx->sta->rx_stats.packets++; + rx->sta->deflink.rx_stats.packets++; dev_kfree_skb(rx->skb); return RX_QUEUED; } @@@ -3864,7 -3912,7 +3911,7 @@@ static void ieee80211_rx_handlers_resul case RX_DROP_MONITOR: I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); if (rx->sta) - rx->sta->rx_stats.dropped++; + rx->sta->deflink.rx_stats.dropped++; fallthrough; case RX_CONTINUE: { struct ieee80211_rate *rate = NULL; @@@ -3883,7 -3931,7 +3930,7 @@@ case RX_DROP_UNUSABLE: I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); if (rx->sta) - rx->sta->rx_stats.dropped++; + rx->sta->deflink.rx_stats.dropped++; dev_kfree_skb(rx->skb); break; case RX_QUEUED: @@@ -4435,15 -4483,15 +4482,15 @@@ static void ieee80211_rx_8023(struct ie void *sa = skb->data + ETH_ALEN; void *da = skb->data;
- stats = &sta->rx_stats; + stats = &sta->deflink.rx_stats; if (fast_rx->uses_rss) - stats = this_cpu_ptr(sta->pcpu_rx_stats); + stats = this_cpu_ptr(sta->deflink.pcpu_rx_stats);
/* statistics part of ieee80211_rx_h_sta_process() */ if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { stats->last_signal = status->signal; if (!fast_rx->uses_rss) - ewma_signal_add(&sta->rx_stats_avg.signal, + ewma_signal_add(&sta->deflink.rx_stats_avg.signal, -status->signal); }
@@@ -4459,7 -4507,7 +4506,7 @@@
stats->chain_signal_last[i] = signal; if (!fast_rx->uses_rss) - ewma_signal_add(&sta->rx_stats_avg.chain_signal[i], + ewma_signal_add(&sta->deflink.rx_stats_avg.chain_signal[i], -signal); } } @@@ -4535,7 -4583,7 +4582,7 @@@ static bool ieee80211_invoke_fast_rx(st u8 da[ETH_ALEN]; u8 sa[ETH_ALEN]; } addrs __aligned(2); - struct ieee80211_sta_rx_stats *stats = &sta->rx_stats; + struct ieee80211_sta_rx_stats *stats = &sta->deflink.rx_stats;
/* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write * to a common data structure; drivers can implement that per queue @@@ -4637,7 -4685,7 +4684,7 @@@ drop: dev_kfree_skb(skb); if (fast_rx->uses_rss) - stats = this_cpu_ptr(sta->pcpu_rx_stats); + stats = this_cpu_ptr(sta->deflink.pcpu_rx_stats);
stats->dropped++; return true; diff --combined net/netlink/af_netlink.c index 73e9c0a9c187,1b5a9c2e1c29..0cd91f813a3b --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@@ -1931,7 -1931,6 +1931,6 @@@ static int netlink_recvmsg(struct socke struct scm_cookie scm; struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); - int noblock = flags & MSG_DONTWAIT; size_t copied; struct sk_buff *skb, *data_skb; int err, ret; @@@ -1941,7 -1940,7 +1940,7 @@@
copied = 0;
- skb = skb_recv_datagram(sk, flags, noblock, &err); + skb = skb_recv_datagram(sk, flags, &err); if (skb == NULL) goto out;
@@@ -1975,6 -1974,7 +1974,6 @@@ copied = len; }
- skb_reset_transport_header(data_skb); err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
if (msg->msg_name) { diff --combined net/sched/act_pedit.c index 0eaaf1f45de1,e01ef7f109f4..d1221daa0952 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@@ -149,7 -149,7 +149,7 @@@ static int tcf_pedit_init(struct net *n struct nlattr *pattr; struct tcf_pedit *p; int ret = 0, err; - int ksize; + int i, ksize; u32 index;
if (!nla) { @@@ -228,18 -228,6 +228,18 @@@ p->tcfp_nkeys = parm->nkeys; } memcpy(p->tcfp_keys, parm->keys, ksize); + p->tcfp_off_max_hint = 0; + for (i = 0; i < p->tcfp_nkeys; ++i) { + u32 cur = p->tcfp_keys[i].off; + + /* The AT option can read a single byte, we can bound the actual + * value with uchar max. + */ + cur += (0xff & p->tcfp_keys[i].offmask) >> p->tcfp_keys[i].shift; + + /* Each key touches 4 bytes starting from the computed offset */ + p->tcfp_off_max_hint = max(p->tcfp_off_max_hint, cur + 4); + }
p->tcfp_flags = parm->flags; goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); @@@ -320,18 -308,13 +320,18 @@@ static int tcf_pedit_act(struct sk_buf struct tcf_result *res) { struct tcf_pedit *p = to_pedit(a); + u32 max_offset; int i;
- if (skb_unclone(skb, GFP_ATOMIC)) - return p->tcf_action; - spin_lock(&p->tcf_lock);
+ max_offset = (skb_transport_header_was_set(skb) ? + skb_transport_offset(skb) : + skb_network_offset(skb)) + + p->tcfp_off_max_hint; + if (skb_ensure_writable(skb, min(skb->len, max_offset))) + goto unlock; + tcf_lastuse_update(&p->tcf_tm);
if (p->tcfp_nkeys > 0) { @@@ -420,7 -403,6 +420,7 @@@ bad p->tcf_qstats.overlimits++; done: bstats_update(&p->tcf_bstats, skb); +unlock: spin_unlock(&p->tcf_lock); return p->tcf_action; } @@@ -506,7 -488,8 +506,8 @@@ static int tcf_pedit_search(struct net }
static int tcf_pedit_offload_act_setup(struct tc_action *act, void *entry_data, - u32 *index_inc, bool bind) + u32 *index_inc, bool bind, + struct netlink_ext_ack *extack) { if (bind) { struct flow_action_entry *entry = entry_data; @@@ -521,6 -504,7 +522,7 @@@ entry->id = FLOW_ACTION_ADD; break; default: + NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit command offload"); return -EOPNOTSUPP; } entry->mangle.htype = tcf_pedit_htype(act, k); diff --combined net/sunrpc/svcsock.c index b3c9740cfd35,45336e68bf79..2fc98fea59b4 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@@ -117,6 -117,15 +117,6 @@@ static void svc_reclassify_socket(struc */ static void svc_tcp_release_rqst(struct svc_rqst *rqstp) { - struct sk_buff *skb = rqstp->rq_xprt_ctxt; - - if (skb) { - struct svc_sock *svsk = - container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt); - - rqstp->rq_xprt_ctxt = NULL; - skb_free_datagram_locked(svsk->sk_sk, skb); - } }
/** @@@ -250,6 -259,8 +250,6 @@@ static ssize_t svc_tcp_read_msg(struct ssize_t len; size_t t;
- rqstp->rq_xprt_hlen = 0; - clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
for (i = 0, t = 0; t < buflen; i++, t += PAGE_SIZE) { @@@ -298,9 -309,9 +298,9 @@@ static void svc_sock_setbufsize(struct static void svc_sock_secure_port(struct svc_rqst *rqstp) { if (svc_port_is_privileged(svc_addr(rqstp))) - set_bit(RQ_SECURE, &rqstp->rq_flags); + __set_bit(RQ_SECURE, &rqstp->rq_flags); else - clear_bit(RQ_SECURE, &rqstp->rq_flags); + __clear_bit(RQ_SECURE, &rqstp->rq_flags); }
/* @@@ -453,7 -464,7 +453,7 @@@ static int svc_udp_recvfrom(struct svc_ 0, 0, MSG_PEEK | MSG_DONTWAIT); if (err < 0) goto out_recv_err; - skb = skb_recv_udp(svsk->sk_sk, 0, 1, &err); + skb = skb_recv_udp(svsk->sk_sk, MSG_DONTWAIT, &err); if (!skb) goto out_recv_err;
@@@ -1008,9 -1019,9 +1008,9 @@@ static int svc_tcp_recvfrom(struct svc_ rqstp->rq_xprt_ctxt = NULL; rqstp->rq_prot = IPPROTO_TCP; if (test_bit(XPT_LOCAL, &svsk->sk_xprt.xpt_flags)) - set_bit(RQ_LOCAL, &rqstp->rq_flags); + __set_bit(RQ_LOCAL, &rqstp->rq_flags); else - clear_bit(RQ_LOCAL, &rqstp->rq_flags); + __clear_bit(RQ_LOCAL, &rqstp->rq_flags);
p = (__be32 *)rqstp->rq_arg.head[0].iov_base; calldir = p[1]; diff --combined net/sunrpc/xprtsock.c index 650102a9c86a,5c91c5457197..fcdd0fca408e --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@@ -1355,7 -1355,7 +1355,7 @@@ static void xs_udp_data_receive(struct if (sk == NULL) goto out; for (;;) { - skb = skb_recv_udp(sk, 0, 1, &err); + skb = skb_recv_udp(sk, MSG_DONTWAIT, &err); if (skb == NULL) break; xs_udp_data_read_skb(&transport->xprt, sk, skb); @@@ -1418,26 -1418,6 +1418,26 @@@ static size_t xs_tcp_bc_maxpayload(stru } #endif /* CONFIG_SUNRPC_BACKCHANNEL */
+/** + * xs_local_state_change - callback to handle AF_LOCAL socket state changes + * @sk: socket whose state has changed + * + */ +static void xs_local_state_change(struct sock *sk) +{ + struct rpc_xprt *xprt; + struct sock_xprt *transport; + + if (!(xprt = xprt_from_sock(sk))) + return; + transport = container_of(xprt, struct sock_xprt, xprt); + if (sk->sk_shutdown & SHUTDOWN_MASK) { + clear_bit(XPRT_CONNECTED, &xprt->state); + /* Trigger the socket release */ + xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT); + } +} + /** * xs_tcp_state_change - callback to handle TCP socket state changes * @sk: socket whose state has changed @@@ -1886,7 -1866,6 +1886,7 @@@ static int xs_local_finish_connecting(s sk->sk_user_data = xprt; sk->sk_data_ready = xs_data_ready; sk->sk_write_space = xs_udp_write_space; + sk->sk_state_change = xs_local_state_change; sk->sk_error_report = xs_error_report;
xprt_clear_connected(xprt); @@@ -1971,9 -1950,6 +1971,9 @@@ static void xs_local_connect(struct rpc struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); int ret;
+ if (transport->file) + goto force_disconnect; + if (RPC_IS_ASYNC(task)) { /* * We want the AF_LOCAL connect to be resolved in the @@@ -1986,17 -1962,11 +1986,17 @@@ */ task->tk_rpc_status = -ENOTCONN; rpc_exit(task, -ENOTCONN); - return; + goto out_wake; } ret = xs_local_setup_socket(transport); if (ret && !RPC_IS_SOFTCONN(task)) msleep_interruptible(15000); + return; +force_disconnect: + xprt_force_disconnect(xprt); +out_wake: + xprt_clear_connecting(xprt); + xprt_wake_pending_tasks(xprt, -ENOTCONN); }
#if IS_ENABLED(CONFIG_SUNRPC_SWAP) @@@ -2875,6 -2845,9 +2875,6 @@@ static struct rpc_xprt *xs_setup_local( } xprt_set_bound(xprt); xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL); - ret = ERR_PTR(xs_local_setup_socket(transport)); - if (ret) - goto out_err; break; default: ret = ERR_PTR(-EAFNOSUPPORT); diff --combined net/wireless/nl80211.c index 1a3551b6d18b,945ed87d12e0..02a29052e41d --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@@ -3173,15 -3173,6 +3173,15 @@@ int nl80211_parse_chandef(struct cfg802 } else if (attrs[NL80211_ATTR_CHANNEL_WIDTH]) { chandef->width = nla_get_u32(attrs[NL80211_ATTR_CHANNEL_WIDTH]); + if (chandef->chan->band == NL80211_BAND_S1GHZ) { + /* User input error for channel width doesn't match channel */ + if (chandef->width != ieee80211_s1g_channel_width(chandef->chan)) { + NL_SET_ERR_MSG_ATTR(extack, + attrs[NL80211_ATTR_CHANNEL_WIDTH], + "bad channel width"); + return -EINVAL; + } + } if (attrs[NL80211_ATTR_CENTER_FREQ1]) { chandef->center_freq1 = nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ1]); @@@ -3719,6 -3710,7 +3719,7 @@@ static int nl80211_send_iface(struct sk wdev_lock(wdev); switch (wdev->iftype) { case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: if (wdev->ssid_len && nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid)) goto nla_put_failure_locked; @@@ -11666,23 -11658,18 +11667,23 @@@ static int nl80211_set_tx_bitrate_mask( struct cfg80211_bitrate_mask mask; struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; + struct wireless_dev *wdev = dev->ieee80211_ptr; int err;
if (!rdev->ops->set_bitrate_mask) return -EOPNOTSUPP;
+ wdev_lock(wdev); err = nl80211_parse_tx_bitrate_mask(info, info->attrs, NL80211_ATTR_TX_RATES, &mask, dev, true); if (err) - return err; + goto out;
- return rdev_set_bitrate_mask(rdev, dev, NULL, &mask); + err = rdev_set_bitrate_mask(rdev, dev, NULL, &mask); +out: + wdev_unlock(wdev); + return err; }
static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info) diff --combined tools/testing/selftests/net/Makefile index e1f998defd10,0fbdacfdcd6a..811e9b712bea --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@@ -25,7 -25,6 +25,7 @@@ TEST_PROGS += bareudp.s TEST_PROGS += amt.sh TEST_PROGS += unicast_extensions.sh TEST_PROGS += udpgro_fwd.sh +TEST_PROGS += udpgro_frglist.sh TEST_PROGS += veth.sh TEST_PROGS += ioam6.sh TEST_PROGS += gro.sh @@@ -37,6 -36,7 +37,7 @@@ TEST_PROGS += srv6_end_dt4_l3vpn_test.s TEST_PROGS += srv6_end_dt6_l3vpn_test.sh TEST_PROGS += vrf_strict_mode_test.sh TEST_PROGS += arp_ndisc_evict_nocarrier.sh + TEST_PROGS += ndisc_unsolicited_na_test.sh TEST_PROGS_EXTENDED := in_netns.sh setup_loopback.sh setup_veth.sh TEST_PROGS_EXTENDED += toeplitz_client.sh toeplitz.sh TEST_GEN_FILES = socket nettest @@@ -62,8 -62,6 +63,8 @@@ TEST_FILES := setting KSFT_KHDR_INSTALL := 1 include ../lib.mk
+include bpf/Makefile + $(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma $(OUTPUT)/tcp_mmap: LDLIBS += -lpthread $(OUTPUT)/tcp_inq: LDLIBS += -lpthread