The following commit has been merged in the master branch:
commit d7fb078193282955420f78b6fd7b77924e77c965
Merge: 9cea3a60af28fbe324d600a56e4cfb33a86e2cc8 545d95e5f1ba87db17534ee8c36409dd2ade848b
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Wed Apr 10 10:44:47 2024 +1000
Merge branch 'main' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
diff --combined Documentation/devicetree/bindings/net/stm32-dwmac.yaml index f2714b5b6cf42,857d58949b029..7ccf75676b6d5 --- a/Documentation/devicetree/bindings/net/stm32-dwmac.yaml +++ b/Documentation/devicetree/bindings/net/stm32-dwmac.yaml @@@ -82,6 -82,13 +82,13 @@@ properties Should be phandle/offset pair. The phandle to the syscon node which encompasses the glue register, and the offset of the control register
+ st,ext-phyclk: + description: + set this property in RMII mode when you have a PHY without a 50MHz crystal and want to + select the RCC clock instead of ETH_REF_CLK, or in RGMII mode when you want to select + the RCC clock instead of ETH_CLK125. + type: boolean + st,eth-clk-sel: description: set this property in RGMII PHY when you want to select RCC clock instead of ETH_CLK125. @@@ -93,10 -100,6 +100,10 @@@ select RCC clock instead of ETH_REF_CLK. type: boolean
+ access-controllers: + minItems: 1 + maxItems: 2 + required: - compatible - clocks diff --combined MAINTAINERS index f7678b3cce185,4745ea94d4636..025f618a218b4 --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -479,13 -479,6 +479,13 @@@ L: linux-wireless@vger.kernel.or S: Orphan F: drivers/net/wireless/admtek/adm8211.*
+ADP1050 HARDWARE MONITOR DRIVER +M: Radu Sabau radu.sabau@analog.com +L: linux-hwmon@vger.kernel.org +S: Supported +W: https://ez.analog.com/linux-software-drivers +F: Documentation/devicetree/bindings/hwmon/pmbus/adi,adp1050.yaml + ADP1653 FLASH CONTROLLER DRIVER M: Sakari Ailus sakari.ailus@iki.fi L: linux-media@vger.kernel.org @@@ -560,7 -553,7 +560,7 @@@ F: Documentation/devicetree/bindings/ii F: drivers/input/misc/adxl34x.c
ADXL355 THREE-AXIS DIGITAL ACCELEROMETER DRIVER -M: Puranjay Mohan puranjay12@gmail.com +M: Puranjay Mohan puranjay@kernel.org L: linux-iio@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/iio/accel/adi,adxl355.yaml @@@ -2198,6 -2191,7 +2198,6 @@@ N: mx
ARM/FREESCALE LAYERSCAPE ARM ARCHITECTURE M: Shawn Guo shawnguo@kernel.org -M: Li Yang leoyang.li@nxp.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git @@@ -2592,8 -2586,12 +2592,8 @@@ F: arch/arm64/boot/dts/qcom/sc7180 F: arch/arm64/boot/dts/qcom/sc7280* F: arch/arm64/boot/dts/qcom/sdm845-cheza*
-ARM/QUALCOMM SUPPORT -M: Bjorn Andersson andersson@kernel.org -M: Konrad Dybcio konrad.dybcio@linaro.org +ARM/QUALCOMM MAILING LIST L: linux-arm-msm@vger.kernel.org -S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux.git F: Documentation/devicetree/bindings/*/qcom* F: Documentation/devicetree/bindings/soc/qcom/ F: arch/arm/boot/dts/qcom/ @@@ -2630,33 -2628,6 +2630,33 @@@ F: include/dt-bindings/*/qcom F: include/linux/*/qcom* F: include/linux/soc/qcom/
+ARM/QUALCOMM SUPPORT +M: Bjorn Andersson andersson@kernel.org +M: Konrad Dybcio konrad.dybcio@linaro.org +L: linux-arm-msm@vger.kernel.org +S: Maintained +T: git git://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux.git +F: Documentation/devicetree/bindings/arm/qcom-soc.yaml +F: Documentation/devicetree/bindings/arm/qcom.yaml +F: Documentation/devicetree/bindings/bus/qcom* +F: Documentation/devicetree/bindings/cache/qcom,llcc.yaml +F: Documentation/devicetree/bindings/firmware/qcom,scm.yaml +F: Documentation/devicetree/bindings/reserved-memory/qcom +F: Documentation/devicetree/bindings/soc/qcom/ +F: arch/arm/boot/dts/qcom/ +F: arch/arm/configs/qcom_defconfig +F: arch/arm/mach-qcom/ +F: arch/arm64/boot/dts/qcom/ +F: drivers/bus/qcom* +F: drivers/firmware/qcom/ +F: drivers/soc/qcom/ +F: include/dt-bindings/arm/qcom,ids.h +F: include/dt-bindings/firmware/qcom,scm.h +F: include/dt-bindings/soc/qcom* +F: include/linux/firmware/qcom +F: include/linux/soc/qcom/ +F: include/soc/qcom/ + ARM/RDA MICRO ARCHITECTURE M: Manivannan Sadhasivam manivannan.sadhasivam@linaro.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@@ -2737,7 -2708,7 +2737,7 @@@ F: sound/soc/rockchip N: rockchip
ARM/SAMSUNG S3C, S5P AND EXYNOS ARM ARCHITECTURES -M: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org +M: Krzysztof Kozlowski krzk@kernel.org R: Alim Akhtar alim.akhtar@samsung.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-samsung-soc@vger.kernel.org @@@ -3602,7 -3573,6 +3602,7 @@@ S: Supporte C: irc://irc.oftc.net/bcache T: git https://evilpiepirate.org/git/bcachefs.git F: fs/bcachefs/ +F: Documentation/filesystems/bcachefs/
BDISP ST MEDIA DRIVER M: Fabien Dessenne fabien.dessenne@foss.st.com @@@ -3744,7 -3714,7 +3744,7 @@@ F: drivers/iio/imu/bmi323
BPF JIT for ARM M: Russell King linux@armlinux.org.uk -M: Puranjay Mohan puranjay12@gmail.com +M: Puranjay Mohan puranjay@kernel.org L: bpf@vger.kernel.org S: Maintained F: arch/arm/net/ @@@ -3794,8 -3764,6 +3794,8 @@@ X: arch/riscv/net/bpf_jit_comp64.
BPF JIT for RISC-V (64-bit) M: Björn Töpel bjorn@kernel.org +R: Pu Lehui pulehui@huawei.com +R: Puranjay Mohan puranjay@kernel.org L: bpf@vger.kernel.org S: Maintained F: arch/riscv/net/ @@@ -5286,13 -5254,6 +5286,13 @@@ S: Supporte F: Documentation/process/code-of-conduct-interpretation.rst F: Documentation/process/code-of-conduct.rst
+CODE TAGGING +M: Suren Baghdasaryan surenb@google.com +M: Kent Overstreet kent.overstreet@linux.dev +S: Maintained +F: include/linux/codetag.h +F: lib/codetag.c + COMEDI DRIVERS M: Ian Abbott abbotti@mev.co.uk M: H Hartley Sweeten hsweeten@visionengravers.com @@@ -5596,7 -5557,7 +5596,7 @@@ F: drivers/cpuidle/cpuidle-big_little. CPUIDLE DRIVER - ARM EXYNOS M: Daniel Lezcano daniel.lezcano@linaro.org M: Kukjin Kim kgene@kernel.org -R: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org +R: Krzysztof Kozlowski krzk@kernel.org L: linux-pm@vger.kernel.org L: linux-samsung-soc@vger.kernel.org S: Maintained @@@ -8054,6 -8015,8 +8054,8 @@@ F: include/linux/mii. F: include/linux/of_net.h F: include/linux/phy.h F: include/linux/phy_fixed.h + F: include/linux/phy_link_topology.h + F: include/linux/phy_link_topology_core.h F: include/linux/phylib_stubs.h F: include/linux/platform_data/mdio-bcm-unimac.h F: include/linux/platform_data/mdio-gpio.h @@@ -8562,6 -8525,7 +8564,6 @@@ S: Maintaine F: drivers/video/fbdev/fsl-diu-fb.*
FREESCALE DMA DRIVER -M: Li Yang leoyang.li@nxp.com M: Zhang Wei zw@zh-kernel.org L: linuxppc-dev@lists.ozlabs.org S: Maintained @@@ -8726,9 -8690,10 +8728,9 @@@ F: drivers/soc/fsl/qe/tsa. F: include/dt-bindings/soc/cpm1-fsl,tsa.h
FREESCALE QUICC ENGINE UCC ETHERNET DRIVER -M: Li Yang leoyang.li@nxp.com L: netdev@vger.kernel.org L: linuxppc-dev@lists.ozlabs.org -S: Maintained +S: Orphan F: drivers/net/ethernet/freescale/ucc_geth*
FREESCALE QUICC ENGINE UCC HDLC DRIVER @@@ -8745,9 -8710,10 +8747,9 @@@ S: Maintaine F: drivers/tty/serial/ucc_uart.c
FREESCALE SOC DRIVERS -M: Li Yang leoyang.li@nxp.com L: linuxppc-dev@lists.ozlabs.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -S: Maintained +S: Orphan F: Documentation/devicetree/bindings/misc/fsl,dpaa2-console.yaml F: Documentation/devicetree/bindings/soc/fsl/ F: drivers/soc/fsl/ @@@ -8781,15 -8747,17 +8783,15 @@@ F: Documentation/devicetree/bindings/so F: sound/soc/fsl/fsl_qmc_audio.c
FREESCALE USB PERIPHERAL DRIVERS -M: Li Yang leoyang.li@nxp.com L: linux-usb@vger.kernel.org L: linuxppc-dev@lists.ozlabs.org -S: Maintained +S: Orphan F: drivers/usb/gadget/udc/fsl*
FREESCALE USB PHY DRIVER -M: Ran Wang ran.wang_1@nxp.com L: linux-usb@vger.kernel.org L: linuxppc-dev@lists.ozlabs.org -S: Maintained +S: Orphan F: drivers/usb/phy/phy-fsl-usb*
FREEVXFS FILESYSTEM @@@ -9034,7 -9002,7 +9036,7 @@@ F: drivers/i2c/muxes/i2c-mux-gpio. F: include/linux/platform_data/i2c-mux-gpio.h
GENERIC GPIO RESET DRIVER -M: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org +M: Krzysztof Kozlowski krzk@kernel.org S: Maintained F: drivers/reset/reset-gpio.c
@@@ -9617,7 -9585,7 +9619,7 @@@ F: kernel/power
HID CORE LAYER M: Jiri Kosina jikos@kernel.org -M: Benjamin Tissoires benjamin.tissoires@redhat.com +M: Benjamin Tissoires bentiss@kernel.org L: linux-input@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git @@@ -13329,7 -13297,7 +13331,7 @@@ F: drivers/iio/adc/max11205.
MAXIM MAX17040 FAMILY FUEL GAUGE DRIVERS R: Iskren Chernev iskren.chernev@gmail.com -R: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org +R: Krzysztof Kozlowski krzk@kernel.org R: Marek Szyprowski m.szyprowski@samsung.com R: Matheus Castello matheus@castello.eng.br L: linux-pm@vger.kernel.org @@@ -13339,7 -13307,7 +13341,7 @@@ F: drivers/power/supply/max17040_batter
MAXIM MAX17042 FAMILY FUEL GAUGE DRIVERS R: Hans de Goede hdegoede@redhat.com -R: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org +R: Krzysztof Kozlowski krzk@kernel.org R: Marek Szyprowski m.szyprowski@samsung.com R: Sebastian Krzyszkowiak sebastian.krzyszkowiak@puri.sm R: Purism Kernel Team kernel@puri.sm @@@ -13397,7 -13365,7 +13399,7 @@@ F: Documentation/devicetree/bindings/po F: drivers/power/supply/max77976_charger.c
MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS -M: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org +M: Krzysztof Kozlowski krzk@kernel.org L: linux-pm@vger.kernel.org S: Maintained B: mailto:linux-samsung-soc@vger.kernel.org @@@ -13408,7 -13376,7 +13410,7 @@@ F: drivers/power/supply/max77693_charge
MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS M: Chanwoo Choi cw00.choi@samsung.com -M: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org +M: Krzysztof Kozlowski krzk@kernel.org L: linux-kernel@vger.kernel.org S: Maintained B: mailto:linux-samsung-soc@vger.kernel.org @@@ -14191,18 -14159,8 +14193,18 @@@ F: mm/memblock. F: mm/mm_init.c F: tools/testing/memblock/
+MEMORY ALLOCATION PROFILING +M: Suren Baghdasaryan surenb@google.com +M: Kent Overstreet kent.overstreet@linux.dev +L: linux-mm@kvack.org +S: Maintained +F: include/linux/alloc_tag.h +F: include/linux/codetag_ctx.h +F: lib/alloc_tag.c +F: lib/pgalloc_tag.c + MEMORY CONTROLLER DRIVERS -M: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org +M: Krzysztof Kozlowski krzk@kernel.org L: linux-kernel@vger.kernel.org S: Maintained B: mailto:krzysztof.kozlowski@linaro.org @@@ -14407,7 -14365,7 +14409,7 @@@ F: drivers/dma/at_xdmac. F: include/dt-bindings/dma/at91.h
MICROCHIP AT91 SERIAL DRIVER -M: Richard Genoud richard.genoud@gmail.com +M: Richard Genoud richard.genoud@bootlin.com S: Maintained F: Documentation/devicetree/bindings/serial/atmel,at91-usart.yaml F: drivers/tty/serial/atmel_serial.c @@@ -15583,7 -15541,7 +15585,7 @@@ F: include/uapi/linux/nexthop. F: net/ipv4/nexthop.c
NFC SUBSYSTEM -M: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org +M: Krzysztof Kozlowski krzk@kernel.org L: netdev@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/net/nfc/ @@@ -15960,7 -15918,7 +15962,7 @@@ F: Documentation/devicetree/bindings/re F: drivers/regulator/pf8x00-regulator.c
NXP PTN5150A CC LOGIC AND EXTCON DRIVER -M: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org +M: Krzysztof Kozlowski krzk@kernel.org L: linux-kernel@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml @@@ -16571,7 -16529,7 +16573,7 @@@ K: of_overlay_remov
OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS M: Rob Herring robh@kernel.org -M: Krzysztof Kozlowski krzysztof.kozlowski+dt@linaro.org +M: Krzysztof Kozlowski krzk+dt@kernel.org M: Conor Dooley conor+dt@kernel.org L: devicetree@vger.kernel.org S: Maintained @@@ -16777,9 -16735,9 +16779,9 @@@ F: include/uapi/linux/ppdev.
PARAVIRT_OPS INTERFACE M: Juergen Gross jgross@suse.com -R: Ajay Kaher akaher@vmware.com -R: Alexey Makhalov amakhalov@vmware.com -R: VMware PV-Drivers Reviewers pv-drivers@vmware.com +R: Ajay Kaher ajay.kaher@broadcom.com +R: Alexey Makhalov alexey.amakhalov@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: virtualization@lists.linux.dev L: x86@kernel.org S: Supported @@@ -17018,6 -16976,7 +17020,6 @@@ F: drivers/pci/controller/dwc/pci-exyno
PCI DRIVER FOR SYNOPSYS DESIGNWARE M: Jingoo Han jingoohan1@gmail.com -M: Gustavo Pimentel gustavo.pimentel@synopsys.com M: Manivannan Sadhasivam manivannan.sadhasivam@linaro.org L: linux-pci@vger.kernel.org S: Maintained @@@ -17347,7 -17306,6 +17349,7 @@@ R: Alexander Shishkin <alexander.shishk R: Jiri Olsa jolsa@kernel.org R: Ian Rogers irogers@google.com R: Adrian Hunter adrian.hunter@intel.com +R: "Liang, Kan" kan.liang@linux.intel.com L: linux-perf-users@vger.kernel.org L: linux-kernel@vger.kernel.org S: Supported @@@ -17529,7 -17487,7 +17531,7 @@@ F: Documentation/devicetree/bindings/pi F: drivers/pinctrl/renesas/
PIN CONTROLLER - SAMSUNG -M: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org +M: Krzysztof Kozlowski krzk@kernel.org M: Sylwester Nawrocki s.nawrocki@samsung.com R: Alim Akhtar alim.akhtar@samsung.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@@ -19497,7 -19455,7 +19499,7 @@@ F: Documentation/devicetree/bindings/so F: sound/soc/samsung/
SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER -M: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org +M: Krzysztof Kozlowski krzk@kernel.org L: linux-crypto@vger.kernel.org L: linux-samsung-soc@vger.kernel.org S: Maintained @@@ -19532,7 -19490,7 +19534,7 @@@ S: Maintaine F: drivers/platform/x86/samsung-laptop.c
SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS -M: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org +M: Krzysztof Kozlowski krzk@kernel.org L: linux-kernel@vger.kernel.org L: linux-samsung-soc@vger.kernel.org S: Maintained @@@ -19558,7 -19516,7 +19560,7 @@@ F: drivers/media/platform/samsung/s3c-c F: include/media/drv-intf/s3c_camif.h
SAMSUNG S3FWRN5 NFC DRIVER -M: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org +M: Krzysztof Kozlowski krzk@kernel.org S: Maintained F: Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml F: drivers/nfc/s3fwrn5 @@@ -19579,7 -19537,7 +19581,7 @@@ S: Supporte F: drivers/media/i2c/s5k5baf.c
SAMSUNG S5P Security SubSystem (SSS) DRIVER -M: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org +M: Krzysztof Kozlowski krzk@kernel.org M: Vladimir Zapolskiy vz@mleia.com L: linux-crypto@vger.kernel.org L: linux-samsung-soc@vger.kernel.org @@@ -19601,7 -19559,7 +19603,7 @@@ F: Documentation/devicetree/bindings/me F: drivers/media/platform/samsung/exynos4-is/
SAMSUNG SOC CLOCK DRIVERS -M: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org +M: Krzysztof Kozlowski krzk@kernel.org M: Sylwester Nawrocki s.nawrocki@samsung.com M: Chanwoo Choi cw00.choi@samsung.com R: Alim Akhtar alim.akhtar@samsung.com @@@ -19633,7 -19591,7 +19635,7 @@@ F: drivers/net/ethernet/samsung/sxgbe
SAMSUNG THERMAL DRIVER M: Bartlomiej Zolnierkiewicz bzolnier@gmail.com -M: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org +M: Krzysztof Kozlowski krzk@kernel.org L: linux-pm@vger.kernel.org L: linux-samsung-soc@vger.kernel.org S: Maintained @@@ -20894,13 -20852,6 +20896,13 @@@ T: git git://linuxtv.org/media_tree.gi F: Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml F: drivers/media/i2c/st-mipid02.c
+ST STM32 FIREWALL +M: Gatien Chevallier gatien.chevallier@foss.st.com +S: Maintained +F: drivers/bus/stm32_etzpc.c +F: drivers/bus/stm32_firewall.c +F: drivers/bus/stm32_rifsc.c + ST STM32 I2C/SMBUS DRIVER M: Pierre-Yves MORDRET pierre-yves.mordret@foss.st.com M: Alain Volmat alain.volmat@foss.st.com @@@ -21729,6 -21680,7 +21731,7 @@@ TEAM DRIVE M: Jiri Pirko jiri@resnulli.us L: netdev@vger.kernel.org S: Supported + F: Documentation/netlink/specs/team.yaml F: drivers/net/team/ F: include/linux/if_team.h F: include/uapi/linux/if_team.h @@@ -21774,7 -21726,6 +21777,7 @@@ F: Documentation/driver-api/tee.rs F: Documentation/tee/ F: Documentation/userspace-api/tee.rst F: drivers/tee/ +F: include/linux/tee_core.h F: include/linux/tee_drv.h F: include/uapi/linux/tee.h
@@@ -21986,7 -21937,7 +21989,7 @@@ F: include/linux/soc/ti/ti_sci_inta_msi F: include/linux/soc/ti/ti_sci_protocol.h
TEXAS INSTRUMENTS' TMP117 TEMPERATURE SENSOR DRIVER -M: Puranjay Mohan puranjay12@gmail.com +M: Puranjay Mohan puranjay@kernel.org L: linux-iio@vger.kernel.org S: Supported F: Documentation/devicetree/bindings/iio/temperature/ti,tmp117.yaml @@@ -22482,7 -22433,6 +22485,7 @@@ S: Maintaine W: https://kernsec.org/wiki/index.php/Linux_Kernel_Integrity Q: https://patchwork.kernel.org/project/linux-integrity/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jarkko/linux-tpmdd.git +F: Documentation/devicetree/bindings/tpm/ F: drivers/char/tpm/
TPS546D24 DRIVER @@@ -22568,15 -22518,6 +22571,15 @@@ F: Documentation/ABI/testing/configfs-t F: drivers/virt/coco/tsm.c F: include/linux/tsm.h
+TRUSTED SERVICES TEE DRIVER +M: Balint Dobszay balint.dobszay@arm.com +M: Sudeep Holla sudeep.holla@arm.com +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +L: trusted-services@lists.trustedfirmware.org +S: Maintained +F: Documentation/tee/ts-tee.rst +F: drivers/tee/tstee/ + TTY LAYER AND SERIAL DRIVERS M: Greg Kroah-Hartman gregkh@linuxfoundation.org M: Jiri Slaby jirislaby@kernel.org @@@ -22907,7 -22848,7 +22910,7 @@@ F: drivers/usb/host/ehci
USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...) M: Jiri Kosina jikos@kernel.org -M: Benjamin Tissoires benjamin.tissoires@redhat.com +M: Benjamin Tissoires bentiss@kernel.org L: linux-usb@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git @@@ -23716,9 -23657,9 +23719,9 @@@ S: Supporte F: drivers/misc/vmw_balloon.c
VMWARE HYPERVISOR INTERFACE -M: Ajay Kaher akaher@vmware.com -M: Alexey Makhalov amakhalov@vmware.com -R: VMware PV-Drivers Reviewers pv-drivers@vmware.com +M: Ajay Kaher ajay.kaher@broadcom.com +M: Alexey Makhalov alexey.amakhalov@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: virtualization@lists.linux.dev L: x86@kernel.org S: Supported @@@ -23727,34 -23668,33 +23730,34 @@@ F: arch/x86/include/asm/vmware. F: arch/x86/kernel/cpu/vmware.c
VMWARE PVRDMA DRIVER -M: Bryan Tan bryantan@vmware.com -M: Vishnu Dasa vdasa@vmware.com -R: VMware PV-Drivers Reviewers pv-drivers@vmware.com +M: Bryan Tan bryan-bt.tan@broadcom.com +M: Vishnu Dasa vishnu.dasa@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-rdma@vger.kernel.org S: Supported F: drivers/infiniband/hw/vmw_pvrdma/
VMWARE PVSCSI DRIVER -M: Vishal Bhakta vbhakta@vmware.com -R: VMware PV-Drivers Reviewers pv-drivers@vmware.com +M: Vishal Bhakta vishal.bhakta@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-scsi@vger.kernel.org S: Supported F: drivers/scsi/vmw_pvscsi.c F: drivers/scsi/vmw_pvscsi.h
VMWARE VIRTUAL PTP CLOCK DRIVER -R: Ajay Kaher akaher@vmware.com -R: Alexey Makhalov amakhalov@vmware.com -R: VMware PV-Drivers Reviewers pv-drivers@vmware.com +M: Nick Shi nick.shi@broadcom.com +R: Ajay Kaher ajay.kaher@broadcom.com +R: Alexey Makhalov alexey.amakhalov@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: netdev@vger.kernel.org S: Supported F: drivers/ptp/ptp_vmw.c
VMWARE VMCI DRIVER -M: Bryan Tan bryantan@vmware.com -M: Vishnu Dasa vdasa@vmware.com -R: VMware PV-Drivers Reviewers pv-drivers@vmware.com +M: Bryan Tan bryan-bt.tan@broadcom.com +M: Vishnu Dasa vishnu.dasa@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-kernel@vger.kernel.org S: Supported F: drivers/misc/vmw_vmci/ @@@ -23769,16 -23709,16 +23772,16 @@@ F: drivers/input/mouse/vmmouse. F: drivers/input/mouse/vmmouse.h
VMWARE VMXNET3 ETHERNET DRIVER -M: Ronak Doshi doshir@vmware.com -R: VMware PV-Drivers Reviewers pv-drivers@vmware.com +M: Ronak Doshi ronak.doshi@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: netdev@vger.kernel.org S: Supported F: drivers/net/vmxnet3/
VMWARE VSOCK VMCI TRANSPORT DRIVER -M: Bryan Tan bryantan@vmware.com -M: Vishnu Dasa vdasa@vmware.com -R: VMware PV-Drivers Reviewers pv-drivers@vmware.com +M: Bryan Tan bryan-bt.tan@broadcom.com +M: Vishnu Dasa vishnu.dasa@broadcom.com +R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-kernel@vger.kernel.org S: Supported F: net/vmw_vsock/vmci_transport* @@@ -23846,7 -23786,7 +23849,7 @@@ S: Orpha F: drivers/mmc/host/vub300.c
W1 DALLAS'S 1-WIRE BUS -M: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org +M: Krzysztof Kozlowski krzk@kernel.org S: Maintained F: Documentation/devicetree/bindings/w1/ F: Documentation/w1/ diff --combined drivers/net/ethernet/broadcom/bnxt/bnxt.c index 57e61f9631678,795f3f957eb5e..5a67848551f26 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@@ -1296,9 -1296,9 +1296,9 @@@ static int bnxt_agg_bufs_valid(struct b return RX_AGG_CMP_VALID(agg, *raw_cons); }
- static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, - unsigned int len, - dma_addr_t mapping) + static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data, + unsigned int len, + dma_addr_t mapping) { struct bnxt *bp = bnapi->bp; struct pci_dev *pdev = bp->pdev; @@@ -1318,6 -1318,39 +1318,39 @@@ bp->rx_dir);
skb_put(skb, len); + + return skb; + } + + static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, + unsigned int len, + dma_addr_t mapping) + { + return bnxt_copy_data(bnapi, data, len, mapping); + } + + static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi, + struct xdp_buff *xdp, + unsigned int len, + dma_addr_t mapping) + { + unsigned int metasize = 0; + u8 *data = xdp->data; + struct sk_buff *skb; + + len = xdp->data_end - xdp->data_meta; + metasize = xdp->data - xdp->data_meta; + data = xdp->data_meta; + + skb = bnxt_copy_data(bnapi, data, len, mapping); + if (!skb) + return skb; + + if (metasize) { + skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } + return skb; }
@@@ -2104,14 -2137,17 +2137,17 @@@ static int bnxt_rx_pkt(struct bnxt *bp }
if (xdp_active) { - if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) { + if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) { rc = 1; goto next_rx; } }
if (len <= bp->rx_copy_thresh) { - skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); + if (!xdp_active) + skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); + else + skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr); bnxt_reuse_rx_data(rxr, cons, data); if (!skb) { if (agg_bufs) { @@@ -2489,6 -2525,9 +2525,9 @@@ static bool bnxt_event_error_report(str } return false; } + case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED: + netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n"); + break; default: netdev_err(bp->dev, "FW reported unknown error type %u\n", err_type); @@@ -3559,14 -3598,15 +3598,15 @@@ static void bnxt_free_rx_rings(struct b }
static int bnxt_alloc_rx_page_pool(struct bnxt *bp, - struct bnxt_rx_ring_info *rxr) + struct bnxt_rx_ring_info *rxr, + int numa_node) { struct page_pool_params pp = { 0 };
pp.pool_size = bp->rx_agg_ring_size; if (BNXT_RX_PAGE_MODE(bp)) pp.pool_size += bp->rx_ring_size; - pp.nid = dev_to_node(&bp->pdev->dev); + pp.nid = numa_node; pp.napi = &rxr->bnapi->napi; pp.netdev = bp->dev; pp.dev = &bp->pdev->dev; @@@ -3586,7 -3626,8 +3626,8 @@@
static int bnxt_alloc_rx_rings(struct bnxt *bp) { - int i, rc = 0, agg_rings = 0; + int numa_node = dev_to_node(&bp->pdev->dev); + int i, rc = 0, agg_rings = 0, cpu;
if (!bp->rx_ring) return -ENOMEM; @@@ -3597,10 -3638,15 +3638,15 @@@ for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring; + int cpu_node;
ring = &rxr->rx_ring_struct;
- rc = bnxt_alloc_rx_page_pool(bp, rxr); + cpu = cpumask_local_spread(i, numa_node); + cpu_node = cpu_to_node(cpu); + netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n", + i, cpu_node); + rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node); if (rc) return rc;
@@@ -4241,6 -4287,7 +4287,7 @@@ static void bnxt_init_vnics(struct bnx int j;
vnic->fw_vnic_id = INVALID_HW_RING_ID; + vnic->vnic_id = i; for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
@@@ -5788,8 -5835,22 +5835,22 @@@ void bnxt_fill_ipv6_mask(__be32 mask[4] static void bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp, struct hwrm_cfa_ntuple_filter_alloc_input *req, - u16 rxq) + struct bnxt_ntuple_filter *fltr) { + struct bnxt_rss_ctx *rss_ctx, *tmp; + u16 rxq = fltr->base.rxq; + + if (fltr->base.flags & BNXT_ACT_RSS_CTX) { + list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) { + if (rss_ctx->index == fltr->base.fw_vnic_id) { + struct bnxt_vnic_info *vnic = &rss_ctx->vnic; + + req->dst_id = cpu_to_le16(vnic->fw_vnic_id); + break; + } + } + return; + } if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { struct bnxt_vnic_info *vnic; u32 enables; @@@ -5830,7 -5891,7 +5891,7 @@@ int bnxt_hwrm_cfa_ntuple_filter_alloc(s req->flags = cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP); } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { - bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr->base.rxq); + bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr); } else { vnic = &bp->vnic_info[fltr->base.rxq + 1]; req->dst_id = cpu_to_le16(vnic->fw_vnic_id); @@@ -5938,9 -5999,9 +5999,9 @@@ static void bnxt_hwrm_vnic_update_tunl_ req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap); }
- static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) + int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic, + u32 tpa_flags) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; struct hwrm_vnic_tpa_cfg_input *req; int rc; @@@ -6025,9 -6086,10 +6086,10 @@@ static u16 bnxt_cp_ring_for_tx(struct b return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); }
- static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) + int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) { int entries; + u16 *tbl;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; @@@ -6035,16 -6097,22 +6097,22 @@@ entries = HW_HASH_INDEX_SIZE;
bp->rss_indir_tbl_entries = entries; - bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), - GFP_KERNEL); - if (!bp->rss_indir_tbl) + tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL); + if (!tbl) return -ENOMEM; + + if (rss_ctx) + rss_ctx->rss_indir_tbl = tbl; + else + bp->rss_indir_tbl = tbl; + return 0; }
- static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp) + void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) { u16 max_rings, max_entries, pad, i; + u16 *rss_indir_tbl;
if (!bp->rx_nr_rings) return; @@@ -6055,13 -6123,17 +6123,17 @@@ max_rings = bp->rx_nr_rings;
max_entries = bnxt_get_rxfh_indir_size(bp->dev); + if (rss_ctx) + rss_indir_tbl = &rss_ctx->rss_indir_tbl[0]; + else + rss_indir_tbl = &bp->rss_indir_tbl[0];
for (i = 0; i < max_entries; i++) - bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings); + rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
pad = bp->rss_indir_tbl_entries - max_entries; if (pad) - memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); + memset(&rss_indir_tbl[i], 0, pad * sizeof(u16)); }
static u16 bnxt_get_max_rss_ring(struct bnxt *bp) @@@ -6117,6 -6189,8 +6189,8 @@@ static void bnxt_fill_hw_rss_tbl_p5(str
if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG) j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings); + else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG) + j = vnic->rss_ctx->rss_indir_tbl[i]; else j = bp->rss_indir_tbl[i]; rxr = &bp->rx_ring[j]; @@@ -6154,9 -6228,9 +6228,9 @@@ __bnxt_hwrm_vnic_set_rss(struct bnxt *b req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); }
- static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) + static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic, + bool set_rss) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_rss_cfg_input *req; int rc;
@@@ -6174,9 -6248,9 +6248,9 @@@ return hwrm_req_send(bp, req); }
- static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) + static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, + struct bnxt_vnic_info *vnic, bool set_rss) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_rss_cfg_input *req; dma_addr_t ring_tbl_map; u32 i, nr_ctxs; @@@ -6229,9 -6303,8 +6303,8 @@@ static void bnxt_hwrm_update_rss_hash_c hwrm_req_drop(bp, req); }
- static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) + static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_plcmodes_cfg_input *req; int rc;
@@@ -6256,7 -6329,8 +6329,8 @@@ return hwrm_req_send(bp, req); }
- static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, + static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, + struct bnxt_vnic_info *vnic, u16 ctx_idx) { struct hwrm_vnic_rss_cos_lb_ctx_free_input *req; @@@ -6265,10 -6339,10 +6339,10 @@@ return;
req->rss_cos_lb_ctx_id = - cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); + cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
hwrm_req_send(bp, req); - bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; + vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; }
static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) @@@ -6280,13 -6354,14 +6354,14 @@@
for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) - bnxt_hwrm_vnic_ctx_free_one(bp, i, j); + bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j); } } bp->rsscos_nr_ctxs = 0; }
- static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) + static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, + struct bnxt_vnic_info *vnic, u16 ctx_idx) { struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp; struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req; @@@ -6299,7 -6374,7 +6374,7 @@@ resp = hwrm_req_hold(bp, req); rc = hwrm_req_send(bp, req); if (!rc) - bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = + vnic->fw_rss_cos_lb_ctx[ctx_idx] = le16_to_cpu(resp->rss_cos_lb_ctx_id); hwrm_req_drop(bp, req);
@@@ -6313,10 -6388,9 +6388,9 @@@ static u32 bnxt_get_roce_vnic_mode(stru return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; }
- int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) + int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) { struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; - struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_cfg_input *req; unsigned int ring = 0, grp_idx; u16 def_vlan = 0; @@@ -6364,8 -6438,8 +6438,8 @@@ if (vnic->flags & BNXT_VNIC_RSS_FLAG) ring = 0; else if (vnic->flags & BNXT_VNIC_RFS_FLAG) - ring = vnic_id - 1; - else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) + ring = vnic->vnic_id - 1; + else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) ring = bp->rx_nr_rings - 1;
grp_idx = bp->rx_ring[ring].bnapi->index; @@@ -6381,25 -6455,25 +6455,25 @@@ vnic_mru #endif if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); - if (!vnic_id && bnxt_ulp_registered(bp->edev)) + if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev)) req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
return hwrm_req_send(bp, req); }
- static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) + static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, + struct bnxt_vnic_info *vnic) { - if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { + if (vnic->fw_vnic_id != INVALID_HW_RING_ID) { struct hwrm_vnic_free_input *req;
if (hwrm_req_init(bp, req, HWRM_VNIC_FREE)) return;
- req->vnic_id = - cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); + req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
hwrm_req_send(bp, req); - bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; + vnic->fw_vnic_id = INVALID_HW_RING_ID; } }
@@@ -6408,15 -6482,14 +6482,14 @@@ static void bnxt_hwrm_vnic_free(struct u16 i;
for (i = 0; i < bp->nr_vnics; i++) - bnxt_hwrm_vnic_free_one(bp, i); + bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]); }
- static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, - unsigned int start_rx_ring_idx, - unsigned int nr_rings) + int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic, + unsigned int start_rx_ring_idx, + unsigned int nr_rings) { unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; - struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_alloc_output *resp; struct hwrm_vnic_alloc_input *req; int rc; @@@ -6442,7 -6515,7 +6515,7 @@@ vnic_no_ring_grps: for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; - if (vnic_id == BNXT_VNIC_DEFAULT) + if (vnic->vnic_id == BNXT_VNIC_DEFAULT) req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
resp = hwrm_req_hold(bp, req); @@@ -7341,7 -7414,7 +7414,7 @@@ static void bnxt_check_rss_tbl_no_rmgr( if (hw_resc->resv_rx_rings != bp->rx_nr_rings) { hw_resc->resv_rx_rings = bp->rx_nr_rings; if (!netif_is_rxfh_configured(bp->dev)) - bnxt_set_dflt_rss_indir_tbl(bp); + bnxt_set_dflt_rss_indir_tbl(bp, NULL); } }
@@@ -7349,7 -7422,7 +7422,7 @@@ static int bnxt_get_total_vnics(struct { if (bp->flags & BNXT_FLAG_RFS) { if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) - return 2; + return 2 + bp->num_rss_ctx; if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) return rx_rings + 1; } @@@ -7497,7 -7570,7 +7570,7 @@@ static int __bnxt_reserve_rings(struct return -ENOMEM;
if (!netif_is_rxfh_configured(bp->dev)) - bnxt_set_dflt_rss_indir_tbl(bp); + bnxt_set_dflt_rss_indir_tbl(bp, NULL);
return rc; } @@@ -9676,7 -9749,7 +9749,7 @@@ static int bnxt_set_tpa(struct bnxt *bp else if (BNXT_NO_FW_ACCESS(bp)) return 0; for (i = 0; i < bp->nr_vnics; i++) { - rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); + rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags); if (rc) { netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", i, rc); @@@ -9691,7 -9764,7 +9764,7 @@@ static void bnxt_hwrm_clear_vnic_rss(st int i;
for (i = 0; i < bp->nr_vnics; i++) - bnxt_hwrm_vnic_set_rss(bp, i, false); + bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false); }
static void bnxt_clear_vnic(struct bnxt *bp) @@@ -9769,28 -9842,27 +9842,27 @@@ static int bnxt_hwrm_set_cache_line_siz return hwrm_req_send(bp, req); }
- static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) + static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; int rc;
if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) goto skip_rss_ctx;
/* allocate context for vnic */ - rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0); + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0); if (rc) { netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", - vnic_id, rc); + vnic->vnic_id, rc); goto vnic_setup_err; } bp->rsscos_nr_ctxs++;
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { - rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1); + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1); if (rc) { netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", - vnic_id, rc); + vnic->vnic_id, rc); goto vnic_setup_err; } bp->rsscos_nr_ctxs++; @@@ -9798,26 -9870,26 +9870,26 @@@
skip_rss_ctx: /* configure default vnic, ring grp */ - rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); + rc = bnxt_hwrm_vnic_cfg(bp, vnic); if (rc) { netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", - vnic_id, rc); + vnic->vnic_id, rc); goto vnic_setup_err; }
/* Enable RSS hashing on vnic */ - rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true); + rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true); if (rc) { netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", - vnic_id, rc); + vnic->vnic_id, rc); goto vnic_setup_err; }
if (bp->flags & BNXT_FLAG_AGG_RINGS) { - rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); + rc = bnxt_hwrm_vnic_set_hds(bp, vnic); if (rc) { netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", - vnic_id, rc); + vnic->vnic_id, rc); } }
@@@ -9825,16 -9897,33 +9897,33 @@@ vnic_setup_err return rc; }
- static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) + int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) + { + int rc; + + rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", + vnic->vnic_id, rc); + return rc; + } + rc = bnxt_hwrm_vnic_cfg(bp, vnic); + if (rc) + netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", + vnic->vnic_id, rc); + return rc; + } + + int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) { int rc, i, nr_ctxs;
nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); for (i = 0; i < nr_ctxs; i++) { - rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i); + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i); if (rc) { netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", - vnic_id, i, rc); + vnic->vnic_id, i, rc); break; } bp->rsscos_nr_ctxs++; @@@ -9842,63 -9931,57 +9931,57 @@@ if (i < nr_ctxs) return -ENOMEM;
- rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true); - if (rc) { - netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", - vnic_id, rc); - return rc; - } - rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); - if (rc) { - netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", - vnic_id, rc); + rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic); + if (rc) return rc; - } + if (bp->flags & BNXT_FLAG_AGG_RINGS) { - rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id); + rc = bnxt_hwrm_vnic_set_hds(bp, vnic); if (rc) { netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", - vnic_id, rc); + vnic->vnic_id, rc); } } return rc; }
- static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) + static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) - return __bnxt_setup_vnic_p5(bp, vnic_id); + return __bnxt_setup_vnic_p5(bp, vnic); else - return __bnxt_setup_vnic(bp, vnic_id); + return __bnxt_setup_vnic(bp, vnic); }
- static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, u16 vnic_id, + static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, + struct bnxt_vnic_info *vnic, u16 start_rx_ring_idx, int rx_rings) { int rc;
- rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, start_rx_ring_idx, rx_rings); + rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings); if (rc) { netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", - vnic_id, rc); + vnic->vnic_id, rc); return rc; } - return bnxt_setup_vnic(bp, vnic_id); + return bnxt_setup_vnic(bp, vnic); }
static int bnxt_alloc_rfs_vnics(struct bnxt *bp) { + struct bnxt_vnic_info *vnic; int i, rc = 0;
- if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) - return bnxt_alloc_and_setup_vnic(bp, BNXT_VNIC_NTUPLE, 0, - bp->rx_nr_rings); + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { + vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE]; + return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings); + }
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) return 0;
for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_vnic_info *vnic; u16 vnic_id = i + 1; u16 ring_id = i;
@@@ -9909,12 -9992,104 +9992,104 @@@ vnic->flags |= BNXT_VNIC_RFS_FLAG; if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; - if (bnxt_alloc_and_setup_vnic(bp, vnic_id, ring_id, 1)) + if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1)) break; } return rc; }
+ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, + bool all) + { + struct bnxt_vnic_info *vnic = &rss_ctx->vnic; + struct bnxt_filter_base *usr_fltr, *tmp; + struct bnxt_ntuple_filter *ntp_fltr; + int i; + + bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic); + for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) { + if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID) + bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i); + } + if (!all) + return; + + list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) { + if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) && + usr_fltr->fw_vnic_id == rss_ctx->index) { + ntp_fltr = container_of(usr_fltr, + struct bnxt_ntuple_filter, + base); + bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr); + bnxt_del_ntp_filter(bp, ntp_fltr); + bnxt_del_one_usr_fltr(bp, usr_fltr); + } + } + + if (vnic->rss_table) + dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size, + vnic->rss_table, + vnic->rss_table_dma_addr); + kfree(rss_ctx->rss_indir_tbl); + list_del(&rss_ctx->list); + bp->num_rss_ctx--; + clear_bit(rss_ctx->index, bp->rss_ctx_bmap); + kfree(rss_ctx); + } + + static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp) + { + bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA); + struct bnxt_rss_ctx *rss_ctx, *tmp; + + list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) { + struct bnxt_vnic_info *vnic = &rss_ctx->vnic; + + if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) || + bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) || + __bnxt_setup_vnic_p5(bp, vnic)) { + netdev_err(bp->dev, "Failed to restore RSS ctx %d\n", + rss_ctx->index); + bnxt_del_one_rss_ctx(bp, rss_ctx, true); + } + } + } + + struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp) + { + struct bnxt_rss_ctx *rss_ctx = NULL; + + rss_ctx = kzalloc(sizeof(*rss_ctx), GFP_KERNEL); + if (rss_ctx) { + rss_ctx->vnic.rss_ctx = rss_ctx; + list_add_tail(&rss_ctx->list, &bp->rss_ctx_list); + bp->num_rss_ctx++; + } + return rss_ctx; + } + + void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all) + { + struct bnxt_rss_ctx *rss_ctx, *tmp; + + list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) + bnxt_del_one_rss_ctx(bp, rss_ctx, all); + + if (all) + bitmap_free(bp->rss_ctx_bmap); + } + + static void bnxt_init_multi_rss_ctx(struct bnxt *bp) + { + bp->rss_ctx_bmap = bitmap_zalloc(BNXT_RSS_CTX_BMAP_LEN, GFP_KERNEL); + if (bp->rss_ctx_bmap) { + /* burn index 0 since we cannot have context 0 */ + __set_bit(0, bp->rss_ctx_bmap); + INIT_LIST_HEAD(&bp->rss_ctx_list); + bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX; + } + } + /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */ static bool bnxt_promisc_ok(struct bnxt *bp) { @@@ -9927,16 -10102,17 +10102,17 @@@
static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[1]; unsigned int rc = 0;
- rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1); + rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1); if (rc) { netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", rc); return rc; }
- rc = bnxt_hwrm_vnic_cfg(bp, 1); + rc = bnxt_hwrm_vnic_cfg(bp, vnic); if (rc) { netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", rc); @@@ -9979,7 -10155,7 +10155,7 @@@ static int bnxt_init_chip(struct bnxt * rx_nr_rings--;
/* default vnic 0 */ - rc = bnxt_hwrm_vnic_alloc(bp, BNXT_VNIC_DEFAULT, 0, rx_nr_rings); + rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings); if (rc) { netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); goto err_out; @@@ -9988,7 -10164,7 +10164,7 @@@ if (BNXT_VF(bp)) bnxt_hwrm_func_qcfg(bp);
- rc = bnxt_setup_vnic(bp, BNXT_VNIC_DEFAULT); + rc = bnxt_setup_vnic(bp, vnic); if (rc) goto err_out; if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) @@@ -11674,6 -11850,46 +11850,46 @@@ static void bnxt_cfg_usr_fltrs(struct b bnxt_cfg_one_usr_fltr(bp, usr_fltr); }
+ static int bnxt_set_xps_mapping(struct bnxt *bp) + { + int numa_node = dev_to_node(&bp->pdev->dev); + unsigned int q_idx, map_idx, cpu, i; + const struct cpumask *cpu_mask_ptr; + int nr_cpus = num_online_cpus(); + cpumask_t *q_map; + int rc = 0; + + q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL); + if (!q_map) + return -ENOMEM; + + /* Create CPU mask for all TX queues across MQPRIO traffic classes. + * Each TC has the same number of TX queues. The nth TX queue for each + * TC will have the same CPU mask. + */ + for (i = 0; i < nr_cpus; i++) { + map_idx = i % bp->tx_nr_rings_per_tc; + cpu = cpumask_local_spread(i, numa_node); + cpu_mask_ptr = get_cpu_mask(cpu); + cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr); + } + + /* Register CPU mask for each TX queue except the ones marked for XDP */ + for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) { + map_idx = q_idx % bp->tx_nr_rings_per_tc; + rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx); + if (rc) { + netdev_warn(bp->dev, "Error setting XPS for q:%d\n", + q_idx); + break; + } + } + + kfree(q_map); + + return rc; + } + static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { int rc = 0; @@@ -11736,8 -11952,12 +11952,12 @@@ } }
- if (irq_re_init) + if (irq_re_init) { udp_tunnel_nic_reset_ntf(bp->dev); + rc = bnxt_set_xps_mapping(bp); + if (rc) + netdev_warn(bp->dev, "failed to set xps mapping\n"); + }
if (bp->tx_nr_rings_xdp < num_possible_cpus()) { if (!static_key_enabled(&bnxt_xdp_locking_key)) @@@ -11758,10 -11978,10 +11978,12 @@@ /* VF-reps may need to be re-opened after the PF is re-opened */ if (BNXT_PF(bp)) bnxt_vf_reps_open(bp); + if (bp->ptp_cfg) + atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS); bnxt_ptp_init_rtc(bp, true); bnxt_ptp_cfg_tstamp_filters(bp); + if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) + bnxt_hwrm_realloc_rss_ctx_vnic(bp); bnxt_cfg_usr_fltrs(bp); return 0;
@@@ -11910,6 -12130,8 +12132,8 @@@ static void __bnxt_close_nic(struct bnx while (bnxt_drv_busy(bp)) msleep(20);
+ if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) + bnxt_clear_rss_ctxs(bp, false); /* Flush rings and disable interrupts */ bnxt_shutdown_nic(bp, irq_re_init);
@@@ -12407,33 -12629,26 +12631,26 @@@ static bool bnxt_rfs_supported(struct b }
/* If runtime conditions support RFS */ - static bool bnxt_rfs_capable(struct bnxt *bp) + bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx) { struct bnxt_hw_rings hwr = {0}; int max_vnics, max_rss_ctxs;
- hwr.rss_ctx = 1; - if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { - /* 2 VNICS: default + Ntuple */ - hwr.vnic = 2; - hwr.rss_ctx = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) * - hwr.vnic; - goto check_reserve_vnic; - } - if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && + !BNXT_SUPPORTS_NTUPLE_VNIC(bp)) return bnxt_rfs_supported(bp); + if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) return false;
- hwr.vnic = 1 + bp->rx_nr_rings; - check_reserve_vnic: + hwr.grp = bp->rx_nr_rings; + hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings); + if (new_rss_ctx) + hwr.vnic++; + hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); max_vnics = bnxt_get_max_func_vnics(bp); max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
- if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && - !(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)) - hwr.rss_ctx = hwr.vnic; - if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) { if (bp->rx_nr_rings > 1) netdev_warn(bp->dev, @@@ -12467,7 -12682,7 +12684,7 @@@ static netdev_features_t bnxt_fix_featu struct bnxt *bp = netdev_priv(dev); netdev_features_t vlan_features;
- if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) + if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false)) features &= ~NETIF_F_NTUPLE;
if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog) @@@ -13603,7 -13818,7 +13820,7 @@@ static void bnxt_set_dflt_rfs(struct bn bp->flags &= ~BNXT_FLAG_RFS; if (bnxt_rfs_supported(bp)) { dev->hw_features |= NETIF_F_NTUPLE; - if (bnxt_rfs_capable(bp)) { + if (bnxt_rfs_capable(bp, false)) { bp->flags |= BNXT_FLAG_RFS; dev->features |= NETIF_F_NTUPLE; } @@@ -14456,12 -14671,9 +14673,9 @@@ static int bnxt_bridge_setlink(struct n if (!br_spec) return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) { + nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) { u16 mode;
- if (nla_type(attr) != IFLA_BRIDGE_MODE) - continue; - mode = nla_get_u16(attr); if (mode == bp->br_mode) break; @@@ -14603,6 -14815,8 +14817,8 @@@ static void bnxt_remove_one(struct pci_ unregister_netdev(dev); bnxt_free_l2_filters(bp, true); bnxt_free_ntp_fltrs(bp, true); + if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) + bnxt_clear_rss_ctxs(bp, true); clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); /* Flush any pending tasks */ cancel_work_sync(&bp->sp_task); @@@ -15061,7 -15275,7 +15277,7 @@@ static int bnxt_init_one(struct pci_de bp->flags |= BNXT_FLAG_CHIP_P7; }
- rc = bnxt_alloc_rss_indir_tbl(bp); + rc = bnxt_alloc_rss_indir_tbl(bp, NULL); if (rc) goto init_err_pci_clean;
@@@ -15214,6 -15428,9 +15430,9 @@@
INIT_LIST_HEAD(&bp->usr_fltr_list);
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) + bnxt_init_multi_rss_ctx(bp); + rc = register_netdev(dev); if (rc) goto init_err_cleanup; @@@ -15234,6 -15451,8 +15453,8 @@@ init_err_dl bnxt_clear_int_mode(bp);
init_err_pci_clean: + if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) + bnxt_clear_rss_ctxs(bp, true); bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_resources(bp); bnxt_hwmon_uninit(bp); @@@ -15421,6 -15640,10 +15642,10 @@@ static pci_ers_result_t bnxt_io_slot_re
netdev_info(bp->dev, "PCI Slot Reset\n");
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && + test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state)) + msleep(900); + rtnl_lock();
if (pci_enable_device(pdev)) { diff --combined drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 195c02dc06830,86dcd2c76587b..01508b4b8fb7a --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@@ -71,7 -71,7 +71,7 @@@ int bnxt_register_dev(struct bnxt_en_de rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
if (test_bit(BNXT_STATE_OPEN, &bp->state)) - bnxt_hwrm_vnic_cfg(bp, 0); + bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[BNXT_VNIC_DEFAULT]);
bnxt_fill_msix_vecs(bp, bp->edev->msix_entries); edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; @@@ -210,9 -210,6 +210,9 @@@ void bnxt_ulp_start(struct bnxt *bp, in if (err) return;
+ if (edev->ulp_tbl->msix_requested) + bnxt_fill_msix_vecs(bp, edev->msix_entries); + if (aux_priv) { struct auxiliary_device *adev;
@@@ -395,13 -392,12 +395,13 @@@ void bnxt_rdma_aux_device_init(struct b if (!edev) goto aux_dev_uninit;
+ aux_priv->edev = edev; + ulp = kzalloc(sizeof(*ulp), GFP_KERNEL); if (!ulp) goto aux_dev_uninit;
edev->ulp_tbl = ulp; - aux_priv->edev = edev; bp->edev = edev; bnxt_set_edev_info(edev, bp);
diff --combined drivers/net/ethernet/realtek/r8169_main.c index fc8e6771ea9ff,60540ca634646..2c91ce8471bf8 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@@ -2227,6 -2227,8 +2227,8 @@@ static enum mac_version rtl8169_get_mac * the wild. Let's disable detection. * { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 }, */ + /* Realtek calls it RTL8168M, but it's handled like RTL8168H */ + { 0x7cf, 0x6c0, RTL_GIGA_MAC_VER_46 },
/* 8168G family. */ { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 }, @@@ -5099,7 -5101,7 +5101,7 @@@ static int rtl_alloc_irq(struct rtl8169 rtl_lock_config_regs(tp); fallthrough; case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17: - flags = PCI_IRQ_LEGACY; + flags = PCI_IRQ_INTX; break; default: flags = PCI_IRQ_ALL_TYPES; diff --combined drivers/net/geneve.c index 6c2835086b57e,163f94a5a58f3..f918ca6146c82 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@@ -225,10 -225,11 +225,11 @@@ static void geneve_rx(struct geneve_de void *oiph;
if (ip_tunnel_collect_metadata() || gs->collect_md) { - __be16 flags; + IP_TUNNEL_DECLARE_FLAGS(flags) = { };
- flags = TUNNEL_KEY | (gnvh->oam ? TUNNEL_OAM : 0) | - (gnvh->critical ? TUNNEL_CRIT_OPT : 0); + __set_bit(IP_TUNNEL_KEY_BIT, flags); + __assign_bit(IP_TUNNEL_OAM_BIT, flags, gnvh->oam); + __assign_bit(IP_TUNNEL_CRIT_OPT_BIT, flags, gnvh->critical);
tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags, vni_to_tunnel_id(gnvh->vni), @@@ -238,9 -239,11 +239,11 @@@ goto drop; } /* Update tunnel dst according to Geneve options. */ + ip_tunnel_flags_zero(flags); + __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, flags); ip_tunnel_info_opts_set(&tun_dst->u.tun_info, gnvh->options, gnvh->opt_len * 4, - TUNNEL_GENEVE_OPT); + flags); } else { /* Drop packets w/ critical options, * since we don't support any... @@@ -745,14 -748,15 +748,15 @@@ static void geneve_build_header(struct { geneveh->ver = GENEVE_VER; geneveh->opt_len = info->options_len / 4; - geneveh->oam = !!(info->key.tun_flags & TUNNEL_OAM); - geneveh->critical = !!(info->key.tun_flags & TUNNEL_CRIT_OPT); + geneveh->oam = test_bit(IP_TUNNEL_OAM_BIT, info->key.tun_flags); + geneveh->critical = test_bit(IP_TUNNEL_CRIT_OPT_BIT, + info->key.tun_flags); geneveh->rsvd1 = 0; tunnel_id_to_vni(info->key.tun_id, geneveh->vni); geneveh->proto_type = inner_proto; geneveh->rsvd2 = 0;
- if (info->key.tun_flags & TUNNEL_GENEVE_OPT) + if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags)) ip_tunnel_info_opts_get(geneveh->options, info); }
@@@ -761,7 -765,7 +765,7 @@@ static int geneve_build_skb(struct dst_ bool xnet, int ip_hdr_len, bool inner_proto_inherit) { - bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); + bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); struct genevehdr *gnvh; __be16 inner_proto; int min_headroom; @@@ -822,7 -826,7 +826,7 @@@ static int geneve_xmit_skb(struct sk_bu __be16 sport; int err;
- if (!pskb_inet_may_pull(skb)) + if (!skb_vlan_inet_prepare(skb)) return -EINVAL;
if (!gs4) @@@ -878,7 -882,8 +882,8 @@@ if (geneve->cfg.collect_md) { ttl = key->ttl;
- df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; + df = test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags) ? + htons(IP_DF) : 0; } else { if (geneve->cfg.ttl_inherit) ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb); @@@ -910,7 -915,8 +915,8 @@@ udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, saddr, info->key.u.ipv4.dst, tos, ttl, df, sport, geneve->cfg.info.key.tp_dst, !net_eq(geneve->net, dev_net(geneve->dev)), - !(info->key.tun_flags & TUNNEL_CSUM)); + !test_bit(IP_TUNNEL_CSUM_BIT, + info->key.tun_flags)); return 0; }
@@@ -929,7 -935,7 +935,7 @@@ static int geneve6_xmit_skb(struct sk_b __be16 sport; int err;
- if (!pskb_inet_may_pull(skb)) + if (!skb_vlan_inet_prepare(skb)) return -EINVAL;
if (!gs6) @@@ -998,7 -1004,8 +1004,8 @@@ udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev, &saddr, &key->u.ipv6.dst, prio, ttl, info->key.label, sport, geneve->cfg.info.key.tp_dst, - !(info->key.tun_flags & TUNNEL_CSUM)); + !test_bit(IP_TUNNEL_CSUM_BIT, + info->key.tun_flags)); return 0; } #endif @@@ -1297,7 -1304,8 +1304,8 @@@ static struct geneve_dev *geneve_find_d
static bool is_tnl_info_zero(const struct ip_tunnel_info *info) { - return !(info->key.tun_id || info->key.tun_flags || info->key.tos || + return !(info->key.tun_id || info->key.tos || + !ip_tunnel_flags_empty(info->key.tun_flags) || info->key.ttl || info->key.label || info->key.tp_src || memchr_inv(&info->key.u, 0, sizeof(info->key.u))); } @@@ -1435,7 -1443,7 +1443,7 @@@ static int geneve_nl2info(struct nlatt "Remote IPv6 address cannot be Multicast"); return -EINVAL; } - info->key.tun_flags |= TUNNEL_CSUM; + __set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); cfg->use_udp6_rx_checksums = true; #else NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6], @@@ -1510,7 -1518,7 +1518,7 @@@ goto change_notsup; } if (nla_get_u8(data[IFLA_GENEVE_UDP_CSUM])) - info->key.tun_flags |= TUNNEL_CSUM; + __set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); }
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) { @@@ -1520,7 -1528,7 +1528,7 @@@ goto change_notsup; } if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX])) - info->key.tun_flags &= ~TUNNEL_CSUM; + __clear_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags); #else NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX], "IPv6 support not enabled in the kernel"); @@@ -1753,7 -1761,8 +1761,8 @@@ static int geneve_fill_info(struct sk_b info->key.u.ipv4.dst)) goto nla_put_failure; if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM, - !!(info->key.tun_flags & TUNNEL_CSUM))) + test_bit(IP_TUNNEL_CSUM_BIT, + info->key.tun_flags))) goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6) @@@ -1762,7 -1771,8 +1771,8 @@@ &info->key.u.ipv6.dst)) goto nla_put_failure; if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX, - !(info->key.tun_flags & TUNNEL_CSUM))) + !test_bit(IP_TUNNEL_CSUM_BIT, + info->key.tun_flags))) goto nla_put_failure; #endif } diff --combined include/linux/cpumask.h index 121f3ac757ffe,6519f9c777095..3fbf82747de37 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@@ -388,29 -388,6 +388,29 @@@ unsigned int cpumask_any_but(const stru return i; }
+/** + * cpumask_any_and_but - pick a "random" cpu from *mask1 & *mask2, but not this one. + * @mask1: the first input cpumask + * @mask2: the second input cpumask + * @cpu: the cpu to ignore + * + * Returns >= nr_cpu_ids if no cpus set. + */ +static inline +unsigned int cpumask_any_and_but(const struct cpumask *mask1, + const struct cpumask *mask2, + unsigned int cpu) +{ + unsigned int i; + + cpumask_check(cpu); + i = cpumask_first_and(mask1, mask2); + if (i != cpu) + return i; + + return cpumask_next_and(cpu, mask1, mask2); +} + /** * cpumask_nth - get the Nth cpu in a cpumask * @srcp: the cpumask pointer @@@ -876,7 -853,7 +876,7 @@@ static inline int cpulist_parse(const c */ static inline unsigned int cpumask_size(void) { - return BITS_TO_LONGS(large_cpumask_bits) * sizeof(long); + return bitmap_size(large_cpumask_bits); }
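cpumask_any_and_but() fills an obvious gap next to cpumask_any_but(): pick an arbitrary CPU in the intersection of two masks while skipping one specific CPU, without allocating a temporary mask for the AND. A hedged usage sketch; dev_mask and the surrounding policy are hypothetical, only the helper and its ">= nr_cpu_ids means none found" convention come from the hunk above:

  /* hypothetical: push work off the local CPU, but stay inside both
   * the device's affinity mask and the set of online CPUs
   */
  unsigned int cpu = smp_processor_id();
  unsigned int target;

  target = cpumask_any_and_but(dev_mask, cpu_online_mask, cpu);
  if (target >= nr_cpu_ids)
          target = cpu;        /* no other eligible CPU, stay local */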
/* diff --combined include/net/ip_tunnels.h index c286cc2e766ee,d8f574fbb11ef..9a6a08ec77139 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@@ -36,6 -36,24 +36,24 @@@ (sizeof_field(struct ip_tunnel_key, u) - \ sizeof_field(struct ip_tunnel_key, u.ipv4))
+ #define __ipt_flag_op(op, ...) \ + op(__VA_ARGS__, __IP_TUNNEL_FLAG_NUM) + + #define IP_TUNNEL_DECLARE_FLAGS(...) \ + __ipt_flag_op(DECLARE_BITMAP, __VA_ARGS__) + + #define ip_tunnel_flags_zero(...) __ipt_flag_op(bitmap_zero, __VA_ARGS__) + #define ip_tunnel_flags_copy(...) __ipt_flag_op(bitmap_copy, __VA_ARGS__) + #define ip_tunnel_flags_and(...) __ipt_flag_op(bitmap_and, __VA_ARGS__) + #define ip_tunnel_flags_or(...) __ipt_flag_op(bitmap_or, __VA_ARGS__) + + #define ip_tunnel_flags_empty(...) \ + __ipt_flag_op(bitmap_empty, __VA_ARGS__) + #define ip_tunnel_flags_intersect(...) \ + __ipt_flag_op(bitmap_intersects, __VA_ARGS__) + #define ip_tunnel_flags_subset(...) \ + __ipt_flag_op(bitmap_subset, __VA_ARGS__) + struct ip_tunnel_key { __be64 tun_id; union { @@@ -48,11 -66,11 +66,11 @@@ struct in6_addr dst; } ipv6; } u; - __be16 tun_flags; - u8 tos; /* TOS for IPv4, TC for IPv6 */ - u8 ttl; /* TTL for IPv4, HL for IPv6 */ + IP_TUNNEL_DECLARE_FLAGS(tun_flags); __be32 label; /* Flow Label for IPv6 */ u32 nhid; + u8 tos; /* TOS for IPv4, TC for IPv6 */ + u8 ttl; /* TTL for IPv4, HL for IPv6 */ __be16 tp_src; __be16 tp_dst; __u8 flow_flags; @@@ -110,6 -128,17 +128,17 @@@ struct ip_tunnel_prl_entry
struct metadata_dst;
+ /* Kernel-side variant of ip_tunnel_parm */ + struct ip_tunnel_parm_kern { + char name[IFNAMSIZ]; + IP_TUNNEL_DECLARE_FLAGS(i_flags); + IP_TUNNEL_DECLARE_FLAGS(o_flags); + __be32 i_key; + __be32 o_key; + int link; + struct iphdr iph; + }; + struct ip_tunnel { struct ip_tunnel __rcu *next; struct hlist_node hash_node; @@@ -136,7 -165,7 +165,7 @@@
struct dst_cache dst_cache;
- struct ip_tunnel_parm parms; + struct ip_tunnel_parm_kern parms;
int mlink; int encap_hlen; /* Encap header length (FOU,GUE) */ @@@ -157,7 -186,7 +186,7 @@@ };
struct tnl_ptk_info { - __be16 flags; + IP_TUNNEL_DECLARE_FLAGS(flags); __be16 proto; __be32 key; __be32 seq; @@@ -179,11 -208,80 +208,80 @@@ struct ip_tunnel_net int type; };
+ static inline void ip_tunnel_set_options_present(unsigned long *flags) + { + IP_TUNNEL_DECLARE_FLAGS(present) = { }; + + __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present); + __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present); + __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present); + __set_bit(IP_TUNNEL_GTP_OPT_BIT, present); + __set_bit(IP_TUNNEL_PFCP_OPT_BIT, present); + + ip_tunnel_flags_or(flags, flags, present); + } + + static inline void ip_tunnel_clear_options_present(unsigned long *flags) + { + IP_TUNNEL_DECLARE_FLAGS(present) = { }; + + __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present); + __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present); + __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present); + __set_bit(IP_TUNNEL_GTP_OPT_BIT, present); + __set_bit(IP_TUNNEL_PFCP_OPT_BIT, present); + + __ipt_flag_op(bitmap_andnot, flags, flags, present); + } + + static inline bool ip_tunnel_is_options_present(const unsigned long *flags) + { + IP_TUNNEL_DECLARE_FLAGS(present) = { }; + + __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, present); + __set_bit(IP_TUNNEL_VXLAN_OPT_BIT, present); + __set_bit(IP_TUNNEL_ERSPAN_OPT_BIT, present); + __set_bit(IP_TUNNEL_GTP_OPT_BIT, present); + __set_bit(IP_TUNNEL_PFCP_OPT_BIT, present); + + return ip_tunnel_flags_intersect(flags, present); + } + + static inline bool ip_tunnel_flags_is_be16_compat(const unsigned long *flags) + { + IP_TUNNEL_DECLARE_FLAGS(supp) = { }; + + bitmap_set(supp, 0, BITS_PER_TYPE(__be16)); + __set_bit(IP_TUNNEL_VTI_BIT, supp); + + return ip_tunnel_flags_subset(flags, supp); + } + + static inline void ip_tunnel_flags_from_be16(unsigned long *dst, __be16 flags) + { + ip_tunnel_flags_zero(dst); + + bitmap_write(dst, be16_to_cpu(flags), 0, BITS_PER_TYPE(__be16)); + __assign_bit(IP_TUNNEL_VTI_BIT, dst, flags & VTI_ISVTI); + } + + static inline __be16 ip_tunnel_flags_to_be16(const unsigned long *flags) + { + __be16 ret; + + ret = cpu_to_be16(bitmap_read(flags, 0, BITS_PER_TYPE(__be16))); + if (test_bit(IP_TUNNEL_VTI_BIT, flags)) + ret |= VTI_ISVTI; + + return ret; + } + static inline void ip_tunnel_key_init(struct ip_tunnel_key *key, __be32 saddr, __be32 daddr, u8 tos, u8 ttl, __be32 label, __be16 tp_src, __be16 tp_dst, - __be64 tun_id, __be16 tun_flags) + __be64 tun_id, + const unsigned long *tun_flags) { key->tun_id = tun_id; key->u.ipv4.src = saddr; @@@ -193,7 -291,7 +291,7 @@@ key->tos = tos; key->ttl = ttl; key->label = label; - key->tun_flags = tun_flags; + ip_tunnel_flags_copy(key->tun_flags, tun_flags);
/* For the tunnel types on the top of IPsec, the tp_src and tp_dst of * the upper tunnel are used. @@@ -214,12 -312,8 +312,8 @@@ ip_tunnel_dst_cache_usable(const struc { if (skb->mark) return false; - if (!info) - return true; - if (info->key.tun_flags & TUNNEL_NOCACHE) - return false;
- return true; + return !info || !test_bit(IP_TUNNEL_NOCACHE_BIT, info->key.tun_flags); }
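The _is_be16_compat()/_from_be16()/_to_be16() helpers above exist because the user-visible ABI still carries tunnel flags as a __be16 while the kernel now tracks them in a bitmap; VTI_ISVTI is special-cased because its flag is remapped onto IP_TUNNEL_VTI_BIT outside the low 16 bits. A sketch of the intended round-trip at such a boundary (the -EOVERFLOW choice is hypothetical):

  IP_TUNNEL_DECLARE_FLAGS(flags) = { };
  __be16 user_flags;

  /* kernel -> user: only legal while nothing beyond the 16 legacy
   * bits (plus the VTI marker) is set
   */
  if (!ip_tunnel_flags_is_be16_compat(flags))
          return -EOVERFLOW;
  user_flags = ip_tunnel_flags_to_be16(flags);

  /* user -> kernel: always safe; VTI_ISVTI lands on IP_TUNNEL_VTI_BIT */
  ip_tunnel_flags_from_be16(flags, user_flags);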
static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info @@@ -291,14 -385,18 +385,18 @@@ void ip_tunnel_xmit(struct sk_buff *skb const struct iphdr *tnl_params, const u8 protocol); void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, const u8 proto, int tunnel_hlen); - int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); + int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p, + int cmd); + bool ip_tunnel_parm_from_user(struct ip_tunnel_parm_kern *kp, + const void __user *data); + bool ip_tunnel_parm_to_user(void __user *data, struct ip_tunnel_parm_kern *kp); int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd); int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict); int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, - int link, __be16 flags, + int link, const unsigned long *flags, __be32 remote, __be32 local, __be32 key);
@@@ -307,16 -405,16 +405,16 @@@ int ip_tunnel_rcv(struct ip_tunnel *tun const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst, bool log_ecn_error); int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[], - struct ip_tunnel_parm *p, __u32 fwmark); + struct ip_tunnel_parm_kern *p, __u32 fwmark); int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], - struct ip_tunnel_parm *p, __u32 fwmark); + struct ip_tunnel_parm_kern *p, __u32 fwmark); void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);
bool ip_tunnel_netlink_encap_parms(struct nlattr *data[], struct ip_tunnel_encap *encap);
void ip_tunnel_netlink_parms(struct nlattr *data[], - struct ip_tunnel_parm *parms); + struct ip_tunnel_parm_kern *parms);
extern const struct header_ops ip_tunnel_header_ops; __be16 ip_tunnel_parse_protocol(const struct sk_buff *skb); @@@ -361,39 -459,6 +459,39 @@@ static inline bool pskb_inet_may_pull(s return pskb_network_may_pull(skb, nhlen); }
+/* Variant of pskb_inet_may_pull(). + */ +static inline bool skb_vlan_inet_prepare(struct sk_buff *skb) +{ + int nhlen = 0, maclen = ETH_HLEN; + __be16 type = skb->protocol; + + /* Essentially this is skb_protocol(skb, true) + * And we get MAC len. + */ + if (eth_type_vlan(type)) + type = __vlan_get_protocol(skb, type, &maclen); + + switch (type) { +#if IS_ENABLED(CONFIG_IPV6) + case htons(ETH_P_IPV6): + nhlen = sizeof(struct ipv6hdr); + break; +#endif + case htons(ETH_P_IP): + nhlen = sizeof(struct iphdr); + break; + } + /* For ETH_P_IPV6/ETH_P_IP we make sure to pull + * a base network header in skb->head. + */ + if (!pskb_may_pull(skb, maclen + nhlen)) + return false; + + skb_set_network_header(skb, maclen); + return true; +} + static inline int ip_encap_hlen(struct ip_tunnel_encap *e) { const struct ip_tunnel_encap_ops *ops; @@@ -547,12 -612,13 +645,13 @@@ static inline void ip_tunnel_info_opts_
static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info, const void *from, int len, - __be16 flags) + const unsigned long *flags) { info->options_len = len; if (len > 0) { memcpy(ip_tunnel_info_opts(info), from, len); - info->key.tun_flags |= flags; + ip_tunnel_flags_or(info->key.tun_flags, info->key.tun_flags, + flags); } }
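skb_vlan_inet_prepare(), added a few hunks up, is the reason for the two one-line geneve conversions at the top of this mail: unlike pskb_inet_may_pull() it first resolves stacked VLAN tags via __vlan_get_protocol(), so the network header offset it sets accounts for the real MAC length and ip_hdr()/ipv6_hdr() are safe to dereference even for tagged frames. The call pattern in a tunnel transmit path stays the same:

  /* before touching ip_hdr()/ipv6_hdr() on the inner packet */
  if (!skb_vlan_inet_prepare(skb))
          return -EINVAL;   /* frame too short to hold its headers */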
@@@ -596,7 -662,7 +695,7 @@@ static inline void ip_tunnel_info_opts_
static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info, const void *from, int len, - __be16 flags) + const unsigned long *flags) { info->options_len = 0; } diff --combined include/net/mac80211.h index 2d7f87bc5324b,f57c29de3a91a..645ceeb1be67b --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@@ -361,7 -361,7 +361,7 @@@ struct ieee80211_vif_chanctx_switch * @BSS_CHANGED_UNSOL_BCAST_PROBE_RESP: Unsolicited broadcast probe response * status changed. * @BSS_CHANGED_MLD_VALID_LINKS: MLD valid links status changed. - * @BSS_CHANGED_MLD_TTLM: TID to link mapping was changed + * @BSS_CHANGED_MLD_TTLM: negotiated TID to link mapping was changed */ enum ieee80211_bss_change { BSS_CHANGED_ASSOC = 1<<0, @@@ -953,8 -953,6 +953,8 @@@ enum mac80211_tx_info_flags * of their QoS TID or other priority field values. * @IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX: first MLO TX, used mostly internally * for sequence number assignment + * @IEEE80211_TX_CTRL_SCAN_TX: Indicates that this frame is transmitted + * due to scanning, not in normal operation on the interface. * @IEEE80211_TX_CTRL_MLO_LINK: If not @IEEE80211_LINK_UNSPECIFIED, this * frame should be transmitted on the specific link. This really is * only relevant for frames that do not have data present, and is @@@ -975,7 -973,6 +975,7 @@@ enum mac80211_tx_control_flags IEEE80211_TX_CTRL_NO_SEQNO = BIT(7), IEEE80211_TX_CTRL_DONT_REORDER = BIT(8), IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX = BIT(9), + IEEE80211_TX_CTRL_SCAN_TX = BIT(10), IEEE80211_TX_CTRL_MLO_LINK = 0xf0000000, };
@@@ -1924,10 -1921,12 +1924,12 @@@ enum ieee80211_neg_ttlm_res * @active_links: The bitmap of active links, or 0 for non-MLO. * The driver shouldn't change this directly, but use the * API calls meant for that purpose. - * @dormant_links: bitmap of valid but disabled links, or 0 for non-MLO. - * Must be a subset of valid_links. + * @dormant_links: subset of the valid links that are disabled/suspended + * due to advertised or negotiated TTLM respectively. + * 0 for non-MLO. * @suspended_links: subset of dormant_links representing links that are - * suspended. + * suspended due to negotiated TTLM, and could be activated in the + * future by tearing down the TTLM negotiation. * 0 for non-MLO. * @neg_ttlm: negotiated TID to link mapping info. * see &struct ieee80211_neg_ttlm. @@@ -2780,6 -2779,8 +2782,8 @@@ struct ieee80211_txq * * @IEEE80211_HW_DISALLOW_PUNCTURING: HW requires disabling puncturing in EHT * and connecting with a lower bandwidth instead + * @IEEE80211_HW_DISALLOW_PUNCTURING_5GHZ: HW requires disabling puncturing in + * EHT in 5 GHz and connecting with a lower bandwidth instead * * @IEEE80211_HW_HANDLES_QUIET_CSA: HW/driver handles quieting for CSA, so * no need to stop queues. This really should be set by a driver that @@@ -2844,6 -2845,7 +2848,7 @@@ enum ieee80211_hw_flags IEEE80211_HW_DETECTS_COLOR_COLLISION, IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX, IEEE80211_HW_DISALLOW_PUNCTURING, + IEEE80211_HW_DISALLOW_PUNCTURING_5GHZ, IEEE80211_HW_HANDLES_QUIET_CSA,
/* keep last, obviously */ @@@ -7589,6 -7591,15 +7594,15 @@@ int ieee80211_set_active_links(struct i void ieee80211_set_active_links_async(struct ieee80211_vif *vif, u16 active_links);
+ /** + * ieee80211_send_teardown_neg_ttlm - tear down a negotiated TTLM request + * @vif: the interface on which the tear down request should be sent. + * + * This function can be used to tear down a previously accepted negotiated + * TTLM request. + */ + void ieee80211_send_teardown_neg_ttlm(struct ieee80211_vif *vif); + /* for older drivers - let's not document these ... */ int ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx); diff --combined kernel/bpf/syscall.c index 9cb89e875f0d3,c0f2f052a02cf..03e291f0e9db0 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@@ -980,7 -980,7 +980,7 @@@ static unsigned long bpf_get_unmapped_a if (map->ops->map_get_unmapped_area) return map->ops->map_get_unmapped_area(filp, addr, len, pgoff, flags); #ifdef CONFIG_MMU - return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); + return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags); #else return addr; #endif @@@ -3498,17 -3498,12 +3498,12 @@@ out_put_prog return err; }
- struct bpf_raw_tp_link { - struct bpf_link link; - struct bpf_raw_event_map *btp; - }; - static void bpf_raw_tp_link_release(struct bpf_link *link) { struct bpf_raw_tp_link *raw_tp = container_of(link, struct bpf_raw_tp_link, link);
- bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog); + bpf_probe_unregister(raw_tp->btp, raw_tp); bpf_put_raw_tracepoint(raw_tp->btp); }
@@@ -3808,7 -3803,7 +3803,7 @@@ static int bpf_perf_link_attach(const u #endif /* CONFIG_PERF_EVENTS */
static int bpf_raw_tp_link_attach(struct bpf_prog *prog, - const char __user *user_tp_name) + const char __user *user_tp_name, u64 cookie) { struct bpf_link_primer link_primer; struct bpf_raw_tp_link *link; @@@ -3855,6 -3850,7 +3850,7 @@@ bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT, &bpf_raw_tp_link_lops, prog); link->btp = btp; + link->cookie = cookie;
err = bpf_link_prime(&link->link, &link_primer); if (err) { @@@ -3862,7 -3858,7 +3858,7 @@@ goto out_put_btp; }
- err = bpf_probe_register(link->btp, prog); + err = bpf_probe_register(link->btp, link); if (err) { bpf_link_cleanup(&link_primer); goto out_put_btp; @@@ -3875,11 -3871,13 +3871,13 @@@ out_put_btp return err; }
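The new cookie argument threads a user-chosen u64 from BPF_RAW_TRACEPOINT_OPEN (and from link_create for BPF_TRACE_RAW_TP, per the hunk below) into struct bpf_raw_tp_link, mirroring what other link types already support; presumably the program reads it back with the bpf_get_attach_cookie() helper, as elsewhere. A hedged userspace sketch using the raw syscall; only the cookie field is new here:

  #include <linux/bpf.h>
  #include <string.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static int open_raw_tp(int prog_fd, const char *tp_name, __u64 cookie)
  {
          union bpf_attr attr;

          memset(&attr, 0, sizeof(attr));
          attr.raw_tracepoint.name = (__u64)(unsigned long)tp_name;
          attr.raw_tracepoint.prog_fd = prog_fd;
          attr.raw_tracepoint.cookie = cookie;  /* new in this series */

          return syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN,
                         &attr, sizeof(attr));
  }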
- #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd + #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.cookie
static int bpf_raw_tracepoint_open(const union bpf_attr *attr) { struct bpf_prog *prog; + void __user *tp_name; + __u64 cookie; int fd;
if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN)) @@@ -3889,7 -3887,9 +3887,9 @@@ if (IS_ERR(prog)) return PTR_ERR(prog);
- fd = bpf_raw_tp_link_attach(prog, u64_to_user_ptr(attr->raw_tracepoint.name)); + tp_name = u64_to_user_ptr(attr->raw_tracepoint.name); + cookie = attr->raw_tracepoint.cookie; + fd = bpf_raw_tp_link_attach(prog, tp_name, cookie); if (fd < 0) bpf_prog_put(prog); return fd; @@@ -5227,7 -5227,7 +5227,7 @@@ static int link_create(union bpf_attr * goto out; } if (prog->expected_attach_type == BPF_TRACE_RAW_TP) - ret = bpf_raw_tp_link_attach(prog, NULL); + ret = bpf_raw_tp_link_attach(prog, NULL, attr->link_create.tracing.cookie); else if (prog->expected_attach_type == BPF_TRACE_ITER) ret = bpf_iter_link_attach(attr, uattr, prog); else if (prog->expected_attach_type == BPF_LSM_CGROUP) diff --combined net/batman-adv/translation-table.c index 2243cec18ecc8,0555cb6114898..b21ff3c36b07d --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@@ -208,20 -208,6 +208,6 @@@ batadv_tt_global_hash_find(struct batad return tt_global_entry; }
- /** - * batadv_tt_local_entry_free_rcu() - free the tt_local_entry - * @rcu: rcu pointer of the tt_local_entry - */ - static void batadv_tt_local_entry_free_rcu(struct rcu_head *rcu) - { - struct batadv_tt_local_entry *tt_local_entry; - - tt_local_entry = container_of(rcu, struct batadv_tt_local_entry, - common.rcu); - - kmem_cache_free(batadv_tl_cache, tt_local_entry); - } - /** * batadv_tt_local_entry_release() - release tt_local_entry from lists and queue * for free after rcu grace period @@@ -236,7 -222,7 +222,7 @@@ static void batadv_tt_local_entry_relea
batadv_softif_vlan_put(tt_local_entry->vlan);
- call_rcu(&tt_local_entry->common.rcu, batadv_tt_local_entry_free_rcu); + kfree_rcu(tt_local_entry, common.rcu); }
/** @@@ -254,20 -240,6 +240,6 @@@ batadv_tt_local_entry_put(struct batadv batadv_tt_local_entry_release); }
- /** - * batadv_tt_global_entry_free_rcu() - free the tt_global_entry - * @rcu: rcu pointer of the tt_global_entry - */ - static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu) - { - struct batadv_tt_global_entry *tt_global_entry; - - tt_global_entry = container_of(rcu, struct batadv_tt_global_entry, - common.rcu); - - kmem_cache_free(batadv_tg_cache, tt_global_entry); - } - /** * batadv_tt_global_entry_release() - release tt_global_entry from lists and * queue for free after rcu grace period @@@ -282,7 -254,7 +254,7 @@@ void batadv_tt_global_entry_release(str
batadv_tt_global_del_orig_list(tt_global_entry);
- call_rcu(&tt_global_entry->common.rcu, batadv_tt_global_entry_free_rcu); + kfree_rcu(tt_global_entry, common.rcu); }
/** @@@ -407,19 -379,6 +379,6 @@@ static void batadv_tt_global_size_dec(s batadv_tt_global_size_mod(orig_node, vid, -1); }
- /** - * batadv_tt_orig_list_entry_free_rcu() - free the orig_entry - * @rcu: rcu pointer of the orig_entry - */ - static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu) - { - struct batadv_tt_orig_list_entry *orig_entry; - - orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu); - - kmem_cache_free(batadv_tt_orig_cache, orig_entry); - } - /** * batadv_tt_orig_list_entry_release() - release tt orig entry from lists and * queue for free after rcu grace period @@@ -433,7 -392,7 +392,7 @@@ static void batadv_tt_orig_list_entry_r refcount);
batadv_orig_node_put(orig_entry->orig_node); - call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu); + kfree_rcu(orig_entry, rcu); }
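All three batman-adv conversions above follow the same recipe: when the RCU callback does nothing but free the object, the hand-written call_rcu() callback plus its container_of() boilerplate collapses into kfree_rcu() with the member name of the embedded rcu_head. This works for the kmem_cache-backed entries here because kfree() also accepts slab-cache allocations, and nested members are fine, as kfree_rcu(tt_local_entry, common.rcu) shows. A generic sketch:

  struct foo {
          int data;
          struct rcu_head rcu;
  };

  /* before: a callback whose only job is to kfree() */
  static void foo_free_rcu(struct rcu_head *rcu)
  {
          kfree(container_of(rcu, struct foo, rcu));
  }

  /* release site, before: */
  call_rcu(&f->rcu, foo_free_rcu);

  /* release site, after -- foo_free_rcu() can be deleted: */
  kfree_rcu(f, rcu);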
/** @@@ -3948,7 -3907,7 +3907,7 @@@ void batadv_tt_local_resize_to_mtu(stru
spin_lock_bh(&bat_priv->tt.commit_lock);
- while (true) { + while (timeout) { table_size = batadv_tt_local_table_transmit_size(bat_priv); if (packet_size_max >= table_size) break; diff --combined net/ipv4/tcp.c index e767721b3a588,b07aa71b24ec1..6cb5b9f74c94b --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@@ -290,6 -290,9 +290,9 @@@ enum DEFINE_PER_CPU(unsigned int, tcp_orphan_count); EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);
+ DEFINE_PER_CPU(u32, tcp_tw_isn); + EXPORT_PER_CPU_SYMBOL_GPL(tcp_tw_isn); + long sysctl_tcp_mem[3] __read_mostly; EXPORT_SYMBOL(sysctl_tcp_mem);
@@@ -1721,7 -1724,7 +1724,7 @@@ int tcp_set_rcvlowat(struct sock *sk, i space = tcp_space_from_win(sk, val); if (space > sk->sk_rcvbuf) { WRITE_ONCE(sk->sk_rcvbuf, space); - tcp_sk(sk)->window_clamp = val; + WRITE_ONCE(tcp_sk(sk)->window_clamp, val); } return 0; } @@@ -3379,7 -3382,7 +3382,7 @@@ int tcp_set_window_clamp(struct sock *s if (!val) { if (sk->sk_state != TCP_CLOSE) return -EINVAL; - tp->window_clamp = 0; + WRITE_ONCE(tp->window_clamp, 0); } else { u32 new_rcv_ssthresh, old_window_clamp = tp->window_clamp; u32 new_window_clamp = val < SOCK_MIN_RCVBUF / 2 ? @@@ -3388,7 -3391,7 +3391,7 @@@ if (new_window_clamp == old_window_clamp) return 0;
- tp->window_clamp = new_window_clamp; + WRITE_ONCE(tp->window_clamp, new_window_clamp); if (new_window_clamp < old_window_clamp) { /* need to apply the reserved mem provisioning only * when shrinking the window clamp @@@ -4057,7 -4060,7 +4060,7 @@@ int do_tcp_getsockopt(struct sock *sk, TCP_RTO_MAX / HZ); break; case TCP_WINDOW_CLAMP: - val = tp->window_clamp; + val = READ_ONCE(tp->window_clamp); break; case TCP_INFO: { struct tcp_info info; @@@ -4648,16 -4651,16 +4651,16 @@@ static void __init tcp_struct_check(voi CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, lsndtime); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, mdev_us); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_wstamp_ns); + CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_clock_cache); + CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_mstamp); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, rtt_seq); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tsorted_sent_queue); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, highest_sack); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, ecn_flags); - CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_tx, 89); + CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_tx, 105);
/* TXRX read-write hotpath cache lines */ CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, pred_flags); - CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, tcp_clock_cache); - CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, tcp_mstamp); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_nxt); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_nxt); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_una); @@@ -4670,7 -4673,7 +4673,7 @@@ CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, app_limited); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_wnd); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rx_opt); - CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_txrx, 92); + CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_txrx, 76);
/* RX read-write hotpath cache lines */ CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_received); diff --combined net/ipv6/ip6_fib.c index c1f62352a4814,ddd8e3c2df4a5..31d77885bcae3 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@@ -623,23 -623,22 +623,22 @@@ static int inet6_dump_fib(struct sk_buf struct rt6_rtnl_dump_arg arg = { .filter.dump_exceptions = true, .filter.dump_routes = true, - .filter.rtnl_held = true, + .filter.rtnl_held = false, }; const struct nlmsghdr *nlh = cb->nlh; struct net *net = sock_net(skb->sk); - unsigned int h, s_h; unsigned int e = 0, s_e; + struct hlist_head *head; struct fib6_walker *w; struct fib6_table *tb; - struct hlist_head *head; - int res = 0; + unsigned int h, s_h; + int err = 0;
+ rcu_read_lock(); if (cb->strict_check) { - int err; - err = ip_valid_fib_dump_req(net, nlh, &arg.filter, cb); if (err < 0) - return err; + goto unlock; } else if (nlmsg_len(nlh) >= sizeof(struct rtmsg)) { struct rtmsg *rtm = nlmsg_data(nlh);
@@@ -654,8 -653,10 +653,10 @@@ * 1. allocate and initialize walker. */ w = kzalloc(sizeof(*w), GFP_ATOMIC); - if (!w) - return -ENOMEM; + if (!w) { + err = -ENOMEM; + goto unlock; + } w->func = fib6_dump_node; cb->args[2] = (long)w;
@@@ -675,46 -676,46 +676,46 @@@ tb = fib6_get_table(net, arg.filter.table_id); if (!tb) { if (rtnl_msg_family(cb->nlh) != PF_INET6) - goto out; + goto unlock;
NL_SET_ERR_MSG_MOD(cb->extack, "FIB table does not exist"); - return -ENOENT; + err = -ENOENT; + goto unlock; }
if (!cb->args[0]) { - res = fib6_dump_table(tb, skb, cb); - if (!res) + err = fib6_dump_table(tb, skb, cb); + if (!err) cb->args[0] = 1; } - goto out; + goto unlock; }
s_h = cb->args[0]; s_e = cb->args[1];
- rcu_read_lock(); for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) { e = 0; head = &net->ipv6.fib_table_hash[h]; hlist_for_each_entry_rcu(tb, head, tb6_hlist) { if (e < s_e) goto next; - res = fib6_dump_table(tb, skb, cb); - if (res != 0) - goto out_unlock; + err = fib6_dump_table(tb, skb, cb); + if (err != 0) + goto out; next: e++; } } - out_unlock: - rcu_read_unlock(); + out: cb->args[1] = e; cb->args[0] = h; - out: - res = res < 0 ? res : skb->len; - if (res <= 0) + + unlock: + rcu_read_unlock(); + if (err <= 0) fib6_dump_end(cb); - return res; + return err; }
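inet6_dump_fib() now runs entirely under rcu_read_lock() and advertises filter.rtnl_held = false; the matching rtnl_register_module() change at the bottom of this file passes RTNL_FLAG_DUMP_UNLOCKED so the netlink core really does invoke the dump without taking RTNL. The error paths are restructured into the single-unlock shape typical of such conversions, roughly (all names here are schematic, not the real fib6 helpers):

  static int foo_dump(struct sk_buff *skb, struct netlink_callback *cb)
  {
          int err = 0;

          rcu_read_lock();                /* instead of relying on RTNL */

          if (foo_validate_req(cb) < 0) { /* hypothetical check */
                  err = -EINVAL;
                  goto unlock;
          }

          /* ... walk RCU-protected tables, filling skb ... */

  unlock:
          rcu_read_unlock();
          if (err <= 0)                   /* finished or failed */
                  foo_dump_end(cb);
          return err;
  }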
void fib6_metric_set(struct fib6_info *f6i, int metric, u32 val) @@@ -1385,10 -1386,7 +1386,10 @@@ int fib6_add(struct fib6_node *root, st struct nl_info *info, struct netlink_ext_ack *extack) { struct fib6_table *table = rt->fib6_table; - struct fib6_node *fn, *pn = NULL; + struct fib6_node *fn; +#ifdef CONFIG_IPV6_SUBTREES + struct fib6_node *pn = NULL; +#endif int err = -ENOMEM; int allow_create = 1; int replace_required = 0; @@@ -1412,9 -1410,9 +1413,9 @@@ goto out; }
+#ifdef CONFIG_IPV6_SUBTREES pn = fn;
-#ifdef CONFIG_IPV6_SUBTREES if (rt->fib6_src.plen) { struct fib6_node *sn;
@@@ -2509,7 -2507,7 +2510,7 @@@ int __init fib6_init(void goto out_kmem_cache_create;
ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE, NULL, - inet6_dump_fib, 0); + inet6_dump_fib, RTNL_FLAG_DUMP_UNLOCKED); if (ret) goto out_unregister_subsys;
diff --combined net/mac80211/mlme.c index db7128f6c901e,cd4b54e026538..4a7c610cf9f64 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@@ -599,6 -599,10 +599,10 @@@ static bool ieee80211_chandef_usable(st ieee80211_hw_check(&sdata->local->hw, DISALLOW_PUNCTURING)) return false;
+ if (chandef->punctured && chandef->chan->band == NL80211_BAND_5GHZ && + ieee80211_hw_check(&sdata->local->hw, DISALLOW_PUNCTURING_5GHZ)) + return false; + return true; }
@@@ -4429,9 -4433,11 +4433,11 @@@ static bool ieee80211_assoc_config_link switch (u8_get_bits(he_6ghz_oper->control, IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) { case IEEE80211_6GHZ_CTRL_REG_LPI_AP: + case IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP: bss_conf->power_type = IEEE80211_REG_LPI_AP; break; case IEEE80211_6GHZ_CTRL_REG_SP_AP: + case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP: bss_conf->power_type = IEEE80211_REG_SP_AP; break; case IEEE80211_6GHZ_CTRL_REG_VLP_AP: @@@ -6193,8 -6199,7 +6199,8 @@@ static void ieee80211_rx_mgmt_beacon(st link->u.mgd.dtim_period = elems->dtim_period; link->u.mgd.have_beacon = true; ifmgd->assoc_data->need_beacon = false; - if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) { + if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY) && + !ieee80211_is_s1g_beacon(hdr->frame_control)) { link->conf->sync_tsf = le64_to_cpu(mgmt->u.beacon.timestamp); link->conf->sync_device_ts = @@@ -6793,6 -6798,60 +6799,60 @@@ void ieee80211_process_neg_ttlm_res(str __ieee80211_disconnect(sdata); }
+ static void ieee80211_teardown_ttlm_work(struct wiphy *wiphy, + struct wiphy_work *work) + { + u16 new_dormant_links; + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, + u.mgd.neg_ttlm_timeout_work.work); + + if (!sdata->vif.neg_ttlm.valid) + return; + + memset(&sdata->vif.neg_ttlm, 0, sizeof(sdata->vif.neg_ttlm)); + new_dormant_links = + sdata->vif.dormant_links & ~sdata->vif.suspended_links; + sdata->vif.suspended_links = 0; + ieee80211_vif_set_links(sdata, sdata->vif.valid_links, + new_dormant_links); + ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_MLD_TTLM | + BSS_CHANGED_MLD_VALID_LINKS); + } + + void ieee80211_send_teardown_neg_ttlm(struct ieee80211_vif *vif) + { + struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); + struct ieee80211_local *local = sdata->local; + struct ieee80211_mgmt *mgmt; + struct sk_buff *skb; + int frame_len = offsetofend(struct ieee80211_mgmt, + u.action.u.ttlm_tear_down); + struct ieee80211_tx_info *info; + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + frame_len); + if (!skb) + return; + + skb_reserve(skb, local->hw.extra_tx_headroom); + mgmt = skb_put_zero(skb, frame_len); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + memcpy(mgmt->da, sdata->vif.cfg.ap_addr, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN); + + mgmt->u.action.category = WLAN_CATEGORY_PROTECTED_EHT; + mgmt->u.action.u.ttlm_tear_down.action_code = + WLAN_PROTECTED_EHT_ACTION_TTLM_TEARDOWN; + + info = IEEE80211_SKB_CB(skb); + info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; + info->status_data = IEEE80211_STATUS_TYPE_NEG_TTLM; + ieee80211_tx_skb(sdata, skb); + } + EXPORT_SYMBOL(ieee80211_send_teardown_neg_ttlm); + void ieee80211_sta_rx_queued_ext(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { @@@ -7424,6 -7483,8 +7484,8 @@@ void ieee80211_sta_setup_sdata(struct i ieee80211_tid_to_link_map_work); wiphy_delayed_work_init(&ifmgd->neg_ttlm_timeout_work, ieee80211_neg_ttlm_timeout_work); + wiphy_work_init(&ifmgd->teardown_ttlm_work, + ieee80211_teardown_ttlm_work);
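ieee80211_send_teardown_neg_ttlm() is the driver-facing half of this: a station-mode driver that previously accepted a negotiated TID-to-link mapping can ask mac80211 to tear it down again. The protected EHT action frame built above is sent with REQ_TX_STATUS set, and the teardown work then clears vif->neg_ttlm and reactivates the links that were suspended by the negotiation. A hedged call-site sketch; the policy trigger is hypothetical:

  /* hypothetical driver decision: abandon the negotiated TTLM,
   * e.g. because the link conditions that justified it changed
   */
  static void drv_abandon_neg_ttlm(struct ieee80211_vif *vif)
  {
          if (vif->type != NL80211_IFTYPE_STATION)
                  return;

          ieee80211_send_teardown_neg_ttlm(vif);
  }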
ifmgd->flags = 0; ifmgd->powersave = sdata->wdev.ps; @@@ -8210,6 -8271,14 +8272,14 @@@ int ieee80211_mgd_assoc(struct ieee8021 if (req->ap_mld_addr) { uapsd_supported = true;
+ if (req->flags & (ASSOC_REQ_DISABLE_HT | + ASSOC_REQ_DISABLE_VHT | + ASSOC_REQ_DISABLE_HE | + ASSOC_REQ_DISABLE_EHT)) { + err = -EINVAL; + goto err_free; + } + for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { struct ieee80211_supported_band *sband; struct cfg80211_bss *link_cbss = req->links[i].bss; @@@ -8222,19 -8291,13 +8292,13 @@@
if (!bss->wmm_used) { err = -EINVAL; - goto err_free; - } - - if (req->flags & (ASSOC_REQ_DISABLE_HT | - ASSOC_REQ_DISABLE_VHT | - ASSOC_REQ_DISABLE_HE | - ASSOC_REQ_DISABLE_EHT)) { - err = -EINVAL; + req->links[i].error = err; goto err_free; }
if (link_cbss->channel->band == NL80211_BAND_S1GHZ) { err = -EINVAL; + req->links[i].error = err; goto err_free; }
@@@ -8611,6 -8674,8 +8675,8 @@@ void ieee80211_mgd_stop(struct ieee8021 &ifmgd->beacon_connection_loss_work); wiphy_work_cancel(sdata->local->hw.wiphy, &ifmgd->csa_connection_drop_work); + wiphy_work_cancel(sdata->local->hw.wiphy, + &ifmgd->teardown_ttlm_work); wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->tdls_peer_del_work); wiphy_delayed_work_cancel(sdata->local->hw.wiphy, diff --combined net/mac80211/rx.c index 685185dc04f97,4b4cbd8bf35d2..7ae59a671618c --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@@ -3780,10 -3780,6 +3780,10 @@@ ieee80211_rx_h_action(struct ieee80211_ } break; case WLAN_CATEGORY_PROTECTED_EHT: + if (len < offsetofend(typeof(*mgmt), + u.action.u.ttlm_req.action_code)) + break; + switch (mgmt->u.action.u.ttlm_req.action_code) { case WLAN_PROTECTED_EHT_ACTION_TTLM_REQ: if (sdata->vif.type != NL80211_IFTYPE_STATION) @@@ -3962,8 -3958,8 +3962,8 @@@ ieee80211_rx_h_action_return(struct iee __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, -1, status->band); } - dev_kfree_skb(rx->skb); - return RX_QUEUED; + + return RX_DROP_U_UNKNOWN_ACTION_REJECTED; }
static ieee80211_rx_result debug_noinline diff --combined net/mac80211/scan.c index 73850312580f7,977f8eb0a67bc..3da1c5c450358 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@@ -648,7 -648,6 +648,7 @@@ static void ieee80211_send_scan_probe_r cpu_to_le16(IEEE80211_SN_TO_SEQ(sn)); } IEEE80211_SKB_CB(skb)->flags |= tx_flags; + IEEE80211_SKB_CB(skb)->control.flags |= IEEE80211_TX_CTRL_SCAN_TX; ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band); } } @@@ -708,19 -707,11 +708,11 @@@ static int __ieee80211_start_scan(struc return -EBUSY;
/* For an MLO connection, if a link ID was specified, validate that it - * is indeed active. If no link ID was specified, select one of the - * active links. + * is indeed active. */ - if (ieee80211_vif_is_mld(&sdata->vif)) { - if (req->tsf_report_link_id >= 0) { - if (!(sdata->vif.active_links & - BIT(req->tsf_report_link_id))) - return -EINVAL; - } else { - req->tsf_report_link_id = - __ffs(sdata->vif.active_links); - } - } + if (ieee80211_vif_is_mld(&sdata->vif) && req->tsf_report_link_id >= 0 && + !(sdata->vif.active_links & BIT(req->tsf_report_link_id))) + return -EINVAL;
if (!__ieee80211_can_leave_ch(sdata)) return -EBUSY; diff --combined net/mac80211/tx.c index cfd0a62d0152b,ac5ae7c05e364..f861d99e5f055 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@@ -698,16 -698,11 +698,16 @@@ ieee80211_tx_h_rate_ctrl(struct ieee802 txrc.bss_conf = &tx->sdata->vif.bss_conf; txrc.skb = tx->skb; txrc.reported_rate.idx = -1; - txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
- if (tx->sdata->rc_has_mcs_mask[info->band]) - txrc.rate_idx_mcs_mask = - tx->sdata->rc_rateidx_mcs_mask[info->band]; + if (unlikely(info->control.flags & IEEE80211_TX_CTRL_SCAN_TX)) { + txrc.rate_idx_mask = ~0; + } else { + txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band]; + + if (tx->sdata->rc_has_mcs_mask[info->band]) + txrc.rate_idx_mcs_mask = + tx->sdata->rc_rateidx_mcs_mask[info->band]; + }
txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP || tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT || @@@ -1609,8 -1604,8 +1609,8 @@@ int ieee80211_txq_setup_flows(struct ie local->cparams.target = MS2TIME(20); local->cparams.ecn = true;
- local->cvars = kcalloc(fq->flows_cnt, sizeof(local->cvars[0]), - GFP_KERNEL); + local->cvars = kvcalloc(fq->flows_cnt, sizeof(local->cvars[0]), + GFP_KERNEL); if (!local->cvars) { spin_lock_bh(&fq->lock); fq_reset(fq, fq_skb_free_func); @@@ -1630,7 -1625,7 +1630,7 @@@ void ieee80211_txq_teardown_flows(struc { struct fq *fq = &local->fq;
- kfree(local->cvars); + kvfree(local->cvars); local->cvars = NULL;
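The cvars switch is a plain kcalloc() -> kvcalloc() conversion: with a large fq->flows_cnt the per-flow CoDel state array can outgrow what the page allocator will reliably satisfy, and kvcalloc() falls back to vmalloc transparently. The only contract is symmetry, which the teardown hunk keeps:

  p = kvcalloc(n, sizeof(*p), GFP_KERNEL);  /* may be vmalloc-backed */
  if (!p)
          return -ENOMEM;
  /* ... */
  kvfree(p);   /* kvfree(), never plain kfree(), for kv* memory */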
spin_lock_bh(&fq->lock); diff --combined net/unix/af_unix.c index d032eb5fa6df1,61ecfa9c9c6b1..142d210b5b035 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@@ -546,7 -546,7 +546,7 @@@ static void unix_write_space(struct soc if (skwq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND); - sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); + sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT); } rcu_read_unlock(); } @@@ -979,11 -979,11 +979,11 @@@ static struct sock *unix_create1(struc sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen; sk->sk_destruct = unix_sock_destructor; u = unix_sk(sk); - u->inflight = 0; + u->listener = NULL; + u->vertex = NULL; u->path.dentry = NULL; u->path.mnt = NULL; spin_lock_init(&u->lock); - INIT_LIST_HEAD(&u->link); mutex_init(&u->iolock); /* single task reading lock */ mutex_init(&u->bindlock); /* single task binding lock */ init_waitqueue_head(&u->peer_wait); @@@ -1597,6 -1597,7 +1597,7 @@@ restart newsk->sk_type = sk->sk_type; init_peercred(newsk); newu = unix_sk(newsk); + newu->listener = other; RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq); otheru = unix_sk(other);
@@@ -1692,8 -1693,8 +1693,8 @@@ static int unix_accept(struct socket *s bool kern) { struct sock *sk = sock->sk; - struct sock *tsk; struct sk_buff *skb; + struct sock *tsk; int err;
err = -EOPNOTSUPP; @@@ -1718,6 -1719,7 +1719,7 @@@ }
tsk = skb->sk; + unix_update_edges(unix_sk(tsk)); skb_free_datagram(sk, skb); wake_up_interruptible(&unix_sk(sk)->peer_wait);
@@@ -1789,81 -1791,29 +1791,29 @@@ static inline bool too_many_unix_fds(st
static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) { - int i; - if (too_many_unix_fds(current)) return -ETOOMANYREFS;
- /* Need to duplicate file references for the sake of garbage - * collection. Otherwise a socket in the fps might become a - * candidate for GC while the skb is not yet queued. - */ - UNIXCB(skb).fp = scm_fp_dup(scm->fp); - if (!UNIXCB(skb).fp) - return -ENOMEM; + UNIXCB(skb).fp = scm->fp; + scm->fp = NULL;
- for (i = scm->fp->count - 1; i >= 0; i--) - unix_inflight(scm->fp->user, scm->fp->fp[i]); + if (unix_prepare_fpl(UNIXCB(skb).fp)) + return -ENOMEM;
return 0; }
static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) { - int i; - scm->fp = UNIXCB(skb).fp; UNIXCB(skb).fp = NULL;
- for (i = scm->fp->count - 1; i >= 0; i--) - unix_notinflight(scm->fp->user, scm->fp->fp[i]); + unix_destroy_fpl(scm->fp); }
static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb) { scm->fp = scm_fp_dup(UNIXCB(skb).fp); - - /* - * Garbage collection of unix sockets starts by selecting a set of - * candidate sockets which have reference only from being in flight - * (total_refs == inflight_refs). This condition is checked once during - * the candidate collection phase, and candidates are marked as such, so - * that non-candidates can later be ignored. While inflight_refs is - * protected by unix_gc_lock, total_refs (file count) is not, hence this - * is an instantaneous decision. - * - * Once a candidate, however, the socket must not be reinstalled into a - * file descriptor while the garbage collection is in progress. - * - * If the above conditions are met, then the directed graph of - * candidates (*) does not change while unix_gc_lock is held. - * - * Any operations that changes the file count through file descriptors - * (dup, close, sendmsg) does not change the graph since candidates are - * not installed in fds. - * - * Dequeing a candidate via recvmsg would install it into an fd, but - * that takes unix_gc_lock to decrement the inflight count, so it's - * serialized with garbage collection. - * - * MSG_PEEK is special in that it does not change the inflight count, - * yet does install the socket into an fd. The following lock/unlock - * pair is to ensure serialization with garbage collection. It must be - * done between incrementing the file count and installing the file into - * an fd. - * - * If garbage collection starts after the barrier provided by the - * lock/unlock, then it will see the elevated refcount and not mark this - * as a candidate. If a garbage collection is already in progress - * before the file count was incremented, then the lock/unlock pair will - * ensure that garbage collection is finished before progressing to - * installing the fd. - * - * (*) A -> B where B is on the queue of A or B is on the queue of C - * which is on the queue of listening socket A. - */ - spin_lock(&unix_gc_lock); - spin_unlock(&unix_gc_lock); }
static void unix_destruct_scm(struct sk_buff *skb) @@@ -1937,8 -1887,10 +1887,10 @@@ static void scm_stat_add(struct sock *s struct scm_fp_list *fp = UNIXCB(skb).fp; struct unix_sock *u = unix_sk(sk);
- if (unlikely(fp && fp->count)) + if (unlikely(fp && fp->count)) { atomic_add(fp->count, &u->scm_stat.nr_fds); + unix_add_edges(fp, u); + } }
static void scm_stat_del(struct sock *sk, struct sk_buff *skb) @@@ -1946,8 -1898,10 +1898,10 @@@ struct scm_fp_list *fp = UNIXCB(skb).fp; struct unix_sock *u = unix_sk(sk);
- if (unlikely(fp && fp->count)) + if (unlikely(fp && fp->count)) { atomic_sub(fp->count, &u->scm_stat.nr_fds); + unix_del_edges(fp); + } }
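The af_unix hunks belong to the reworked SCM_RIGHTS garbage collector, which tracks in-flight files as explicit graph edges instead of per-file inflight counters: unix_attach_fds() now takes ownership of scm->fp outright (no scm_fp_dup() copy), queueing and dequeueing maintain the edges, and the old unix_gc_lock lock/unlock barrier in unix_peek_fds() becomes unnecessary. The lifecycle, pieced together from the call sites in this diff:

  /*
   * sendmsg(SCM_RIGHTS):
   *   unix_attach_fds()   -> skb takes scm->fp, unix_prepare_fpl()
   *   scm_stat_add()      -> unix_add_edges(fp, receiving socket)
   *
   * recvmsg():
   *   scm_stat_del()      -> unix_del_edges(fp)
   *   unix_detach_fds()   -> unix_destroy_fpl(scm->fp)
   *
   * accept():
   *   unix_update_edges() -> re-home edges from listener to child
   */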
/* @@@ -2665,9 -2619,7 +2619,9 @@@ static struct sk_buff *manage_oob(struc } } else if (!(flags & MSG_PEEK)) { skb_unlink(skb, &sk->sk_receive_queue); - consume_skb(skb); + WRITE_ONCE(u->oob_skb, NULL); + if (!WARN_ON_ONCE(skb_unref(skb))) + kfree_skb(skb); skb = skb_peek(&sk->sk_receive_queue); } } diff --combined scripts/kernel-doc index b463acecad401,43a30f2de5138..d154455e25074 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc @@@ -1143,6 -1143,7 +1143,7 @@@ sub dump_struct($$) $members =~ s/\s*$attribute/ /gi; $members =~ s/\s*__aligned\s*([^;]*)/ /gos; $members =~ s/\s*__counted_by\s*([^;]*)/ /gos; + $members =~ s/\s*__counted_by_(le|be)\s*([^;]*)/ /gos; $members =~ s/\s*__packed\s*/ /gos; $members =~ s/\s*CRYPTO_MINALIGN_ATTR/ /gos; $members =~ s/\s*____cacheline_aligned_in_smp/ /gos; @@@ -1723,7 -1724,6 +1724,7 @@@ sub dump_function($$) $prototype =~ s/__must_check +//; $prototype =~ s/__weak +//; $prototype =~ s/__sched +//; + $prototype =~ s/_noprof//; $prototype =~ s/__printf\s*(\s*\d*\s*,\s*\d*\s*) +//; $prototype =~ s/__(?:re)?alloc_size\s*(\s*\d+\s*(?:,\s*\d+\s*)?) +//; $prototype =~ s/__diagnose_as\s*(\s*\S+\s*(?:,\s*\d+\s*)*) +//; diff --combined tools/include/linux/mm.h index 7d73da0980473,7a6b98f4e5792..dc0fc7125bc31 --- a/tools/include/linux/mm.h +++ b/tools/include/linux/mm.h @@@ -2,8 -2,8 +2,8 @@@ #ifndef _TOOLS_LINUX_MM_H #define _TOOLS_LINUX_MM_H
+ #include <linux/align.h> #include <linux/mmzone.h> - #include <uapi/linux/const.h>
#define PAGE_SHIFT 12 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) @@@ -11,9 -11,6 +11,6 @@@
#define PHYS_ADDR_MAX (~(phys_addr_t)0)
- #define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) - #define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a)) - #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
#define __va(x) ((void *)((unsigned long)(x))) @@@ -37,9 -34,4 +34,9 @@@ static inline void totalram_pages_add(l { }
+static inline int early_pfn_to_nid(unsigned long pfn) +{ + return 0; +} + #endif