The following commit has been merged in the master branch: commit 7d3a957ddf6a7e69aed48a2e4043b5e00426d413 Merge: 709e394d2300a8ac77fb7b5b334ca0598398859f c29b068215906d33f75378d44526edc37ad08276 Author: Stephen Rothwell sfr@canb.auug.org.au Date: Wed Sep 21 11:04:08 2022 +1000
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
# Conflicts: # drivers/net/ethernet/freescale/fec.h # drivers/pinctrl/pinctrl-ocelot.c # tools/testing/selftests/drivers/net/bonding/Makefile
diff --combined Documentation/admin-guide/kernel-parameters.txt index 637264df8c28,a6d3c7beaa4a..379af7b660e5 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@@ -966,10 -966,6 +966,6 @@@
debugpat [X86] Enable PAT debugging
- decnet.addr= [HW,NET] - Format: <area>[,<node>] - See also Documentation/networking/decnet.rst. - default_hugepagesz= [HW] The size of the default HugeTLB page. This is the size represented by the legacy /proc/ hugepages @@@ -3207,7 -3203,6 +3203,7 @@@ spectre_v2_user=off [X86] spec_store_bypass_disable=off [X86,PPC] ssbd=force-off [ARM64] + nospectre_bhb [ARM64] l1tf=off [X86] mds=off [X86] tsx_async_abort=off [X86] @@@ -3614,7 -3609,7 +3610,7 @@@
nohugeiomap [KNL,X86,PPC,ARM64] Disable kernel huge I/O mappings.
- nohugevmalloc [PPC] Disable kernel huge vmalloc mappings. + nohugevmalloc [KNL,X86,PPC,ARM64] Disable kernel huge vmalloc mappings.
nosmt [KNL,S390] Disable symmetric multithreading (SMT). Equivalent to smt=1. @@@ -3632,10 -3627,6 +3628,10 @@@ vulnerability. System may allow data leaks with this option.
+ nospectre_bhb [ARM64] Disable all mitigations for Spectre-BHB (branch + history injection) vulnerability. System may allow data leaks + with this option. + nospec_store_bypass_disable [HW] Disable all mitigations for the Speculative Store Bypass vulnerability
@@@ -3746,9 -3737,9 +3742,9 @@@ [X86,PV_OPS] Disable paravirtualized VMware scheduler clock and use the default one.
- no-steal-acc [X86,PV_OPS,ARM64] Disable paravirtualized steal time - accounting. steal time is computed, but won't - influence scheduler behaviour + no-steal-acc [X86,PV_OPS,ARM64,PPC/PSERIES] Disable paravirtualized + steal time accounting. steal time is computed, but + won't influence scheduler behaviour
nolapic [X86-32,APIC] Do not enable or use the local APIC.
diff --combined MAINTAINERS index 4f9e7d306cf3,6705fb8bfd3a..6367b19a6f57 --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -878,6 -878,13 +878,13 @@@ L: netdev@vger.kernel.or S: Maintained F: drivers/net/ethernet/altera/
+ ALTERA TSE PCS + M: Maxime Chevallier maxime.chevallier@bootlin.com + L: netdev@vger.kernel.org + S: Supported + F: drivers/net/pcs/pcs-altera-tse.c + F: include/linux/pcs-altera-tse.h + ALTERA UART/JTAG UART SERIAL DRIVERS M: Tobias Klauser tklauser@distanz.ch L: linux-serial@vger.kernel.org @@@ -1010,6 -1017,7 +1017,6 @@@ F: drivers/spi/spi-amd.
AMD MP2 I2C DRIVER M: Elie Morisse syniurge@gmail.com -M: Nehal Shah nehal-bakulchandra.shah@amd.com M: Shyam Sundar S K shyam-sundar.s-k@amd.com L: linux-i2c@vger.kernel.org S: Maintained @@@ -1333,15 -1341,6 +1340,15 @@@ F: drivers/iio/amplifiers/hmc425a. F: drivers/staging/iio/*/ad* X: drivers/iio/*/adjd*
+ANALOG DEVICES INC MAX31760 DRIVER +M: Ibrahim Tilki Ibrahim.Tilki@analog.com +S: Maintained +W: http://wiki.analog.com/ +W: https://ez.analog.com/linux-software-drivers +F: Documentation/devicetree/bindings/hwmon/adi,max31760.yaml +F: Documentation/hwmon/max31760.rst +F: drivers/hwmon/max31760.c + ANALOGBITS PLL LIBRARIES M: Paul Walmsley paul.walmsley@sifive.com S: Supported @@@ -1811,7 -1810,7 +1818,7 @@@ N: sun[x456789] N: sun50i
ARM/Amlogic Meson SoC CLOCK FRAMEWORK -M: Neil Armstrong narmstrong@baylibre.com +M: Neil Armstrong neil.armstrong@linaro.org M: Jerome Brunet jbrunet@baylibre.com L: linux-amlogic@lists.infradead.org S: Maintained @@@ -1836,7 -1835,7 +1843,7 @@@ F: Documentation/devicetree/bindings/so F: sound/soc/meson/
ARM/Amlogic Meson SoC support -M: Neil Armstrong narmstrong@baylibre.com +M: Neil Armstrong neil.armstrong@linaro.org M: Kevin Hilman khilman@baylibre.com R: Jerome Brunet jbrunet@baylibre.com R: Martin Blumenstingl martin.blumenstingl@googlemail.com @@@ -2539,7 -2538,7 +2546,7 @@@ W: http://www.digriz.org.uk/ts78xx/kern F: arch/arm/mach-orion5x/ts78xx-*
ARM/OXNAS platform support -M: Neil Armstrong narmstrong@baylibre.com +M: Neil Armstrong neil.armstrong@linaro.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-oxnas@groups.io (moderated for non-subscribers) S: Maintained @@@ -2587,7 -2586,7 +2594,7 @@@ W: http://www.armlinux.org.uk
ARM/QUALCOMM SUPPORT M: Andy Gross agross@kernel.org -M: Bjorn Andersson bjorn.andersson@linaro.org +M: Bjorn Andersson andersson@kernel.org R: Konrad Dybcio konrad.dybcio@somainline.org L: linux-arm-msm@vger.kernel.org S: Maintained @@@ -2596,7 -2595,6 +2603,7 @@@ F: Documentation/devicetree/bindings/*/ F: Documentation/devicetree/bindings/soc/qcom/ F: arch/arm/boot/dts/qcom-*.dts F: arch/arm/boot/dts/qcom-*.dtsi +F: arch/arm/configs/qcom_defconfig F: arch/arm/mach-qcom/ F: arch/arm64/boot/dts/qcom/ F: drivers/*/*/qcom* @@@ -2659,7 -2657,7 +2666,7 @@@ F: arch/arm/boot/dts/rtd F: arch/arm/mach-realtek/ F: arch/arm64/boot/dts/realtek/
-ARM/RENESAS ARM64 ARCHITECTURE +ARM/RENESAS ARCHITECTURE M: Geert Uytterhoeven geert+renesas@glider.be M: Magnus Damm magnus.damm@gmail.com L: linux-renesas-soc@vger.kernel.org @@@ -2670,16 -2668,6 +2677,16 @@@ T: git git://git.kernel.org/pub/scm/lin F: Documentation/devicetree/bindings/arm/renesas.yaml F: Documentation/devicetree/bindings/hwinfo/renesas,prr.yaml F: Documentation/devicetree/bindings/soc/renesas/ +F: arch/arm/boot/dts/emev2* +F: arch/arm/boot/dts/gr-peach* +F: arch/arm/boot/dts/iwg20d-q7* +F: arch/arm/boot/dts/r7s* +F: arch/arm/boot/dts/r8a* +F: arch/arm/boot/dts/r9a* +F: arch/arm/boot/dts/sh* +F: arch/arm/configs/shmobile_defconfig +F: arch/arm/include/debug/renesas-scif.S +F: arch/arm/mach-shmobile/ F: arch/arm64/boot/dts/renesas/ F: drivers/soc/renesas/ F: include/linux/soc/renesas/ @@@ -2791,6 -2779,29 +2798,6 @@@ L: linux-media@vger.kernel.or S: Maintained F: drivers/media/platform/samsung/s5p-mfc/
-ARM/SHMOBILE ARM ARCHITECTURE -M: Geert Uytterhoeven geert+renesas@glider.be -M: Magnus Damm magnus.damm@gmail.com -L: linux-renesas-soc@vger.kernel.org -S: Supported -Q: http://patchwork.kernel.org/project/linux-renesas-soc/list/ -C: irc://irc.libera.chat/renesas-soc -T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel.git next -F: Documentation/devicetree/bindings/arm/renesas.yaml -F: Documentation/devicetree/bindings/soc/renesas/ -F: arch/arm/boot/dts/emev2* -F: arch/arm/boot/dts/gr-peach* -F: arch/arm/boot/dts/iwg20d-q7* -F: arch/arm/boot/dts/r7s* -F: arch/arm/boot/dts/r8a* -F: arch/arm/boot/dts/r9a* -F: arch/arm/boot/dts/sh* -F: arch/arm/configs/shmobile_defconfig -F: arch/arm/include/debug/renesas-scif.S -F: arch/arm/mach-shmobile/ -F: drivers/soc/renesas/ -F: include/linux/soc/renesas/ - ARM/SOCFPGA ARCHITECTURE M: Dinh Nguyen dinguyen@kernel.org S: Maintained @@@ -3229,6 -3240,13 +3236,6 @@@ L: linux-hwmon@vger.kernel.or S: Maintained F: drivers/hwmon/asus_wmi_sensors.c
-ASUS WMI EC HARDWARE MONITOR DRIVER -M: Eugene Shalygin eugene.shalygin@gmail.com -M: Denis Pauk pauk.denis@gmail.com -L: linux-hwmon@vger.kernel.org -S: Maintained -F: drivers/hwmon/asus_wmi_ec_sensors.c - ASUS EC HARDWARE MONITOR DRIVER M: Eugene Shalygin eugene.shalygin@gmail.com L: linux-hwmon@vger.kernel.org @@@ -3814,7 -3832,6 +3821,7 @@@ F: kernel/bpf/dispatcher. F: kernel/bpf/trampoline.c F: include/linux/bpf* F: include/linux/filter.h +F: include/linux/tnum.h
BPF [BTF] M: Martin KaFai Lau martin.lau@linux.dev @@@ -3940,7 -3957,6 +3947,7 @@@ M: William Zhang <william.zhang@broadco M: Anand Gore anand.gore@broadcom.com M: Kursad Oney kursad.oney@broadcom.com M: Florian Fainelli f.fainelli@gmail.com +M: Rafał Miłecki rafal@milecki.pl R: Broadcom internal kernel review list bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@@ -4950,7 -4966,7 +4957,7 @@@ F: drivers/hwmon/lochnagar-hwmon. F: drivers/mfd/lochnagar-i2c.c F: drivers/pinctrl/cirrus/pinctrl-lochnagar.c F: drivers/regulator/lochnagar-regulator.c -F: include/dt-bindings/clk/lochnagar.h +F: include/dt-bindings/clock/lochnagar.h F: include/dt-bindings/pinctrl/lochnagar.h F: include/linux/mfd/lochnagar* F: sound/soc/codecs/lochnagar-sc.c @@@ -5130,7 -5146,6 +5137,7 @@@ M: Steve French <sfrench@samba.org R: Paulo Alcantara pc@cjr.nz (DFS, global name space) R: Ronnie Sahlberg lsahlber@redhat.com (directory leases, sparse files) R: Shyam Prasad N sprasad@microsoft.com (multichannel) +R: Tom Talpey tom@talpey.com (RDMA, smbdirect) L: linux-cifs@vger.kernel.org L: samba-technical@lists.samba.org (moderated for non-subscribers) S: Supported @@@ -5365,8 -5380,8 +5372,8 @@@ T: git git://git.kernel.org/pub/scm/lin F: drivers/cpuidle/cpuidle-big_little.c
CPUIDLE DRIVER - ARM EXYNOS -M: Bartlomiej Zolnierkiewicz b.zolnierkie@samsung.com M: Daniel Lezcano daniel.lezcano@linaro.org +R: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org M: Kukjin Kim kgene@kernel.org L: linux-pm@vger.kernel.org L: linux-samsung-soc@vger.kernel.org @@@ -5714,13 -5729,6 +5721,6 @@@ F: include/linux/tfrc. F: include/uapi/linux/dccp.h F: net/dccp/
- DECnet NETWORK LAYER - L: linux-decnet-user@lists.sourceforge.net - S: Orphan - W: http://linux-decnet.sourceforge.net - F: Documentation/networking/decnet.rst - F: net/decnet/ - DECSTATION PLATFORM SUPPORT M: "Maciej W. Rozycki" macro@orcam.me.uk L: linux-mips@vger.kernel.org @@@ -6173,7 -6181,7 +6173,7 @@@ F: Documentation/devicetree/bindings/me F: drivers/memory/samsung/exynos5422-dmc.c
DME1737 HARDWARE MONITOR DRIVER -M: Juerg Haefliger juergh@gmail.com +M: Juerg Haefliger juergh@proton.me L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/dme1737.rst @@@ -6784,7 -6792,7 +6784,7 @@@ F: Documentation/devicetree/bindings/di F: drivers/gpu/drm/sun4i/
DRM DRIVERS FOR AMLOGIC SOCS -M: Neil Armstrong narmstrong@baylibre.com +M: Neil Armstrong neil.armstrong@linaro.org L: dri-devel@lists.freedesktop.org L: linux-amlogic@lists.infradead.org S: Supported @@@ -6806,7 -6814,7 +6806,7 @@@ F: drivers/gpu/drm/atmel-hlcdc
DRM DRIVERS FOR BRIDGE CHIPS M: Andrzej Hajda andrzej.hajda@intel.com -M: Neil Armstrong narmstrong@baylibre.com +M: Neil Armstrong neil.armstrong@linaro.org M: Robert Foss robert.foss@linaro.org R: Laurent Pinchart Laurent.pinchart@ideasonboard.com R: Jonas Karlman jonas@kwiboo.se @@@ -8644,8 -8652,8 +8644,8 @@@ F: drivers/input/touchscreen/goodix
GOOGLE ETHERNET DRIVERS M: Jeroen de Borst jeroendb@google.com -R: Catherine Sullivan csully@google.com -R: David Awogbemila awogbemila@google.com +M: Catherine Sullivan csully@google.com +R: Shailend Chand shailend@google.com L: netdev@vger.kernel.org S: Supported F: Documentation/networking/device_drivers/ethernet/google/gve.rst @@@ -8899,7 -8907,7 +8899,7 @@@ S: Maintaine F: Documentation/devicetree/bindings/media/nxp,imx8mq-vpu.yaml F: Documentation/devicetree/bindings/media/rockchip,rk3568-vepu.yaml F: Documentation/devicetree/bindings/media/rockchip-vpu.yaml -F: drivers/staging/media/hantro/ +F: drivers/media/platform/verisilicon/
HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER M: Frank Seidel frank@f-seidel.de @@@ -8935,7 -8943,7 +8935,7 @@@ F: include/linux/hw_random.
HARDWARE SPINLOCK CORE M: Ohad Ben-Cohen ohad@wizery.com -M: Bjorn Andersson bjorn.andersson@linaro.org +M: Bjorn Andersson andersson@kernel.org R: Baolin Wang baolin.wang7@gmail.com L: linux-remoteproc@vger.kernel.org S: Maintained @@@ -9045,12 -9053,6 +9045,12 @@@ L: linux-input@vger.kernel.or S: Supported F: drivers/hid/hid-playstation.c
+HID PHOENIX RC FLIGHT CONTROLLER +M: Marcus Folkesson marcus.folkesson@gmail.com +L: linux-input@vger.kernel.org +S: Maintained +F: drivers/hid/hid-pxrc.c + HID SENSOR HUB DRIVERS M: Jiri Kosina jikos@kernel.org M: Jonathan Cameron jic23@kernel.org @@@ -9063,12 -9065,6 +9063,12 @@@ F: drivers/hid/hid-sensor- F: drivers/iio/*/hid-* F: include/linux/hid-sensor-*
+HID VRC-2 CAR CONTROLLER DRIVER +M: Marcus Folkesson marcus.folkesson@gmail.com +L: linux-input@vger.kernel.org +S: Maintained +F: drivers/hid/hid-vrc2.c + HID WACOM DRIVER M: Ping Cheng ping.cheng@wacom.com M: Jason Gerecke jason.gerecke@wacom.com @@@ -9126,7 -9122,7 +9126,7 @@@ S: Maintaine F: drivers/dma/hisi_dma.c
HISILICON GPIO DRIVER -M: Luo Jiaxing luojiaxing@huawei.com +M: Jay Fang f.fangjian@huawei.com L: linux-gpio@vger.kernel.org S: Maintained F: drivers/gpio/gpio-hisi.c @@@ -9212,8 -9208,8 +9212,8 @@@ F: Documentation/ABI/testing/debugfs-hi F: drivers/crypto/hisilicon/zip/
HISILICON ROCE DRIVER +M: Haoyue Xu xuhaoyue1@hisilicon.com M: Wenpeng Liang liangwenpeng@huawei.com -M: Weihang Li liweihang@huawei.com L: linux-rdma@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt @@@ -10832,7 -10828,7 +10832,7 @@@ F: drivers/media/tuners/it913x
ITE IT66121 HDMI BRIDGE DRIVER M: Phong LE ple@baylibre.com -M: Neil Armstrong narmstrong@baylibre.com +M: Neil Armstrong neil.armstrong@linaro.org S: Maintained T: git git://anongit.freedesktop.org/drm/drm-misc F: Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml @@@ -11077,8 -11073,8 +11077,8 @@@ F: tools/testing/selftests KERNEL SMB3 SERVER (KSMBD) M: Namjae Jeon linkinjeon@kernel.org M: Steve French sfrench@samba.org -M: Hyunchul Lee hyc.lee@gmail.com R: Sergey Senozhatsky senozhatsky@chromium.org +R: Tom Talpey tom@talpey.com L: linux-cifs@vger.kernel.org S: Maintained T: git git://git.samba.org/ksmbd.git @@@ -11351,7 -11347,7 +11351,7 @@@ F: kernel/debug F: kernel/module/kdb.c
KHADAS MCU MFD DRIVER -M: Neil Armstrong narmstrong@baylibre.com +M: Neil Armstrong neil.armstrong@linaro.org L: linux-amlogic@lists.infradead.org S: Maintained F: Documentation/devicetree/bindings/mfd/khadas,mcu.yaml @@@ -12414,6 -12410,7 +12414,6 @@@ F: drivers/power/supply/max77976_charge
MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS M: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org -M: Bartlomiej Zolnierkiewicz b.zolnierkie@samsung.com L: linux-pm@vger.kernel.org S: Supported B: mailto:linux-samsung-soc@vger.kernel.org @@@ -12425,6 -12422,7 +12425,6 @@@ F: drivers/power/supply/max77693_charge MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS M: Chanwoo Choi cw00.choi@samsung.com M: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org -M: Bartlomiej Zolnierkiewicz b.zolnierkie@samsung.com L: linux-kernel@vger.kernel.org S: Supported B: mailto:linux-samsung-soc@vger.kernel.org @@@ -13220,7 -13218,7 +13220,7 @@@ S: Maintaine F: drivers/watchdog/menz69_wdt.c
MESON AO CEC DRIVER FOR AMLOGIC SOCS -M: Neil Armstrong narmstrong@baylibre.com +M: Neil Armstrong neil.armstrong@linaro.org L: linux-media@vger.kernel.org L: linux-amlogic@lists.infradead.org S: Supported @@@ -13231,7 -13229,7 +13231,7 @@@ F: drivers/media/cec/platform/meson/ao- F: drivers/media/cec/platform/meson/ao-cec.c
MESON GE2D DRIVER FOR AMLOGIC SOCS -M: Neil Armstrong narmstrong@baylibre.com +M: Neil Armstrong neil.armstrong@linaro.org L: linux-media@vger.kernel.org L: linux-amlogic@lists.infradead.org S: Supported @@@ -13247,7 -13245,7 +13247,7 @@@ F: Documentation/devicetree/bindings/mt F: drivers/mtd/nand/raw/meson_*
MESON VIDEO DECODER DRIVER FOR AMLOGIC SOCS -M: Neil Armstrong narmstrong@baylibre.com +M: Neil Armstrong neil.armstrong@linaro.org L: linux-media@vger.kernel.org L: linux-amlogic@lists.infradead.org S: Supported @@@ -13782,7 -13780,7 +13782,7 @@@ MOTION EYE VAIO PICTUREBOOK CAMERA DRIV S: Orphan W: http://popies.net/meye/ F: Documentation/userspace-api/media/drivers/meye* -F: drivers/media/pci/meye/ +F: drivers/staging/media/deprecated/meye/ F: include/uapi/linux/meye.h
MOTORCOMM PHY DRIVER @@@ -14697,15 -14695,6 +14697,15 @@@ S: Orpha F: Documentation/devicetree/bindings/net/nfc/nxp,nci.yaml F: drivers/nfc/nxp-nci
+NXP i.MX 8MP DW100 V4L2 DRIVER +M: Xavier Roumegue xavier.roumegue@oss.nxp.com +L: linux-media@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/media/nxp,dw100.yaml +F: Documentation/userspace-api/media/drivers/dw100.rst +F: drivers/media/platform/nxp/dw100/ +F: include/uapi/linux/dw100.h + NXP i.MX 8QXP/8QM JPEG V4L2 DRIVER M: Mirela Rabulea mirela.rabulea@nxp.com R: NXP Linux Team linux-imx@nxp.com @@@ -14757,6 -14746,13 +14757,13 @@@ F: net/dsa/tag_ocelot. F: net/dsa/tag_ocelot_8021q.c F: tools/testing/selftests/drivers/net/ocelot/*
+ OCELOT EXTERNAL SWITCH CONTROL + M: Colin Foster colin.foster@in-advantage.com + S: Supported + F: Documentation/devicetree/bindings/mfd/mscc,ocelot.yaml + F: drivers/mfd/ocelot* + F: include/linux/mfd/ocelot.h + OCXL (Open Coherent Accelerator Processor Interface OpenCAPI) DRIVER M: Frederic Barrat fbarrat@linux.ibm.com M: Andrew Donnellan ajd@linux.ibm.com @@@ -15698,7 -15694,6 +15705,7 @@@ PCI ENDPOINT SUBSYSTE M: Kishon Vijay Abraham I kishon@ti.com M: Lorenzo Pieralisi lpieralisi@kernel.org R: Krzysztof Wilczyński kw@linux.com +R: Manivannan Sadhasivam manivannan.sadhasivam@linaro.org L: linux-pci@vger.kernel.org S: Supported Q: https://patchwork.kernel.org/project/linux-pci/list/ @@@ -15712,8 -15707,8 +15719,8 @@@ F: drivers/pci/endpoint F: tools/pci/
PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC -M: Russell Currey ruscur@russell.cc -M: Oliver O'Halloran oohall@gmail.com +M: Mahesh J Salgaonkar mahesh@linux.ibm.com +R: Oliver O'Halloran oohall@gmail.com L: linuxppc-dev@lists.ozlabs.org S: Supported F: Documentation/PCI/pci-error-recovery.rst @@@ -16137,7 -16132,7 +16144,7 @@@ F: drivers/gpio/gpio-sama5d2-piobu. F: drivers/pinctrl/pinctrl-at91*
PIN CONTROLLER - QUALCOMM -M: Bjorn Andersson bjorn.andersson@linaro.org +M: Bjorn Andersson andersson@kernel.org L: linux-arm-msm@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pinctrl/qcom,*.txt @@@ -16550,6 -16545,14 +16557,6 @@@ T: git git://linuxtv.org/media_tree.gi F: drivers/media/usb/pwc/* F: include/trace/events/pwc.h
-PWM FAN DRIVER -M: Bartlomiej Zolnierkiewicz b.zolnierkie@samsung.com -L: linux-hwmon@vger.kernel.org -S: Supported -F: Documentation/devicetree/bindings/hwmon/pwm-fan.txt -F: Documentation/hwmon/pwm-fan.rst -F: drivers/hwmon/pwm-fan.c - PWM IR Transmitter M: Sean Young sean@mess.org L: linux-media@vger.kernel.org @@@ -16822,7 -16825,7 +16829,7 @@@ F: Documentation/devicetree/bindings/me F: drivers/media/platform/qcom/camss/
QUALCOMM CLOCK DRIVERS -M: Bjorn Andersson bjorn.andersson@linaro.org +M: Bjorn Andersson andersson@kernel.org L: linux-arm-msm@vger.kernel.org S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux.git @@@ -16861,7 -16864,6 +16868,7 @@@ F: drivers/net/ethernet/qualcomm/emac
QUALCOMM ETHQOS ETHERNET DRIVER M: Vinod Koul vkoul@kernel.org +R: Bhupesh Sharma bhupesh.sharma@linaro.org L: netdev@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/net/qcom,ethqos.txt @@@ -17312,7 -17314,7 +17319,7 @@@ S: Supporte F: fs/reiserfs/
REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM -M: Bjorn Andersson bjorn.andersson@linaro.org +M: Bjorn Andersson andersson@kernel.org M: Mathieu Poirier mathieu.poirier@linaro.org L: linux-remoteproc@vger.kernel.org S: Maintained @@@ -17325,7 -17327,7 +17332,7 @@@ F: include/linux/remoteproc. F: include/linux/remoteproc/
REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM -M: Bjorn Andersson bjorn.andersson@linaro.org +M: Bjorn Andersson andersson@kernel.org M: Mathieu Poirier mathieu.poirier@linaro.org L: linux-remoteproc@vger.kernel.org S: Maintained @@@ -17552,7 -17554,6 +17559,7 @@@ F: drivers/clk/microchip/clk-mpfs. F: drivers/i2c/busses/i2c-microchip-core.c F: drivers/mailbox/mailbox-mpfs.c F: drivers/pci/controller/pcie-microchip-host.c +F: drivers/reset/reset-mpfs.c F: drivers/rtc/rtc-mpfs.c F: drivers/soc/microchip/ F: drivers/spi/spi-microchip-core.c @@@ -17751,17 -17752,6 +17758,17 @@@ L: linux-rdma@vger.kernel.or S: Maintained F: drivers/infiniband/ulp/rtrs/
+RUNTIME VERIFICATION (RV) +M: Daniel Bristot de Oliveira bristot@kernel.org +M: Steven Rostedt rostedt@goodmis.org +L: linux-trace-devel@vger.kernel.org +S: Maintained +F: Documentation/trace/rv/ +F: include/linux/rv.h +F: include/rv/ +F: kernel/trace/rv/ +F: tools/verification/ + RXRPC SOCKETS (AF_RXRPC) M: David Howells dhowells@redhat.com M: Marc Dionne marc.dionne@auristor.com @@@ -17933,7 -17923,8 +17940,7 @@@ M: Hans Verkuil <hverkuil@xs4all.nl L: linux-media@vger.kernel.org S: Maintained T: git git://linuxtv.org/media_tree.git -F: drivers/media/common/saa7146/ -F: drivers/media/pci/saa7146/ +F: drivers/staging/media/deprecated/saa7146/ F: include/media/drv-intf/saa7146*
SAFESETID SECURITY MODULE @@@ -17988,6 -17979,7 +17995,6 @@@ F: drivers/platform/x86/samsung-laptop.
SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS M: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org -M: Bartlomiej Zolnierkiewicz b.zolnierkie@samsung.com L: linux-kernel@vger.kernel.org L: linux-samsung-soc@vger.kernel.org S: Supported @@@ -18052,14 -18044,12 +18059,14 @@@ Q: https://patchwork.linuxtv.org/projec F: drivers/media/platform/samsung/exynos4-is/
SAMSUNG SOC CLOCK DRIVERS +M: Krzysztof Kozlowski krzysztof.kozlowski@linaro.org M: Sylwester Nawrocki s.nawrocki@samsung.com M: Tomasz Figa tomasz.figa@gmail.com M: Chanwoo Choi cw00.choi@samsung.com R: Alim Akhtar alim.akhtar@samsung.com L: linux-samsung-soc@vger.kernel.org S: Supported +T: git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/snawrocki/clk.git F: Documentation/devicetree/bindings/clock/samsung,*.yaml F: Documentation/devicetree/bindings/clock/samsung,s3c* @@@ -19965,7 -19955,6 +19972,7 @@@ S: Supporte F: drivers/net/team/ F: include/linux/if_team.h F: include/uapi/linux/if_team.h +F: tools/testing/selftests/net/team/
TECHNOLOGIC SYSTEMS TS-5500 PLATFORM SUPPORT M: "Savoir-faire Linux Inc." kernel@savoirfairelinux.com @@@ -20353,7 -20342,6 +20360,7 @@@ W: https://linuxtv.or Q: http://patchwork.linuxtv.org/project/linux-media/list/ T: git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git F: drivers/media/platform/ti/davinci/ +F: drivers/staging/media/deprecated/vpfe_capture/ F: include/media/davinci/
TI ENHANCED QUADRATURE ENCODER PULSE (eQEP) DRIVER @@@ -20494,7 -20482,7 +20501,7 @@@ S: Odd fixe W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git F: Documentation/admin-guide/media/tm6000* -F: drivers/media/usb/tm6000/ +F: drivers/staging/media/deprecated/tm6000/
TMIO/SDHI MMC DRIVER M: Wolfram Sang wsa+renesas@sang-engineering.com @@@ -20594,10 -20582,9 +20601,10 @@@ F: include/linux/toshiba. F: include/uapi/linux/toshiba.h
TOSHIBA TC358743 DRIVER -M: Mats Randgaard matrandg@cisco.com +M: Hans Verkuil hverkuil-cisco@xs4all.nl L: linux-media@vger.kernel.org S: Maintained +F: Documentation/devicetree/bindings/media/i2c/tc358743.txt F: drivers/media/i2c/tc358743* F: include/media/i2c/tc358743.h
@@@ -20618,13 -20605,6 +20625,13 @@@ Q: https://patchwork.kernel.org/project T: git git://git.kernel.org/pub/scm/linux/kernel/git/jarkko/linux-tpmdd.git F: drivers/char/tpm/
+TPS546D24 DRIVER +M: Duke Du dukedu83@gmail.com +L: linux-hwmon@vger.kernel.org +S: Maintained +F: Documentation/hwmon/tps546d24.rst +F: drivers/hwmon/pmbus/tps546d24.c + TRACING M: Steven Rostedt rostedt@goodmis.org M: Ingo Molnar mingo@redhat.com @@@ -20638,7 -20618,6 +20645,7 @@@ F: include/*/ftrace. F: include/linux/trace*.h F: include/trace/ F: kernel/trace/ +F: scripts/tracing/ F: tools/testing/selftests/ftrace/
TRACING MMIO ACCESSES (MMIOTRACE) @@@ -21253,7 -21232,7 +21260,7 @@@ S: Maintaine W: http://royale.zerezo.com/zr364xx/ T: git git://linuxtv.org/media_tree.git F: Documentation/admin-guide/media/zr364xx* -F: drivers/media/usb/zr364xx/ +F: drivers/staging/media/deprecated/zr364xx/
USER-MODE LINUX (UML) M: Richard Weinberger richard@nod.at @@@ -21828,7 -21807,7 +21835,7 @@@ F: lib/test_scanf. F: lib/vsprintf.c
VT1211 HARDWARE MONITOR DRIVER -M: Juerg Haefliger juergh@gmail.com +M: Juerg Haefliger juergh@proton.me L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/hwmon/vt1211.rst @@@ -21887,9 -21866,11 +21894,11 @@@ F: drivers/input/tablet/wacom_serial4.
WANGXUN ETHERNET DRIVER M: Jiawen Wu jiawenwu@trustnetic.com + M: Mengyuan Lou mengyuanlou@net-swift.com + W: https://www.net-swift.com L: netdev@vger.kernel.org S: Maintained - F: Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst + F: Documentation/networking/device_drivers/ethernet/wangxun/* F: drivers/net/ethernet/wangxun/
WATCHDOG DEVICE DRIVERS diff --combined drivers/net/bonding/bond_main.c index bc6d8b0aa6fb,ddd07395827a..da6fc133dcd7 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@@ -865,8 -865,12 +865,8 @@@ static void bond_hw_addr_flush(struct n dev_uc_unsync(slave_dev, bond_dev); dev_mc_unsync(slave_dev, bond_dev);
- if (BOND_MODE(bond) == BOND_MODE_8023AD) { - /* del lacpdu mc addr from mc list */ - u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; - - dev_mc_del(slave_dev, lacpdu_multicast); - } + if (BOND_MODE(bond) == BOND_MODE_8023AD) + dev_mc_del(slave_dev, lacpdu_mcast_addr); }
/*--------------------------- Active slave change ---------------------------*/ @@@ -886,8 -890,7 +886,8 @@@ static void bond_hw_addr_swap(struct bo if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(old_active->dev, -1);
- bond_hw_addr_flush(bond->dev, old_active->dev); + if (bond->dev->flags & IFF_UP) + bond_hw_addr_flush(bond->dev, old_active->dev); }
if (new_active) { @@@ -898,12 -901,10 +898,12 @@@ if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(new_active->dev, 1);
- netif_addr_lock_bh(bond->dev); - dev_uc_sync(new_active->dev, bond->dev); - dev_mc_sync(new_active->dev, bond->dev); - netif_addr_unlock_bh(bond->dev); + if (bond->dev->flags & IFF_UP) { + netif_addr_lock_bh(bond->dev); + dev_uc_sync(new_active->dev, bond->dev); + dev_mc_sync(new_active->dev, bond->dev); + netif_addr_unlock_bh(bond->dev); + } } }
@@@ -2165,14 -2166,16 +2165,14 @@@ int bond_enslave(struct net_device *bon } }
- netif_addr_lock_bh(bond_dev); - dev_mc_sync_multiple(slave_dev, bond_dev); - dev_uc_sync_multiple(slave_dev, bond_dev); - netif_addr_unlock_bh(bond_dev); + if (bond_dev->flags & IFF_UP) { + netif_addr_lock_bh(bond_dev); + dev_mc_sync_multiple(slave_dev, bond_dev); + dev_uc_sync_multiple(slave_dev, bond_dev); + netif_addr_unlock_bh(bond_dev);
- if (BOND_MODE(bond) == BOND_MODE_8023AD) { - /* add lacpdu mc addr to mc list */ - u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; - - dev_mc_add(slave_dev, lacpdu_multicast); + if (BOND_MODE(bond) == BOND_MODE_8023AD) + dev_mc_add(slave_dev, lacpdu_mcast_addr); } }
@@@ -2444,8 -2447,7 +2444,8 @@@ static int __bond_release_one(struct ne if (old_flags & IFF_ALLMULTI) dev_set_allmulti(slave_dev, -1);
- bond_hw_addr_flush(bond_dev, slave_dev); + if (old_flags & IFF_UP) + bond_hw_addr_flush(bond_dev, slave_dev); }
slave_disable_netpoll(slave); @@@ -4219,9 -4221,6 +4219,9 @@@ static int bond_open(struct net_device /* register to receive LACPDUs */ bond->recv_probe = bond_3ad_lacpdu_recv; bond_3ad_initiate_agg_selection(bond, 1); + + bond_for_each_slave(bond, slave, iter) + dev_mc_add(slave->dev, lacpdu_mcast_addr); }
if (bond_mode_can_use_xmit_hash(bond)) @@@ -4233,7 -4232,6 +4233,7 @@@ static int bond_close(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); + struct slave *slave;
bond_work_cancel_all(bond); bond->send_peer_notif = 0; @@@ -4241,19 -4239,6 +4241,19 @@@ bond_alb_deinitialize(bond); bond->recv_probe = NULL;
+ if (bond_uses_primary(bond)) { + rcu_read_lock(); + slave = rcu_dereference(bond->curr_active_slave); + if (slave) + bond_hw_addr_flush(bond_dev, slave->dev); + rcu_read_unlock(); + } else { + struct list_head *iter; + + bond_for_each_slave(bond, slave, iter) + bond_hw_addr_flush(bond_dev, slave->dev); + } + return 0; }
@@@ -5644,7 -5629,7 +5644,7 @@@ static int bond_ethtool_get_link_ksetti static void bond_ethtool_get_drvinfo(struct net_device *bond_dev, struct ethtool_drvinfo *drvinfo) { - strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d", BOND_ABI_VERSION); } diff --combined drivers/net/dsa/microchip/ksz9477_i2c.c index 4a719ab8aa89,8fbc122e3384..9bc21194d7d7 --- a/drivers/net/dsa/microchip/ksz9477_i2c.c +++ b/drivers/net/dsa/microchip/ksz9477_i2c.c @@@ -52,7 -52,7 +52,7 @@@ static int ksz9477_i2c_probe(struct i2c return 0; }
-static int ksz9477_i2c_remove(struct i2c_client *i2c) +static void ksz9477_i2c_remove(struct i2c_client *i2c) { struct ksz_device *dev = i2c_get_clientdata(i2c);
@@@ -60,6 -60,8 +60,6 @@@ ksz_switch_remove(dev);
i2c_set_clientdata(i2c, NULL); - - return 0; }
static void ksz9477_i2c_shutdown(struct i2c_client *i2c) @@@ -89,6 -91,10 +89,10 @@@ static const struct of_device_id ksz947 .compatible = "microchip,ksz9477", .data = &ksz_switch_chips[KSZ9477] }, + { + .compatible = "microchip,ksz9896", + .data = &ksz_switch_chips[KSZ9896] + }, { .compatible = "microchip,ksz9897", .data = &ksz_switch_chips[KSZ9897] diff --combined drivers/net/dsa/microchip/lan937x_main.c index 5579644e8fde,3e83f8ca0f09..cefe8517629a --- a/drivers/net/dsa/microchip/lan937x_main.c +++ b/drivers/net/dsa/microchip/lan937x_main.c @@@ -10,6 -10,8 +10,8 @@@ #include <linux/of_mdio.h> #include <linux/if_bridge.h> #include <linux/if_vlan.h> + #include <linux/irq.h> + #include <linux/irqdomain.h> #include <linux/math.h> #include <net/dsa.h> #include <net/switchdev.h> @@@ -18,6 -20,8 +20,8 @@@ #include "ksz_common.h" #include "lan937x.h"
+ #define LAN937x_PNIRQS 6 + static int lan937x_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set) { return regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0); @@@ -128,14 -132,14 +132,14 @@@ static int lan937x_internal_phy_read(st return ksz_read16(dev, REG_VPHY_IND_DATA__2, val); }
- void lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data) + int lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data) { - lan937x_internal_phy_read(dev, addr, reg, data); + return lan937x_internal_phy_read(dev, addr, reg, data); }
- void lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val) + int lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val) { - lan937x_internal_phy_write(dev, addr, reg, val); + return lan937x_internal_phy_write(dev, addr, reg, val); }
static int lan937x_sw_mdio_read(struct mii_bus *bus, int addr, int regnum) @@@ -165,6 -169,45 +169,45 @@@ static int lan937x_sw_mdio_write(struc return lan937x_internal_phy_write(dev, addr, regnum, val); }
+ static int lan937x_irq_phy_setup(struct ksz_device *dev) + { + struct dsa_switch *ds = dev->ds; + int phy, err_phy; + int irq; + int ret; + + for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++) { + if (BIT(phy) & ds->phys_mii_mask) { + irq = irq_find_mapping(dev->ports[phy].pirq.domain, + PORT_SRC_PHY_INT); + if (irq < 0) { + ret = irq; + goto out; + } + ds->slave_mii_bus->irq[phy] = irq; + } + } + return 0; + out: + err_phy = phy; + + for (phy = 0; phy < err_phy; phy++) + if (BIT(phy) & ds->phys_mii_mask) + irq_dispose_mapping(ds->slave_mii_bus->irq[phy]); + + return ret; + } + + static void lan937x_irq_phy_free(struct ksz_device *dev) + { + struct dsa_switch *ds = dev->ds; + int phy; + + for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++) + if (BIT(phy) & ds->phys_mii_mask) + irq_dispose_mapping(ds->slave_mii_bus->irq[phy]); + } + static int lan937x_mdio_register(struct ksz_device *dev) { struct dsa_switch *ds = dev->ds; @@@ -194,10 -237,17 +237,17 @@@
ds->slave_mii_bus = bus;
+ ret = lan937x_irq_phy_setup(dev); + if (ret) { + of_node_put(mdio_np); + return ret; + } + ret = devm_of_mdiobus_register(ds->dev, bus, mdio_np); if (ret) { dev_err(ds->dev, "unable to register MDIO bus %s\n", bus->id); + lan937x_irq_phy_free(dev); }
of_node_put(mdio_np); @@@ -225,6 -275,10 +275,10 @@@ int lan937x_reset_switch(struct ksz_dev if (ret < 0) return ret;
+ ret = ksz_write32(dev, REG_SW_INT_STATUS__4, POR_READY_INT); + if (ret < 0) + return ret; + ret = ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0xFF); if (ret < 0) return ret; @@@ -244,6 -298,10 +298,6 @@@ void lan937x_port_setup(struct ksz_devi lan937x_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE, true);
- /* disable frame check length field */ - lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0, PORT_CHECK_LENGTH, - false); - /* set back pressure for half duplex */ lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true); @@@ -311,6 -369,23 +365,23 @@@ int lan937x_change_mtu(struct ksz_devic return 0; }
+ int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs) + { + u32 secs = msecs / 1000; + u32 value; + int ret; + + value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs); + + ret = ksz_write8(dev, REG_SW_AGE_PERIOD__1, value); + if (ret < 0) + return ret; + + value = FIELD_GET(SW_AGE_PERIOD_19_8_M, secs); + + return ksz_write16(dev, REG_SW_AGE_PERIOD__2, value); + } + static void lan937x_set_tune_adj(struct ksz_device *dev, int port, u16 reg, u8 val) { @@@ -379,9 -454,289 +450,289 @@@ void lan937x_setup_rgmii_delay(struct k } }
+ int lan937x_switch_init(struct ksz_device *dev) + { + dev->port_mask = (1 << dev->info->port_cnt) - 1; + + return 0; + } + + static void lan937x_girq_mask(struct irq_data *d) + { + struct ksz_device *dev = irq_data_get_irq_chip_data(d); + unsigned int n = d->hwirq; + + dev->girq.masked |= (1 << n); + } + + static void lan937x_girq_unmask(struct irq_data *d) + { + struct ksz_device *dev = irq_data_get_irq_chip_data(d); + unsigned int n = d->hwirq; + + dev->girq.masked &= ~(1 << n); + } + + static void lan937x_girq_bus_lock(struct irq_data *d) + { + struct ksz_device *dev = irq_data_get_irq_chip_data(d); + + mutex_lock(&dev->lock_irq); + } + + static void lan937x_girq_bus_sync_unlock(struct irq_data *d) + { + struct ksz_device *dev = irq_data_get_irq_chip_data(d); + int ret; + + ret = ksz_write32(dev, REG_SW_PORT_INT_MASK__4, dev->girq.masked); + if (ret) + dev_err(dev->dev, "failed to change IRQ mask\n"); + + mutex_unlock(&dev->lock_irq); + } + + static const struct irq_chip lan937x_girq_chip = { + .name = "lan937x-global", + .irq_mask = lan937x_girq_mask, + .irq_unmask = lan937x_girq_unmask, + .irq_bus_lock = lan937x_girq_bus_lock, + .irq_bus_sync_unlock = lan937x_girq_bus_sync_unlock, + }; + + static int lan937x_girq_domain_map(struct irq_domain *d, + unsigned int irq, irq_hw_number_t hwirq) + { + struct ksz_device *dev = d->host_data; + + irq_set_chip_data(irq, d->host_data); + irq_set_chip_and_handler(irq, &dev->girq.chip, handle_level_irq); + irq_set_noprobe(irq); + + return 0; + } + + static const struct irq_domain_ops lan937x_girq_domain_ops = { + .map = lan937x_girq_domain_map, + .xlate = irq_domain_xlate_twocell, + }; + + static void lan937x_girq_free(struct ksz_device *dev) + { + int irq, virq; + + free_irq(dev->irq, dev); + + for (irq = 0; irq < dev->girq.nirqs; irq++) { + virq = irq_find_mapping(dev->girq.domain, irq); + irq_dispose_mapping(virq); + } + + irq_domain_remove(dev->girq.domain); + } + + static irqreturn_t lan937x_girq_thread_fn(int irq, 
void *dev_id) + { + struct ksz_device *dev = dev_id; + unsigned int nhandled = 0; + unsigned int sub_irq; + unsigned int n; + u32 data; + int ret; + + /* Read global interrupt status register */ + ret = ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data); + if (ret) + goto out; + + for (n = 0; n < dev->girq.nirqs; ++n) { + if (data & (1 << n)) { + sub_irq = irq_find_mapping(dev->girq.domain, n); + handle_nested_irq(sub_irq); + ++nhandled; + } + } + out: + return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE); + } + + static int lan937x_girq_setup(struct ksz_device *dev) + { + int ret, irq; + + dev->girq.nirqs = dev->info->port_cnt; + dev->girq.domain = irq_domain_add_simple(NULL, dev->girq.nirqs, 0, + &lan937x_girq_domain_ops, dev); + if (!dev->girq.domain) + return -ENOMEM; + + for (irq = 0; irq < dev->girq.nirqs; irq++) + irq_create_mapping(dev->girq.domain, irq); + + dev->girq.chip = lan937x_girq_chip; + dev->girq.masked = ~0; + + ret = request_threaded_irq(dev->irq, NULL, lan937x_girq_thread_fn, + IRQF_ONESHOT | IRQF_TRIGGER_FALLING, + dev_name(dev->dev), dev); + if (ret) + goto out; + + return 0; + + out: + lan937x_girq_free(dev); + + return ret; + } + + static void lan937x_pirq_mask(struct irq_data *d) + { + struct ksz_port *port = irq_data_get_irq_chip_data(d); + unsigned int n = d->hwirq; + + port->pirq.masked |= (1 << n); + } + + static void lan937x_pirq_unmask(struct irq_data *d) + { + struct ksz_port *port = irq_data_get_irq_chip_data(d); + unsigned int n = d->hwirq; + + port->pirq.masked &= ~(1 << n); + } + + static void lan937x_pirq_bus_lock(struct irq_data *d) + { + struct ksz_port *port = irq_data_get_irq_chip_data(d); + struct ksz_device *dev = port->ksz_dev; + + mutex_lock(&dev->lock_irq); + } + + static void lan937x_pirq_bus_sync_unlock(struct irq_data *d) + { + struct ksz_port *port = irq_data_get_irq_chip_data(d); + struct ksz_device *dev = port->ksz_dev; + + ksz_pwrite8(dev, port->num, REG_PORT_INT_MASK, port->pirq.masked); + 
mutex_unlock(&dev->lock_irq); + } + + static const struct irq_chip lan937x_pirq_chip = { + .name = "lan937x-port", + .irq_mask = lan937x_pirq_mask, + .irq_unmask = lan937x_pirq_unmask, + .irq_bus_lock = lan937x_pirq_bus_lock, + .irq_bus_sync_unlock = lan937x_pirq_bus_sync_unlock, + }; + + static int lan937x_pirq_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hwirq) + { + struct ksz_port *port = d->host_data; + + irq_set_chip_data(irq, d->host_data); + irq_set_chip_and_handler(irq, &port->pirq.chip, handle_level_irq); + irq_set_noprobe(irq); + + return 0; + } + + static const struct irq_domain_ops lan937x_pirq_domain_ops = { + .map = lan937x_pirq_domain_map, + .xlate = irq_domain_xlate_twocell, + }; + + static void lan937x_pirq_free(struct ksz_device *dev, u8 p) + { + struct ksz_port *port = &dev->ports[p]; + int irq, virq; + int irq_num; + + irq_num = irq_find_mapping(dev->girq.domain, p); + if (irq_num < 0) + return; + + free_irq(irq_num, port); + + for (irq = 0; irq < port->pirq.nirqs; irq++) { + virq = irq_find_mapping(port->pirq.domain, irq); + irq_dispose_mapping(virq); + } + + irq_domain_remove(port->pirq.domain); + } + + static irqreturn_t lan937x_pirq_thread_fn(int irq, void *dev_id) + { + struct ksz_port *port = dev_id; + unsigned int nhandled = 0; + struct ksz_device *dev; + unsigned int sub_irq; + unsigned int n; + u8 data; + + dev = port->ksz_dev; + + /* Read port interrupt status register */ + ksz_pread8(dev, port->num, REG_PORT_INT_STATUS, &data); + + for (n = 0; n < port->pirq.nirqs; ++n) { + if (data & (1 << n)) { + sub_irq = irq_find_mapping(port->pirq.domain, n); + handle_nested_irq(sub_irq); + ++nhandled; + } + } + + return (nhandled > 0 ? 
IRQ_HANDLED : IRQ_NONE); + } + + static int lan937x_pirq_setup(struct ksz_device *dev, u8 p) + { + struct ksz_port *port = &dev->ports[p]; + int ret, irq; + int irq_num; + + port->pirq.nirqs = LAN937x_PNIRQS; + port->pirq.domain = irq_domain_add_simple(dev->dev->of_node, + port->pirq.nirqs, 0, + &lan937x_pirq_domain_ops, + port); + if (!port->pirq.domain) + return -ENOMEM; + + for (irq = 0; irq < port->pirq.nirqs; irq++) + irq_create_mapping(port->pirq.domain, irq); + + port->pirq.chip = lan937x_pirq_chip; + port->pirq.masked = ~0; + + irq_num = irq_find_mapping(dev->girq.domain, p); + if (irq_num < 0) + return irq_num; + + snprintf(port->pirq.name, sizeof(port->pirq.name), "port_irq-%d", p); + + ret = request_threaded_irq(irq_num, NULL, lan937x_pirq_thread_fn, + IRQF_ONESHOT | IRQF_TRIGGER_FALLING, + port->pirq.name, port); + if (ret) + goto out; + + return 0; + + out: + lan937x_pirq_free(dev, p); + + return ret; + } + int lan937x_setup(struct dsa_switch *ds) { struct ksz_device *dev = ds->priv; + struct dsa_port *dp; int ret;
/* enable Indirect Access from SPI to the VPHY registers */ @@@ -391,10 -746,22 +742,22 @@@ return ret; }
+ if (dev->irq > 0) { + ret = lan937x_girq_setup(dev); + if (ret) + return ret; + + dsa_switch_for_each_user_port(dp, dev->ds) { + ret = lan937x_pirq_setup(dev, dp->index); + if (ret) + goto out_girq; + } + } + ret = lan937x_mdio_register(dev); if (ret < 0) { dev_err(dev->dev, "failed to register the mdio"); - return ret; + goto out_pirq; }
/* The VLAN aware is a global setting. Mixed vlan @@@ -420,13 -787,29 +783,29 @@@ (SW_CLK125_ENB | SW_CLK25_ENB), true);
return 0; + + out_pirq: + if (dev->irq > 0) + dsa_switch_for_each_user_port(dp, dev->ds) + lan937x_pirq_free(dev, dp->index); + out_girq: + if (dev->irq > 0) + lan937x_girq_free(dev); + + return ret; }
- int lan937x_switch_init(struct ksz_device *dev) + void lan937x_teardown(struct dsa_switch *ds) { - dev->port_mask = (1 << dev->info->port_cnt) - 1; + struct ksz_device *dev = ds->priv; + struct dsa_port *dp;
- return 0; + if (dev->irq > 0) { + dsa_switch_for_each_user_port(dp, dev->ds) + lan937x_pirq_free(dev, dp->index); + + lan937x_girq_free(dev); + } }
void lan937x_switch_exit(struct ksz_device *dev) diff --combined drivers/net/ethernet/freescale/fec.h index a5fed00cb971,dd055d734363..b0100fe3c9e4 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@@ -19,6 -19,8 +19,8 @@@ #include <linux/pm_qos.h> #include <linux/ptp_clock_kernel.h> #include <linux/timecounter.h> + #include <dt-bindings/firmware/imx/rsrc.h> + #include <linux/firmware/imx/sci.h>
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ @@@ -561,7 -563,6 +563,7 @@@ struct fec_enet_private struct clk *clk_2x_txclk;
bool ptp_clk_on; + struct mutex ptp_clk_mutex; unsigned int num_tx_queues; unsigned int num_rx_queues;
@@@ -583,6 -584,7 +585,7 @@@ struct device_node *phy_node; bool rgmii_txc_dly; bool rgmii_rxc_dly; + bool rpm_active; int link; int full_duplex; int speed; @@@ -639,6 -641,15 +642,8 @@@ int pps_enable; unsigned int next_counter;
- struct { - struct timespec64 ts_phc; - u64 ns_sys; - u32 at_corr; - u8 at_inc_corr; - } ptp_saved_state; - + struct imx_sc_ipc *ipc_handle; + u64 ethtool_stats[]; };
@@@ -649,5 -660,8 +654,5 @@@ void fec_ptp_disable_hwts(struct net_de int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
-void fec_ptp_save_state(struct fec_enet_private *fep); -int fec_ptp_restore_state(struct fec_enet_private *fep); - /****************************************************************************/ #endif /* FEC_H */ diff --combined drivers/net/ethernet/freescale/fec_main.c index 92c55e1a5507,d8b487f5c1ca..59921218a8a4 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@@ -156,6 -156,13 +156,13 @@@ static const struct fec_devinfo fec_imx FEC_QUIRK_DELAYED_CLKS_SUPPORT, };
+ static const struct fec_devinfo fec_s32v234_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | + FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE, + }; + static struct platform_device_id fec_devtype[] = { { /* keep it for coldfire */ @@@ -188,6 -195,9 +195,9 @@@ }, { .name = "imx8qm-fec", .driver_data = (kernel_ulong_t)&fec_imx8qm_info, + }, { + .name = "s32v234-fec", + .driver_data = (kernel_ulong_t)&fec_s32v234_info, }, { /* sentinel */ } @@@ -204,6 -214,7 +214,7 @@@ enum imx_fec_type IMX6UL_FEC, IMX8MQ_FEC, IMX8QM_FEC, + S32V234_FEC, };
static const struct of_device_id fec_dt_ids[] = { @@@ -216,6 -227,7 +227,7 @@@ { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], }, { .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], }, { .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], }, + { .compatible = "fsl,s32v234-fec", .data = &fec_devtype[S32V234_FEC], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fec_dt_ids); @@@ -286,8 -298,11 +298,8 @@@ MODULE_PARM_DESC(macaddr, "FEC Etherne #define FEC_MMFR_TA (2 << 16) #define FEC_MMFR_DATA(v) (v & 0xffff) /* FEC ECR bits definition */ -#define FEC_ECR_RESET BIT(0) -#define FEC_ECR_ETHEREN BIT(1) -#define FEC_ECR_MAGICEN BIT(2) -#define FEC_ECR_SLEEP BIT(3) -#define FEC_ECR_EN1588 BIT(4) +#define FEC_ECR_MAGICEN (1 << 2) +#define FEC_ECR_SLEEP (1 << 3)
#define FEC_MII_TIMEOUT 30000 /* us */
@@@ -983,6 -998,9 +995,6 @@@ fec_restart(struct net_device *ndev u32 temp_mac[2]; u32 rcntl = OPT_FRAME_SIZE | 0x04; u32 ecntl = 0x2; /* ETHEREN */ - struct ptp_clock_request ptp_rq = { .type = PTP_CLK_REQ_PPS }; - - fec_ptp_save_state(fep);
/* Whack a reset. We should wait for this. * For i.MX6SX SOC, enet use AXI bus, we use disable MAC @@@ -1136,7 -1154,7 +1148,7 @@@ }
if (fep->bufdesc_ex) - ecntl |= FEC_ECR_EN1588; + ecntl |= (1 << 4);
if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && fep->rgmii_txc_dly) @@@ -1157,6 -1175,14 +1169,6 @@@ if (fep->bufdesc_ex) fec_ptp_start_cyclecounter(ndev);
- /* Restart PPS if needed */ - if (fep->pps_enable) { - /* Clear flag so fec_ptp_enable_pps() doesn't return immediately */ - fep->pps_enable = 0; - fec_ptp_restore_state(fep); - fep->ptp_caps.enable(&fep->ptp_caps, &ptp_rq, 1); - } - /* Enable interrupts we wish to service */ if (fep->link) writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); @@@ -1168,6 -1194,34 +1180,34 @@@
}
+ static int fec_enet_ipc_handle_init(struct fec_enet_private *fep) + { + if (!(of_machine_is_compatible("fsl,imx8qm") || + of_machine_is_compatible("fsl,imx8qxp") || + of_machine_is_compatible("fsl,imx8dxl"))) + return 0; + + return imx_scu_get_handle(&fep->ipc_handle); + } + + static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled) + { + struct device_node *np = fep->pdev->dev.of_node; + u32 rsrc_id, val; + int idx; + + if (!np || !fep->ipc_handle) + return; + + idx = of_alias_get_id(np, "ethernet"); + if (idx < 0) + idx = 0; + rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0; + + val = enabled ? 1 : 0; + imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val); + } + static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled) { struct fec_platform_data *pdata = fep->pdev->dev.platform_data; @@@ -1183,6 -1237,8 +1223,8 @@@ BIT(stop_gpr->bit), 0); } else if (pdata && pdata->sleep_mode_enable) { pdata->sleep_mode_enable(enabled); + } else { + fec_enet_ipg_stop_set(fep, enabled); } }
@@@ -1207,6 -1263,8 +1249,6 @@@ fec_stop(struct net_device *ndev struct fec_enet_private *fep = netdev_priv(ndev); u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); u32 val; - struct ptp_clock_request ptp_rq = { .type = PTP_CLK_REQ_PPS }; - u32 ecntl = 0;
/* We cannot expect a graceful transmit stop without link !!! */ if (fep->link) { @@@ -1216,6 -1274,8 +1258,6 @@@ netdev_err(ndev, "Graceful transmit stop did not complete!\n"); }
- fec_ptp_save_state(fep); - /* Whack a reset. We should wait for this. * For i.MX6SX SOC, enet use AXI bus, we use disable MAC * instead of reset MAC itself. @@@ -1235,12 -1295,28 +1277,12 @@@ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
- if (fep->bufdesc_ex) - ecntl |= FEC_ECR_EN1588; - /* We have to keep ENET enabled to have MII interrupt stay working */ if (fep->quirks & FEC_QUIRK_ENET_MAC && !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { - ecntl |= FEC_ECR_ETHEREN; + writel(2, fep->hwp + FEC_ECNTRL); writel(rmii_mode, fep->hwp + FEC_R_CNTRL); } - - writel(ecntl, fep->hwp + FEC_ECNTRL); - - if (fep->bufdesc_ex) - fec_ptp_start_cyclecounter(ndev); - - /* Restart PPS if needed */ - if (fep->pps_enable) { - /* Clear flag so fec_ptp_enable_pps() doesn't return immediately */ - fep->pps_enable = 0; - fec_ptp_restore_state(fep); - fep->ptp_caps.enable(&fep->ptp_caps, &ptp_rq, 1); - } }
@@@ -1995,6 -2071,7 +2037,6 @@@ static void fec_enet_phy_reset_after_cl static int fec_enet_clk_enable(struct net_device *ndev, bool enable) { struct fec_enet_private *fep = netdev_priv(ndev); - unsigned long flags; int ret;
if (enable) { @@@ -2003,15 -2080,15 +2045,15 @@@ return ret;
if (fep->clk_ptp) { - spin_lock_irqsave(&fep->tmreg_lock, flags); + mutex_lock(&fep->ptp_clk_mutex); ret = clk_prepare_enable(fep->clk_ptp); if (ret) { - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); goto failed_clk_ptp; } else { fep->ptp_clk_on = true; } - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); }
ret = clk_prepare_enable(fep->clk_ref); @@@ -2026,10 -2103,10 +2068,10 @@@ } else { clk_disable_unprepare(fep->clk_enet_out); if (fep->clk_ptp) { - spin_lock_irqsave(&fep->tmreg_lock, flags); + mutex_lock(&fep->ptp_clk_mutex); clk_disable_unprepare(fep->clk_ptp); fep->ptp_clk_on = false; - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); } clk_disable_unprepare(fep->clk_ref); clk_disable_unprepare(fep->clk_2x_txclk); @@@ -2042,10 -2119,10 +2084,10 @@@ failed_clk_2x_txclk clk_disable_unprepare(fep->clk_ref); failed_clk_ref: if (fep->clk_ptp) { - spin_lock_irqsave(&fep->tmreg_lock, flags); + mutex_lock(&fep->ptp_clk_mutex); clk_disable_unprepare(fep->clk_ptp); fep->ptp_clk_on = false; - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); } failed_clk_ptp: clk_disable_unprepare(fep->clk_enet_out); @@@ -2105,13 -2182,13 +2147,13 @@@ static int fec_enet_mii_probe(struct ne continue; if (dev_id--) continue; - strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); + strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); break; }
if (phy_id >= PHY_MAX_ADDR) { netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); - strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); + strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); phy_id = 0; }
@@@ -2295,9 -2372,9 +2337,9 @@@ static void fec_enet_get_drvinfo(struc { struct fec_enet_private *fep = netdev_priv(ndev);
- strlcpy(info->driver, fep->pdev->dev.driver->name, + strscpy(info->driver, fep->pdev->dev.driver->name, sizeof(info->driver)); - strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); + strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); }
static int fec_enet_get_regs_len(struct net_device *ndev) @@@ -3824,6 -3901,10 +3866,10 @@@ fec_probe(struct platform_device *pdev !of_property_read_bool(np, "fsl,err006687-workaround-present")) fep->quirks |= FEC_QUIRK_ERR006687;
+ ret = fec_enet_ipc_handle_init(fep); + if (ret) + goto failed_ipc_init; + if (of_get_property(np, "fsl,magic-packet", NULL)) fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
@@@ -3880,7 -3961,7 +3926,7 @@@ }
fep->ptp_clk_on = false; - spin_lock_init(&fep->tmreg_lock); + mutex_init(&fep->ptp_clk_mutex);
/* clk_ref is optional, depends on board */ fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref"); @@@ -4021,6 -4102,7 +4067,7 @@@ failed_rgmii_delay of_phy_deregister_fixed_link(np); of_node_put(phy_node); failed_stop_mode: + failed_ipc_init: failed_phy: dev_id--; failed_ioremap: @@@ -4065,6 -4147,7 +4112,7 @@@ static int __maybe_unused fec_suspend(s { struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); + int ret;
rtnl_lock(); if (netif_running(ndev)) { @@@ -4089,6 -4172,15 +4137,15 @@@ } /* It's safe to disable clocks since interrupts are masked */ fec_enet_clk_enable(ndev, false); + + fep->rpm_active = !pm_runtime_status_suspended(dev); + if (fep->rpm_active) { + ret = pm_runtime_force_suspend(dev); + if (ret < 0) { + rtnl_unlock(); + return ret; + } + } } rtnl_unlock();
@@@ -4119,6 -4211,9 +4176,9 @@@ static int __maybe_unused fec_resume(st
rtnl_lock(); if (netif_running(ndev)) { + if (fep->rpm_active) + pm_runtime_force_resume(dev); + ret = fec_enet_clk_enable(ndev, true); if (ret) { rtnl_unlock(); diff --combined drivers/net/ethernet/freescale/fec_ptp.c index 3dc3c0b626c2,7be97ab84e50..cffd9ad499dd --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@@ -365,21 -365,19 +365,21 @@@ static int fec_ptp_adjtime(struct ptp_c */ static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { - struct fec_enet_private *fep = + struct fec_enet_private *adapter = container_of(ptp, struct fec_enet_private, ptp_caps); u64 ns; unsigned long flags;
- spin_lock_irqsave(&fep->tmreg_lock, flags); + mutex_lock(&adapter->ptp_clk_mutex); /* Check the ptp clock */ - if (!fep->ptp_clk_on) { - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + if (!adapter->ptp_clk_on) { + mutex_unlock(&adapter->ptp_clk_mutex); return -EINVAL; } - ns = timecounter_read(&fep->tc); - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + spin_lock_irqsave(&adapter->tmreg_lock, flags); + ns = timecounter_read(&adapter->tc); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + mutex_unlock(&adapter->ptp_clk_mutex);
*ts = ns_to_timespec64(ns);
@@@ -404,10 -402,10 +404,10 @@@ static int fec_ptp_settime(struct ptp_c unsigned long flags; u32 counter;
- spin_lock_irqsave(&fep->tmreg_lock, flags); + mutex_lock(&fep->ptp_clk_mutex); /* Check the ptp clock */ if (!fep->ptp_clk_on) { - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); return -EINVAL; }
@@@ -417,11 -415,9 +417,11 @@@ */ counter = ns & fep->cc.mask;
+ spin_lock_irqsave(&fep->tmreg_lock, flags); writel(counter, fep->hwp + FEC_ATIME); timecounter_init(&fep->tc, &fep->cc, ns); spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex); return 0; }
@@@ -518,13 -514,11 +518,13 @@@ static void fec_time_keep(struct work_s struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep); unsigned long flags;
- spin_lock_irqsave(&fep->tmreg_lock, flags); + mutex_lock(&fep->ptp_clk_mutex); if (fep->ptp_clk_on) { + spin_lock_irqsave(&fep->tmreg_lock, flags); timecounter_read(&fep->tc); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); } - spin_unlock_irqrestore(&fep->tmreg_lock, flags); + mutex_unlock(&fep->ptp_clk_mutex);
schedule_delayed_work(&fep->time_keep, HZ); } @@@ -578,7 -572,7 +578,7 @@@ void fec_ptp_init(struct platform_devic int ret;
fep->ptp_caps.owner = THIS_MODULE; - strlcpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name)); + strscpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));
fep->ptp_caps.max_adj = 250000000; fep->ptp_caps.n_alarm = 0; @@@ -599,8 -593,6 +599,8 @@@ } fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
+ spin_lock_init(&fep->tmreg_lock); + fec_ptp_start_cyclecounter(ndev);
INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep); @@@ -633,7 -625,36 +633,7 @@@ void fec_ptp_stop(struct platform_devic struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev);
- if (fep->pps_enable) - fec_ptp_enable_pps(fep, 0); - cancel_delayed_work_sync(&fep->time_keep); if (fep->ptp_clock) ptp_clock_unregister(fep->ptp_clock); } - -void fec_ptp_save_state(struct fec_enet_private *fep) -{ - u32 atime_inc_corr; - - fec_ptp_gettime(&fep->ptp_caps, &fep->ptp_saved_state.ts_phc); - fep->ptp_saved_state.ns_sys = ktime_get_ns(); - - fep->ptp_saved_state.at_corr = readl(fep->hwp + FEC_ATIME_CORR); - atime_inc_corr = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_CORR_MASK; - fep->ptp_saved_state.at_inc_corr = (u8)(atime_inc_corr >> FEC_T_INC_CORR_OFFSET); -} - -int fec_ptp_restore_state(struct fec_enet_private *fep) -{ - u32 atime_inc = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK; - u64 ns_sys; - - writel(fep->ptp_saved_state.at_corr, fep->hwp + FEC_ATIME_CORR); - atime_inc |= ((u32)fep->ptp_saved_state.at_inc_corr) << FEC_T_INC_CORR_OFFSET; - writel(atime_inc, fep->hwp + FEC_ATIME_INC); - - ns_sys = ktime_get_ns() - fep->ptp_saved_state.ns_sys; - timespec64_add_ns(&fep->ptp_saved_state.ts_phc, ns_sys); - return fec_ptp_settime(&fep->ptp_caps, &fep->ptp_saved_state.ts_phc); -} diff --combined drivers/net/ethernet/intel/iavf/iavf_main.c index 0c89f16bf1e2,1671e52b6ba2..79fef8c59d65 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@@ -1077,6 -1077,7 +1077,6 @@@ static int iavf_set_mac(struct net_devi { struct iavf_adapter *adapter = netdev_priv(netdev); struct sockaddr *addr = p; - bool handle_mac = iavf_is_mac_set_handled(netdev, addr->sa_data); int ret;
if (!is_valid_ether_addr(addr->sa_data)) @@@ -1093,9 -1094,10 +1093,9 @@@ return 0; }
- if (handle_mac) - goto done; - - ret = wait_event_interruptible_timeout(adapter->vc_waitqueue, false, msecs_to_jiffies(2500)); + ret = wait_event_interruptible_timeout(adapter->vc_waitqueue, + iavf_is_mac_set_handled(netdev, addr->sa_data), + msecs_to_jiffies(2500));
/* If ret < 0 then it means wait was interrupted. * If ret == 0 then it means we got a timeout. @@@ -1109,6 -1111,7 +1109,6 @@@ if (!ret) return -EAGAIN;
-done: if (!ether_addr_equal(netdev->dev_addr, addr->sa_data)) return -EACCES;
@@@ -1267,66 -1270,138 +1267,138 @@@ static void iavf_up_complete(struct iav }
/** - * iavf_down - Shutdown the connection processing + * iavf_clear_mac_vlan_filters - Remove mac and vlan filters not sent to PF + * yet and mark other to be removed. * @adapter: board private structure - * - * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. **/ - void iavf_down(struct iavf_adapter *adapter) + static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter) { - struct net_device *netdev = adapter->netdev; - struct iavf_vlan_filter *vlf; - struct iavf_cloud_filter *cf; - struct iavf_fdir_fltr *fdir; - struct iavf_mac_filter *f; - struct iavf_adv_rss *rss; - - if (adapter->state <= __IAVF_DOWN_PENDING) - return; - - netif_carrier_off(netdev); - netif_tx_disable(netdev); - adapter->link_up = false; - iavf_napi_disable_all(adapter); - iavf_irq_disable(adapter); + struct iavf_vlan_filter *vlf, *vlftmp; + struct iavf_mac_filter *f, *ftmp;
spin_lock_bh(&adapter->mac_vlan_list_lock); - /* clear the sync flag on all filters */ __dev_uc_unsync(adapter->netdev, NULL); __dev_mc_unsync(adapter->netdev, NULL);
/* remove all MAC filters */ - list_for_each_entry(f, &adapter->mac_filter_list, list) { - f->remove = true; + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, + list) { + if (f->add) { + list_del(&f->list); + kfree(f); + } else { + f->remove = true; + } }
/* remove all VLAN filters */ - list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { - vlf->remove = true; + list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list, + list) { + if (vlf->add) { + list_del(&vlf->list); + kfree(vlf); + } else { + vlf->remove = true; + } } - spin_unlock_bh(&adapter->mac_vlan_list_lock); + } + + /** + * iavf_clear_cloud_filters - Remove cloud filters not sent to PF yet and + * mark other to be removed. + * @adapter: board private structure + **/ + static void iavf_clear_cloud_filters(struct iavf_adapter *adapter) + { + struct iavf_cloud_filter *cf, *cftmp;
/* remove all cloud filters */ spin_lock_bh(&adapter->cloud_filter_list_lock); - list_for_each_entry(cf, &adapter->cloud_filter_list, list) { - cf->del = true; + list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, + list) { + if (cf->add) { + list_del(&cf->list); + kfree(cf); + adapter->num_cloud_filters--; + } else { + cf->del = true; + } } spin_unlock_bh(&adapter->cloud_filter_list_lock); + } + + /** + * iavf_clear_fdir_filters - Remove fdir filters not sent to PF yet and mark + * other to be removed. + * @adapter: board private structure + **/ + static void iavf_clear_fdir_filters(struct iavf_adapter *adapter) + { + struct iavf_fdir_fltr *fdir, *fdirtmp;
/* remove all Flow Director filters */ spin_lock_bh(&adapter->fdir_fltr_lock); - list_for_each_entry(fdir, &adapter->fdir_list_head, list) { - fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; + list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, + list) { + if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) { + list_del(&fdir->list); + kfree(fdir); + adapter->fdir_active_fltr--; + } else { + fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; + } } spin_unlock_bh(&adapter->fdir_fltr_lock); + } + + /** + * iavf_clear_adv_rss_conf - Remove adv rss conf not sent to PF yet and mark + * other to be removed. + * @adapter: board private structure + **/ + static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter) + { + struct iavf_adv_rss *rss, *rsstmp;
/* remove all advance RSS configuration */ spin_lock_bh(&adapter->adv_rss_lock); - list_for_each_entry(rss, &adapter->adv_rss_list_head, list) - rss->state = IAVF_ADV_RSS_DEL_REQUEST; + list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head, + list) { + if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) { + list_del(&rss->list); + kfree(rss); + } else { + rss->state = IAVF_ADV_RSS_DEL_REQUEST; + } + } spin_unlock_bh(&adapter->adv_rss_lock); + } + + /** + * iavf_down - Shutdown the connection processing + * @adapter: board private structure + * + * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock. + **/ + void iavf_down(struct iavf_adapter *adapter) + { + struct net_device *netdev = adapter->netdev; + + if (adapter->state <= __IAVF_DOWN_PENDING) + return; + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + adapter->link_up = false; + iavf_napi_disable_all(adapter); + iavf_irq_disable(adapter); + + iavf_clear_mac_vlan_filters(adapter); + iavf_clear_cloud_filters(adapter); + iavf_clear_fdir_filters(adapter); + iavf_clear_adv_rss_conf(adapter);
if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) { /* cancel any current operation */ @@@ -1335,11 -1410,16 +1407,16 @@@ * here for this to complete. The watchdog is still running * and it will take care of this. */ - adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER; - adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; - adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; - adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; - adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; + if (!list_empty(&adapter->mac_filter_list)) + adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER; + if (!list_empty(&adapter->vlan_filter_list)) + adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER; + if (!list_empty(&adapter->cloud_filter_list)) + adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER; + if (!list_empty(&adapter->fdir_list_head)) + adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; + if (!list_empty(&adapter->adv_rss_list_head)) + adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG; adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; }
@@@ -4178,6 -4258,7 +4255,7 @@@ err_unlock static int iavf_close(struct net_device *netdev) { struct iavf_adapter *adapter = netdev_priv(netdev); + u64 aq_to_restore; int status;
mutex_lock(&adapter->crit_lock); @@@ -4190,6 -4271,29 +4268,29 @@@ set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); if (CLIENT_ENABLED(adapter)) adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE; + /* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before + * IAVF_FLAG_AQ_DISABLE_QUEUES because in such case there is rtnl + * deadlock with adminq_task() until iavf_close timeouts. We must send + * IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES to make + * disable queues possible for vf. Give only necessary flags to + * iavf_down and save other to set them right before iavf_close() + * returns, when IAVF_FLAG_AQ_DISABLE_QUEUES will be already sent and + * iavf will be in DOWN state. + */ + aq_to_restore = adapter->aq_required; + adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG; + + /* Remove flags which we do not want to send after close or we want to + * send before disable queues. + */ + aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG | + IAVF_FLAG_AQ_ENABLE_QUEUES | + IAVF_FLAG_AQ_CONFIGURE_QUEUES | + IAVF_FLAG_AQ_ADD_VLAN_FILTER | + IAVF_FLAG_AQ_ADD_MAC_FILTER | + IAVF_FLAG_AQ_ADD_CLOUD_FILTER | + IAVF_FLAG_AQ_ADD_FDIR_FILTER | + IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
iavf_down(adapter); iavf_change_state(adapter, __IAVF_DOWN_PENDING); @@@ -4213,6 -4317,10 +4314,10 @@@ msecs_to_jiffies(500)); if (!status) netdev_warn(netdev, "Device resources not yet released\n"); + + mutex_lock(&adapter->crit_lock); + adapter->aq_required |= aq_to_restore; + mutex_unlock(&adapter->crit_lock); return 0; }
diff --combined drivers/net/ethernet/intel/ice/ice_lib.c index 58d483e2f539,d126f4cb3ba8..8a80da8e910e --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@@ -914,7 -914,7 +914,7 @@@ static void ice_set_dflt_vsi_ctx(struc */ static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) { - u16 offset = 0, qmap = 0, tx_count = 0, pow = 0; + u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0; u16 num_txq_per_tc, num_rxq_per_tc; u16 qcount_tx = vsi->alloc_txq; u16 qcount_rx = vsi->alloc_rxq; @@@ -981,25 -981,23 +981,25 @@@ * at least 1) */ if (offset) - vsi->num_rxq = offset; + rx_count = offset; else - vsi->num_rxq = num_rxq_per_tc; + rx_count = num_rxq_per_tc;
- if (vsi->num_rxq > vsi->alloc_rxq) { + if (rx_count > vsi->alloc_rxq) { dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", - vsi->num_rxq, vsi->alloc_rxq); + rx_count, vsi->alloc_rxq); return -EINVAL; }
- vsi->num_txq = tx_count; - if (vsi->num_txq > vsi->alloc_txq) { + if (tx_count > vsi->alloc_txq) { dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", - vsi->num_txq, vsi->alloc_txq); + tx_count, vsi->alloc_txq); return -EINVAL; }
+ vsi->num_txq = tx_count; + vsi->num_rxq = rx_count; + if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n"); /* since there is a chance that num_rxq could have been changed @@@ -1524,6 -1522,7 +1524,7 @@@ static int ice_vsi_alloc_rings(struct i ring->netdev = vsi->netdev; ring->dev = dev; ring->count = vsi->num_rx_desc; + ring->cached_phctime = pf->ptp.cached_phc_time; WRITE_ONCE(vsi->rx_rings[i], ring); }
@@@ -1563,6 -1562,22 +1564,22 @@@ void ice_vsi_manage_rss_lut(struct ice_ kfree(lut); }
+ /** + * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI + * @vsi: VSI to be configured + * @disable: set to true to have FCS / CRC in the frame data + */ + void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable) + { + int i; + + ice_for_each_rxq(vsi, i) + if (disable) + vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS; + else + vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS; + } + /** * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI * @vsi: VSI to be configured @@@ -3278,6 -3293,12 +3295,12 @@@ int ice_vsi_rebuild(struct ice_vsi *vsi */ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) ice_vsi_cfg_rss_lut_key(vsi); + + /* disable or enable CRC stripping */ + if (vsi->netdev) + ice_vsi_cfg_crc_strip(vsi, !!(vsi->netdev->features & + NETIF_F_RXFCS)); + break; case ICE_VSI_VF: ret = ice_vsi_alloc_q_vectors(vsi); @@@ -3492,7 -3513,6 +3515,7 @@@ ice_vsi_setup_q_map_mqprio(struct ice_v u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap; u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0]; int tc0_qcount = vsi->mqprio_qopt.qopt.count[0]; + u16 new_txq, new_rxq; u8 netdev_tc = 0; int i;
@@@ -3533,24 -3553,21 +3556,24 @@@ } }
- /* Set actual Tx/Rx queue pairs */ - vsi->num_txq = offset + qcount_tx; - if (vsi->num_txq > vsi->alloc_txq) { + new_txq = offset + qcount_tx; + if (new_txq > vsi->alloc_txq) { dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", - vsi->num_txq, vsi->alloc_txq); + new_txq, vsi->alloc_txq); return -EINVAL; }
- vsi->num_rxq = offset + qcount_rx; - if (vsi->num_rxq > vsi->alloc_rxq) { + new_rxq = offset + qcount_rx; + if (new_rxq > vsi->alloc_rxq) { dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", - vsi->num_rxq, vsi->alloc_rxq); + new_rxq, vsi->alloc_rxq); return -EINVAL; }
+ /* Set actual Tx/Rx queue pairs */ + vsi->num_txq = new_txq; + vsi->num_rxq = new_rxq; + /* Setup queue TC[0].qmap for given VSI context */ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); @@@ -3582,7 -3599,6 +3605,7 @@@ int ice_vsi_cfg_tc(struct ice_vsi *vsi { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct ice_pf *pf = vsi->back; + struct ice_tc_cfg old_tc_cfg; struct ice_vsi_ctx *ctx; struct device *dev; int i, ret = 0; @@@ -3607,7 -3623,6 +3630,7 @@@ max_txqs[i] = vsi->num_txq; }
+ memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg)); vsi->tc_cfg.ena_tc = ena_tc; vsi->tc_cfg.numtc = num_tc;
@@@ -3624,10 -3639,8 +3647,10 @@@ else ret = ice_vsi_setup_q_map(vsi, ctx);
- if (ret) + if (ret) { + memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg)); goto out; + }
/* must to indicate which section of VSI context are being modified */ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); diff --combined drivers/net/ethernet/intel/ice/ice_main.c index 04836bbaf7d5,aa26672b7205..b0890e6b3fad --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@@ -2399,6 -2399,8 +2399,6 @@@ int ice_schedule_reset(struct ice_pf *p return -EBUSY; }
- ice_unplug_aux_dev(pf); - switch (reset) { case ICE_RESET_PFR: set_bit(ICE_PFR_REQ, pf->state); @@@ -3093,7 -3095,8 +3093,8 @@@ static irqreturn_t ice_misc_intr(int __
if (oicr & PFINT_OICR_TSYN_TX_M) { ena_mask &= ~PFINT_OICR_TSYN_TX_M; - ice_ptp_process_ts(pf); + if (!hw->reset_ongoing) + ret = IRQ_WAKE_THREAD; }
if (oicr & PFINT_OICR_TSYN_EVNT_M) { @@@ -3128,7 -3131,8 +3129,8 @@@ ice_service_task_schedule(pf); } } - ret = IRQ_HANDLED; + if (!ret) + ret = IRQ_HANDLED;
ice_service_task_schedule(pf); ice_irq_dynamic_ena(hw, NULL, NULL); @@@ -3136,6 -3140,24 +3138,24 @@@ return ret; }
+ /** + * ice_misc_intr_thread_fn - misc interrupt thread function + * @irq: interrupt number + * @data: pointer to a q_vector + */ + static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data) + { + irqreturn_t ret = IRQ_HANDLED; + struct ice_pf *pf = data; + bool irq_handled; + + irq_handled = ice_ptp_process_ts(pf); + if (!irq_handled) + ret = IRQ_WAKE_THREAD; + + return ret; + } + /** * ice_dis_ctrlq_interrupts - disable control queue interrupts * @hw: pointer to HW structure @@@ -3248,10 -3270,12 +3268,12 @@@ static int ice_req_irq_msix_misc(struc pf->num_avail_sw_msix -= 1; pf->oicr_idx = (u16)oicr_idx;
- err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector, - ice_misc_intr, 0, pf->int_name, pf); + err = devm_request_threaded_irq(dev, + pf->msix_entries[pf->oicr_idx].vector, + ice_misc_intr, ice_misc_intr_thread_fn, + 0, pf->int_name, pf); if (err) { - dev_err(dev, "devm_request_irq for %s failed: %d\n", + dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n", pf->int_name, err); ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID); pf->num_avail_sw_msix += 1; @@@ -3391,6 -3415,11 +3413,11 @@@ static void ice_set_netdev_features(str if (is_dvm_ena) netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; + + /* Leave CRC / FCS stripping enabled by default, but allow the value to + * be changed at runtime + */ + netdev->hw_features |= NETIF_F_RXFCS; }
/** @@@ -3922,88 -3951,135 +3949,135 @@@ static int ice_init_pf(struct ice_pf *p return 0; }
+ /** + * ice_reduce_msix_usage - Reduce usage of MSI-X vectors + * @pf: board private structure + * @v_remain: number of remaining MSI-X vectors to be distributed + * + * Reduce the usage of MSI-X vectors when entire request cannot be fulfilled. + * pf->num_lan_msix and pf->num_rdma_msix values are set based on number of + * remaining vectors. + */ + static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain) + { + int v_rdma; + + if (!ice_is_rdma_ena(pf)) { + pf->num_lan_msix = v_remain; + return; + } + + /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */ + v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1; + + if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) { + dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n"); + clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); + + pf->num_rdma_msix = 0; + pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; + } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) || + (v_remain - v_rdma < v_rdma)) { + /* Support minimum RDMA and give remaining vectors to LAN MSIX */ + pf->num_rdma_msix = ICE_MIN_RDMA_MSIX; + pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX; + } else { + /* Split remaining MSIX with RDMA after accounting for AEQ MSIX + */ + pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + + ICE_RDMA_NUM_AEQ_MSIX; + pf->num_lan_msix = v_remain - pf->num_rdma_msix; + } + } + /** * ice_ena_msix_range - Request a range of MSIX vectors from the OS * @pf: board private structure * - * compute the number of MSIX vectors required (v_budget) and request from - * the OS. Return the number of vectors reserved or negative on failure + * Compute the number of MSIX vectors wanted and request from the OS. Adjust + * device usage if there are not enough vectors. Return the number of vectors + * reserved or negative on failure. 
*/ static int ice_ena_msix_range(struct ice_pf *pf) { - int num_cpus, v_left, v_actual, v_other, v_budget = 0; + int num_cpus, hw_num_msix, v_other, v_wanted, v_actual; struct device *dev = ice_pf_to_dev(pf); - int needed, err, i; + int err, i;
- v_left = pf->hw.func_caps.common_cap.num_msix_vectors; + hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors; num_cpus = num_online_cpus();
- /* reserve for LAN miscellaneous handler */ - needed = ICE_MIN_LAN_OICR_MSIX; - if (v_left < needed) - goto no_hw_vecs_left_err; - v_budget += needed; - v_left -= needed; - - /* reserve for flow director */ - if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { - needed = ICE_FDIR_MSIX; - if (v_left < needed) - goto no_hw_vecs_left_err; - v_budget += needed; - v_left -= needed; - } - - /* reserve for switchdev */ - needed = ICE_ESWITCH_MSIX; - if (v_left < needed) - goto no_hw_vecs_left_err; - v_budget += needed; - v_left -= needed; - - /* total used for non-traffic vectors */ - v_other = v_budget; - - /* reserve vectors for LAN traffic */ - needed = num_cpus; - if (v_left < needed) - goto no_hw_vecs_left_err; - pf->num_lan_msix = needed; - v_budget += needed; - v_left -= needed; - - /* reserve vectors for RDMA auxiliary driver */ + /* LAN miscellaneous handler */ + v_other = ICE_MIN_LAN_OICR_MSIX; + + /* Flow Director */ + if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) + v_other += ICE_FDIR_MSIX; + + /* switchdev */ + v_other += ICE_ESWITCH_MSIX; + + v_wanted = v_other; + + /* LAN traffic */ + pf->num_lan_msix = num_cpus; + v_wanted += pf->num_lan_msix; + + /* RDMA auxiliary driver */ if (ice_is_rdma_ena(pf)) { - needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; - if (v_left < needed) - goto no_hw_vecs_left_err; - pf->num_rdma_msix = needed; - v_budget += needed; - v_left -= needed; + pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; + v_wanted += pf->num_rdma_msix; + } + + if (v_wanted > hw_num_msix) { + int v_remain; + + dev_warn(dev, "not enough device MSI-X vectors. 
wanted = %d, available = %d\n", + v_wanted, hw_num_msix); + + if (hw_num_msix < ICE_MIN_MSIX) { + err = -ERANGE; + goto exit_err; + } + + v_remain = hw_num_msix - v_other; + if (v_remain < ICE_MIN_LAN_TXRX_MSIX) { + v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX; + v_remain = ICE_MIN_LAN_TXRX_MSIX; + } + + ice_reduce_msix_usage(pf, v_remain); + v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other; + + dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n", + pf->num_lan_msix); + if (ice_is_rdma_ena(pf)) + dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n", + pf->num_rdma_msix); }
- pf->msix_entries = devm_kcalloc(dev, v_budget, + pf->msix_entries = devm_kcalloc(dev, v_wanted, sizeof(*pf->msix_entries), GFP_KERNEL); if (!pf->msix_entries) { err = -ENOMEM; goto exit_err; }
- for (i = 0; i < v_budget; i++) + for (i = 0; i < v_wanted; i++) pf->msix_entries[i].entry = i;
/* actually reserve the vectors */ v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries, - ICE_MIN_MSIX, v_budget); + ICE_MIN_MSIX, v_wanted); if (v_actual < 0) { dev_err(dev, "unable to reserve MSI-X vectors\n"); err = v_actual; goto msix_err; }
- if (v_actual < v_budget) { + if (v_actual < v_wanted) { dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n", - v_budget, v_actual); + v_wanted, v_actual);
if (v_actual < ICE_MIN_MSIX) { /* error if we can't get minimum vectors */ @@@ -4012,38 -4088,11 +4086,11 @@@ goto msix_err; } else { int v_remain = v_actual - v_other; - int v_rdma = 0, v_min_rdma = 0;
- if (ice_is_rdma_ena(pf)) { - /* Need at least 1 interrupt in addition to - * AEQ MSIX - */ - v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1; - v_min_rdma = ICE_MIN_RDMA_MSIX; - } + if (v_remain < ICE_MIN_LAN_TXRX_MSIX) + v_remain = ICE_MIN_LAN_TXRX_MSIX;
- if (v_actual == ICE_MIN_MSIX || - v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) { - dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n"); - clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); - - pf->num_rdma_msix = 0; - pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; - } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) || - (v_remain - v_rdma < v_rdma)) { - /* Support minimum RDMA and give remaining - * vectors to LAN MSIX - */ - pf->num_rdma_msix = v_min_rdma; - pf->num_lan_msix = v_remain - v_min_rdma; - } else { - /* Split remaining MSIX with RDMA after - * accounting for AEQ MSIX - */ - pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + - ICE_RDMA_NUM_AEQ_MSIX; - pf->num_lan_msix = v_remain - pf->num_rdma_msix; - } + ice_reduce_msix_usage(pf, v_remain);
dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n", pf->num_lan_msix); @@@ -4058,12 -4107,7 +4105,7 @@@
msix_err: devm_kfree(dev, pf->msix_entries); - goto exit_err;
- no_hw_vecs_left_err: - dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n", - needed, v_left); - err = -ERANGE; exit_err: pf->num_rdma_msix = 0; pf->num_lan_msix = 0; @@@ -4682,8 -4726,6 +4724,6 @@@ ice_probe(struct pci_dev *pdev, const s ice_set_safe_mode_caps(hw); }
- hw->ucast_shared = true; - err = ice_init_pf(pf); if (err) { dev_err(dev, "ice_init_pf failed: %d\n", err); @@@ -5742,6 -5784,9 +5782,9 @@@ ice_fdb_del(struct ndmsg *ndm, __always NETIF_F_HW_VLAN_STAG_RX | \ NETIF_F_HW_VLAN_STAG_TX)
+ #define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_STAG_RX) + #define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \ NETIF_F_HW_VLAN_STAG_FILTER)
@@@ -5828,6 -5873,14 +5871,14 @@@ ice_fix_features(struct net_device *net NETIF_F_HW_VLAN_STAG_TX); }
+ if (!(netdev->features & NETIF_F_RXFCS) && + (features & NETIF_F_RXFCS) && + (features & NETIF_VLAN_STRIPPING_FEATURES) && + !ice_vsi_has_non_zero_vlans(np->vsi)) { + netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n"); + features &= ~NETIF_VLAN_STRIPPING_FEATURES; + } + return features; }
@@@ -5921,6 -5974,13 +5972,13 @@@ ice_set_vlan_features(struct net_devic current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES; requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES; if (current_vlan_features ^ requested_vlan_features) { + if ((features & NETIF_F_RXFCS) && + (features & NETIF_VLAN_STRIPPING_FEATURES)) { + dev_err(ice_pf_to_dev(vsi->back), + "To enable VLAN stripping, you must first enable FCS/CRC stripping\n"); + return -EIO; + } + err = ice_set_vlan_offload_features(vsi, features); if (err) return err; @@@ -6002,6 -6062,23 +6060,23 @@@ ice_set_features(struct net_device *net if (ret) return ret;
+ /* Turn on receive of FCS aka CRC, and after setting this + * flag the packet data will have the 4 byte CRC appended + */ + if (changed & NETIF_F_RXFCS) { + if ((features & NETIF_F_RXFCS) && + (features & NETIF_VLAN_STRIPPING_FEATURES)) { + dev_err(ice_pf_to_dev(vsi->back), + "To disable FCS/CRC stripping, you must first disable VLAN stripping\n"); + return -EIO; + } + + ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS)); + ret = ice_down_up(vsi); + if (ret) + return ret; + } + if (changed & NETIF_F_NTUPLE) { bool ena = !!(features & NETIF_F_NTUPLE);
@@@ -6705,6 -6782,31 +6780,31 @@@ int ice_down(struct ice_vsi *vsi return 0; }
+ /** + * ice_down_up - shutdown the VSI connection and bring it up + * @vsi: the VSI to be reconnected + */ + int ice_down_up(struct ice_vsi *vsi) + { + int ret; + + /* if DOWN already set, nothing to do */ + if (test_and_set_bit(ICE_VSI_DOWN, vsi->state)) + return 0; + + ret = ice_down(vsi); + if (ret) + return ret; + + ret = ice_up(vsi); + if (ret) { + netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n"); + return ret; + } + + return 0; + } + /** * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources * @vsi: VSI having resources allocated diff --combined drivers/net/ethernet/mediatek/mtk_eth_soc.c index b344632beadd,c19c67a480ae..df44711bd28a --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@@ -1458,7 -1458,7 +1458,7 @@@ static void mtk_update_rx_cpu_idx(struc
static bool mtk_page_pool_enabled(struct mtk_eth *eth) { - return !eth->hwlro; + return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2); }
static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth, @@@ -1573,8 -1573,8 +1573,8 @@@ static int mtk_xdp_submit_frame(struct .last = !xdp_frame_has_frags(xdpf), }; int err, index = 0, n_desc = 1, nr_frags; - struct mtk_tx_dma *htxd, *txd, *txd_pdma; struct mtk_tx_buf *htx_buf, *tx_buf; + struct mtk_tx_dma *htxd, *txd; void *data = xdpf->data;
if (unlikely(test_bit(MTK_RESETTING, ð->state))) @@@ -1608,7 -1608,6 +1608,6 @@@
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) { txd = mtk_qdma_phys_to_virt(ring, txd->txd2); - txd_pdma = qdma_to_pdma(ring, txd); if (txd == ring->last_free) goto unmap;
@@@ -1629,7 -1628,8 +1628,8 @@@ htx_buf->data = xdpf;
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { - txd_pdma = qdma_to_pdma(ring, txd); + struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd); + if (index & 1) txd_pdma->txd2 |= TX_DMA_LS0; else @@@ -1660,13 -1660,15 +1660,15 @@@
unmap: while (htxd != txd) { - txd_pdma = qdma_to_pdma(ring, htxd); tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size); mtk_tx_unmap(eth, tx_buf, NULL, false);
htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; - if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) + if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { + struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd); + txd_pdma->txd2 = TX_DMA_DESP2_DEF; + }
htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2); } @@@ -3556,8 -3558,8 +3558,8 @@@ static void mtk_get_drvinfo(struct net_ { struct mtk_mac *mac = netdev_priv(dev);
- strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver)); - strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info)); + strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver)); + strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info)); info->n_stats = ARRAY_SIZE(mtk_ethtool_stats); }
diff --combined drivers/net/ethernet/mellanox/mlx5/core/main.c index 89b2d9cea33f,1986f1c715b5..b45cef89370e --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@@ -494,24 -494,6 +494,24 @@@ static int max_uc_list_get_devlink_para return err; }
+bool mlx5_is_roce_on(struct mlx5_core_dev *dev) +{ + struct devlink *devlink = priv_to_devlink(dev); + union devlink_param_value val; + int err; + + err = devlink_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, + &val); + + if (!err) + return val.vbool; + + mlx5_core_dbg(dev, "Failed to get param. err = %d\n", err); + return MLX5_CAP_GEN(dev, roce); +} +EXPORT_SYMBOL(mlx5_is_roce_on); + static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx) { void *set_hca_cap; @@@ -615,8 -597,7 +615,8 @@@ static int handle_hca_cap(struct mlx5_c MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));
if (MLX5_CAP_GEN(dev, roce_rw_supported)) - MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_init_enabled(dev)); + MLX5_SET(cmd_hca_cap, set_hca_cap, roce, + mlx5_is_roce_on(dev));
max_uc_list = max_uc_list_get_devlink_param(dev); if (max_uc_list > 0) @@@ -642,7 -623,7 +642,7 @@@ */ static bool is_roce_fw_disabled(struct mlx5_core_dev *dev) { - return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_init_enabled(dev)) || + return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_on(dev)) || (!MLX5_CAP_GEN(dev, roce_rw_supported) && !MLX5_CAP_GEN(dev, roce)); }
@@@ -1507,6 -1488,7 +1507,7 @@@ static const int types[] = MLX5_CAP_IPSEC, MLX5_CAP_PORT_SELECTION, MLX5_CAP_DEV_SHAMPO, + MLX5_CAP_MACSEC, };
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev) diff --combined drivers/net/ethernet/mellanox/mlxsw/i2c.c index 50b7121a5e3c,716c73e4fd59..f5f5f8dc3d19 --- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c +++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c @@@ -9,6 -9,7 +9,7 @@@ #include <linux/mutex.h> #include <linux/module.h> #include <linux/mod_devicetable.h> + #include <linux/platform_data/mlxreg.h> #include <linux/slab.h>
#include "cmd.h" @@@ -51,6 -52,15 +52,15 @@@ #define MLXSW_I2C_TIMEOUT_MSECS 5000 #define MLXSW_I2C_MAX_DATA_SIZE 256
+ /* Driver can be initialized by kernel platform driver or from the user + * space. In the first case IRQ line number is passed through the platform + * data, otherwise default IRQ line is to be used. Default IRQ is relevant + * only for specific I2C slave address, allowing 3.4 MHz I2C path to the chip + * (special hardware feature for I2C acceleration). + */ + #define MLXSW_I2C_DEFAULT_IRQ 17 + #define MLXSW_FAST_I2C_SLAVE 0x37 + /** * struct mlxsw_i2c - device private data: * @cmd: command attributes; @@@ -63,6 -73,9 +73,9 @@@ * @core: switch core pointer; * @bus_info: bus info block; * @block_size: maximum block size allowed to pass to under layer; + * @pdata: device platform data; + * @irq_work: interrupts work item; + * @irq: IRQ line number; */ struct mlxsw_i2c { struct { @@@ -76,6 -89,9 +89,9 @@@ struct mlxsw_core *core; struct mlxsw_bus_info bus_info; u16 block_size; + struct mlxreg_core_hotplug_platform_data *pdata; + struct work_struct irq_work; + int irq; };
#define MLXSW_I2C_READ_MSG(_client, _addr_buf, _buf, _len) { \ @@@ -546,6 -562,67 +562,67 @@@ static void mlxsw_i2c_fini(void *bus_pr mlxsw_i2c->core = NULL; }
+ static void mlxsw_i2c_work_handler(struct work_struct *work) + { + struct mlxsw_i2c *mlxsw_i2c; + + mlxsw_i2c = container_of(work, struct mlxsw_i2c, irq_work); + mlxsw_core_irq_event_handlers_call(mlxsw_i2c->core); + } + + static irqreturn_t mlxsw_i2c_irq_handler(int irq, void *dev) + { + struct mlxsw_i2c *mlxsw_i2c = dev; + + mlxsw_core_schedule_work(&mlxsw_i2c->irq_work); + + /* Interrupt handler shares IRQ line with 'main' interrupt handler. + * Return here IRQ_NONE, while main handler will return IRQ_HANDLED. + */ + return IRQ_NONE; + } + + static int mlxsw_i2c_irq_init(struct mlxsw_i2c *mlxsw_i2c, u8 addr) + { + int err; + + /* Initialize interrupt handler if system hotplug driver is reachable, + * otherwise interrupt line is not enabled and interrupts will not be + * raised to CPU. Also request_irq() call will be not valid. + */ + if (!IS_REACHABLE(CONFIG_MLXREG_HOTPLUG)) + return 0; + + /* Set default interrupt line. */ + if (mlxsw_i2c->pdata && mlxsw_i2c->pdata->irq) + mlxsw_i2c->irq = mlxsw_i2c->pdata->irq; + else if (addr == MLXSW_FAST_I2C_SLAVE) + mlxsw_i2c->irq = MLXSW_I2C_DEFAULT_IRQ; + + if (!mlxsw_i2c->irq) + return 0; + + INIT_WORK(&mlxsw_i2c->irq_work, mlxsw_i2c_work_handler); + err = request_irq(mlxsw_i2c->irq, mlxsw_i2c_irq_handler, + IRQF_TRIGGER_FALLING | IRQF_SHARED, "mlxsw-i2c", + mlxsw_i2c); + if (err) { + dev_err(mlxsw_i2c->bus_info.dev, "Failed to request irq: %d\n", + err); + return err; + } + + return 0; + } + + static void mlxsw_i2c_irq_fini(struct mlxsw_i2c *mlxsw_i2c) + { + if (!IS_REACHABLE(CONFIG_MLXREG_HOTPLUG) || !mlxsw_i2c->irq) + return; + cancel_work_sync(&mlxsw_i2c->irq_work); + free_irq(mlxsw_i2c->irq, mlxsw_i2c); + } + static const struct mlxsw_bus mlxsw_i2c_bus = { .kind = "i2c", .init = mlxsw_i2c_init, @@@ -638,17 -715,24 +715,24 @@@ static int mlxsw_i2c_probe(struct i2c_c mlxsw_i2c->bus_info.dev = &client->dev; mlxsw_i2c->bus_info.low_frequency = true; mlxsw_i2c->dev = &client->dev; + mlxsw_i2c->pdata = 
client->dev.platform_data; + + err = mlxsw_i2c_irq_init(mlxsw_i2c, client->addr); + if (err) + goto errout;
err = mlxsw_core_bus_device_register(&mlxsw_i2c->bus_info, &mlxsw_i2c_bus, mlxsw_i2c, false, NULL, NULL); if (err) { dev_err(&client->dev, "Fail to register core bus\n"); - return err; + goto err_bus_device_register; }
return 0;
+ err_bus_device_register: + mlxsw_i2c_irq_fini(mlxsw_i2c); errout: mutex_destroy(&mlxsw_i2c->cmd.lock); i2c_set_clientdata(client, NULL); @@@ -656,12 -740,15 +740,13 @@@ return err; }
-static int mlxsw_i2c_remove(struct i2c_client *client) +static void mlxsw_i2c_remove(struct i2c_client *client) { struct mlxsw_i2c *mlxsw_i2c = i2c_get_clientdata(client);
mlxsw_core_bus_device_unregister(mlxsw_i2c->core, false); + mlxsw_i2c_irq_fini(mlxsw_i2c); mutex_destroy(&mlxsw_i2c->cmd.lock); - - return 0; }
int mlxsw_i2c_driver_register(struct i2c_driver *i2c_driver) diff --combined drivers/net/team/team.c index 154a3c0a6dfd,ab92416d861f..62ade69295a9 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@@ -1275,12 -1275,10 +1275,12 @@@ static int team_port_add(struct team *t } }
- netif_addr_lock_bh(dev); - dev_uc_sync_multiple(port_dev, dev); - dev_mc_sync_multiple(port_dev, dev); - netif_addr_unlock_bh(dev); + if (dev->flags & IFF_UP) { + netif_addr_lock_bh(dev); + dev_uc_sync_multiple(port_dev, dev); + dev_mc_sync_multiple(port_dev, dev); + netif_addr_unlock_bh(dev); + }
port->index = -1; list_add_tail_rcu(&port->list, &team->port_list); @@@ -1351,10 -1349,8 +1351,10 @@@ static int team_port_del(struct team *t netdev_rx_handler_unregister(port_dev); team_port_disable_netpoll(port); vlan_vids_del_by_dev(port_dev, dev); - dev_uc_unsync(port_dev, dev); - dev_mc_unsync(port_dev, dev); + if (dev->flags & IFF_UP) { + dev_uc_unsync(port_dev, dev); + dev_mc_unsync(port_dev, dev); + } dev_close(port_dev); team_port_leave(team, port);
@@@ -1704,14 -1700,6 +1704,14 @@@ static int team_open(struct net_device
static int team_close(struct net_device *dev) { + struct team *team = netdev_priv(dev); + struct team_port *port; + + list_for_each_entry(port, &team->port_list, list) { + dev_uc_unsync(port->dev, dev); + dev_mc_unsync(port->dev, dev); + } + return 0; }
@@@ -2082,8 -2070,8 +2082,8 @@@ static const struct net_device_ops team static void team_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { - strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); + strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); + strscpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); }
static int team_ethtool_get_link_ksettings(struct net_device *dev, @@@ -2852,6 -2840,7 +2852,7 @@@ static struct genl_family team_nl_famil .module = THIS_MODULE, .small_ops = team_nl_ops, .n_small_ops = ARRAY_SIZE(team_nl_ops), + .resv_start_op = TEAM_CMD_PORT_LIST_GET + 1, .mcgrps = team_nl_mcgrps, .n_mcgrps = ARRAY_SIZE(team_nl_mcgrps), }; diff --combined drivers/net/wireguard/netlink.c index 5c804bcabfe6,0c0644e762e5..43c8c84e7ea8 --- a/drivers/net/wireguard/netlink.c +++ b/drivers/net/wireguard/netlink.c @@@ -436,13 -436,14 +436,13 @@@ static int set_peer(struct wg_device *w if (attrs[WGPEER_A_ENDPOINT]) { struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]); size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]); + struct endpoint endpoint = { { { 0 } } };
- if ((len == sizeof(struct sockaddr_in) && - addr->sa_family == AF_INET) || - (len == sizeof(struct sockaddr_in6) && - addr->sa_family == AF_INET6)) { - struct endpoint endpoint = { { { 0 } } }; - - memcpy(&endpoint.addr, addr, len); + if (len == sizeof(struct sockaddr_in) && addr->sa_family == AF_INET) { + endpoint.addr4 = *(struct sockaddr_in *)addr; + wg_socket_set_peer_endpoint(peer, &endpoint); + } else if (len == sizeof(struct sockaddr_in6) && addr->sa_family == AF_INET6) { + endpoint.addr6 = *(struct sockaddr_in6 *)addr; wg_socket_set_peer_endpoint(peer, &endpoint); } } @@@ -620,6 -621,7 +620,7 @@@ static const struct genl_ops genl_ops[ static struct genl_family genl_family __ro_after_init = { .ops = genl_ops, .n_ops = ARRAY_SIZE(genl_ops), + .resv_start_op = WG_CMD_SET_DEVICE + 1, .name = WG_GENL_NAME, .version = WG_GENL_VERSION, .maxattr = WGDEVICE_A_MAX, diff --combined drivers/pinctrl/pinctrl-ocelot.c index c7df8c5fe585,340ca2373429..83464e0bf4e6 --- a/drivers/pinctrl/pinctrl-ocelot.c +++ b/drivers/pinctrl/pinctrl-ocelot.c @@@ -10,6 -10,7 +10,7 @@@ #include <linux/gpio/driver.h> #include <linux/interrupt.h> #include <linux/io.h> + #include <linux/mfd/ocelot.h> #include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/of_platform.h> @@@ -331,7 -332,6 +332,7 @@@ struct ocelot_pinctrl const struct ocelot_pincfg_data *pincfg_data; struct ocelot_pmx_func func[FUNC_MAX]; u8 stride; + struct workqueue_struct *wq; };
struct ocelot_match_data { @@@ -339,11 -339,6 +340,11 @@@ struct ocelot_pincfg_data pincfg_data; };
+struct ocelot_irq_work { + struct work_struct irq_work; + struct irq_desc *irq_desc; +}; + #define LUTON_P(p, f0, f1) \ static struct ocelot_pin_caps luton_pin_##p = { \ .pin = p, \ @@@ -1819,75 -1814,6 +1820,75 @@@ static void ocelot_irq_mask(struct irq_ gpiochip_disable_irq(chip, gpio); }
+static void ocelot_irq_work(struct work_struct *work) +{ + struct ocelot_irq_work *w = container_of(work, struct ocelot_irq_work, irq_work); + struct irq_chip *parent_chip = irq_desc_get_chip(w->irq_desc); + struct gpio_chip *chip = irq_desc_get_chip_data(w->irq_desc); + struct irq_data *data = irq_desc_get_irq_data(w->irq_desc); + unsigned int gpio = irqd_to_hwirq(data); + + local_irq_disable(); + chained_irq_enter(parent_chip, w->irq_desc); + generic_handle_domain_irq(chip->irq.domain, gpio); + chained_irq_exit(parent_chip, w->irq_desc); + local_irq_enable(); + + kfree(w); +} + +static void ocelot_irq_unmask_level(struct irq_data *data) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(data); + struct ocelot_pinctrl *info = gpiochip_get_data(chip); + struct irq_desc *desc = irq_data_to_desc(data); + unsigned int gpio = irqd_to_hwirq(data); + unsigned int bit = BIT(gpio % 32); + bool ack = false, active = false; + u8 trigger_level; + int val; + + trigger_level = irqd_get_trigger_type(data); + + /* Check if the interrupt line is still active. */ + regmap_read(info->map, REG(OCELOT_GPIO_IN, info, gpio), &val); + if ((!(val & bit) && trigger_level == IRQ_TYPE_LEVEL_LOW) || + (val & bit && trigger_level == IRQ_TYPE_LEVEL_HIGH)) + active = true; + + /* + * Check if the interrupt controller has seen any changes in the + * interrupt line. + */ + regmap_read(info->map, REG(OCELOT_GPIO_INTR, info, gpio), &val); + if (val & bit) + ack = true; + + /* Enable the interrupt now */ + gpiochip_enable_irq(chip, gpio); + regmap_update_bits(info->map, REG(OCELOT_GPIO_INTR_ENA, info, gpio), + bit, bit); + + /* + * In case the interrupt line is still active and the interrupt + * controller has not seen any changes in the interrupt line, then it + * means that there happen another interrupt while the line was active. + * So we missed that one, so we need to kick the interrupt again + * handler. 
+ */ + if (active && !ack) { + struct ocelot_irq_work *work; + + work = kmalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return; + + work->irq_desc = desc; + INIT_WORK(&work->irq_work, ocelot_irq_work); + queue_work(info->wq, &work->irq_work); + } +} + static void ocelot_irq_unmask(struct irq_data *data) { struct gpio_chip *chip = irq_data_get_irq_chip_data(data); @@@ -1911,12 -1837,13 +1912,12 @@@ static void ocelot_irq_ack(struct irq_d
static int ocelot_irq_set_type(struct irq_data *data, unsigned int type);
-static struct irq_chip ocelot_eoi_irqchip = { +static struct irq_chip ocelot_level_irqchip = { .name = "gpio", .irq_mask = ocelot_irq_mask, - .irq_eoi = ocelot_irq_ack, - .irq_unmask = ocelot_irq_unmask, - .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED | - IRQCHIP_IMMUTABLE, + .irq_ack = ocelot_irq_ack, + .irq_unmask = ocelot_irq_unmask_level, + .flags = IRQCHIP_IMMUTABLE, .irq_set_type = ocelot_irq_set_type, GPIOCHIP_IRQ_RESOURCE_HELPERS }; @@@ -1933,9 -1860,14 +1934,9 @@@ static struct irq_chip ocelot_irqchip
static int ocelot_irq_set_type(struct irq_data *data, unsigned int type) { - type &= IRQ_TYPE_SENSE_MASK; - - if (!(type & (IRQ_TYPE_EDGE_BOTH | IRQ_TYPE_LEVEL_HIGH))) - return -EINVAL; - - if (type & IRQ_TYPE_LEVEL_HIGH) - irq_set_chip_handler_name_locked(data, &ocelot_eoi_irqchip, - handle_fasteoi_irq, NULL); + if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) + irq_set_chip_handler_name_locked(data, &ocelot_level_irqchip, + handle_level_irq, NULL); if (type & IRQ_TYPE_EDGE_BOTH) irq_set_chip_handler_name_locked(data, &ocelot_irqchip, handle_edge_irq, NULL); @@@ -2044,7 -1976,6 +2045,6 @@@ static int ocelot_pinctrl_probe(struct struct ocelot_pinctrl *info; struct reset_control *reset; struct regmap *pincfg; - void __iomem *base; int ret; struct regmap_config regmap_config = { .reg_bits = 32, @@@ -2065,10 -1996,6 +2065,10 @@@ if (!info->desc) return -ENOMEM;
+ info->wq = alloc_ordered_workqueue("ocelot_ordered", 0); + if (!info->wq) + return -ENOMEM; + info->pincfg_data = &data->pincfg_data;
reset = devm_reset_control_get_optional_shared(dev, "switch"); @@@ -2077,21 -2004,15 +2077,15 @@@ "Failed to get reset\n"); reset_control_reset(reset);
- base = devm_ioremap_resource(dev, - platform_get_resource(pdev, IORESOURCE_MEM, 0)); - if (IS_ERR(base)) - return PTR_ERR(base); - info->stride = 1 + (info->desc->npins - 1) / 32;
regmap_config.max_register = OCELOT_GPIO_SD_MAP * info->stride + 15 * 4;
- info->map = devm_regmap_init_mmio(dev, base, &regmap_config); - if (IS_ERR(info->map)) { - dev_err(dev, "Failed to create regmap\n"); - return PTR_ERR(info->map); - } + info->map = ocelot_regmap_from_resource(pdev, 0, &regmap_config); + if (IS_ERR(info->map)) + return dev_err_probe(dev, PTR_ERR(info->map), + "Failed to create regmap\n"); - dev_set_drvdata(dev, info->map); + dev_set_drvdata(dev, info); info->dev = dev;
/* Pinconf registers */ @@@ -2116,15 -2037,6 +2110,15 @@@ return 0; }
+static int ocelot_pinctrl_remove(struct platform_device *pdev) +{ + struct ocelot_pinctrl *info = platform_get_drvdata(pdev); + + destroy_workqueue(info->wq); + + return 0; +} + static struct platform_driver ocelot_pinctrl_driver = { .driver = { .name = "pinctrl-ocelot", @@@ -2132,7 -2044,6 +2126,7 @@@ .suppress_bind_attrs = true, }, .probe = ocelot_pinctrl_probe, + .remove = ocelot_pinctrl_remove, }; module_platform_driver(ocelot_pinctrl_driver); MODULE_LICENSE("Dual MIT/GPL"); diff --combined kernel/bpf/btf.c index 36fd4b509294,903719b89238..85d8dc2f7af4 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@@ -3128,7 -3128,7 +3128,7 @@@ static int btf_struct_resolve(struct bt if (v->next_member) { const struct btf_type *last_member_type; const struct btf_member *last_member; - u16 last_member_type_id; + u32 last_member_type_id;
last_member = btf_type_member(v->t) + v->next_member - 1; last_member_type_id = last_member->type; @@@ -5864,26 -5864,25 +5864,25 @@@ again }
static int __get_type_size(struct btf *btf, u32 btf_id, - const struct btf_type **bad_type) + const struct btf_type **ret_type) { const struct btf_type *t;
+ *ret_type = btf_type_by_id(btf, 0); if (!btf_id) /* void */ return 0; t = btf_type_by_id(btf, btf_id); while (t && btf_type_is_modifier(t)) t = btf_type_by_id(btf, t->type); - if (!t) { - *bad_type = btf_type_by_id(btf, 0); + if (!t) return -EINVAL; - } + *ret_type = t; if (btf_type_is_ptr(t)) /* kernel size of pointer. Not BPF's size of pointer*/ return sizeof(void *); if (btf_type_is_int(t) || btf_is_any_enum(t)) return t->size; - *bad_type = t; return -EINVAL; }
@@@ -6175,6 -6174,7 +6174,7 @@@ static int btf_check_func_arg_match(str { enum bpf_prog_type prog_type = resolve_prog_type(env->prog); bool rel = false, kptr_get = false, trusted_arg = false; + bool sleepable = false; struct bpf_verifier_log *log = &env->log; u32 i, nargs, ref_id, ref_obj_id = 0; bool is_kfunc = btf_is_kernel(btf); @@@ -6212,6 -6212,7 +6212,7 @@@ rel = kfunc_flags & KF_RELEASE; kptr_get = kfunc_flags & KF_KPTR_GET; trusted_arg = kfunc_flags & KF_TRUSTED_ARGS; + sleepable = kfunc_flags & KF_SLEEPABLE; }
/* check that BTF function arguments match actual types that the @@@ -6419,6 -6420,13 +6420,13 @@@ func_name); return -EINVAL; } + + if (sleepable && !env->prog->aux->sleepable) { + bpf_log(log, "kernel function %s is sleepable but the program is not\n", + func_name); + return -EINVAL; + } + /* returns argument register number > 0 in case of reference release kfunc */ return rel ? ref_regno : 0; } diff --combined kernel/bpf/syscall.c index 1bd18af8af83,4fb08c43420d..1834183345f1 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@@ -638,7 -638,10 +638,10 @@@ static void __bpf_map_put(struct bpf_ma bpf_map_free_id(map, do_idr_lock); btf_put(map->btf); INIT_WORK(&map->work, bpf_map_free_deferred); - schedule_work(&map->work); + /* Avoid spawning kworkers, since they all might contend + * for the same mutex like slab_mutex. + */ + queue_work(system_unbound_wq, &map->work); } }
@@@ -1437,9 -1440,9 +1440,9 @@@ err_put
#define BPF_MAP_DELETE_ELEM_LAST_FIELD key
- static int map_delete_elem(union bpf_attr *attr) + static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr) { - void __user *ukey = u64_to_user_ptr(attr->key); + bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel); int ufd = attr->map_fd; struct bpf_map *map; struct fd f; @@@ -1459,7 -1462,7 +1462,7 @@@ goto err_put; }
- key = __bpf_copy_key(ukey, map->key_size); + key = ___bpf_copy_key(ukey, map->key_size); if (IS_ERR(key)) { err = PTR_ERR(key); goto err_put; @@@ -4395,9 -4398,7 +4398,9 @@@ static int bpf_task_fd_query(const unio if (attr->task_fd_query.flags != 0) return -EINVAL;
+ rcu_read_lock(); task = get_pid_task(find_vpid(pid), PIDTYPE_PID); + rcu_read_unlock(); if (!task) return -ENOENT;
@@@ -4943,7 -4944,7 +4946,7 @@@ static int __sys_bpf(int cmd, bpfptr_t err = map_update_elem(&attr, uattr); break; case BPF_MAP_DELETE_ELEM: - err = map_delete_elem(&attr); + err = map_delete_elem(&attr, uattr); break; case BPF_MAP_GET_NEXT_KEY: err = map_get_next_key(&attr); @@@ -5075,8 -5076,10 +5078,10 @@@ BPF_CALL_3(bpf_sys_bpf, int, cmd, unio { switch (cmd) { case BPF_MAP_CREATE: + case BPF_MAP_DELETE_ELEM: case BPF_MAP_UPDATE_ELEM: case BPF_MAP_FREEZE: + case BPF_MAP_GET_FD_BY_ID: case BPF_PROG_LOAD: case BPF_BTF_LOAD: case BPF_LINK_CREATE: diff --combined net/core/flow_dissector.c index 5dc3860e9fc7,2a1f513a2dc8..f24347dbdbc5 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@@ -204,6 -204,30 +204,30 @@@ static void __skb_flow_dissect_icmp(con skb_flow_get_icmp_tci(skb, key_icmp, data, thoff, hlen); }
+ static void __skb_flow_dissect_l2tpv3(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container, const void *data, + int nhoff, int hlen) + { + struct flow_dissector_key_l2tpv3 *key_l2tpv3; + struct { + __be32 session_id; + } *hdr, _hdr; + + if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_L2TPV3)) + return; + + hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr); + if (!hdr) + return; + + key_l2tpv3 = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_L2TPV3, + target_container); + + key_l2tpv3->session_id = hdr->session_id; + } + void skb_flow_dissect_meta(const struct sk_buff *skb, struct flow_dissector *flow_dissector, void *target_container) @@@ -866,8 -890,8 +890,8 @@@ static void __skb_flow_bpf_to_target(co } }
- bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, - __be16 proto, int nhoff, int hlen, unsigned int flags) + u32 bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, + __be16 proto, int nhoff, int hlen, unsigned int flags) { struct bpf_flow_keys *flow_keys = ctx->flow_keys; u32 result; @@@ -892,7 -916,7 +916,7 @@@ flow_keys->thoff = clamp_t(u16, flow_keys->thoff, flow_keys->nhoff, hlen);
- return result == BPF_OK; + return result; }
static bool is_pppoe_ses_hdr_valid(const struct pppoe_hdr *hdr) @@@ -1008,6 -1032,7 +1032,7 @@@ bool __skb_flow_dissect(const struct ne }; __be16 n_proto = proto; struct bpf_prog *prog; + u32 result;
if (skb) { ctx.skb = skb; @@@ -1019,13 -1044,16 +1044,16 @@@ }
prog = READ_ONCE(run_array->items[0].prog); - ret = bpf_flow_dissect(prog, &ctx, n_proto, nhoff, - hlen, flags); + result = bpf_flow_dissect(prog, &ctx, n_proto, nhoff, + hlen, flags); + if (result == BPF_FLOW_DISSECTOR_CONTINUE) + goto dissect_continue; __skb_flow_bpf_to_target(&flow_keys, flow_dissector, target_container); rcu_read_unlock(); - return ret; + return result == BPF_OK; } + dissect_continue: rcu_read_unlock(); }
@@@ -1497,6 -1525,10 +1525,10 @@@ ip_proto_again __skb_flow_dissect_icmp(skb, flow_dissector, target_container, data, nhoff, hlen); break; + case IPPROTO_L2TP: + __skb_flow_dissect_l2tpv3(skb, flow_dissector, target_container, + data, nhoff, hlen); + break;
default: break; @@@ -1611,8 -1643,9 +1643,8 @@@ static inline void __flow_hash_consiste
switch (keys->control.addr_type) { case FLOW_DISSECTOR_KEY_IPV4_ADDRS: - addr_diff = (__force u32)keys->addrs.v4addrs.dst - - (__force u32)keys->addrs.v4addrs.src; - if (addr_diff < 0) + if ((__force u32)keys->addrs.v4addrs.dst < + (__force u32)keys->addrs.v4addrs.src) swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
if ((__force u16)keys->ports.dst < diff --combined net/ipv4/ipmr.c index e11d6b0b62b7,95eefbe2e142..e04544ac4b45 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@@ -1004,9 -1004,7 +1004,9 @@@ static void ipmr_cache_resolve(struct n
rtnl_unicast(skb, net, NETLINK_CB(skb).portid); } else { + rcu_read_lock(); ip_mr_forward(net, mrt, skb->dev, skb, c, 0); + rcu_read_unlock(); } } } @@@ -1548,7 -1546,8 +1548,8 @@@ out }
/* Getsock opt support for the multicast routing system. */ - int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen) + int ip_mroute_getsockopt(struct sock *sk, int optname, sockptr_t optval, + sockptr_t optlen) { int olr; int val; @@@ -1579,14 -1578,14 +1580,14 @@@ return -ENOPROTOOPT; }
- if (get_user(olr, optlen)) + if (copy_from_sockptr(&olr, optlen, sizeof(int))) return -EFAULT; olr = min_t(unsigned int, olr, sizeof(int)); if (olr < 0) return -EINVAL; - if (put_user(olr, optlen)) + if (copy_to_sockptr(optlen, &olr, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &val, olr)) + if (copy_to_sockptr(optval, &val, olr)) return -EFAULT; return 0; } diff --combined net/ipv4/tcp.c index e373dde1f46f,829beee3fa32..5702ca9b952d --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@@ -1761,28 -1761,19 +1761,28 @@@ int tcp_read_skb(struct sock *sk, skb_r if (sk->sk_state == TCP_LISTEN) return -ENOTCONN;
- skb = tcp_recv_skb(sk, seq, &offset); - if (!skb) - return 0; + while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { + u8 tcp_flags; + int used;
- __skb_unlink(skb, &sk->sk_receive_queue); - WARN_ON(!skb_set_owner_sk_safe(skb, sk)); - copied = recv_actor(sk, skb); - if (copied >= 0) { - seq += copied; - if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) + __skb_unlink(skb, &sk->sk_receive_queue); + WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk)); + tcp_flags = TCP_SKB_CB(skb)->tcp_flags; + used = recv_actor(sk, skb); + consume_skb(skb); + if (used < 0) { + if (!copied) + copied = used; + break; + } + seq += used; + copied += used; + + if (tcp_flags & TCPHDR_FIN) { ++seq; + break; + } } - consume_skb(skb); WRITE_ONCE(tp->copied_seq, seq);
tcp_rcv_space_adjust(sk); @@@ -3208,7 -3199,7 +3208,7 @@@ EXPORT_SYMBOL(tcp_disconnect)
static inline bool tcp_can_repair_sock(const struct sock *sk) { - return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && + return sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && (sk->sk_state != TCP_LISTEN); }
@@@ -3485,8 -3476,8 +3485,8 @@@ int tcp_set_window_clamp(struct sock *s /* * Socket option code for TCP. */ - static int do_tcp_setsockopt(struct sock *sk, int level, int optname, - sockptr_t optval, unsigned int optlen) + int do_tcp_setsockopt(struct sock *sk, int level, int optname, + sockptr_t optval, unsigned int optlen) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); @@@ -3508,11 -3499,11 +3508,11 @@@ return -EFAULT; name[val] = 0;
- lock_sock(sk); - err = tcp_set_congestion_control(sk, name, true, - ns_capable(sock_net(sk)->user_ns, - CAP_NET_ADMIN)); - release_sock(sk); + sockopt_lock_sock(sk); + err = tcp_set_congestion_control(sk, name, !has_current_bpf_ctx(), + sockopt_ns_capable(sock_net(sk)->user_ns, + CAP_NET_ADMIN)); + sockopt_release_sock(sk); return err; } case TCP_ULP: { @@@ -3528,9 -3519,9 +3528,9 @@@ return -EFAULT; name[val] = 0;
- lock_sock(sk); + sockopt_lock_sock(sk); err = tcp_set_ulp(sk, name); - release_sock(sk); + sockopt_release_sock(sk); return err; } case TCP_FASTOPEN_KEY: { @@@ -3563,7 -3554,7 +3563,7 @@@ if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT;
- lock_sock(sk); + sockopt_lock_sock(sk);
switch (optname) { case TCP_MAXSEG: @@@ -3785,7 -3776,7 +3785,7 @@@ break; }
- release_sock(sk); + sockopt_release_sock(sk); return err; }
@@@ -4049,15 -4040,15 +4049,15 @@@ struct sk_buff *tcp_get_timestamping_op return stats; }
- static int do_tcp_getsockopt(struct sock *sk, int level, - int optname, char __user *optval, int __user *optlen) + int do_tcp_getsockopt(struct sock *sk, int level, + int optname, sockptr_t optval, sockptr_t optlen) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct net *net = sock_net(sk); int val, len;
- if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT;
len = min_t(unsigned int, len, sizeof(int)); @@@ -4107,15 -4098,15 +4107,15 @@@ case TCP_INFO: { struct tcp_info info;
- if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT;
tcp_get_info(sk, &info);
len = min_t(unsigned int, len, sizeof(info)); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &info, len)) + if (copy_to_sockptr(optval, &info, len)) return -EFAULT; return 0; } @@@ -4125,7 -4116,7 +4125,7 @@@ size_t sz = 0; int attr;
- if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT;
ca_ops = icsk->icsk_ca_ops; @@@ -4133,9 -4124,9 +4133,9 @@@ sz = ca_ops->get_info(sk, ~0U, &attr, &info);
len = min_t(unsigned int, len, sz); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &info, len)) + if (copy_to_sockptr(optval, &info, len)) return -EFAULT; return 0; } @@@ -4144,27 -4135,28 +4144,28 @@@ break;
case TCP_CONGESTION: - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; len = min_t(unsigned int, len, TCP_CA_NAME_MAX); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, icsk->icsk_ca_ops->name, len)) + if (copy_to_sockptr(optval, icsk->icsk_ca_ops->name, len)) return -EFAULT; return 0;
case TCP_ULP: - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; len = min_t(unsigned int, len, TCP_ULP_NAME_MAX); if (!icsk->icsk_ulp_ops) { - if (put_user(0, optlen)) + len = 0; + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; return 0; } - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, icsk->icsk_ulp_ops->name, len)) + if (copy_to_sockptr(optval, icsk->icsk_ulp_ops->name, len)) return -EFAULT; return 0;
@@@ -4172,15 -4164,15 +4173,15 @@@ u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)]; unsigned int key_len;
- if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT;
key_len = tcp_fastopen_get_cipher(net, icsk, key) * TCP_FASTOPEN_KEY_LENGTH; len = min_t(unsigned int, len, key_len); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, key, len)) + if (copy_to_sockptr(optval, key, len)) return -EFAULT; return 0; } @@@ -4206,7 -4198,7 +4207,7 @@@ case TCP_REPAIR_WINDOW: { struct tcp_repair_window opt;
- if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT;
if (len != sizeof(opt)) @@@ -4221,7 -4213,7 +4222,7 @@@ opt.rcv_wnd = tp->rcv_wnd; opt.rcv_wup = tp->rcv_wup;
- if (copy_to_user(optval, &opt, len)) + if (copy_to_sockptr(optval, &opt, len)) return -EFAULT; return 0; } @@@ -4267,35 -4259,35 +4268,35 @@@ val = tp->save_syn; break; case TCP_SAVED_SYN: { - if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT;
- lock_sock(sk); + sockopt_lock_sock(sk); if (tp->saved_syn) { if (len < tcp_saved_syn_len(tp->saved_syn)) { - if (put_user(tcp_saved_syn_len(tp->saved_syn), - optlen)) { - release_sock(sk); + len = tcp_saved_syn_len(tp->saved_syn); + if (copy_to_sockptr(optlen, &len, sizeof(int))) { + sockopt_release_sock(sk); return -EFAULT; } - release_sock(sk); + sockopt_release_sock(sk); return -EINVAL; } len = tcp_saved_syn_len(tp->saved_syn); - if (put_user(len, optlen)) { - release_sock(sk); + if (copy_to_sockptr(optlen, &len, sizeof(int))) { + sockopt_release_sock(sk); return -EFAULT; } - if (copy_to_user(optval, tp->saved_syn->data, len)) { - release_sock(sk); + if (copy_to_sockptr(optval, tp->saved_syn->data, len)) { + sockopt_release_sock(sk); return -EFAULT; } tcp_saved_syn_free(tp); - release_sock(sk); + sockopt_release_sock(sk); } else { - release_sock(sk); + sockopt_release_sock(sk); len = 0; - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; } return 0; @@@ -4306,31 -4298,31 +4307,31 @@@ struct tcp_zerocopy_receive zc = {}; int err;
- if (get_user(len, optlen)) + if (copy_from_sockptr(&len, optlen, sizeof(int))) return -EFAULT; if (len < 0 || len < offsetofend(struct tcp_zerocopy_receive, length)) return -EINVAL; if (unlikely(len > sizeof(zc))) { - err = check_zeroed_user(optval + sizeof(zc), - len - sizeof(zc)); + err = check_zeroed_sockptr(optval, sizeof(zc), + len - sizeof(zc)); if (err < 1) return err == 0 ? -EINVAL : err; len = sizeof(zc); - if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; } - if (copy_from_user(&zc, optval, len)) + if (copy_from_sockptr(&zc, optval, len)) return -EFAULT; if (zc.reserved) return -EINVAL; if (zc.msg_flags & ~(TCP_VALID_ZC_MSG_FLAGS)) return -EINVAL; - lock_sock(sk); + sockopt_lock_sock(sk); err = tcp_zerocopy_receive(sk, &zc, &tss); err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname, &zc, &len, err); - release_sock(sk); + sockopt_release_sock(sk); if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags)) goto zerocopy_rcv_cmsg; switch (len) { @@@ -4360,7 -4352,7 +4361,7 @@@ zerocopy_rcv_sk_err zerocopy_rcv_inq: zc.inq = tcp_inq_hint(sk); zerocopy_rcv_out: - if (!err && copy_to_user(optval, &zc, len)) + if (!err && copy_to_sockptr(optval, &zc, len)) err = -EFAULT; return err; } @@@ -4369,9 -4361,9 +4370,9 @@@ return -ENOPROTOOPT; }
- if (put_user(len, optlen)) + if (copy_to_sockptr(optlen, &len, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &val, len)) + if (copy_to_sockptr(optval, &val, len)) return -EFAULT; return 0; } @@@ -4396,7 -4388,8 +4397,8 @@@ int tcp_getsockopt(struct sock *sk, in if (level != SOL_TCP) return icsk->icsk_af_ops->getsockopt(sk, level, optname, optval, optlen); - return do_tcp_getsockopt(sk, level, optname, optval, optlen); + return do_tcp_getsockopt(sk, level, optname, USER_SOCKPTR(optval), + USER_SOCKPTR(optlen)); } EXPORT_SYMBOL(tcp_getsockopt);
@@@ -4442,12 -4435,16 +4444,16 @@@ static void __tcp_alloc_md5sig_pool(voi * to memory. See smp_rmb() in tcp_get_md5sig_pool() */ smp_wmb(); - tcp_md5sig_pool_populated = true; + /* Paired with READ_ONCE() from tcp_alloc_md5sig_pool() + * and tcp_get_md5sig_pool(). + */ + WRITE_ONCE(tcp_md5sig_pool_populated, true); }
bool tcp_alloc_md5sig_pool(void) { - if (unlikely(!tcp_md5sig_pool_populated)) { + /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ + if (unlikely(!READ_ONCE(tcp_md5sig_pool_populated))) { mutex_lock(&tcp_md5sig_mutex);
if (!tcp_md5sig_pool_populated) { @@@ -4458,7 -4455,8 +4464,8 @@@
mutex_unlock(&tcp_md5sig_mutex); } - return tcp_md5sig_pool_populated; + /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ + return READ_ONCE(tcp_md5sig_pool_populated); } EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
@@@ -4474,7 -4472,8 +4481,8 @@@ struct tcp_md5sig_pool *tcp_get_md5sig_ { local_bh_disable();
- if (tcp_md5sig_pool_populated) { + /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ + if (READ_ONCE(tcp_md5sig_pool_populated)) { /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */ smp_rmb(); return this_cpu_ptr(&tcp_md5sig_pool); @@@ -4745,6 -4744,12 +4753,12 @@@ void __init tcp_init(void SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT, NULL); + tcp_hashinfo.bind2_bucket_cachep = + kmem_cache_create("tcp_bind2_bucket", + sizeof(struct inet_bind2_bucket), 0, + SLAB_HWCACHE_ALIGN | SLAB_PANIC | + SLAB_ACCOUNT, + NULL);
/* Size and allocate the main established and bind bucket * hash tables. @@@ -4768,7 -4773,7 +4782,7 @@@ panic("TCP: failed to alloc ehash_locks"); tcp_hashinfo.bhash = alloc_large_system_hash("TCP bind", - sizeof(struct inet_bind_hashbucket), + 2 * sizeof(struct inet_bind_hashbucket), tcp_hashinfo.ehash_mask + 1, 17, /* one slot per 128 KB of memory */ 0, @@@ -4777,11 -4782,15 +4791,15 @@@ 0, 64 * 1024); tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size; + tcp_hashinfo.bhash2 = tcp_hashinfo.bhash + tcp_hashinfo.bhash_size; for (i = 0; i < tcp_hashinfo.bhash_size; i++) { spin_lock_init(&tcp_hashinfo.bhash[i].lock); INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); + spin_lock_init(&tcp_hashinfo.bhash2[i].lock); + INIT_HLIST_HEAD(&tcp_hashinfo.bhash2[i].chain); }
+ tcp_hashinfo.pernet = false;
cnt = tcp_hashinfo.ehash_mask + 1; sysctl_tcp_max_orphans = cnt / 2; diff --combined net/ipv6/af_inet6.c index dbb1430d6cc2,19732b5dce23..d40b7d60e00e --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@@ -1057,6 -1057,8 +1057,8 @@@ static const struct ipv6_stub ipv6_stub static const struct ipv6_bpf_stub ipv6_bpf_stub_impl = { .inet6_bind = __inet6_bind, .udp6_lib_lookup = __udp6_lib_lookup, + .ipv6_setsockopt = do_ipv6_setsockopt, + .ipv6_getsockopt = do_ipv6_getsockopt, };
static int __init inet6_init(void) @@@ -1070,13 -1072,13 +1072,13 @@@ for (r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) INIT_LIST_HEAD(r);
+ raw_hashinfo_init(&raw_v6_hashinfo); + if (disable_ipv6_mod) { pr_info("Loaded, but administratively disabled, reboot required to enable\n"); goto out; }
- raw_hashinfo_init(&raw_v6_hashinfo); - err = proto_register(&tcpv6_prot, 1); if (err) goto out; diff --combined net/ipv6/ip6mr.c index 858fd8a28b5b,516e83b52f26..facdc78a43e5 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@@ -1028,11 -1028,8 +1028,11 @@@ static void ip6mr_cache_resolve(struct ((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE; } rtnl_unicast(skb, net, NETLINK_CB(skb).portid); - } else + } else { + rcu_read_lock(); ip6_mr_forward(net, mrt, skb->dev, skb, c); + rcu_read_unlock(); + } } }
@@@ -1830,8 -1827,8 +1830,8 @@@ int ip6_mroute_setsockopt(struct sock * * Getsock opt support for the multicast routing system. */
- int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, - int __user *optlen) + int ip6_mroute_getsockopt(struct sock *sk, int optname, sockptr_t optval, + sockptr_t optlen) { int olr; int val; @@@ -1862,16 -1859,16 +1862,16 @@@ return -ENOPROTOOPT; }
- if (get_user(olr, optlen)) + if (copy_from_sockptr(&olr, optlen, sizeof(int))) return -EFAULT;
olr = min_t(int, olr, sizeof(int)); if (olr < 0) return -EINVAL;
- if (put_user(olr, optlen)) + if (copy_to_sockptr(optlen, &olr, sizeof(int))) return -EFAULT; - if (copy_to_user(optval, &val, olr)) + if (copy_to_sockptr(optval, &val, olr)) return -EFAULT; return 0; } diff --combined net/mptcp/protocol.c index 969b33a9dd64,47931f6cf387..866dfad3cde6 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@@ -150,15 -150,9 +150,15 @@@ static bool mptcp_try_coalesce(struct s MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq, to->len, MPTCP_SKB_CB(from)->end_seq); MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq; - kfree_skb_partial(from, fragstolen); + + /* note the fwd memory can reach a negative value after accounting + * for the delta, but the later skb free will restore a non + * negative one + */ atomic_add(delta, &sk->sk_rmem_alloc); mptcp_rmem_charge(sk, delta); + kfree_skb_partial(from, fragstolen); + return true; }
@@@ -1544,8 -1538,9 +1544,9 @@@ void __mptcp_push_pending(struct sock * struct mptcp_sendmsg_info info = { .flags = flags, }; + bool do_check_data_fin = false; struct mptcp_data_frag *dfrag; - int len, copied = 0; + int len;
while ((dfrag = mptcp_send_head(sk))) { info.sent = dfrag->already_sent; @@@ -1580,8 -1575,8 +1581,8 @@@ goto out; }
+ do_check_data_fin = true; info.sent += ret; - copied += ret; len -= ret;
mptcp_update_post_push(msk, dfrag, ret); @@@ -1597,7 -1592,7 +1598,7 @@@ out /* ensure the rtx timer is running */ if (!mptcp_timer_pending(sk)) mptcp_reset_timer(sk); - if (copied) + if (do_check_data_fin) __mptcp_check_send_data_fin(sk); }
@@@ -2363,7 -2358,7 +2364,7 @@@ static void __mptcp_close_subflow(struc
might_sleep();
- list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) { + mptcp_for_each_subflow_safe(msk, subflow, tmp) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
if (inet_sk_state_load(ssk) != TCP_CLOSE) @@@ -2406,7 -2401,7 +2407,7 @@@ static void mptcp_check_fastclose(struc
mptcp_token_destroy(msk);
- list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) { + mptcp_for_each_subflow_safe(msk, subflow, tmp) { struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); bool slow;
@@@ -3053,7 -3048,7 +3054,7 @@@ void mptcp_destroy_common(struct mptcp_ __mptcp_clear_xmit(sk);
/* join list will be eventually flushed (with rst) at sock lock release time */ - list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) + mptcp_for_each_subflow_safe(msk, subflow, tmp) __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags);
/* move to sk_receive_queue, sk_stream_kill_queues will purge it */ diff --combined net/sched/sch_taprio.c index 86675a79da1e,b72c373edea0..8ae454052201 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@@ -67,7 -67,6 +67,7 @@@ struct taprio_sched u32 flags; enum tk_offsets tk_offset; int clockid; + bool offloaded; atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+ * speeds it's sub-nanoseconds per byte */ @@@ -79,8 -78,6 +79,6 @@@ struct sched_gate_list __rcu *admin_sched; struct hrtimer advance_timer; struct list_head taprio_list; - struct sk_buff *(*dequeue)(struct Qdisc *sch); - struct sk_buff *(*peek)(struct Qdisc *sch); u32 txtime_delay; };
@@@ -435,6 -432,9 +433,9 @@@ static int taprio_enqueue_one(struct sk return qdisc_enqueue(skb, child, to_free); }
+ /* Will not be called in the full offload case, since the TX queues are + * attached to the Qdisc created using qdisc_create_dflt() + */ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { @@@ -442,11 -442,6 +443,6 @@@ struct Qdisc *child; int queue;
- if (unlikely(FULL_OFFLOAD_IS_ENABLED(q->flags))) { - WARN_ONCE(1, "Trying to enqueue skb into the root of a taprio qdisc configured with full offload\n"); - return qdisc_drop(skb, sch, to_free); - } - queue = skb_get_queue_mapping(skb);
child = q->qdiscs[queue]; @@@ -455,10 -450,10 +451,10 @@@
/* Large packets might not be transmitted when the transmission duration * exceeds any configured interval. Therefore, segment the skb into - * smaller chunks. Skip it for the full offload case, as the driver - * and/or the hardware is expected to handle this. + * smaller chunks. Drivers with full offload are expected to handle + * this in hardware. */ - if (skb_is_gso(skb) && !FULL_OFFLOAD_IS_ENABLED(q->flags)) { + if (skb_is_gso(skb)) { unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb); netdev_features_t features = netif_skb_features(skb); struct sk_buff *segs, *nskb; @@@ -492,7 -487,10 +488,10 @@@ return taprio_enqueue_one(skb, sch, child, to_free); }
- static struct sk_buff *taprio_peek_soft(struct Qdisc *sch) + /* Will not be called in the full offload case, since the TX queues are + * attached to the Qdisc created using qdisc_create_dflt() + */ + static struct sk_buff *taprio_peek(struct Qdisc *sch) { struct taprio_sched *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); @@@ -536,20 -534,6 +535,6 @@@ return NULL; }
- static struct sk_buff *taprio_peek_offload(struct Qdisc *sch) - { - WARN_ONCE(1, "Trying to peek into the root of a taprio qdisc configured with full offload\n"); - - return NULL; - } - - static struct sk_buff *taprio_peek(struct Qdisc *sch) - { - struct taprio_sched *q = qdisc_priv(sch); - - return q->peek(sch); - } - static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry) { atomic_set(&entry->budget, @@@ -557,7 -541,10 +542,10 @@@ atomic64_read(&q->picos_per_byte))); }
- static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch) + /* Will not be called in the full offload case, since the TX queues are + * attached to the Qdisc created using qdisc_create_dflt() + */ + static struct sk_buff *taprio_dequeue(struct Qdisc *sch) { struct taprio_sched *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); @@@ -645,20 -632,6 +633,6 @@@ done return skb; }
- static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch) - { - WARN_ONCE(1, "Trying to dequeue from the root of a taprio qdisc configured with full offload\n"); - - return NULL; - } - - static struct sk_buff *taprio_dequeue(struct Qdisc *sch) - { - struct taprio_sched *q = qdisc_priv(sch); - - return q->dequeue(sch); - } - static bool should_restart_cycle(const struct sched_gate_list *oper, const struct sched_entry *entry) { @@@ -1194,16 -1167,10 +1168,10 @@@ static void taprio_offload_config_chang { struct sched_gate_list *oper, *admin;
- spin_lock(&q->current_entry_lock); - - oper = rcu_dereference_protected(q->oper_sched, - lockdep_is_held(&q->current_entry_lock)); - admin = rcu_dereference_protected(q->admin_sched, - lockdep_is_held(&q->current_entry_lock)); + oper = rtnl_dereference(q->oper_sched); + admin = rtnl_dereference(q->admin_sched);
switch_schedules(q, &admin, &oper); - - spin_unlock(&q->current_entry_lock); }
static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask) @@@ -1280,8 -1247,6 +1248,8 @@@ static int taprio_enable_offload(struc goto done; }
+ q->offloaded = true; + done: taprio_offload_free(offload);
@@@ -1296,9 -1261,12 +1264,9 @@@ static int taprio_disable_offload(struc struct tc_taprio_qopt_offload *offload; int err;
- if (!FULL_OFFLOAD_IS_ENABLED(q->flags)) + if (!q->offloaded) return 0;
- if (!ops->ndo_setup_tc) - return -EOPNOTSUPP; - offload = taprio_offload_alloc(0); if (!offload) { NL_SET_ERR_MSG(extack, @@@ -1314,8 -1282,6 +1282,8 @@@ goto out; }
+ q->offloaded = false; + out: taprio_offload_free(offload);
@@@ -1492,10 -1458,8 +1460,8 @@@ static int taprio_change(struct Qdisc * } INIT_LIST_HEAD(&new_admin->entries);
- rcu_read_lock(); - oper = rcu_dereference(q->oper_sched); - admin = rcu_dereference(q->admin_sched); - rcu_read_unlock(); + oper = rtnl_dereference(q->oper_sched); + admin = rtnl_dereference(q->admin_sched);
/* no changes - no new mqprio settings */ if (!taprio_mqprio_cmp(dev, mqprio)) @@@ -1565,17 -1529,6 +1531,6 @@@ q->advance_timer.function = advance_sched; }
- if (FULL_OFFLOAD_IS_ENABLED(q->flags)) { - q->dequeue = taprio_dequeue_offload; - q->peek = taprio_peek_offload; - } else { - /* Be sure to always keep the function pointers - * in a consistent state. - */ - q->dequeue = taprio_dequeue_soft; - q->peek = taprio_peek_soft; - } - err = taprio_get_start_time(sch, new_admin, &start); if (err < 0) { NL_SET_ERR_MSG(extack, "Internal error: failed get start time"); @@@ -1638,14 -1591,13 +1593,13 @@@ static void taprio_reset(struct Qdisc * if (q->qdiscs[i]) qdisc_reset(q->qdiscs[i]); } - sch->qstats.backlog = 0; - sch->q.qlen = 0; }
static void taprio_destroy(struct Qdisc *sch) { struct taprio_sched *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); + struct sched_gate_list *oper, *admin; unsigned int i;
spin_lock(&taprio_list_lock); @@@ -1669,11 -1621,14 +1623,14 @@@
netdev_reset_tc(dev);
- if (q->oper_sched) - call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb); + oper = rtnl_dereference(q->oper_sched); + admin = rtnl_dereference(q->admin_sched); + + if (oper) + call_rcu(&oper->rcu, taprio_free_sched_cb);
- if (q->admin_sched) - call_rcu(&q->admin_sched->rcu, taprio_free_sched_cb); + if (admin) + call_rcu(&admin->rcu, taprio_free_sched_cb); }
static int taprio_init(struct Qdisc *sch, struct nlattr *opt, @@@ -1688,9 -1643,6 +1645,6 @@@ hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS); q->advance_timer.function = advance_sched;
- q->dequeue = taprio_dequeue_soft; - q->peek = taprio_peek_soft; - q->root = sch;
/* We only support static clockids. Use an invalid value as default @@@ -1703,11 -1655,15 +1657,15 @@@ list_add(&q->taprio_list, &taprio_list); spin_unlock(&taprio_list_lock);
- if (sch->parent != TC_H_ROOT) + if (sch->parent != TC_H_ROOT) { + NL_SET_ERR_MSG_MOD(extack, "Can only be attached as root qdisc"); return -EOPNOTSUPP; + }
- if (!netif_is_multiqueue(dev)) + if (!netif_is_multiqueue(dev)) { + NL_SET_ERR_MSG_MOD(extack, "Multi-queue device is required"); return -EOPNOTSUPP; + }
/* pre-allocate qdisc, attachment can't fail */ q->qdiscs = kcalloc(dev->num_tx_queues, @@@ -1888,9 -1844,8 +1846,8 @@@ static int taprio_dump(struct Qdisc *sc struct nlattr *nest, *sched_nest; unsigned int i;
- rcu_read_lock(); - oper = rcu_dereference(q->oper_sched); - admin = rcu_dereference(q->admin_sched); + oper = rtnl_dereference(q->oper_sched); + admin = rtnl_dereference(q->admin_sched);
opt.num_tc = netdev_get_num_tc(dev); memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map)); @@@ -1934,8 -1889,6 +1891,6 @@@ nla_nest_end(skb, sched_nest);
done: - rcu_read_unlock(); - return nla_nest_end(skb, nest);
admin_error: @@@ -1945,20 -1898,17 +1900,19 @@@ options_error nla_nest_cancel(skb, nest);
start_error: - rcu_read_unlock(); return -ENOSPC; }
static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl) { - struct netdev_queue *dev_queue = taprio_queue_get(sch, cl); + struct taprio_sched *q = qdisc_priv(sch); + struct net_device *dev = qdisc_dev(sch); + unsigned int ntx = cl - 1;
- if (!dev_queue) + if (ntx >= dev->num_tx_queues) return NULL;
- return dev_queue->qdisc_sleeping; + return q->qdiscs[ntx]; }
static unsigned long taprio_find(struct Qdisc *sch, u32 classid) diff --combined tools/testing/selftests/bpf/xskxceiver.c index 091402dc5390,ef33309bbe49..140ce15403be --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@@ -99,6 -99,8 +99,8 @@@ #include <stdatomic.h> #include "xsk.h" #include "xskxceiver.h" + #include <bpf/bpf.h> + #include <linux/filter.h> #include "../kselftest.h"
/* AF_XDP APIs were moved into libxdp and marked as deprecated in libbpf. @@@ -122,9 -124,20 +124,20 @@@ static void __exit_with_error(int error }
#define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__) - - #define mode_string(test) (test)->ifobj_tx->xdp_flags & XDP_FLAGS_SKB_MODE ? "SKB" : "DRV" #define busy_poll_string(test) (test)->ifobj_tx->busy_poll ? "BUSY-POLL " : "" + static char *mode_string(struct test_spec *test) + { + switch (test->mode) { + case TEST_MODE_SKB: + return "SKB"; + case TEST_MODE_DRV: + return "DRV"; + case TEST_MODE_ZC: + return "ZC"; + default: + return "BOGUS"; + } + }
static void report_failure(struct test_spec *test) { @@@ -244,6 -257,11 +257,11 @@@ static void gen_udp_hdr(u32 payload, vo memset32_htonl(pkt + PKT_HDR_SIZE, payload, UDP_PKT_DATA_SIZE); }
+ static bool is_umem_valid(struct ifobject *ifobj) + { + return !!ifobj->umem->umem; + } + static void gen_udp_csum(struct udphdr *udp_hdr, struct iphdr *ip_hdr) { udp_hdr->check = 0; @@@ -294,8 -312,8 +312,8 @@@ static void enable_busy_poll(struct xsk exit_with_error(errno); }
- static int xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, - struct ifobject *ifobject, bool shared) + static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, + struct ifobject *ifobject, bool shared) { struct xsk_socket_config cfg = {}; struct xsk_ring_cons *rxr; @@@ -315,6 -333,51 +333,51 @@@ return xsk_socket__create(&xsk->xsk, ifobject->ifname, 0, umem->umem, rxr, txr, &cfg); }
+ static bool ifobj_zc_avail(struct ifobject *ifobject) + { + size_t umem_sz = DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE; + int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE; + struct xsk_socket_info *xsk; + struct xsk_umem_info *umem; + bool zc_avail = false; + void *bufs; + int ret; + + bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); + if (bufs == MAP_FAILED) + exit_with_error(errno); + + umem = calloc(1, sizeof(struct xsk_umem_info)); + if (!umem) { + munmap(bufs, umem_sz); + exit_with_error(-ENOMEM); + } + umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; + ret = xsk_configure_umem(umem, bufs, umem_sz); + if (ret) + exit_with_error(-ret); + + xsk = calloc(1, sizeof(struct xsk_socket_info)); + if (!xsk) + goto out; + ifobject->xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST; + ifobject->xdp_flags |= XDP_FLAGS_DRV_MODE; + ifobject->bind_flags = XDP_USE_NEED_WAKEUP | XDP_ZEROCOPY; + ifobject->rx_on = true; + xsk->rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS; + ret = __xsk_configure_socket(xsk, umem, ifobject, false); + if (!ret) + zc_avail = true; + + xsk_socket__delete(xsk->xsk); + free(xsk); + out: + munmap(umem->buffer, umem_sz); + xsk_umem__delete(umem->umem); + free(umem); + return zc_avail; + } + static struct option long_options[] = { {"interface", required_argument, 0, 'i'}, {"busy-poll", no_argument, 0, 'b'}, @@@ -426,20 -489,24 +489,24 @@@ static void __test_spec_init(struct tes ifobj->use_poll = false; ifobj->use_fill_ring = true; ifobj->release_rx = true; - ifobj->pkt_stream = test->pkt_stream_default; ifobj->validation_func = NULL;
if (i == 0) { ifobj->rx_on = false; ifobj->tx_on = true; + ifobj->pkt_stream = test->tx_pkt_stream_default; } else { ifobj->rx_on = true; ifobj->tx_on = false; + ifobj->pkt_stream = test->rx_pkt_stream_default; }
memset(ifobj->umem, 0, sizeof(*ifobj->umem)); ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS; ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; + if (ifobj->shared_umem && ifobj->rx_on) + ifobj->umem->base_addr = DEFAULT_UMEM_BUFFERS * + XSK_UMEM__DEFAULT_FRAME_SIZE;
for (j = 0; j < MAX_SOCKETS; j++) { memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j])); @@@ -458,12 -525,15 +525,15 @@@ static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, struct ifobject *ifobj_rx, enum test_mode mode) { - struct pkt_stream *pkt_stream; + struct pkt_stream *tx_pkt_stream; + struct pkt_stream *rx_pkt_stream; u32 i;
- pkt_stream = test->pkt_stream_default; + tx_pkt_stream = test->tx_pkt_stream_default; + rx_pkt_stream = test->rx_pkt_stream_default; memset(test, 0, sizeof(*test)); - test->pkt_stream_default = pkt_stream; + test->tx_pkt_stream_default = tx_pkt_stream; + test->rx_pkt_stream_default = rx_pkt_stream;
for (i = 0; i < MAX_INTERFACES; i++) { struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx; @@@ -474,9 -544,14 +544,14 @@@ else ifobj->xdp_flags |= XDP_FLAGS_DRV_MODE;
- ifobj->bind_flags = XDP_USE_NEED_WAKEUP | XDP_COPY; + ifobj->bind_flags = XDP_USE_NEED_WAKEUP; + if (mode == TEST_MODE_ZC) + ifobj->bind_flags |= XDP_ZEROCOPY; + else + ifobj->bind_flags |= XDP_COPY; }
+ test->mode = mode; __test_spec_init(test, ifobj_tx, ifobj_rx); }
@@@ -524,16 -599,17 +599,17 @@@ static void pkt_stream_delete(struct pk static void pkt_stream_restore_default(struct test_spec *test) { struct pkt_stream *tx_pkt_stream = test->ifobj_tx->pkt_stream; + struct pkt_stream *rx_pkt_stream = test->ifobj_rx->pkt_stream;
- if (tx_pkt_stream != test->pkt_stream_default) { + if (tx_pkt_stream != test->tx_pkt_stream_default) { pkt_stream_delete(test->ifobj_tx->pkt_stream); - test->ifobj_tx->pkt_stream = test->pkt_stream_default; + test->ifobj_tx->pkt_stream = test->tx_pkt_stream_default; }
- if (test->ifobj_rx->pkt_stream != test->pkt_stream_default && - test->ifobj_rx->pkt_stream != tx_pkt_stream) + if (rx_pkt_stream != test->rx_pkt_stream_default) { pkt_stream_delete(test->ifobj_rx->pkt_stream); - test->ifobj_rx->pkt_stream = test->pkt_stream_default; + test->ifobj_rx->pkt_stream = test->rx_pkt_stream_default; + } }
static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts) @@@ -556,7 -632,7 +632,7 @@@
static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr, u32 len) { - pkt->addr = addr; + pkt->addr = addr + umem->base_addr; pkt->len = len; if (len > umem->frame_size - XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 2 - umem->frame_headroom) pkt->valid = false; @@@ -595,22 -671,29 +671,29 @@@ static void pkt_stream_replace(struct t
pkt_stream = pkt_stream_generate(test->ifobj_tx->umem, nb_pkts, pkt_len); test->ifobj_tx->pkt_stream = pkt_stream; + pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, nb_pkts, pkt_len); test->ifobj_rx->pkt_stream = pkt_stream; }
- static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset) + static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len, + int offset) { - struct xsk_umem_info *umem = test->ifobj_tx->umem; + struct xsk_umem_info *umem = ifobj->umem; struct pkt_stream *pkt_stream; u32 i;
- pkt_stream = pkt_stream_clone(umem, test->pkt_stream_default); - for (i = 1; i < test->pkt_stream_default->nb_pkts; i += 2) + pkt_stream = pkt_stream_clone(umem, ifobj->pkt_stream); + for (i = 1; i < ifobj->pkt_stream->nb_pkts; i += 2) pkt_set(umem, &pkt_stream->pkts[i], (i % umem->num_frames) * umem->frame_size + offset, pkt_len);
- test->ifobj_tx->pkt_stream = pkt_stream; - test->ifobj_rx->pkt_stream = pkt_stream; + ifobj->pkt_stream = pkt_stream; + } + + static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, int offset) + { + __pkt_stream_replace_half(test->ifobj_tx, pkt_len, offset); + __pkt_stream_replace_half(test->ifobj_rx, pkt_len, offset); }
static void pkt_stream_receive_half(struct test_spec *test) @@@ -652,7 -735,8 +735,8 @@@ static struct pkt *pkt_generate(struct return pkt; }
- static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts) + static void __pkt_stream_generate_custom(struct ifobject *ifobj, + struct pkt *pkts, u32 nb_pkts) { struct pkt_stream *pkt_stream; u32 i; @@@ -661,15 -745,20 +745,20 @@@ if (!pkt_stream) exit_with_error(ENOMEM);
- test->ifobj_tx->pkt_stream = pkt_stream; - test->ifobj_rx->pkt_stream = pkt_stream; - for (i = 0; i < nb_pkts; i++) { - pkt_stream->pkts[i].addr = pkts[i].addr; + pkt_stream->pkts[i].addr = pkts[i].addr + ifobj->umem->base_addr; pkt_stream->pkts[i].len = pkts[i].len; pkt_stream->pkts[i].payload = i; pkt_stream->pkts[i].valid = pkts[i].valid; } + + ifobj->pkt_stream = pkt_stream; + } + + static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts) + { + __pkt_stream_generate_custom(test->ifobj_tx, pkts, nb_pkts); + __pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts); }
static void pkt_dump(void *pkt, u32 len) @@@ -817,12 -906,13 +906,13 @@@ static int complete_pkts(struct xsk_soc return TEST_PASS; }
- static int receive_pkts(struct ifobject *ifobj, struct pollfd *fds) + static int receive_pkts(struct test_spec *test, struct pollfd *fds) { - struct timeval tv_end, tv_now, tv_timeout = {RECV_TMOUT, 0}; + struct timeval tv_end, tv_now, tv_timeout = {THREAD_TMOUT, 0}; + struct pkt_stream *pkt_stream = test->ifobj_rx->pkt_stream; u32 idx_rx = 0, idx_fq = 0, rcvd, i, pkts_sent = 0; - struct pkt_stream *pkt_stream = ifobj->pkt_stream; - struct xsk_socket_info *xsk = ifobj->xsk; + struct xsk_socket_info *xsk = test->ifobj_rx->xsk; + struct ifobject *ifobj = test->ifobj_rx; struct xsk_umem_info *umem = xsk->umem; struct pkt *pkt; int ret; @@@ -843,17 -933,28 +933,28 @@@ }
kick_rx(xsk); + if (ifobj->use_poll) { + ret = poll(fds, 1, POLL_TMOUT); + if (ret < 0) + exit_with_error(-ret); + + if (!ret) { + if (!is_umem_valid(test->ifobj_tx)) + return TEST_PASS; + + ksft_print_msg("ERROR: [%s] Poll timed out\n", __func__); + return TEST_FAILURE;
- rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx); - if (!rcvd) { - if (xsk_ring_prod__needs_wakeup(&umem->fq)) { - ret = poll(fds, 1, POLL_TMOUT); - if (ret < 0) - exit_with_error(-ret); } - continue; + + if (!(fds->revents & POLLIN)) + continue; }
+ rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx); + if (!rcvd) + continue; + if (ifobj->use_fill_ring) { ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq); while (ret != rcvd) { @@@ -900,13 -1001,35 +1001,35 @@@ return TEST_PASS; }
- static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb) + static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb, struct pollfd *fds, + bool timeout) { struct xsk_socket_info *xsk = ifobject->xsk; - u32 i, idx, valid_pkts = 0; + bool use_poll = ifobject->use_poll; + u32 i, idx = 0, ret, valid_pkts = 0; + + while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) { + if (use_poll) { + ret = poll(fds, 1, POLL_TMOUT); + if (timeout) { + if (ret < 0) { + ksft_print_msg("ERROR: [%s] Poll error %d\n", + __func__, ret); + return TEST_FAILURE; + } + if (ret == 0) + return TEST_PASS; + break; + } + if (ret <= 0) { + ksft_print_msg("ERROR: [%s] Poll error %d\n", + __func__, ret); + return TEST_FAILURE; + } + }
- while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) complete_pkts(xsk, BATCH_SIZE); + }
for (i = 0; i < BATCH_SIZE; i++) { struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i); @@@ -933,11 -1056,27 +1056,27 @@@
xsk_ring_prod__submit(&xsk->tx, i); xsk->outstanding_tx += valid_pkts; - if (complete_pkts(xsk, i)) - return TEST_FAILURE;
- usleep(10); - return TEST_PASS; + if (use_poll) { + ret = poll(fds, 1, POLL_TMOUT); + if (ret <= 0) { + if (ret == 0 && timeout) + return TEST_PASS; + + ksft_print_msg("ERROR: [%s] Poll error %d\n", __func__, ret); + return TEST_FAILURE; + } + } + + if (!timeout) { + if (complete_pkts(xsk, i)) + return TEST_FAILURE; + + usleep(10); + return TEST_PASS; + } + + return TEST_CONTINUE; }
static void wait_for_tx_completion(struct xsk_socket_info *xsk) @@@ -948,29 -1087,19 +1087,19 @@@
static int send_pkts(struct test_spec *test, struct ifobject *ifobject) { + bool timeout = !is_umem_valid(test->ifobj_rx); struct pollfd fds = { }; - u32 pkt_cnt = 0; + u32 pkt_cnt = 0, ret;
fds.fd = xsk_socket__fd(ifobject->xsk->xsk); fds.events = POLLOUT;
while (pkt_cnt < ifobject->pkt_stream->nb_pkts) { - int err; - - if (ifobject->use_poll) { - int ret; - - ret = poll(&fds, 1, POLL_TMOUT); - if (ret <= 0) - continue; - - if (!(fds.revents & POLLOUT)) - continue; - } - - err = __send_pkts(ifobject, &pkt_cnt); - if (err || test->fail) + ret = __send_pkts(ifobject, &pkt_cnt, &fds, timeout); + if ((ret || test->fail) && !timeout) return TEST_FAILURE; + else if (ret == TEST_PASS && timeout) + return ret; }
wait_for_tx_completion(ifobject->xsk); @@@ -1081,6 -1210,70 +1210,70 @@@ static int validate_tx_invalid_descs(st return TEST_PASS; }
+ static void xsk_configure_socket(struct test_spec *test, struct ifobject *ifobject, + struct xsk_umem_info *umem, bool tx) + { + int i, ret; + + for (i = 0; i < test->nb_sockets; i++) { + bool shared = (ifobject->shared_umem && tx) ? true : !!i; + u32 ctr = 0; + + while (ctr++ < SOCK_RECONF_CTR) { + ret = __xsk_configure_socket(&ifobject->xsk_arr[i], umem, + ifobject, shared); + if (!ret) + break; + + /* Retry if it fails as xsk_socket__create() is asynchronous */ + if (ctr >= SOCK_RECONF_CTR) + exit_with_error(-ret); + usleep(USLEEP_MAX); + } + if (ifobject->busy_poll) + enable_busy_poll(&ifobject->xsk_arr[i]); + } + } + + static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobject) + { + xsk_configure_socket(test, ifobject, test->ifobj_rx->umem, true); + ifobject->xsk = &ifobject->xsk_arr[0]; + ifobject->xsk_map_fd = test->ifobj_rx->xsk_map_fd; + memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info)); + } + + static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream) + { + u32 idx = 0, i, buffers_to_fill; + int ret; + + if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS) + buffers_to_fill = umem->num_frames; + else + buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS; + + ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx); + if (ret != buffers_to_fill) + exit_with_error(ENOSPC); + for (i = 0; i < buffers_to_fill; i++) { + u64 addr; + + if (pkt_stream->use_addr_for_fill) { + struct pkt *pkt = pkt_stream_get_pkt(pkt_stream, i); + + if (!pkt) + break; + addr = pkt->addr; + } else { + addr = i * umem->frame_size; + } + + *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr; + } + xsk_ring_prod__submit(&umem->fq, buffers_to_fill); + } + static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) { u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size; @@@ -1088,13 -1281,15 +1281,15 @@@ LIBBPF_OPTS(bpf_xdp_query_opts, opts); int ret, 
ifindex; void *bufs; - u32 i;
ifobject->ns_fd = switch_namespace(ifobject->nsname);
if (ifobject->umem->unaligned_mode) mmap_flags |= MAP_HUGETLB;
+ if (ifobject->shared_umem) + umem_sz *= 2; + bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); if (bufs == MAP_FAILED) exit_with_error(errno); @@@ -1103,24 -1298,9 +1298,9 @@@ if (ret) exit_with_error(-ret);
- for (i = 0; i < test->nb_sockets; i++) { - u32 ctr = 0; - - while (ctr++ < SOCK_RECONF_CTR) { - ret = xsk_configure_socket(&ifobject->xsk_arr[i], ifobject->umem, - ifobject, !!i); - if (!ret) - break; - - /* Retry if it fails as xsk_socket__create() is asynchronous */ - if (ctr >= SOCK_RECONF_CTR) - exit_with_error(-ret); - usleep(USLEEP_MAX); - } + xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream);
- if (ifobject->busy_poll) - enable_busy_poll(&ifobject->xsk_arr[i]); - } + xsk_configure_socket(test, ifobject, ifobject->umem, false);
ifobject->xsk = &ifobject->xsk_arr[0];
@@@ -1156,22 -1336,18 +1336,18 @@@ exit_with_error(-ret); }
- static void testapp_cleanup_xsk_res(struct ifobject *ifobj) - { - print_verbose("Destroying socket\n"); - xsk_socket__delete(ifobj->xsk->xsk); - munmap(ifobj->umem->buffer, ifobj->umem->num_frames * ifobj->umem->frame_size); - xsk_umem__delete(ifobj->umem->umem); - } - static void *worker_testapp_validate_tx(void *arg) { struct test_spec *test = (struct test_spec *)arg; struct ifobject *ifobject = test->ifobj_tx; int err;
- if (test->current_step == 1) - thread_common_ops(test, ifobject); + if (test->current_step == 1) { + if (!ifobject->shared_umem) + thread_common_ops(test, ifobject); + else + thread_common_ops_tx(test, ifobject); + }
print_verbose("Sending %d packets on interface %s\n", ifobject->pkt_stream->nb_pkts, ifobject->ifname); @@@ -1182,60 -1358,30 +1358,30 @@@ if (err) report_failure(test);
- if (test->total_steps == test->current_step || err) - testapp_cleanup_xsk_res(ifobject); pthread_exit(NULL); }
- static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream) - { - u32 idx = 0, i, buffers_to_fill; - int ret; - - if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS) - buffers_to_fill = umem->num_frames; - else - buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS; - - ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx); - if (ret != buffers_to_fill) - exit_with_error(ENOSPC); - for (i = 0; i < buffers_to_fill; i++) { - u64 addr; - - if (pkt_stream->use_addr_for_fill) { - struct pkt *pkt = pkt_stream_get_pkt(pkt_stream, i); - - if (!pkt) - break; - addr = pkt->addr; - } else { - addr = i * umem->frame_size; - } - - *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr; - } - xsk_ring_prod__submit(&umem->fq, buffers_to_fill); - } - static void *worker_testapp_validate_rx(void *arg) { struct test_spec *test = (struct test_spec *)arg; struct ifobject *ifobject = test->ifobj_rx; struct pollfd fds = { }; + int id = 0; int err;
- if (test->current_step == 1) + if (test->current_step == 1) { thread_common_ops(test, ifobject); - - xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream); + } else { + bpf_map_delete_elem(ifobject->xsk_map_fd, &id); + xsk_socket__update_xskmap(ifobject->xsk->xsk, ifobject->xsk_map_fd); + }
fds.fd = xsk_socket__fd(ifobject->xsk->xsk); fds.events = POLLIN;
pthread_barrier_wait(&barr);
- err = receive_pkts(ifobject, &fds); + err = receive_pkts(test, &fds);
if (!err && ifobject->validation_func) err = ifobject->validation_func(ifobject); @@@ -1246,11 -1392,66 +1392,66 @@@ pthread_mutex_unlock(&pacing_mutex); }
- if (test->total_steps == test->current_step || err) - testapp_cleanup_xsk_res(ifobject); pthread_exit(NULL); }
+ static void testapp_clean_xsk_umem(struct ifobject *ifobj) + { + u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size; + + if (ifobj->shared_umem) + umem_sz *= 2; + + xsk_umem__delete(ifobj->umem->umem); + munmap(ifobj->umem->buffer, umem_sz); + } + + static void handler(int signum) + { + pthread_exit(NULL); + } + + static int testapp_validate_traffic_single_thread(struct test_spec *test, struct ifobject *ifobj, + enum test_type type) + { + bool old_shared_umem = ifobj->shared_umem; + pthread_t t0; + + if (pthread_barrier_init(&barr, NULL, 2)) + exit_with_error(errno); + + test->current_step++; + if (type == TEST_TYPE_POLL_RXQ_TMOUT) + pkt_stream_reset(ifobj->pkt_stream); + pkts_in_flight = 0; + + test->ifobj_rx->shared_umem = false; + test->ifobj_tx->shared_umem = false; + + signal(SIGUSR1, handler); + /* Spawn thread */ + pthread_create(&t0, NULL, ifobj->func_ptr, test); + + if (type != TEST_TYPE_POLL_TXQ_TMOUT) + pthread_barrier_wait(&barr); + + if (pthread_barrier_destroy(&barr)) + exit_with_error(errno); + + pthread_kill(t0, SIGUSR1); + pthread_join(t0, NULL); + + if (test->total_steps == test->current_step || test->fail) { + xsk_socket__delete(ifobj->xsk->xsk); + testapp_clean_xsk_umem(ifobj); + } + + test->ifobj_rx->shared_umem = old_shared_umem; + test->ifobj_tx->shared_umem = old_shared_umem; + + return !!test->fail; + } + static int testapp_validate_traffic(struct test_spec *test) { struct ifobject *ifobj_tx = test->ifobj_tx; @@@ -1277,6 -1478,14 +1478,14 @@@ pthread_join(t1, NULL); pthread_join(t0, NULL);
+ if (test->total_steps == test->current_step || test->fail) { + xsk_socket__delete(ifobj_tx->xsk->xsk); + xsk_socket__delete(ifobj_rx->xsk->xsk); + testapp_clean_xsk_umem(ifobj_rx); + if (!ifobj_tx->shared_umem) + testapp_clean_xsk_umem(ifobj_tx); + } + return !!test->fail; }
@@@ -1356,9 -1565,9 +1565,9 @@@ static void testapp_headroom(struct tes static void testapp_stats_rx_dropped(struct test_spec *test) { test_spec_set_name(test, "STAT_RX_DROPPED"); + pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0); test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size - XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3; - pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0); pkt_stream_receive_half(test); test->ifobj_rx->validation_func = validate_rx_dropped; testapp_validate_traffic(test); @@@ -1481,6 -1690,11 +1690,11 @@@ static void testapp_invalid_desc(struc pkts[7].valid = false; }
+ if (test->ifobj_tx->shared_umem) { + pkts[4].addr += UMEM_SIZE; + pkts[5].addr += UMEM_SIZE; + } + pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)); testapp_validate_traffic(test); pkt_stream_restore_default(test); @@@ -1511,6 -1725,10 +1725,10 @@@ static void run_pkt_test(struct test_sp { switch (type) { case TEST_TYPE_STATS_RX_DROPPED: + if (mode == TEST_MODE_ZC) { + ksft_test_result_skip("Can not run RX_DROPPED test for ZC mode\n"); + return; + } testapp_stats_rx_dropped(test); break; case TEST_TYPE_STATS_TX_INVALID_DESCS: @@@ -1548,12 -1766,30 +1766,30 @@@
pkt_stream_restore_default(test); break; - case TEST_TYPE_POLL: - test->ifobj_tx->use_poll = true; + case TEST_TYPE_RX_POLL: test->ifobj_rx->use_poll = true; - test_spec_set_name(test, "POLL"); + test_spec_set_name(test, "POLL_RX"); + testapp_validate_traffic(test); + break; + case TEST_TYPE_TX_POLL: + test->ifobj_tx->use_poll = true; + test_spec_set_name(test, "POLL_TX"); testapp_validate_traffic(test); break; + case TEST_TYPE_POLL_TXQ_TMOUT: + test_spec_set_name(test, "POLL_TXQ_FULL"); + test->ifobj_tx->use_poll = true; + /* create invalid frame by set umem frame_size and pkt length equal to 2048 */ + test->ifobj_tx->umem->frame_size = 2048; + pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048); + testapp_validate_traffic_single_thread(test, test->ifobj_tx, type); + pkt_stream_restore_default(test); + break; + case TEST_TYPE_POLL_RXQ_TMOUT: + test_spec_set_name(test, "POLL_RXQ_EMPTY"); + test->ifobj_rx->use_poll = true; + testapp_validate_traffic_single_thread(test, test->ifobj_rx, type); + break; case TEST_TYPE_ALIGNED_INV_DESC: test_spec_set_name(test, "ALIGNED_INV_DESC"); testapp_invalid_desc(test); @@@ -1606,8 -1842,6 +1842,8 @@@ static struct ifobject *ifobject_create if (!ifobj->umem) goto out_umem;
+ ifobj->ns_fd = -1; + return ifobj;
out_umem: @@@ -1619,19 -1853,49 +1855,51 @@@ out_xsk_arr
static void ifobject_delete(struct ifobject *ifobj) { + if (ifobj->ns_fd != -1) + close(ifobj->ns_fd); free(ifobj->umem); free(ifobj->xsk_arr); free(ifobj); }
+ static bool is_xdp_supported(struct ifobject *ifobject) + { + int flags = XDP_FLAGS_DRV_MODE; + + LIBBPF_OPTS(bpf_link_create_opts, opts, .flags = flags); + struct bpf_insn insns[2] = { + BPF_MOV64_IMM(BPF_REG_0, XDP_PASS), + BPF_EXIT_INSN() + }; + int ifindex = if_nametoindex(ifobject->ifname); + int prog_fd, insn_cnt = ARRAY_SIZE(insns); + int err; + + prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL); + if (prog_fd < 0) + return false; + + err = bpf_xdp_attach(ifindex, prog_fd, flags, NULL); + if (err) { + close(prog_fd); + return false; + } + + bpf_xdp_detach(ifindex, flags, NULL); + close(prog_fd); + + return true; + } + int main(int argc, char **argv) { - struct pkt_stream *pkt_stream_default; + struct pkt_stream *rx_pkt_stream_default; + struct pkt_stream *tx_pkt_stream_default; struct ifobject *ifobj_tx, *ifobj_rx; + int modes = TEST_MODE_SKB + 1; u32 i, j, failed_tests = 0; struct test_spec test; + bool shared_umem;
/* Use libbpf 1.0 API mode */ libbpf_set_strict_mode(LIBBPF_STRICT_ALL); @@@ -1646,6 -1910,10 +1914,10 @@@ setlocale(LC_ALL, "");
parse_command_line(ifobj_tx, ifobj_rx, argc, argv); + shared_umem = !strcmp(ifobj_tx->ifname, ifobj_rx->ifname); + + ifobj_tx->shared_umem = shared_umem; + ifobj_rx->shared_umem = shared_umem;
if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx)) { usage(basename(argv[0])); @@@ -1657,15 -1925,23 +1929,23 @@@ init_iface(ifobj_rx, MAC2, MAC1, IP2, IP1, UDP_PORT2, UDP_PORT1, worker_testapp_validate_rx);
+ if (is_xdp_supported(ifobj_tx)) { + modes++; + if (ifobj_zc_avail(ifobj_tx)) + modes++; + } + test_spec_init(&test, ifobj_tx, ifobj_rx, 0); - pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, PKT_SIZE); - if (!pkt_stream_default) + tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, PKT_SIZE); + rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, PKT_SIZE); + if (!tx_pkt_stream_default || !rx_pkt_stream_default) exit_with_error(ENOMEM); - test.pkt_stream_default = pkt_stream_default; + test.tx_pkt_stream_default = tx_pkt_stream_default; + test.rx_pkt_stream_default = rx_pkt_stream_default;
- ksft_set_plan(TEST_MODE_MAX * TEST_TYPE_MAX); + ksft_set_plan(modes * TEST_TYPE_MAX);
- for (i = 0; i < TEST_MODE_MAX; i++) + for (i = 0; i < modes; i++) for (j = 0; j < TEST_TYPE_MAX; j++) { test_spec_init(&test, ifobj_tx, ifobj_rx, i); run_pkt_test(&test, i, j); @@@ -1675,7 -1951,11 +1955,11 @@@ failed_tests++; }
- pkt_stream_delete(pkt_stream_default); + pkt_stream_delete(tx_pkt_stream_default); + pkt_stream_delete(rx_pkt_stream_default); + free(ifobj_rx->umem); + if (!ifobj_tx->shared_umem) + free(ifobj_tx->umem); ifobject_delete(ifobj_tx); ifobject_delete(ifobj_rx);
diff --combined tools/testing/selftests/drivers/net/bonding/Makefile index 0f9659407969,d209f7a98b6c..84bf31a8a608 --- a/tools/testing/selftests/drivers/net/bonding/Makefile +++ b/tools/testing/selftests/drivers/net/bonding/Makefile @@@ -1,9 -1,7 +1,10 @@@ # SPDX-License-Identifier: GPL-2.0 # Makefile for net selftests
-TEST_PROGS := bond-break-lacpdu-tx.sh +TEST_PROGS := bond-break-lacpdu-tx.sh \ + dev_addr_lists.sh + TEST_PROGS += bond-lladdr-target.sh
+TEST_FILES := lag_lib.sh + include ../../../lib.mk