The following commit has been merged in the master branch:
commit fb298ac4ac03078dfedf9987dc4e1f20595f4d75
Merge: bcbc82dd30c6917a6ce9a5bc6d4f7283c7a09911 53f6f391786e01bf2050c03d8a36d9defdcc2831
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Tue Aug 13 11:53:32 2019 +1000

    Merge remote-tracking branch 'net-next/master'

    # Conflicts:
    #	drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
    #	net/rxrpc/call_object.c
diff --combined MAINTAINERS
index 63b727d0cbe7,e352550a6895..893a4c90e7ab
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -517,6 -517,14 +517,6 @@@ W:	http://ez.analog.com/community/linux
 S:	Supported
 F:	drivers/video/backlight/adp8860_bl.c
 
-ADS1015 HARDWARE MONITOR DRIVER
-M:	Dirk Eibach <eibach@gdsys.de>
-L:	linux-hwmon@vger.kernel.org
-S:	Maintained
-F:	Documentation/hwmon/ads1015.rst
-F:	drivers/hwmon/ads1015.c
-F:	include/linux/platform_data/ads1015.h
-
 ADT746X FAN DRIVER
 M:	Colin Leroy <colin@colino.net>
 S:	Maintained
@@@ -658,7 -666,7 +658,7 @@@ ALI1563 I2C DRIVE
 M:	Rudolf Marek <r.marek@assembler.cz>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
-F:	Documentation/i2c/busses/i2c-ali1563
+F:	Documentation/i2c/busses/i2c-ali1563.rst
 F:	drivers/i2c/busses/i2c-ali1563.c
 ALLEGRO DVT VIDEO IP CORE DRIVER
@@@ -668,13 -676,6 +668,13 @@@ L:	linux-media@vger.kernel.or
 S:	Maintained
 F:	drivers/staging/media/allegro-dvt/
 
+ALLWINNER CPUFREQ DRIVER
+M:	Yangtao Li <tiny.windzz@gmail.com>
+L:	linux-pm@vger.kernel.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/opp/sun50i-nvmem-cpufreq.txt
+F:	drivers/cpufreq/sun50i-cpufreq-nvmem.c
+
 ALLWINNER SECURITY SYSTEM
 M:	Corentin Labbe <clabbe.montjoie@gmail.com>
 L:	linux-crypto@vger.kernel.org
@@@ -682,7 -683,7 +682,7 @@@ S:	Maintaine
 F:	drivers/crypto/sunxi-ss/
 
 ALLWINNER VPU DRIVER
-M:	Maxime Ripard <maxime.ripard@bootlin.com>
+M:	Maxime Ripard <mripard@kernel.org>
 M:	Paul Kocialkowski <paul.kocialkowski@bootlin.com>
 L:	linux-media@vger.kernel.org
 S:	Maintained
@@@ -1407,7 -1408,7 +1407,7 @@@ S:	Maintaine
 F:	drivers/clk/sunxi/
 
 ARM/Allwinner sunXi SoC support
-M:	Maxime Ripard <maxime.ripard@bootlin.com>
+M:	Maxime Ripard <mripard@kernel.org>
 M:	Chen-Yu Tsai <wens@csie.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
@@@ -1581,8 -1582,8 +1581,8 @@@ R:	Suzuki K Poulose <suzuki.poulose@arm
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	drivers/hwtracing/coresight/*
-F:	Documentation/trace/coresight.txt
-F:	Documentation/trace/coresight-cpu-debug.txt
+F:	Documentation/trace/coresight.rst
+F:	Documentation/trace/coresight-cpu-debug.rst
 F:	Documentation/devicetree/bindings/arm/coresight.txt
 F:	Documentation/devicetree/bindings/arm/coresight-cpu-debug.txt
 F:	Documentation/ABI/testing/sysfs-bus-coresight-devices-*
@@@ -3576,7 -3577,7 +3576,7 @@@ F:	Documentation/filesystems/caching/ca
 F:	fs/cachefiles/
 
 CADENCE MIPI-CSI2 BRIDGES
-M:	Maxime Ripard <maxime.ripard@bootlin.com>
+M:	Maxime Ripard <mripard@kernel.org>
 L:	linux-media@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/media/cdns,*.txt
@@@ -3634,9 -3635,12 +3634,12 @@@ S:	Maintaine
 F:	Documentation/devicetree/bindings/net/can/
 F:	drivers/net/can/
 F:	include/linux/can/dev.h
+ F:	include/linux/can/led.h
+ F:	include/linux/can/rx-offload.h
 F:	include/linux/can/platform/
 F:	include/uapi/linux/can/error.h
 F:	include/uapi/linux/can/netlink.h
+ F:	include/uapi/linux/can/vxcan.h
 CAN NETWORK LAYER
 M:	Oliver Hartkopp <socketcan@hartkopp.net>
@@@ -3649,6 -3653,8 +3652,8 @@@ S:	Maintaine
 F:	Documentation/networking/can.rst
 F:	net/can/
 F:	include/linux/can/core.h
+ F:	include/linux/can/skb.h
+ F:	include/net/netns/can.h
 F:	include/uapi/linux/can.h
 F:	include/uapi/linux/can/bcm.h
 F:	include/uapi/linux/can/raw.h
@@@ -4102,7 -4108,7 +4107,7 @@@ L:	samba-technical@lists.samba.org (mod
 W:	http://linux-cifs.samba.org/
 T:	git git://git.samba.org/sfrench/cifs-2.6.git
 S:	Supported
-F:	Documentation/filesystems/cifs/
+F:	Documentation/admin-guide/cifs/
 F:	fs/cifs/
 
 COMPACTPCI HOTPLUG CORE
@@@ -4289,14 -4295,6 +4294,14 @@@ S:	Supporte
 F:	drivers/cpuidle/cpuidle-exynos.c
 F:	arch/arm/mach-exynos/pm.c
 
+CPUIDLE DRIVER - ARM PSCI
+M:	Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M:	Sudeep Holla <sudeep.holla@arm.com>
+L:	linux-pm@vger.kernel.org
+L:	linux-arm-kernel@lists.infradead.org
+S:	Supported
+F:	drivers/cpuidle/cpuidle-psci.c
+
 CPU IDLE TIME MANAGEMENT FRAMEWORK
 M:	"Rafael J. Wysocki" <rjw@rjwysocki.net>
 M:	Daniel Lezcano <daniel.lezcano@linaro.org>
@@@ -4958,9 -4956,7 +4963,9 @@@ M:	Jonathan Corbet <corbet@lwn.net
 L:	linux-doc@vger.kernel.org
 S:	Maintained
 F:	Documentation/
+F:	scripts/documentation-file-ref-check
 F:	scripts/kernel-doc
+F:	scripts/sphinx-pre-install
 X:	Documentation/ABI/
 X:	Documentation/firmware-guide/acpi/
 X:	Documentation/devicetree/
@@@ -4976,14 -4972,6 +4981,14 @@@ L:	linux-doc@vger.kernel.or
 S:	Maintained
 F:	Documentation/translations/it_IT
 
+DOCUMENTATION SCRIPTS
+M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+L:	linux-doc@vger.kernel.org
+S:	Maintained
+F:	scripts/documentation-file-ref-check
+F:	scripts/sphinx-pre-install
+F:	Documentation/sphinx/parse-headers.pl
+
 DONGWOON DW9714 LENS VOICE COIL DRIVER
 M:	Sakari Ailus <sakari.ailus@linux.intel.com>
 L:	linux-media@vger.kernel.org
@@@ -5312,7 -5300,7 +5317,7 @@@ F:	include/linux/vga
 
 DRM DRIVERS AND MISC GPU PATCHES
 M:	Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
-M:	Maxime Ripard <maxime.ripard@bootlin.com>
+M:	Maxime Ripard <mripard@kernel.org>
 M:	Sean Paul <sean@poorly.run>
 W:	https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
 S:	Maintained
@@@ -5325,7 -5313,7 +5330,7 @@@ F:	include/uapi/drm/drm
 F:	include/linux/vga*
 
 DRM DRIVERS FOR ALLWINNER A10
-M:	Maxime Ripard <maxime.ripard@bootlin.com>
+M:	Maxime Ripard <mripard@kernel.org>
 L:	dri-devel@lists.freedesktop.org
 S:	Supported
 F:	drivers/gpu/drm/sun4i/
@@@ -6082,7 -6070,7 +6087,7 @@@ M:	Florian Fainelli <f.fainelli@gmail.c
 M:	Heiner Kallweit <hkallweit1@gmail.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
-F:	Documentation/ABI/testing/sysfs-bus-mdio
+F:	Documentation/ABI/testing/sysfs-class-net-phydev
 F:	Documentation/devicetree/bindings/net/ethernet-phy.yaml
 F:	Documentation/devicetree/bindings/net/mdio*
 F:	Documentation/networking/phy.rst
@@@ -6338,7 -6326,7 +6343,7 @@@ FLEXTIMER FTM-QUADDEC DRIVE
 M:	Patrick Havelange <patrick.havelange@essensium.com>
 L:	linux-iio@vger.kernel.org
 S:	Maintained
-F:	Documentation/ABI/testing/sysfs-bus-counter-ftm-quadddec
+F:	Documentation/ABI/testing/sysfs-bus-counter-ftm-quaddec
 F:	Documentation/devicetree/bindings/counter/ftm-quaddec.txt
 F:	drivers/counter/ftm-quaddec.c
@@@ -6361,7 -6349,7 +6366,7 @@@ FPGA MANAGER FRAMEWOR
 M:	Moritz Fischer <mdf@kernel.org>
 L:	linux-fpga@vger.kernel.org
 S:	Maintained
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/atull/linux-fpga.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mdf/linux-fpga.git
 Q:	http://patchwork.kernel.org/project/linux-fpga/list/
 F:	Documentation/fpga/
 F:	Documentation/driver-api/fpga/
@@@ -6394,7 -6382,7 +6399,7 @@@ FRAMEBUFFER LAYE
 M:	Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:	dri-devel@lists.freedesktop.org
 L:	linux-fbdev@vger.kernel.org
-T:	git git://github.com/bzolnier/linux.git
+T:	git git://anongit.freedesktop.org/drm/drm-misc
 Q:	http://patchwork.kernel.org/project/linux-fbdev/list/
 S:	Maintained
 F:	Documentation/fb/
@@@ -6651,18 -6639,6 +6656,18 @@@ S:	Maintaine
 F:	fs/notify/
 F:	include/linux/fsnotify*.h
 
+FSVERITY: READ-ONLY FILE-BASED AUTHENTICITY PROTECTION
+M:	Eric Biggers <ebiggers@kernel.org>
+M:	Theodore Y. Ts'o <tytso@mit.edu>
+L:	linux-fscrypt@vger.kernel.org
+Q:	https://patchwork.kernel.org/project/linux-fscrypt/list/
+T:	git git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt.git fsverity
+S:	Supported
+F:	fs/verity/
+F:	include/linux/fsverity.h
+F:	include/uapi/linux/fsverity.h
+F:	Documentation/filesystems/fsverity.rst
+
 FUJITSU LAPTOP EXTRAS
 M:	Jonathan Woithe <jwoithe@just42.net>
 L:	platform-driver-x86@vger.kernel.org
@@@ -6753,13 -6729,6 +6758,13 @@@ W:	https://linuxtv.or
 S:	Maintained
 F:	drivers/media/radio/radio-gemtek*
 
+GENERIC ARCHITECTURE TOPOLOGY
+M:	Sudeep Holla <sudeep.holla@arm.com>
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+F:	drivers/base/arch_topology.c
+F:	include/linux/arch_topology.h
+
 GENERIC GPIO I2C DRIVER
 M:	Wolfram Sang <wsa+renesas@sang-engineering.com>
 S:	Supported
@@@ -6772,7 -6741,7 +6777,7 @@@ L:	linux-i2c@vger.kernel.or
 S:	Supported
 F:	drivers/i2c/muxes/i2c-mux-gpio.c
 F:	include/linux/platform_data/i2c-mux-gpio.h
-F:	Documentation/i2c/muxes/i2c-mux-gpio
+F:	Documentation/i2c/muxes/i2c-mux-gpio.rst
 GENERIC HDLC (WAN) DRIVERS
 M:	Krzysztof Halasa <khc@pm.waw.pl>
@@@ -7521,14 -7490,14 +7526,14 @@@ I2C CONTROLLER DRIVER FOR NVIDIA GP
 M:	Ajay Gupta <ajayg@nvidia.com>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
-F:	Documentation/i2c/busses/i2c-nvidia-gpu
+F:	Documentation/i2c/busses/i2c-nvidia-gpu.rst
 F:	drivers/i2c/busses/i2c-nvidia-gpu.c
 
 I2C MUXES
 M:	Peter Rosin <peda@axentia.se>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
-F:	Documentation/i2c/i2c-topology
+F:	Documentation/i2c/i2c-topology.rst
 F:	Documentation/i2c/muxes/
 F:	Documentation/devicetree/bindings/i2c/i2c-mux*
 F:	Documentation/devicetree/bindings/i2c/i2c-arb*
@@@ -7541,15 -7510,15 +7546,15 @@@ I2C MV64XXX MARVELL AND ALLWINNER DRIVE
 M:	Gregory CLEMENT <gregory.clement@bootlin.com>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
-F:	Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
+F:	Documentation/devicetree/bindings/i2c/marvell,mv64xxx-i2c.yaml
 F:	drivers/i2c/busses/i2c-mv64xxx.c
 
 I2C OVER PARALLEL PORT
 M:	Jean Delvare <jdelvare@suse.com>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
-F:	Documentation/i2c/busses/i2c-parport
-F:	Documentation/i2c/busses/i2c-parport-light
+F:	Documentation/i2c/busses/i2c-parport.rst
+F:	Documentation/i2c/busses/i2c-parport-light.rst
 F:	drivers/i2c/busses/i2c-parport.c
 F:	drivers/i2c/busses/i2c-parport-light.c
 
@@@ -7583,7 -7552,7 +7588,7 @@@ I2C-TAOS-EVM DRIVE
 M:	Jean Delvare <jdelvare@suse.com>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
-F:	Documentation/i2c/busses/i2c-taos-evm
+F:	Documentation/i2c/busses/i2c-taos-evm.rst
 F:	drivers/i2c/busses/i2c-taos-evm.c
 
 I2C-TINY-USB DRIVER
@@@ -7597,19 -7566,19 +7602,19 @@@ I2C/SMBUS CONTROLLER DRIVERS FOR P
 M:	Jean Delvare <jdelvare@suse.com>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
-F:	Documentation/i2c/busses/i2c-ali1535
-F:	Documentation/i2c/busses/i2c-ali1563
-F:	Documentation/i2c/busses/i2c-ali15x3
-F:	Documentation/i2c/busses/i2c-amd756
-F:	Documentation/i2c/busses/i2c-amd8111
-F:	Documentation/i2c/busses/i2c-i801
-F:	Documentation/i2c/busses/i2c-nforce2
-F:	Documentation/i2c/busses/i2c-piix4
-F:	Documentation/i2c/busses/i2c-sis5595
-F:	Documentation/i2c/busses/i2c-sis630
-F:	Documentation/i2c/busses/i2c-sis96x
-F:	Documentation/i2c/busses/i2c-via
-F:	Documentation/i2c/busses/i2c-viapro
+F:	Documentation/i2c/busses/i2c-ali1535.rst
+F:	Documentation/i2c/busses/i2c-ali1563.rst
+F:	Documentation/i2c/busses/i2c-ali15x3.rst
+F:	Documentation/i2c/busses/i2c-amd756.rst
+F:	Documentation/i2c/busses/i2c-amd8111.rst
+F:	Documentation/i2c/busses/i2c-i801.rst
+F:	Documentation/i2c/busses/i2c-nforce2.rst
+F:	Documentation/i2c/busses/i2c-piix4.rst
+F:	Documentation/i2c/busses/i2c-sis5595.rst
+F:	Documentation/i2c/busses/i2c-sis630.rst
+F:	Documentation/i2c/busses/i2c-sis96x.rst
+F:	Documentation/i2c/busses/i2c-via.rst
+F:	Documentation/i2c/busses/i2c-viapro.rst
 F:	drivers/i2c/busses/i2c-ali1535.c
 F:	drivers/i2c/busses/i2c-ali1563.c
 F:	drivers/i2c/busses/i2c-ali15x3.c
@@@ -7638,7 -7607,7 +7643,7 @@@ M:	Seth Heasley <seth.heasley@intel.com
 M:	Neil Horman <nhorman@tuxdriver.com>
 L:	linux-i2c@vger.kernel.org
 F:	drivers/i2c/busses/i2c-ismt.c
-F:	Documentation/i2c/busses/i2c-ismt
+F:	Documentation/i2c/busses/i2c-ismt.rst
 I2C/SMBUS STUB DRIVER
 M:	Jean Delvare <jdelvare@suse.com>
@@@ -8078,7 -8047,6 +8083,7 @@@ S:	Maintaine
 F:	drivers/video/fbdev/i810/
 
 INTEL ASoC DRIVERS
+M:	Cezary Rojewski <cezary.rojewski@intel.com>
 M:	Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
 M:	Liam Girdwood <liam.r.girdwood@linux.intel.com>
 M:	Jie Yang <yang.jie@linux.intel.com>
@@@ -8381,7 -8349,7 +8386,7 @@@ M:	linux-wimax@intel.co
 L:	wimax@linuxwimax.org (subscribers-only)
 S:	Supported
 W:	http://linuxwimax.org
-F:	Documentation/wimax/README.i2400m
+F:	Documentation/admin-guide/wimax/i2400m.rst
 F:	drivers/net/wimax/i2400m/
 F:	include/uapi/linux/wimax/i2400m.h
 
@@@ -8684,7 -8652,7 +8689,7 @@@ L:	jfs-discussion@lists.sourceforge.ne
 W:	http://jfs.sourceforge.net/
 T:	git git://github.com/kleikamp/linux-shaggy.git
 S:	Maintained
-F:	Documentation/filesystems/jfs.txt
+F:	Documentation/admin-guide/jfs.rst
 F:	fs/jfs/
 
 JME NETWORK DRIVER
@@@ -9026,7 -8994,7 +9031,7 @@@ F:	kernel/kprobes.
 KS0108 LCD CONTROLLER DRIVER
 M:	Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
 S:	Maintained
-F:	Documentation/auxdisplay/ks0108
+F:	Documentation/admin-guide/auxdisplay/ks0108.rst
 F:	drivers/auxdisplay/ks0108.c
 F:	include/linux/ks0108.h
 
@@@ -9603,7 -9571,7 +9608,7 @@@ F:	Documentation/networking/mac80211-in
 F:	include/net/mac80211.h
 F:	net/mac80211/
 F:	drivers/net/wireless/mac80211_hwsim.[ch]
-F:	Documentation/networking/mac80211_hwsim/README
+F:	Documentation/networking/mac80211_hwsim/mac80211_hwsim.rst
 MAILBOX API
 M:	Jassi Brar <jassisinghbrar@gmail.com>
@@@ -10052,8 -10020,8 +10057,8 @@@ L:	linux-media@vger.kernel.or
 L:	linux-renesas-soc@vger.kernel.org
 T:	git git://linuxtv.org/media_tree.git
 S:	Supported
-F:	Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt
-F:	Documentation/devicetree/bindings/media/rcar_vin.txt
+F:	Documentation/devicetree/bindings/media/renesas,csi2.txt
+F:	Documentation/devicetree/bindings/media/renesas,vin.txt
 F:	drivers/media/platform/rcar-vin/
 
 MEDIA DRIVERS FOR RENESAS - VSP1
@@@ -10380,7 -10348,7 +10385,7 @@@ L:	linux-i2c@vger.kernel.or
 S:	Supported
 F:	drivers/i2c/busses/i2c-mlxcpld.c
 F:	drivers/i2c/muxes/i2c-mux-mlxcpld.c
-F:	Documentation/i2c/busses/i2c-mlxcpld
+F:	Documentation/i2c/busses/i2c-mlxcpld.rst
 
 MELLANOX MLXCPLD LED DRIVER
 M:	Vadim Pasternak <vadimp@mellanox.com>
@@@ -11359,7 -11327,6 +11364,6 @@@ F:	include/net/nfc
 F:	include/uapi/linux/nfc.h
 F:	drivers/nfc/
 F:	include/linux/platform_data/nfcmrvl.h
- F:	include/linux/platform_data/nxp-nci.h
 F:	Documentation/devicetree/bindings/net/nfc/
 
 NFS, SUNRPC, AND LOCKD CLIENTS
@@@ -12003,7 -11970,7 +12007,7 @@@ M:	Andrew Lunn <andrew@lunn.ch
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/i2c/i2c-ocores.txt
-F:	Documentation/i2c/busses/i2c-ocores
+F:	Documentation/i2c/busses/i2c-ocores.rst
 F:	drivers/i2c/busses/i2c-ocores.c
 F:	include/linux/platform_data/i2c-ocores.h
 
@@@ -12126,7 -12093,7 +12130,7 @@@ L:	netdev@vger.kernel.or
 S:	Supported
 F:	lib/packing.c
 F:	include/linux/packing.h
-F:	Documentation/packing.txt
+F:	Documentation/core-api/packing.rst
 
 PADATA PARALLEL EXECUTION MECHANISM
 M:	Steffen Klassert <steffen.klassert@secunet.com>
@@@ -13254,7 -13221,7 +13258,7 @@@ M:	Manish Chopra <manishc@marvell.com
 M:	GR-Linux-NIC-Dev@marvell.com
 L:	netdev@vger.kernel.org
 S:	Supported
- F:	drivers/net/ethernet/qlogic/qlge/
+ F:	drivers/staging/qlge/
 QM1D1B0004 MEDIA DRIVER
 M:	Akihiro Tsukada <tskd08@gmail.com>
@@@ -13322,8 -13289,8 +13326,8 @@@ QUALCOMM CPUFREQ DRIVER MSM8996/APQ809
 M:	Ilia Lin <ilia.lin@kernel.org>
 L:	linux-pm@vger.kernel.org
 S:	Maintained
-F:	Documentation/devicetree/bindings/opp/kryo-cpufreq.txt
-F:	drivers/cpufreq/qcom-cpufreq-kryo.c
+F:	Documentation/devicetree/bindings/opp/qcom-nvmem-cpufreq.txt
+F:	drivers/cpufreq/qcom-cpufreq-nvmem.c
 
 QUALCOMM EMAC GIGABIT ETHERNET DRIVER
 M:	Timur Tabi <timur@kernel.org>
@@@ -13688,14 -13655,14 +13692,14 @@@ RENESAS R-CAR I2C DRIVER
 M:	Wolfram Sang <wsa+renesas@sang-engineering.com>
 S:	Supported
 F:	Documentation/devicetree/bindings/i2c/i2c-rcar.txt
-F:	Documentation/devicetree/bindings/i2c/i2c-sh_mobile.txt
+F:	Documentation/devicetree/bindings/i2c/renesas,iic.txt
 F:	drivers/i2c/busses/i2c-rcar.c
 F:	drivers/i2c/busses/i2c-sh_mobile.c
 
 RENESAS RIIC DRIVER
 M:	Chris Brandt <chris.brandt@renesas.com>
 S:	Supported
-F:	Documentation/devicetree/bindings/i2c/i2c-riic.txt
+F:	Documentation/devicetree/bindings/i2c/renesas,riic.txt
 F:	drivers/i2c/busses/i2c-riic.c
 
 RENESAS USB PHY DRIVER
@@@ -14313,7 -14280,7 +14317,7 @@@ F:	net/sctp
 SCx200 CPU SUPPORT
 M:	Jim Cromie <jim.cromie@gmail.com>
 S:	Odd Fixes
-F:	Documentation/i2c/busses/scx200_acb
+F:	Documentation/i2c/busses/scx200_acb.rst
 F:	arch/x86/platform/scx200/
 F:	drivers/watchdog/scx200_wdt.c
 F:	drivers/i2c/busses/scx200*
@@@ -15588,7 -15555,6 +15592,7 @@@ F:	drivers/clk/clk-sc[mp]i.
 F:	drivers/cpufreq/sc[mp]i-cpufreq.c
 F:	drivers/firmware/arm_scpi.c
 F:	drivers/firmware/arm_scmi/
+F:	drivers/reset/reset-scmi.c
 F:	include/linux/sc[mp]i_protocol.h
 
 SYSTEM RESET/SHUTDOWN DRIVERS
@@@ -15950,7 -15916,7 +15954,7 @@@ M:	Viresh Kumar <viresh.kumar@linaro.or
 M:	Javi Merino <javi.merino@kernel.org>
 L:	linux-pm@vger.kernel.org
 S:	Supported
-F:	Documentation/thermal/cpu-cooling-api.rst
+F:	Documentation/driver-api/thermal/cpu-cooling-api.rst
 F:	drivers/thermal/cpu_cooling.c
 F:	include/linux/cpu_cooling.h
 
@@@ -16122,7 -16088,7 +16126,7 @@@ S:	Maintaine
 F:	drivers/net/ethernet/ti/netcp*
 
 TI PCM3060 ASoC CODEC DRIVER
-M:	Kirill Marinushkin <kmarinushkin@birdec.tech>
+M:	Kirill Marinushkin <kmarinushkin@birdec.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Maintained
 F:	Documentation/devicetree/bindings/sound/pcm3060.txt
@@@ -16473,7 -16439,7 +16477,7 @@@ F:	drivers/hid/hid-udraw-ps3.
 UFS FILESYSTEM
 M:	Evgeniy Dushistov <dushistov@mail.ru>
 S:	Maintained
-F:	Documentation/filesystems/ufs.txt
+F:	Documentation/admin-guide/ufs.rst
 F:	fs/ufs/
 
 UHID USERSPACE HID IO DRIVER:
@@@ -17391,7 -17357,7 +17395,7 @@@ M:	linux-wimax@intel.co
 L:	wimax@linuxwimax.org (subscribers-only)
 S:	Supported
 W:	http://linuxwimax.org
-F:	Documentation/wimax/README.wimax
+F:	Documentation/admin-guide/wimax/wimax.rst
 F:	include/linux/wimax/debug.h
 F:	include/net/wimax.h
 F:	include/uapi/linux/wimax.h
diff --combined arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
index 20d7e7db5dcb,de71153fda00..d34c867b49ba
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
@@@ -29,7 -29,6 +29,7 @@@
 			clocks = <&clockgen 1 0>;
 			next-level-cache = <&l2>;
 			cpu-idle-states = <&CPU_PW20>;
+			#cooling-cells = <2>;
 		};
 		cpu1: cpu@1 {
@@@ -40,7 -39,6 +40,7 @@@
 			clocks = <&clockgen 1 0>;
 			next-level-cache = <&l2>;
 			cpu-idle-states = <&CPU_PW20>;
+			#cooling-cells = <2>;
 		};
 
 		l2: l2-cache {
@@@ -505,89 -503,6 +505,89 @@@
 			status = "disabled";
 		};
+		tmu: tmu@1f00000 {
+			compatible = "fsl,qoriq-tmu";
+			reg = <0x0 0x1f80000 0x0 0x10000>;
+			interrupts = <0 23 0x4>;
+			fsl,tmu-range = <0xb0000 0xa0026 0x80048 0x70061>;
+			fsl,tmu-calibration = <0x00000000 0x00000024
+					       0x00000001 0x0000002b
+					       0x00000002 0x00000031
+					       0x00000003 0x00000038
+					       0x00000004 0x0000003f
+					       0x00000005 0x00000045
+					       0x00000006 0x0000004c
+					       0x00000007 0x00000053
+					       0x00000008 0x00000059
+					       0x00000009 0x00000060
+					       0x0000000a 0x00000066
+					       0x0000000b 0x0000006d
+
+					       0x00010000 0x0000001c
+					       0x00010001 0x00000024
+					       0x00010002 0x0000002c
+					       0x00010003 0x00000035
+					       0x00010004 0x0000003d
+					       0x00010005 0x00000045
+					       0x00010006 0x0000004d
+					       0x00010007 0x00000045
+					       0x00010008 0x0000005e
+					       0x00010009 0x00000066
+					       0x0001000a 0x0000006e
+
+					       0x00020000 0x00000018
+					       0x00020001 0x00000022
+					       0x00020002 0x0000002d
+					       0x00020003 0x00000038
+					       0x00020004 0x00000043
+					       0x00020005 0x0000004d
+					       0x00020006 0x00000058
+					       0x00020007 0x00000063
+					       0x00020008 0x0000006e
+
+					       0x00030000 0x00000010
+					       0x00030001 0x0000001c
+					       0x00030002 0x00000029
+					       0x00030003 0x00000036
+					       0x00030004 0x00000042
+					       0x00030005 0x0000004f
+					       0x00030006 0x0000005b
+					       0x00030007 0x00000068>;
+			little-endian;
+			#thermal-sensor-cells = <1>;
+		};
+
+		thermal-zones {
+			core-cluster {
+				polling-delay-passive = <1000>;
+				polling-delay = <5000>;
+				thermal-sensors = <&tmu 0>;
+
+				trips {
+					core_cluster_alert: core-cluster-alert {
+						temperature = <85000>;
+						hysteresis = <2000>;
+						type = "passive";
+					};
+
+					core_cluster_crit: core-cluster-crit {
+						temperature = <95000>;
+						hysteresis = <2000>;
+						type = "critical";
+					};
+				};
+
+				cooling-maps {
+					map0 {
+						trip = <&core_cluster_alert>;
+						cooling-device =
+							<&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							<&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+					};
+				};
+			};
+		};
+
 		pcie@1f0000000 { /* Integrated Endpoint Root Complex */
 			compatible = "pci-host-ecam-generic";
 			reg = <0x01 0xf0000000 0x0 0x100000>;
@@@ -621,6 -536,12 +621,6 @@@
 				compatible = "fsl,enetc";
 				reg = <0x000100 0 0 0 0>;
 			};
+ 			enetc_mdio_pf3: mdio@0,3 {
+ 				compatible = "fsl,enetc-mdio";
+ 				reg = <0x000300 0 0 0 0>;
+ 				#address-cells = <1>;
+ 				#size-cells = <0>;
+ 			};
 			ethernet@0,4 {
 				compatible = "fsl,enetc-ptp";
 				reg = <0x000400 0 0 0 0>;
@@@ -639,7 -560,6 +645,7 @@@
 			clocks = <&dpclk>, <&aclk>, <&aclk>, <&pclk>;
 			clock-names = "pxlclk", "mclk", "aclk", "pclk";
 			arm,malidp-output-port-lines = /bits/ 8 <8 8 8>;
+ 			arm,malidp-arqos-value = <0xd000d000>;
 
 			port {
 				dp0_out: endpoint {
diff --combined drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 7882148abb43,dc7b128c780e..17b7ae9f46ec
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@@ -1785,7 -1785,7 +1785,7 @@@ static bool ixgbe_is_non_eop(struct ixg
 static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
 			    struct sk_buff *skb)
 {
- 	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ 	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
 	unsigned char *va;
 	unsigned int pull_len;
@@@ -1807,7 -1807,7 +1807,7 @@@
 	/* update all of the pointers */
 	skb_frag_size_sub(frag, pull_len);
- 	frag->page_offset += pull_len;
+ 	skb_frag_off_add(frag, pull_len);
 	skb->data_len -= pull_len;
 	skb->tail += pull_len;
 }
@@@ -1840,11 -1840,11 +1840,11 @@@ static void ixgbe_dma_sync_frag(struct
 					      skb_headlen(skb),
 					      DMA_FROM_DEVICE);
 	} else {
- 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ 		skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
 
 		dma_sync_single_range_for_cpu(rx_ring->dev,
 					      IXGBE_CB(skb)->dma,
- 					      frag->page_offset,
+ 					      skb_frag_off(frag),
 					      skb_frag_size(frag),
 					      DMA_FROM_DEVICE);
 	}
@@@ -7897,8 -7897,11 +7897,8 @@@ static void ixgbe_service_task(struct w
 		return;
 	}
 	if (ixgbe_check_fw_error(adapter)) {
-		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
-			rtnl_lock();
+		if (!test_bit(__IXGBE_DOWN, &adapter->state))
 			unregister_netdev(adapter->netdev);
-			rtnl_unlock();
-		}
 		ixgbe_service_event_complete(adapter);
 		return;
 	}
@@@ -8183,7 -8186,7 +8183,7 @@@ static int ixgbe_tx_map(struct ixgbe_ri
 	struct sk_buff *skb = first->skb;
 	struct ixgbe_tx_buffer *tx_buffer;
 	union ixgbe_adv_tx_desc *tx_desc;
- 	struct skb_frag_struct *frag;
+ 	skb_frag_t *frag;
 	dma_addr_t dma;
 	unsigned int data_len, size;
 	u32 tx_flags = first->tx_flags;
@@@ -8602,7 -8605,8 +8602,8 @@@ netdev_tx_t ixgbe_xmit_frame_ring(struc
 	 * otherwise try next time
 	 */
 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ 		count += TXD_USE_COUNT(skb_frag_size(
+ 						&skb_shinfo(skb)->frags[f]));
 
 	if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
 		tx_ring->tx_stats.tx_busy++;
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en.h
index f6b64a03cd06,0807992090b8..ff0cb65f5b58
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@@ -184,13 -184,8 +184,13 @@@ static inline int mlx5e_get_max_num_chan
 struct mlx5e_tx_wqe {
 	struct mlx5_wqe_ctrl_seg ctrl;
-	struct mlx5_wqe_eth_seg  eth;
-	struct mlx5_wqe_data_seg data[0];
+	union {
+		struct {
+			struct mlx5_wqe_eth_seg  eth;
+			struct mlx5_wqe_data_seg data[0];
+		};
+		u8 tls_progress_params_ctx[0];
+	};
 };
 
 struct mlx5e_rx_wqe_ll {
@@@ -356,6 -351,7 +356,7 @@@ enum
 	MLX5E_SQ_STATE_IPSEC,
 	MLX5E_SQ_STATE_AM,
 	MLX5E_SQ_STATE_TLS,
+ 	MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
 };
 
 struct mlx5e_sq_wqe_info {
@@@ -480,8 -476,6 +481,6 @@@ struct mlx5e_xdp_mpwqe
 	struct mlx5e_tx_wqe *wqe;
 	u8                   ds_count;
 	u8                   pkt_count;
- 	u8                   max_ds_count;
- 	u8                   complete;
 	u8                   inline_on;
 };
 
@@@ -1133,7 -1127,6 +1132,6 @@@ void mlx5e_build_rq_params(struct mlx5_
 			   struct mlx5e_params *params);
 void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
 			    u16 num_channels);
- u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
 void mlx5e_rx_dim_work(struct work_struct *work);
 void mlx5e_tx_dim_work(struct work_struct *work);
 
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index c7f86453c638,6e54fefea410..817c6ea7e349
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@@ -1,7 -1,6 +1,6 @@@
 /* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (c) 2019 Mellanox Technologies. */
 
- #include <net/devlink.h>
 #include "reporter.h"
 #include "lib/eq.h"
@@@ -76,21 -75,26 +75,21 @@@ static int mlx5e_tx_reporter_err_cqe_re
 	u8 state;
 	int err;
 
-	if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
-		return 0;
-
 	err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
 	if (err) {
 		netdev_err(dev, "Failed to query SQ 0x%x state. err = %d\n",
 			   sq->sqn, err);
- 		return err;
+ 		goto out;
 	}
 
- 	if (state != MLX5_SQC_STATE_ERR) {
- 		netdev_err(dev, "SQ 0x%x not in ERROR state\n", sq->sqn);
- 		return -EINVAL;
- 	}
+ 	if (state != MLX5_SQC_STATE_ERR)
+ 		goto out;
mlx5e_tx_disable_queue(sq->txq);
 	err = mlx5e_wait_for_sq_flush(sq);
 	if (err)
- 		return err;
+ 		goto out;
 
 	/* At this point, no new packets will arrive from the stack as TXQ is
 	 * marked with QUEUE_STATE_DRV_XOFF. In addition, NAPI cleared all
@@@ -99,24 -103,20 +98,24 @@@
 
 	err = mlx5e_sq_to_ready(sq, state);
 	if (err)
- 		return err;
+ 		goto out;
 
 	mlx5e_reset_txqsq_cc_pc(sq);
 	sq->stats->recover++;
+	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
 	mlx5e_activate_txqsq(sq);
 
 	return 0;
+out:
+	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
+	return err;
 }
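
The recovery handler above now funnels every failure through a single exit label, so the RECOVERING bit is cleared exactly once whether recovery succeeds or fails. A minimal standalone sketch of that shape; struct queue, QUEUE_RECOVERING and the three helpers are hypothetical stand-ins, not the driver's actual symbols:

	struct queue { unsigned long state; };
	#define QUEUE_RECOVERING 0

	int query_hw_state(struct queue *q);	/* hypothetical */
	int drain_and_reset(struct queue *q);	/* hypothetical */
	void activate(struct queue *q);		/* hypothetical */

	static int recover_queue(struct queue *q)
	{
		int err;

		err = query_hw_state(q);
		if (err)
			goto out;

		err = drain_and_reset(q);
		if (err)
			goto out;

		/* success: clear the flag before re-activating */
		clear_bit(QUEUE_RECOVERING, &q->state);
		activate(q);
		return 0;

	out:
		/* every error path clears the flag exactly once, too */
		clear_bit(QUEUE_RECOVERING, &q->state);
		return err;
	}
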
 static int mlx5_tx_health_report(struct devlink_health_reporter *tx_reporter,
 				 char *err_str,
 				 struct mlx5e_tx_err_ctx *err_ctx)
 {
- 	if (IS_ERR_OR_NULL(tx_reporter)) {
+ 	if (!tx_reporter) {
 		netdev_err(err_ctx->sq->channel->netdev, err_str);
 		return err_ctx->recover(err_ctx->sq);
 	}
@@@ -288,23 -288,27 +287,27 @@@ static const struct devlink_health_repo
 
 int mlx5e_tx_reporter_create(struct mlx5e_priv *priv)
 {
+ 	struct devlink_health_reporter *reporter;
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct devlink *devlink = priv_to_devlink(mdev);
 
- 	priv->tx_reporter =
+ 	reporter =
 		devlink_health_reporter_create(devlink, &mlx5_tx_reporter_ops,
 					       MLX5_REPORTER_TX_GRACEFUL_PERIOD,
 					       true, priv);
- 	if (IS_ERR(priv->tx_reporter))
+ 	if (IS_ERR(reporter)) {
 		netdev_warn(priv->netdev,
 			    "Failed to create tx reporter, err = %ld\n",
- 			    PTR_ERR(priv->tx_reporter));
- 	return IS_ERR_OR_NULL(priv->tx_reporter);
+ 			    PTR_ERR(reporter));
+ 		return PTR_ERR(reporter);
+ 	}
+ 	priv->tx_reporter = reporter;
+ 	return 0;
 }
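
devlink_health_reporter_create() returns an ERR_PTR on failure, and the rework above copies it into priv only on success. That is what lets the rest of the driver (see the IS_ERR_OR_NULL() to !priv->tx_reporter conversions elsewhere in this diff) treat the field as "NULL or valid", never an error pointer. Condensed to the bare idiom, with my_priv, my_ops and MY_GRACE_PERIOD as placeholders:

	static int my_reporter_create(struct my_priv *priv, struct devlink *devlink)
	{
		struct devlink_health_reporter *reporter;

		reporter = devlink_health_reporter_create(devlink, &my_ops,
							  MY_GRACE_PERIOD,
							  true, priv);
		if (IS_ERR(reporter))
			return PTR_ERR(reporter);  /* priv->reporter stays NULL */

		priv->reporter = reporter;  /* only ever NULL or a valid pointer */
		return 0;
	}
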
 void mlx5e_tx_reporter_destroy(struct mlx5e_priv *priv)
 {
- 	if (IS_ERR_OR_NULL(priv->tx_reporter))
+ 	if (!priv->tx_reporter)
 		return;
 
 	devlink_health_reporter_destroy(priv->tx_reporter);
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index e89dba790a2d,02530b50609c..2211d60f4503
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@@ -1081,14 -1081,6 +1081,14 @@@ int mlx5e_ethtool_set_link_ksettings(st
 	link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
 		mlx5e_port_speed2linkmodes(mdev, speed, !ext);
 
+	if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
+	    autoneg != AUTONEG_ENABLE) {
+		netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
+			   __func__);
+		err = -EINVAL;
+		goto out;
+	}
+
 	link_modes = link_modes & eproto.cap;
 	if (!link_modes) {
 		netdev_err(priv->netdev, "%s: Not supported link mode(s) requested",
@@@ -1346,9 -1338,6 +1346,9 @@@ int mlx5e_ethtool_set_pauseparam(struc
 	struct mlx5_core_dev *mdev = priv->mdev;
 	int err;
 
+	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+		return -EOPNOTSUPP;
+
 	if (pauseparam->autoneg)
 		return -EINVAL;
@@@ -1924,21 -1913,27 +1924,27 @@@ static u32 mlx5e_get_priv_flags(struct
 	return priv->channels.params.pflags;
 }
 
- #ifndef CONFIG_MLX5_EN_RXNFC
- /* When CONFIG_MLX5_EN_RXNFC=n we only support ETHTOOL_GRXRINGS
-  * otherwise this function will be defined from en_fs_ethtool.c
-  */
 static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
 			   u32 *rule_locs)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 
- 	if (info->cmd != ETHTOOL_GRXRINGS)
- 		return -EOPNOTSUPP;
- 	/* ring_count is needed by ethtool -x */
- 	info->data = priv->channels.params.num_channels;
- 	return 0;
+ 	/* ETHTOOL_GRXRINGS is needed by ethtool -x which is not part
+ 	 * of rxnfc. We keep this logic out of mlx5e_ethtool_get_rxnfc,
+ 	 * to avoid breaking "ethtool -x" when mlx5e_ethtool_get_rxnfc
+ 	 * is compiled out via CONFIG_MLX5_EN_RXNFC=n.
+ 	 */
+ 	if (info->cmd == ETHTOOL_GRXRINGS) {
+ 		info->data = priv->channels.params.num_channels;
+ 		return 0;
+ 	}
+
+ 	return mlx5e_ethtool_get_rxnfc(dev, info, rule_locs);
+ }
+
+ static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+ {
+ 	return mlx5e_ethtool_set_rxnfc(dev, cmd);
 }
- #endif
 
 const struct ethtool_ops mlx5e_ethtool_ops = {
 	.get_drvinfo       = mlx5e_get_drvinfo,
@@@ -1959,9 -1954,7 +1965,7 @@@
 	.get_rxfh          = mlx5e_get_rxfh,
 	.set_rxfh          = mlx5e_set_rxfh,
 	.get_rxnfc         = mlx5e_get_rxnfc,
- #ifdef CONFIG_MLX5_EN_RXNFC
 	.set_rxnfc         = mlx5e_set_rxnfc,
- #endif
 	.get_tunable       = mlx5e_get_tunable,
 	.set_tunable       = mlx5e_set_tunable,
 	.get_pauseparam    = mlx5e_get_pauseparam,
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 9d5f6e56188f,9a2fcef6e7f0..0c8e847a9eee
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@@ -1130,6 -1130,8 +1130,8 @@@ static int mlx5e_alloc_txqsq(struct mlx
 	sq->stats     = &c->priv->channel_stats[c->ix].sq[tc];
 	sq->stop_room = MLX5E_SQ_STOP_ROOM;
 	INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
+ 	if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
+ 		set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
 	if (MLX5_IPSEC_DEV(c->priv->mdev))
 		set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
 	if (mlx5_accel_is_tls_device(c->priv->mdev)) {
@@@ -1321,6 -1323,7 +1323,6 @@@ err_free_txqsq
 void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
 {
 	sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
-	clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
 	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
 	netdev_tx_reset_queue(sq->txq);
 	netif_tx_start_queue(sq->txq);
@@@ -2321,7 -2324,7 +2323,7 @@@ int mlx5e_open_channels(struct mlx5e_pr
 		goto err_close_channels;
 	}
- 	if (!IS_ERR_OR_NULL(priv->tx_reporter))
+ 	if (priv->tx_reporter)
 		devlink_health_reporter_state_update(priv->tx_reporter,
 						     DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
 
@@@ -3422,7 -3425,7 +3424,7 @@@ out
 #ifdef CONFIG_MLX5_ESWITCH
 static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
 				     struct flow_cls_offload *cls_flower,
- 				     int flags)
+ 				     unsigned long flags)
 {
 	switch (cls_flower->command) {
 	case FLOW_CLS_REPLACE:
@@@ -3442,12 -3445,12 +3444,12 @@@ static int mlx5e_setup_tc_block_cb(enum
 				   void *type_data, void *cb_priv)
 {
+ 	unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
 	struct mlx5e_priv *priv = cb_priv;
 
 	switch (type) {
 	case TC_SETUP_CLSFLOWER:
- 		return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS |
- 						 MLX5E_TC_NIC_OFFLOAD);
+ 		return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
 	default:
 		return -EOPNOTSUPP;
 	}
@@@ -3640,7 -3643,7 +3642,7 @@@ static int set_feature_tc_num_filters(s
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
- 	if (!enable && mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD)) {
+ 	if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
 		netdev_err(netdev,
 			   "Active offloaded tc filters, can't turn hw_tc_offload off\n");
 		return -EINVAL;
@@@ -3781,9 -3784,10 +3783,10 @@@ static netdev_features_t mlx5e_fix_feat
 		netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
 	}
 	if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
- 		features &= ~NETIF_F_LRO;
- 		if (params->lro_en)
+ 		if (features & NETIF_F_LRO) {
 			netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
+ 			features &= ~NETIF_F_LRO;
+ 		}
 	}
 
 	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
@@@ -3950,7 -3954,8 +3953,8 @@@ int mlx5e_hwstamp_set(struct mlx5e_pri
 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
 	case HWTSTAMP_FILTER_NTP_ALL:
 		/* Disable CQE compression */
- 		netdev_warn(priv->netdev, "Disabling cqe compression");
+ 		if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
+ 			netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
 		err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
 		if (err) {
 			netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
@@@ -4768,7 -4773,7 +4772,7 @@@ void mlx5e_build_nic_params(struct mlx5
 	mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
 
 	/* TX inline */
- 	params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
+ 	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
 
 	/* RSS */
 	mlx5e_build_rss_params(rss_params, params->num_channels);
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index deeb65da99f3,5be3da621499..00473d09a5c5
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@@ -38,6 -38,8 +38,8 @@@
 #include <linux/mlx5/fs.h>
 #include <linux/mlx5/device.h>
 #include <linux/rhashtable.h>
+ #include <linux/refcount.h>
+ #include <linux/completion.h>
 #include <net/tc_act/tc_mirred.h>
 #include <net/tc_act/tc_vlan.h>
 #include <net/tc_act/tc_tunnel_key.h>
@@@ -65,19 -67,20 +67,20 @@@ struct mlx5_nic_flow_attr
 	struct mlx5_fc		*counter;
 };
- #define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)
+ #define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)
 
 enum {
- 	MLX5E_TC_FLOW_INGRESS	= MLX5E_TC_INGRESS,
- 	MLX5E_TC_FLOW_EGRESS	= MLX5E_TC_EGRESS,
- 	MLX5E_TC_FLOW_ESWITCH	= MLX5E_TC_ESW_OFFLOAD,
- 	MLX5E_TC_FLOW_NIC	= MLX5E_TC_NIC_OFFLOAD,
- 	MLX5E_TC_FLOW_OFFLOADED	= BIT(MLX5E_TC_FLOW_BASE),
- 	MLX5E_TC_FLOW_HAIRPIN	= BIT(MLX5E_TC_FLOW_BASE + 1),
- 	MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 2),
- 	MLX5E_TC_FLOW_SLOW	  = BIT(MLX5E_TC_FLOW_BASE + 3),
- 	MLX5E_TC_FLOW_DUP         = BIT(MLX5E_TC_FLOW_BASE + 4),
- 	MLX5E_TC_FLOW_NOT_READY   = BIT(MLX5E_TC_FLOW_BASE + 5),
+ 	MLX5E_TC_FLOW_FLAG_INGRESS	= MLX5E_TC_FLAG_INGRESS_BIT,
+ 	MLX5E_TC_FLOW_FLAG_EGRESS	= MLX5E_TC_FLAG_EGRESS_BIT,
+ 	MLX5E_TC_FLOW_FLAG_ESWITCH	= MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
+ 	MLX5E_TC_FLOW_FLAG_NIC		= MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
+ 	MLX5E_TC_FLOW_FLAG_OFFLOADED	= MLX5E_TC_FLOW_BASE,
+ 	MLX5E_TC_FLOW_FLAG_HAIRPIN	= MLX5E_TC_FLOW_BASE + 1,
+ 	MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS	= MLX5E_TC_FLOW_BASE + 2,
+ 	MLX5E_TC_FLOW_FLAG_SLOW		= MLX5E_TC_FLOW_BASE + 3,
+ 	MLX5E_TC_FLOW_FLAG_DUP		= MLX5E_TC_FLOW_BASE + 4,
+ 	MLX5E_TC_FLOW_FLAG_NOT_READY	= MLX5E_TC_FLOW_BASE + 5,
+ 	MLX5E_TC_FLOW_FLAG_DELETED	= MLX5E_TC_FLOW_BASE + 6,
 };
 
 #define MLX5E_TC_MAX_SPLITS 1
@@@ -100,6 -103,7 +103,7 @@@
  * container_of(helper item, containing struct type, helper field[index])
  */
 struct encap_flow_item {
+ 	struct mlx5e_encap_entry *e; /* attached encap instance */
 	struct list_head list;
 	int index;
 };
@@@ -108,7 -112,7 +112,7 @@@ struct mlx5e_tc_flow
 	struct rhash_head	node;
 	struct mlx5e_priv	*priv;
 	u64			cookie;
- 	u16			flags;
+ 	unsigned long		flags;
 	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
 	/* Flow can be associated with multiple encap IDs.
 	 * The number of encaps is bounded by the number of supported
@@@ -116,10 -120,14 +120,14 @@@
 	 */
 	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
 	struct mlx5e_tc_flow    *peer_flow;
+ 	struct mlx5e_mod_hdr_entry *mh; /* attached mod header instance */
 	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
+ 	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
 	struct list_head	hairpin; /* flows sharing the same hairpin */
 	struct list_head	peer;    /* flows with peer flow */
 	struct list_head	unready; /* flows not ready to be offloaded (e.g due to missing route) */
+ 	refcount_t		refcnt;
+ 	struct rcu_head		rcu_head;
 	union {
 		struct mlx5_esw_flow_attr esw_attr[0];
 		struct mlx5_nic_flow_attr nic_attr[0];
@@@ -157,12 -165,20 +165,20 @@@ struct mlx5e_hairpin_entry
 	/* a node of a hash table which keeps all the  hairpin entries */
 	struct hlist_node hairpin_hlist;
 
+ 	/* protects flows list */
+ 	spinlock_t flows_lock;
 	/* flows sharing the same hairpin */
 	struct list_head flows;
+ 	/* hpe's that were not fully initialized when dead peer update event
+ 	 * function traversed them.
+ 	 */
+ 	struct list_head dead_peer_wait_list;
 
 	u16 peer_vhca_id;
 	u8 prio;
 	struct mlx5e_hairpin *hp;
+ 	refcount_t refcnt;
+ 	struct completion res_ready;
 };
 
 struct mod_hdr_key {
@@@ -174,16 -190,93 +190,93 @@@ struct mlx5e_mod_hdr_entry
 	/* a node of a hash table which keeps all the mod_hdr entries */
 	struct hlist_node mod_hdr_hlist;
 
+ 	/* protects flows list */
+ 	spinlock_t flows_lock;
 	/* flows sharing the same mod_hdr entry */
 	struct list_head flows;
struct mod_hdr_key key;
 	u32 mod_hdr_id;
+
+ 	refcount_t refcnt;
+ 	struct completion res_ready;
+ 	int compl_result;
 };
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
+ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
+ 			      struct mlx5e_tc_flow *flow);
+
+ static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
+ {
+ 	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
+ 		return ERR_PTR(-EINVAL);
+ 	return flow;
+ }
+
+ static void mlx5e_flow_put(struct mlx5e_priv *priv,
+ 			   struct mlx5e_tc_flow *flow)
+ {
+ 	if (refcount_dec_and_test(&flow->refcnt)) {
+ 		mlx5e_tc_del_flow(priv, flow);
+ 		kfree_rcu(flow, rcu_head);
+ 	}
+ }
+
+ static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
+ {
+ 	/* Complete all memory stores before setting bit. */
+ 	smp_mb__before_atomic();
+ 	set_bit(flag, &flow->flags);
+ }
+
+ #define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)
+
+ static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
+ 				     unsigned long flag)
+ {
+ 	/* test_and_set_bit() provides all necessary barriers */
+ 	return test_and_set_bit(flag, &flow->flags);
+ }
+
+ #define flow_flag_test_and_set(flow, flag)			\
+ 	__flow_flag_test_and_set(flow,				\
+ 				 MLX5E_TC_FLOW_FLAG_##flag)
+
+ static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
+ {
+ 	/* Complete all memory stores before clearing bit. */
+ 	smp_mb__before_atomic();
+ 	clear_bit(flag, &flow->flags);
+ }
+
+ #define flow_flag_clear(flow, flag) __flow_flag_clear(flow,	\
+ 						      MLX5E_TC_FLOW_FLAG_##flag)
+
+ static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
+ {
+ 	bool ret = test_bit(flag, &flow->flags);
+
+ 	/* Read fields of flow structure only after checking flags. */
+ 	smp_mb__after_atomic();
+ 	return ret;
+ }
+
+ #define flow_flag_test(flow, flag) __flow_flag_test(flow,	\
+ 						    MLX5E_TC_FLOW_FLAG_##flag)
+
+ static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
+ {
+ 	return flow_flag_test(flow, ESWITCH);
+ }
+
+ static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
+ {
+ 	return flow_flag_test(flow, OFFLOADED);
+ }
+
 static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
 {
 	return jhash(key->actions,
@@@ -199,15 -292,62 +292,62 @@@ static inline int cmp_mod_hdr_info(stru
 	return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
 }
 
+ static struct mod_hdr_tbl *
+ get_mod_hdr_table(struct mlx5e_priv *priv, int namespace)
+ {
+ 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ 	return namespace == MLX5_FLOW_NAMESPACE_FDB ? &esw->offloads.mod_hdr :
+ 		&priv->fs.tc.mod_hdr;
+ }
+
+ static struct mlx5e_mod_hdr_entry *
+ mlx5e_mod_hdr_get(struct mod_hdr_tbl *tbl, struct mod_hdr_key *key, u32 hash_key)
+ {
+ 	struct mlx5e_mod_hdr_entry *mh, *found = NULL;
+
+ 	hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) {
+ 		if (!cmp_mod_hdr_info(&mh->key, key)) {
+ 			refcount_inc(&mh->refcnt);
+ 			found = mh;
+ 			break;
+ 		}
+ 	}
+
+ 	return found;
+ }
+
+ static void mlx5e_mod_hdr_put(struct mlx5e_priv *priv,
+ 			      struct mlx5e_mod_hdr_entry *mh,
+ 			      int namespace)
+ {
+ 	struct mod_hdr_tbl *tbl = get_mod_hdr_table(priv, namespace);
+
+ 	if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock))
+ 		return;
+ 	hash_del(&mh->mod_hdr_hlist);
+ 	mutex_unlock(&tbl->lock);
+
+ 	WARN_ON(!list_empty(&mh->flows));
+ 	if (mh->compl_result > 0)
+ 		mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
+
+ 	kfree(mh);
+ }
+
+ static int get_flow_name_space(struct mlx5e_tc_flow *flow)
+ {
+ 	return mlx5e_is_eswitch_flow(flow) ?
+ 		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
+ }
+
 static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
 				struct mlx5e_tc_flow *flow,
 				struct mlx5e_tc_flow_parse_attr *parse_attr)
 {
- 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	int num_actions, actions_size, namespace, err;
 	struct mlx5e_mod_hdr_entry *mh;
+ 	struct mod_hdr_tbl *tbl;
 	struct mod_hdr_key key;
- 	bool found = false;
 	u32 hash_key;
 
 	num_actions  = parse_attr->num_mod_hdr_actions;
@@@ -218,80 -358,82 +358,82 @@@
hash_key = hash_mod_hdr_info(&key);
- 	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
- 		namespace = MLX5_FLOW_NAMESPACE_FDB;
- 		hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
- 				       mod_hdr_hlist, hash_key) {
- 			if (!cmp_mod_hdr_info(&mh->key, &key)) {
- 				found = true;
- 				break;
- 			}
- 		}
- 	} else {
- 		namespace = MLX5_FLOW_NAMESPACE_KERNEL;
- 		hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
- 				       mod_hdr_hlist, hash_key) {
- 			if (!cmp_mod_hdr_info(&mh->key, &key)) {
- 				found = true;
- 				break;
- 			}
- 		}
- 	}
+ 	namespace = get_flow_name_space(flow);
+ 	tbl = get_mod_hdr_table(priv, namespace);
+
+ 	mutex_lock(&tbl->lock);
+ 	mh = mlx5e_mod_hdr_get(tbl, &key, hash_key);
+ 	if (mh) {
+ 		mutex_unlock(&tbl->lock);
+ 		wait_for_completion(&mh->res_ready);
 
- 	if (found)
+ 		if (mh->compl_result < 0) {
+ 			err = -EREMOTEIO;
+ 			goto attach_header_err;
+ 		}
 		goto attach_flow;
+ 	}
 
 	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
- 	if (!mh)
+ 	if (!mh) {
+ 		mutex_unlock(&tbl->lock);
 		return -ENOMEM;
+ 	}
 
 	mh->key.actions = (void *)mh + sizeof(*mh);
 	memcpy(mh->key.actions, key.actions, actions_size);
 	mh->key.num_actions = num_actions;
+ 	spin_lock_init(&mh->flows_lock);
 	INIT_LIST_HEAD(&mh->flows);
+ 	refcount_set(&mh->refcnt, 1);
+ 	init_completion(&mh->res_ready);
+
+ 	hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key);
+ 	mutex_unlock(&tbl->lock);
 
 	err = mlx5_modify_header_alloc(priv->mdev, namespace,
 				       mh->key.num_actions,
 				       mh->key.actions,
 				       &mh->mod_hdr_id);
- 	if (err)
- 		goto out_err;
-
- 	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
- 		hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
- 	else
- 		hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
+ 	if (err) {
+ 		mh->compl_result = err;
+ 		goto alloc_header_err;
+ 	}
+ 	mh->compl_result = 1;
+ 	complete_all(&mh->res_ready);
 
 attach_flow:
+ 	flow->mh = mh;
+ 	spin_lock(&mh->flows_lock);
 	list_add(&flow->mod_hdr, &mh->flows);
- 	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+ 	spin_unlock(&mh->flows_lock);
+ 	if (mlx5e_is_eswitch_flow(flow))
 		flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
 	else
 		flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;
return 0;
- out_err:
- 	kfree(mh);
+ alloc_header_err:
+ 	complete_all(&mh->res_ready);
+ attach_header_err:
+ 	mlx5e_mod_hdr_put(priv, mh, namespace);
 	return err;
 }
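
The attach path above is the "first caller allocates, concurrent callers wait" idiom: a placeholder entry is hashed under tbl->lock before the sleeping firmware call, and a completion plus compl_result tell waiters whether that allocation worked. Stripped to a skeleton; struct entry, lookup(), entry_alloc(), hw_alloc() and entry_put() (the put half is sketched after the hairpin teardown further down) are illustrative, not the driver's real names:

	static struct entry *entry_get(struct table *tbl, struct key *key)
	{
		struct entry *e;

		mutex_lock(&tbl->lock);
		e = lookup(tbl, key);		/* takes a reference on hit */
		if (e) {
			mutex_unlock(&tbl->lock);
			wait_for_completion(&e->res_ready);
			if (e->compl_result < 0) {	/* first caller failed */
				entry_put(tbl, e);
				return ERR_PTR(-EREMOTEIO);
			}
			return e;
		}

		e = entry_alloc(key);	/* refcount = 1, completion initialized */
		if (!e) {
			mutex_unlock(&tbl->lock);
			return ERR_PTR(-ENOMEM);
		}
		hash_add(tbl->hlist, &e->node, key->hash);
		mutex_unlock(&tbl->lock);	/* visible before the slow call */

		if (hw_alloc(e)) {		/* sleeping call, outside the lock */
			e->compl_result = -EREMOTEIO;
			complete_all(&e->res_ready);	/* wake waiters, bad news */
			entry_put(tbl, e);		/* unhash and free via refcount */
			return ERR_PTR(-EREMOTEIO);
		}
		e->compl_result = 1;
		complete_all(&e->res_ready);
		return e;
	}
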
 static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
 				 struct mlx5e_tc_flow *flow)
 {
- 	struct list_head *next = flow->mod_hdr.next;
+ 	/* flow wasn't fully initialized */
+ 	if (!flow->mh)
+ 		return;
 
+ 	spin_lock(&flow->mh->flows_lock);
 	list_del(&flow->mod_hdr);
+ 	spin_unlock(&flow->mh->flows_lock);
 
- 	if (list_empty(next)) {
- 		struct mlx5e_mod_hdr_entry *mh;
-
- 		mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);
-
- 		mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
- 		hash_del(&mh->mod_hdr_hlist);
- 		kfree(mh);
- 	}
+ 	mlx5e_mod_hdr_put(priv, flow->mh, get_flow_name_space(flow));
+ 	flow->mh = NULL;
 }
 static
@@@ -555,13 -697,35 +697,35 @@@ static struct mlx5e_hairpin_entry *mlx5
 
 	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
 			       hairpin_hlist, hash_key) {
- 		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio)
+ 		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
+ 			refcount_inc(&hpe->refcnt);
 			return hpe;
+ 		}
 	}
 
 	return NULL;
 }
 
+ static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
+ 			      struct mlx5e_hairpin_entry *hpe)
+ {
+ 	/* no more hairpin flows for us, release the hairpin pair */
+ 	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
+ 		return;
+ 	hash_del(&hpe->hairpin_hlist);
+ 	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+
+ 	if (!IS_ERR_OR_NULL(hpe->hp)) {
+ 		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
+ 			   dev_name(hpe->hp->pair->peer_mdev->device));
+
+ 		mlx5e_hairpin_destroy(hpe->hp);
+ 	}
+
+ 	WARN_ON(!list_empty(&hpe->flows));
+ 	kfree(hpe);
+ }
+
 #define UNKNOWN_MATCH_PRIO 8
 
 static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
@@@ -627,17 -791,37 +791,37 @@@ static int mlx5e_hairpin_flow_add(struc
 				  extack);
 	if (err)
 		return err;
+
+ 	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
 	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
- 	if (hpe)
+ 	if (hpe) {
+ 		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
+ 		wait_for_completion(&hpe->res_ready);
+
+ 		if (IS_ERR(hpe->hp)) {
+ 			err = -EREMOTEIO;
+ 			goto out_err;
+ 		}
 		goto attach_flow;
+ 	}
 
 	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
- 	if (!hpe)
+ 	if (!hpe) {
+ 		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
 		return -ENOMEM;
+ 	}
 
+ 	spin_lock_init(&hpe->flows_lock);
 	INIT_LIST_HEAD(&hpe->flows);
+ 	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
 	hpe->peer_vhca_id = peer_id;
 	hpe->prio = match_prio;
+ 	refcount_set(&hpe->refcnt, 1);
+ 	init_completion(&hpe->res_ready);
+
+ 	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
+ 		 hash_hairpin_info(peer_id, match_prio));
+ 	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
 
 	params.log_data_size = 15;
 	params.log_data_size = min_t(u8, params.log_data_size,
@@@ -659,9 -843,11 +843,11 @@@
 	params.num_channels = link_speed64;
 
 	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
+ 	hpe->hp = hp;
+ 	complete_all(&hpe->res_ready);
 	if (IS_ERR(hp)) {
 		err = PTR_ERR(hp);
- 		goto create_hairpin_err;
+ 		goto out_err;
 	}
 
 	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
@@@ -669,46 -855,39 +855,39 @@@
 		   dev_name(hp->pair->peer_mdev->device),
 		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
 
- 	hpe->hp = hp;
- 	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
- 		 hash_hairpin_info(peer_id, match_prio));
-
 attach_flow:
 	if (hpe->hp->num_channels > 1) {
- 		flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS;
+ 		flow_flag_set(flow, HAIRPIN_RSS);
 		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
 	} else {
 		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
 	}
+
+ 	flow->hpe = hpe;
+ 	spin_lock(&hpe->flows_lock);
 	list_add(&flow->hairpin, &hpe->flows);
+ 	spin_unlock(&hpe->flows_lock);
return 0;
- create_hairpin_err:
- 	kfree(hpe);
+ out_err:
+ 	mlx5e_hairpin_put(priv, hpe);
 	return err;
 }
 
 static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
 				   struct mlx5e_tc_flow *flow)
 {
- 	struct list_head *next = flow->hairpin.next;
+ 	/* flow wasn't fully initialized */
+ 	if (!flow->hpe)
+ 		return;
 
+ 	spin_lock(&flow->hpe->flows_lock);
 	list_del(&flow->hairpin);
+ 	spin_unlock(&flow->hpe->flows_lock);
 
- 	/* no more hairpin flows for us, release the hairpin pair */
- 	if (list_empty(next)) {
- 		struct mlx5e_hairpin_entry *hpe;
-
- 		hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);
-
- 		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
- 			   dev_name(hpe->hp->pair->peer_mdev->device));
-
- 		mlx5e_hairpin_destroy(hpe->hp);
- 		hash_del(&hpe->hairpin_hlist);
- 		kfree(hpe);
- 	}
+ 	mlx5e_hairpin_put(priv, flow->hpe);
+ 	flow->hpe = NULL;
 }
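
mlx5e_hairpin_put() above leans on refcount_dec_and_mutex_lock(), which acquires the mutex only when the count actually drops to zero: the last user unhashes the entry before any new lookup can find it, then releases the hardware resources outside the lock. The teardown half of the idiom, again with illustrative types:

	static void entry_put(struct table *tbl, struct entry *e)
	{
		/* returns true, with tbl->lock held, only for the final reference */
		if (!refcount_dec_and_mutex_lock(&e->refcnt, &tbl->lock))
			return;
		hash_del(&e->node);	/* no new lookup can find it from here on */
		mutex_unlock(&tbl->lock);

		if (!IS_ERR_OR_NULL(e->res))
			release_hw_res(e->res);	/* hypothetical; may sleep here */
		WARN_ON(!list_empty(&e->flows));
		kfree(e);
	}
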
 static int
@@@ -727,18 -906,17 +906,17 @@@ mlx5e_tc_add_nic_flow(struct mlx5e_pri
 		.flags    = FLOW_ACT_NO_APPEND,
 	};
 	struct mlx5_fc *counter = NULL;
- 	bool table_created = false;
 	int err, dest_ix = 0;
 
 	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
 	flow_context->flow_tag = attr->flow_tag;
 
- 	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
+ 	if (flow_flag_test(flow, HAIRPIN)) {
 		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
- 		if (err) {
- 			goto err_add_hairpin_flow;
- 		}
- 		if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
+ 		if (err)
+ 			return err;
+
+ 		if (flow_flag_test(flow, HAIRPIN_RSS)) {
 			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 			dest[dest_ix].ft = attr->hairpin_ft;
 		} else {
@@@ -754,10 -932,9 +932,9 @@@
 
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
 		counter = mlx5_fc_create(dev, true);
- 		if (IS_ERR(counter)) {
- 			err = PTR_ERR(counter);
- 			goto err_fc_create;
- 		}
+ 		if (IS_ERR(counter))
+ 			return PTR_ERR(counter);
+
 		dest[dest_ix].type         = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
 		dest[dest_ix].counter_id   = mlx5_fc_id(counter);
 		dest_ix++;
@@@ -769,9 -946,10 +946,10 @@@
 		flow_act.modify_id = attr->mod_hdr_id;
 		kfree(parse_attr->mod_hdr_actions);
 		if (err)
- 			goto err_create_mod_hdr_id;
+ 			return err;
 	}
 
+ 	mutex_lock(&priv->fs.tc.t_lock);
 	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
 		int tc_grp_size, tc_tbl_size;
 		u32 max_flow_counter;
@@@ -791,15 -969,13 +969,13 @@@
 							    MLX5E_TC_TABLE_NUM_GROUPS,
 							    MLX5E_TC_FT_LEVEL, 0);
 		if (IS_ERR(priv->fs.tc.t)) {
+ 			mutex_unlock(&priv->fs.tc.t_lock);
 			NL_SET_ERR_MSG_MOD(extack,
 					   "Failed to create tc offload table\n");
 			netdev_err(priv->netdev,
 				   "Failed to create tc offload table\n");
- 			err = PTR_ERR(priv->fs.tc.t);
- 			goto err_create_ft;
+ 			return PTR_ERR(priv->fs.tc.t);
 		}
-
- 		table_created = true;
 	}
 
 	if (attr->match_level != MLX5_MATCH_NONE)
@@@ -807,29 -983,12 +983,12 @@@
 
 	flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
 					    &flow_act, dest, dest_ix);
+ 	mutex_unlock(&priv->fs.tc.t_lock);
 
- 	if (IS_ERR(flow->rule[0])) {
- 		err = PTR_ERR(flow->rule[0]);
- 		goto err_add_rule;
- 	}
+ 	if (IS_ERR(flow->rule[0]))
+ 		return PTR_ERR(flow->rule[0]);
 
 	return 0;
-
- err_add_rule:
- 	if (table_created) {
- 		mlx5_destroy_flow_table(priv->fs.tc.t);
- 		priv->fs.tc.t = NULL;
- 	}
- err_create_ft:
- 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- 		mlx5e_detach_mod_hdr(priv, flow);
- err_create_mod_hdr_id:
- 	mlx5_fc_destroy(dev, counter);
- err_fc_create:
- 	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
- 		mlx5e_hairpin_flow_del(priv, flow);
- err_add_hairpin_flow:
- 	return err;
 }
 static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
@@@ -839,18 -998,21 +998,21 @@@
 	struct mlx5_fc *counter = NULL;
 
 	counter = attr->counter;
- 	mlx5_del_flow_rules(flow->rule[0]);
+ 	if (!IS_ERR_OR_NULL(flow->rule[0]))
+ 		mlx5_del_flow_rules(flow->rule[0]);
 	mlx5_fc_destroy(priv->mdev, counter);
 
- 	if (!mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD)  && priv->fs.tc.t) {
+ 	mutex_lock(&priv->fs.tc.t_lock);
+ 	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) {
 		mlx5_destroy_flow_table(priv->fs.tc.t);
 		priv->fs.tc.t = NULL;
 	}
+ 	mutex_unlock(&priv->fs.tc.t_lock);
 
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
 		mlx5e_detach_mod_hdr(priv, flow);
 
- 	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
+ 	if (flow_flag_test(flow, HAIRPIN))
 		mlx5e_hairpin_flow_del(priv, flow);
 }
@@@ -885,7 -1047,6 +1047,6 @@@ mlx5e_tc_offload_fdb_rules(struct mlx5_
 		}
 	}
 
- 	flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
 	return rule;
 }
 
@@@ -894,7 -1055,7 +1055,7 @@@ mlx5e_tc_unoffload_fdb_rules(struct mlx
 			     struct mlx5e_tc_flow *flow,
 			     struct mlx5_esw_flow_attr *attr)
 {
- 	flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
+ 	flow_flag_clear(flow, OFFLOADED);
 
 	if (attr->split_count)
 		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
@@@ -917,7 -1078,7 +1078,7 @@@ mlx5e_tc_offload_to_slow_path(struct ml
 
 	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
 	if (!IS_ERR(rule))
- 		flow->flags |= MLX5E_TC_FLOW_SLOW;
+ 		flow_flag_set(flow, SLOW);
 
 	return rule;
 }
@@@ -932,7 -1093,26 +1093,26 @@@ mlx5e_tc_unoffload_from_slow_path(struc
 	slow_attr->split_count = 0;
 	slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
 	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
- 	flow->flags &= ~MLX5E_TC_FLOW_SLOW;
+ 	flow_flag_clear(flow, SLOW);
+ }
+
+ /* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
+  * function.
+  */
+ static void unready_flow_add(struct mlx5e_tc_flow *flow,
+ 			     struct list_head *unready_flows)
+ {
+ 	flow_flag_set(flow, NOT_READY);
+ 	list_add_tail(&flow->unready, unready_flows);
+ }
+
+ /* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
+  * function.
+  */
+ static void unready_flow_del(struct mlx5e_tc_flow *flow)
+ {
+ 	list_del(&flow->unready);
+ 	flow_flag_clear(flow, NOT_READY);
 }
 static void add_unready_flow(struct mlx5e_tc_flow *flow)
@@@ -945,14 -1125,24 +1125,24 @@@
 	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
 	uplink_priv = &rpriv->uplink_priv;
 
- 	flow->flags |= MLX5E_TC_FLOW_NOT_READY;
- 	list_add_tail(&flow->unready, &uplink_priv->unready_flows);
+ 	mutex_lock(&uplink_priv->unready_flows_lock);
+ 	unready_flow_add(flow, &uplink_priv->unready_flows);
+ 	mutex_unlock(&uplink_priv->unready_flows_lock);
 }
 
 static void remove_unready_flow(struct mlx5e_tc_flow *flow)
 {
- 	list_del(&flow->unready);
- 	flow->flags &= ~MLX5E_TC_FLOW_NOT_READY;
+ 	struct mlx5_rep_uplink_priv *uplink_priv;
+ 	struct mlx5e_rep_priv *rpriv;
+ 	struct mlx5_eswitch *esw;
+
+ 	esw = flow->priv->mdev->priv.eswitch;
+ 	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ 	uplink_priv = &rpriv->uplink_priv;
+
+ 	mutex_lock(&uplink_priv->unready_flows_lock);
+ 	unready_flow_del(flow);
+ 	mutex_unlock(&uplink_priv->unready_flows_lock);
 }
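
Note the split introduced here: unready_flow_add()/unready_flow_del() change the NOT_READY flag and the list membership together and document that the caller holds unready_flows_lock, while add_unready_flow()/remove_unready_flow() are thin locking wrappers. The invariant "flag set if and only if the flow is on the list" thus lives in exactly one place; schematically (struct flow and FLOW_NOT_READY are stand-ins):

	/* caller must hold the lock protecting @head */
	static void __unready_add(struct flow *f, struct list_head *head)
	{
		set_bit(FLOW_NOT_READY, &f->flags);
		list_add_tail(&f->unready, head);
	}

	static void unready_add(struct flow *f, struct list_head *head,
				struct mutex *lock)
	{
		mutex_lock(lock);
		__unready_add(f, head);
		mutex_unlock(lock);
	}
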
 static int
@@@ -980,14 -1170,12 +1170,12 @@@ mlx5e_tc_add_fdb_flow(struct mlx5e_pri
 
 	if (attr->chain > max_chain) {
 		NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
- 		err = -EOPNOTSUPP;
- 		goto err_max_prio_chain;
+ 		return -EOPNOTSUPP;
 	}
 
 	if (attr->prio > max_prio) {
 		NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
- 		err = -EOPNOTSUPP;
- 		goto err_max_prio_chain;
+ 		return -EOPNOTSUPP;
 	}
 
 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
@@@ -1002,7 -1190,7 +1190,7 @@@
 		err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
 					 extack, &encap_dev, &encap_valid);
 		if (err)
- 			goto err_attach_encap;
+ 			return err;
 
 		out_priv = netdev_priv(encap_dev);
 		rpriv = out_priv->ppriv;
@@@ -1012,21 -1200,19 +1200,19 @@@
 
 	err = mlx5_eswitch_add_vlan_action(esw, attr);
 	if (err)
- 		goto err_add_vlan;
+ 		return err;
 
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
 		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
 		kfree(parse_attr->mod_hdr_actions);
 		if (err)
- 			goto err_mod_hdr;
+ 			return err;
 	}
 
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
 		counter = mlx5_fc_create(attr->counter_dev, true);
- 		if (IS_ERR(counter)) {
- 			err = PTR_ERR(counter);
- 			goto err_create_counter;
- 		}
+ 		if (IS_ERR(counter))
+ 			return PTR_ERR(counter);
 
 		attr->counter = counter;
 	}
@@@ -1044,27 -1230,12 +1230,12 @@@
 		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
 	}
 
- 	if (IS_ERR(flow->rule[0])) {
- 		err = PTR_ERR(flow->rule[0]);
- 		goto err_add_rule;
- 	}
+ 	if (IS_ERR(flow->rule[0]))
+ 		return PTR_ERR(flow->rule[0]);
+ 	else
+ 		flow_flag_set(flow, OFFLOADED);
 
 	return 0;
-
- err_add_rule:
- 	mlx5_fc_destroy(attr->counter_dev, counter);
- err_create_counter:
- 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- 		mlx5e_detach_mod_hdr(priv, flow);
- err_mod_hdr:
- 	mlx5_eswitch_del_vlan_action(esw, attr);
- err_add_vlan:
- 	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
- 		if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
- 			mlx5e_detach_encap(priv, flow, out_index);
- err_attach_encap:
- err_max_prio_chain:
- 	return err;
 }
 static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
@@@ -1088,14 -1259,14 +1259,14 @@@ static void mlx5e_tc_del_fdb_flow(struc
 	struct mlx5_esw_flow_attr slow_attr;
 	int out_index;
 
- 	if (flow->flags & MLX5E_TC_FLOW_NOT_READY) {
+ 	if (flow_flag_test(flow, NOT_READY)) {
 		remove_unready_flow(flow);
 		kvfree(attr->parse_attr);
 		return;
 	}
 
- 	if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
- 		if (flow->flags & MLX5E_TC_FLOW_SLOW)
+ 	if (mlx5e_is_offloaded_flow(flow)) {
+ 		if (flow_flag_test(flow, SLOW))
 			mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
 		else
 			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
@@@ -1123,9 -1294,9 +1294,9 @@@ void mlx5e_tc_encap_flows_add(struct ml
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct mlx5_esw_flow_attr slow_attr, *esw_attr;
+ 	struct encap_flow_item *efi, *tmp;
 	struct mlx5_flow_handle *rule;
 	struct mlx5_flow_spec *spec;
- 	struct encap_flow_item *efi;
 	struct mlx5e_tc_flow *flow;
 	int err;
 
@@@ -1142,11 -1313,14 +1313,14 @@@
 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
 	mlx5e_rep_queue_neigh_stats_work(priv);
 
- 	list_for_each_entry(efi, &e->flows, list) {
+ 	list_for_each_entry_safe(efi, tmp, &e->flows, list) {
 		bool all_flow_encaps_valid = true;
 		int i;
 
 		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
+ 		if (IS_ERR(mlx5e_flow_get(flow)))
+ 			continue;
+
 		esw_attr = flow->esw_attr;
 		spec = &esw_attr->parse_attr->spec;
 
@@@ -1166,19 -1340,23 +1340,23 @@@
 		}
 		/* Do not offload flows with unresolved neighbors */
 		if (!all_flow_encaps_valid)
- 			continue;
+ 			goto loop_cont;
 		/* update from slow path rule to encap rule */
 		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
 		if (IS_ERR(rule)) {
 			err = PTR_ERR(rule);
 			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
 				       err);
- 			continue;
+ 			goto loop_cont;
 		}
 
 		mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
- 		flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when slow path rule removed */
 		flow->rule[0] = rule;
+ 		/* was unset when slow path rule removed */
+ 		flow_flag_set(flow, OFFLOADED);
+
+ loop_cont:
+ 		mlx5e_flow_put(priv, flow);
 	}
 }
@@@ -1187,14 -1365,17 +1365,17 @@@ void mlx5e_tc_encap_flows_del(struct ml { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_esw_flow_attr slow_attr; + struct encap_flow_item *efi, *tmp; struct mlx5_flow_handle *rule; struct mlx5_flow_spec *spec; - struct encap_flow_item *efi; struct mlx5e_tc_flow *flow; int err;
- list_for_each_entry(efi, &e->flows, list) { + list_for_each_entry_safe(efi, tmp, &e->flows, list) { flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]); + if (IS_ERR(mlx5e_flow_get(flow))) + continue; + spec = &flow->esw_attr->parse_attr->spec;
/* update from encap rule to slow path rule */ @@@ -1206,12 -1387,16 +1387,16 @@@ err = PTR_ERR(rule); mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n", err); - continue; + goto loop_cont; }
mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr); - flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when fast path rule removed */ flow->rule[0] = rule; + /* was unset when fast path rule removed */ + flow_flag_set(flow, OFFLOADED); + + loop_cont: + mlx5e_flow_put(priv, flow); }
/* we know that the encap is valid */ @@@ -1221,7 -1406,7 +1406,7 @@@
static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow) { - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) + if (mlx5e_is_eswitch_flow(flow)) return flow->esw_attr->counter; else return flow->nic_attr->counter; @@@ -1248,21 -1433,32 +1433,32 @@@ void mlx5e_tc_update_neigh_used_value(s return;
list_for_each_entry(e, &nhe->encap_list, encap_list) { - struct encap_flow_item *efi; - if (!(e->flags & MLX5_ENCAP_ENTRY_VALID)) + struct encap_flow_item *efi, *tmp; + + if (!(e->flags & MLX5_ENCAP_ENTRY_VALID) || + !mlx5e_encap_take(e)) continue; - list_for_each_entry(efi, &e->flows, list) { + + list_for_each_entry_safe(efi, tmp, &e->flows, list) { flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]); - if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { + if (IS_ERR(mlx5e_flow_get(flow))) + continue; + + if (mlx5e_is_offloaded_flow(flow)) { counter = mlx5e_tc_get_counter(flow); lastuse = mlx5_fc_query_lastuse(counter); if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) { + mlx5e_flow_put(netdev_priv(e->out_dev), flow); neigh_used = true; break; } } + + mlx5e_flow_put(netdev_priv(e->out_dev), flow); } + + mlx5e_encap_put(netdev_priv(e->out_dev), e); if (neigh_used) break; } @@@ -1282,40 -1478,66 +1478,66 @@@ } }
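Both walks above (mlx5e_encap_take() on each encap entry, mlx5e_flow_get() on each flow) follow the same rule: an object is used only if a reference can still be taken, and a refcount already at zero means the object is mid-teardown and must be treated as absent. This models refcount_inc_not_zero() semantics; a hedged userspace analogue with illustrative names:

#include <stdatomic.h>
#include <stddef.h>

struct entry {
	atomic_int refcnt;
	unsigned long key;
	struct entry *next;
};

/* Take a reference unless the count has already dropped to zero. */
static int entry_take(struct entry *e)
{
	int old = atomic_load(&e->refcnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&e->refcnt, &old, old + 1))
			return 1;	/* reference taken */
	}
	return 0;	/* entry is being torn down, skip it */
}

static struct entry *bucket_lookup(struct entry *head, unsigned long key)
{
	for (struct entry *e = head; e; e = e->next)
		if (e->key == key && entry_take(e))
			return e;	/* caller must put the reference */
	return NULL;
}

int main(void)
{
	struct entry e = { .key = 42, .next = NULL };

	atomic_init(&e.refcnt, 1);
	return bucket_lookup(&e, 42) ? 0 : 1;
}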
- static void mlx5e_detach_encap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, int out_index) + static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e) { - struct list_head *next = flow->encaps[out_index].list.next; + WARN_ON(!list_empty(&e->flows)); + mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
- list_del(&flow->encaps[out_index].list); - if (list_empty(next)) { - struct mlx5e_encap_entry *e; + if (e->flags & MLX5_ENCAP_ENTRY_VALID) + mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
- e = list_entry(next, struct mlx5e_encap_entry, flows); - mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); + kfree(e->encap_header); + kfree(e); + }
- if (e->flags & MLX5_ENCAP_ENTRY_VALID) - mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id); + void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e) + { + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- hash_del_rcu(&e->encap_hlist); - kfree(e->encap_header); - kfree(e); + if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock)) + return; + hash_del_rcu(&e->encap_hlist); + mutex_unlock(&esw->offloads.encap_tbl_lock); + + mlx5e_encap_dealloc(priv, e); + } + + static void mlx5e_detach_encap(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, int out_index) + { + struct mlx5e_encap_entry *e = flow->encaps[out_index].e; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + /* flow wasn't fully initialized */ + if (!e) + return; + + mutex_lock(&esw->offloads.encap_tbl_lock); + list_del(&flow->encaps[out_index].list); + flow->encaps[out_index].e = NULL; + if (!refcount_dec_and_test(&e->refcnt)) { + mutex_unlock(&esw->offloads.encap_tbl_lock); + return; } + hash_del_rcu(&e->encap_hlist); + mutex_unlock(&esw->offloads.encap_tbl_lock); + + mlx5e_encap_dealloc(priv, e); }
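mlx5e_encap_put() above leans on refcount_dec_and_mutex_lock(): only the thread that drops the last reference acquires encap_tbl_lock, so the hash unlink and the free cannot race with concurrent users of the table. A userspace sketch of the same dec-and-lock shape, assuming readers take references under the same mutex or via inc-not-zero (names are illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;

/* Return 1 with *lock held iff this call dropped the last reference. */
static int dec_and_lock(atomic_int *ref, pthread_mutex_t *lock)
{
	int old = atomic_load(ref);

	/* fast path: not the last reference, no lock traffic */
	while (old != 1) {
		if (atomic_compare_exchange_weak(ref, &old, old - 1))
			return 0;
	}
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(ref, 1) != 1) {
		pthread_mutex_unlock(lock);	/* raced with a new taker */
		return 0;
	}
	return 1;
}

struct entry { atomic_int refcnt; };

static void entry_put(struct entry *e)
{
	if (!dec_and_lock(&e->refcnt, &tbl_lock))
		return;
	/* unlink from the table here, then drop the lock and free */
	pthread_mutex_unlock(&tbl_lock);
	free(e);
}

int main(void)
{
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return 1;
	atomic_init(&e->refcnt, 1);
	entry_put(e);	/* last put: unlink + free */
	return 0;
}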
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow) { struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
- if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) || - !(flow->flags & MLX5E_TC_FLOW_DUP)) + if (!flow_flag_test(flow, ESWITCH) || + !flow_flag_test(flow, DUP)) return;
mutex_lock(&esw->offloads.peer_mutex); list_del(&flow->peer); mutex_unlock(&esw->offloads.peer_mutex);
- flow->flags &= ~MLX5E_TC_FLOW_DUP; + flow_flag_clear(flow, DUP);
mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow); kvfree(flow->peer_flow); @@@ -1339,7 -1561,7 +1561,7 @@@ static void mlx5e_tc_del_fdb_peer_flow( static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) { - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { + if (mlx5e_is_eswitch_flow(flow)) { mlx5e_tc_del_fdb_peer_flow(flow); mlx5e_tc_del_fdb_flow(priv, flow); } else { @@@ -1480,7 -1702,7 +1702,7 @@@ static int __parse_cls_flower(struct ml struct mlx5_flow_spec *spec, struct flow_cls_offload *f, struct net_device *filter_dev, - u8 *match_level, u8 *tunnel_match_level) + u8 *inner_match_level, u8 *outer_match_level) { struct netlink_ext_ack *extack = f->common.extack; void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, @@@ -1495,9 -1717,8 +1717,9 @@@ struct flow_dissector *dissector = rule->match.dissector; u16 addr_type = 0; u8 ip_proto = 0; + u8 *match_level;
- *match_level = MLX5_MATCH_NONE; + match_level = outer_match_level;
if (dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_META) | @@@ -1525,14 -1746,12 +1747,14 @@@ }
if (mlx5e_get_tc_tun(filter_dev)) { - if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level)) + if (parse_tunnel_attr(priv, spec, f, filter_dev, + outer_match_level)) return -EOPNOTSUPP;
- /* In decap flow, header pointers should point to the inner + /* At this point, header pointers should point to the inner * headers, outer headers were already set by parse_tunnel_attr */ + match_level = inner_match_level; headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP, spec); headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP, @@@ -1834,41 -2053,37 +2056,43 @@@ static int parse_cls_flower(struct mlx5 struct flow_cls_offload *f, struct net_device *filter_dev) { + u8 inner_match_level, outer_match_level, non_tunnel_match_level; struct netlink_ext_ack *extack = f->common.extack; struct mlx5_core_dev *dev = priv->mdev; struct mlx5_eswitch *esw = dev->priv.eswitch; struct mlx5e_rep_priv *rpriv = priv->ppriv; - u8 match_level, tunnel_match_level = MLX5_MATCH_NONE; struct mlx5_eswitch_rep *rep; + bool is_eswitch_flow; int err;
- err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level); + inner_match_level = MLX5_MATCH_NONE; + outer_match_level = MLX5_MATCH_NONE; + + err = __parse_cls_flower(priv, spec, f, filter_dev, &inner_match_level, + &outer_match_level); + non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ? + outer_match_level : inner_match_level;
- if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { + is_eswitch_flow = mlx5e_is_eswitch_flow(flow); + if (!err && is_eswitch_flow) { rep = rpriv->rep; if (rep->vport != MLX5_VPORT_UPLINK && (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE && - esw->offloads.inline_mode < match_level)) { + esw->offloads.inline_mode < non_tunnel_match_level)) { NL_SET_ERR_MSG_MOD(extack, "Flow is not offloaded due to min inline setting"); netdev_warn(priv->netdev, "Flow is not offloaded due to min inline setting, required %d actual %d\n", - match_level, esw->offloads.inline_mode); + non_tunnel_match_level, esw->offloads.inline_mode); return -EOPNOTSUPP; } }
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { + if (is_eswitch_flow) { - flow->esw_attr->match_level = match_level; - flow->esw_attr->tunnel_match_level = tunnel_match_level; + flow->esw_attr->inner_match_level = inner_match_level; + flow->esw_attr->outer_match_level = outer_match_level; } else { - flow->nic_attr->match_level = match_level; + flow->nic_attr->match_level = non_tunnel_match_level; }
return err; @@@ -2385,14 -2600,15 +2609,15 @@@ static bool actions_match_supported(str { u32 actions;
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH) + if (mlx5e_is_eswitch_flow(flow)) actions = flow->esw_attr->action; else actions = flow->nic_attr->action;
- if (flow->flags & MLX5E_TC_FLOW_EGRESS && + if (flow_flag_test(flow, EGRESS) && !((actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) || - (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP))) + (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) || + (actions & MLX5_FLOW_CONTEXT_ACTION_DROP))) return false;
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) @@@ -2542,7 -2758,7 +2767,7 @@@ static int parse_tc_nic_actions(struct if (priv->netdev->netdev_ops == peer_dev->netdev_ops && same_hw_devs(priv, netdev_priv(peer_dev))) { parse_attr->mirred_ifindex[0] = peer_dev->ifindex; - flow->flags |= MLX5E_TC_FLOW_HAIRPIN; + flow_flag_set(flow, HAIRPIN); action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT; } else { @@@ -2629,6 -2845,31 +2854,31 @@@ static bool is_merged_eswitch_dev(struc
+ bool mlx5e_encap_take(struct mlx5e_encap_entry *e) + { + return refcount_inc_not_zero(&e->refcnt); + } + + static struct mlx5e_encap_entry * + mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key, + uintptr_t hash_key) + { + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_encap_entry *e; + struct encap_key e_key; + + hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, + encap_hlist, hash_key) { + e_key.ip_tun_key = &e->tun_info->key; + e_key.tc_tunnel = e->tunnel; + if (!cmp_encap_info(&e_key, key) && + mlx5e_encap_take(e)) + return e; + } + + return NULL; + } + static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, struct net_device *mirred_dev, @@@ -2641,11 -2882,10 +2891,10 @@@ struct mlx5_esw_flow_attr *attr = flow->esw_attr; struct mlx5e_tc_flow_parse_attr *parse_attr; const struct ip_tunnel_info *tun_info; - struct encap_key key, e_key; + struct encap_key key; struct mlx5e_encap_entry *e; unsigned short family; uintptr_t hash_key; - bool found = false; int err = 0;
parse_attr = attr->parse_attr; @@@ -2660,42 -2900,59 +2909,59 @@@
hash_key = hash_encap_info(&key);
- hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, - encap_hlist, hash_key) { - e_key.ip_tun_key = &e->tun_info->key; - e_key.tc_tunnel = e->tunnel; - if (!cmp_encap_info(&e_key, &key)) { - found = true; - break; - } - } + mutex_lock(&esw->offloads.encap_tbl_lock); + e = mlx5e_encap_get(priv, &key, hash_key);
/* must verify if encap is valid or not */ - if (found) + if (e) { + mutex_unlock(&esw->offloads.encap_tbl_lock); + wait_for_completion(&e->res_ready); + + /* Protect against concurrent neigh update. */ + mutex_lock(&esw->offloads.encap_tbl_lock); + if (e->compl_result) { + err = -EREMOTEIO; + goto out_err; + } goto attach_flow; + }
e = kzalloc(sizeof(*e), GFP_KERNEL); - if (!e) - return -ENOMEM; + if (!e) { + err = -ENOMEM; + goto out_err; + } + + refcount_set(&e->refcnt, 1); + init_completion(&e->res_ready);
e->tun_info = tun_info; err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack); - if (err) + if (err) { + kfree(e); + e = NULL; goto out_err; + }
INIT_LIST_HEAD(&e->flows); + hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key); + mutex_unlock(&esw->offloads.encap_tbl_lock);
if (family == AF_INET) err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e); else if (family == AF_INET6) err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
- if (err) + /* Protect against concurrent neigh update. */ + mutex_lock(&esw->offloads.encap_tbl_lock); + complete_all(&e->res_ready); + if (err) { + e->compl_result = err; goto out_err; - - hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key); + }
attach_flow: + flow->encaps[out_index].e = e; list_add(&flow->encaps[out_index].list, &e->flows); flow->encaps[out_index].index = out_index; *encap_dev = e->out_dev; @@@ -2706,11 -2963,14 +2972,14 @@@ } else { *encap_valid = false; } + mutex_unlock(&esw->offloads.encap_tbl_lock);
return err;
out_err: - kfree(e); + mutex_unlock(&esw->offloads.encap_tbl_lock); + if (e) + mlx5e_encap_put(priv, e); return err; }
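The attach path above has a classic "first caller initializes, later callers wait" shape: the entry is published in the hash before its header is resolved, init_completion()/complete_all() gate the waiters, and e->compl_result carries the outcome. Modelled with a pthread condition variable standing in for struct completion (illustrative only):

#include <pthread.h>

struct encap {
	pthread_mutex_t lock;
	pthread_cond_t res_ready;
	int done;	/* completion has fired */
	int result;	/* 0 on success, negative errno otherwise */
};

/* Called once by the thread that created the entry. */
static void encap_complete(struct encap *e, int result)
{
	pthread_mutex_lock(&e->lock);
	e->result = result;
	e->done = 1;
	pthread_cond_broadcast(&e->res_ready);
	pthread_mutex_unlock(&e->lock);
}

/* Called by every later thread that found the entry in the table. */
static int encap_wait(struct encap *e)
{
	pthread_mutex_lock(&e->lock);
	while (!e->done)
		pthread_cond_wait(&e->res_ready, &e->lock);
	pthread_mutex_unlock(&e->lock);
	return e->result;	/* mirrors e->compl_result */
}

int main(void)
{
	struct encap e = { PTHREAD_MUTEX_INITIALIZER,
			   PTHREAD_COND_INITIALIZER, 0, 0 };

	encap_complete(&e, 0);
	return encap_wait(&e);	/* returns immediately: already done */
}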
@@@ -2890,12 -3150,16 +3159,16 @@@ static int parse_tc_fdb_actions(struct if (netdev_port_same_parent_id(priv->netdev, out_dev)) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); - struct net_device *uplink_upper = netdev_master_upper_dev_get(uplink_dev); + struct net_device *uplink_upper;
+ rcu_read_lock(); + uplink_upper = + netdev_master_upper_dev_get_rcu(uplink_dev); if (uplink_upper && netif_is_lag_master(uplink_upper) && uplink_upper == out_dev) out_dev = uplink_dev; + rcu_read_unlock();
if (is_vlan_dev(out_dev)) { err = add_vlan_push_action(priv, attr, @@@ -3066,19 -3330,19 +3339,19 @@@ return 0; }
- static void get_flags(int flags, u16 *flow_flags) + static void get_flags(int flags, unsigned long *flow_flags) { - u16 __flow_flags = 0; + unsigned long __flow_flags = 0;
- if (flags & MLX5E_TC_INGRESS) - __flow_flags |= MLX5E_TC_FLOW_INGRESS; - if (flags & MLX5E_TC_EGRESS) - __flow_flags |= MLX5E_TC_FLOW_EGRESS; + if (flags & MLX5_TC_FLAG(INGRESS)) + __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS); + if (flags & MLX5_TC_FLAG(EGRESS)) + __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
- if (flags & MLX5E_TC_ESW_OFFLOAD) - __flow_flags |= MLX5E_TC_FLOW_ESWITCH; - if (flags & MLX5E_TC_NIC_OFFLOAD) - __flow_flags |= MLX5E_TC_FLOW_NIC; + if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) + __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH); + if (flags & MLX5_TC_FLAG(NIC_OFFLOAD)) + __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
*flow_flags = __flow_flags; } @@@ -3090,12 -3354,13 +3363,13 @@@ static const struct rhashtable_params t .automatic_shrinking = true, };
- static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, int flags) + static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, + unsigned long flags) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_rep_priv *uplink_rpriv;
- if (flags & MLX5E_TC_ESW_OFFLOAD) { + if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) { uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); return &uplink_rpriv->uplink_priv.tc_ht; } else /* NIC offload */ @@@ -3106,7 -3371,7 +3380,7 @@@ static bool is_peer_flow_needed(struct { struct mlx5_esw_flow_attr *attr = flow->esw_attr; bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK && - flow->flags & MLX5E_TC_FLOW_INGRESS; + flow_flag_test(flow, INGRESS); bool act_is_encap = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT); bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom, @@@ -3125,13 -3390,13 +3399,13 @@@
static int mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size, - struct flow_cls_offload *f, u16 flow_flags, + struct flow_cls_offload *f, unsigned long flow_flags, struct mlx5e_tc_flow_parse_attr **__parse_attr, struct mlx5e_tc_flow **__flow) { struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5e_tc_flow *flow; - int err; + int out_index, err;
flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL); parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL); @@@ -3143,6 -3408,11 +3417,11 @@@ flow->cookie = f->cookie; flow->flags = flow_flags; flow->priv = priv; + for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) + INIT_LIST_HEAD(&flow->encaps[out_index].list); + INIT_LIST_HEAD(&flow->mod_hdr); + INIT_LIST_HEAD(&flow->hairpin); + refcount_set(&flow->refcnt, 1);
*__flow = flow; *__parse_attr = parse_attr; @@@ -3182,7 -3452,7 +3461,7 @@@ mlx5e_flow_esw_attr_init(struct mlx5_es static struct mlx5e_tc_flow * __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, struct flow_cls_offload *f, - u16 flow_flags, + unsigned long flow_flags, struct net_device *filter_dev, struct mlx5_eswitch_rep *in_rep, struct mlx5_core_dev *in_mdev) @@@ -3193,7 -3463,7 +3472,7 @@@ struct mlx5e_tc_flow *flow; int attr_size, err;
- flow_flags |= MLX5E_TC_FLOW_ESWITCH; + flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH); attr_size = sizeof(struct mlx5_esw_flow_attr); err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags, &parse_attr, &flow); @@@ -3225,15 -3495,14 +3504,14 @@@ return flow;
err_free: - kfree(flow); - kvfree(parse_attr); + mlx5e_flow_put(priv, flow); out: return ERR_PTR(err); }
static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f, struct mlx5e_tc_flow *flow, - u16 flow_flags) + unsigned long flow_flags) { struct mlx5e_priv *priv = flow->priv, *peer_priv; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw; @@@ -3271,7 -3540,7 +3549,7 @@@ }
flow->peer_flow = peer_flow; - flow->flags |= MLX5E_TC_FLOW_DUP; + flow_flag_set(flow, DUP); mutex_lock(&esw->offloads.peer_mutex); list_add_tail(&flow->peer, &esw->offloads.peer_flows); mutex_unlock(&esw->offloads.peer_mutex); @@@ -3284,7 -3553,7 +3562,7 @@@ out static int mlx5e_add_fdb_flow(struct mlx5e_priv *priv, struct flow_cls_offload *f, - u16 flow_flags, + unsigned long flow_flags, struct net_device *filter_dev, struct mlx5e_tc_flow **__flow) { @@@ -3318,7 -3587,7 +3596,7 @@@ out static int mlx5e_add_nic_flow(struct mlx5e_priv *priv, struct flow_cls_offload *f, - u16 flow_flags, + unsigned long flow_flags, struct net_device *filter_dev, struct mlx5e_tc_flow **__flow) { @@@ -3332,7 -3601,7 +3610,7 @@@ if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common)) return -EOPNOTSUPP;
- flow_flags |= MLX5E_TC_FLOW_NIC; + flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC); attr_size = sizeof(struct mlx5_nic_flow_attr); err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags, &parse_attr, &flow); @@@ -3353,14 -3622,14 +3631,14 @@@ if (err) goto err_free;
- flow->flags |= MLX5E_TC_FLOW_OFFLOADED; + flow_flag_set(flow, OFFLOADED); kvfree(parse_attr); *__flow = flow;
return 0;
err_free: - kfree(flow); + mlx5e_flow_put(priv, flow); kvfree(parse_attr); out: return err; @@@ -3369,12 -3638,12 +3647,12 @@@ static int mlx5e_tc_add_flow(struct mlx5e_priv *priv, struct flow_cls_offload *f, - int flags, + unsigned long flags, struct net_device *filter_dev, struct mlx5e_tc_flow **flow) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - u16 flow_flags; + unsigned long flow_flags; int err;
get_flags(flags, &flow_flags); @@@ -3393,14 -3662,16 +3671,16 @@@ }
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, - struct flow_cls_offload *f, int flags) + struct flow_cls_offload *f, unsigned long flags) { struct netlink_ext_ack *extack = f->common.extack; struct rhashtable *tc_ht = get_tc_ht(priv, flags); struct mlx5e_tc_flow *flow; int err = 0;
- flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); + rcu_read_lock(); + flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params); + rcu_read_unlock(); if (flow) { NL_SET_ERR_MSG_MOD(extack, "flow cookie already exists, ignoring"); @@@ -3415,51 -3686,62 +3695,62 @@@ if (err) goto out;
- err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params); + err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params); if (err) goto err_free;
return 0;
err_free: - mlx5e_tc_del_flow(priv, flow); - kfree(flow); + mlx5e_flow_put(priv, flow); out: return err; }
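Note that the rcu_read_lock()'d lookup above is only a fast-path early-out; the switch from rhashtable_insert_fast() to rhashtable_lookup_insert_fast() is what actually closes the check-then-insert race, because the insert re-checks for the cookie under the table's own synchronization. A toy mutex-based rendering of that insert-if-absent contract:

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

struct node {
	unsigned long cookie;
	struct node *next;
};

static pthread_mutex_t ht_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

/* 0 on insert, -EEXIST if the cookie is already present: lookup and
 * insert are one atomic step, so two adders cannot both succeed.
 */
static int lookup_insert(struct node *n)
{
	pthread_mutex_lock(&ht_lock);
	for (struct node *it = head; it; it = it->next) {
		if (it->cookie == n->cookie) {
			pthread_mutex_unlock(&ht_lock);
			return -EEXIST;
		}
	}
	n->next = head;
	head = n;
	pthread_mutex_unlock(&ht_lock);
	return 0;
}

int main(void)
{
	struct node a = { .cookie = 1 }, b = { .cookie = 1 };

	return lookup_insert(&a) || lookup_insert(&b) != -EEXIST;
}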
- #define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS) - #define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS) - static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags) { - if ((flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK)) - return true; + bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS)); + bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));
- return false; + return flow_flag_test(flow, INGRESS) == dir_ingress && + flow_flag_test(flow, EGRESS) == dir_egress; }
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv, - struct flow_cls_offload *f, int flags) + struct flow_cls_offload *f, unsigned long flags) { struct rhashtable *tc_ht = get_tc_ht(priv, flags); struct mlx5e_tc_flow *flow; + int err;
+ rcu_read_lock(); flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); - if (!flow || !same_flow_direction(flow, flags)) - return -EINVAL; + if (!flow || !same_flow_direction(flow, flags)) { + err = -EINVAL; + goto errout; + }
+ /* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag + * set. + */ + if (flow_flag_test_and_set(flow, DELETED)) { + err = -EINVAL; + goto errout; + } rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params); + rcu_read_unlock();
- mlx5e_tc_del_flow(priv, flow); - - kfree(flow); + mlx5e_flow_put(priv, flow);
return 0; + + errout: + rcu_read_unlock(); + return err; }
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, - struct flow_cls_offload *f, int flags) + struct flow_cls_offload *f, unsigned long flags) { struct mlx5_devcom *devcom = priv->mdev->priv.devcom; struct rhashtable *tc_ht = get_tc_ht(priv, flags); @@@ -3469,15 -3751,24 +3760,24 @@@ u64 lastuse = 0; u64 packets = 0; u64 bytes = 0; + int err = 0;
- flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); - if (!flow || !same_flow_direction(flow, flags)) - return -EINVAL; + rcu_read_lock(); + flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie, + tc_ht_params)); + rcu_read_unlock(); + if (IS_ERR(flow)) + return PTR_ERR(flow);
- if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { + if (!same_flow_direction(flow, flags)) { + err = -EINVAL; + goto errout; + } + + if (mlx5e_is_offloaded_flow(flow)) { counter = mlx5e_tc_get_counter(flow); if (!counter) - return 0; + goto errout;
mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); } @@@ -3489,8 -3780,8 +3789,8 @@@ if (!peer_esw) goto out;
- if ((flow->flags & MLX5E_TC_FLOW_DUP) && - (flow->peer_flow->flags & MLX5E_TC_FLOW_OFFLOADED)) { + if (flow_flag_test(flow, DUP) && + flow_flag_test(flow->peer_flow, OFFLOADED)) { u64 bytes2; u64 packets2; u64 lastuse2; @@@ -3509,15 -3800,117 +3809,117 @@@ no_peer_counter mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); out: flow_stats_update(&f->stats, bytes, packets, lastuse); + errout: + mlx5e_flow_put(priv, flow); + return err; + } + + static int apply_police_params(struct mlx5e_priv *priv, u32 rate, + struct netlink_ext_ack *extack) + { + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch *esw; + u16 vport_num; + u32 rate_mbps; + int err; + + esw = priv->mdev->priv.eswitch; + /* rate is given in bytes/sec. + * First convert to bits/sec and then round to the nearest Mbit/sec + * (one million bits per second). + * Moreover, if rate is non-zero we choose to configure to a minimum of + * 1 Mbit/sec. + */ + rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0; + vport_num = rpriv->rep->vport; + + err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps); + if (err) + NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware"); + + return err; + } + + static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv, + struct flow_action *flow_action, + struct netlink_ext_ack *extack) + { + struct mlx5e_rep_priv *rpriv = priv->ppriv; + const struct flow_action_entry *act; + int err; + int i; + + if (!flow_action_has_entries(flow_action)) { + NL_SET_ERR_MSG_MOD(extack, "matchall called with no action"); + return -EINVAL; + } + + if (!flow_offload_has_one_action(flow_action)) { + NL_SET_ERR_MSG_MOD(extack, "matchall policing supports only a single action"); + return -EOPNOTSUPP; + } + + flow_action_for_each(i, act, flow_action) { + switch (act->id) { + case FLOW_ACTION_POLICE: + err = apply_police_params(priv, act->police.rate_bytes_ps, extack); + if (err) + return err; + + rpriv->prev_vf_vport_stats = priv->stats.vf_vport; + break; + default: + NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall"); + return -EOPNOTSUPP; + } + }
return 0; }
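The conversion comment in apply_police_params() above is easy to check with concrete numbers: 375000 bytes/sec is 3,000,000 bits/sec, which rounds to 3 Mbit/sec, while any rate that rounds to 0 is clamped to 1. A standalone C rendering of the same arithmetic (rate_to_mbps is an illustrative name; u64 is used here to sidestep overflow, which the kernel's u32 math does not):

#include <stdint.h>
#include <stdio.h>

static uint32_t rate_to_mbps(uint64_t rate_bytes_ps)
{
	uint64_t mbps;

	if (!rate_bytes_ps)
		return 0;	/* zero disables policing */
	/* bytes/sec -> bits/sec, rounded to the nearest million */
	mbps = (rate_bytes_ps * 8 + 500000) / 1000000;
	return mbps ? (uint32_t)mbps : 1;	/* minimum of 1 Mbit/sec */
}

int main(void)
{
	printf("%u\n", rate_to_mbps(375000));	/* 3000000 bit/s -> 3 */
	printf("%u\n", rate_to_mbps(10000));	/* 80000 bit/s -> clamped to 1 */
	return 0;
}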
+ int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv, + struct tc_cls_matchall_offload *ma) + { + struct netlink_ext_ack *extack = ma->common.extack; + int prio = TC_H_MAJ(ma->common.prio) >> 16; + + if (prio != 1) { + NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported"); + return -EINVAL; + } + + return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack); + } + + int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv, + struct tc_cls_matchall_offload *ma) + { + struct netlink_ext_ack *extack = ma->common.extack; + + return apply_police_params(priv, 0, extack); + } + + void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv, + struct tc_cls_matchall_offload *ma) + { + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct rtnl_link_stats64 cur_stats; + u64 dbytes; + u64 dpkts; + + cur_stats = priv->stats.vf_vport; + dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets; + dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes; + rpriv->prev_vf_vport_stats = cur_stats; + flow_stats_update(&ma->stats, dpkts, dbytes, jiffies); + } + static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) { struct mlx5_core_dev *peer_mdev = peer_priv->mdev; - struct mlx5e_hairpin_entry *hpe; + struct mlx5e_hairpin_entry *hpe, *tmp; + LIST_HEAD(init_wait_list); u16 peer_vhca_id; int bkt;
@@@ -3526,9 -3919,18 +3928,18 @@@
peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
- hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) { - if (hpe->peer_vhca_id == peer_vhca_id) + mutex_lock(&priv->fs.tc.hairpin_tbl_lock); + hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) + if (refcount_inc_not_zero(&hpe->refcnt)) + list_add(&hpe->dead_peer_wait_list, &init_wait_list); + mutex_unlock(&priv->fs.tc.hairpin_tbl_lock); + + list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) { + wait_for_completion(&hpe->res_ready); + if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id) hpe->hp->pair->peer_gone = true; + + mlx5e_hairpin_put(priv, hpe); } }
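mlx5e_tc_hairpin_update_dead_peer() above shows the two-phase walk this series uses wherever it must block: phase one pins live entries onto a private list while holding hairpin_tbl_lock, phase two drops the lock and does the blocking wait_for_completion() against the private list only. The shape, in a hedged userspace sketch (the toy protects the refcount with the table lock itself):

#include <pthread.h>
#include <stddef.h>

struct hpe {
	int refs;		/* protected by tbl_lock in this toy */
	struct hpe *tbl_next;	/* table linkage */
	struct hpe *wait_next;	/* private wait-list linkage */
};

static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;

/* Phase one: pin live entries under the lock, never block here. */
static struct hpe *collect(struct hpe *table)
{
	struct hpe *wait_list = NULL;

	pthread_mutex_lock(&tbl_lock);
	for (struct hpe *h = table; h; h = h->tbl_next) {
		if (!h->refs)		/* dying entry, skip */
			continue;
		h->refs++;		/* pinned */
		h->wait_next = wait_list;
		wait_list = h;
	}
	pthread_mutex_unlock(&tbl_lock);

	/* Phase two runs on wait_list with the lock dropped, so it may
	 * sleep without stalling every other table user.
	 */
	return wait_list;
}

int main(void)
{
	struct hpe h = { .refs = 1 };

	return collect(&h) == &h ? 0 : 1;
}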
@@@ -3564,7 -3966,10 +3975,10 @@@ int mlx5e_tc_nic_init(struct mlx5e_pri struct mlx5e_tc_table *tc = &priv->fs.tc; int err;
- hash_init(tc->mod_hdr_tbl); + mutex_init(&tc->t_lock); + mutex_init(&tc->mod_hdr.lock); + hash_init(tc->mod_hdr.hlist); + mutex_init(&tc->hairpin_tbl_lock); hash_init(tc->hairpin_tbl);
err = rhashtable_init(&tc->ht, &tc_ht_params); @@@ -3596,12 -4001,16 +4010,16 @@@ void mlx5e_tc_nic_cleanup(struct mlx5e_ if (tc->netdevice_nb.notifier_call) unregister_netdevice_notifier(&tc->netdevice_nb);
+ mutex_destroy(&tc->mod_hdr.lock); + mutex_destroy(&tc->hairpin_tbl_lock); + rhashtable_destroy(&tc->ht);
if (!IS_ERR_OR_NULL(tc->t)) { mlx5_destroy_flow_table(tc->t); tc->t = NULL; } + mutex_destroy(&tc->t_lock); }
int mlx5e_tc_esw_init(struct rhashtable *tc_ht) @@@ -3614,7 -4023,7 +4032,7 @@@ void mlx5e_tc_esw_cleanup(struct rhasht rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL); }
- int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags) + int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags) { struct rhashtable *tc_ht = get_tc_ht(priv, flags);
@@@ -3636,10 -4045,10 +4054,10 @@@ void mlx5e_tc_reoffload_flows_work(stru reoffload_flows_work); struct mlx5e_tc_flow *flow, *tmp;
- rtnl_lock(); + mutex_lock(&rpriv->unready_flows_lock); list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) { if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL)) - remove_unready_flow(flow); + unready_flow_del(flow); } - rtnl_unlock(); + mutex_unlock(&rpriv->unready_flows_lock); } diff --combined drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 04685dbb280c,86db0e9776da..aba9e7a6ad3c --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@@ -35,6 -35,7 +35,7 @@@
#include <linux/if_ether.h> #include <linux/if_link.h> + #include <linux/atomic.h> #include <net/devlink.h> #include <linux/mlx5/device.h> #include <linux/mlx5/eswitch.h> @@@ -101,6 -102,13 +102,13 @@@ struct mlx5_vport_info bool trusted; };
+ /* Vport context events */ + enum mlx5_eswitch_vport_event { + MLX5_VPORT_UC_ADDR_CHANGE = BIT(0), + MLX5_VPORT_MC_ADDR_CHANGE = BIT(1), + MLX5_VPORT_PROMISC_CHANGE = BIT(3), + }; + struct mlx5_vport { struct mlx5_core_dev *dev; int vport; @@@ -122,7 -130,7 +130,7 @@@ } qos;
bool enabled; - u16 enabled_events; + enum mlx5_eswitch_vport_event enabled_events; };
enum offloads_fdb_flags { @@@ -173,13 -181,14 +181,14 @@@ struct mlx5_esw_offload struct mlx5_eswitch_rep *vport_reps; struct list_head peer_flows; struct mutex peer_mutex; + struct mutex encap_tbl_lock; /* protects encap_tbl */ DECLARE_HASHTABLE(encap_tbl, 8); - DECLARE_HASHTABLE(mod_hdr_tbl, 8); + struct mod_hdr_tbl mod_hdr; DECLARE_HASHTABLE(termtbl_tbl, 8); struct mutex termtbl_mutex; /* protects termtbl hash */ const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES]; u8 inline_mode; - u64 num_flows; + atomic64_t num_flows; enum devlink_eswitch_encap_mode encap; };
@@@ -207,8 -216,11 +216,11 @@@ enum struct mlx5_eswitch { struct mlx5_core_dev *dev; struct mlx5_nb nb; + /* legacy data structures */ struct mlx5_eswitch_fdb fdb_table; struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE]; + struct esw_mc_addr mc_promisc; + /* end of legacy */ struct workqueue_struct *work_queue; struct mlx5_vport *vports; u32 flags; @@@ -218,7 -230,6 +230,6 @@@ * and async SRIOV admin state changes */ struct mutex state_lock; - struct esw_mc_addr mc_promisc;
struct { bool enabled; @@@ -233,8 -244,8 +244,8 @@@ struct mlx5_esw_functions esw_funcs; };
- void esw_offloads_cleanup(struct mlx5_eswitch *esw); - int esw_offloads_init(struct mlx5_eswitch *esw); + void esw_offloads_disable(struct mlx5_eswitch *esw); + int esw_offloads_enable(struct mlx5_eswitch *esw); void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw); int esw_offloads_init_reps(struct mlx5_eswitch *esw); void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, @@@ -251,6 -262,8 +262,8 @@@ void esw_vport_disable_ingress_acl(stru struct mlx5_vport *vport); void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw, struct mlx5_vport *vport); + int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, + u32 rate_mbps);
/* E-Switch API */ int mlx5_eswitch_init(struct mlx5_core_dev *dev); @@@ -377,8 -390,8 +390,8 @@@ struct mlx5_esw_flow_attr struct mlx5_termtbl_handle *termtbl; } dests[MLX5_MAX_FLOW_FWD_VPORTS]; u32 mod_hdr_id; - u8 match_level; - u8 tunnel_match_level; + u8 inner_match_level; + u8 outer_match_level; struct mlx5_fc *counter; u32 chain; u16 prio; @@@ -513,6 -526,11 +526,11 @@@ void mlx5e_tc_clean_fdb_peer_flows(stru (vport) = &(esw)->vports[i], \ (i) < (esw)->total_vports; (i)++)
+ #define mlx5_esw_for_all_vports_reverse(esw, i, vport) \ + for ((i) = (esw)->total_vports - 1; \ + (vport) = &(esw)->vports[i], \ + (i) >= MLX5_VPORT_PF; (i)--) + #define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \ for ((i) = MLX5_VPORT_FIRST_VF; \ (vport) = &(esw)->vports[(i)], \ @@@ -574,6 -592,11 +592,11 @@@ bool mlx5_eswitch_is_vf_vport(const str void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs); int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
+ void + mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw, + enum mlx5_eswitch_vport_event enabled_events); + void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw); + #else /* CONFIG_MLX5_ESWITCH */ /* eswitch API stubs */ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } diff --combined drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 0323fd078271,42cc5001255b..7d3582ee66b7 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@@ -207,10 -207,14 +207,10 @@@ mlx5_eswitch_add_offloaded_rule(struct
mlx5_eswitch_set_rule_source_port(esw, spec, attr);
- if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { - if (attr->tunnel_match_level != MLX5_MATCH_NONE) - spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; - if (attr->match_level != MLX5_MATCH_NONE) - spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; - } else if (attr->match_level != MLX5_MATCH_NONE) { + if (attr->outer_match_level != MLX5_MATCH_NONE) spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; - } + if (attr->inner_match_level != MLX5_MATCH_NONE) + spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) flow_act.modify_id = attr->mod_hdr_id; @@@ -229,7 -233,7 +229,7 @@@ if (IS_ERR(rule)) goto err_add_rule; else - esw->offloads.num_flows++; + atomic64_inc(&esw->offloads.num_flows);
return rule;
@@@ -286,7 -290,7 +286,7 @@@ mlx5_eswitch_add_fwd_rule(struct mlx5_e mlx5_eswitch_set_rule_source_port(esw, spec, attr);
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS; - if (attr->match_level != MLX5_MATCH_NONE) + if (attr->outer_match_level != MLX5_MATCH_NONE) spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i); @@@ -294,7 -298,7 +294,7 @@@ if (IS_ERR(rule)) goto add_err;
- esw->offloads.num_flows++; + atomic64_inc(&esw->offloads.num_flows);
return rule; add_err: @@@ -322,7 -326,7 +322,7 @@@ __mlx5_eswitch_del_rule(struct mlx5_esw mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl); }
- esw->offloads.num_flows--; + atomic64_dec(&esw->offloads.num_flows);
if (fwd_rule) { esw_put_prio_table(esw, attr->chain, attr->prio, 1); @@@ -438,9 -442,11 +438,11 @@@ int mlx5_eswitch_add_vlan_action(struc fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && !attr->dest_chain);
+ mutex_lock(&esw->state_lock); + err = esw_add_vlan_action_check(attr, push, pop, fwd); if (err) - return err; + goto unlock;
attr->vlan_handled = false;
@@@ -453,11 -459,11 +455,11 @@@ attr->vlan_handled = true; }
- return 0; + goto unlock; }
if (!push && !pop) - return 0; + goto unlock;
if (!(offloads->vlan_push_pop_refcount)) { /* it's the 1st vlan rule, apply global vlan pop policy */ @@@ -482,6 -488,8 +484,8 @@@ skip_set_push out: if (!err) attr->vlan_handled = true; + unlock: + mutex_unlock(&esw->state_lock); return err; }
@@@ -504,6 -512,8 +508,8 @@@ int mlx5_eswitch_del_vlan_action(struc pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+ mutex_lock(&esw->state_lock); + vport = esw_vlan_action_get_vport(attr, push, pop);
if (!push && !pop && fwd) { @@@ -511,7 -521,7 +517,7 @@@ if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) vport->vlan_refcount--;
- return 0; + goto out; }
if (push) { @@@ -529,12 -539,13 +535,13 @@@ skip_unset_push: offloads->vlan_push_pop_refcount--; if (offloads->vlan_push_pop_refcount) - return 0; + goto out;
/* no more vlan rules, stop global vlan pop policy */ err = esw_set_global_vlan_pop(esw, 0);
out: + mutex_unlock(&esw->state_lock); return err; }
@@@ -583,38 -594,15 +590,15 @@@ void mlx5_eswitch_del_send_to_vport_rul mlx5_del_flow_rules(rule); }
- static int mlx5_eswitch_enable_passing_vport_metadata(struct mlx5_eswitch *esw) + static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable) { u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {}; u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {}; u8 fdb_to_vport_reg_c_id; int err;
- err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport, - out, sizeof(out)); - if (err) - return err; - - fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out, - esw_vport_context.fdb_to_vport_reg_c_id); - - fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0; - MLX5_SET(modify_esw_vport_context_in, in, - esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id); - - MLX5_SET(modify_esw_vport_context_in, in, - field_select.fdb_to_vport_reg_c_id, 1); - - return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport, - in, sizeof(in)); - } - - static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw) - { - u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {}; - u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {}; - u8 fdb_to_vport_reg_c_id; - int err; + if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) + return 0;
err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport, out, sizeof(out)); @@@ -624,7 -612,10 +608,10 @@@ fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out, esw_vport_context.fdb_to_vport_reg_c_id);
- fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0; + if (enable) + fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0; + else + fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;
MLX5_SET(modify_esw_vport_context_in, in, esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id); @@@ -1402,10 -1393,9 +1389,9 @@@ void esw_offloads_cleanup_reps(struct m int esw_offloads_init_reps(struct mlx5_eswitch *esw) { int total_vports = esw->total_vports; - struct mlx5_core_dev *dev = esw->dev; struct mlx5_eswitch_rep *rep; - u8 hw_id[ETH_ALEN], rep_type; int vport_index; + u8 rep_type;
esw->offloads.vport_reps = kcalloc(total_vports, sizeof(struct mlx5_eswitch_rep), @@@ -1413,12 -1403,9 +1399,9 @@@ if (!esw->offloads.vport_reps) return -ENOMEM;
- mlx5_query_mac_address(dev, hw_id); - mlx5_esw_for_all_reps(esw, vport_index, rep) { rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index); rep->vport_index = vport_index; - ether_addr_copy(rep->hw_id, hw_id);
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) atomic_set(&rep->rep_data[rep_type].state, @@@ -2120,7 -2107,7 +2103,7 @@@ int mlx5_esw_funcs_changed_handler(stru return NOTIFY_OK; }
- int esw_offloads_init(struct mlx5_eswitch *esw) + int esw_offloads_enable(struct mlx5_eswitch *esw) { int err;
@@@ -2134,11 -2121,11 +2117,11 @@@ if (err) return err;
- if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { - err = mlx5_eswitch_enable_passing_vport_metadata(esw); - if (err) - goto err_vport_metadata; - } + err = esw_set_passing_vport_metadata(esw, true); + if (err) + goto err_vport_metadata; + + mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
err = esw_offloads_load_all_reps(esw); if (err) @@@ -2152,8 -2139,8 +2135,8 @@@ return 0;
err_reps: - if (mlx5_eswitch_vport_match_metadata_enabled(esw)) - mlx5_eswitch_disable_passing_vport_metadata(esw); + mlx5_eswitch_disable_pf_vf_vports(esw); + esw_set_passing_vport_metadata(esw, false); err_vport_metadata: esw_offloads_steering_cleanup(esw); return err; @@@ -2178,13 -2165,13 +2161,13 @@@ static int esw_offloads_stop(struct mlx return err; }
- void esw_offloads_cleanup(struct mlx5_eswitch *esw) + void esw_offloads_disable(struct mlx5_eswitch *esw) { mlx5_rdma_disable_roce(esw->dev); esw_offloads_devcom_cleanup(esw); esw_offloads_unload_all_reps(esw); - if (mlx5_eswitch_vport_match_metadata_enabled(esw)) - mlx5_eswitch_disable_passing_vport_metadata(esw); + mlx5_eswitch_disable_pf_vf_vports(esw); + esw_set_passing_vport_metadata(esw, false); esw_offloads_steering_cleanup(esw); esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE; } @@@ -2345,7 -2332,7 +2328,7 @@@ int mlx5_devlink_eswitch_inline_mode_se break; }
- if (esw->offloads.num_flows > 0) { + if (atomic64_read(&esw->offloads.num_flows) > 0) { NL_SET_ERR_MSG_MOD(extack, "Can't set inline mode when flows are configured"); return -EOPNOTSUPP; @@@ -2455,7 -2442,7 +2438,7 @@@ int mlx5_devlink_eswitch_encap_mode_set if (esw->offloads.encap == encap) return 0;
- if (esw->offloads.num_flows > 0) { + if (atomic64_read(&esw->offloads.num_flows) > 0) { NL_SET_ERR_MSG_MOD(extack, "Can't set encapsulation when flows are configured"); return -EOPNOTSUPP; diff --combined drivers/net/hyperv/netvsc_drv.c index e8fce6d715ef,86884c863013..0a6cd2f1111f --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@@ -435,7 -435,7 +435,7 @@@ static u32 init_page_array(void *hdr, u skb_frag_t *frag = skb_shinfo(skb)->frags + i;
slots_used += fill_pg_buf(skb_frag_page(frag), - frag->page_offset, + skb_frag_off(frag), skb_frag_size(frag), &pb[slots_used]); } return slots_used; @@@ -449,7 -449,7 +449,7 @@@ static int count_skb_frag_slots(struct for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; unsigned long size = skb_frag_size(frag); - unsigned long offset = frag->page_offset; + unsigned long offset = skb_frag_off(frag);
/* Skip unused frames from start of page */ offset &= ~PAGE_MASK; @@@ -1239,15 -1239,12 +1239,15 @@@ static void netvsc_get_stats64(struct n struct rtnl_link_stats64 *t) { struct net_device_context *ndev_ctx = netdev_priv(net); - struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev); + struct netvsc_device *nvdev; struct netvsc_vf_pcpu_stats vf_tot; int i;
+ rcu_read_lock(); + + nvdev = rcu_dereference(ndev_ctx->nvdev); if (!nvdev) - return; + goto out;
netdev_stats_to_stats64(t, &net->stats);
@@@ -1286,8 -1283,6 +1286,8 @@@ t->rx_packets += packets; t->multicast += multicast; } +out: + rcu_read_unlock(); }
static int netvsc_set_mac_addr(struct net_device *ndev, void *p) diff --combined drivers/net/netdevsim/dev.c index bcc40a236624,08ca59fc189b..84457df451b8 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@@ -40,6 -40,10 +40,10 @@@ static int nsim_dev_debugfs_init(struc return PTR_ERR_OR_ZERO(nsim_dev->ports_ddir) ?: -EINVAL; debugfs_create_bool("fw_update_status", 0600, nsim_dev->ddir, &nsim_dev->fw_update_status); + debugfs_create_u32("max_macs", 0600, nsim_dev->ddir, + &nsim_dev->max_macs); + debugfs_create_bool("test1", 0600, nsim_dev->ddir, + &nsim_dev->test1); return 0; }
@@@ -73,47 -77,46 +77,47 @@@ static void nsim_dev_port_debugfs_exit( debugfs_remove_recursive(nsim_dev_port->ddir); }
+static struct net *nsim_devlink_net(struct devlink *devlink) +{ + return &init_net; +} + static u64 nsim_dev_ipv4_fib_resource_occ_get(void *priv) { - struct nsim_dev *nsim_dev = priv; + struct net *net = priv;
- return nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV4_FIB, false); + return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false); }
static u64 nsim_dev_ipv4_fib_rules_res_occ_get(void *priv) { - struct nsim_dev *nsim_dev = priv; + struct net *net = priv;
- return nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV4_FIB_RULES, false); + return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false); }
static u64 nsim_dev_ipv6_fib_resource_occ_get(void *priv) { - struct nsim_dev *nsim_dev = priv; + struct net *net = priv;
- return nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV6_FIB, false); + return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false); }
static u64 nsim_dev_ipv6_fib_rules_res_occ_get(void *priv) { - struct nsim_dev *nsim_dev = priv; + struct net *net = priv;
- return nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV6_FIB_RULES, false); + return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false); }
static int nsim_dev_resources_register(struct devlink *devlink) { - struct nsim_dev *nsim_dev = devlink_priv(devlink); struct devlink_resource_size_params params = { .size_max = (u64)-1, .size_granularity = 1, .unit = DEVLINK_RESOURCE_UNIT_ENTRY }; + struct net *net = nsim_devlink_net(devlink); int err; u64 n;
@@@ -127,7 -130,8 +131,7 @@@ goto out; }
- n = nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV4_FIB, true); + n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true); err = devlink_resource_register(devlink, "fib", n, NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4, ¶ms); @@@ -136,7 -140,8 +140,7 @@@ return err; }
- n = nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV4_FIB_RULES, true); + n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true); err = devlink_resource_register(devlink, "fib-rules", n, NSIM_RESOURCE_IPV4_FIB_RULES, NSIM_RESOURCE_IPV4, ¶ms); @@@ -155,7 -160,8 +159,7 @@@ goto out; }
- n = nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV6_FIB, true); + n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true); err = devlink_resource_register(devlink, "fib", n, NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6, ¶ms); @@@ -164,7 -170,8 +168,7 @@@ return err; }
- n = nsim_fib_get_val(nsim_dev->fib_data, - NSIM_RESOURCE_IPV6_FIB_RULES, true); + n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true); err = devlink_resource_register(devlink, "fib-rules", n, NSIM_RESOURCE_IPV6_FIB_RULES, NSIM_RESOURCE_IPV6, ¶ms); @@@ -176,31 -183,79 +180,79 @@@ devlink_resource_occ_get_register(devlink, NSIM_RESOURCE_IPV4_FIB, nsim_dev_ipv4_fib_resource_occ_get, - nsim_dev); + net); devlink_resource_occ_get_register(devlink, NSIM_RESOURCE_IPV4_FIB_RULES, nsim_dev_ipv4_fib_rules_res_occ_get, - nsim_dev); + net); devlink_resource_occ_get_register(devlink, NSIM_RESOURCE_IPV6_FIB, nsim_dev_ipv6_fib_resource_occ_get, - nsim_dev); + net); devlink_resource_occ_get_register(devlink, NSIM_RESOURCE_IPV6_FIB_RULES, nsim_dev_ipv6_fib_rules_res_occ_get, - nsim_dev); + net); out: return err; }
+ enum nsim_devlink_param_id { + NSIM_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + NSIM_DEVLINK_PARAM_ID_TEST1, + }; + + static const struct devlink_param nsim_devlink_params[] = { + DEVLINK_PARAM_GENERIC(MAX_MACS, + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, NULL), + DEVLINK_PARAM_DRIVER(NSIM_DEVLINK_PARAM_ID_TEST1, + "test1", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, NULL), + }; + + static void nsim_devlink_set_params_init_values(struct nsim_dev *nsim_dev, + struct devlink *devlink) + { + union devlink_param_value value; + + value.vu32 = nsim_dev->max_macs; + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_MAX_MACS, + value); + value.vbool = nsim_dev->test1; + devlink_param_driverinit_value_set(devlink, + NSIM_DEVLINK_PARAM_ID_TEST1, + value); + } + + static void nsim_devlink_param_load_driverinit_values(struct devlink *devlink) + { + struct nsim_dev *nsim_dev = devlink_priv(devlink); + union devlink_param_value saved_value; + int err; + + err = devlink_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_MAX_MACS, + &saved_value); + if (!err) + nsim_dev->max_macs = saved_value.vu32; + err = devlink_param_driverinit_value_get(devlink, + NSIM_DEVLINK_PARAM_ID_TEST1, + &saved_value); + if (!err) + nsim_dev->test1 = saved_value.vbool; + } + static int nsim_dev_reload(struct devlink *devlink, struct netlink_ext_ack *extack) { - struct nsim_dev *nsim_dev = devlink_priv(devlink); enum nsim_resource_id res_ids[] = { NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES, NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES }; + struct net *net = nsim_devlink_net(devlink); int i;
for (i = 0; i < ARRAY_SIZE(res_ids); ++i) { @@@ -209,11 -264,13 +261,12 @@@
err = devlink_resource_size_get(devlink, res_ids[i], &val); if (!err) { - err = nsim_fib_set_max(nsim_dev->fib_data, - res_ids[i], val, extack); + err = nsim_fib_set_max(net, res_ids[i], val, extack); if (err) return err; } } + nsim_devlink_param_load_driverinit_values(devlink);
return 0; } @@@ -263,6 -320,9 +316,9 @@@ static const struct devlink_ops nsim_de .flash_update = nsim_dev_flash_update, };
+ #define NSIM_DEV_MAX_MACS_DEFAULT 32 + #define NSIM_DEV_TEST1_DEFAULT true + static struct nsim_dev * nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count) { @@@ -280,31 -340,51 +336,43 @@@ INIT_LIST_HEAD(&nsim_dev->port_list); mutex_init(&nsim_dev->port_list_lock); nsim_dev->fw_update_status = true; + nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT; + nsim_dev->test1 = NSIM_DEV_TEST1_DEFAULT;
- nsim_dev->fib_data = nsim_fib_create(); - if (IS_ERR(nsim_dev->fib_data)) { - err = PTR_ERR(nsim_dev->fib_data); - goto err_devlink_free; - } - err = nsim_dev_resources_register(devlink); if (err) - goto err_fib_destroy; + goto err_devlink_free;
err = devlink_register(devlink, &nsim_bus_dev->dev); if (err) goto err_resources_unregister;
- err = nsim_dev_debugfs_init(nsim_dev); + err = devlink_params_register(devlink, nsim_devlink_params, + ARRAY_SIZE(nsim_devlink_params)); if (err) goto err_dl_unregister; + nsim_devlink_set_params_init_values(nsim_dev, devlink); + + err = nsim_dev_debugfs_init(nsim_dev); + if (err) + goto err_params_unregister;
err = nsim_bpf_dev_init(nsim_dev); if (err) goto err_debugfs_exit;
+ devlink_params_publish(devlink); return nsim_dev;
err_debugfs_exit: nsim_dev_debugfs_exit(nsim_dev); + err_params_unregister: + devlink_params_unregister(devlink, nsim_devlink_params, + ARRAY_SIZE(nsim_devlink_params)); err_dl_unregister: devlink_unregister(devlink); err_resources_unregister: devlink_resources_unregister(devlink, NULL); -err_fib_destroy: - nsim_fib_destroy(nsim_dev->fib_data); err_devlink_free: devlink_free(devlink); return ERR_PTR(err); @@@ -316,8 -396,11 +384,10 @@@ static void nsim_dev_destroy(struct nsi
nsim_bpf_dev_exit(nsim_dev); nsim_dev_debugfs_exit(nsim_dev); + devlink_params_unregister(devlink, nsim_devlink_params, + ARRAY_SIZE(nsim_devlink_params)); devlink_unregister(devlink); devlink_resources_unregister(devlink, NULL); - nsim_fib_destroy(nsim_dev->fib_data); mutex_destroy(&nsim_dev->port_list_lock); devlink_free(devlink); } diff --combined drivers/net/netdevsim/netdevsim.h index 9404637d34b7,95751a817508..ed183c9d0dc9 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@@ -158,6 -158,8 +158,8 @@@ struct nsim_dev struct list_head port_list; struct mutex port_list_lock; /* protects port list */ bool fw_update_status; + u32 max_macs; + bool test1; };
int nsim_dev_init(void); @@@ -169,10 -171,12 +171,10 @@@ int nsim_dev_port_add(struct nsim_bus_d int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_index);
-struct nsim_fib_data *nsim_fib_create(void); -void nsim_fib_destroy(struct nsim_fib_data *fib_data); -u64 nsim_fib_get_val(struct nsim_fib_data *fib_data, - enum nsim_resource_id res_id, bool max); -int nsim_fib_set_max(struct nsim_fib_data *fib_data, - enum nsim_resource_id res_id, u64 val, +int nsim_fib_init(void); +void nsim_fib_exit(void); +u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max); +int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val, struct netlink_ext_ack *extack);
#if IS_ENABLED(CONFIG_XFRM_OFFLOAD) diff --combined drivers/net/xen-netback/netback.c index c9262ffeefe4,4679fcf1a1c4..0020b2e8c279 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@@ -136,12 -136,12 +136,12 @@@ static inline struct xenvif_queue *ubuf
static u16 frag_get_pending_idx(skb_frag_t *frag) { - return (u16)frag->page_offset; + return (u16)skb_frag_off(frag); }
static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx) { - frag->page_offset = pending_idx; + skb_frag_off_set(frag, pending_idx); }
static inline pending_ring_idx_t pending_index(unsigned i) @@@ -925,7 -925,6 +925,7 @@@ static void xenvif_tx_build_gops(struc skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS; nskb = xenvif_alloc_skb(0); if (unlikely(nskb == NULL)) { + skb_shinfo(skb)->nr_frags = 0; kfree_skb(skb); xenvif_tx_err(queue, &txreq, extra_count, idx); if (net_ratelimit()) @@@ -941,7 -940,6 +941,7 @@@
if (xenvif_set_skb_gso(queue->vif, skb, gso)) { /* Failure in xenvif_set_skb_gso is fatal. */ + skb_shinfo(skb)->nr_frags = 0; kfree_skb(skb); kfree_skb(nskb); break; @@@ -1057,7 -1055,7 +1057,7 @@@ static int xenvif_handle_frag_list(stru int j; skb->truesize += skb->data_len; for (j = 0; j < i; j++) - put_page(frags[j].page.p); + put_page(skb_frag_page(&frags[j])); return -ENOMEM; }
@@@ -1069,8 -1067,8 +1069,8 @@@ BUG();
offset += len; - frags[i].page.p = page; - frags[i].page_offset = 0; + __skb_frag_set_page(&frags[i], page); + skb_frag_off_set(&frags[i], 0); skb_frag_size_set(&frags[i], len); }
@@@ -1655,9 -1653,6 +1655,6 @@@ static int __init netback_init(void
#ifdef CONFIG_DEBUG_FS xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL); - if (IS_ERR_OR_NULL(xen_netback_dbg_root)) - pr_warn("Init of debugfs returned %ld!\n", - PTR_ERR(xen_netback_dbg_root)); #endif /* CONFIG_DEBUG_FS */
return 0; diff --combined drivers/staging/unisys/visornic/visornic_main.c index 40dd573e73c3,6fa7726185de..1d1440d43002 --- a/drivers/staging/unisys/visornic/visornic_main.c +++ b/drivers/staging/unisys/visornic/visornic_main.c @@@ -284,9 -284,9 +284,9 @@@ static int visor_copy_fragsinfo_from_sk for (frag = 0; frag < numfrags; frag++) { count = add_physinfo_entries(page_to_pfn( skb_frag_page(&skb_shinfo(skb)->frags[frag])), - skb_shinfo(skb)->frags[frag].page_offset, - skb_shinfo(skb)->frags[frag].size, count, - frags_max, frags); + skb_frag_off(&skb_shinfo(skb)->frags[frag]), + skb_frag_size(&skb_shinfo(skb)->frags[frag]), + count, frags_max, frags); /* add_physinfo_entries only returns * zero if the frags array is out of room * That should never happen because we @@@ -1750,8 -1750,7 +1750,8 @@@ static int visornic_poll(struct napi_st }
/* poll_for_irq - checks the status of the response queue - * @v: Void pointer to the visronic devdata struct. + * @t: pointer to the 'struct timer_list' from which we can retrieve + * the visornic devdata struct. * * Main function of the vnic_incoming thread. Periodically checks the response * queue and drains it if needed. diff --combined include/linux/mlx5/mlx5_ifc.h index b8b570c30b5e,da5e7eaed438..a66ed0abe40e --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@@ -1040,6 -1040,21 +1040,21 @@@ enum MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1, };
+ #define MLX5_FC_BULK_SIZE_FACTOR 128 + + enum mlx5_fc_bulk_alloc_bitmask { + MLX5_FC_BULK_128 = (1 << 0), + MLX5_FC_BULK_256 = (1 << 1), + MLX5_FC_BULK_512 = (1 << 2), + MLX5_FC_BULK_1024 = (1 << 3), + MLX5_FC_BULK_2048 = (1 << 4), + MLX5_FC_BULK_4096 = (1 << 5), + MLX5_FC_BULK_8192 = (1 << 6), + MLX5_FC_BULK_16384 = (1 << 7), + }; + + #define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum)) + struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_0[0x30]; u8 vhca_id[0x10]; @@@ -1244,7 -1259,8 +1259,8 @@@ u8 reserved_at_2e0[0x7]; u8 max_qp_mcg[0x19];
- u8 reserved_at_300[0x18]; + u8 reserved_at_300[0x10]; + u8 flow_counter_bulk_alloc[0x8]; u8 log_max_mcg[0x8];
u8 reserved_at_320[0x3]; @@@ -2766,7 -2782,7 +2782,7 @@@ struct mlx5_ifc_traffic_counter_bits struct mlx5_ifc_tisc_bits { u8 strict_lag_tx_port_affinity[0x1]; u8 tls_en[0x1]; - u8 reserved_at_1[0x2]; + u8 reserved_at_2[0x2]; u8 lag_tx_port_affinity[0x04];
u8 reserved_at_8[0x4]; @@@ -2941,6 -2957,13 +2957,13 @@@ enum SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3, };
+ enum { + ELEMENT_TYPE_CAP_MASK_TASR = 1 << 0, + ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1, + ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2, + ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3, + }; + struct mlx5_ifc_scheduling_context_bits { u8 element_type[0x8]; u8 reserved_at_8[0x18]; @@@ -7817,7 -7840,8 +7840,8 @@@ struct mlx5_ifc_alloc_flow_counter_in_b u8 reserved_at_20[0x10]; u8 op_mod[0x10];
- u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x38]; + u8 flow_counter_bulk[0x8]; };
struct mlx5_ifc_add_vxlan_udp_dport_out_bits { @@@ -10054,8 -10078,9 +10078,8 @@@ struct mlx5_ifc_tls_static_params_bits };
struct mlx5_ifc_tls_progress_params_bits { - u8 valid[0x1]; - u8 reserved_at_1[0x7]; - u8 pd[0x18]; + u8 reserved_at_0[0x8]; + u8 tisn[0x18];
u8 next_record_tcp_sn[0x20];
diff --combined include/linux/skbuff.h index ba5583522d24,3aef8d82ea59..bff9a62e881e --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@@ -14,6 -14,7 +14,7 @@@ #include <linux/compiler.h> #include <linux/time.h> #include <linux/bug.h> + #include <linux/bvec.h> #include <linux/cache.h> #include <linux/rbtree.h> #include <linux/socket.h> @@@ -308,58 -309,45 +309,45 @@@ extern int sysctl_max_skb_frags */ #define GSO_BY_FRAGS 0xFFFF
- typedef struct skb_frag_struct skb_frag_t; - - struct skb_frag_struct { - struct { - struct page *p; - } page; - #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) - __u32 page_offset; - __u32 size; - #else - __u16 page_offset; - __u16 size; - #endif - }; + typedef struct bio_vec skb_frag_t;
/** - * skb_frag_size - Returns the size of a skb fragment + * skb_frag_size() - Returns the size of a skb fragment * @frag: skb fragment */ static inline unsigned int skb_frag_size(const skb_frag_t *frag) { - return frag->size; + return frag->bv_len; }
/** - * skb_frag_size_set - Sets the size of a skb fragment + * skb_frag_size_set() - Sets the size of a skb fragment * @frag: skb fragment * @size: size of fragment */ static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size) { - frag->size = size; + frag->bv_len = size; }
/** - * skb_frag_size_add - Incrementes the size of a skb fragment by %delta + * skb_frag_size_add() - Increments the size of a skb fragment by @delta * @frag: skb fragment * @delta: value to add */ static inline void skb_frag_size_add(skb_frag_t *frag, int delta) { - frag->size += delta; + frag->bv_len += delta; }
/** - * skb_frag_size_sub - Decrements the size of a skb fragment by %delta + * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta * @frag: skb fragment * @delta: value to subtract */ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta) { - frag->size -= delta; + frag->bv_len -= delta; }
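Converted callers go through these helpers instead of poking frag->size directly, as in the visornic hunk at the top of this mail. A runnable userspace sketch of the pattern follows, with a stand-in bio_vec; the types and values are illustrative, not kernel code.

#include <stdio.h>

struct page;				/* opaque, as in the kernel */

typedef struct {			/* stand-in for struct bio_vec */
	struct page *bv_page;
	unsigned int bv_len;
	unsigned int bv_offset;
} skb_frag_t;

static unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->bv_len;
}

int main(void)
{
	skb_frag_t frags[2] = {
		{ .bv_len = 1024, .bv_offset = 0 },
		{ .bv_len = 512,  .bv_offset = 256 },
	};
	unsigned int total = 0;
	int i;

	/* sum the payload carried by the frag array via the accessor */
	for (i = 0; i < 2; i++)
		total += skb_frag_size(&frags[i]);
	printf("%u\n", total);		/* prints 1536 */
	return 0;
}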
/** @@@ -379,7 -367,7 +367,7 @@@ static inline bool skb_frag_must_loop(s * skb_frag_foreach_page - loop over pages in a fragment * * @f: skb frag to operate on - * @f_off: offset from start of f->page.p + * @f_off: offset from start of f->bv_page * @f_len: length from f_off to loop over * @p: (temp var) current page * @p_off: (temp var) offset from start of current page, @@@ -1374,14 -1362,6 +1362,14 @@@ static inline void skb_copy_hash(struc to->l4_hash = from->l4_hash; };
+static inline void skb_copy_decrypted(struct sk_buff *to, + const struct sk_buff *from) +{ +#ifdef CONFIG_TLS_DEVICE + to->decrypted = from->decrypted; +#endif +} + #ifdef NET_SKBUFF_DATA_USES_OFFSET static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) { @@@ -2097,8 -2077,8 +2085,8 @@@ static inline void __skb_fill_page_desc * that not all callers have unique ownership of the page but rely * on page_is_pfmemalloc doing the right thing(tm). */ - frag->page.p = page; - frag->page_offset = off; + frag->bv_page = page; + frag->bv_offset = off; skb_frag_size_set(frag, size);
page = compound_head(page); @@@ -2878,6 -2858,46 +2866,46 @@@ static inline void skb_propagate_pfmema }
/** + * skb_frag_off() - Returns the offset of a skb fragment + * @frag: the paged fragment + */ + static inline unsigned int skb_frag_off(const skb_frag_t *frag) + { + return frag->bv_offset; + } + + /** + * skb_frag_off_add() - Increments the offset of a skb fragment by @delta + * @frag: skb fragment + * @delta: value to add + */ + static inline void skb_frag_off_add(skb_frag_t *frag, int delta) + { + frag->bv_offset += delta; + } + + /** + * skb_frag_off_set() - Sets the offset of a skb fragment + * @frag: skb fragment + * @offset: offset of fragment + */ + static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset) + { + frag->bv_offset = offset; + } + + /** + * skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment + * @fragto: skb fragment where offset is set + * @fragfrom: skb fragment offset is copied from + */ + static inline void skb_frag_off_copy(skb_frag_t *fragto, + const skb_frag_t *fragfrom) + { + fragto->bv_offset = fragfrom->bv_offset; + } + + /** * skb_frag_page - retrieve the page referred to by a paged fragment * @frag: the paged fragment * @@@ -2885,7 -2905,7 +2913,7 @@@ */ static inline struct page *skb_frag_page(const skb_frag_t *frag) { - return frag->page.p; + return frag->bv_page; }
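skb_frag_off_add() paired with skb_frag_size_sub() expresses "eat bytes off the front of a frag", the operation __pskb_trim_head performs in the tcp_output.c hunk further down. A self-contained model of that pairing, with stand-in types and assumed field widths:

#include <assert.h>

typedef struct {			/* stand-in for struct bio_vec */
	unsigned int bv_len;
	unsigned int bv_offset;
} skb_frag_t;

static void skb_frag_off_add(skb_frag_t *frag, int delta)
{
	frag->bv_offset += delta;
}

static void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->bv_len -= delta;
}

int main(void)
{
	skb_frag_t f = { .bv_len = 4096, .bv_offset = 0 };

	skb_frag_off_add(&f, 100);	/* eat 100 bytes from the head */
	skb_frag_size_sub(&f, 100);
	assert(f.bv_offset == 100 && f.bv_len == 3996);
	return 0;
}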
/** @@@ -2943,7 -2963,7 +2971,7 @@@ static inline void skb_frag_unref(struc */ static inline void *skb_frag_address(const skb_frag_t *frag) { - return page_address(skb_frag_page(frag)) + frag->page_offset; + return page_address(skb_frag_page(frag)) + skb_frag_off(frag); }
/** @@@ -2959,7 -2979,18 +2987,18 @@@ static inline void *skb_frag_address_sa if (unlikely(!ptr)) return NULL;
- return ptr + frag->page_offset; + return ptr + skb_frag_off(frag); + } + + /** + * skb_frag_page_copy() - sets the page in a fragment from another fragment + * @fragto: skb fragment where page is set + * @fragfrom: skb fragment page is copied from + */ + static inline void skb_frag_page_copy(skb_frag_t *fragto, + const skb_frag_t *fragfrom) + { + fragto->bv_page = fragfrom->bv_page; }
/** @@@ -2971,7 -3002,7 +3010,7 @@@ */ static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page) { - frag->page.p = page; + frag->bv_page = page; }
/** @@@ -3007,7 -3038,7 +3046,7 @@@ static inline dma_addr_t skb_frag_dma_m enum dma_data_direction dir) { return dma_map_page(dev, skb_frag_page(frag), - frag->page_offset + offset, size, dir); + skb_frag_off(frag) + offset, size, dir); }
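For DMA the offset accessor feeds straight into dma_map_page(), so a driver's transmit path typically looks like the following kernel-style sketch (not runnable in userspace; error handling abbreviated and the 'unmap' label assumed):

dma_addr_t addr;

addr = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
			DMA_TO_DEVICE);
if (dma_mapping_error(dev, addr))
	goto unmap;	/* unwind frags mapped so far */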
static inline struct sk_buff *pskb_copy(struct sk_buff *skb, @@@ -3174,10 -3205,10 +3213,10 @@@ static inline bool skb_can_coalesce(str if (skb_zcopy(skb)) return false; if (i) { - const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
return page == skb_frag_page(frag) && - off == frag->page_offset + skb_frag_size(frag); + off == skb_frag_off(frag) + skb_frag_size(frag); } return false; } diff --combined net/ipv4/tcp.c index 77b485d60b9d,f8fa1686f7f3..051ef10374f6 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@@ -984,9 -984,6 +984,9 @@@ new_segment if (!skb) goto wait_for_memory;
+#ifdef CONFIG_TLS_DEVICE + skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED); +#endif skb_entail(sk, skb); copy = size_goal; } @@@ -1165,7 -1162,7 +1165,7 @@@ int tcp_sendmsg_locked(struct sock *sk struct sockcm_cookie sockc; int flags, err, copied = 0; int mss_now = 0, size_goal, copied_syn = 0; - bool process_backlog = false; + int process_backlog = 0; bool zc = false; long timeo;
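The next hunk replaces the flush-on-every-new-segment behaviour: turning process_backlog into a counter lets tcp_sendmsg_locked amortize sk_flush_backlog() over 16 segment allocations rather than attempting it each time. A userspace model of the throttle, with illustrative names:

#include <stdio.h>

static int flushes;

static void flush_backlog(void)
{
	flushes++;		/* stands in for sk_flush_backlog(sk) */
}

int main(void)
{
	int process_backlog = 0;
	int segs;

	for (segs = 0; segs < 100; segs++) {
		if (process_backlog >= 16) {
			process_backlog = 0;
			flush_backlog();
		}
		process_backlog++;	/* one new segment allocated */
	}
	printf("%d flushes for 100 segments\n", flushes);	/* prints 6 */
	return 0;
}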
@@@ -1257,9 -1254,10 +1257,10 @@@ new_segment if (!sk_stream_memory_free(sk)) goto wait_for_sndbuf;
- if (process_backlog && sk_flush_backlog(sk)) { - process_backlog = false; - goto restart; + if (unlikely(process_backlog >= 16)) { + process_backlog = 0; + if (sk_flush_backlog(sk)) + goto restart; } first_skb = tcp_rtx_and_write_queues_empty(sk); skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, @@@ -1267,7 -1265,7 +1268,7 @@@ if (!skb) goto wait_for_memory;
- process_backlog = true; + process_backlog++; skb->ip_summed = CHECKSUM_PARTIAL;
skb_entail(sk, skb); @@@ -1779,19 -1777,21 +1780,21 @@@ static int tcp_zerocopy_receive(struct break; frags = skb_shinfo(skb)->frags; while (offset) { - if (frags->size > offset) + if (skb_frag_size(frags) > offset) goto out; - offset -= frags->size; + offset -= skb_frag_size(frags); frags++; } } - if (frags->size != PAGE_SIZE || frags->page_offset) { + if (skb_frag_size(frags) != PAGE_SIZE || skb_frag_off(frags)) { int remaining = zc->recv_skip_hint; + int size = skb_frag_size(frags);
- while (remaining && (frags->size != PAGE_SIZE || - frags->page_offset)) { - remaining -= frags->size; + while (remaining && (size != PAGE_SIZE || + skb_frag_off(frags))) { + remaining -= size; frags++; + size = skb_frag_size(frags); } zc->recv_skip_hint -= remaining; break; @@@ -3784,8 -3784,8 +3787,8 @@@ int tcp_md5_hash_skb_data(struct tcp_md return 1;
for (i = 0; i < shi->nr_frags; ++i) { - const struct skb_frag_struct *f = &shi->frags[i]; - unsigned int offset = f->page_offset; + const skb_frag_t *f = &shi->frags[i]; + unsigned int offset = skb_frag_off(f); struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
sg_set_page(&sg, page, skb_frag_size(f), diff --combined net/ipv4/tcp_output.c index 979520e46e33,e6d02e05bb1c..5c46bc4c7e8d --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@@ -1320,7 -1320,6 +1320,7 @@@ int tcp_fragment(struct sock *sk, enum buff = sk_stream_alloc_skb(sk, nsize, gfp, true); if (!buff) return -ENOMEM; /* We'll just try again later. */ + skb_copy_decrypted(buff, skb);
sk->sk_wmem_queued += buff->truesize; sk_mem_charge(sk, buff->truesize); @@@ -1403,7 -1402,7 +1403,7 @@@ static int __pskb_trim_head(struct sk_b } else { shinfo->frags[k] = shinfo->frags[i]; if (eat) { - shinfo->frags[k].page_offset += eat; + skb_frag_off_add(&shinfo->frags[k], eat); skb_frag_size_sub(&shinfo->frags[k], eat); eat = 0; } @@@ -1875,7 -1874,6 +1875,7 @@@ static int tso_fragment(struct sock *sk buff = sk_stream_alloc_skb(sk, 0, gfp, true); if (unlikely(!buff)) return -ENOMEM; + skb_copy_decrypted(buff, skb);
sk->sk_wmem_queued += buff->truesize; sk_mem_charge(sk, buff->truesize); @@@ -2145,7 -2143,6 +2145,7 @@@ static int tcp_mtu_probe(struct sock *s sk_mem_charge(sk, nskb->truesize);
skb = tcp_send_head(sk); + skb_copy_decrypted(nskb, skb);
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; diff --combined net/sched/sch_taprio.c index e25d414ae12f,046fd2c102b4..540bde009ea5 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@@ -677,10 -677,6 +677,6 @@@ static const struct nla_policy entry_po [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] = { .type = NLA_U32 }, };
- static const struct nla_policy entry_list_policy[TCA_TAPRIO_SCHED_MAX + 1] = { - [TCA_TAPRIO_SCHED_ENTRY] = { .type = NLA_NESTED }, - }; - static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = { [TCA_TAPRIO_ATTR_PRIOMAP] = { .len = sizeof(struct tc_mqprio_qopt) @@@ -1195,8 -1191,7 +1191,8 @@@ unlock spin_unlock_bh(qdisc_lock(sch));
free_sched: - kfree(new_admin); + if (new_admin) + call_rcu(&new_admin->rcu, taprio_free_sched_cb);
return err; } diff --combined net/tls/tls_device.c index 43922d86e510,d184230665eb..a470df7ffcf9 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@@ -243,14 -243,14 +243,14 @@@ static void tls_append_frag(struct tls_ skb_frag_t *frag;
frag = &record->frags[record->num_frags - 1]; - if (frag->page.p == pfrag->page && - frag->page_offset + frag->size == pfrag->offset) { - frag->size += size; + if (skb_frag_page(frag) == pfrag->page && + skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) { + skb_frag_size_add(frag, size); } else { ++frag; - frag->page.p = pfrag->page; - frag->page_offset = pfrag->offset; - frag->size = size; + __skb_frag_set_page(frag, pfrag->page); + skb_frag_off_set(frag, pfrag->offset); + skb_frag_size_set(frag, size); ++record->num_frags; get_page(pfrag->page); } @@@ -301,8 -301,8 +301,8 @@@ static int tls_push_record(struct sock frag = &record->frags[i]; sg_unmark_end(&offload_ctx->sg_tx_data[i]); sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag), - frag->size, frag->page_offset); - sk_mem_charge(sk, frag->size); + skb_frag_size(frag), skb_frag_off(frag)); + sk_mem_charge(sk, skb_frag_size(frag)); get_page(skb_frag_page(frag)); } sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]); @@@ -324,7 -324,7 +324,7 @@@ static int tls_create_new_record(struc
frag = &record->frags[0]; __skb_frag_set_page(frag, pfrag->page); - frag->page_offset = pfrag->offset; + skb_frag_off_set(frag, pfrag->offset); skb_frag_size_set(frag, prepend_size);
get_page(pfrag->page); @@@ -373,9 -373,9 +373,9 @@@ static int tls_push_data(struct sock *s struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_prot_info *prot = &tls_ctx->prot_info; struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx); - int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST; int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE); struct tls_record_info *record = ctx->open_record; + int tls_push_record_flags; struct page_frag *pfrag; size_t orig_size = size; u32 max_open_record_len; @@@ -390,9 -390,6 +390,9 @@@ if (sk->sk_err) return -sk->sk_err;
+ flags |= MSG_SENDPAGE_DECRYPTED; + tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST; + timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); if (tls_is_partially_sent_record(tls_ctx)) { rc = tls_push_partial_record(sk, tls_ctx, flags); @@@ -579,9 -576,7 +579,9 @@@ void tls_device_write_space(struct soc gfp_t sk_allocation = sk->sk_allocation;
sk->sk_allocation = GFP_ATOMIC; - tls_push_partial_record(sk, ctx, MSG_DONTWAIT | MSG_NOSIGNAL); + tls_push_partial_record(sk, ctx, + MSG_DONTWAIT | MSG_NOSIGNAL | + MSG_SENDPAGE_DECRYPTED); sk->sk_allocation = sk_allocation; } }
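Taken together, the tls_device.c and tcp.c/tcp_output.c hunks keep TLS-offload data and unrelated data apart: tls_push_data() tags its writes with MSG_SENDPAGE_DECRYPTED, the TCP sendpage path records that in skb->decrypted, and every place that carves a new skb out of an existing one (tcp_fragment, tso_fragment, tcp_mtu_probe) calls skb_copy_decrypted() so the mark survives splits and mixed skbs cannot arise. A tiny userspace model of that invariant, with stand-in types:

#include <assert.h>
#include <stdbool.h>

struct sk_buff {
	bool decrypted;			/* stand-in for the real bitfield */
};

static void skb_copy_decrypted(struct sk_buff *to, const struct sk_buff *from)
{
	to->decrypted = from->decrypted;
}

/* hypothetical split: carve 'buff' out of 'skb', as tcp_fragment() does */
static void fragment(const struct sk_buff *skb, struct sk_buff *buff)
{
	skb_copy_decrypted(buff, skb);
}

int main(void)
{
	struct sk_buff skb = { .decrypted = true };
	struct sk_buff buff = { .decrypted = false };

	fragment(&skb, &buff);
	assert(buff.decrypted);		/* the mark survived the split */
	return 0;
}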