The following commit has been merged in the master branch:
commit c30eb0084783beafe602cbcf368367d2e4f24d17
Merge: 41cc40eee37048f315a5fa8e7b2df2ea5810a256 f25c0515c521375154c62c72447869f40218c861
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Thu Oct 28 11:54:18 2021 +1100
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
diff --combined Documentation/devicetree/bindings/vendor-prefixes.yaml index fe9615089acb,e2eb0c738f3a..a348fccf3d0a --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@@ -131,6 -131,8 +131,8 @@@ patternProperties description: Asahi Kasei Corp. "^asc,.*": description: All Sensors Corporation + "^asix,.*": + description: ASIX Electronics Corporation "^aspeed,.*": description: ASPEED Technology Inc. "^asus,.*": @@@ -191,8 -193,6 +193,8 @@@ description: B&R Industrial Automation GmbH "^bticino,.*": description: Bticino International + "^calamp,.*": + description: CalAmp Corp. "^calaosystems,.*": description: CALAO Systems SAS "^calxeda,.*": @@@ -337,8 -337,6 +339,8 @@@ description: EBV Elektronik "^eckelmann,.*": description: Eckelmann AG + "^edimax,.*": + description: EDIMAX Technology Co., Ltd "^edt,.*": description: Emerging Display Technologies "^eeti,.*": @@@ -357,8 -355,6 +359,8 @@@ description: Shenzhen Elida Technology Co., Ltd. "^elimo,.*": description: Elimo Engineering Ltd. + "^elpida,.*": + description: Elpida Memory, Inc. "^embest,.*": description: Shenzhen Embest Technology Co., Ltd. "^emlid,.*": @@@ -401,8 -397,6 +403,8 @@@ description: Exar Corporation "^excito,.*": description: Excito + "^exegin,.*": + description: Exegin Technologies Limited "^ezchip,.*": description: EZchip Semiconductor "^facebook,.*": @@@ -517,8 -511,6 +519,8 @@@ description: Hycon Technology Corp. "^hydis,.*": description: Hydis Technologies + "^hynix,.*": + description: SK Hynix Inc. "^hyundai,.*": description: Hyundai Technology "^i2se,.*": @@@ -587,8 -579,6 +589,8 @@@ description: JEDEC Solid State Technology Association "^jesurun,.*": description: Shenzhen Jesurun Electronics Business Dept. + "^jethome,.*": + description: JetHome (IP Sokolov P.A.) "^jianda,.*": description: Jiandangjing Technology Co., Ltd. "^kam,.*": @@@ -1114,8 -1104,6 +1116,8 @@@ description: SparkFun Electronics "^sprd,.*": description: Spreadtrum Communications Inc. + "^ssi,.*": + description: SSI Computer Corp "^sst,.*": description: Silicon Storage Technology, Inc. "^sstar,.*": diff --combined MAINTAINERS index 99e5c7702cca,975086c5345d..524ef8e72339 --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -334,7 -334,7 +334,7 @@@ F: drivers/platform/x86/acer-wmi.
ACPI M: "Rafael J. Wysocki" rafael@kernel.org -M: Len Brown lenb@kernel.org +R: Len Brown lenb@kernel.org L: linux-acpi@vger.kernel.org S: Supported W: https://01.org/linux-acpi @@@ -355,7 -355,7 +355,7 @@@ F: tools/power/acpi
ACPI APEI M: "Rafael J. Wysocki" rafael@kernel.org -M: Len Brown lenb@kernel.org +R: Len Brown lenb@kernel.org R: James Morse james.morse@arm.com R: Tony Luck tony.luck@intel.com R: Borislav Petkov bp@alien8.de @@@ -378,6 -378,14 +378,6 @@@ F: drivers/acpi/acpica F: include/acpi/ F: tools/power/acpi/
-ACPI FAN DRIVER -M: Zhang Rui rui.zhang@intel.com -L: linux-acpi@vger.kernel.org -S: Supported -W: https://01.org/linux-acpi -B: https://bugzilla.kernel.org -F: drivers/acpi/fan.c - ACPI FOR ARM64 (ACPI/arm64) M: Lorenzo Pieralisi lorenzo.pieralisi@arm.com M: Hanjun Guo guohanjun@huawei.com @@@ -414,6 -422,14 +414,6 @@@ W: https://01.org/linux-acp B: https://bugzilla.kernel.org F: drivers/acpi/*thermal*
-ACPI VIDEO DRIVER -M: Zhang Rui rui.zhang@intel.com -L: linux-acpi@vger.kernel.org -S: Supported -W: https://01.org/linux-acpi -B: https://bugzilla.kernel.org -F: drivers/acpi/acpi_video.c - ACPI VIOT DRIVER M: Jean-Philippe Brucker jean-philippe@linaro.org L: linux-acpi@vger.kernel.org @@@ -1266,13 -1282,6 +1266,13 @@@ S: Maintaine F: Documentation/devicetree/bindings/iommu/apple,dart.yaml F: drivers/iommu/apple-dart.c
+APPLE PCIE CONTROLLER DRIVER +M: Alyssa Rosenzweig alyssa@rosenzweig.io +M: Marc Zyngier maz@kernel.org +L: linux-pci@vger.kernel.org +S: Maintained +F: drivers/pci/controller/pcie-apple.c + APPLE SMC DRIVER M: Henrik Rydberg rydberg@bitmath.org L: linux-hwmon@vger.kernel.org @@@ -1713,14 -1722,10 +1713,14 @@@ B: https://github.com/AsahiLinux/linux/ C: irc://irc.oftc.net/asahi-dev T: git https://github.com/AsahiLinux/linux.git F: Documentation/devicetree/bindings/arm/apple.yaml +F: Documentation/devicetree/bindings/i2c/apple,i2c.yaml F: Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml F: Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml F: arch/arm64/boot/dts/apple/ +F: drivers/i2c/busses/i2c-pasemi-core.c +F: drivers/i2c/busses/i2c-pasemi-platform.c F: drivers/irqchip/irq-apple-aic.c +F: drivers/pinctrl/pinctrl-apple-gpio.c F: include/dt-bindings/interrupt-controller/apple-aic.h F: include/dt-bindings/pinctrl/apple.h
@@@ -2307,14 -2312,6 +2307,14 @@@ F: arch/arm/boot/dts/nuvoton-wpcm450 F: arch/arm/mach-npcm/wpcm450.c F: drivers/*/*wpcm*
+ARM/NXP S32G ARCHITECTURE +M: Chester Lin clin@suse.com +R: Andreas Färber afaerber@suse.de +R: Matthias Brugger mbrugger@suse.com +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: arch/arm64/boot/dts/freescale/s32g*.dts* + ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT L: openmoko-kernel@lists.openmoko.org (subscribers-only) S: Orphan @@@ -2902,6 -2899,12 +2902,12 @@@ S: Maintaine F: Documentation/hwmon/asc7621.rst F: drivers/hwmon/asc7621.c
+ ASIX AX88796C SPI ETHERNET ADAPTER + M: Łukasz Stelmach l.stelmach@samsung.com + S: Maintained + F: Documentation/devicetree/bindings/net/asix,ax88796c.yaml + F: drivers/net/ethernet/asix/ax88796c_* + ASPEED PINCTRL DRIVERS M: Andrew Jeffery andrew@aj.id.au L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers) @@@ -4439,7 -4442,7 +4445,7 @@@ N: cros_e N: cros-ec
CHRONTEL CH7322 CEC DRIVER -M: Jeff Chase jnchase@google.com +M: Joe Tessler jrt@google.com L: linux-media@vger.kernel.org S: Maintained T: git git://linuxtv.org/media_tree.git @@@ -5461,19 -5464,6 +5467,19 @@@ F: include/net/devlink. F: include/uapi/linux/devlink.h F: net/core/devlink.c
+DH ELECTRONICS IMX6 DHCOM BOARD SUPPORT +M: Christoph Niedermaier cniedermaier@dh-electronics.com +L: kernel@dh-electronics.com +S: Maintained +F: arch/arm/boot/dts/imx6*-dhcom-* + +DH ELECTRONICS STM32MP1 DHCOM/DHCOR BOARD SUPPORT +M: Marek Vasut marex@denx.de +L: kernel@dh-electronics.com +S: Maintained +F: arch/arm/boot/dts/stm32mp1*-dhcom-* +F: arch/arm/boot/dts/stm32mp1*-dhcor-* + DIALOG SEMICONDUCTOR DRIVERS M: Support Opensource support.opensource@diasemi.com S: Supported @@@ -6308,7 -6298,6 +6314,7 @@@ L: linux-tegra@vger.kernel.or S: Supported T: git git://anongit.freedesktop.org/tegra/linux.git F: Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt +F: Documentation/devicetree/bindings/gpu/host1x/ F: drivers/gpu/drm/tegra/ F: drivers/gpu/host1x/ F: include/linux/host1x.h @@@ -7029,7 -7018,6 +7035,6 @@@ F: drivers/net/mdio/fwnode_mdio. F: drivers/net/mdio/of_mdio.c F: drivers/net/pcs/ F: drivers/net/phy/ - F: drivers/of/of_net.c F: include/dt-bindings/net/qca-ar803x.h F: include/linux/*mdio*.h F: include/linux/mdio/*.h @@@ -7041,6 -7029,7 +7046,7 @@@ F: include/linux/platform_data/mdio-gpi F: include/trace/events/mdio.h F: include/uapi/linux/mdio.h F: include/uapi/linux/mii.h + F: net/core/of_net.c
EXFAT FILE SYSTEM M: Namjae Jeon linkinjeon@kernel.org @@@ -8215,7 -8204,7 +8221,7 @@@ T: git git://linuxtv.org/anttip/media_t F: drivers/media/usb/hackrf/
HANTRO VPU CODEC DRIVER -M: Ezequiel Garcia ezequiel@collabora.com +M: Ezequiel Garcia ezequiel@vanguardiasur.com.ar M: Philipp Zabel p.zabel@pengutronix.de L: linux-media@vger.kernel.org L: linux-rockchip@lists.infradead.org @@@ -8683,12 -8672,6 +8689,12 @@@ S: Maintaine T: git git://linuxtv.org/media_tree.git F: drivers/media/i2c/hi556.c
+HYNIX HI846 SENSOR DRIVER +M: Martin Kepplinger martin.kepplinger@puri.sm +L: linux-media@vger.kernel.org +S: Maintained +F: drivers/media/i2c/hi846.c + Hyper-V/Azure CORE AND DRIVERS M: "K. Y. Srinivasan" kys@microsoft.com M: Haiyang Zhang haiyangz@microsoft.com @@@ -10024,7 -10007,6 +10030,7 @@@ JC42.4 TEMPERATURE SENSOR DRIVE M: Guenter Roeck linux@roeck-us.net L: linux-hwmon@vger.kernel.org S: Maintained +F: Documentation/devicetree/bindings/hwmon/jedec,jc42.yaml F: Documentation/hwmon/jc42.rst F: drivers/hwmon/jc42.c
@@@ -10064,7 -10046,6 +10070,7 @@@ F: include/linux/jbd2. JPU V4L2 MEM2MEM DRIVER FOR RENESAS M: Mikhail Ulyanov mikhail.ulyanov@cogentembedded.com L: linux-media@vger.kernel.org +L: linux-renesas-soc@vger.kernel.org S: Maintained F: drivers/media/platform/rcar_jpu.c
@@@ -10919,7 -10900,7 +10925,7 @@@ LM90 HARDWARE MONITOR DRIVE M: Jean Delvare jdelvare@suse.com L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/devicetree/bindings/hwmon/lm90.txt +F: Documentation/devicetree/bindings/hwmon/national,lm90.yaml F: Documentation/hwmon/lm90.rst F: drivers/hwmon/lm90.c F: include/dt-bindings/thermal/lm90.h @@@ -11303,6 -11284,7 +11309,6 @@@ F: Documentation/networking/device_driv F: drivers/net/ethernet/marvell/octeontx2/af/
MARVELL PRESTERA ETHERNET SWITCH DRIVER -M: Vadym Kochan vkochan@marvell.com M: Taras Chornyi tchornyi@marvell.com S: Supported W: https://github.com/Marvell-switching/switchdev-prestera @@@ -11703,7 -11685,6 +11709,7 @@@ T: git git://linuxtv.org/media_tree.gi F: Documentation/devicetree/bindings/media/renesas,csi2.yaml F: Documentation/devicetree/bindings/media/renesas,isp.yaml F: Documentation/devicetree/bindings/media/renesas,vin.yaml +F: drivers/media/platform/rcar-isp.c F: drivers/media/platform/rcar-vin/
MEDIA DRIVERS FOR RENESAS - VSP1 @@@ -11849,7 -11830,9 +11855,9 @@@ F: drivers/mmc/host/mtk-sd. MEDIATEK MT76 WIRELESS LAN DRIVER M: Felix Fietkau nbd@nbd.name M: Lorenzo Bianconi lorenzo.bianconi83@gmail.com - R: Ryder Lee ryder.lee@mediatek.com + M: Ryder Lee ryder.lee@mediatek.com + R: Shayne Chen shayne.chen@mediatek.com + R: Sean Wang sean.wang@mediatek.com L: linux-wireless@vger.kernel.org S: Maintained F: drivers/net/wireless/mediatek/mt76/ @@@ -11873,12 -11856,6 +11881,12 @@@ S: Maintaine F: Documentation/devicetree/bindings/i2c/i2c-mt7621.txt F: drivers/i2c/busses/i2c-mt7621.c
+MEDIATEK MT7621 PCIE CONTROLLER DRIVER +M: Sergio Paracuellos sergio.paracuellos@gmail.com +S: Maintained +F: Documentation/devicetree/bindings/pci/mediatek,mt7621-pcie.yaml +F: drivers/pci/controller/pcie-mt7621.c + MEDIATEK MT7621 PHY PCI DRIVER M: Sergio Paracuellos sergio.paracuellos@gmail.com S: Maintained @@@ -11902,14 -11879,6 +11910,14 @@@ M: Sean Wang <sean.wang@mediatek.com S: Maintained F: drivers/char/hw_random/mtk-rng.c
+MEDIATEK SMI DRIVER +M: Yong Wu yong.wu@mediatek.com +L: linux-mediatek@lists.infradead.org (moderated for non-subscribers) +S: Supported +F: Documentation/devicetree/bindings/memory-controllers/mediatek,smi* +F: drivers/memory/mtk-smi.c +F: include/soc/mediatek/smi.h + MEDIATEK SWITCH DRIVER M: Sean Wang sean.wang@mediatek.com M: Landen Chao Landen.Chao@mediatek.com @@@ -12793,7 -12762,6 +12801,7 @@@ M: Laurent Pinchart <laurent.pinchart@i L: linux-media@vger.kernel.org S: Maintained T: git git://linuxtv.org/media_tree.git +F: Documentation/devicetree/bindings/media/i2c/aptina,mt9p031.yaml F: drivers/media/i2c/mt9p031.c F: include/media/i2c/mt9p031.h
@@@ -13088,6 -13056,7 +13096,7 @@@ F: include/linux/dsa F: include/linux/platform_data/dsa.h F: include/net/dsa.h F: net/dsa/ + F: tools/testing/selftests/drivers/net/dsa/
NETWORKING [GENERAL] M: "David S. Miller" davem@davemloft.net @@@ -13300,12 -13269,6 +13309,12 @@@ W: http://www.netlab.is.tsukuba.ac.jp/~ F: Documentation/scsi/NinjaSCSI.rst F: drivers/scsi/nsp32*
+NINTENDO HID DRIVER +M: Daniel J. Ogorchock djogorchock@gmail.com +L: linux-input@vger.kernel.org +S: Maintained +F: drivers/hid/hid-nintendo* + NIOS2 ARCHITECTURE M: Dinh Nguyen dinguyen@kernel.org S: Maintained @@@ -13852,13 -13815,6 +13861,13 @@@ S: Maintaine T: git git://linuxtv.org/media_tree.git F: drivers/media/i2c/ov13858.c
+OMNIVISION OV13B10 SENSOR DRIVER +M: Arec Kao arec.kao@intel.com +L: linux-media@vger.kernel.org +S: Maintained +T: git git://linuxtv.org/media_tree.git +F: drivers/media/i2c/ov13b10.c + OMNIVISION OV2680 SENSOR DRIVER M: Rui Miguel Silva rmfrfs@gmail.com L: linux-media@vger.kernel.org @@@ -14655,15 -14611,7 +14664,15 @@@ M: Stanimir Varbanov <svarbanov@mm-sol. L: linux-pci@vger.kernel.org L: linux-arm-msm@vger.kernel.org S: Maintained -F: drivers/pci/controller/dwc/*qcom* +F: drivers/pci/controller/dwc/pcie-qcom.c + +PCIE ENDPOINT DRIVER FOR QUALCOMM +M: Manivannan Sadhasivam manivannan.sadhasivam@linaro.org +L: linux-pci@vger.kernel.org +L: linux-arm-msm@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml +F: drivers/pci/controller/dwc/pcie-qcom-ep.c
PCIE DRIVER FOR ROCKCHIP M: Shawn Lin shawn.lin@rock-chips.com @@@ -14920,6 -14868,13 +14929,6 @@@ L: linux-omap@vger.kernel.or S: Maintained F: drivers/pinctrl/pinctrl-single.c
-PIN CONTROLLER - ST SPEAR -M: Viresh Kumar vireshk@kernel.org -L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -S: Maintained -W: http://www.st.com/spear -F: drivers/pinctrl/spear/ - PKTCDVD DRIVER M: linux-block@vger.kernel.org S: Orphan @@@ -14968,6 -14923,7 +14977,6 @@@ S: Maintaine W: http://hwmon.wiki.kernel.org/ W: http://www.roeck-us.net/linux/drivers/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git -F: Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt F: Documentation/devicetree/bindings/hwmon/ltc2978.txt F: Documentation/devicetree/bindings/hwmon/max31785.txt F: Documentation/hwmon/adm1275.rst @@@ -15516,6 -15472,7 +15525,7 @@@ M: ath9k-devel@qca.qualcomm.co L: linux-wireless@vger.kernel.org S: Supported W: https://wireless.wiki.kernel.org/en/users/Drivers/ath9k + F: Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml F: drivers/net/wireless/ath/ath9k/
QUALCOMM CAMERA SUBSYSTEM DRIVER @@@ -15937,6 -15894,12 +15947,12 @@@ L: linux-wireless@vger.kernel.or S: Maintained F: drivers/net/wireless/realtek/rtw88/
+ REALTEK WIRELESS DRIVER (rtw89) + M: Ping-Ke Shih pkshih@realtek.com + L: linux-wireless@vger.kernel.org + S: Maintained + F: drivers/net/wireless/realtek/rtw89/ + REDPINE WIRELESS DRIVER M: Amitkumar Karwar amitkarwar@gmail.com M: Siva Rebbagondla siva8118@gmail.com @@@ -16171,7 -16134,7 +16187,7 @@@ F: include/uapi/linux/rkisp1-config.
ROCKCHIP RASTER 2D GRAPHIC ACCELERATION UNIT DRIVER M: Jacob Chen jacob-chen@iotwrt.com -M: Ezequiel Garcia ezequiel@collabora.com +M: Ezequiel Garcia ezequiel@vanguardiasur.com.ar L: linux-media@vger.kernel.org L: linux-rockchip@lists.infradead.org S: Maintained @@@ -16179,7 -16142,7 +16195,7 @@@ F: Documentation/devicetree/bindings/me F: drivers/media/platform/rockchip/rga/
ROCKCHIP VIDEO DECODER DRIVER -M: Ezequiel Garcia ezequiel@collabora.com +M: Ezequiel Garcia ezequiel@vanguardiasur.com.ar L: linux-media@vger.kernel.org L: linux-rockchip@lists.infradead.org S: Maintained @@@ -17684,17 -17647,21 +17700,17 @@@ W: https://github.com/linux-speakup/spe B: https://github.com/linux-speakup/speakup/issues F: drivers/accessibility/speakup/
-SPEAR CLOCK FRAMEWORK SUPPORT -M: Viresh Kumar vireshk@kernel.org -L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -S: Maintained -W: http://www.st.com/spear -F: drivers/clk/spear/ - -SPEAR PLATFORM SUPPORT +SPEAR PLATFORM/CLOCK/PINCTRL SUPPORT M: Viresh Kumar vireshk@kernel.org M: Shiraz Hashim shiraz.linux.kernel@gmail.com +M: soc@kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained W: http://www.st.com/spear F: arch/arm/boot/dts/spear* F: arch/arm/mach-spear/ +F: drivers/clk/spear/ +F: drivers/pinctrl/spear/
SPI NOR SUBSYSTEM M: Tudor Ambarus tudor.ambarus@microchip.com @@@ -18611,9 -18578,7 +18627,9 @@@ L: linux-pm@vger.kernel.or S: Supported Q: https://patchwork.kernel.org/project/linux-pm/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git thermal +F: Documentation/ABI/testing/sysfs-class-thermal F: Documentation/devicetree/bindings/thermal/ +F: Documentation/driver-api/thermal/ F: drivers/thermal/ F: include/linux/cpu_cooling.h F: include/linux/thermal.h @@@ -20387,7 -20352,6 +20403,7 @@@ X86 ARCHITECTURE (32-BIT AND 64-BIT M: Thomas Gleixner tglx@linutronix.de M: Ingo Molnar mingo@redhat.com M: Borislav Petkov bp@alien8.de +M: Dave Hansen dave.hansen@linux.intel.com M: x86@kernel.org R: "H. Peter Anvin" hpa@zytor.com L: linux-kernel@vger.kernel.org diff --combined drivers/infiniband/hw/mlx4/main.c index 38742e233915,f3fa2fe6a88a..ceca05982f61 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@@ -2105,10 -2105,10 +2105,10 @@@ mlx4_ib_alloc_hw_device_stats(struct ib struct mlx4_ib_dev *dev = to_mdev(ibdev); struct mlx4_ib_diag_counters *diag = dev->diag_counters;
- if (!diag[0].name) + if (!diag[0].descs) return NULL;
- return rdma_alloc_hw_stats_struct(diag[0].name, diag[0].num_counters, + return rdma_alloc_hw_stats_struct(diag[0].descs, diag[0].num_counters, RDMA_HW_STATS_DEFAULT_LIFESPAN); }
@@@ -2118,10 -2118,10 +2118,10 @@@ mlx4_ib_alloc_hw_port_stats(struct ib_d struct mlx4_ib_dev *dev = to_mdev(ibdev); struct mlx4_ib_diag_counters *diag = dev->diag_counters;
- if (!diag[1].name) + if (!diag[1].descs) return NULL;
- return rdma_alloc_hw_stats_struct(diag[1].name, diag[1].num_counters, + return rdma_alloc_hw_stats_struct(diag[1].descs, diag[1].num_counters, RDMA_HW_STATS_DEFAULT_LIFESPAN); }
@@@ -2151,8 -2151,10 +2151,8 @@@ static int mlx4_ib_get_hw_stats(struct }
static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev, - const char ***name, - u32 **offset, - u32 *num, - bool port) + struct rdma_stat_desc **pdescs, + u32 **offset, u32 *num, bool port) { u32 num_counters;
@@@ -2164,46 -2166,46 +2164,46 @@@ if (!port) num_counters += ARRAY_SIZE(diag_device_only);
- *name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL); - if (!*name) + *pdescs = kcalloc(num_counters, sizeof(struct rdma_stat_desc), + GFP_KERNEL); + if (!*pdescs) return -ENOMEM;
*offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL); if (!*offset) - goto err_name; + goto err;
*num = num_counters;
return 0;
-err_name: - kfree(*name); +err: + kfree(*pdescs); return -ENOMEM; }
static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev, - const char **name, - u32 *offset, - bool port) + struct rdma_stat_desc *descs, + u32 *offset, bool port) { int i; int j;
for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) { - name[i] = diag_basic[i].name; + descs[i].name = diag_basic[i].name; offset[i] = diag_basic[i].offset; }
if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) { for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) { - name[j] = diag_ext[i].name; + descs[j].name = diag_ext[i].name; offset[j] = diag_ext[i].offset; } }
if (!port) { for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) { - name[j] = diag_device_only[i].name; + descs[j].name = diag_device_only[i].name; offset[j] = diag_device_only[i].offset; } } @@@ -2231,13 -2233,13 +2231,13 @@@ static int mlx4_ib_alloc_diag_counters( if (i && !per_port) continue;
- ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name, + ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].descs, &diag[i].offset, &diag[i].num_counters, i); if (ret) goto err_alloc;
- mlx4_ib_fill_diag_counters(ibdev, diag[i].name, + mlx4_ib_fill_diag_counters(ibdev, diag[i].descs, diag[i].offset, i); }
@@@ -2247,7 -2249,7 +2247,7 @@@
err_alloc: if (i) { - kfree(diag[i - 1].name); + kfree(diag[i - 1].descs); kfree(diag[i - 1].offset); }
@@@ -2260,7 -2262,7 +2260,7 @@@ static void mlx4_ib_diag_cleanup(struc
for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) { kfree(ibdev->diag_counters[i].offset); - kfree(ibdev->diag_counters[i].name); + kfree(ibdev->diag_counters[i].descs); } }
@@@ -2273,7 -2275,7 +2273,7 @@@ static void mlx4_ib_update_qps(struct m u64 release_mac = MLX4_IB_INVALID_MAC; struct mlx4_ib_qp *qp;
- new_smac = mlx4_mac_to_u64(dev->dev_addr); + new_smac = ether_addr_to_u64(dev->dev_addr); atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
/* no need for update QP1 and mac registration in non-SRIOV */ diff --combined drivers/infiniband/hw/mlx4/qp.c index 3a1a4ac9dd33,aea4182f33a4..b17d6ebc5b70 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@@ -1099,10 -1099,8 +1099,10 @@@ static int create_qp_common(struct ib_p if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) qp->flags |= MLX4_IB_QP_NETIF; - else + else { + err = -EINVAL; goto err; + } }
err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp); @@@ -1855,7 -1853,7 +1855,7 @@@ static int mlx4_set_path(struct mlx4_ib u16 vlan_id, u8 *smac) { return _mlx4_set_path(dev, &qp->ah_attr, - mlx4_mac_to_u64(smac), + ether_addr_to_u64(smac), vlan_id, path, &mqp->pri, port); } diff --combined drivers/infiniband/hw/mlx5/odp.c index 31596d8c5212,81147d774dd2..91eb615b89ee --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@@ -430,7 -430,7 +430,7 @@@ static struct mlx5_ib_mr *implicit_get_ mr->umem = &odp->umem; mr->ibmr.lkey = mr->mmkey.key; mr->ibmr.rkey = mr->mmkey.key; - mr->mmkey.iova = idx * MLX5_IMR_MTT_SIZE; + mr->ibmr.iova = idx * MLX5_IMR_MTT_SIZE; mr->parent = imr; odp->private = mr;
@@@ -500,7 -500,7 +500,7 @@@ struct mlx5_ib_mr *mlx5_ib_alloc_implic }
imr->ibmr.pd = &pd->ibpd; - imr->mmkey.iova = 0; + imr->ibmr.iova = 0; imr->umem = &umem_odp->umem; imr->ibmr.lkey = imr->mmkey.key; imr->ibmr.rkey = imr->mmkey.key; @@@ -738,7 -738,7 +738,7 @@@ static int pagefault_mr(struct mlx5_ib_ { struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
- if (unlikely(io_virt < mr->mmkey.iova)) + if (unlikely(io_virt < mr->ibmr.iova)) return -EFAULT;
if (mr->umem->is_dmabuf) @@@ -747,7 -747,7 +747,7 @@@ if (!odp->is_implicit_odp) { u64 user_va;
- if (check_add_overflow(io_virt - mr->mmkey.iova, + if (check_add_overflow(io_virt - mr->ibmr.iova, (u64)odp->umem.address, &user_va)) return -EFAULT; if (unlikely(user_va >= ib_umem_end(odp) || @@@ -788,7 -788,7 +788,7 @@@ struct pf_frame int depth; };
-static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key) +static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key) { if (!mmkey) return false; @@@ -797,6 -797,21 +797,6 @@@ return mmkey->key == key; }
-static int get_indirect_num_descs(struct mlx5_core_mkey *mmkey) -{ - struct mlx5_ib_mw *mw; - struct mlx5_ib_devx_mr *devx_mr; - - if (mmkey->type == MLX5_MKEY_MW) { - mw = container_of(mmkey, struct mlx5_ib_mw, mmkey); - return mw->ndescs; - } - - devx_mr = container_of(mmkey, struct mlx5_ib_devx_mr, - mmkey); - return devx_mr->ndescs; -} - /* * Handle a single data segment in a page-fault WQE or RDMA region. * @@@ -816,11 -831,12 +816,11 @@@ static int pagefault_single_data_segmen { int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0; struct pf_frame *head = NULL, *frame; - struct mlx5_core_mkey *mmkey; + struct mlx5_ib_mkey *mmkey; struct mlx5_ib_mr *mr; struct mlx5_klm *pklm; u32 *out = NULL; size_t offset; - int ndescs;
io_virt += *bytes_committed; bcnt -= *bytes_committed; @@@ -869,6 -885,8 +869,6 @@@ next_mr
case MLX5_MKEY_MW: case MLX5_MKEY_INDIRECT_DEVX: - ndescs = get_indirect_num_descs(mmkey); - if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) { mlx5_ib_dbg(dev, "indirection level exceeded\n"); ret = -EFAULT; @@@ -876,7 -894,7 +876,7 @@@ }
outlen = MLX5_ST_SZ_BYTES(query_mkey_out) + - sizeof(*pklm) * (ndescs - 2); + sizeof(*pklm) * (mmkey->ndescs - 2);
if (outlen > cur_outlen) { kfree(out); @@@ -891,14 -909,14 +891,14 @@@ pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out, bsf0_klm0_pas_mtt0_1);
- ret = mlx5_core_query_mkey(dev->mdev, mmkey, out, outlen); + ret = mlx5_core_query_mkey(dev->mdev, mmkey->key, out, outlen); if (ret) goto end;
offset = io_virt - MLX5_GET64(query_mkey_out, out, memory_key_mkey_entry.start_addr);
- for (i = 0; bcnt && i < ndescs; i++, pklm++) { + for (i = 0; bcnt && i < mmkey->ndescs; i++, pklm++) { if (offset >= be32_to_cpu(pklm->bcount)) { offset -= be32_to_cpu(pklm->bcount); continue; @@@ -1541,6 -1559,7 +1541,7 @@@ int mlx5r_odp_create_eq(struct mlx5_ib_
eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int; param = (struct mlx5_eq_param) { + .irq_index = MLX5_IRQ_EQ_CTRL, .nent = MLX5_IB_NUM_PF_EQE, }; param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT; @@@ -1685,31 -1704,25 +1686,31 @@@ get_prefetchable_mr(struct ib_pd *pd, e u32 lkey) { struct mlx5_ib_dev *dev = to_mdev(pd->device); - struct mlx5_core_mkey *mmkey; struct mlx5_ib_mr *mr = NULL; + struct mlx5_ib_mkey *mmkey;
xa_lock(&dev->odp_mkeys); mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey)); - if (!mmkey || mmkey->key != lkey || mmkey->type != MLX5_MKEY_MR) + if (!mmkey || mmkey->key != lkey) { + mr = ERR_PTR(-ENOENT); goto end; + } + if (mmkey->type != MLX5_MKEY_MR) { + mr = ERR_PTR(-EINVAL); + goto end; + }
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
if (mr->ibmr.pd != pd) { - mr = NULL; + mr = ERR_PTR(-EPERM); goto end; }
/* prefetch with write-access must be supported by the MR */ if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE && !mr->umem->writable) { - mr = NULL; + mr = ERR_PTR(-EPERM); goto end; }
@@@ -1741,7 -1754,7 +1742,7 @@@ static void mlx5_ib_prefetch_mr_work(st destroy_prefetch_work(work); }
-static bool init_prefetch_work(struct ib_pd *pd, +static int init_prefetch_work(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice, u32 pf_flags, struct prefetch_mr_work *work, struct ib_sge *sg_list, u32 num_sge) @@@ -1752,19 -1765,17 +1753,19 @@@ work->pf_flags = pf_flags;
for (i = 0; i < num_sge; ++i) { - work->frags[i].io_virt = sg_list[i].addr; - work->frags[i].length = sg_list[i].length; - work->frags[i].mr = - get_prefetchable_mr(pd, advice, sg_list[i].lkey); - if (!work->frags[i].mr) { + struct mlx5_ib_mr *mr; + + mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey); + if (IS_ERR(mr)) { work->num_sge = i; - return false; + return PTR_ERR(mr); } + work->frags[i].io_virt = sg_list[i].addr; + work->frags[i].length = sg_list[i].length; + work->frags[i].mr = mr; } work->num_sge = num_sge; - return true; + return 0; }
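The mlx5 ODP prefetch hunks above switch get_prefetchable_mr() from returning NULL to returning ERR_PTR(-ENOENT/-EINVAL/-EPERM), so callers can propagate a specific errno instead of a blanket -EINVAL. A minimal sketch of that idiom follows; the ERR_PTR/IS_ERR/PTR_ERR macros are re-implemented here only so the example builds outside the kernel tree, and lookup_mr() is a made-up stand-in.

/* Illustrative userspace sketch of the ERR_PTR error-return idiom. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err)  ((void *)(long)(err))
#define PTR_ERR(ptr)  ((long)(ptr))
#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct mr { int writable; };

static struct mr *lookup_mr(int key, int want_write)
{
	static struct mr registered = { .writable = 0 };

	if (key != 42)
		return ERR_PTR(-ENOENT);   /* no such key: say so precisely */
	if (want_write && !registered.writable)
		return ERR_PTR(-EPERM);    /* found, but write access not allowed */
	return &registered;
}

int main(void)
{
	struct mr *mr = lookup_mr(42, 1);

	if (IS_ERR(mr)) {
		printf("lookup failed: %ld\n", PTR_ERR(mr));  /* -EPERM here */
		return (int)-PTR_ERR(mr);
	}
	return 0;
}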
static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd, @@@ -1780,8 -1791,8 +1781,8 @@@ struct mlx5_ib_mr *mr;
mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey); - if (!mr) - return -ENOENT; + if (IS_ERR(mr)) + return PTR_ERR(mr); ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length, &bytes_mapped, pf_flags); if (ret < 0) { @@@ -1801,7 -1812,6 +1802,7 @@@ int mlx5_ib_advise_mr_prefetch(struct i { u32 pf_flags = 0; struct prefetch_mr_work *work; + int rc;
if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH) pf_flags |= MLX5_PF_FLAGS_DOWNGRADE; @@@ -1817,10 -1827,9 +1818,10 @@@ if (!work) return -ENOMEM;
- if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) { + rc = init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge); + if (rc) { destroy_prefetch_work(work); - return -EINVAL; + return rc; } queue_work(system_unbound_wq, &work->work); return 0; diff --combined drivers/infiniband/hw/qedr/main.c index 8b5291d424d2,dc203f3d0f25..65ce6d0f1885 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@@ -228,6 -228,7 +228,6 @@@ static const struct ib_device_ops qedr_ .query_srq = qedr_query_srq, .reg_user_mr = qedr_reg_user_mr, .req_notify_cq = qedr_arm_cq, - .resize_cq = qedr_resize_cq,
INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq), @@@ -271,7 -272,7 +271,7 @@@ static int qedr_register_device(struct static int qedr_alloc_mem_sb(struct qedr_dev *dev, struct qed_sb_info *sb_info, u16 sb_id) { - struct status_block_e4 *sb_virt; + struct status_block *sb_virt; dma_addr_t sb_phys; int rc;
diff --combined drivers/net/ethernet/chelsio/cxgb3/common.h index d115ec93296d,a309016f7f8c..ecd025dda8d6 --- a/drivers/net/ethernet/chelsio/cxgb3/common.h +++ b/drivers/net/ethernet/chelsio/cxgb3/common.h @@@ -676,6 -676,8 +676,6 @@@ void t3_link_changed(struct adapter *ad void t3_link_fault(struct adapter *adapter, int port_id); int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc); const struct adapter_info *t3_get_adapter_info(unsigned int board_id); -int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data); -int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data); int t3_seeprom_wp(struct adapter *adapter, int enable); int t3_get_tp_version(struct adapter *adapter, u32 *vers); int t3_check_tpsram_version(struct adapter *adapter); @@@ -708,7 -710,7 +708,7 @@@ int t3_mac_enable(struct cmac *mac, in int t3_mac_disable(struct cmac *mac, int which); int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu); int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev); - int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]); + int t3_mac_set_address(struct cmac *mac, unsigned int idx, const u8 addr[6]); int t3_mac_set_num_ucast(struct cmac *mac, int n); const struct mac_stats *t3_mac_update_stats(struct cmac *mac); int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc); diff --combined drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index e185f5f24849,9cf9e33664e4..bfffcaeee624 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@@ -2036,16 -2036,20 +2036,16 @@@ static int get_eeprom(struct net_devic { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; - int i, err = 0; - - u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL); - if (!buf) - return -ENOMEM; + int cnt;
e->magic = EEPROM_MAGIC; - for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4) - err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]); + cnt = pci_read_vpd(adapter->pdev, e->offset, e->len, data); + if (cnt < 0) + return cnt;
- if (!err) - memcpy(data, buf + e->offset, e->len); - kfree(buf); - return err; + e->len = cnt; + + return 0; }
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, @@@ -2054,6 -2058,7 +2054,6 @@@ struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; u32 aligned_offset, aligned_len; - __le32 *p; u8 *buf; int err;
@@@ -2067,9 -2072,12 +2067,9 @@@ buf = kmalloc(aligned_len, GFP_KERNEL); if (!buf) return -ENOMEM; - err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf); - if (!err && aligned_len > 4) - err = t3_seeprom_read(adapter, - aligned_offset + aligned_len - 4, - (__le32 *) & buf[aligned_len - 4]); - if (err) + err = pci_read_vpd(adapter->pdev, aligned_offset, aligned_len, + buf); + if (err < 0) goto out; memcpy(buf + (eeprom->offset & 3), data, eeprom->len); } else @@@ -2079,13 -2087,17 +2079,13 @@@ if (err) goto out;
- for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) { - err = t3_seeprom_write(adapter, aligned_offset, *p); - aligned_offset += 4; - } - - if (!err) + err = pci_write_vpd(adapter->pdev, aligned_offset, aligned_len, buf); + if (err >= 0) err = t3_seeprom_wp(adapter, 1); out: if (buf != data) kfree(buf); - return err; + return err < 0 ? err : 0; }
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) @@@ -2574,7 -2586,7 +2574,7 @@@ static int cxgb_set_mac_addr(struct net if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr); if (offload_running(adapter)) write_smt_entry(adapter, pi->port_id); diff --combined drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index cb85c2f21525,53feac8da503..da41eee2f25c --- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@@ -29,6 -29,7 +29,7 @@@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ + #include <linux/etherdevice.h> #include "common.h" #include "regs.h" #include "sge_defs.h" @@@ -595,9 -596,80 +596,9 @@@ struct t3_vpd u32 pad; /* for multiple-of-4 sizing and alignment */ };
-#define EEPROM_MAX_POLL 40 #define EEPROM_STAT_ADDR 0x4000 #define VPD_BASE 0xc00
-/** - * t3_seeprom_read - read a VPD EEPROM location - * @adapter: adapter to read - * @addr: EEPROM address - * @data: where to store the read data - * - * Read a 32-bit word from a location in VPD EEPROM using the card's PCI - * VPD ROM capability. A zero is written to the flag bit when the - * address is written to the control register. The hardware device will - * set the flag to 1 when 4 bytes have been read into the data register. - */ -int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data) -{ - u16 val; - int attempts = EEPROM_MAX_POLL; - u32 v; - unsigned int base = adapter->params.pci.vpd_cap_addr; - - if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3)) - return -EINVAL; - - pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr); - do { - udelay(10); - pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val); - } while (!(val & PCI_VPD_ADDR_F) && --attempts); - - if (!(val & PCI_VPD_ADDR_F)) { - CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr); - return -EIO; - } - pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v); - *data = cpu_to_le32(v); - return 0; -} - -/** - * t3_seeprom_write - write a VPD EEPROM location - * @adapter: adapter to write - * @addr: EEPROM address - * @data: value to write - * - * Write a 32-bit word to a location in VPD EEPROM using the card's PCI - * VPD ROM capability. - */ -int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data) -{ - u16 val; - int attempts = EEPROM_MAX_POLL; - unsigned int base = adapter->params.pci.vpd_cap_addr; - - if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3)) - return -EINVAL; - - pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA, - le32_to_cpu(data)); - pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR, - addr | PCI_VPD_ADDR_F); - do { - msleep(1); - pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val); - } while ((val & PCI_VPD_ADDR_F) && --attempts); - - if (val & PCI_VPD_ADDR_F) { - CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr); - return -EIO; - } - return 0; -} - /** * t3_seeprom_wp - enable/disable EEPROM write protection * @adapter: the adapter @@@ -607,14 -679,7 +608,14 @@@ */ int t3_seeprom_wp(struct adapter *adapter, int enable) { - return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0); + u32 data = enable ? 0xc : 0; + int ret; + + /* EEPROM_STAT_ADDR is outside VPD area, use pci_write_vpd_any() */ + ret = pci_write_vpd_any(adapter->pdev, EEPROM_STAT_ADDR, sizeof(u32), + &data); + + return ret < 0 ? ret : 0; }
static int vpdstrtouint(char *s, u8 len, unsigned int base, unsigned int *val) @@@ -644,22 -709,24 +645,22 @@@ static int vpdstrtou16(char *s, u8 len */ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) { - int i, addr, ret; struct t3_vpd vpd; + u8 base_val = 0; + int addr, ret;
/* * Card information is normally at VPD_BASE but some early cards had * it at 0. */ - ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd); - if (ret) + ret = pci_read_vpd(adapter->pdev, VPD_BASE, 1, &base_val); + if (ret < 0) return ret; - addr = vpd.id_tag == 0x82 ? VPD_BASE : 0; + addr = base_val == PCI_VPD_LRDT_ID_STRING ? VPD_BASE : 0;
- for (i = 0; i < sizeof(vpd); i += 4) { - ret = t3_seeprom_read(adapter, addr + i, - (__le32 *)((u8 *)&vpd + i)); - if (ret) - return ret; - } + ret = pci_read_vpd(adapter->pdev, addr, sizeof(vpd), &vpd); + if (ret < 0) + return ret;
ret = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &p->cclk); if (ret) @@@ -3692,8 -3759,7 +3693,7 @@@ int t3_prep_adapter(struct adapter *ada memcpy(hw_addr, adapter->params.vpd.eth_base, 5); hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
- memcpy(adapter->port[i]->dev_addr, hw_addr, - ETH_ALEN); + eth_hw_addr_set(adapter->port[i], hw_addr); init_link_config(&p->link_config, p->phy.caps); p->phy.ops->power_down(&p->phy, 1);
diff --combined drivers/net/ethernet/hisilicon/hns3/hnae3.h index da3a593f6a56,3f7a9a4c59d5..9ae1aa96c545 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@@ -95,6 -95,7 +95,7 @@@ enum HNAE3_DEV_CAP_BITS HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, + HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, };
#define hnae3_dev_fd_supported(hdev) \ @@@ -151,6 -152,9 +152,9 @@@ #define hnae3_ae_dev_rxd_adv_layout_supported(ae_dev) \ test_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, (ae_dev)->caps)
+ #define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \ + test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps) + enum HNAE3_PF_CAP_BITS { HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0, }; @@@ -294,6 -298,8 +298,8 @@@ enum hnae3_dbg_cmd HNAE3_DBG_CMD_MAC_TNL_STATUS, HNAE3_DBG_CMD_SERV_INFO, HNAE3_DBG_CMD_UMV_INFO, + HNAE3_DBG_CMD_PAGE_POOL_INFO, + HNAE3_DBG_CMD_COAL_INFO, HNAE3_DBG_CMD_UNKNOWN, };
@@@ -341,6 -347,9 +347,9 @@@ struct hnae3_dev_specs u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */ u16 max_frm_size; u16 max_qset_num; + u16 umv_size; + u16 mc_mac_size; + u32 mac_stats_num; };
struct hnae3_client_ops { @@@ -568,7 -577,6 +577,7 @@@ struct hnae3_ae_ops u32 *auto_neg, u32 *rx_en, u32 *tx_en); int (*set_pauseparam)(struct hnae3_handle *handle, u32 auto_neg, u32 rx_en, u32 tx_en); + int (*restore_pauseparam)(struct hnae3_handle *handle);
int (*set_autoneg)(struct hnae3_handle *handle, bool enable); int (*get_autoneg)(struct hnae3_handle *handle); @@@ -589,7 -597,7 +598,7 @@@ u32 *tx_usecs_high, u32 *rx_usecs_high);
void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p); - int (*set_mac_addr)(struct hnae3_handle *handle, void *p, + int (*set_mac_addr)(struct hnae3_handle *handle, const void *p, bool is_first); int (*do_ioctl)(struct hnae3_handle *handle, struct ifreq *ifr, int cmd); diff --combined drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index e54f96251fea,1a1bebd453d3..67364ab63a1f --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@@ -137,7 -137,7 +137,7 @@@ static struct hns3_dbg_cmd_info hns3_db .name = "uc", .cmd = HNAE3_DBG_CMD_MAC_UC, .dentry = HNS3_DBG_DENTRY_MAC, - .buf_len = HNS3_DBG_READ_LEN, + .buf_len = HNS3_DBG_READ_LEN_128KB, .init = hns3_dbg_common_file_init, }, { @@@ -256,7 -256,7 +256,7 @@@ .name = "tqp", .cmd = HNAE3_DBG_CMD_REG_TQP, .dentry = HNS3_DBG_DENTRY_REG, - .buf_len = HNS3_DBG_READ_LEN, + .buf_len = HNS3_DBG_READ_LEN_128KB, .init = hns3_dbg_common_file_init, }, { @@@ -298,7 -298,7 +298,7 @@@ .name = "fd_tcam", .cmd = HNAE3_DBG_CMD_FD_TCAM, .dentry = HNS3_DBG_DENTRY_FD, - .buf_len = HNS3_DBG_READ_LEN, + .buf_len = HNS3_DBG_READ_LEN_1MB, .init = hns3_dbg_common_file_init, }, { @@@ -336,6 -336,20 +336,20 @@@ .buf_len = HNS3_DBG_READ_LEN, .init = hns3_dbg_common_file_init, }, + { + .name = "page_pool_info", + .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN, + .init = hns3_dbg_common_file_init, + }, + { + .name = "coalesce_info", + .cmd = HNAE3_DBG_CMD_COAL_INFO, + .dentry = HNS3_DBG_DENTRY_COMMON, + .buf_len = HNS3_DBG_READ_LEN_1MB, + .init = hns3_dbg_common_file_init, + }, };
static struct hns3_dbg_cap_info hns3_dbg_cap[] = { @@@ -384,6 -398,26 +398,26 @@@ } };
+ static const struct hns3_dbg_item coal_info_items[] = { + { "VEC_ID", 2 }, + { "ALGO_STATE", 2 }, + { "PROFILE_ID", 2 }, + { "CQE_MODE", 2 }, + { "TUNE_STATE", 2 }, + { "STEPS_LEFT", 2 }, + { "STEPS_RIGHT", 2 }, + { "TIRED", 2 }, + { "SW_GL", 2 }, + { "SW_QL", 2 }, + { "HW_GL", 2 }, + { "HW_QL", 2 }, + }; + + static const char * const dim_cqe_mode_str[] = { "EQE", "CQE" }; + static const char * const dim_state_str[] = { "START", "IN_PROG", "APPLY" }; + static const char * const + dim_tune_stat_str[] = { "ON_TOP", "TIRED", "RIGHT", "LEFT" }; + static void hns3_dbg_fill_content(char *content, u16 len, const struct hns3_dbg_item *items, const char **result, u16 size) @@@ -405,6 -439,94 +439,94 @@@ *pos++ = '\0'; }
+ static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector, + char **result, int i, bool is_tx) + { + unsigned int gl_offset, ql_offset; + struct hns3_enet_coalesce *coal; + unsigned int reg_val; + unsigned int j = 0; + struct dim *dim; + bool ql_enable; + + if (is_tx) { + coal = &tqp_vector->tx_group.coal; + dim = &tqp_vector->tx_group.dim; + gl_offset = HNS3_VECTOR_GL1_OFFSET; + ql_offset = HNS3_VECTOR_TX_QL_OFFSET; + ql_enable = tqp_vector->tx_group.coal.ql_enable; + } else { + coal = &tqp_vector->rx_group.coal; + dim = &tqp_vector->rx_group.dim; + gl_offset = HNS3_VECTOR_GL0_OFFSET; + ql_offset = HNS3_VECTOR_RX_QL_OFFSET; + ql_enable = tqp_vector->rx_group.coal.ql_enable; + } + + sprintf(result[j++], "%d", i); + sprintf(result[j++], "%s", dim_state_str[dim->state]); + sprintf(result[j++], "%u", dim->profile_ix); + sprintf(result[j++], "%s", dim_cqe_mode_str[dim->mode]); + sprintf(result[j++], "%s", + dim_tune_stat_str[dim->tune_state]); + sprintf(result[j++], "%u", dim->steps_left); + sprintf(result[j++], "%u", dim->steps_right); + sprintf(result[j++], "%u", dim->tired); + sprintf(result[j++], "%u", coal->int_gl); + sprintf(result[j++], "%u", coal->int_ql); + reg_val = readl(tqp_vector->mask_addr + gl_offset) & + HNS3_VECTOR_GL_MASK; + sprintf(result[j++], "%u", reg_val); + if (ql_enable) { + reg_val = readl(tqp_vector->mask_addr + ql_offset) & + HNS3_VECTOR_QL_MASK; + sprintf(result[j++], "%u", reg_val); + } else { + sprintf(result[j++], "NA"); + } + } + + static void hns3_dump_coal_info(struct hnae3_handle *h, char *buf, int len, + int *pos, bool is_tx) + { + char data_str[ARRAY_SIZE(coal_info_items)][HNS3_DBG_DATA_STR_LEN]; + char *result[ARRAY_SIZE(coal_info_items)]; + struct hns3_enet_tqp_vector *tqp_vector; + struct hns3_nic_priv *priv = h->priv; + char content[HNS3_DBG_INFO_LEN]; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(coal_info_items); i++) + result[i] = &data_str[i][0]; + + *pos += scnprintf(buf + *pos, len - *pos, + "%s interrupt coalesce info:\n", + is_tx ? "tx" : "rx"); + hns3_dbg_fill_content(content, sizeof(content), coal_info_items, + NULL, ARRAY_SIZE(coal_info_items)); + *pos += scnprintf(buf + *pos, len - *pos, "%s", content); + + for (i = 0; i < priv->vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + hns3_get_coal_info(tqp_vector, result, i, is_tx); + hns3_dbg_fill_content(content, sizeof(content), coal_info_items, + (const char **)result, + ARRAY_SIZE(coal_info_items)); + *pos += scnprintf(buf + *pos, len - *pos, "%s", content); + } + } + + static int hns3_dbg_coal_info(struct hnae3_handle *h, char *buf, int len) + { + int pos = 0; + + hns3_dump_coal_info(h, buf, len, &pos, true); + pos += scnprintf(buf + pos, len - pos, "\n"); + hns3_dump_coal_info(h, buf, len, &pos, false); + + return 0; + } + static const struct hns3_dbg_item tx_spare_info_items[] = { { "QUEUE_ID", 2 }, { "COPYBREAK", 2 }, @@@ -462,7 -584,7 +584,7 @@@ static const struct hns3_dbg_item rx_qu { "TAIL", 2 }, { "HEAD", 2 }, { "FBDNUM", 2 }, - { "PKTNUM", 2 }, + { "PKTNUM", 5 }, { "COPYBREAK", 2 }, { "RING_EN", 2 }, { "RX_RING_EN", 2 }, @@@ -565,7 -687,7 +687,7 @@@ static const struct hns3_dbg_item tx_qu { "HEAD", 2 }, { "FBDNUM", 2 }, { "OFFSET", 2 }, - { "PKTNUM", 2 }, + { "PKTNUM", 5 }, { "RING_EN", 2 }, { "TX_RING_EN", 2 }, { "BASE_ADDR", 10 }, @@@ -790,13 -912,13 +912,13 @@@ static int hns3_dbg_rx_bd_info(struct h }
static const struct hns3_dbg_item tx_bd_info_items[] = { - { "BD_IDX", 5 }, - { "ADDRESS", 2 }, + { "BD_IDX", 2 }, + { "ADDRESS", 13 }, { "VLAN_TAG", 2 }, { "SIZE", 2 }, { "T_CS_VLAN_TSO", 2 }, { "OT_VLAN_TAG", 3 }, - { "TV", 2 }, + { "TV", 5 }, { "OLT_VLAN_LEN", 2 }, { "PAYLEN_OL4CS", 2 }, { "BD_FE_SC_VLD", 2 }, @@@ -924,6 -1046,12 +1046,12 @@@ hns3_dbg_dev_specs(struct hnae3_handle dev_specs->max_tm_rate); *pos += scnprintf(buf + *pos, len - *pos, "MAX QSET number: %u\n", dev_specs->max_qset_num); + *pos += scnprintf(buf + *pos, len - *pos, "umv size: %u\n", + dev_specs->umv_size); + *pos += scnprintf(buf + *pos, len - *pos, "mc mac size: %u\n", + dev_specs->mc_mac_size); + *pos += scnprintf(buf + *pos, len - *pos, "MAC statistics number: %u\n", + dev_specs->mac_stats_num); }
static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len) @@@ -937,6 -1065,69 +1065,69 @@@ return 0; }
+ static const struct hns3_dbg_item page_pool_info_items[] = { + { "QUEUE_ID", 2 }, + { "ALLOCATE_CNT", 2 }, + { "FREE_CNT", 6 }, + { "POOL_SIZE(PAGE_NUM)", 2 }, + { "ORDER", 2 }, + { "NUMA_ID", 2 }, + { "MAX_LEN", 2 }, + }; + + static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring, + char **result, u32 index) + { + u32 j = 0; + + sprintf(result[j++], "%u", index); + sprintf(result[j++], "%u", ring->page_pool->pages_state_hold_cnt); + sprintf(result[j++], "%u", + atomic_read(&ring->page_pool->pages_state_release_cnt)); + sprintf(result[j++], "%u", ring->page_pool->p.pool_size); + sprintf(result[j++], "%u", ring->page_pool->p.order); + sprintf(result[j++], "%d", ring->page_pool->p.nid); + sprintf(result[j++], "%uK", ring->page_pool->p.max_len / 1024); + } + + static int + hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len) + { + char data_str[ARRAY_SIZE(page_pool_info_items)][HNS3_DBG_DATA_STR_LEN]; + char *result[ARRAY_SIZE(page_pool_info_items)]; + struct hns3_nic_priv *priv = h->priv; + char content[HNS3_DBG_INFO_LEN]; + struct hns3_enet_ring *ring; + int pos = 0; + u32 i; + + if (!priv->ring) { + dev_err(&h->pdev->dev, "priv->ring is NULL\n"); + return -EFAULT; + } + + for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++) + result[i] = &data_str[i][0]; + + hns3_dbg_fill_content(content, sizeof(content), page_pool_info_items, + NULL, ARRAY_SIZE(page_pool_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + for (i = 0; i < h->kinfo.num_tqps; i++) { + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || + test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) + return -EPERM; + ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)]; + hns3_dump_page_pool_info(ring, result, i); + hns3_dbg_fill_content(content, sizeof(content), + page_pool_info_items, + (const char **)result, + ARRAY_SIZE(page_pool_info_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); + } + + return 0; + } + static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index) { u32 i; @@@ -978,6 -1169,14 +1169,14 @@@ static const struct hns3_dbg_func hns3_ .cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO, .dbg_dump = hns3_dbg_tx_queue_info, }, + { + .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO, + .dbg_dump = hns3_dbg_page_pool_info, + }, + { + .cmd = HNAE3_DBG_CMD_COAL_INFO, + .dbg_dump = hns3_dbg_coal_info, + }, };
static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data, diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 9cda8b3562b8,f0aa4fbd2200..4e0a8c2f7c05 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@@ -391,7 -391,7 +391,7 @@@ static int hclge_dbg_dump_mac(struct hc static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len, int *pos) { - struct hclge_dbg_bitmap_cmd *bitmap; + struct hclge_dbg_bitmap_cmd req; struct hclge_desc desc; u16 qset_id, qset_num; int ret; @@@ -408,12 -408,12 +408,12 @@@ if (ret) return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1]; + req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos, "%04u %#x %#x %#x %#x\n", - qset_id, bitmap->bit0, bitmap->bit1, - bitmap->bit2, bitmap->bit3); + qset_id, req.bit0, req.bit1, req.bit2, + req.bit3); }
return 0; @@@ -422,7 -422,7 +422,7 @@@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len, int *pos) { - struct hclge_dbg_bitmap_cmd *bitmap; + struct hclge_dbg_bitmap_cmd req; struct hclge_desc desc; u8 pri_id, pri_num; int ret; @@@ -439,11 -439,12 +439,11 @@@ if (ret) return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1]; + req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos, "%03u %#x %#x %#x\n", - pri_id, bitmap->bit0, bitmap->bit1, - bitmap->bit2); + pri_id, req.bit0, req.bit1, req.bit2); }
return 0; @@@ -452,7 -453,7 +452,7 @@@ static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len, int *pos) { - struct hclge_dbg_bitmap_cmd *bitmap; + struct hclge_dbg_bitmap_cmd req; struct hclge_desc desc; u8 pg_id; int ret; @@@ -465,11 -466,12 +465,11 @@@ if (ret) return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1]; + req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos, "%03u %#x %#x %#x\n", - pg_id, bitmap->bit0, bitmap->bit1, - bitmap->bit2); + pg_id, req.bit0, req.bit1, req.bit2); }
return 0; @@@ -509,7 -511,7 +509,7 @@@ static int hclge_dbg_dump_dcb_queue(str static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len, int *pos) { - struct hclge_dbg_bitmap_cmd *bitmap; + struct hclge_dbg_bitmap_cmd req; struct hclge_desc desc; u8 port_id = 0; int ret; @@@ -519,12 -521,12 +519,12 @@@ if (ret) return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1]; + req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n", - bitmap->bit0); + req.bit0); *pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n", - bitmap->bit1); + req.bit1);
return 0; } @@@ -1990,6 -1992,9 +1990,9 @@@ static int hclge_dbg_dump_umv_info(stru } mutex_unlock(&hdev->vport_lock);
+ pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num : %u\n", + hdev->used_mc_mac_num); + return 0; }
diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 269e579762b2,f1db6699f81f..22d08ed092cb --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@@ -156,174 -156,210 +156,210 @@@ static const char hns3_nic_test_strs[][ };
static const struct hclge_comm_stats_str g_mac_stats_string[] = { - {"mac_tx_mac_pause_num", + {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, - {"mac_rx_mac_pause_num", + {"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, - {"mac_tx_control_pkt_num", + {"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)}, + {"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)}, + {"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)}, - {"mac_rx_control_pkt_num", + {"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)}, - {"mac_tx_pfc_pkt_num", + {"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)}, - {"mac_tx_pfc_pri0_pkt_num", + {"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, - {"mac_tx_pfc_pri1_pkt_num", + {"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)}, - {"mac_tx_pfc_pri2_pkt_num", + {"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)}, - {"mac_tx_pfc_pri3_pkt_num", + {"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)}, - {"mac_tx_pfc_pri4_pkt_num", + {"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)}, - {"mac_tx_pfc_pri5_pkt_num", + {"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)}, - {"mac_tx_pfc_pri6_pkt_num", + {"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, - {"mac_tx_pfc_pri7_pkt_num", + {"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, - {"mac_rx_pfc_pkt_num", + {"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)}, + {"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)}, + {"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)}, + {"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)}, + {"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)}, + {"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)}, + {"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)}, + {"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)}, + {"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)}, - {"mac_rx_pfc_pri0_pkt_num", + {"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, - {"mac_rx_pfc_pri1_pkt_num", + {"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)}, - {"mac_rx_pfc_pri2_pkt_num", + {"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)}, - {"mac_rx_pfc_pri3_pkt_num", + {"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)}, - {"mac_rx_pfc_pri4_pkt_num", + {"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)}, - {"mac_rx_pfc_pri5_pkt_num", + {"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)}, - {"mac_rx_pfc_pri6_pkt_num", + {"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)}, - {"mac_rx_pfc_pri7_pkt_num", + {"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)}, - {"mac_tx_total_pkt_num", + {"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)}, + {"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)}, + {"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)}, + {"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)}, + {"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)}, + {"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)}, + {"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)}, + {"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)}, + {"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)}, - {"mac_tx_total_oct_num", + {"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)}, - {"mac_tx_good_pkt_num", + {"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)}, - {"mac_tx_bad_pkt_num", + {"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)}, - {"mac_tx_good_oct_num", + {"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)}, - {"mac_tx_bad_oct_num", + {"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)}, - {"mac_tx_uni_pkt_num", + {"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)}, - {"mac_tx_multi_pkt_num", + {"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)}, - {"mac_tx_broad_pkt_num", + {"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, - {"mac_tx_undersize_pkt_num", + {"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, - {"mac_tx_oversize_pkt_num", + {"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)}, - {"mac_tx_64_oct_pkt_num", + {"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, - {"mac_tx_65_127_oct_pkt_num", + {"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)}, - {"mac_tx_128_255_oct_pkt_num", + {"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, 
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)}, - {"mac_tx_256_511_oct_pkt_num", + {"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)}, - {"mac_tx_512_1023_oct_pkt_num", + {"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, - {"mac_tx_1024_1518_oct_pkt_num", + {"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, - {"mac_tx_1519_2047_oct_pkt_num", + {"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)}, - {"mac_tx_2048_4095_oct_pkt_num", + {"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)}, - {"mac_tx_4096_8191_oct_pkt_num", + {"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)}, - {"mac_tx_8192_9216_oct_pkt_num", + {"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)}, - {"mac_tx_9217_12287_oct_pkt_num", + {"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)}, - {"mac_tx_12288_16383_oct_pkt_num", + {"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)}, - {"mac_tx_1519_max_good_pkt_num", + {"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)}, - {"mac_tx_1519_max_bad_pkt_num", + {"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)}, - {"mac_rx_total_pkt_num", + {"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, - {"mac_rx_total_oct_num", + {"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)}, - {"mac_rx_good_pkt_num", + {"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)}, - {"mac_rx_bad_pkt_num", + {"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)}, - {"mac_rx_good_oct_num", + {"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)}, - {"mac_rx_bad_oct_num", + {"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)}, - {"mac_rx_uni_pkt_num", + {"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)}, - {"mac_rx_multi_pkt_num", + {"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)}, - {"mac_rx_broad_pkt_num", + {"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, - {"mac_rx_undersize_pkt_num", + {"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, - {"mac_rx_oversize_pkt_num", + {"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)}, - {"mac_rx_64_oct_pkt_num", + {"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, - {"mac_rx_65_127_oct_pkt_num", + {"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)}, - {"mac_rx_128_255_oct_pkt_num", + 
{"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)}, - {"mac_rx_256_511_oct_pkt_num", + {"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)}, - {"mac_rx_512_1023_oct_pkt_num", + {"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, - {"mac_rx_1024_1518_oct_pkt_num", + {"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, - {"mac_rx_1519_2047_oct_pkt_num", + {"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)}, - {"mac_rx_2048_4095_oct_pkt_num", + {"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)}, - {"mac_rx_4096_8191_oct_pkt_num", + {"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)}, - {"mac_rx_8192_9216_oct_pkt_num", + {"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)}, - {"mac_rx_9217_12287_oct_pkt_num", + {"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)}, - {"mac_rx_12288_16383_oct_pkt_num", + {"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)}, - {"mac_rx_1519_max_good_pkt_num", + {"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)}, - {"mac_rx_1519_max_bad_pkt_num", + {"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
- {"mac_tx_fragment_pkt_num", + {"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)}, - {"mac_tx_undermin_pkt_num", + {"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)}, - {"mac_tx_jabber_pkt_num", + {"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)}, - {"mac_tx_err_all_pkt_num", + {"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)}, - {"mac_tx_from_app_good_pkt_num", + {"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)}, - {"mac_tx_from_app_bad_pkt_num", + {"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)}, - {"mac_rx_fragment_pkt_num", + {"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)}, - {"mac_rx_undermin_pkt_num", + {"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)}, - {"mac_rx_jabber_pkt_num", + {"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)}, - {"mac_rx_fcs_err_pkt_num", + {"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)}, - {"mac_rx_send_app_good_pkt_num", + {"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)}, - {"mac_rx_send_app_bad_pkt_num", + {"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1, HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)} };
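For readers scanning the table above: each entry pairs an ethtool string with the minimum number of MAC statistics registers the firmware must expose for that counter to be valid (HCLGE_MAC_STATS_MAX_NUM_V1 or _V2, defined later in this patch) and with the counter's offset inside struct hclge_mac_stats. A minimal sketch of how the two helper macros are typically built on offsetof(); this is an assumption for illustration, the real definitions live in hclge_main.h and may differ in detail:

	/* Sketch only; assumed shape of the helpers used by g_mac_stats_string */
	#include <linux/stddef.h>

	#define HCLGE_MAC_STATS_FIELD_OFF(f)	(offsetof(struct hclge_mac_stats, f))
	#define HCLGE_STATS_READ(p, offset)	(*(u64 *)((u8 *)(p) + (offset)))

With that, a V2-only row such as {"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2, HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)} is only exported when the firmware reports at least the V2 register count.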
@@@ -451,8 -487,9 +487,9 @@@ static int hclge_mac_update_stats_defec u64 *data = (u64 *)(&hdev->mac_stats); struct hclge_desc desc[HCLGE_MAC_CMD_NUM]; __le64 *desc_data; - int i, k, n; + u32 data_size; int ret; + u32 i;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true); ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); @@@ -463,33 -500,37 +500,37 @@@ return ret; }
- for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) { - /* for special opcode 0032, only the first desc has the head */ - if (unlikely(i == 0)) { - desc_data = (__le64 *)(&desc[i].data[0]); - n = HCLGE_RD_FIRST_STATS_NUM; - } else { - desc_data = (__le64 *)(&desc[i]); - n = HCLGE_RD_OTHER_STATS_NUM; - } + /* The first desc has a 64-bit header, so the data size needs to be reduced by 1 */ + data_size = sizeof(desc) / (sizeof(u64)) - 1;
- for (k = 0; k < n; k++) { - *data += le64_to_cpu(*desc_data); - data++; - desc_data++; - } + desc_data = (__le64 *)(&desc[0].data[0]); + for (i = 0; i < data_size; i++) { + /* data memory is contiguous because only the first desc has a + * header in this command + */ + *data += le64_to_cpu(*desc_data); + data++; + desc_data++; }
return 0; }
- static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num) + static int hclge_mac_update_stats_complete(struct hclge_dev *hdev) { + #define HCLGE_REG_NUM_PER_DESC 4 + + u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num; u64 *data = (u64 *)(&hdev->mac_stats); struct hclge_desc *desc; __le64 *desc_data; - u16 i, k, n; + u32 data_size; + u32 desc_num; int ret; + u32 i; + + /* The first desc has a 64-bit header, so it needs to be accounted for */ + desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;
/* This may be called inside atomic sections, * so GFP_ATOMIC is more suitable here @@@ -505,21 -546,16 +546,16 @@@ return ret; }
- for (i = 0; i < desc_num; i++) { - /* for special opcode 0034, only the first desc has the head */ - if (i == 0) { - desc_data = (__le64 *)(&desc[i].data[0]); - n = HCLGE_RD_FIRST_STATS_NUM; - } else { - desc_data = (__le64 *)(&desc[i]); - n = HCLGE_RD_OTHER_STATS_NUM; - } + data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);
- for (k = 0; k < n; k++) { - *data += le64_to_cpu(*desc_data); - data++; - desc_data++; - } + desc_data = (__le64 *)(&desc[0].data[0]); + for (i = 0; i < data_size; i++) { + /* data memory is contiguous because only the first desc has a + * header in this command + */ + *data += le64_to_cpu(*desc_data); + data++; + desc_data++; }
kfree(desc); @@@ -527,42 -563,37 +563,37 @@@ return 0; }
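Both update paths above now treat the descriptor array as one contiguous stream of __le64 counters that begins right after the 64-bit command header carried only by the first descriptor; that is why the defective path computes data_size as sizeof(desc) / sizeof(u64) - 1, and why the complete path allocates reg_num / HCLGE_REG_NUM_PER_DESC + 1 descriptors (with the V2 maximum of 105 registers defined later in this patch, that is 105 / 4 + 1 = 27). The accumulation loop itself is now identical in the two functions; a hypothetical helper, not part of the patch, that both could share might look like this:

	/* Hypothetical helper: accumulate 'count' little-endian counters that
	 * follow the 64-bit header of desc[0] into hdev->mac_stats.
	 */
	static void hclge_mac_stats_accumulate(struct hclge_dev *hdev,
					       struct hclge_desc *desc, u32 count)
	{
		__le64 *desc_data = (__le64 *)(&desc[0].data[0]);
		u64 *data = (u64 *)(&hdev->mac_stats);
		u32 i;

		for (i = 0; i < count; i++)
			data[i] += le64_to_cpu(desc_data[i]);
	}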
- static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num) + static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num) { struct hclge_desc desc; - __le32 *desc_data; - u32 reg_num; int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true); ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to query mac statistic reg number, ret = %d\n", + ret); return ret; + }
- desc_data = (__le32 *)(&desc.data[0]); - reg_num = le32_to_cpu(*desc_data); - - *desc_num = 1 + ((reg_num - 3) >> 2) + - (u32)(((reg_num - 3) & 0x3) ? 1 : 0); + *reg_num = le32_to_cpu(desc.data[0]); + if (*reg_num == 0) { + dev_err(&hdev->pdev->dev, + "mac statistic reg number is invalid!\n"); + return -ENODATA; + }
return 0; }
static int hclge_mac_update_stats(struct hclge_dev *hdev) { - u32 desc_num; - int ret; - - ret = hclge_mac_query_reg_num(hdev, &desc_num); /* The firmware supports the new statistics acquisition method */ - if (!ret) - ret = hclge_mac_update_stats_complete(hdev, desc_num); - else if (ret == -EOPNOTSUPP) - ret = hclge_mac_update_stats_defective(hdev); + if (hdev->ae_dev->dev_specs.mac_stats_num) + return hclge_mac_update_stats_complete(hdev); else - dev_err(&hdev->pdev->dev, "query mac reg num fail!\n"); - - return ret; + return hclge_mac_update_stats_defective(hdev); }
static int hclge_tqps_update_stats(struct hnae3_handle *handle) @@@ -670,20 -701,39 +701,39 @@@ static u8 *hclge_tqps_get_strings(struc return buff; }
- static u64 *hclge_comm_get_stats(const void *comm_stats, + static int hclge_comm_get_count(struct hclge_dev *hdev, + const struct hclge_comm_stats_str strs[], + u32 size) + { + int count = 0; + u32 i; + + for (i = 0; i < size; i++) + if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num) + count++; + + return count; + } + + static u64 *hclge_comm_get_stats(struct hclge_dev *hdev, const struct hclge_comm_stats_str strs[], int size, u64 *data) { u64 *buf = data; u32 i;
- for (i = 0; i < size; i++) - buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset); + for (i = 0; i < size; i++) { + if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) + continue; + + *buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset); + buf++; + }
- return buf + size; + return buf; }
- static u8 *hclge_comm_get_strings(u32 stringset, + static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset, const struct hclge_comm_stats_str strs[], int size, u8 *data) { @@@ -694,6 -744,9 +744,9 @@@ return buff;
for (i = 0; i < size; i++) { + if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) + continue; + snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc); buff = buff + ETH_GSTRING_LEN; } @@@ -785,7 -838,8 +838,8 @@@ static int hclge_get_sset_count(struct handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK; } } else if (stringset == ETH_SS_STATS) { - count = ARRAY_SIZE(g_mac_stats_string) + + count = hclge_comm_get_count(hdev, g_mac_stats_string, + ARRAY_SIZE(g_mac_stats_string)) + hclge_tqps_get_sset_count(handle, stringset); }
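The same support test, strs[i].stats_num <= dev_specs.mac_stats_num, now appears in hclge_comm_get_count(), hclge_comm_get_stats() and hclge_comm_get_strings(); the three call sites have to stay consistent so the string and value arrays that ethtool sees line up one-to-one. A hypothetical predicate (not in the patch) that would keep them in sync:

	/* A counter is exported only if the firmware exposes at least as many
	 * MAC statistics registers as the counter requires.
	 */
	static bool hclge_comm_stat_supported(struct hclge_dev *hdev,
					      const struct hclge_comm_stats_str *s)
	{
		return s->stats_num <= hdev->ae_dev->dev_specs.mac_stats_num;
	}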
@@@ -795,12 -849,14 +849,14 @@@ static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset, u8 *data) { + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; u8 *p = (char *)data; int size;
if (stringset == ETH_SS_STATS) { size = ARRAY_SIZE(g_mac_stats_string); - p = hclge_comm_get_strings(stringset, g_mac_stats_string, + p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string, size, p); p = hclge_tqps_get_strings(handle, p); } else if (stringset == ETH_SS_TEST) { @@@ -834,7 -890,7 +890,7 @@@ static void hclge_get_stats(struct hnae struct hclge_dev *hdev = vport->back; u64 *p;
- p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string, + p = hclge_comm_get_stats(hdev, g_mac_stats_string, ARRAY_SIZE(g_mac_stats_string), data); p = hclge_tqps_get_stats(handle, p); } @@@ -1037,96 -1093,100 +1093,100 @@@ static int hclge_check_port_speed(struc return -EINVAL; }
- static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability) + static void hclge_convert_setting_sr(u16 speed_ability, + unsigned long *link_mode) { if (speed_ability & HCLGE_SUPPORT_10G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_25G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_40G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_50G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_100G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_200G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, - mac->supported); + link_mode); }
- static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability) + static void hclge_convert_setting_lr(u16 speed_ability, + unsigned long *link_mode) { if (speed_ability & HCLGE_SUPPORT_10G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_25G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_50G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_40G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_100G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_200G_BIT) linkmode_set_bit( ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, - mac->supported); + link_mode); }
- static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability) + static void hclge_convert_setting_cr(u16 speed_ability, + unsigned long *link_mode) { if (speed_ability & HCLGE_SUPPORT_10G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_25G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_40G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_50G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_100G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_200G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, - mac->supported); + link_mode); }
- static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability) + static void hclge_convert_setting_kr(u16 speed_ability, + unsigned long *link_mode) { if (speed_ability & HCLGE_SUPPORT_1G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_10G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_25G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_40G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_50G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_100G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, - mac->supported); + link_mode); if (speed_ability & HCLGE_SUPPORT_200G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, - mac->supported); + link_mode); }
static void hclge_convert_setting_fec(struct hclge_mac *mac) @@@ -1170,9 -1230,9 +1230,9 @@@ static void hclge_parse_fiber_link_mode linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, mac->supported);
- hclge_convert_setting_sr(mac, speed_ability); - hclge_convert_setting_lr(mac, speed_ability); - hclge_convert_setting_cr(mac, speed_ability); + hclge_convert_setting_sr(speed_ability, mac->supported); + hclge_convert_setting_lr(speed_ability, mac->supported); + hclge_convert_setting_cr(speed_ability, mac->supported); if (hnae3_dev_fec_supported(hdev)) hclge_convert_setting_fec(mac);
@@@ -1188,7 -1248,7 +1248,7 @@@ static void hclge_parse_backplane_link_ { struct hclge_mac *mac = &hdev->hw.mac;
- hclge_convert_setting_kr(mac, speed_ability); + hclge_convert_setting_kr(speed_ability, mac->supported); if (hnae3_dev_fec_supported(hdev)) hclge_convert_setting_fec(mac);
@@@ -1342,8 -1402,6 +1402,6 @@@ static void hclge_parse_cfg(struct hclg cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]), HCLGE_CFG_UMV_TBL_SPACE_M, HCLGE_CFG_UMV_TBL_SPACE_S); - if (!cfg->umv_space) - cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]), HCLGE_CFG_PF_RSS_SIZE_M, @@@ -1419,6 -1477,7 +1477,7 @@@ static void hclge_set_default_dev_specs ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL; ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME; ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM; + ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; }
static void hclge_parse_dev_specs(struct hclge_dev *hdev, @@@ -1440,6 -1499,8 +1499,8 @@@ ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num); ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); + ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size); + ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size); }
static void hclge_check_dev_specs(struct hclge_dev *hdev) @@@ -1460,6 -1521,21 +1521,21 @@@ dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL; if (!dev_specs->max_frm_size) dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME; + if (!dev_specs->umv_size) + dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF; + } + + static int hclge_query_mac_stats_num(struct hclge_dev *hdev) + { + u32 reg_num = 0; + int ret; + + ret = hclge_mac_query_reg_num(hdev, ®_num); + if (ret && ret != -EOPNOTSUPP) + return ret; + + hdev->ae_dev->dev_specs.mac_stats_num = reg_num; + return 0; }
static int hclge_query_dev_specs(struct hclge_dev *hdev) @@@ -1468,6 -1544,10 +1544,10 @@@ int ret; int i;
+ ret = hclge_query_mac_stats_num(hdev); + if (ret) + return ret; + /* set default specifications as devices lower than version V3 do not * support querying specifications from firmware. */ @@@ -1549,7 -1629,10 +1629,10 @@@ static int hclge_configure(struct hclge hdev->tm_info.num_pg = 1; hdev->tc_max = cfg.tc_num; hdev->tm_info.hw_pfc_map = 0; - hdev->wanted_umv_size = cfg.umv_space; + if (cfg.umv_space) + hdev->wanted_umv_size = cfg.umv_space; + else + hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size; hdev->tx_spare_buf_size = cfg.tx_spare_buf_size; hdev->gro_en = true; if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF) @@@ -2847,29 -2930,33 +2930,29 @@@ static void hclge_mbx_task_schedule(str { if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) - mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), - hclge_wq, &hdev->service_task, 0); + mod_delayed_work(hclge_wq, &hdev->service_task, 0); }
static void hclge_reset_task_schedule(struct hclge_dev *hdev) { if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && + test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) && !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) - mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), - hclge_wq, &hdev->service_task, 0); + mod_delayed_work(hclge_wq, &hdev->service_task, 0); }
static void hclge_errhand_task_schedule(struct hclge_dev *hdev) { if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) - mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), - hclge_wq, &hdev->service_task, 0); + mod_delayed_work(hclge_wq, &hdev->service_task, 0); }
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time) { if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) - mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), - hclge_wq, &hdev->service_task, - delay_time); + mod_delayed_work(hclge_wq, &hdev->service_task, delay_time); }
static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status) @@@ -2964,6 -3051,82 +3047,82 @@@ static void hclge_update_link_status(st clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); }
+ static void hclge_update_speed_advertising(struct hclge_mac *mac) + { + u32 speed_ability; + + if (hclge_get_speed_bit(mac->speed, &speed_ability)) + return; + + switch (mac->module_type) { + case HNAE3_MODULE_TYPE_FIBRE_LR: + hclge_convert_setting_lr(speed_ability, mac->advertising); + break; + case HNAE3_MODULE_TYPE_FIBRE_SR: + case HNAE3_MODULE_TYPE_AOC: + hclge_convert_setting_sr(speed_ability, mac->advertising); + break; + case HNAE3_MODULE_TYPE_CR: + hclge_convert_setting_cr(speed_ability, mac->advertising); + break; + case HNAE3_MODULE_TYPE_KR: + hclge_convert_setting_kr(speed_ability, mac->advertising); + break; + default: + break; + } + } + + static void hclge_update_fec_advertising(struct hclge_mac *mac) + { + if (mac->fec_mode & BIT(HNAE3_FEC_RS)) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, + mac->advertising); + else if (mac->fec_mode & BIT(HNAE3_FEC_BASER)) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, + mac->advertising); + else + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, + mac->advertising); + } + + static void hclge_update_pause_advertising(struct hclge_dev *hdev) + { + struct hclge_mac *mac = &hdev->hw.mac; + bool rx_en, tx_en; + + switch (hdev->fc_mode_last_time) { + case HCLGE_FC_RX_PAUSE: + rx_en = true; + tx_en = false; + break; + case HCLGE_FC_TX_PAUSE: + rx_en = false; + tx_en = true; + break; + case HCLGE_FC_FULL: + rx_en = true; + tx_en = true; + break; + default: + rx_en = false; + tx_en = false; + break; + } + + linkmode_set_pause(mac->advertising, tx_en, rx_en); + } + + static void hclge_update_advertising(struct hclge_dev *hdev) + { + struct hclge_mac *mac = &hdev->hw.mac; + + linkmode_zero(mac->advertising); + hclge_update_speed_advertising(mac); + hclge_update_fec_advertising(mac); + hclge_update_pause_advertising(hdev); + } + static void hclge_update_port_capability(struct hclge_dev *hdev, struct hclge_mac *mac) { @@@ -2986,7 -3149,7 +3145,7 @@@ } else { linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported); - linkmode_zero(mac->advertising); + hclge_update_advertising(hdev); } }
@@@ -3487,14 -3650,33 +3646,14 @@@ static void hclge_get_misc_vector(struc hdev->num_msi_used += 1; }
-static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify, - const cpumask_t *mask) -{ - struct hclge_dev *hdev = container_of(notify, struct hclge_dev, - affinity_notify); - - cpumask_copy(&hdev->affinity_mask, mask); -} - -static void hclge_irq_affinity_release(struct kref *ref) -{ -} - static void hclge_misc_affinity_setup(struct hclge_dev *hdev) { irq_set_affinity_hint(hdev->misc_vector.vector_irq, &hdev->affinity_mask); - - hdev->affinity_notify.notify = hclge_irq_affinity_notify; - hdev->affinity_notify.release = hclge_irq_affinity_release; - irq_set_affinity_notifier(hdev->misc_vector.vector_irq, - &hdev->affinity_notify); }
static void hclge_misc_affinity_teardown(struct hclge_dev *hdev) { - irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL); irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL); }
@@@ -8475,6 -8657,9 +8634,9 @@@ static int hclge_init_umv_space(struct hdev->share_umv_size = hdev->priv_umv_size + hdev->max_umv_size % (hdev->num_alloc_vport + 1);
+ if (hdev->ae_dev->dev_specs.mc_mac_size) + set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps); + return 0; }
@@@ -8492,6 -8677,8 +8654,8 @@@ static void hclge_reset_umv_space(struc hdev->share_umv_size = hdev->priv_umv_size + hdev->max_umv_size % (hdev->num_alloc_vport + 1); mutex_unlock(&hdev->vport_lock); + + hdev->used_mc_mac_num = 0; }
static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock) @@@ -8746,6 -8933,7 +8910,7 @@@ int hclge_add_mc_addr_common(struct hcl struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; struct hclge_desc desc[3]; + bool is_new_addr = false; int status;
/* mac addr check */ @@@ -8759,6 -8947,13 +8924,13 @@@ hclge_prepare_mac_addr(&req, addr, true); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (status) { + if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) && + hdev->used_mc_mac_num >= + hdev->ae_dev->dev_specs.mc_mac_size) + goto err_no_space; + + is_new_addr = true; + /* This mac addr does not exist, add a new entry for it */ memset(desc[0].data, 0, sizeof(desc[0].data)); memset(desc[1].data, 0, sizeof(desc[0].data)); @@@ -8768,12 -8963,18 +8940,18 @@@ if (status) return status; status = hclge_add_mac_vlan_tbl(vport, &req, desc); - /* if already overflow, not to print each time */ - if (status == -ENOSPC && - !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) - dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); + if (status == -ENOSPC) + goto err_no_space; + else if (!status && is_new_addr) + hdev->used_mc_mac_num++;
return status; + + err_no_space: + /* if the table has already overflowed, do not print for every address */ + if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) + dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); + return -ENOSPC; }
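The new accounting keeps used_mc_mac_num in step with the table: it is incremented only when a lookup miss turns into a successful insert, decremented when the last function id is removed, and checked against dev_specs.mc_mac_size before a new entry is attempted. A hypothetical capacity check, not part of the patch, that captures the invariant:

	static bool hclge_mc_mac_has_space(struct hclge_dev *hdev)
	{
		/* The limit is only enforced when the firmware reports a
		 * multicast table size (mc_mac_size != 0).
		 */
		if (!hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev))
			return true;

		return hdev->used_mc_mac_num <
		       hdev->ae_dev->dev_specs.mc_mac_size;
	}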
static int hclge_rm_mc_addr(struct hnae3_handle *handle, @@@ -8810,12 -9011,15 +8988,15 @@@ int hclge_rm_mc_addr_common(struct hclg if (status) return status;
- if (hclge_is_all_function_id_zero(desc)) + if (hclge_is_all_function_id_zero(desc)) { /* All the vfid is zero, so need to delete this entry */ status = hclge_remove_mac_vlan_tbl(vport, &req); - else + if (!status) + hdev->used_mc_mac_num--; + } else { /* Not all the vfid is zero, update the vfid */ status = hclge_add_mac_vlan_tbl(vport, &req, desc); + } } else if (status == -ENOENT) { status = 0; } @@@ -9391,7 -9595,7 +9572,7 @@@ int hclge_update_mac_node_for_dev_addr( return 0; }
- static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, + static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p, bool is_first) { const unsigned char *new_addr = (const unsigned char *)p; @@@ -10998,35 -11202,6 +11179,35 @@@ static int hclge_set_pauseparam(struct return -EOPNOTSUPP; }
+static int hclge_restore_pauseparam(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 auto_neg, rx_pause, tx_pause; + int ret; + + hclge_get_pauseparam(handle, &auto_neg, &rx_pause, &tx_pause); + /* when autoneg is disabled, the pause setting of phy has no effect + * unless the link goes down. + */ + ret = phy_suspend(hdev->hw.mac.phydev); + if (ret) + return ret; + + phy_set_asym_pause(hdev->hw.mac.phydev, rx_pause, tx_pause); + + ret = phy_resume(hdev->hw.mac.phydev); + if (ret) + return ret; + + ret = hclge_mac_pause_setup_hw(hdev); + if (ret) + dev_err(&hdev->pdev->dev, + "restore pauseparam error, ret = %d.\n", ret); + + return ret; +} + static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, u8 *auto_neg, u32 *speed, u8 *duplex) { @@@ -12990,7 -13165,6 +13171,7 @@@ static const struct hnae3_ae_ops hclge_ .halt_autoneg = hclge_halt_autoneg, .get_pauseparam = hclge_get_pauseparam, .set_pauseparam = hclge_set_pauseparam, + .restore_pauseparam = hclge_restore_pauseparam, .set_mtu = hclge_set_mtu, .reset_queue = hclge_reset_tqp, .get_stats = hclge_get_stats, @@@ -13059,7 -13233,7 +13240,7 @@@ static int hclge_init(void { pr_info("%s is initializing\n", HCLGE_NAME);
- hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME); + hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME); if (!hclge_wq) { pr_err("%s: failed to create workqueue\n", HCLGE_NAME); return -ENOMEM; diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 69cd8f87b4c8,4f8403af84be..9e1eede599ec --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@@ -403,8 -403,13 +403,13 @@@ struct hclge_tm_info u8 pfc_en; /* PFC enabled or not for user priority */ };
+ /* max number of mac statistics on each version */ + #define HCLGE_MAC_STATS_MAX_NUM_V1 84 + #define HCLGE_MAC_STATS_MAX_NUM_V2 105 + struct hclge_comm_stats_str { char desc[ETH_GSTRING_LEN]; + u32 stats_num; unsigned long offset; };
@@@ -412,6 -417,7 +417,7 @@@ struct hclge_mac_stats { u64 mac_tx_mac_pause_num; u64 mac_rx_mac_pause_num; + u64 rsv0; u64 mac_tx_pfc_pri0_pkt_num; u64 mac_tx_pfc_pri1_pkt_num; u64 mac_tx_pfc_pri2_pkt_num; @@@ -448,7 -454,7 +454,7 @@@ u64 mac_tx_1519_2047_oct_pkt_num; u64 mac_tx_2048_4095_oct_pkt_num; u64 mac_tx_4096_8191_oct_pkt_num; - u64 rsv0; + u64 rsv1; u64 mac_tx_8192_9216_oct_pkt_num; u64 mac_tx_9217_12287_oct_pkt_num; u64 mac_tx_12288_16383_oct_pkt_num; @@@ -475,7 -481,7 +481,7 @@@ u64 mac_rx_1519_2047_oct_pkt_num; u64 mac_rx_2048_4095_oct_pkt_num; u64 mac_rx_4096_8191_oct_pkt_num; - u64 rsv1; + u64 rsv2; u64 mac_rx_8192_9216_oct_pkt_num; u64 mac_rx_9217_12287_oct_pkt_num; u64 mac_rx_12288_16383_oct_pkt_num; @@@ -498,6 -504,28 +504,28 @@@ u64 mac_rx_pfc_pause_pkt_num; u64 mac_tx_ctrl_pkt_num; u64 mac_rx_ctrl_pkt_num; + + /* duration of pfc */ + u64 mac_tx_pfc_pri0_xoff_time; + u64 mac_tx_pfc_pri1_xoff_time; + u64 mac_tx_pfc_pri2_xoff_time; + u64 mac_tx_pfc_pri3_xoff_time; + u64 mac_tx_pfc_pri4_xoff_time; + u64 mac_tx_pfc_pri5_xoff_time; + u64 mac_tx_pfc_pri6_xoff_time; + u64 mac_tx_pfc_pri7_xoff_time; + u64 mac_rx_pfc_pri0_xoff_time; + u64 mac_rx_pfc_pri1_xoff_time; + u64 mac_rx_pfc_pri2_xoff_time; + u64 mac_rx_pfc_pri3_xoff_time; + u64 mac_rx_pfc_pri4_xoff_time; + u64 mac_rx_pfc_pri5_xoff_time; + u64 mac_rx_pfc_pri6_xoff_time; + u64 mac_rx_pfc_pri7_xoff_time; + + /* duration of pause */ + u64 mac_tx_pause_xoff_time; + u64 mac_rx_pause_xoff_time; };
#define HCLGE_STATS_TIMER_INTERVAL 300UL @@@ -938,12 -966,15 +966,14 @@@ struct hclge_dev u16 priv_umv_size; /* unicast mac vlan space shared by PF and its VFs */ u16 share_umv_size; + /* multicast mac address number used by PF and its VFs */ + u16 used_mc_mac_num;
DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats, HCLGE_MAC_TNL_LOG_SIZE);
/* affinity mask and notify for misc interrupt */ cpumask_t affinity_mask; - struct irq_affinity_notify affinity_notify; struct hclge_ptp *ptp; struct devlink *devlink; }; diff --combined drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index cf00ad7bb881,3306050ad72c..645b2c0011e6 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@@ -1349,7 -1349,7 +1349,7 @@@ static void hclgevf_get_mac_addr(struc ether_addr_copy(p, hdev->hw.mac.mac_addr); }
- static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, + static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p, bool is_first) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); @@@ -2232,7 -2232,6 +2232,7 @@@ static void hclgevf_get_misc_vector(str void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) { if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && + test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) && !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); @@@ -3450,8 -3449,6 +3450,8 @@@ static int hclgevf_init_hdev(struct hcl
hclgevf_init_rxd_adv_layout(hdev);
+ set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state); + hdev->last_reset_time = jiffies; dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", HCLGEVF_DRIVER_NAME); @@@ -3902,7 -3899,7 +3902,7 @@@ static int hclgevf_init(void { pr_info("%s is initializing\n", HCLGEVF_NAME);
- hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME); + hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME); if (!hclgevf_wq) { pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); return -ENOMEM; diff --combined drivers/net/ethernet/intel/ice/ice_ptp.c index d1ef3d48a4b0,a1be0d04a2d0..bf7247c6f58e --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@@ -6,6 -6,252 +6,252 @@@
#define E810_OUT_PROP_DELAY_NS 1
+ static const struct ptp_pin_desc ice_pin_desc_e810t[] = { + /* name idx func chan */ + { "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } }, + { "SMA1", SMA1, PTP_PF_NONE, 1, { 0, } }, + { "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } }, + { "SMA2", SMA2, PTP_PF_NONE, 2, { 0, } }, + { "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } }, + }; + + /** + * ice_get_sma_config_e810t + * @hw: pointer to the hw struct + * @ptp_pins: pointer to the ptp_pin_desc structure + * + * Read the configuration of the SMA control logic and put it into the + * ptp_pin_desc structure + */ + static int + ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins) + { + u8 data, i; + int status; + + /* Read initial pin state */ + status = ice_read_sma_ctrl_e810t(hw, &data); + if (status) + return status; + + /* initialize with defaults */ + for (i = 0; i < NUM_PTP_PINS_E810T; i++) { + snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name), + "%s", ice_pin_desc_e810t[i].name); + ptp_pins[i].index = ice_pin_desc_e810t[i].index; + ptp_pins[i].func = ice_pin_desc_e810t[i].func; + ptp_pins[i].chan = ice_pin_desc_e810t[i].chan; + } + + /* Parse SMA1/UFL1 */ + switch (data & ICE_SMA1_MASK_E810T) { + case ICE_SMA1_MASK_E810T: + default: + ptp_pins[SMA1].func = PTP_PF_NONE; + ptp_pins[UFL1].func = PTP_PF_NONE; + break; + case ICE_SMA1_DIR_EN_E810T: + ptp_pins[SMA1].func = PTP_PF_PEROUT; + ptp_pins[UFL1].func = PTP_PF_NONE; + break; + case ICE_SMA1_TX_EN_E810T: + ptp_pins[SMA1].func = PTP_PF_EXTTS; + ptp_pins[UFL1].func = PTP_PF_NONE; + break; + case 0: + ptp_pins[SMA1].func = PTP_PF_EXTTS; + ptp_pins[UFL1].func = PTP_PF_PEROUT; + break; + } + + /* Parse SMA2/UFL2 */ + switch (data & ICE_SMA2_MASK_E810T) { + case ICE_SMA2_MASK_E810T: + default: + ptp_pins[SMA2].func = PTP_PF_NONE; + ptp_pins[UFL2].func = PTP_PF_NONE; + break; + case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T): + ptp_pins[SMA2].func = PTP_PF_EXTTS; + ptp_pins[UFL2].func = PTP_PF_NONE; + break; + case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T): + ptp_pins[SMA2].func = PTP_PF_PEROUT; + ptp_pins[UFL2].func = PTP_PF_NONE; + break; + case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T): + ptp_pins[SMA2].func = PTP_PF_NONE; + ptp_pins[UFL2].func = PTP_PF_EXTTS; + break; + case ICE_SMA2_DIR_EN_E810T: + ptp_pins[SMA2].func = PTP_PF_PEROUT; + ptp_pins[UFL2].func = PTP_PF_EXTTS; + break; + } + + return 0; + } + + /** + * ice_ptp_set_sma_config_e810t + * @hw: pointer to the hw struct + * @ptp_pins: pointer to the ptp_pin_desc structure + * + * Set the configuration of the SMA control logic based on the configuration in + * the ptp_pins parameter + */ + static int + ice_ptp_set_sma_config_e810t(struct ice_hw *hw, + const struct ptp_pin_desc *ptp_pins) + { + int status; + u8 data; + + /* SMA1 and UFL1 cannot be set to TX at the same time */ + if (ptp_pins[SMA1].func == PTP_PF_PEROUT && + ptp_pins[UFL1].func == PTP_PF_PEROUT) + return -EINVAL; + + /* SMA2 and UFL2 cannot be set to RX at the same time */ + if (ptp_pins[SMA2].func == PTP_PF_EXTTS && + ptp_pins[UFL2].func == PTP_PF_EXTTS) + return -EINVAL; + + /* Read initial pin state value */ + status = ice_read_sma_ctrl_e810t(hw, &data); + if (status) + return status; + + /* Set the right state based on the desired configuration */ + data &= ~ICE_SMA1_MASK_E810T; + if (ptp_pins[SMA1].func == PTP_PF_NONE && + ptp_pins[UFL1].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled"); + data |= ICE_SMA1_MASK_E810T; + } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS && + ptp_pins[UFL1].func == PTP_PF_NONE) { + 
dev_info(ice_hw_to_dev(hw), "SMA1 RX"); + data |= ICE_SMA1_TX_EN_E810T; + } else if (ptp_pins[SMA1].func == PTP_PF_NONE && + ptp_pins[UFL1].func == PTP_PF_PEROUT) { + /* U.FL 1 TX will always enable SMA 1 RX */ + dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX"); + } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS && + ptp_pins[UFL1].func == PTP_PF_PEROUT) { + dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX"); + } else if (ptp_pins[SMA1].func == PTP_PF_PEROUT && + ptp_pins[UFL1].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA1 TX"); + data |= ICE_SMA1_DIR_EN_E810T; + } + + data &= ~ICE_SMA2_MASK_E810T; + if (ptp_pins[SMA2].func == PTP_PF_NONE && + ptp_pins[UFL2].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled"); + data |= ICE_SMA2_MASK_E810T; + } else if (ptp_pins[SMA2].func == PTP_PF_EXTTS && + ptp_pins[UFL2].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA2 RX"); + data |= (ICE_SMA2_TX_EN_E810T | + ICE_SMA2_UFL2_RX_DIS_E810T); + } else if (ptp_pins[SMA2].func == PTP_PF_NONE && + ptp_pins[UFL2].func == PTP_PF_EXTTS) { + dev_info(ice_hw_to_dev(hw), "UFL2 RX"); + data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T); + } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT && + ptp_pins[UFL2].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA2 TX"); + data |= (ICE_SMA2_DIR_EN_E810T | + ICE_SMA2_UFL2_RX_DIS_E810T); + } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT && + ptp_pins[UFL2].func == PTP_PF_EXTTS) { + dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX"); + data |= ICE_SMA2_DIR_EN_E810T; + } + + return ice_write_sma_ctrl_e810t(hw, data); + } + + /** + * ice_ptp_set_sma_e810t + * @info: the driver's PTP info structure + * @pin: pin index in kernel structure + * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT) + * + * Set the configuration of a single SMA pin + */ + static int + ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin, + enum ptp_pin_function func) + { + struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T]; + struct ice_pf *pf = ptp_info_to_pf(info); + struct ice_hw *hw = &pf->hw; + int err; + + if (pin < SMA1 || func > PTP_PF_PEROUT) + return -EOPNOTSUPP; + + err = ice_get_sma_config_e810t(hw, ptp_pins); + if (err) + return err; + + /* Disable the same function on the other pin sharing the channel */ + if (pin == SMA1 && ptp_pins[UFL1].func == func) + ptp_pins[UFL1].func = PTP_PF_NONE; + if (pin == UFL1 && ptp_pins[SMA1].func == func) + ptp_pins[SMA1].func = PTP_PF_NONE; + + if (pin == SMA2 && ptp_pins[UFL2].func == func) + ptp_pins[UFL2].func = PTP_PF_NONE; + if (pin == UFL2 && ptp_pins[SMA2].func == func) + ptp_pins[SMA2].func = PTP_PF_NONE; + + /* Set up new pin function in the temp table */ + ptp_pins[pin].func = func; + + return ice_ptp_set_sma_config_e810t(hw, ptp_pins); + } + + /** + * ice_verify_pin_e810t + * @info: the driver's PTP info structure + * @pin: Pin index + * @func: Assigned function + * @chan: Assigned channel + * + * Verify if the pin supports the requested pin function and check pin consistency. 
+ * Reconfigure the SMA logic attached to the given pin to enable its + * desired functionality + */ + static int + ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) + { + /* Don't allow channel reassignment */ + if (chan != ice_pin_desc_e810t[pin].chan) + return -EOPNOTSUPP; + + /* Check if functions are properly assigned */ + switch (func) { + case PTP_PF_NONE: + break; + case PTP_PF_EXTTS: + if (pin == UFL1) + return -EOPNOTSUPP; + break; + case PTP_PF_PEROUT: + if (pin == UFL2 || pin == GNSS) + return -EOPNOTSUPP; + break; + case PTP_PF_PHYSYNC: + return -EOPNOTSUPP; + } + + return ice_ptp_set_sma_e810t(info, pin, func); + } + /** * ice_set_tx_tstamp - Enable or disable Tx timestamping * @pf: The PF pointer to search in @@@ -735,17 -981,34 +981,34 @@@ ice_ptp_gpio_enable_e810(struct ptp_clo { struct ice_pf *pf = ptp_info_to_pf(info); struct ice_perout_channel clk_cfg = {0}; + bool sma_pres = false; unsigned int chan; u32 gpio_pin; int err;
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) + sma_pres = true; + switch (rq->type) { case PTP_CLK_REQ_PEROUT: chan = rq->perout.index; - if (chan == PPS_CLK_GEN_CHAN) + if (sma_pres) { + if (chan == ice_pin_desc_e810t[SMA1].chan) + clk_cfg.gpio_pin = GPIO_20; + else if (chan == ice_pin_desc_e810t[SMA2].chan) + clk_cfg.gpio_pin = GPIO_22; + else + return -1; + } else if (ice_is_e810t(&pf->hw)) { + if (chan == 0) + clk_cfg.gpio_pin = GPIO_20; + else + clk_cfg.gpio_pin = GPIO_22; + } else if (chan == PPS_CLK_GEN_CHAN) { clk_cfg.gpio_pin = PPS_PIN_INDEX; - else + } else { clk_cfg.gpio_pin = chan; + }
clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) + rq->perout.period.nsec); @@@ -757,7 -1020,19 +1020,19 @@@ break; case PTP_CLK_REQ_EXTTS: chan = rq->extts.index; - gpio_pin = chan; + if (sma_pres) { + if (chan < ice_pin_desc_e810t[SMA2].chan) + gpio_pin = GPIO_21; + else + gpio_pin = GPIO_23; + } else if (ice_is_e810t(&pf->hw)) { + if (chan == 0) + gpio_pin = GPIO_21; + else + gpio_pin = GPIO_23; + } else { + gpio_pin = chan; + }
err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin, rq->extts.flags); @@@ -1012,7 -1287,7 +1287,7 @@@ int ice_ptp_set_ts_config(struct ice_p * The timestamp is in ns, so we must convert the result first. */ void - ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring, + ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { u32 ts_high; @@@ -1037,14 -1312,94 +1312,94 @@@ } }
+ /** + * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins + * @pf: pointer to the PF structure + * @info: PTP clock info structure + * + * Disable the OS access to the SMA pins. Called to clear out the OS + * indications of pin support when we fail to setup the E810-T SMA control + * register. + */ + static void + ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) + { + struct device *dev = ice_pf_to_dev(pf); + + dev_warn(dev, "Failed to configure E810-T SMA pin control\n"); + + info->enable = NULL; + info->verify = NULL; + info->n_pins = 0; + info->n_ext_ts = 0; + info->n_per_out = 0; + } + + /** + * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins + * @pf: pointer to the PF structure + * @info: PTP clock info structure + * + * Finish setting up the SMA pins by allocating pin_config, and setting it up + * according to the current status of the SMA. On failure, disable all of the + * extended SMA pin support. + */ + static void + ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) + { + struct device *dev = ice_pf_to_dev(pf); + int err; + + /* Allocate memory for kernel pins interface */ + info->pin_config = devm_kcalloc(dev, info->n_pins, + sizeof(*info->pin_config), GFP_KERNEL); + if (!info->pin_config) { + ice_ptp_disable_sma_pins_e810t(pf, info); + return; + } + + /* Read current SMA status */ + err = ice_get_sma_config_e810t(&pf->hw, info->pin_config); + if (err) + ice_ptp_disable_sma_pins_e810t(pf, info); + } + + /** + * ice_ptp_setup_pins_e810t - Setup PTP pins in sysfs + * @pf: pointer to the PF instance + * @info: PTP clock capabilities + */ + static void + ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) + { + /* Check if SMA controller is in the netlist */ + if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL) && + !ice_is_pca9575_present(&pf->hw)) + ice_clear_feature_support(pf, ICE_F_SMA_CTRL); + + if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { + info->n_ext_ts = N_EXT_TS_E810_NO_SMA; + info->n_per_out = N_PER_OUT_E810T_NO_SMA; + return; + } + + info->n_per_out = N_PER_OUT_E810T; + info->n_ext_ts = N_EXT_TS_E810; + info->n_pins = NUM_PTP_PINS_E810T; + info->verify = ice_verify_pin_e810t; + + /* Complete setup of the SMA pins */ + ice_ptp_setup_sma_pins_e810t(pf, info); + } + /** * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs * @info: PTP clock capabilities */ static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info) { - info->n_per_out = E810_N_PER_OUT; - info->n_ext_ts = E810_N_EXT_TS; + info->n_per_out = N_PER_OUT_E810; + info->n_ext_ts = N_EXT_TS_E810; }
/** @@@ -1062,7 -1417,10 +1417,10 @@@ ice_ptp_set_funcs_e810(struct ice_pf *p { info->enable = ice_ptp_gpio_enable_e810;
- ice_ptp_setup_pins_e810(info); + if (ice_is_e810t(&pf->hw)) + ice_ptp_setup_pins_e810t(pf, info); + else + ice_ptp_setup_pins_e810(info); }
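Once ice_ptp_set_funcs_e810() has registered the pin table and the verify callback, the SMA/U.FL pins are driven through the standard PTP pin ioctls rather than a driver-private interface. A minimal userspace sketch follows; the device node /dev/ptp0, the pin index 1 and channel 1 for SMA1 are assumptions taken from the pin table above, not guaranteed on every system:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/ptp_clock.h>

	int main(void)
	{
		struct ptp_pin_desc pin;
		int fd;

		fd = open("/dev/ptp0", O_RDWR);	/* assumed device node */
		if (fd < 0) {
			perror("open");
			return 1;
		}

		memset(&pin, 0, sizeof(pin));
		pin.index = 1;			/* assumed: SMA1 in the table above */
		pin.chan = 1;			/* channel listed for SMA1 */
		pin.func = PTP_PF_PEROUT;	/* request periodic output on SMA1 */

		/* The driver's .verify callback (ice_verify_pin_e810t) decides
		 * whether this assignment is allowed and reprograms the SMA logic.
		 */
		if (ioctl(fd, PTP_PIN_SETFUNC, &pin)) {
			perror("PTP_PIN_SETFUNC");
			close(fd);
			return 1;
		}

		close(fd);
		return 0;
	}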
/** @@@ -1571,9 -1929,6 +1929,9 @@@ err_kworker */ void ice_ptp_release(struct ice_pf *pf) { + if (!test_bit(ICE_FLAG_PTP, pf->flags)) + return; + /* Disable timestamping for both Tx and Rx */ ice_ptp_cfg_timestamp(pf, false);
diff --combined drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 02db3148240a,da1bec04efff..eae9aa9c0811 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@@ -745,7 -745,7 +745,7 @@@ static int mlx5_fw_tracer_set_mtrc_conf MLX5_SET(mtrc_conf, in, trace_mode, TRACE_TO_MEMORY); MLX5_SET(mtrc_conf, in, log_trace_buffer_size, ilog2(TRACER_BUFFER_PAGE_NUM)); - MLX5_SET(mtrc_conf, in, trace_mkey, tracer->buff.mkey.key); + MLX5_SET(mtrc_conf, in, trace_mkey, tracer->buff.mkey);
err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTRC_CONF, 0, 1); @@@ -1028,7 -1028,7 +1028,7 @@@ int mlx5_fw_tracer_init(struct mlx5_fw_
err_notifier_unregister: mlx5_eq_notifier_unregister(dev, &tracer->nb); - mlx5_core_destroy_mkey(dev, &tracer->buff.mkey); + mlx5_core_destroy_mkey(dev, tracer->buff.mkey); err_dealloc_pd: mlx5_core_dealloc_pd(dev, tracer->buff.pdn); err_cancel_work: @@@ -1051,7 -1051,7 +1051,7 @@@ void mlx5_fw_tracer_cleanup(struct mlx5 if (tracer->owner) mlx5_fw_tracer_ownership_release(tracer);
- mlx5_core_destroy_mkey(tracer->dev, &tracer->buff.mkey); + mlx5_core_destroy_mkey(tracer->dev, tracer->buff.mkey); mlx5_core_dealloc_pd(tracer->dev, tracer->buff.pdn); }
@@@ -1069,7 -1069,6 +1069,6 @@@ void mlx5_fw_tracer_destroy(struct mlx5 mlx5_fw_tracer_clean_saved_traces_array(tracer); mlx5_fw_tracer_free_strings_db(tracer); mlx5_fw_tracer_destroy_log_buf(tracer); - flush_workqueue(tracer->work_queue); destroy_workqueue(tracer->work_queue); kvfree(tracer); } diff --combined drivers/net/ethernet/mellanox/mlx5/core/en.h index 7761daa25f63,67453dd75b93..f0ac6b0d9653 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@@ -79,6 -79,11 +79,11 @@@ struct page_pool SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define MLX5E_RX_MAX_HEAD (256) + #define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9) + #define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE) + #define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64) + #define MLX5E_SHAMPO_WQ_RESRV_SIZE (64 * 1024) + #define MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE (4096)
#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \ (6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */ @@@ -152,6 -157,25 +157,25 @@@ #define MLX5E_UMR_WQEBBS \ (DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
+ #define MLX5E_KLM_UMR_WQE_SZ(sgl_len)\ + (sizeof(struct mlx5e_umr_wqe) +\ + (sizeof(struct mlx5_klm) * (sgl_len))) + + #define MLX5E_KLM_UMR_WQEBBS(klm_entries) \ + (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_BB)) + + #define MLX5E_KLM_UMR_DS_CNT(klm_entries)\ + (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_DS)) + + #define MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size)\ + (((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_klm)) + + #define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\ + ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT) + + #define MLX5E_MAX_KLM_PER_WQE(mdev) \ + MLX5E_KLM_ENTRIES_PER_WQE(MLX5E_TX_MPW_MAX_NUM_DS << MLX5_MKEY_BSF_OCTO_SIZE) + #define MLX5E_MSG_LEVEL NETIF_MSG_LINK
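The new MLX5E_KLM_* macros above size the UMR work queue elements that carry KLM entries for the SHAMPO header buffer. A back-of-the-envelope illustration, with the constant values assumed from the usual mlx5 definitions rather than taken from this patch (128-byte mlx5e_umr_wqe header, 16-byte struct mlx5_klm, 64-byte MLX5_SEND_WQE_BB, MLX5_UMR_KLM_ALIGNMENT of 4):

	/* Illustration only: per-WQE KLM capacity under assumed sizes,
	 * mirroring MLX5E_KLM_MAX_ENTRIES_PER_WQE / MLX5E_KLM_ENTRIES_PER_WQE.
	 */
	static inline unsigned int klm_entries_for_wqe_size(unsigned int wqe_size)
	{
		const unsigned int umr_wqe_hdr = 128;	/* assumed sizeof(struct mlx5e_umr_wqe) */
		const unsigned int klm_sz = 16;		/* assumed sizeof(struct mlx5_klm) */
		const unsigned int klm_align = 4;	/* assumed MLX5_UMR_KLM_ALIGNMENT */
		unsigned int max = (wqe_size - umr_wqe_hdr) / klm_sz;

		return max - (max % klm_align);		/* ALIGN_DOWN */
	}

Under those assumptions a 256-byte WQE holds (256 - 128) / 16 = 8 KLM entries, i.e. MLX5E_KLM_UMR_WQEBBS(8) spans four 64-byte basic blocks.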
#define mlx5e_dbg(mlevel, priv, format, ...) \ @@@ -217,11 -241,12 +241,12 @@@ struct mlx5e_umr_wqe struct mlx5_wqe_ctrl_seg ctrl; struct mlx5_wqe_umr_ctrl_seg uctrl; struct mlx5_mkey_seg mkc; - struct mlx5_mtt inline_mtts[0]; + union { + struct mlx5_mtt inline_mtts[0]; + struct mlx5_klm inline_klms[0]; + }; };
- extern const char mlx5e_self_tests[][ETH_GSTRING_LEN]; - enum mlx5e_priv_flag { MLX5E_PFLAG_RX_CQE_BASED_MODER, MLX5E_PFLAG_TX_CQE_BASED_MODER, @@@ -244,6 -269,21 +269,21 @@@
#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
+ enum packet_merge { + MLX5E_PACKET_MERGE_NONE, + MLX5E_PACKET_MERGE_LRO, + MLX5E_PACKET_MERGE_SHAMPO, + }; + + struct mlx5e_packet_merge_param { + enum packet_merge type; + u32 timeout; + struct { + u8 match_criteria_type; + u8 alignment_granularity; + } shampo; + }; + struct mlx5e_params { u8 log_sq_size; u8 rq_wq_type; @@@ -253,18 -293,20 +293,20 @@@ u16 mode; u8 num_tc; struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; + struct { + struct mlx5e_mqprio_rl *rl; + } channel; } mqprio; bool rx_cqe_compress_def; bool tunneled_offload_en; struct dim_cq_moder rx_cq_moderation; struct dim_cq_moder tx_cq_moderation; - bool lro_en; + struct mlx5e_packet_merge_param packet_merge; u8 tx_min_inline_mode; bool vlan_strip_disable; bool scatter_fcs_en; bool rx_dim_enabled; bool tx_dim_enabled; - u32 lro_timeout; u32 pflags; struct bpf_prog *xdp_prog; struct mlx5e_xsk *xsk; @@@ -286,7 -328,8 +328,8 @@@ enum MLX5E_RQ_STATE_NO_CSUM_COMPLETE, MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */ MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */ - MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX /* set when mini_cqe_resp_stride_index cap is used */ + MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */ + MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */ };
struct mlx5e_cq { @@@ -577,6 -620,7 +620,7 @@@ typedef struct sk_buff struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq); typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16); + typedef void (*mlx5e_fp_shampo_dealloc_hd)(struct mlx5e_rq*, u16, u16, bool);
int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk); void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params); @@@ -598,6 -642,25 +642,25 @@@ struct mlx5e_rq_frags_info u8 wqe_bulk; };
+ struct mlx5e_shampo_hd { - struct mlx5_core_mkey mkey; ++ u32 mkey; + struct mlx5e_dma_info *info; + struct page *last_page; + u16 hd_per_wq; + u16 hd_per_wqe; + unsigned long *bitmap; + u16 pi; + u16 ci; + __be32 key; + u64 last_addr; + }; + + struct mlx5e_hw_gro_data { + struct sk_buff *skb; + struct flow_keys fk; + int second_ip_id; + }; + struct mlx5e_rq { /* data path */ union { @@@ -619,6 -682,7 +682,7 @@@ u8 umr_in_progress; u8 umr_last_bulk; u8 umr_completed; + struct mlx5e_shampo_hd *shampo; } mpwqe; }; struct { @@@ -638,6 -702,8 +702,8 @@@ struct mlx5e_icosq *icosq; struct mlx5e_priv *priv;
+ struct mlx5e_hw_gro_data *hw_gro_data; + mlx5e_fp_handle_rx_cqe handle_rx_cqe; mlx5e_fp_post_rx_wqes post_wqes; mlx5e_fp_dealloc_wqe dealloc_wqe; @@@ -665,7 -731,7 +731,7 @@@ u8 wq_type; u32 rqn; struct mlx5_core_dev *mdev; - struct mlx5_core_mkey umr_mkey; + u32 umr_mkey; struct mlx5e_dma_info wqe_overflow;
/* XDP read-mostly */ @@@ -879,11 -945,13 +945,13 @@@ struct mlx5e_priv #endif struct mlx5e_scratchpad scratchpad; struct mlx5e_htb htb; + struct mlx5e_mqprio_rl *mqprio_rl; };
struct mlx5e_rx_handlers { mlx5e_fp_handle_rx_cqe handle_rx_cqe; mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe; + mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe_shampo; };
extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic; @@@ -913,11 -981,13 +981,13 @@@ void mlx5e_build_ptys2ethtool_map(void)
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
+ void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close); void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
void mlx5e_init_l2_addr(struct mlx5e_priv *priv); int mlx5e_self_test_num(struct mlx5e_priv *priv); + int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data); void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, u64 *buf); void mlx5e_set_rx_mode_work(struct work_struct *work); @@@ -1003,7 -1073,8 +1073,8 @@@ int mlx5e_modify_sq(struct mlx5_core_de struct mlx5e_modify_sq_param *p); int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix, struct mlx5e_params *params, struct mlx5e_sq_param *param, - struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid); + struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, + struct mlx5e_sq_stats *sq_stats); void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq); void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq); void mlx5e_free_txqsq(struct mlx5e_txqsq *sq); diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5fce9401ac74,6f398f636f01..9febe4a916df --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@@ -218,6 -218,45 +218,45 @@@ static inline void mlx5e_build_umr_wqe( ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); }
+ static int mlx5e_rq_shampo_hd_alloc(struct mlx5e_rq *rq, int node) + { + rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo), + GFP_KERNEL, node); + if (!rq->mpwqe.shampo) + return -ENOMEM; + return 0; + } + + static void mlx5e_rq_shampo_hd_free(struct mlx5e_rq *rq) + { + kvfree(rq->mpwqe.shampo); + } + + static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node) + { + struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; + + shampo->bitmap = bitmap_zalloc_node(shampo->hd_per_wq, GFP_KERNEL, + node); + if (!shampo->bitmap) + return -ENOMEM; + + shampo->info = kvzalloc_node(array_size(shampo->hd_per_wq, + sizeof(*shampo->info)), + GFP_KERNEL, node); + if (!shampo->info) { + kvfree(shampo->bitmap); + return -ENOMEM; + } + return 0; + } + + static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq) + { + kvfree(rq->mpwqe.shampo->bitmap); + kvfree(rq->mpwqe.shampo->info); + } + static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node) { int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq); @@@ -233,9 -272,10 +272,9 @@@ return 0; }
- static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, - u64 npages, u8 page_shift, u32 *umr_mkey, - dma_addr_t filler_addr) + static int mlx5e_create_umr_mtt_mkey(struct mlx5_core_dev *mdev, - u64 npages, u8 page_shift, - struct mlx5_core_mkey *umr_mkey, ++ u64 npages, u8 page_shift, u32 *umr_mkey, + dma_addr_t filler_addr) { struct mlx5_mtt *mtt; int inlen; @@@ -283,12 -323,59 +322,58 @@@ return err; }
+ static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev, - u64 nentries, - struct mlx5_core_mkey *umr_mkey) ++ u64 nentries, u32 *umr_mkey) + { + int inlen; + void *mkc; + u32 *in; + int err; + + inlen = MLX5_ST_SZ_BYTES(create_mkey_in); + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + + MLX5_SET(mkc, mkc, free, 1); + MLX5_SET(mkc, mkc, umr_en, 1); + MLX5_SET(mkc, mkc, lw, 1); + MLX5_SET(mkc, mkc, lr, 1); + MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS); + mlx5e_mkey_set_relaxed_ordering(mdev, mkc); + MLX5_SET(mkc, mkc, qpn, 0xffffff); + MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn); + MLX5_SET(mkc, mkc, translations_octword_size, nentries); + MLX5_SET(mkc, mkc, length64, 1); + err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen); + + kvfree(in); + return err; + } + static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq) { u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));
- return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey, - rq->wqe_overflow.addr); + return mlx5e_create_umr_mtt_mkey(mdev, num_mtts, PAGE_SHIFT, + &rq->umr_mkey, rq->wqe_overflow.addr); + } + + static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev, + struct mlx5e_rq *rq) + { + u32 max_klm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size)); + + if (max_klm_size < rq->mpwqe.shampo->hd_per_wq) { + mlx5_core_err(mdev, "max klm list size 0x%x is smaller than shampo header buffer list size 0x%x\n", + max_klm_size, rq->mpwqe.shampo->hd_per_wq); + return -EINVAL; + } + return mlx5e_create_umr_klm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq, + &rq->mpwqe.shampo->mkey); }
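The new mlx5e_create_rq_hd_umr_mkey() refuses to set up a SHAMPO header buffer that would not fit into a single KLM list, comparing it against 1 << log_max_klm_list_size. A standalone sketch of that log2-encoded capability check (not driver code; all values are invented for the example):

    #include <stdio.h>

    int main(void)
    {
        unsigned int log_max_klm_list_size = 16;        /* hypothetical HCA capability */
        unsigned int max_klm_size = 1u << log_max_klm_list_size;
        unsigned int hd_per_wq = 128 * 1024;            /* hypothetical header count per WQ */

        if (max_klm_size < hd_per_wq)
            printf("header list 0x%x exceeds max KLM list size 0x%x\n",
                   hd_per_wq, max_klm_size);
        else
            printf("header list fits\n");
        return 0;
    }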
static u64 mlx5e_get_mpwqe_offset(u16 wqe_ix) @@@ -402,6 -489,65 +487,65 @@@ static int mlx5e_init_rxq_rq(struct mlx return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0); }
+ static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev, + struct mlx5e_params *params, + struct mlx5e_rq_param *rqp, + struct mlx5e_rq *rq, + u32 *pool_size, + int node) + { + void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq); + int wq_size; + int err; + + if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) + return 0; + err = mlx5e_rq_shampo_hd_alloc(rq, node); + if (err) + goto out; + rq->mpwqe.shampo->hd_per_wq = + mlx5e_shampo_hd_per_wq(mdev, params, rqp); + err = mlx5e_create_rq_hd_umr_mkey(mdev, rq); + if (err) + goto err_shampo_hd; + err = mlx5e_rq_shampo_hd_info_alloc(rq, node); + if (err) + goto err_shampo_info; + rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data), GFP_KERNEL, node); + if (!rq->hw_gro_data) { + err = -ENOMEM; + goto err_hw_gro_data; + } + rq->mpwqe.shampo->key = - cpu_to_be32(rq->mpwqe.shampo->mkey.key); ++ cpu_to_be32(rq->mpwqe.shampo->mkey); + rq->mpwqe.shampo->hd_per_wqe = + mlx5e_shampo_hd_per_wqe(mdev, params, rqp); + wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz)); + *pool_size += (rq->mpwqe.shampo->hd_per_wqe * wq_size) / + MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; + return 0; + + err_hw_gro_data: + mlx5e_rq_shampo_hd_info_free(rq); + err_shampo_info: - mlx5_core_destroy_mkey(mdev, &rq->mpwqe.shampo->mkey); ++ mlx5_core_destroy_mkey(mdev, rq->mpwqe.shampo->mkey); + err_shampo_hd: + mlx5e_rq_shampo_hd_free(rq); + out: + return err; + } + + static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq) + { + if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) + return; + + kvfree(rq->hw_gro_data); + mlx5e_rq_shampo_hd_info_free(rq); - mlx5_core_destroy_mkey(rq->mdev, &rq->mpwqe.shampo->mkey); ++ mlx5_core_destroy_mkey(rq->mdev, rq->mpwqe.shampo->mkey); + mlx5e_rq_shampo_hd_free(rq); + } + static int mlx5e_alloc_rq(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, struct mlx5e_rq_param *rqp, @@@ -454,11 -600,16 +598,16 @@@ err = mlx5e_create_rq_umr_mkey(mdev, rq); if (err) goto err_rq_drop_page; - rq->mkey_be = cpu_to_be32(rq->umr_mkey.key); + rq->mkey_be = cpu_to_be32(rq->umr_mkey);
err = mlx5e_rq_alloc_mpwqe_info(rq, node); if (err) goto err_rq_mkey; + + err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, &pool_size, node); + if (err) + goto err_free_by_rq_type; + break; default: /* MLX5_WQ_TYPE_CYCLIC */ err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, @@@ -486,7 -637,7 +635,7 @@@ if (err) goto err_rq_frags;
- rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey.key); + rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey); }
if (xsk) { @@@ -511,14 -662,14 +660,14 @@@ if (IS_ERR(rq->page_pool)) { err = PTR_ERR(rq->page_pool); rq->page_pool = NULL; - goto err_free_by_rq_type; + goto err_free_shampo; } if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL, rq->page_pool); } if (err) - goto err_free_by_rq_type; + goto err_free_shampo;
for (i = 0; i < wq_sz; i++) { if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { @@@ -527,8 -678,10 +676,10 @@@ u32 byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz; u64 dma_offset = mlx5e_get_mpwqe_offset(i); + u16 headroom = test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) ? + 0 : rq->buff.headroom;
- wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom); + wqe->data[0].addr = cpu_to_be64(dma_offset + headroom); wqe->data[0].byte_count = cpu_to_be32(byte_count); wqe->data[0].lkey = rq->mkey_be; } else { @@@ -568,12 -721,14 +719,14 @@@
return 0;
+ err_free_shampo: + mlx5e_rq_free_shampo(rq); err_free_by_rq_type: switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: kvfree(rq->mpwqe.info); err_rq_mkey: - mlx5_core_destroy_mkey(mdev, &rq->umr_mkey); + mlx5_core_destroy_mkey(mdev, rq->umr_mkey); err_rq_drop_page: mlx5e_free_mpwqe_rq_drop_page(rq); break; @@@ -606,8 -761,9 +759,9 @@@ static void mlx5e_free_rq(struct mlx5e_ switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: kvfree(rq->mpwqe.info); - mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey); + mlx5_core_destroy_mkey(rq->mdev, rq->umr_mkey); mlx5e_free_mpwqe_rq_drop_page(rq); + mlx5e_rq_free_shampo(rq); break; default: /* MLX5_WQ_TYPE_CYCLIC */ kvfree(rq->wqe.frags); @@@ -661,6 -817,12 +815,12 @@@ int mlx5e_create_rq(struct mlx5e_rq *rq MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) { + MLX5_SET(wq, wq, log_headers_buffer_entry_num, + order_base_2(rq->mpwqe.shampo->hd_per_wq)); - MLX5_SET(wq, wq, headers_mkey, rq->mpwqe.shampo->mkey.key); ++ MLX5_SET(wq, wq, headers_mkey, rq->mpwqe.shampo->mkey); + } + mlx5_fill_page_frag_array(&rq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
@@@ -800,6 -962,15 +960,15 @@@ void mlx5e_free_rx_in_progress_descs(st head = mlx5_wq_ll_get_wqe_next_ix(wq, head); }
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) { + u16 len; + + len = (rq->mpwqe.shampo->pi - rq->mpwqe.shampo->ci) & + (rq->mpwqe.shampo->hd_per_wq - 1); + mlx5e_shampo_dealloc_hd(rq, len, rq->mpwqe.shampo->ci, false); + rq->mpwqe.shampo->pi = rq->mpwqe.shampo->ci; + } + rq->mpwqe.actual_wq_head = wq->head; rq->mpwqe.umr_in_progress = 0; rq->mpwqe.umr_completed = 0; @@@ -825,6 -996,10 +994,10 @@@ void mlx5e_free_rx_descs(struct mlx5e_r mlx5_wq_ll_pop(wq, wqe_ix_be, &wqe->next.next_wqe_index); } + + if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) + mlx5e_shampo_dealloc_hd(rq, rq->mpwqe.shampo->hd_per_wq, + 0, true); } else { struct mlx5_wq_cyc *wq = &rq->wqe.wq;
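The in-progress SHAMPO headers are counted as (pi - ci) & (hd_per_wq - 1), which only works because hd_per_wq is a power of two; the mask makes producer wrap-around harmless. A minimal standalone illustration of the arithmetic (sizes chosen arbitrarily, not taken from the driver):

    #include <stdio.h>

    int main(void)
    {
        const unsigned int size = 16;     /* ring size, must be a power of two */
        unsigned int ci = 14, pi = 3;     /* producer has wrapped past index 0 */
        unsigned int in_flight = (pi - ci) & (size - 1);

        printf("%u entries still posted\n", in_flight);  /* prints 5: slots 14,15,0,1,2 */
        return 0;
    }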
@@@ -844,6 -1019,9 +1017,9 @@@ int mlx5e_open_rq(struct mlx5e_params * struct mlx5_core_dev *mdev = rq->mdev; int err;
+ if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) + __set_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state); + err = mlx5e_alloc_rq(params, xsk, param, node, rq); if (err) return err; @@@ -929,9 -1107,10 +1105,10 @@@ static int mlx5e_alloc_xdpsq_fifo(struc struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo; int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS; + size_t size;
- xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq, - GFP_KERNEL, numa); + size = array_size(sizeof(*xdpi_fifo->xi), dsegs_per_wq); + xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa); if (!xdpi_fifo->xi) return -ENOMEM;
@@@ -945,10 -1124,11 +1122,11 @@@ static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa) { int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); + size_t size; int err;
- sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz, - GFP_KERNEL, numa); + size = array_size(sizeof(*sq->db.wqe_info), wq_sz); + sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa); if (!sq->db.wqe_info) return -ENOMEM;
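Both XDP SQ allocations above switch from an open-coded sizeof() * n multiplication to array_size(), which saturates on overflow so the allocation fails cleanly instead of returning an undersized buffer. A userspace approximation of that behaviour (array_size_demo is a stand-in written for this example, not the kernel helper):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for the kernel's array_size(): saturate instead of wrapping. */
    static size_t array_size_demo(size_t a, size_t b)
    {
        size_t prod = a * b;

        if (b && prod / b != a)
            return SIZE_MAX;
        return prod;
    }

    int main(void)
    {
        size_t n = SIZE_MAX / 4;                        /* absurd element count */
        size_t bytes = array_size_demo(sizeof(uint64_t), n);
        void *p;

        printf("requesting %zu bytes\n", bytes);        /* saturated to SIZE_MAX */
        p = malloc(bytes);
        printf("allocation %s\n", p ? "unexpectedly succeeded" : "failed as intended");
        free(p);
        return 0;
    }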
@@@ -1297,7 -1477,8 +1475,8 @@@ static int mlx5e_set_sq_maxrate(struct
int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix, struct mlx5e_params *params, struct mlx5e_sq_param *param, - struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid) + struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, + struct mlx5e_sq_stats *sq_stats) { struct mlx5e_create_sq_param csp = {}; u32 tx_rate; @@@ -1307,10 -1488,7 +1486,7 @@@ if (err) return err;
- if (qos_queue_group_id) - sq->stats = c->priv->htb.qos_sq_stats[qos_qid]; - else - sq->stats = &c->priv->channel_stats[c->ix].sq[tc]; + sq->stats = sq_stats;
csp.tisn = tisn; csp.tis_lst_sz = 1; @@@ -1704,6 -1882,36 +1880,36 @@@ static void mlx5e_close_tx_cqs(struct m mlx5e_close_cq(&c->sq[tc].cq); }
+ static int mlx5e_mqprio_txq_to_tc(struct netdev_tc_txq *tc_to_txq, unsigned int txq) + { + int tc; + + for (tc = 0; tc < TC_MAX_QUEUE; tc++) + if (txq - tc_to_txq[tc].offset < tc_to_txq[tc].count) + return tc; + + WARN(1, "Unexpected TCs configuration. No match found for txq %u", txq); + return -ENOENT; + } + + static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix, + u32 *hw_id) + { + int tc; + + if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL || + !params->mqprio.channel.rl) { + *hw_id = 0; + return 0; + } + + tc = mlx5e_mqprio_txq_to_tc(params->mqprio.tc_to_txq, txq_ix); + if (tc < 0) + return tc; + + return mlx5e_mqprio_rl_get_node_hw_id(params->mqprio.channel.rl, tc, hw_id); + } + static int mlx5e_open_sqs(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_channel_param *cparam) @@@ -1712,9 -1920,16 +1918,16 @@@
for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) { int txq_ix = c->ix + tc * params->num_channels; + u32 qos_queue_group_id; + + err = mlx5e_txq_get_qos_node_hw_id(params, txq_ix, &qos_queue_group_id); + if (err) + goto err_close_sqs;
err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, - params, &cparam->txq_sq, &c->sq[tc], tc, 0, 0); + params, &cparam->txq_sq, &c->sq[tc], tc, + qos_queue_group_id, + &c->priv->channel_stats[c->ix].sq[tc]); if (err) goto err_close_sqs; } @@@ -1990,7 -2205,7 +2203,7 @@@ static int mlx5e_open_channel(struct ml c->cpu = cpu; c->pdev = mlx5_core_dma_dev(priv->mdev); c->netdev = priv->netdev; - c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key); + c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey); c->num_tc = mlx5e_get_dcb_num_tc(params); c->xdp = !!params->xdp_prog; c->stats = &priv->channel_stats[ix].ch; @@@ -2184,17 -2399,14 +2397,14 @@@ void mlx5e_close_channels(struct mlx5e_ chs->num = 0; }
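mlx5e_mqprio_txq_to_tc(), added above, maps a txq index back to its traffic class using the per-TC {offset, count} table; the unsigned subtraction doubles as a lower-bound check. A self-contained sketch of the same lookup with made-up queue ranges:

    #include <stdio.h>

    struct tc_txq { unsigned int count; unsigned int offset; };

    /* Each TC owns the contiguous txq range [offset, offset + count). */
    static int txq_to_tc(const struct tc_txq *map, int num_tc, unsigned int txq)
    {
        for (int tc = 0; tc < num_tc; tc++)
            if (txq - map[tc].offset < map[tc].count)  /* wraps, so txq < offset also fails */
                return tc;
        return -1;
    }

    int main(void)
    {
        struct tc_txq map[2] = { { 4, 0 }, { 4, 4 } };    /* two TCs, 4 queues each */

        printf("txq 5 -> tc %d\n", txq_to_tc(map, 2, 5)); /* prints 1 */
        printf("txq 9 -> tc %d\n", txq_to_tc(map, 2, 9)); /* out of range: -1 */
        return 0;
    }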
- static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) + static int mlx5e_modify_tirs_packet_merge(struct mlx5e_priv *priv) { struct mlx5e_rx_res *res = priv->rx_res; - struct mlx5e_lro_param lro_param; - - lro_param = mlx5e_get_lro_param(&priv->channels.params);
- return mlx5e_rx_res_lro_set_param(res, &lro_param); + return mlx5e_rx_res_packet_merge_set_param(res, &priv->channels.params.packet_merge); }
- static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_lro); + static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_packet_merge);
static int mlx5e_set_mtu(struct mlx5_core_dev *mdev, struct mlx5e_params *params, u16 mtu) @@@ -2339,6 -2551,13 +2549,13 @@@ static int mlx5e_update_netdev_queues(s netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err); goto err_txqs; } + if (priv->mqprio_rl != priv->channels.params.mqprio.channel.rl) { + if (priv->mqprio_rl) { + mlx5e_mqprio_rl_cleanup(priv->mqprio_rl); + mlx5e_mqprio_rl_free(priv->mqprio_rl); + } + priv->mqprio_rl = priv->channels.params.mqprio.channel.rl; + }
return 0;
@@@ -2900,15 -3119,18 +3117,18 @@@ static void mlx5e_params_mqprio_dcb_set { params->mqprio.mode = TC_MQPRIO_MODE_DCB; params->mqprio.num_tc = num_tc; + params->mqprio.channel.rl = NULL; mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc, params->num_channels); }
static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params, - struct tc_mqprio_qopt *qopt) + struct tc_mqprio_qopt *qopt, + struct mlx5e_mqprio_rl *rl) { params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL; params->mqprio.num_tc = qopt->num_tc; + params->mqprio.channel.rl = rl; mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt); }
@@@ -2968,9 -3190,13 +3188,13 @@@ static int mlx5e_mqprio_channel_validat netdev_err(netdev, "Min tx rate is not supported\n"); return -EINVAL; } + if (mqprio->max_rate[i]) { - netdev_err(netdev, "Max tx rate is not supported\n"); - return -EINVAL; + int err; + + err = mlx5e_qos_bytes_rate_check(priv->mdev, mqprio->max_rate[i]); + if (err) + return err; }
if (mqprio->qopt.offset[i] != agg_count) { @@@ -2989,11 -3215,22 +3213,22 @@@ return 0; }
+ static bool mlx5e_mqprio_rate_limit(struct tc_mqprio_qopt_offload *mqprio) + { + int tc; + + for (tc = 0; tc < mqprio->qopt.num_tc; tc++) + if (mqprio->max_rate[tc]) + return true; + return false; + } + static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv, struct tc_mqprio_qopt_offload *mqprio) { mlx5e_fp_preactivate preactivate; struct mlx5e_params new_params; + struct mlx5e_mqprio_rl *rl; bool nch_changed; int err;
@@@ -3001,13 -3238,32 +3236,32 @@@ if (err) return err;
+ rl = NULL; + if (mlx5e_mqprio_rate_limit(mqprio)) { + rl = mlx5e_mqprio_rl_alloc(); + if (!rl) + return -ENOMEM; + err = mlx5e_mqprio_rl_init(rl, priv->mdev, mqprio->qopt.num_tc, + mqprio->max_rate); + if (err) { + mlx5e_mqprio_rl_free(rl); + return err; + } + } + new_params = priv->channels.params; - mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt); + mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt, rl);
nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1; preactivate = nch_changed ? mlx5e_num_channels_changed_ctx : mlx5e_update_netdev_queues_ctx; - return mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true); + err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true); + if (err && rl) { + mlx5e_mqprio_rl_cleanup(rl); + mlx5e_mqprio_rl_free(rl); + } + + return err; }
static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, @@@ -3225,7 -3481,7 +3479,7 @@@ static int mlx5e_set_mac(struct net_dev return -EADDRNOTAVAIL;
netif_addr_lock_bh(netdev); - ether_addr_copy(netdev->dev_addr, saddr->sa_data); + eth_hw_addr_set(netdev, saddr->sa_data); netif_addr_unlock_bh(netdev);
mlx5e_nic_set_rx_mode(priv); @@@ -3269,16 -3525,59 +3523,59 @@@ static int set_feature_lro(struct net_d }
new_params = *cur_params; - new_params.lro_en = enable;
- if (cur_params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { - if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) == - mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL)) - reset = false; + if (enable) + new_params.packet_merge.type = MLX5E_PACKET_MERGE_LRO; + else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO) + new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE; + else + goto out; + + if (!(cur_params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO && + new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO)) { + if (cur_params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { + if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) == + mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL)) + reset = false; + } }
err = mlx5e_safe_switch_params(priv, &new_params, - mlx5e_modify_tirs_lro_ctx, NULL, reset); + mlx5e_modify_tirs_packet_merge_ctx, NULL, reset); + out: + mutex_unlock(&priv->state_lock); + return err; + } + + static int set_feature_hw_gro(struct net_device *netdev, bool enable) + { + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_params new_params; + bool reset = true; + int err = 0; + + mutex_lock(&priv->state_lock); + new_params = priv->channels.params; + + if (enable) { + if (MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS)) { + netdev_warn(netdev, "Can't set HW-GRO when CQE compress is active\n"); + err = -EINVAL; + goto out; + } + new_params.packet_merge.type = MLX5E_PACKET_MERGE_SHAMPO; + new_params.packet_merge.shampo.match_criteria_type = + MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED; + new_params.packet_merge.shampo.alignment_granularity = + MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE; + } else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) { + new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE; + } else { + goto out; + } + + err = mlx5e_safe_switch_params(priv, &new_params, + mlx5e_modify_tirs_packet_merge_ctx, NULL, reset); out: mutex_unlock(&priv->state_lock); return err; @@@ -3457,6 -3756,7 +3754,7 @@@ int mlx5e_set_features(struct net_devic mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro); err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER, set_feature_cvlan_filter); err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_hw_tc); @@@ -3517,6 -3817,10 +3815,10 @@@ static netdev_features_t mlx5e_fix_feat netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n"); features &= ~NETIF_F_LRO; } + if (features & NETIF_F_GRO_HW) { + netdev_warn(netdev, "Disabling HW-GRO, not supported in legacy RQ\n"); + features &= ~NETIF_F_GRO_HW; + } }
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) { @@@ -3605,7 -3909,7 +3907,7 @@@ int mlx5e_change_mtu(struct net_device goto out; }
- if (params->lro_en) + if (params->packet_merge.type == MLX5E_PACKET_MERGE_LRO) reset = false;
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { @@@ -4062,8 -4366,8 +4364,8 @@@ static int mlx5e_xdp_allowed(struct mlx struct net_device *netdev = priv->netdev; struct mlx5e_params new_params;
- if (priv->channels.params.lro_en) { - netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n"); + if (priv->channels.params.packet_merge.type != MLX5E_PACKET_MERGE_NONE) { + netdev_warn(netdev, "can't set XDP while HW-GRO/LRO is on, disable them first\n"); return -EINVAL; }
@@@ -4320,9 -4624,10 +4622,10 @@@ void mlx5e_build_nic_params(struct mlx5 params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { /* No XSK params: checking the availability of striding RQ in general. */ if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL)) - params->lro_en = !slow_pci_heuristic(mdev); + params->packet_merge.type = slow_pci_heuristic(mdev) ? + MLX5E_PACKET_MERGE_NONE : MLX5E_PACKET_MERGE_LRO; } - params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); + params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */ rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? @@@ -4350,13 -4655,17 +4653,17 @@@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); + u8 addr[ETH_ALEN];
- mlx5_query_mac_address(priv->mdev, netdev->dev_addr); - if (is_zero_ether_addr(netdev->dev_addr) && + mlx5_query_mac_address(priv->mdev, addr); + if (is_zero_ether_addr(addr) && !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) { eth_hw_addr_random(netdev); mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr); + return; } + + eth_hw_addr_set(netdev, addr); }
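Both mlx5e_set_mac() and mlx5e_set_netdev_dev_addr() stop writing into netdev->dev_addr directly: the MAC is queried into a local buffer and installed via eth_hw_addr_set(). This looks like part of the tree-wide move toward treating dev_addr as read-only for drivers and funnelling writes through one helper. A plain-C sketch of that pattern (fake_netdev and fake_hw_addr_set are invented for the example, not kernel types):

    #include <stdio.h>
    #include <string.h>

    struct fake_netdev {
        unsigned char addr_storage[6];
        const unsigned char *dev_addr;      /* drivers only get a const view */
    };

    static void fake_hw_addr_set(struct fake_netdev *dev, const unsigned char *addr)
    {
        memcpy(dev->addr_storage, addr, 6); /* the one sanctioned write path */
        dev->dev_addr = dev->addr_storage;
    }

    int main(void)
    {
        struct fake_netdev dev = { 0 };
        const unsigned char mac[6] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };

        fake_hw_addr_set(&dev, mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               dev.dev_addr[0], dev.dev_addr[1], dev.dev_addr[2],
               dev.dev_addr[3], dev.dev_addr[4], dev.dev_addr[5]);
        return 0;
    }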
static int mlx5e_vxlan_set_port(struct net_device *netdev, unsigned int table, @@@ -4453,6 -4762,10 +4760,10 @@@ static void mlx5e_build_nic_netdev(stru netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+ if (!!MLX5_CAP_GEN(mdev, shampo) && + mlx5e_check_fragmented_striding_rq_cap(mdev)) + netdev->hw_features |= NETIF_F_GRO_HW; + if (mlx5e_tunnel_any_tx_proto_supported(mdev)) { netdev->hw_enc_features |= NETIF_F_HW_CSUM; netdev->hw_enc_features |= NETIF_F_TSO; @@@ -4503,6 -4816,7 +4814,7 @@@ if (fcs_enabled) netdev->features &= ~NETIF_F_RXALL; netdev->features &= ~NETIF_F_LRO; + netdev->features &= ~NETIF_F_GRO_HW; netdev->features &= ~NETIF_F_RXFCS;
#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f) @@@ -4607,7 -4921,6 +4919,6 @@@ static int mlx5e_init_nic_rx(struct mlx { struct mlx5_core_dev *mdev = priv->mdev; enum mlx5e_rx_res_features features; - struct mlx5e_lro_param lro_param; int err;
priv->rx_res = mlx5e_rx_res_alloc(); @@@ -4625,9 -4938,9 +4936,9 @@@ features = MLX5E_RX_RES_FEATURE_XSK | MLX5E_RX_RES_FEATURE_PTP; if (priv->channels.params.tunneled_offload_en) features |= MLX5E_RX_RES_FEATURE_INNER_FT; - lro_param = mlx5e_get_lro_param(&priv->channels.params); err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features, - priv->max_nch, priv->drop_rq.rqn, &lro_param, + priv->max_nch, priv->drop_rq.rqn, + &priv->channels.params.packet_merge, priv->channels.params.num_channels); if (err) goto err_close_drop_rq; @@@ -4861,6 -5174,11 +5172,11 @@@ void mlx5e_priv_cleanup(struct mlx5e_pr kfree(priv->htb.qos_sq_stats[i]); kvfree(priv->htb.qos_sq_stats);
+ if (priv->mqprio_rl) { + mlx5e_mqprio_rl_cleanup(priv->mqprio_rl); + mlx5e_mqprio_rl_free(priv->mqprio_rl); + } + memset(priv, 0, sizeof(*priv)); }
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 29a6586ef28d,fe979edd96dc..f63c8ff3ef3f --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@@ -33,9 -33,12 +33,12 @@@ #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/tcp.h> + #include <linux/bitmap.h> #include <net/ip6_checksum.h> #include <net/page_pool.h> #include <net/inet_ecn.h> + #include <net/udp.h> + #include <net/tcp.h> #include "en.h" #include "en/txrx.h" #include "en_tc.h" @@@ -62,10 -65,12 +65,12 @@@ mlx5e_skb_from_cqe_mpwrq_nonlinear(stru u16 cqe_bcnt, u32 head_offset, u32 page_idx); static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); + static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = { .handle_rx_cqe = mlx5e_handle_rx_cqe, .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, + .handle_rx_cqe_mpwqe_shampo = mlx5e_handle_rx_cqe_mpwrq_shampo, };
static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) @@@ -185,8 -190,9 +190,9 @@@ static inline u32 mlx5e_decompress_cqes mlx5e_read_mini_arr_slot(wq, cqd, cqcc);
mlx5e_decompress_cqe_no_hash(rq, wq, cqcc); - INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, - mlx5e_handle_rx_cqe, rq, &cqd->title); + INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, + mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe, + rq, &cqd->title); } mlx5e_cqes_update_owner(wq, cqcc - wq->cc); wq->cc = cqcc; @@@ -206,8 -212,9 +212,9 @@@ static inline u32 mlx5e_decompress_cqes mlx5e_read_title_slot(rq, wq, cc); mlx5e_read_mini_arr_slot(wq, cqd, cc + 1); mlx5e_decompress_cqe(rq, wq, cc); - INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, - mlx5e_handle_rx_cqe, rq, &cqd->title); + INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, + mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe, + rq, &cqd->title); cqd->mini_arr_idx++;
return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1; @@@ -448,13 -455,13 +455,13 @@@ mlx5e_add_skb_frag(struct mlx5e_rq *rq static inline void mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb, struct mlx5e_dma_info *dma_info, - int offset_from, u32 headlen) + int offset_from, int dma_offset, u32 headlen) { const void *from = page_address(dma_info->page) + offset_from; /* Aligning len to sizeof(long) optimizes memcpy performance */ unsigned int len = ALIGN(headlen, sizeof(long));
- dma_sync_single_for_cpu(pdev, dma_info->addr + offset_from, len, + dma_sync_single_for_cpu(pdev, dma_info->addr + dma_offset, len, DMA_FROM_DEVICE); skb_copy_to_linear_data(skb, from, len); } @@@ -494,6 -501,157 +501,157 @@@ static void mlx5e_post_rx_mpwqe(struct mlx5_wq_ll_update_db_record(wq); }
+ /* This function returns the size of the continuous free space inside a bitmap + * that starts from first and no longer than len including circular ones. + */ + static int bitmap_find_window(unsigned long *bitmap, int len, + int bitmap_size, int first) + { + int next_one, count; + + next_one = find_next_bit(bitmap, bitmap_size, first); + if (next_one == bitmap_size) { + if (bitmap_size - first >= len) + return len; + next_one = find_next_bit(bitmap, bitmap_size, 0); + count = next_one + bitmap_size - first; + } else { + count = next_one - first; + } + + return min(len, count); + } + + static void build_klm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe, + __be32 key, u16 offset, u16 klm_len, u16 wqe_bbs) + { + memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_klms)); + umr_wqe->ctrl.opmod_idx_opcode = + cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | + MLX5_OPCODE_UMR); + umr_wqe->ctrl.umr_mkey = key; + umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) + | MLX5E_KLM_UMR_DS_CNT(klm_len)); + umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE; + umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset); + umr_wqe->uctrl.xlt_octowords = cpu_to_be16(klm_len); + umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); + } + + static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, + struct mlx5e_icosq *sq, + u16 klm_entries, u16 index) + { + struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; + u16 entries, pi, i, header_offset, err, wqe_bbs, new_entries; - u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey.key; ++ u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey; + struct page *page = shampo->last_page; + u64 addr = shampo->last_addr; + struct mlx5e_dma_info *dma_info; + struct mlx5e_umr_wqe *umr_wqe; + int headroom; + + headroom = rq->buff.headroom; + new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1)); + entries = ALIGN(klm_entries, MLX5_UMR_KLM_ALIGNMENT); + wqe_bbs = MLX5E_KLM_UMR_WQEBBS(entries); + pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs); + umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi); + build_klm_umr(sq, umr_wqe, shampo->key, index, entries, wqe_bbs); + + for (i = 0; i < entries; i++, index++) { + dma_info = &shampo->info[index]; + if (i >= klm_entries || (index < shampo->pi && shampo->pi - index < + MLX5_UMR_KLM_ALIGNMENT)) + goto update_klm; + header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << + MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; + if (!(header_offset & (PAGE_SIZE - 1))) { + err = mlx5e_page_alloc(rq, dma_info); + if (unlikely(err)) + goto err_unmap; + addr = dma_info->addr; + page = dma_info->page; + } else { + dma_info->addr = addr + header_offset; + dma_info->page = page; + } + + update_klm: + umr_wqe->inline_klms[i].bcount = + cpu_to_be32(MLX5E_RX_MAX_HEAD); + umr_wqe->inline_klms[i].key = cpu_to_be32(lkey); + umr_wqe->inline_klms[i].va = + cpu_to_be64(dma_info->addr + headroom); + } + + sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { + .wqe_type = MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR, + .num_wqebbs = wqe_bbs, + .shampo.len = new_entries, + }; + + shampo->pi = (shampo->pi + new_entries) & (shampo->hd_per_wq - 1); + shampo->last_page = page; + shampo->last_addr = addr; + sq->pc += wqe_bbs; + sq->doorbell_cseg = &umr_wqe->ctrl; + + return 0; + + err_unmap: + while (--i >= 0) { + if (--index < 0) + index = shampo->hd_per_wq - 1; + dma_info = &shampo->info[index]; + if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) { + dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE); + 
mlx5e_page_release(rq, dma_info, true); + } + } + rq->stats->buff_alloc_err++; + return err; + } + + static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq) + { + struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; + u16 klm_entries, num_wqe, index, entries_before; + struct mlx5e_icosq *sq = rq->icosq; + int i, err, max_klm_entries, len; + + max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev); + klm_entries = bitmap_find_window(shampo->bitmap, + shampo->hd_per_wqe, + shampo->hd_per_wq, shampo->pi); + if (!klm_entries) + return 0; + + klm_entries += (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1)); + index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_ALIGNMENT); + entries_before = shampo->hd_per_wq - index; + + if (unlikely(entries_before < klm_entries)) + num_wqe = DIV_ROUND_UP(entries_before, max_klm_entries) + + DIV_ROUND_UP(klm_entries - entries_before, max_klm_entries); + else + num_wqe = DIV_ROUND_UP(klm_entries, max_klm_entries); + + for (i = 0; i < num_wqe; i++) { + len = (klm_entries > max_klm_entries) ? max_klm_entries : + klm_entries; + if (unlikely(index + len > shampo->hd_per_wq)) + len = shampo->hd_per_wq - index; + err = mlx5e_build_shampo_hd_umr(rq, sq, len, index); + if (unlikely(err)) + return err; + index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1); + klm_entries -= len; + } + + return 0; + } + static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) { struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; @@@ -514,6 -672,12 +672,12 @@@ goto err; }
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) { + err = mlx5e_alloc_rx_hd_mpwqe(rq); + if (unlikely(err)) + goto err; + } + pi = mlx5e_icosq_get_next_pi(sq, MLX5E_UMR_WQEBBS); umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi); memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts)); @@@ -558,6 -722,44 +722,44 @@@ err return err; }
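mlx5e_alloc_rx_hd_mpwqe() sizes its UMR work by calling bitmap_find_window(), which counts how many consecutive free header slots are available starting at shampo->pi, wrapping to slot 0 when the end of the bitmap is reached without hitting a used entry. The same logic in a standalone sketch, where a bool array stands in for the kernel bitmap and all sizes are invented:

    #include <stdbool.h>
    #include <stdio.h>

    static int find_next_set(const bool *map, int size, int from)
    {
        for (int i = from; i < size; i++)
            if (map[i])
                return i;
        return size;
    }

    /* Longest run of free slots starting at 'first', capped at 'len',
     * allowing a wrap from the last slot back to slot 0. */
    static int find_window(const bool *busy, int len, int size, int first)
    {
        int next_one = find_next_set(busy, size, first);
        int count;

        if (next_one == size) {                      /* free until the end ... */
            if (size - first >= len)
                return len;
            next_one = find_next_set(busy, size, 0); /* ... keep counting from 0 */
            count = next_one + size - first;
        } else {
            count = next_one - first;
        }
        return count < len ? count : len;
    }

    int main(void)
    {
        bool busy[8] = { [2] = true };               /* slot 2 is still in use */

        printf("%d\n", find_window(busy, 6, 8, 5));  /* 5,6,7,0,1 -> prints 5 */
        return 0;
    }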
+ /* This function is responsible to dealloc SHAMPO header buffer. + * close == true specifies that we are in the middle of closing RQ operation so + * we go over all the entries and if they are not in use we free them, + * otherwise we only go over a specific range inside the header buffer that are + * not in use. + */ + void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close) + { + struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; + int hd_per_wq = shampo->hd_per_wq; + struct page *deleted_page = NULL; + struct mlx5e_dma_info *hd_info; + int i, index = start; + + for (i = 0; i < len; i++, index++) { + if (index == hd_per_wq) + index = 0; + + if (close && !test_bit(index, shampo->bitmap)) + continue; + + hd_info = &shampo->info[index]; + hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE); + if (hd_info->page != deleted_page) { + deleted_page = hd_info->page; + mlx5e_page_release(rq, hd_info, false); + } + } + + if (start + len > hd_per_wq) { + len -= hd_per_wq - start; + bitmap_clear(shampo->bitmap, start, hd_per_wq - start); + start = 0; + } + + bitmap_clear(shampo->bitmap, start, len); + } + static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) { struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; @@@ -629,6 -831,28 +831,28 @@@ void mlx5e_free_icosq_descs(struct mlx5 sq->cc = sqcc; }
+ static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr, + struct mlx5e_icosq *sq) + { + struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq); + struct mlx5e_shampo_hd *shampo; + /* assume 1:1 relationship between RQ and icosq */ + struct mlx5e_rq *rq = &c->rq; + int end, from, len = umr.len; + + shampo = rq->mpwqe.shampo; + end = shampo->hd_per_wq; + from = shampo->ci; + if (from + len > shampo->hd_per_wq) { + len -= end - from; + bitmap_set(shampo->bitmap, from, end - from); + from = 0; + } + + bitmap_set(shampo->bitmap, from, len); + shampo->ci = (shampo->ci + umr.len) & (shampo->hd_per_wq - 1); + } + int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) { struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq); @@@ -685,6 -909,9 +909,9 @@@ break; case MLX5E_ICOSQ_WQE_NOP: break; + case MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR: + mlx5e_handle_shampo_hd_umr(wi->shampo, sq); + break; #ifdef CONFIG_MLX5_EN_TLS case MLX5E_ICOSQ_WQE_UMR_TLS: break; @@@ -782,8 -1009,8 +1009,8 @@@ static void mlx5e_lro_update_tcp_hdr(st
if (tcp_ack) { tcp->ack = 1; - tcp->ack_seq = cqe->lro_ack_seq_num; - tcp->window = cqe->lro_tcp_win; + tcp->ack_seq = cqe->lro.ack_seq_num; + tcp->window = cqe->lro.tcp_win; } }
@@@ -809,7 -1036,7 +1036,7 @@@ static void mlx5e_lro_update_hdr(struc tcp = ip_p + sizeof(struct iphdr); skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
- ipv4->ttl = cqe->lro_min_ttl; + ipv4->ttl = cqe->lro.min_ttl; ipv4->tot_len = cpu_to_be16(tot_len); ipv4->check = 0; ipv4->check = ip_fast_csum((unsigned char *)ipv4, @@@ -829,7 -1056,7 +1056,7 @@@ tcp = ip_p + sizeof(struct ipv6hdr); skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
- ipv6->hop_limit = cqe->lro_min_ttl; + ipv6->hop_limit = cqe->lro.min_ttl; ipv6->payload_len = cpu_to_be16(payload_len);
mlx5e_lro_update_tcp_hdr(cqe, tcp); @@@ -841,6 -1068,142 +1068,142 @@@ } }
+ static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index) + { + struct mlx5e_dma_info *last_head = &rq->mpwqe.shampo->info[header_index]; + u16 head_offset = (last_head->addr & (PAGE_SIZE - 1)) + rq->buff.headroom; + + return page_address(last_head->page) + head_offset; + } + + static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4) + { + int udp_off = rq->hw_gro_data->fk.control.thoff; + struct sk_buff *skb = rq->hw_gro_data->skb; + struct udphdr *uh; + + uh = (struct udphdr *)(skb->data + udp_off); + uh->len = htons(skb->len - udp_off); + + if (uh->check) + uh->check = ~udp_v4_check(skb->len - udp_off, ipv4->saddr, + ipv4->daddr, 0); + + skb->csum_start = (unsigned char *)uh - skb->head; + skb->csum_offset = offsetof(struct udphdr, check); + + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4; + } + + static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6) + { + int udp_off = rq->hw_gro_data->fk.control.thoff; + struct sk_buff *skb = rq->hw_gro_data->skb; + struct udphdr *uh; + + uh = (struct udphdr *)(skb->data + udp_off); + uh->len = htons(skb->len - udp_off); + + if (uh->check) + uh->check = ~udp_v6_check(skb->len - udp_off, &ipv6->saddr, + &ipv6->daddr, 0); + + skb->csum_start = (unsigned char *)uh - skb->head; + skb->csum_offset = offsetof(struct udphdr, check); + + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4; + } + + static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, + struct tcphdr *skb_tcp_hd) + { + u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index); + struct tcphdr *last_tcp_hd; + void *last_hd_addr; + + last_hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index); + last_tcp_hd = last_hd_addr + ETH_HLEN + rq->hw_gro_data->fk.control.thoff; + tcp_flag_word(skb_tcp_hd) |= tcp_flag_word(last_tcp_hd) & (TCP_FLAG_FIN | TCP_FLAG_PSH); + } + + static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4, + struct mlx5_cqe64 *cqe, bool match) + { + int tcp_off = rq->hw_gro_data->fk.control.thoff; + struct sk_buff *skb = rq->hw_gro_data->skb; + struct tcphdr *tcp; + + tcp = (struct tcphdr *)(skb->data + tcp_off); + if (match) + mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp); + + tcp->check = ~tcp_v4_check(skb->len - tcp_off, ipv4->saddr, + ipv4->daddr, 0); + skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; + if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id) + skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; + + skb->csum_start = (unsigned char *)tcp - skb->head; + skb->csum_offset = offsetof(struct tcphdr, check); + + if (tcp->cwr) + skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; + } + + static void mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6, + struct mlx5_cqe64 *cqe, bool match) + { + int tcp_off = rq->hw_gro_data->fk.control.thoff; + struct sk_buff *skb = rq->hw_gro_data->skb; + struct tcphdr *tcp; + + tcp = (struct tcphdr *)(skb->data + tcp_off); + if (match) + mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp); + + tcp->check = ~tcp_v6_check(skb->len - tcp_off, &ipv6->saddr, + &ipv6->daddr, 0); + skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; + skb->csum_start = (unsigned char *)tcp - skb->head; + skb->csum_offset = offsetof(struct tcphdr, check); + + if (tcp->cwr) + skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; + } + + static void mlx5e_shampo_update_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match) + { + bool is_ipv4 = (rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)); + struct sk_buff 
*skb = rq->hw_gro_data->skb; + + skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; + skb->ip_summed = CHECKSUM_PARTIAL; + + if (is_ipv4) { + int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct iphdr); + struct iphdr *ipv4 = (struct iphdr *)(skb->data + nhoff); + __be16 newlen = htons(skb->len - nhoff); + + csum_replace2(&ipv4->check, ipv4->tot_len, newlen); + ipv4->tot_len = newlen; + + if (ipv4->protocol == IPPROTO_TCP) + mlx5e_shampo_update_ipv4_tcp_hdr(rq, ipv4, cqe, match); + else + mlx5e_shampo_update_ipv4_udp_hdr(rq, ipv4); + } else { + int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct ipv6hdr); + struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + nhoff); + + ipv6->payload_len = htons(skb->len - nhoff - sizeof(*ipv6)); + + if (ipv6->nexthdr == IPPROTO_TCP) + mlx5e_shampo_update_ipv6_tcp_hdr(rq, ipv6, cqe, match); + else + mlx5e_shampo_update_ipv6_udp_hdr(rq, ipv6); + } + } + static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { @@@ -1090,6 -1453,27 +1453,27 @@@ static inline void mlx5e_build_rx_skb(s stats->mcast_packets++; }
+ static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq, + struct mlx5_cqe64 *cqe, + u32 cqe_bcnt, + struct sk_buff *skb) + { + struct mlx5e_rq_stats *stats = rq->stats; + + stats->packets++; + stats->gro_packets++; + stats->bytes += cqe_bcnt; + stats->gro_bytes += cqe_bcnt; + if (NAPI_GRO_CB(skb)->count != 1) + return; + mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb); + skb_reset_network_header(skb); + if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) { + napi_gro_receive(rq->cq.napi, skb); + rq->hw_gro_data->skb = NULL; + } + } + static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, u32 cqe_bcnt, @@@ -1199,7 -1583,8 +1583,8 @@@ mlx5e_skb_from_cqe_nonlinear(struct mlx }
/* copy header */ - mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, headlen); + mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, head_wi->offset, + headlen); /* skb linear part was allocated with headlen and aligned to long */ skb->tail += headlen; skb->len += headlen; @@@ -1395,6 -1780,30 +1780,30 @@@ const struct mlx5e_rx_handlers mlx5e_rx }; #endif
+ static void + mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq, struct mlx5e_dma_info *di, + u32 data_bcnt, u32 data_offset) + { + net_prefetchw(skb->data); + + while (data_bcnt) { + u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt); + unsigned int truesize; + + if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) + truesize = pg_consumed_bytes; + else + truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz)); + + mlx5e_add_skb_frag(rq, skb, di, data_offset, + pg_consumed_bytes, truesize); + + data_bcnt -= pg_consumed_bytes; + data_offset = 0; + di++; + } + } + static struct sk_buff * mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, u16 cqe_bcnt, u32 head_offset, u32 page_idx) @@@ -1420,20 -1829,9 +1829,9 @@@ frag_offset -= PAGE_SIZE; }
- while (byte_cnt) { - u32 pg_consumed_bytes = - min_t(u32, PAGE_SIZE - frag_offset, byte_cnt); - unsigned int truesize = - ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz)); - - mlx5e_add_skb_frag(rq, skb, di, frag_offset, - pg_consumed_bytes, truesize); - byte_cnt -= pg_consumed_bytes; - frag_offset = 0; - di++; - } + mlx5e_fill_skb_data(skb, rq, di, byte_cnt, frag_offset); /* copy header */ - mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, headlen); + mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, head_offset, headlen); /* skb linear part was allocated with headlen and aligned to long */ skb->tail += headlen; skb->len += headlen; @@@ -1487,6 -1885,181 +1885,181 @@@ mlx5e_skb_from_cqe_mpwrq_linear(struct return skb; }
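mlx5e_fill_skb_data(), factored out above and reused here, walks the payload across pages: the first fragment runs from the starting offset to the end of that page, and every later fragment starts at offset 0. The chunking arithmetic on its own, with an invented page size and byte count:

    #include <stdio.h>

    #define PAGE_SZ 4096u

    int main(void)
    {
        unsigned int remaining = 10000;   /* bytes still to attach as fragments */
        unsigned int offset = 3000;       /* offset into the first page */
        unsigned int page = 0;

        while (remaining) {
            unsigned int chunk = PAGE_SZ - offset;

            if (chunk > remaining)
                chunk = remaining;
            printf("page %u: %u bytes from offset %u\n", page, chunk, offset);
            remaining -= chunk;
            offset = 0;                   /* later pages are consumed from the start */
            page++;
        }
        return 0;
    }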
+ static void + mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, + struct mlx5_cqe64 *cqe, u16 header_index) + { + struct mlx5e_dma_info *head = &rq->mpwqe.shampo->info[header_index]; + u16 head_offset = head->addr & (PAGE_SIZE - 1); + u16 head_size = cqe->shampo.header_size; + u16 rx_headroom = rq->buff.headroom; + struct sk_buff *skb = NULL; + void *hdr, *data; + u32 frag_size; + + hdr = page_address(head->page) + head_offset; + data = hdr + rx_headroom; + frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size); + + if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) { + /* build SKB around header */ + dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, DMA_FROM_DEVICE); + prefetchw(hdr); + prefetch(data); + skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size); + + if (unlikely(!skb)) + return; + + /* queue up for recycling/reuse */ + page_ref_inc(head->page); + + } else { + /* allocate SKB and copy header for large header */ + rq->stats->gro_large_hds++; + skb = napi_alloc_skb(rq->cq.napi, + ALIGN(head_size, sizeof(long))); + if (unlikely(!skb)) { + rq->stats->buff_alloc_err++; + return; + } + + prefetchw(skb->data); + mlx5e_copy_skb_header(rq->pdev, skb, head, + head_offset + rx_headroom, + rx_headroom, head_size); + /* skb linear part was allocated with headlen and aligned to long */ + skb->tail += head_size; + skb->len += head_size; + } + rq->hw_gro_data->skb = skb; + NAPI_GRO_CB(skb)->count = 1; + skb_shinfo(skb)->gso_size = mpwrq_get_cqe_byte_cnt(cqe) - head_size; + } + + static void + mlx5e_shampo_align_fragment(struct sk_buff *skb, u8 log_stride_sz) + { + skb_frag_t *last_frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; + unsigned int frag_size = skb_frag_size(last_frag); + unsigned int frag_truesize; + + frag_truesize = ALIGN(frag_size, BIT(log_stride_sz)); + skb->truesize += frag_truesize - frag_size; + } + + static void + mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match) + { + struct sk_buff *skb = rq->hw_gro_data->skb; + struct mlx5e_rq_stats *stats = rq->stats; + + stats->gro_skbs++; + if (likely(skb_shinfo(skb)->nr_frags)) + mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz); + if (NAPI_GRO_CB(skb)->count > 1) + mlx5e_shampo_update_hdr(rq, cqe, match); + napi_gro_receive(rq->cq.napi, skb); + rq->hw_gro_data->skb = NULL; + } + + static bool + mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt) + { + int nr_frags = skb_shinfo(skb)->nr_frags; + + return PAGE_SIZE * nr_frags + data_bcnt <= GSO_MAX_SIZE; + } + + static void + mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index) + { + struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; + u64 addr = shampo->info[header_index].addr; + + if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) { + shampo->info[header_index].addr = ALIGN_DOWN(addr, PAGE_SIZE); + mlx5e_page_release(rq, &shampo->info[header_index], true); + } + bitmap_clear(shampo->bitmap, header_index, 1); + } + + static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) + { + u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size; + u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index); + u32 wqe_offset = be32_to_cpu(cqe->shampo.data_offset); + u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe); + u32 data_offset = wqe_offset & (PAGE_SIZE - 1); + u32 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe); + u16 wqe_id = be16_to_cpu(cqe->wqe_id); + u32 
page_idx = wqe_offset >> PAGE_SHIFT; + struct sk_buff **skb = &rq->hw_gro_data->skb; + bool flush = cqe->shampo.flush; + bool match = cqe->shampo.match; + struct mlx5e_rq_stats *stats = rq->stats; + struct mlx5e_rx_wqe_ll *wqe; + struct mlx5e_dma_info *di; + struct mlx5e_mpw_info *wi; + struct mlx5_wq_ll *wq; + + wi = &rq->mpwqe.info[wqe_id]; + wi->consumed_strides += cstrides; + + if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { + trigger_report(rq, cqe); + stats->wqe_err++; + goto mpwrq_cqe_out; + } + + if (unlikely(mpwrq_is_filler_cqe(cqe))) { + stats->mpwqe_filler_cqes++; + stats->mpwqe_filler_strides += cstrides; + goto mpwrq_cqe_out; + } + + stats->gro_match_packets += match; + + if (*skb && (!match || !(mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt)))) { + match = false; + mlx5e_shampo_flush_skb(rq, cqe, match); + } + + if (!*skb) { + mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index); + if (unlikely(!*skb)) + goto free_hd_entry; + } else { + NAPI_GRO_CB(*skb)->count++; + if (NAPI_GRO_CB(*skb)->count == 2 && + rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)) { + void *hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index); + int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff - + sizeof(struct iphdr); + struct iphdr *iph = (struct iphdr *)(hd_addr + nhoff); + + rq->hw_gro_data->second_ip_id = ntohs(iph->id); + } + } + + di = &wi->umr.dma_info[page_idx]; + mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset); + + mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb); + if (flush) + mlx5e_shampo_flush_skb(rq, cqe, match); + free_hd_entry: + mlx5e_free_rx_shampo_hd_entry(rq, header_index); + mpwrq_cqe_out: + if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) + return; + + wq = &rq->mpwqe.wq; + wqe = mlx5_wq_ll_get_wqe(wq, wqe_id); + mlx5e_free_rx_mpwqe(rq, wi, true); + mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index); + } + static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe); @@@ -1579,11 -2152,15 +2152,15 @@@ int mlx5e_poll_rx_cq(struct mlx5e_cq *c
mlx5_cqwq_pop(cqwq);
- INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, - mlx5e_handle_rx_cqe, rq, cqe); + INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, + mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo, + rq, cqe); } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
out: + if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) && rq->hw_gro_data->skb) + mlx5e_shampo_flush_skb(rq, NULL, false); + if (rcu_access_pointer(rq->xdp_prog)) mlx5e_xdp_rx_poll_complete(rq);
@@@ -1784,15 -2361,24 +2361,24 @@@ int mlx5e_rq_set_handlers(struct mlx5e_ rq->post_wqes = mlx5e_post_rx_mpwqes; rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
- rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe; if (mlx5_fpga_is_ipsec_device(mdev)) { netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n"); return -EINVAL; } - if (!rq->handle_rx_cqe) { - netdev_err(netdev, "RX handler of MPWQE RQ is not set\n"); - return -EINVAL; + if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) { + rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo; + if (!rq->handle_rx_cqe) { + netdev_err(netdev, "RX handler of SHAMPO MPWQE RQ is not set\n"); + return -EINVAL; + } + } else { + rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe; + if (!rq->handle_rx_cqe) { + netdev_err(netdev, "RX handler of MPWQE RQ is not set\n"); + return -EINVAL; + } } + break; default: /* MLX5_WQ_TYPE_CYCLIC */ rq->wqe.skb_from_cqe = xsk ? diff --combined drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 71a08f84d49d,873efde0d458..386ab9a2d490 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@@ -99,9 -99,6 +99,9 @@@ #define LEFTOVERS_NUM_LEVELS 1 #define LEFTOVERS_NUM_PRIOS 1
+#define RDMA_RX_COUNTERS_PRIO_NUM_LEVELS 1 +#define RDMA_TX_COUNTERS_PRIO_NUM_LEVELS 1 + #define BY_PASS_PRIO_NUM_LEVELS 1 #define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ LEFTOVERS_NUM_PRIOS) @@@ -209,63 -206,34 +209,63 @@@ static struct init_tree_node egress_roo } };
-#define RDMA_RX_BYPASS_PRIO 0 -#define RDMA_RX_KERNEL_PRIO 1 +enum { + RDMA_RX_COUNTERS_PRIO, + RDMA_RX_BYPASS_PRIO, + RDMA_RX_KERNEL_PRIO, +}; + +#define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS +#define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1) +#define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2) + static struct init_tree_node rdma_rx_root_fs = { .type = FS_TYPE_NAMESPACE, - .ar_size = 2, + .ar_size = 3, .children = (struct init_tree_node[]) { + [RDMA_RX_COUNTERS_PRIO] = + ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0, + FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(MLX5_RDMA_RX_NUM_COUNTERS_PRIOS, + RDMA_RX_COUNTERS_PRIO_NUM_LEVELS))), [RDMA_RX_BYPASS_PRIO] = - ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0, + ADD_PRIO(0, RDMA_RX_BYPASS_MIN_LEVEL, 0, FS_CHAINING_CAPS, ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS, BY_PASS_PRIO_NUM_LEVELS))), [RDMA_RX_KERNEL_PRIO] = - ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0, + ADD_PRIO(0, RDMA_RX_KERNEL_MIN_LEVEL, 0, FS_CHAINING_CAPS, ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN, ADD_MULTIPLE_PRIO(1, 1))), } };
+enum { + RDMA_TX_COUNTERS_PRIO, + RDMA_TX_BYPASS_PRIO, +}; + +#define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS +#define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1) + static struct init_tree_node rdma_tx_root_fs = { .type = FS_TYPE_NAMESPACE, - .ar_size = 1, + .ar_size = 2, .children = (struct init_tree_node[]) { - ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0, + [RDMA_TX_COUNTERS_PRIO] = + ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0, + FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS, + RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))), + [RDMA_TX_BYPASS_PRIO] = + ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0, FS_CHAINING_CAPS_RDMA_TX, ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, - ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, + ADD_MULTIPLE_PRIO(RDMA_TX_BYPASS_MIN_LEVEL, BY_PASS_PRIO_NUM_LEVELS))), } }; @@@ -2223,6 -2191,10 +2223,10 @@@ struct mlx5_flow_namespace *mlx5_get_fl if (steering->fdb_root_ns) return &steering->fdb_root_ns->ns; return NULL; + case MLX5_FLOW_NAMESPACE_PORT_SEL: + if (steering->port_sel_root_ns) + return &steering->port_sel_root_ns->ns; + return NULL; case MLX5_FLOW_NAMESPACE_SNIFFER_RX: if (steering->sniffer_rx_root_ns) return &steering->sniffer_rx_root_ns->ns; @@@ -2247,12 -2219,6 +2251,12 @@@ prio = RDMA_RX_KERNEL_PRIO; } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) { root_ns = steering->rdma_tx_root_ns; + } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS) { + root_ns = steering->rdma_rx_root_ns; + prio = RDMA_RX_COUNTERS_PRIO; + } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS) { + root_ns = steering->rdma_tx_root_ns; + prio = RDMA_TX_COUNTERS_PRIO; } else { /* Must be NIC RX */ root_ns = steering->root_ns; prio = type; @@@ -2634,6 -2600,7 +2638,7 @@@ void mlx5_cleanup_fs(struct mlx5_core_d steering->fdb_root_ns = NULL; kfree(steering->fdb_sub_ns); steering->fdb_sub_ns = NULL; + cleanup_root_ns(steering->port_sel_root_ns); cleanup_root_ns(steering->sniffer_rx_root_ns); cleanup_root_ns(steering->sniffer_tx_root_ns); cleanup_root_ns(steering->rdma_rx_root_ns); @@@ -2672,6 -2639,21 +2677,21 @@@ static int init_sniffer_rx_root_ns(stru return PTR_ERR_OR_ZERO(prio); }
+ #define PORT_SEL_NUM_LEVELS 3 + static int init_port_sel_root_ns(struct mlx5_flow_steering *steering) + { + struct fs_prio *prio; + + steering->port_sel_root_ns = create_root_ns(steering, FS_FT_PORT_SEL); + if (!steering->port_sel_root_ns) + return -ENOMEM; + + /* Create single prio */ + prio = fs_create_prio(&steering->port_sel_root_ns->ns, 0, + PORT_SEL_NUM_LEVELS); + return PTR_ERR_OR_ZERO(prio); + } + static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering) { int err; @@@ -3058,6 -3040,12 +3078,12 @@@ int mlx5_init_fs(struct mlx5_core_dev * goto err; }
+ if (MLX5_CAP_FLOWTABLE_PORT_SELECTION(dev, ft_support)) { + err = init_port_sel_root_ns(steering); + if (err) + goto err; + } + if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) && MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) { err = init_rdma_rx_root_ns(steering); @@@ -3262,6 -3250,52 +3288,52 @@@ void mlx5_packet_reformat_dealloc(struc } EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
+ int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer) + { + return definer->id; + } + + struct mlx5_flow_definer * + mlx5_create_match_definer(struct mlx5_core_dev *dev, + enum mlx5_flow_namespace_type ns_type, u16 format_id, + u32 *match_mask) + { + struct mlx5_flow_root_namespace *root; + struct mlx5_flow_definer *definer; + int id; + + root = get_root_namespace(dev, ns_type); + if (!root) + return ERR_PTR(-EOPNOTSUPP); + + definer = kzalloc(sizeof(*definer), GFP_KERNEL); + if (!definer) + return ERR_PTR(-ENOMEM); + + definer->ns_type = ns_type; + id = root->cmds->create_match_definer(root, format_id, match_mask); + if (id < 0) { + mlx5_core_warn(root->dev, "Failed to create match definer (%d)\n", id); + kfree(definer); + return ERR_PTR(id); + } + definer->id = id; + return definer; + } + + void mlx5_destroy_match_definer(struct mlx5_core_dev *dev, + struct mlx5_flow_definer *definer) + { + struct mlx5_flow_root_namespace *root; + + root = get_root_namespace(dev, definer->ns_type); + if (WARN_ON(!root)) + return; + + root->cmds->destroy_match_definer(root, definer->id); + kfree(definer); + } + int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_root_namespace *peer_ns) { diff --combined drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index baebc1aac5d0,84297cc1b509..ea1efdecc88c --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@@ -67,7 -67,7 +67,7 @@@ static void mlx5i_build_nic_params(stru MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE : MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;
- params->lro_en = false; + params->packet_merge.type = MLX5E_PACKET_MERGE_NONE; params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN; params->tunneled_offload_en = false; } @@@ -219,7 -219,7 +219,7 @@@ void mlx5i_uninit_underlay_qp(struct ml
int mlx5i_create_underlay_qp(struct mlx5e_priv *priv) { - unsigned char *dev_addr = priv->netdev->dev_addr; + const unsigned char *dev_addr = priv->netdev->dev_addr; u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {}; u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {}; struct mlx5i_priv *ipriv = priv->ppriv; @@@ -336,6 -336,8 +336,8 @@@ static int mlx5i_create_flow_steering(s goto err_destroy_arfs_tables; }
+ mlx5e_ethtool_init_steering(priv); + return 0;
err_destroy_arfs_tables: @@@ -348,12 -350,12 +350,12 @@@ static void mlx5i_destroy_flow_steering { mlx5e_destroy_ttc_table(priv); mlx5e_arfs_destroy_tables(priv); + mlx5e_ethtool_cleanup_steering(priv); }
static int mlx5i_init_rx(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_lro_param lro_param; int err;
priv->rx_res = mlx5e_rx_res_alloc(); @@@ -368,9 -370,9 +370,9 @@@ goto err_destroy_q_counters; }
- lro_param = mlx5e_get_lro_param(&priv->channels.params); err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0, - priv->max_nch, priv->drop_rq.rqn, &lro_param, + priv->max_nch, priv->drop_rq.rqn, + &priv->channels.params.packet_merge, priv->channels.params.num_channels); if (err) goto err_close_drop_rq; @@@ -472,13 -474,11 +474,13 @@@ int mlx5i_dev_init(struct net_device *d { struct mlx5e_priv *priv = mlx5i_epriv(dev); struct mlx5i_priv *ipriv = priv->ppriv; + u8 addr_mod[3];
/* Set dev address using underlay QP */ - dev->dev_addr[1] = (ipriv->qpn >> 16) & 0xff; - dev->dev_addr[2] = (ipriv->qpn >> 8) & 0xff; - dev->dev_addr[3] = (ipriv->qpn) & 0xff; + addr_mod[0] = (ipriv->qpn >> 16) & 0xff; + addr_mod[1] = (ipriv->qpn >> 8) & 0xff; + addr_mod[2] = (ipriv->qpn) & 0xff; + dev_addr_mod(dev, 1, addr_mod, sizeof(addr_mod));
/* Add QPN to net-device mapping to HT */ mlx5i_pkey_add_qpn(dev, ipriv->qpn); diff --combined drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 6c67185c05c1,73fed94af09a..3f47d2b3b6e6 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@@ -4,7 -4,7 +4,7 @@@ #ifndef _DR_TYPES_ #define _DR_TYPES_
- #include <linux/mlx5/driver.h> + #include <linux/mlx5/vport.h> #include <linux/refcount.h> #include "fs_core.h" #include "wq.h" @@@ -14,7 -14,6 +14,6 @@@
#define DR_RULE_MAX_STES 18 #define DR_ACTION_MAX_STES 5 - #define WIRE_PORT 0xFFFF #define DR_STE_SVLAN 0x1 #define DR_STE_CVLAN 0x2 #define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4) @@@ -752,9 -751,9 +751,9 @@@ struct mlx5dr_esw_caps struct mlx5dr_cmd_vport_cap { u16 vport_gvmi; u16 vhca_gvmi; + u16 num; u64 icm_address_rx; u64 icm_address_tx; - u32 num; };
struct mlx5dr_roce_cap { @@@ -763,6 -762,11 +762,11 @@@ u8 fl_rc_qp_when_roce_enabled:1; };
+ struct mlx5dr_vports { + struct mlx5dr_cmd_vport_cap esw_manager_caps; + struct xarray vports_caps_xa; + }; + struct mlx5dr_cmd_caps { u16 gvmi; u64 nic_rx_drop_address; @@@ -786,7 -790,6 +790,6 @@@ u8 flex_parser_id_gtpu_first_ext_dw_0; u8 max_ft_level; u16 roce_min_src_udp; - u8 num_esw_ports; u8 sw_format_ver; bool eswitch_manager; bool rx_sw_owner; @@@ -795,11 -798,11 +798,11 @@@ u8 rx_sw_owner_v2:1; u8 tx_sw_owner_v2:1; u8 fdb_sw_owner_v2:1; - u32 num_vports; struct mlx5dr_esw_caps esw_caps; - struct mlx5dr_cmd_vport_cap *vports_caps; + struct mlx5dr_vports vports; bool prio_tag_required; struct mlx5dr_roce_cap roce_caps; + u8 is_ecpf:1; u8 isolate_vl_tc:1; };
@@@ -826,10 -829,6 +829,6 @@@ struct mlx5dr_domain_info struct mlx5dr_cmd_caps caps; };
- struct mlx5dr_domain_cache { - struct mlx5dr_fw_recalc_cs_ft **recalc_cs_ft; - }; - struct mlx5dr_domain { struct mlx5dr_domain *peer_dmn; struct mlx5_core_dev *mdev; @@@ -841,7 -840,7 +840,7 @@@ struct mlx5dr_icm_pool *action_icm_pool; struct mlx5dr_send_ring *send_ring; struct mlx5dr_domain_info info; - struct mlx5dr_domain_cache cache; + struct xarray csum_fts_xa; struct mlx5dr_ste_ctx *ste_ctx; };
@@@ -942,7 -941,7 +941,7 @@@ struct mlx5dr_action_dest_tbl
struct mlx5dr_action_ctr { u32 ctr_id; - u32 offeset; + u32 offset; };
struct mlx5dr_action_vport { @@@ -1102,18 -1101,8 +1101,8 @@@ mlx5dr_ste_htbl_may_grow(struct mlx5dr_ return true; }
- static inline struct mlx5dr_cmd_vport_cap * - mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport) - { - if (!caps->vports_caps || - (vport >= caps->num_vports && vport != WIRE_PORT)) - return NULL; - - if (vport == WIRE_PORT) - vport = caps->num_vports; - - return &caps->vports_caps[vport]; - } + struct mlx5dr_cmd_vport_cap * + mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport);
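The dr_types.h changes above drop the fixed vports_caps array and its 0xFFFF WIRE_PORT sentinel in favour of an xarray keyed directly by the 16-bit vport number, looked up through mlx5dr_domain_get_vport_cap(). A sketch of that keyed-by-id xarray pattern, with hypothetical names:

        #include <linux/xarray.h>
        #include <linux/slab.h>

        struct example_vport_cap {
                u16 vport;
                u16 gvmi;
        };

        static DEFINE_XARRAY(example_vport_caps);

        /* Store a capability entry keyed by the vport number itself. */
        static int example_vport_cap_add(u16 vport, u16 gvmi)
        {
                struct example_vport_cap *cap;

                cap = kzalloc(sizeof(*cap), GFP_KERNEL);
                if (!cap)
                        return -ENOMEM;

                cap->vport = vport;
                cap->gvmi = gvmi;

                return xa_err(xa_store(&example_vport_caps, vport, cap, GFP_KERNEL));
        }

        /* No sentinel index is needed: an absent vport simply returns NULL. */
        static struct example_vport_cap *example_vport_cap_get(u16 vport)
        {
                return xa_load(&example_vport_caps, vport);
        }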
struct mlx5dr_cmd_query_flow_table_details { u8 status; @@@ -1154,7 -1143,7 +1143,7 @@@ int mlx5dr_cmd_set_fte_modify_and_vport u32 table_id, u32 group_id, u32 modify_header_id, - u32 vport_id); + u16 vport_id); int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev, u32 table_type, u32 table_id); @@@ -1275,7 -1264,7 +1264,7 @@@ struct mlx5dr_cq
struct mlx5dr_mr { struct mlx5_core_dev *mdev; - struct mlx5_core_mkey mkey; + u32 mkey; dma_addr_t dma_addr; void *addr; size_t size; @@@ -1372,12 -1361,12 +1361,12 @@@ struct mlx5dr_fw_recalc_cs_ft };
struct mlx5dr_fw_recalc_cs_ft * - mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num); + mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num); void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn, struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft); - int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn, - u32 vport_num, - u64 *rx_icm_addr); + int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn, + u16 vport_num, + u64 *rx_icm_addr); int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn, struct mlx5dr_cmd_flow_destination_hw_info *dest, int num_dest, diff --combined drivers/net/ethernet/microchip/lan743x_main.c index c4f32c7e0db1,03d02403c19e..9d1091237567 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@@ -816,7 -816,7 +816,7 @@@ static int lan743x_mac_init(struct lan7 eth_random_addr(adapter->mac_address); } lan743x_mac_set_address(adapter, adapter->mac_address); - ether_addr_copy(netdev->dev_addr, adapter->mac_address); + eth_hw_addr_set(netdev, adapter->mac_address);
return 0; } @@@ -1743,16 -1743,6 +1743,16 @@@ static int lan743x_tx_ring_init(struct ret = -EINVAL; goto cleanup; } + if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev, + DMA_BIT_MASK(64))) { + if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev, + DMA_BIT_MASK(32))) { + dev_warn(&tx->adapter->pdev->dev, + "lan743x_: No suitable DMA available\n"); + ret = -ENOMEM; + goto cleanup; + } + } ring_allocation_size = ALIGN(tx->ring_size * sizeof(struct lan743x_tx_descriptor), PAGE_SIZE); @@@ -2286,16 -2276,6 +2286,16 @@@ static int lan743x_rx_ring_init(struct ret = -EINVAL; goto cleanup; } + if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev, + DMA_BIT_MASK(64))) { + if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev, + DMA_BIT_MASK(32))) { + dev_warn(&rx->adapter->pdev->dev, + "lan743x_: No suitable DMA available\n"); + ret = -ENOMEM; + goto cleanup; + } + } ring_allocation_size = ALIGN(rx->ring_size * sizeof(struct lan743x_rx_descriptor), PAGE_SIZE); @@@ -2665,7 -2645,7 +2665,7 @@@ static int lan743x_netdev_set_mac_addre ret = eth_prepare_mac_addr_change(netdev, sock_addr); if (ret) return ret; - ether_addr_copy(netdev->dev_addr, sock_addr->sa_data); + eth_hw_addr_set(netdev, sock_addr->sa_data); lan743x_mac_set_address(adapter, sock_addr->sa_data); lan743x_rfe_update_mac_address(adapter); return 0; @@@ -3039,8 -3019,6 +3039,8 @@@ static int lan743x_pm_resume(struct dev if (ret) { netif_err(adapter, probe, adapter->netdev, "lan743x_hardware_init returned %d\n", ret); + lan743x_pci_cleanup(adapter); + return ret; }
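lan743x now negotiates a 64-bit DMA mask and falls back to 32-bit before sizing its rings, failing ring setup if neither is available. The same probe-time negotiation in a condensed sketch (the helper name is illustrative):

        #include <linux/dma-mapping.h>
        #include <linux/pci.h>

        /* Prefer 64-bit DMA, fall back to 32-bit, and give up if neither
         * mask can be satisfied, mirroring the lan743x hunks above.
         */
        static int example_set_dma_mask(struct pci_dev *pdev)
        {
                if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
                        return 0;

                if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
                        return 0;

                dev_warn(&pdev->dev, "no suitable DMA mask available\n");
                return -ENOMEM;
        }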
/* open netdev when netdev is at running state while resume. diff --combined drivers/net/ethernet/nxp/lpc_eth.c index c910fa2f40a4,a63cc295b979..bc39558fe82b --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@@ -419,7 -419,7 +419,7 @@@ struct netdata_local /* * MAC support functions */ - static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac) + static void __lpc_set_mac(struct netdata_local *pldat, const u8 *mac) { u32 tmp;
@@@ -1015,6 -1015,9 +1015,6 @@@ static int lpc_eth_close(struct net_dev napi_disable(&pldat->napi); netif_stop_queue(ndev);
- if (ndev->phydev) - phy_stop(ndev->phydev); - spin_lock_irqsave(&pldat->lock, flags); __lpc_eth_reset(pldat); netif_carrier_off(ndev); @@@ -1022,8 -1025,6 +1022,8 @@@ writel(0, LPC_ENET_MAC2(pldat->net_base)); spin_unlock_irqrestore(&pldat->lock, flags);
+ if (ndev->phydev) + phy_stop(ndev->phydev); clk_disable_unprepare(pldat->clk);
return 0; @@@ -1092,7 -1093,7 +1092,7 @@@ static int lpc_set_mac_address(struct n
if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(ndev, addr->sa_data);
spin_lock_irqsave(&pldat->lock, flags);
@@@ -1231,6 -1232,7 +1231,7 @@@ static int lpc_eth_drv_probe(struct pla struct net_device *ndev; dma_addr_t dma_handle; struct resource *res; + u8 addr[ETH_ALEN]; int irq, ret;
/* Setup network interface for RMII or MII mode */ @@@ -1346,10 -1348,11 +1347,11 @@@ pldat->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* Get MAC address from current HW setting (POR state is all zeros) */ - __lpc_get_mac(pldat, ndev->dev_addr); + __lpc_get_mac(pldat, addr); + eth_hw_addr_set(ndev, addr);
if (!is_valid_ether_addr(ndev->dev_addr)) { - of_get_mac_address(np, ndev->dev_addr); + of_get_ethdev_address(np, ndev); } if (!is_valid_ether_addr(ndev->dev_addr)) eth_hw_addr_random(ndev); diff --combined drivers/net/ethernet/smsc/smc91x.c index e4fc6484faa8,a31c159e96ea..96a9400b5737 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@@ -1851,6 -1851,7 +1851,7 @@@ static int smc_probe(struct net_device int retval; unsigned int val, revision_register; const char *version_string; + u8 addr[ETH_ALEN];
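The lpc_eth probe path above reads the MAC from the controller into a local buffer, installs it with eth_hw_addr_set(), and only then falls back to the device tree via of_get_ethdev_address() and finally to a random address. The same fallback order in a self-contained sketch (the register read is a stand-in):

        #include <linux/etherdevice.h>
        #include <linux/of.h>
        #include <linux/of_net.h>

        /* Probe-time MAC selection: hardware -> device tree -> random. */
        static void example_init_mac(struct net_device *ndev,
                                     struct device_node *np,
                                     void (*read_hw_mac)(u8 *addr))
        {
                u8 addr[ETH_ALEN];

                read_hw_mac(addr);
                eth_hw_addr_set(ndev, addr);

                if (!is_valid_ether_addr(ndev->dev_addr))
                        of_get_ethdev_address(np, ndev);

                if (!is_valid_ether_addr(ndev->dev_addr))
                        eth_hw_addr_random(ndev);
        }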
DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
@@@ -1922,7 -1923,8 +1923,8 @@@
/* Get the MAC address */ SMC_SELECT_BANK(lp, 1); - SMC_GET_MAC_ADDR(lp, dev->dev_addr); + SMC_GET_MAC_ADDR(lp, addr); + eth_hw_addr_set(dev, addr);
/* now, reset the chip, and put it into a known state */ smc_reset(dev); @@@ -2190,7 -2192,6 +2192,7 @@@ static const struct of_device_id smc91x }; MODULE_DEVICE_TABLE(of, smc91x_match);
+#if defined(CONFIG_GPIOLIB) /** * try_toggle_control_gpio - configure a gpio if it exists * @dev: net device @@@ -2221,15 -2222,6 +2223,15 @@@ static int try_toggle_control_gpio(stru
return 0; } +#else +static int try_toggle_control_gpio(struct device *dev, + struct gpio_desc **desc, + const char *name, int index, + int value, unsigned int nsdelay) +{ + return 0; +} +#endif #endif
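smc91x gains a stub try_toggle_control_gpio() for kernels built without CONFIG_GPIOLIB, so the caller keeps a single code path. The general shape of that pattern, sketched with a hypothetical helper (the GPIOLIB half is illustrative, not smc91x's actual body):

        #include <linux/err.h>
        #include <linux/gpio/consumer.h>

        #if defined(CONFIG_GPIOLIB)
        static int example_toggle_gpio(struct device *dev, struct gpio_desc **desc,
                                       const char *name, int value)
        {
                *desc = devm_gpiod_get_optional(dev, name,
                                                value ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW);
                return PTR_ERR_OR_ZERO(*desc);
        }
        #else
        /* Without GPIOLIB the line is simply not driven; report success so
         * the probe path stays identical for both configurations.
         */
        static int example_toggle_gpio(struct device *dev, struct gpio_desc **desc,
                                       const char *name, int value)
        {
                return 0;
        }
        #endif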
/* diff --combined drivers/net/usb/lan78xx.c index 63cd72c5f580,03319fdb5235..f20376c1ef3f --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@@ -1817,7 -1817,7 +1817,7 @@@ static void lan78xx_init_mac_address(st lan78xx_write_reg(dev, MAF_LO(0), addr_lo); lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
- ether_addr_copy(dev->net->dev_addr, addr); + eth_hw_addr_set(dev->net, addr); }
/* MDIO read and write wrappers for phylib */ @@@ -2416,7 -2416,7 +2416,7 @@@ static int lan78xx_set_mac_addr(struct if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL;
- ether_addr_copy(netdev->dev_addr, addr->sa_data); + eth_hw_addr_set(netdev, addr->sa_data);
addr_lo = netdev->dev_addr[0] | netdev->dev_addr[1] << 8 | @@@ -4122,12 -4122,6 +4122,12 @@@ static int lan78xx_probe(struct usb_int
dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
+ /* Reject broken descriptors. */ + if (dev->maxpacket == 0) { + ret = -ENODEV; + goto out4; + } + /* driver requires remote-wakeup capability during autosuspend. */ intf->needs_remote_wakeup = 1;
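lan78xx, like usbnet further below, now refuses to bind to devices whose bulk endpoint advertises a zero wMaxPacketSize, since later URB sizing would divide by it. A short probe-time sanity check in the same spirit (usb_maxpacket() still takes the is-out flag as a third argument in this kernel generation):

        #include <linux/usb.h>

        /* Reject a broken descriptor before the driver sizes URBs on it. */
        static int example_check_bulk_out(struct usb_device *udev, unsigned int pipe)
        {
                if (usb_maxpacket(udev, pipe, 1) == 0)
                        return -ENODEV; /* wMaxPacketSize == 0 is unusable */

                return 0;
        }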
diff --combined drivers/net/usb/usbnet.c index a33d7fb82a00,350bae673ed4..9a6450f796dc --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@@ -165,12 -165,13 +165,13 @@@ EXPORT_SYMBOL_GPL(usbnet_get_endpoints)
int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress) { + u8 addr[ETH_ALEN]; int tmp = -1, ret; unsigned char buf [13];
ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf); if (ret == 12) - tmp = hex2bin(dev->net->dev_addr, buf, 6); + tmp = hex2bin(addr, buf, 6); if (tmp < 0) { dev_dbg(&dev->udev->dev, "bad MAC string %d fetch, %d\n", iMACAddress, tmp); @@@ -178,6 -179,7 +179,7 @@@ ret = -EINVAL; return ret; } + eth_hw_addr_set(dev->net, addr); return 0; } EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr); @@@ -1726,7 -1728,7 +1728,7 @@@ usbnet_probe (struct usb_interface *ude
dev->net = net; strscpy(net->name, "usb%d", sizeof(net->name)); - memcpy (net->dev_addr, node_id, sizeof node_id); + eth_hw_addr_set(net, node_id);
/* rx and tx sides can use different message sizes; * bind() should set rx_urb_size in that case. @@@ -1790,7 -1792,6 +1792,7 @@@ dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1); if (dev->maxpacket == 0) { /* that is a broken device */ + status = -ENODEV; goto out4; }
diff --combined drivers/net/xen-netfront.c index fc41ba95f81d,57437e4b8a94..911f43986a8c --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@@ -1730,10 -1730,6 +1730,10 @@@ static int netfront_resume(struct xenbu
dev_dbg(&dev->dev, "%s\n", dev->nodename);
+ netif_tx_lock_bh(info->netdev); + netif_device_detach(info->netdev); + netif_tx_unlock_bh(info->netdev); + xennet_disconnect_backend(info); return 0; } @@@ -2161,6 -2157,7 +2161,7 @@@ static int talk_to_netback(struct xenbu unsigned int max_queues = 0; struct netfront_queue *queue = NULL; unsigned int num_queues = 1; + u8 addr[ETH_ALEN];
info->netdev->irq = 0;
@@@ -2174,11 -2171,12 +2175,12 @@@ "feature-split-event-channels", 0);
/* Read mac addr. */ - err = xen_net_read_mac(dev, info->netdev->dev_addr); + err = xen_net_read_mac(dev, addr); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto out_unlocked; } + eth_hw_addr_set(info->netdev, addr);
info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend, "feature-xdp-headroom", 0); @@@ -2353,10 -2351,6 +2355,10 @@@ static int xennet_connect(struct net_de * domain a kick because we've probably just requeued some * packets. */ + netif_tx_lock_bh(np->netdev); + netif_device_attach(np->netdev); + netif_tx_unlock_bh(np->netdev); + netif_carrier_on(np->netdev); for (j = 0; j < num_queues; ++j) { queue = &np->queues[j]; diff --combined drivers/soc/fsl/dpio/dpio-service.c index 4d119e98ce7c,3fd0d0840287..1d2b27e3ea63 --- a/drivers/soc/fsl/dpio/dpio-service.c +++ b/drivers/soc/fsl/dpio/dpio-service.c @@@ -12,6 -12,7 +12,7 @@@ #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> + #include <linux/dim.h> #include <linux/slab.h>
#include "dpio.h" @@@ -28,6 -29,14 +29,14 @@@ struct dpaa2_io spinlock_t lock_notifications; struct list_head notifications; struct device *dev; + + /* Net DIM */ + struct dim rx_dim; + /* protect against concurrent Net DIM updates */ + spinlock_t dim_lock; + u16 event_ctr; + u64 bytes; + u64 frames; };
struct dpaa2_io_store { @@@ -59,7 -68,7 +68,7 @@@ static inline struct dpaa2_io *service_ * potentially being migrated away. */ if (cpu < 0) - cpu = smp_processor_id(); + cpu = raw_smp_processor_id();
/* If a specific cpu was requested, pick it up immediately */ return dpio_by_cpu[cpu]; @@@ -100,6 -109,17 +109,17 @@@ struct dpaa2_io *dpaa2_io_service_selec } EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
+ static void dpaa2_io_dim_work(struct work_struct *w) + { + struct dim *dim = container_of(w, struct dim, work); + struct dim_cq_moder moder = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + struct dpaa2_io *d = container_of(dim, struct dpaa2_io, rx_dim); + + dpaa2_io_set_irq_coalescing(d, moder.usec); + dim->state = DIM_START_MEASURE; + } + /** * dpaa2_io_create() - create a dpaa2_io object. * @desc: the dpaa2_io descriptor @@@ -114,6 -134,7 +134,7 @@@ struct dpaa2_io *dpaa2_io_create(const struct device *dev) { struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL); + u32 qman_256_cycles_per_ns;
if (!obj) return NULL; @@@ -127,7 -148,15 +148,15 @@@ obj->dpio_desc = *desc; obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena; obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh; + obj->swp_desc.qman_clk = obj->dpio_desc.qman_clk; obj->swp_desc.qman_version = obj->dpio_desc.qman_version; + + /* Compute how many 256 QBMAN cycles fit into one ns. This is because + * the interrupt timeout period register needs to be specified in QBMAN + * clock cycles in increments of 256. + */ + qman_256_cycles_per_ns = 256000 / (obj->swp_desc.qman_clk / 1000000); + obj->swp_desc.qman_256_cycles_per_ns = qman_256_cycles_per_ns; obj->swp = qbman_swp_init(&obj->swp_desc);
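The qman_256_cycles_per_ns value computed above works out to the length, in nanoseconds, of 256 QBMAN clock cycles, which is the unit the ITPR timeout register counts in. Worked through with an assumed 500 MHz QBMAN clock (an illustrative figure, not taken from this series):

        qman_256_cycles_per_ns = 256000 / (500000000 / 1000000)
                               = 256000 / 500
                               = 512          /* 256 cycles last 512 ns */

        /* qbman_swp_set_irq_coalescing(), further below, then converts a
         * holdoff given in microseconds into ITPR units of 256 cycles,
         * e.g. for a 100 us holdoff:
         */
        itp = (100 * 1000) / 512 = 195

        /* and the largest programmable holdoff (itp == 4096) is
         * (512 * 4096) / 1000 ~= 2097 us.
         */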
if (!obj->swp) { @@@ -138,6 -167,7 +167,7 @@@ INIT_LIST_HEAD(&obj->node); spin_lock_init(&obj->lock_mgmt_cmd); spin_lock_init(&obj->lock_notifications); + spin_lock_init(&obj->dim_lock); INIT_LIST_HEAD(&obj->notifications);
/* For now only enable DQRR interrupts */ @@@ -155,6 -185,12 +185,12 @@@
obj->dev = dev;
+ memset(&obj->rx_dim, 0, sizeof(obj->rx_dim)); + INIT_WORK(&obj->rx_dim.work, dpaa2_io_dim_work); + obj->event_ctr = 0; + obj->bytes = 0; + obj->frames = 0; + return obj; }
@@@ -194,6 -230,8 +230,8 @@@ irqreturn_t dpaa2_io_irq(struct dpaa2_i struct qbman_swp *swp; u32 status;
+ obj->event_ctr++; + swp = obj->swp; status = qbman_swp_interrupt_read_status(swp); if (!status) @@@ -462,7 -500,7 +500,7 @@@ int dpaa2_io_service_enqueue_multiple_f qbman_eq_desc_set_no_orp(&ed, 0); qbman_eq_desc_set_fq(&ed, fqid);
- return qbman_swp_enqueue_multiple(d->swp, &ed, fd, 0, nb); + return qbman_swp_enqueue_multiple(d->swp, &ed, fd, NULL, nb); } EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);
@@@ -779,3 -817,82 +817,82 @@@ int dpaa2_io_query_bp_count(struct dpaa return 0; } EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count); + + /** + * dpaa2_io_set_irq_coalescing() - Set new IRQ coalescing values + * @d: the given DPIO object + * @irq_holdoff: interrupt holdoff (timeout) period in us + * + * Return 0 for success, or negative error code on error. + */ + int dpaa2_io_set_irq_coalescing(struct dpaa2_io *d, u32 irq_holdoff) + { + struct qbman_swp *swp = d->swp; + + return qbman_swp_set_irq_coalescing(swp, swp->dqrr.dqrr_size - 1, + irq_holdoff); + } + EXPORT_SYMBOL(dpaa2_io_set_irq_coalescing); + + /** + * dpaa2_io_get_irq_coalescing() - Get the current IRQ coalescing parameters + * @d: the given DPIO object + * @irq_holdoff: interrupt holdoff (timeout) period in us + */ + void dpaa2_io_get_irq_coalescing(struct dpaa2_io *d, u32 *irq_holdoff) + { + struct qbman_swp *swp = d->swp; + + qbman_swp_get_irq_coalescing(swp, NULL, irq_holdoff); + } + EXPORT_SYMBOL(dpaa2_io_get_irq_coalescing); + + /** + * dpaa2_io_set_adaptive_coalescing() - Enable/disable adaptive coalescing + * @d: the given DPIO object + * @use_adaptive_rx_coalesce: adaptive coalescing state + */ + void dpaa2_io_set_adaptive_coalescing(struct dpaa2_io *d, + int use_adaptive_rx_coalesce) + { + d->swp->use_adaptive_rx_coalesce = use_adaptive_rx_coalesce; + } + EXPORT_SYMBOL(dpaa2_io_set_adaptive_coalescing); + + /** + * dpaa2_io_get_adaptive_coalescing() - Query adaptive coalescing state + * @d: the given DPIO object + * + * Return 1 when adaptive coalescing is enabled on the DPIO object and 0 + * otherwise. + */ + int dpaa2_io_get_adaptive_coalescing(struct dpaa2_io *d) + { + return d->swp->use_adaptive_rx_coalesce; + } + EXPORT_SYMBOL(dpaa2_io_get_adaptive_coalescing); + + /** + * dpaa2_io_update_net_dim() - Update Net DIM + * @d: the given DPIO object + * @frames: how many frames have been dequeued by the user since the last call + * @bytes: how many bytes have been dequeued by the user since the last call + */ + void dpaa2_io_update_net_dim(struct dpaa2_io *d, __u64 frames, __u64 bytes) + { + struct dim_sample dim_sample = {}; + + if (!d->swp->use_adaptive_rx_coalesce) + return; + + spin_lock(&d->dim_lock); + + d->bytes += bytes; + d->frames += frames; + + dim_update_sample(d->event_ctr, d->frames, d->bytes, &dim_sample); + net_dim(&d->rx_dim, dim_sample); + + spin_unlock(&d->dim_lock); + } + EXPORT_SYMBOL(dpaa2_io_update_net_dim); diff --combined drivers/soc/fsl/dpio/qbman-portal.c index c4282cc7b391,3474bf5f88d5..1397ec21873a --- a/drivers/soc/fsl/dpio/qbman-portal.c +++ b/drivers/soc/fsl/dpio/qbman-portal.c @@@ -29,6 -29,7 +29,7 @@@ #define QBMAN_CINH_SWP_EQCR_AM_RT 0x980 #define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0 #define QBMAN_CINH_SWP_DQPI 0xa00 + #define QBMAN_CINH_SWP_DQRR_ITR 0xa80 #define QBMAN_CINH_SWP_DCAP 0xac0 #define QBMAN_CINH_SWP_SDQCR 0xb00 #define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40 @@@ -38,6 -39,7 +39,7 @@@ #define QBMAN_CINH_SWP_IER 0xe40 #define QBMAN_CINH_SWP_ISDR 0xe80 #define QBMAN_CINH_SWP_IIR 0xec0 + #define QBMAN_CINH_SWP_ITPR 0xf40
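The new dpaa2_io_update_net_dim() is meant to be fed from the consumer's NAPI poll with the frames and bytes dequeued since the previous call; dpaa2_io_irq() counts events, and the queued DIM work re-programs the holdoff through dpaa2_io_set_irq_coalescing(). A hedged sketch of the consumer side (only the dpaa2_io_* calls come from the code above, the rest is illustrative):

        #include <soc/fsl/dpaa2-io.h>

        /* Opt in once, for example from an ethtool set_coalesce handler
         * when adaptive-rx is requested...
         */
        static void example_enable_adaptive_rx(struct dpaa2_io *io)
        {
                dpaa2_io_set_adaptive_coalescing(io, 1);
        }

        /* ...then report activity from the NAPI poll loop; the call is a
         * no-op while adaptive coalescing is disabled.
         */
        static void example_napi_accounting(struct dpaa2_io *io,
                                            u64 frames, u64 bytes)
        {
                dpaa2_io_update_net_dim(io, frames, bytes);
        }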
/* CENA register offsets */ #define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6)) @@@ -355,6 -357,9 +357,9 @@@ struct qbman_swp *qbman_swp_init(const & p->eqcr.pi_ci_mask; p->eqcr.available = p->eqcr.pi_ring_size;
+ /* Initialize the software portal with a irq timeout period of 0us */ + qbman_swp_set_irq_coalescing(p, p->dqrr.dqrr_size - 1, 0); + return p; }
@@@ -674,9 -679,9 +679,9 @@@ int qbman_swp_enqueue_multiple_direct(s for (i = 0; i < num_enqueued; i++) { p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); /* Skip copying the verb */ - memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); - memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)], - &fd[i], sizeof(*fd)); + memcpy_toio((__iomem void *)&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); + memcpy_toio((__iomem void *)&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)], + &fd[i], sizeof(*fd)); eqcr_pi++; }
@@@ -688,9 -693,9 +693,9 @@@ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); p[0] = cl[0] | s->eqcr.pi_vb; if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) { - struct qbman_eq_desc *d = (struct qbman_eq_desc *)p; + struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p;
- d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) | + eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) | ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK); } eqcr_pi++; @@@ -732,7 -737,8 +737,7 @@@ int qbman_swp_enqueue_multiple_mem_back int i, num_enqueued = 0; unsigned long irq_flags;
- spin_lock(&s->access_spinlock); - local_irq_save(irq_flags); + spin_lock_irqsave(&s->access_spinlock, irq_flags);
half_mask = (s->eqcr.pi_ci_mask>>1); full_mask = s->eqcr.pi_ci_mask; @@@ -743,7 -749,8 +748,7 @@@ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size, eqcr_ci, s->eqcr.ci); if (!s->eqcr.available) { - local_irq_restore(irq_flags); - spin_unlock(&s->access_spinlock); + spin_unlock_irqrestore(&s->access_spinlock, irq_flags); return 0; } } @@@ -756,9 -763,9 +761,9 @@@ for (i = 0; i < num_enqueued; i++) { p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); /* Skip copying the verb */ - memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); - memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)], - &fd[i], sizeof(*fd)); + memcpy_toio((__iomem void *)&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); + memcpy_toio((__iomem void *)&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)], + &fd[i], sizeof(*fd)); eqcr_pi++; }
@@@ -768,9 -775,9 +773,9 @@@ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); p[0] = cl[0] | s->eqcr.pi_vb; if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) { - struct qbman_eq_desc *d = (struct qbman_eq_desc *)p; + struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p;
- d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) | + eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) | ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK); } eqcr_pi++; @@@ -782,7 -789,8 +787,7 @@@ dma_wmb(); qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI, (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb); - local_irq_restore(irq_flags); - spin_unlock(&s->access_spinlock); + spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
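The mem-backed enqueue path above replaces the bare spin_lock()/local_irq_save() pair with spin_lock_irqsave(), so the saved IRQ state travels with the lock and every early return restores both together. The same conversion in a self-contained sketch:

        #include <linux/spinlock.h>

        struct example_ring {
                spinlock_t lock;
                unsigned int pi;
        };

        /* Take the lock and disable local IRQs in one step; the matching
         * unlock restores both, on every exit path.
         */
        static void example_ring_advance(struct example_ring *r)
        {
                unsigned long flags;

                spin_lock_irqsave(&r->lock, flags);
                r->pi++;
                spin_unlock_irqrestore(&r->lock, flags);
        }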
return num_enqueued; } @@@ -829,9 -837,9 +834,9 @@@ int qbman_swp_enqueue_multiple_desc_dir p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); cl = (uint32_t *)(&d[i]); /* Skip copying the verb */ - memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); - memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)], - &fd[i], sizeof(*fd)); + memcpy_toio((__iomem void *)&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); + memcpy_toio((__iomem void *)&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)], + &fd[i], sizeof(*fd)); eqcr_pi++; }
@@@ -899,9 -907,9 +904,9 @@@ int qbman_swp_enqueue_multiple_desc_mem p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); cl = (uint32_t *)(&d[i]); /* Skip copying the verb */ - memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); - memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)], - &fd[i], sizeof(*fd)); + memcpy_toio((__iomem void *)&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); + memcpy_toio((__iomem void *)&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)], + &fd[i], sizeof(*fd)); eqcr_pi++; }
@@@ -1793,3 -1801,56 +1798,56 @@@ u32 qbman_bp_info_num_free_bufs(struct { return le32_to_cpu(a->fill); } + + /** + * qbman_swp_set_irq_coalescing() - Set new IRQ coalescing values + * @p: the software portal object + * @irq_threshold: interrupt threshold + * @irq_holdoff: interrupt holdoff (timeout) period in us + * + * Return 0 for success, or negative error code on error. + */ + int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold, + u32 irq_holdoff) + { + u32 itp, max_holdoff; + + /* Convert irq_holdoff value from usecs to 256 QBMAN clock cycles + * increments. This depends on the QBMAN internal frequency. + */ + itp = (irq_holdoff * 1000) / p->desc->qman_256_cycles_per_ns; + if (itp > 4096) { + max_holdoff = (p->desc->qman_256_cycles_per_ns * 4096) / 1000; + pr_err("irq_holdoff must be <= %uus\n", max_holdoff); + return -EINVAL; + } + + if (irq_threshold >= p->dqrr.dqrr_size) { + pr_err("irq_threshold must be < %u\n", p->dqrr.dqrr_size - 1); + return -EINVAL; + } + + p->irq_threshold = irq_threshold; + p->irq_holdoff = irq_holdoff; + + qbman_write_register(p, QBMAN_CINH_SWP_DQRR_ITR, irq_threshold); + qbman_write_register(p, QBMAN_CINH_SWP_ITPR, itp); + + return 0; + } + + /** + * qbman_swp_get_irq_coalescing() - Get the current IRQ coalescing parameters + * @p: the software portal object + * @irq_threshold: interrupt threshold (an IRQ is generated when there are more + * DQRR entries in the portal than the threshold) + * @irq_holdoff: interrupt holdoff (timeout) period in us + */ + void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold, + u32 *irq_holdoff) + { + if (irq_threshold) + *irq_threshold = p->irq_threshold; + if (irq_holdoff) + *irq_holdoff = p->irq_holdoff; + } diff --combined include/linux/bpf.h index 3db6f6c95489,1c7fd7c4c6d3..e6f5579f9356 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@@ -48,6 -48,7 +48,7 @@@ extern struct idr btf_idr extern spinlock_t btf_idr_lock; extern struct kobject *btf_kobj;
+ typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64); typedef int (*bpf_iter_init_seq_priv_t)(void *private_data, struct bpf_iter_aux_info *aux); typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data); @@@ -142,7 -143,8 +143,8 @@@ struct bpf_map_ops int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env, struct bpf_func_state *caller, struct bpf_func_state *callee); - int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn, + int (*map_for_each_callback)(struct bpf_map *map, + bpf_callback_t callback_fn, void *callback_ctx, u64 flags);
/* BTF name and id of struct allocated by map_alloc */ @@@ -929,11 -931,8 +931,11 @@@ struct bpf_array_aux * stored in the map to make sure that all callers and callees have * the same prog type and JITed flag. */ - enum bpf_prog_type type; - bool jited; + struct { + spinlock_t lock; + enum bpf_prog_type type; + bool jited; + } owner; /* Programs with direct jumps into programs part of this array. */ struct list_head poke_progs; struct bpf_map *map; @@@ -1092,6 -1091,7 +1094,7 @@@ bool bpf_prog_array_compatible(struct b int bpf_prog_calc_tag(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void); + const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);
typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, unsigned long off, unsigned long len); @@@ -2220,6 -2220,8 +2223,8 @@@ int bpf_arch_text_poke(void *ip, enum b struct btf_id_set; bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
+ #define MAX_BPRINTF_VARARGS 12 + int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, u32 **bin_buf, u32 num_args); void bpf_bprintf_cleanup(void); diff --combined include/linux/filter.h index ef03ff34234d,47f80adbe744..8231a6a257f6 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@@ -360,10 -360,9 +360,9 @@@ static inline bool insn_is_zext(const s .off = 0, \ .imm = TGT })
- /* Function call */ + /* Convert function address to BPF immediate */
- #define BPF_CAST_CALL(x) \ - ((u64 (*)(u64, u64, u64, u64, u64))(x)) + #define BPF_CALL_IMM(x) ((void *)(x) - (void *)__bpf_call_base)
#define BPF_EMIT_CALL(FUNC) \ ((struct bpf_insn) { \ @@@ -371,7 -370,7 +370,7 @@@ .dst_reg = 0, \ .src_reg = 0, \ .off = 0, \ - .imm = ((FUNC) - __bpf_call_base) }) + .imm = BPF_CALL_IMM(FUNC) })
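The old BPF_CAST_CALL() cast a function to the generic five-u64 helper type before the subtraction of __bpf_call_base; the new BPF_CALL_IMM() performs the address-to-immediate conversion directly, producing the relative value a BPF_JMP|BPF_CALL instruction carries. A sketch of a fixup-style use, with a hypothetical patch routine:

        #include <linux/bpf.h>
        #include <linux/filter.h>

        /* Point an already-emitted BPF_JMP|BPF_CALL instruction at a kernel
         * helper; the interpreter/JIT later recovers the target address as
         * __bpf_call_base + insn->imm.
         */
        static void example_patch_call(struct bpf_insn *insn, bpf_callback_t helper)
        {
                insn->imm = BPF_CALL_IMM(helper);
        }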
/* Raw code statement block */
@@@ -1051,7 -1050,6 +1050,7 @@@ extern int bpf_jit_enable extern int bpf_jit_harden; extern int bpf_jit_kallsyms; extern long bpf_jit_limit; +extern long bpf_jit_limit_max;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
diff --combined include/linux/mlx5/device.h index ed0230ff9422,56bcf95d4ab7..9c25edfd59a6 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@@ -290,6 -290,7 +290,7 @@@ enum MLX5_UMR_INLINE = (1 << 7), };
+ #define MLX5_UMR_KLM_ALIGNMENT 4 #define MLX5_UMR_MTT_ALIGNMENT 0x40 #define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1) #define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT @@@ -541,19 -542,21 +542,21 @@@ struct mlx5_cmd_layout u8 status_own; };
- enum mlx5_fatal_assert_bit_offsets { - MLX5_RFR_OFFSET = 31, + enum mlx5_rfr_severity_bit_offsets { + MLX5_RFR_BIT_OFFSET = 0x7, };
struct health_buffer { - __be32 assert_var[5]; - __be32 rsvd0[3]; + __be32 assert_var[6]; + __be32 rsvd0[2]; __be32 assert_exit_ptr; __be32 assert_callra; - __be32 rsvd1[2]; + __be32 rsvd1[1]; + __be32 time; __be32 fw_ver; __be32 hw_id; - __be32 rfr; + u8 rfr_severity; + u8 rsvd2[3]; u8 irisc_index; u8 synd; __be16 ext_synd; @@@ -577,7 -580,9 +580,9 @@@ struct mlx5_init_seg __be32 rsvd1[120]; __be32 initializing; struct health_buffer health; - __be32 rsvd2[880]; + __be32 rsvd2[878]; + __be32 cmd_exec_to; + __be32 cmd_q_init_to; __be32 internal_timer_h; __be32 internal_timer_l; __be32 rsvd3[2]; @@@ -795,10 -800,23 +800,23 @@@ struct mlx5_cqe64 u8 tls_outer_l3_tunneled; u8 rsvd0; __be16 wqe_id; - u8 lro_tcppsh_abort_dupack; - u8 lro_min_ttl; - __be16 lro_tcp_win; - __be32 lro_ack_seq_num; + union { + struct { + u8 tcppsh_abort_dupack; + u8 min_ttl; + __be16 tcp_win; + __be32 ack_seq_num; + } lro; + struct { + u8 reserved0:1; + u8 match:1; + u8 flush:1; + u8 reserved3:5; + u8 header_size; + __be16 header_entry_index; + __be32 data_offset; + } shampo; + }; __be32 rss_hash_result; u8 rss_hash_type; u8 ml_path; @@@ -868,7 -886,7 +886,7 @@@ static inline u8 get_cqe_opcode(struct
static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) { - return (cqe->lro_tcppsh_abort_dupack >> 6) & 1; + return (cqe->lro.tcppsh_abort_dupack >> 6) & 1; }
static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe) @@@ -1182,7 -1200,9 +1200,9 @@@ enum mlx5_cap_type MLX5_CAP_VDPA_EMULATION = 0x13, MLX5_CAP_DEV_EVENT = 0x14, MLX5_CAP_IPSEC, + MLX5_CAP_DEV_SHAMPO = 0x1d, MLX5_CAP_GENERAL_2 = 0x20, + MLX5_CAP_PORT_SELECTION = 0x25, /* NUM OF CAP Types */ MLX5_CAP_NUM }; @@@ -1340,6 -1360,20 +1360,20 @@@ enum mlx5_qcam_feature_groups MLX5_GET(e_switch_cap, \ mdev->caps.hca[MLX5_CAP_ESWITCH]->max, cap)
+ #define MLX5_CAP_PORT_SELECTION(mdev, cap) \ + MLX5_GET(port_selection_cap, \ + mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, cap) + + #define MLX5_CAP_PORT_SELECTION_MAX(mdev, cap) \ + MLX5_GET(port_selection_cap, \ + mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->max, cap) + + #define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \ + MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap) + + #define MLX5_CAP_FLOWTABLE_PORT_SELECTION_MAX(mdev, cap) \ + MLX5_CAP_PORT_SELECTION_MAX(mdev, flow_table_properties_port_selection.cap) + #define MLX5_CAP_ODP(mdev, cap)\ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
@@@ -1412,6 -1446,9 +1446,9 @@@ #define MLX5_CAP_IPSEC(mdev, cap)\ MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap)
+ #define MLX5_CAP_DEV_SHAMPO(mdev, cap)\ + MLX5_GET(shampo_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_SHAMPO], cap) + enum { MLX5_CMD_STAT_OK = 0x0, MLX5_CMD_STAT_INT_ERR = 0x1, @@@ -1456,8 -1493,6 +1493,8 @@@ static inline u16 mlx5_to_sw_pkey_sz(in return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz; }
+#define MLX5_RDMA_RX_NUM_COUNTERS_PRIOS 2 +#define MLX5_RDMA_TX_NUM_COUNTERS_PRIOS 1 #define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16 #define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16 #define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1 diff --combined include/linux/mlx5/driver.h index 5cc19b9483b9,f617dfbcd9fd..a623ec635947 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@@ -59,15 -59,13 +59,13 @@@
#define MLX5_ADEV_NAME "mlx5_core"
+ #define MLX5_IRQ_EQ_CTRL (U8_MAX) + enum { MLX5_BOARD_ID_LEN = 64, };
enum { - /* one minute for the sake of bringup. Generally, commands must always - * complete and we may need to increase this timeout value - */ - MLX5_CMD_TIMEOUT_MSEC = 60 * 1000, MLX5_CMD_WQ_MAX_NAME = 32, };
@@@ -136,6 -134,7 +134,7 @@@ enum MLX5_REG_MCIA = 0x9014, MLX5_REG_MFRL = 0x9028, MLX5_REG_MLCR = 0x902b, + MLX5_REG_MRTC = 0x902d, MLX5_REG_MTRC_CAP = 0x9040, MLX5_REG_MTRC_CONF = 0x9041, MLX5_REG_MTRC_STDB = 0x9042, @@@ -154,6 -153,7 +153,7 @@@ MLX5_REG_MIRC = 0x9162, MLX5_REG_SBCAM = 0xB01F, MLX5_REG_RESOURCE_DUMP = 0xC000, + MLX5_REG_DTOR = 0xC00E, };
enum mlx5_qpts_trust_state { @@@ -357,6 -357,22 +357,6 @@@ struct mlx5_core_sig_ctx u32 sigerr_count; };
-enum { - MLX5_MKEY_MR = 1, - MLX5_MKEY_MW, - MLX5_MKEY_INDIRECT_DEVX, -}; - -struct mlx5_core_mkey { - u64 iova; - u64 size; - u32 key; - u32 pd; - u32 type; - struct wait_queue_head wait; - refcount_t usecount; -}; - #define MLX5_24BIT_MASK ((1 << 24) - 1)
enum mlx5_res_type { @@@ -425,6 -441,7 +425,7 @@@ struct mlx5_core_health struct work_struct report_work; struct devlink_health_reporter *fw_reporter; struct devlink_health_reporter *fw_fatal_reporter; + struct delayed_work update_fw_log_ts_work; };
struct mlx5_qp_table { @@@ -637,7 -654,7 +638,7 @@@ struct mlx5e_resources struct mlx5e_hw_objs { u32 pdn; struct mlx5_td td; - struct mlx5_core_mkey mkey; + u32 mkey; struct mlx5_sq_bfreg bfreg; } hw_objs; struct devlink_port dl_port; @@@ -736,6 -753,7 +737,7 @@@ struct mlx5_core_dev u32 qcam[MLX5_ST_SZ_DW(qcam_reg)]; u8 embedded_cpu; } caps; + struct mlx5_timeouts *timeouts; u64 sys_image_guid; phys_addr_t iseg_base; struct mlx5_init_seg __iomem *iseg; @@@ -989,6 -1007,8 +991,6 @@@ void mlx5_cmd_mbox_status(void *out, u bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); -int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); -int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); void mlx5_health_flush(struct mlx5_core_dev *dev); void mlx5_health_cleanup(struct mlx5_core_dev *dev); int mlx5_health_init(struct mlx5_core_dev *dev); @@@ -1006,11 -1026,13 +1008,11 @@@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd gfp_t flags, int npages); void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, struct mlx5_cmd_mailbox *head); -int mlx5_core_create_mkey(struct mlx5_core_dev *dev, - struct mlx5_core_mkey *mkey, - u32 *in, int inlen); -int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, - struct mlx5_core_mkey *mkey); -int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, - u32 *out, int outlen); +int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in, + int inlen); +int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey); +int mlx5_core_query_mkey(struct mlx5_core_dev *dev, u32 mkey, u32 *out, + int outlen); int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); int mlx5_pagealloc_init(struct mlx5_core_dev *dev); @@@ -1222,6 -1244,16 +1224,16 @@@ static inline int mlx5_core_native_port return MLX5_CAP_GEN(dev, native_port_num); }
+ static inline int mlx5_get_dev_index(struct mlx5_core_dev *dev) + { + int idx = MLX5_CAP_GEN(dev, native_port_num); + + if (idx >= 1 && idx <= MLX5_MAX_PORTS) + return idx - 1; + else + return PCI_FUNC(dev->pdev->devfn); + } + enum { MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, }; @@@ -1230,11 -1262,12 +1242,12 @@@ static inline bool mlx5_is_roce_init_en { struct devlink *devlink = priv_to_devlink(dev); union devlink_param_value val; + int err;
- devlink_param_driverinit_value_get(devlink, - DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, - &val); - return val.vbool; + err = devlink_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, + &val); + return err ? MLX5_CAP_GEN(dev, roce) : val.vbool; }
#endif /* MLX5_DRIVER_H */ diff --combined include/linux/mlx5/fs.h index f2c3da2006d9,7a43fec63a35..90c548aa6389 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@@ -83,8 -83,7 +83,9 @@@ enum mlx5_flow_namespace_type MLX5_FLOW_NAMESPACE_RDMA_RX, MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL, MLX5_FLOW_NAMESPACE_RDMA_TX, + MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS, + MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS, + MLX5_FLOW_NAMESPACE_PORT_SEL, };
enum { @@@ -99,6 -98,7 +100,7 @@@
struct mlx5_pkt_reformat; struct mlx5_modify_hdr; + struct mlx5_flow_definer; struct mlx5_flow_table; struct mlx5_flow_group; struct mlx5_flow_namespace; @@@ -259,6 -259,13 +261,13 @@@ struct mlx5_modify_hdr *mlx5_modify_hea void *modify_actions); void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, struct mlx5_modify_hdr *modify_hdr); + struct mlx5_flow_definer * + mlx5_create_match_definer(struct mlx5_core_dev *dev, + enum mlx5_flow_namespace_type ns_type, u16 format_id, + u32 *match_mask); + void mlx5_destroy_match_definer(struct mlx5_core_dev *dev, + struct mlx5_flow_definer *definer); + int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer);
struct mlx5_pkt_reformat_params { int type; diff --combined include/linux/mlx5/mlx5_ifc.h index 9011c4495867,0bb78c04336c..3636df90899a --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@@ -94,6 -94,7 +94,7 @@@ enum enum { MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b, MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d, + MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018, MLX5_OBJ_TYPE_MKEY = 0xff01, MLX5_OBJ_TYPE_QP = 0xff02, MLX5_OBJ_TYPE_PSV = 0xff03, @@@ -342,7 -343,7 +343,7 @@@ struct mlx5_ifc_flow_table_fields_suppo u8 outer_geneve_oam[0x1]; u8 outer_geneve_protocol_type[0x1]; u8 outer_geneve_opt_len[0x1]; - u8 reserved_at_1e[0x1]; + u8 source_vhca_port[0x1]; u8 source_eswitch_port[0x1];
u8 inner_dmac[0x1]; @@@ -393,14 -394,6 +394,14 @@@ u8 metadata_reg_c_0[0x1]; };
+struct mlx5_ifc_flow_table_fields_supported_2_bits { + u8 reserved_at_0[0xe]; + u8 bth_opcode[0x1]; + u8 reserved_at_f[0x11]; + + u8 reserved_at_20[0x60]; +}; + struct mlx5_ifc_flow_table_prop_layout_bits { u8 ft_support[0x1]; u8 reserved_at_1[0x1]; @@@ -547,7 -540,7 +548,7 @@@ struct mlx5_ifc_fte_match_set_misc_bit union mlx5_ifc_gre_key_bits gre_key;
u8 vxlan_vni[0x18]; - u8 reserved_at_b8[0x8]; + u8 bth_opcode[0x8];
u8 geneve_vni[0x18]; u8 reserved_at_d8[0x7]; @@@ -764,15 -757,7 +765,15 @@@ struct mlx5_ifc_flow_table_nic_cap_bit
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
- u8 reserved_at_e00[0x1200]; + u8 reserved_at_e00[0x700]; + + struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive_rdma; + + u8 reserved_at_1580[0x280]; + + struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_transmit_rdma; + + u8 reserved_at_1880[0x780];
u8 sw_steering_nic_rx_action_drop_icm_address[0x40];
@@@ -783,6 -768,18 +784,18 @@@ u8 reserved_at_20c0[0x5f40]; };
+ struct mlx5_ifc_port_selection_cap_bits { + u8 reserved_at_0[0x10]; + u8 port_select_flow_table[0x1]; + u8 reserved_at_11[0xf]; + + u8 reserved_at_20[0x1e0]; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_port_selection; + + u8 reserved_at_400[0x7c00]; + }; + enum { MLX5_FDB_TO_VPORT_REG_C_0 = 0x01, MLX5_FDB_TO_VPORT_REG_C_1 = 0x02, @@@ -1322,7 -1319,8 +1335,8 @@@ struct mlx5_ifc_cmd_hca_cap_bits u8 vhca_resource_manager[0x1];
u8 hca_cap_2[0x1]; - u8 reserved_at_21[0x2]; + u8 reserved_at_21[0x1]; + u8 dtor[0x1]; u8 event_on_vhca_state_teardown_request[0x1]; u8 event_on_vhca_state_in_use[0x1]; u8 event_on_vhca_state_active[0x1]; @@@ -1352,7 -1350,9 +1366,9 @@@ u8 reserved_at_b0[0x1]; u8 uplink_follow[0x1]; u8 ts_cqe_to_dest_cqn[0x1]; - u8 reserved_at_b3[0xd]; + u8 reserved_at_b3[0x7]; + u8 shampo[0x1]; + u8 reserved_at_bb[0x5];
u8 max_sgl_for_optimized_performance[0x8]; u8 log_max_cq_sz[0x8]; @@@ -1530,7 -1530,8 +1546,8 @@@ u8 uar_4k[0x1]; u8 reserved_at_241[0x9]; u8 uar_sz[0x6]; - u8 reserved_at_248[0x2]; + u8 port_selection_cap[0x1]; + u8 reserved_at_248[0x1]; u8 umem_uid_0[0x1]; u8 reserved_at_250[0x5]; u8 log_pg_sz[0x8]; @@@ -1603,7 -1604,8 +1620,8 @@@ u8 log_max_tis_per_sq[0x5];
u8 ext_stride_num_range[0x1]; - u8 reserved_at_3a1[0x2]; + u8 roce_rw_supported[0x1]; + u8 reserved_at_3a2[0x1]; u8 log_max_stride_sz_rq[0x5]; u8 reserved_at_3a8[0x3]; u8 log_min_stride_sz_rq[0x5]; @@@ -1732,7 -1734,7 +1750,7 @@@ u8 flex_parser_id_outer_first_mpls_over_gre[0x4]; u8 flex_parser_id_outer_first_mpls_over_udp_label[0x4];
- u8 reserved_at_6e0[0x10]; + u8 max_num_match_definer[0x10]; u8 sf_base_id[0x10];
u8 flex_parser_id_gtpu_dw_2[0x4]; @@@ -1747,7 -1749,7 +1765,7 @@@
u8 reserved_at_760[0x20]; u8 vhca_tunnel_commands[0x40]; - u8 reserved_at_7c0[0x40]; + u8 match_definer_format_supported[0x40]; };
struct mlx5_ifc_cmd_hca_cap_2_bits { @@@ -1766,6 -1768,7 +1784,7 @@@ enum mlx5_flow_destination_type MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1, MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2, MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6, + MLX5_FLOW_DESTINATION_TYPE_UPLINK = 0x8,
MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99, MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100, @@@ -1892,7 -1895,21 +1911,21 @@@ struct mlx5_ifc_wq_bits u8 reserved_at_139[0x4]; u8 log_wqe_stride_size[0x3];
- u8 reserved_at_140[0x4c0]; + u8 reserved_at_140[0x80]; + + u8 headers_mkey[0x20]; + + u8 shampo_enable[0x1]; + u8 reserved_at_1e1[0x4]; + u8 log_reservation_size[0x3]; + u8 reserved_at_1e8[0x5]; + u8 log_max_num_of_packets_per_reservation[0x3]; + u8 reserved_at_1f0[0x6]; + u8 log_headers_entry_size[0x2]; + u8 reserved_at_1f8[0x4]; + u8 log_headers_buffer_entry_num[0x4]; + + u8 reserved_at_200[0x400];
struct mlx5_ifc_cmd_pas_bits pas[]; }; @@@ -2823,6 -2840,40 +2856,40 @@@ struct mlx5_ifc_dropped_packet_logged_b u8 reserved_at_0[0xe0]; };
+ struct mlx5_ifc_default_timeout_bits { + u8 to_multiplier[0x3]; + u8 reserved_at_3[0x9]; + u8 to_value[0x14]; + }; + + struct mlx5_ifc_dtor_reg_bits { + u8 reserved_at_0[0x20]; + + struct mlx5_ifc_default_timeout_bits pcie_toggle_to; + + u8 reserved_at_40[0x60]; + + struct mlx5_ifc_default_timeout_bits health_poll_to; + + struct mlx5_ifc_default_timeout_bits full_crdump_to; + + struct mlx5_ifc_default_timeout_bits fw_reset_to; + + struct mlx5_ifc_default_timeout_bits flush_on_err_to; + + struct mlx5_ifc_default_timeout_bits pci_sync_update_to; + + struct mlx5_ifc_default_timeout_bits tear_down_to; + + struct mlx5_ifc_default_timeout_bits fsm_reactivate_to; + + struct mlx5_ifc_default_timeout_bits reclaim_pages_to; + + struct mlx5_ifc_default_timeout_bits reclaim_vfs_pages_to; + + u8 reserved_at_1c0[0x40]; + }; + enum { MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1, MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2, @@@ -3134,6 -3185,20 +3201,20 @@@ struct mlx5_ifc_roce_addr_layout_bits u8 reserved_at_e0[0x20]; };
+ struct mlx5_ifc_shampo_cap_bits { + u8 reserved_at_0[0x3]; + u8 shampo_log_max_reservation_size[0x5]; + u8 reserved_at_8[0x3]; + u8 shampo_log_min_reservation_size[0x5]; + u8 shampo_min_mss_size[0x10]; + + u8 reserved_at_20[0x3]; + u8 shampo_max_log_headers_entry_size[0x5]; + u8 reserved_at_28[0x18]; + + u8 reserved_at_40[0x7c0]; + }; + union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap; struct mlx5_ifc_cmd_hca_cap_2_bits cmd_hca_cap_2; @@@ -3144,6 -3209,7 +3225,7 @@@ struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap; struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap; struct mlx5_ifc_e_switch_cap_bits e_switch_cap; + struct mlx5_ifc_port_selection_cap_bits port_selection_cap; struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap; struct mlx5_ifc_qos_cap_bits qos_cap; struct mlx5_ifc_debug_cap_bits debug_cap; @@@ -3151,6 -3217,7 +3233,7 @@@ struct mlx5_ifc_tls_cap_bits tls_cap; struct mlx5_ifc_device_mem_cap_bits device_mem_cap; struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap; + struct mlx5_ifc_shampo_cap_bits shampo_cap; u8 reserved_at_0[0x8000]; };
@@@ -3325,8 -3392,9 +3408,9 @@@ enum };
enum { - MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1, - MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2, + MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO = BIT(0), + MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO = BIT(1), + MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO = BIT(2), };
enum { @@@ -3351,7 -3419,7 +3435,7 @@@ struct mlx5_ifc_tirc_bits
u8 reserved_at_80[0x4]; u8 lro_timeout_period_usecs[0x10]; - u8 lro_enable_mask[0x4]; + u8 packet_merge_mask[0x4]; u8 lro_max_ip_payload_size[0x8];
u8 reserved_at_a0[0x40]; @@@ -3533,6 -3601,18 +3617,18 @@@ enum MLX5_RQC_STATE_ERR = 0x3, };
+ enum { + MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_BYTE = 0x0, + MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE = 0x1, + MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_PAGE = 0x2, + }; + + enum { + MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_NO_MATCH = 0x0, + MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED = 0x1, + MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_FIVE_TUPLE = 0x2, + }; + struct mlx5_ifc_rqc_bits { u8 rlky[0x1]; u8 delay_drop_en[0x1]; @@@ -3565,7 -3645,13 +3661,13 @@@ u8 reserved_at_c0[0x10]; u8 hairpin_peer_vhca[0x10];
- u8 reserved_at_e0[0xa0]; + u8 reserved_at_e0[0x46]; + u8 shampo_no_match_alignment_granularity[0x2]; + u8 reserved_at_128[0x6]; + u8 shampo_match_criteria_type[0x2]; + u8 reservation_timeout[0x10]; + + u8 reserved_at_140[0x40];
struct mlx5_ifc_wq_bits wq; }; @@@ -4113,13 -4199,19 +4215,19 @@@ struct mlx5_ifc_health_buffer_bits
u8 assert_callra[0x20];
- u8 reserved_at_140[0x40]; + u8 reserved_at_140[0x20]; + + u8 time[0x20];
u8 fw_version[0x20];
u8 hw_id[0x20];
- u8 reserved_at_1c0[0x20]; + u8 rfr[0x1]; + u8 reserved_at_1c1[0x3]; + u8 valid[0x1]; + u8 severity[0x3]; + u8 reserved_at_1c8[0x18];
u8 irisc_index[0x8]; u8 synd[0x8]; @@@ -5632,6 -5724,236 +5740,236 @@@ struct mlx5_ifc_query_fte_in_bits u8 reserved_at_120[0xe0]; };
+ struct mlx5_ifc_match_definer_format_0_bits { + u8 reserved_at_0[0x100]; + + u8 metadata_reg_c_0[0x20]; + + u8 metadata_reg_c_1[0x20]; + + u8 outer_dmac_47_16[0x20]; + + u8 outer_dmac_15_0[0x10]; + u8 outer_ethertype[0x10]; + + u8 reserved_at_180[0x1]; + u8 sx_sniffer[0x1]; + u8 functional_lb[0x1]; + u8 outer_ip_frag[0x1]; + u8 outer_qp_type[0x2]; + u8 outer_encap_type[0x2]; + u8 port_number[0x2]; + u8 outer_l3_type[0x2]; + u8 outer_l4_type[0x2]; + u8 outer_first_vlan_type[0x2]; + u8 outer_first_vlan_prio[0x3]; + u8 outer_first_vlan_cfi[0x1]; + u8 outer_first_vlan_vid[0xc]; + + u8 outer_l4_type_ext[0x4]; + u8 reserved_at_1a4[0x2]; + u8 outer_ipsec_layer[0x2]; + u8 outer_l2_type[0x2]; + u8 force_lb[0x1]; + u8 outer_l2_ok[0x1]; + u8 outer_l3_ok[0x1]; + u8 outer_l4_ok[0x1]; + u8 outer_second_vlan_type[0x2]; + u8 outer_second_vlan_prio[0x3]; + u8 outer_second_vlan_cfi[0x1]; + u8 outer_second_vlan_vid[0xc]; + + u8 outer_smac_47_16[0x20]; + + u8 outer_smac_15_0[0x10]; + u8 inner_ipv4_checksum_ok[0x1]; + u8 inner_l4_checksum_ok[0x1]; + u8 outer_ipv4_checksum_ok[0x1]; + u8 outer_l4_checksum_ok[0x1]; + u8 inner_l3_ok[0x1]; + u8 inner_l4_ok[0x1]; + u8 outer_l3_ok_duplicate[0x1]; + u8 outer_l4_ok_duplicate[0x1]; + u8 outer_tcp_cwr[0x1]; + u8 outer_tcp_ece[0x1]; + u8 outer_tcp_urg[0x1]; + u8 outer_tcp_ack[0x1]; + u8 outer_tcp_psh[0x1]; + u8 outer_tcp_rst[0x1]; + u8 outer_tcp_syn[0x1]; + u8 outer_tcp_fin[0x1]; + }; + + struct mlx5_ifc_match_definer_format_22_bits { + u8 reserved_at_0[0x100]; + + u8 outer_ip_src_addr[0x20]; + + u8 outer_ip_dest_addr[0x20]; + + u8 outer_l4_sport[0x10]; + u8 outer_l4_dport[0x10]; + + u8 reserved_at_160[0x1]; + u8 sx_sniffer[0x1]; + u8 functional_lb[0x1]; + u8 outer_ip_frag[0x1]; + u8 outer_qp_type[0x2]; + u8 outer_encap_type[0x2]; + u8 port_number[0x2]; + u8 outer_l3_type[0x2]; + u8 outer_l4_type[0x2]; + u8 outer_first_vlan_type[0x2]; + u8 outer_first_vlan_prio[0x3]; + u8 outer_first_vlan_cfi[0x1]; + u8 outer_first_vlan_vid[0xc]; + + u8 metadata_reg_c_0[0x20]; + + u8 outer_dmac_47_16[0x20]; + + u8 outer_smac_47_16[0x20]; + + u8 outer_smac_15_0[0x10]; + u8 outer_dmac_15_0[0x10]; + }; + + struct mlx5_ifc_match_definer_format_23_bits { + u8 reserved_at_0[0x100]; + + u8 inner_ip_src_addr[0x20]; + + u8 inner_ip_dest_addr[0x20]; + + u8 inner_l4_sport[0x10]; + u8 inner_l4_dport[0x10]; + + u8 reserved_at_160[0x1]; + u8 sx_sniffer[0x1]; + u8 functional_lb[0x1]; + u8 inner_ip_frag[0x1]; + u8 inner_qp_type[0x2]; + u8 inner_encap_type[0x2]; + u8 port_number[0x2]; + u8 inner_l3_type[0x2]; + u8 inner_l4_type[0x2]; + u8 inner_first_vlan_type[0x2]; + u8 inner_first_vlan_prio[0x3]; + u8 inner_first_vlan_cfi[0x1]; + u8 inner_first_vlan_vid[0xc]; + + u8 tunnel_header_0[0x20]; + + u8 inner_dmac_47_16[0x20]; + + u8 inner_smac_47_16[0x20]; + + u8 inner_smac_15_0[0x10]; + u8 inner_dmac_15_0[0x10]; + }; + + struct mlx5_ifc_match_definer_format_29_bits { + u8 reserved_at_0[0xc0]; + + u8 outer_ip_dest_addr[0x80]; + + u8 outer_ip_src_addr[0x80]; + + u8 outer_l4_sport[0x10]; + u8 outer_l4_dport[0x10]; + + u8 reserved_at_1e0[0x20]; + }; + + struct mlx5_ifc_match_definer_format_30_bits { + u8 reserved_at_0[0xa0]; + + u8 outer_ip_dest_addr[0x80]; + + u8 outer_ip_src_addr[0x80]; + + u8 outer_dmac_47_16[0x20]; + + u8 outer_smac_47_16[0x20]; + + u8 outer_smac_15_0[0x10]; + u8 outer_dmac_15_0[0x10]; + }; + + struct mlx5_ifc_match_definer_format_31_bits { + u8 reserved_at_0[0xc0]; + + u8 inner_ip_dest_addr[0x80]; + + u8 inner_ip_src_addr[0x80]; + + u8 inner_l4_sport[0x10]; + u8 inner_l4_dport[0x10]; + + 
u8 reserved_at_1e0[0x20]; + }; + + struct mlx5_ifc_match_definer_format_32_bits { + u8 reserved_at_0[0xa0]; + + u8 inner_ip_dest_addr[0x80]; + + u8 inner_ip_src_addr[0x80]; + + u8 inner_dmac_47_16[0x20]; + + u8 inner_smac_47_16[0x20]; + + u8 inner_smac_15_0[0x10]; + u8 inner_dmac_15_0[0x10]; + }; + + struct mlx5_ifc_match_definer_bits { + u8 modify_field_select[0x40]; + + u8 reserved_at_40[0x40]; + + u8 reserved_at_80[0x10]; + u8 format_id[0x10]; + + u8 reserved_at_a0[0x160]; + + u8 match_mask[16][0x20]; + }; + + struct mlx5_ifc_general_obj_in_cmd_hdr_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 vhca_tunnel_id[0x10]; + u8 obj_type[0x10]; + + u8 obj_id[0x20]; + + u8 reserved_at_60[0x20]; + }; + + struct mlx5_ifc_general_obj_out_cmd_hdr_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 obj_id[0x20]; + + u8 reserved_at_60[0x20]; + }; + + struct mlx5_ifc_create_match_definer_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; + + struct mlx5_ifc_match_definer_bits obj_context; + }; + + struct mlx5_ifc_create_match_definer_out_bits { + struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; + }; + enum { MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, @@@ -6385,7 -6707,7 +6723,7 @@@ struct mlx5_ifc_modify_tir_bitmask_bit u8 reserved_at_3c[0x1]; u8 hash[0x1]; u8 reserved_at_3e[0x1]; - u8 lro[0x1]; + u8 packet_merge[0x1]; };
struct mlx5_ifc_modify_tir_out_bits { @@@ -7585,7 -7907,7 +7923,7 @@@ struct mlx5_ifc_dealloc_uar_out_bits
struct mlx5_ifc_dealloc_uar_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10];
u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@@ -8105,6 -8427,11 +8443,11 @@@ struct mlx5_ifc_create_flow_group_out_b u8 reserved_at_60[0x20]; };
+ enum { + MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_TCAM_SUBTABLE = 0x0, + MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT = 0x1, + }; + enum { MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, @@@ -8126,7 -8453,9 +8469,9 @@@ struct mlx5_ifc_create_flow_group_in_bi u8 reserved_at_60[0x20];
u8 table_type[0x8]; - u8 reserved_at_88[0x18]; + u8 reserved_at_88[0x4]; + u8 group_type[0x4]; + u8 reserved_at_90[0x10];
u8 reserved_at_a0[0x8]; u8 table_id[0x18]; @@@ -8141,7 -8470,10 +8486,10 @@@
u8 end_flow_index[0x20];
- u8 reserved_at_140[0xa0]; + u8 reserved_at_140[0x10]; + u8 match_definer_id[0x10]; + + u8 reserved_at_160[0x80];
u8 reserved_at_1e0[0x18]; u8 match_criteria_enable[0x8]; @@@ -8432,7 -8764,7 +8780,7 @@@ struct mlx5_ifc_alloc_uar_out_bits
struct mlx5_ifc_alloc_uar_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10];
u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@@ -10076,6 -10408,17 +10424,17 @@@ struct mlx5_ifc_pddr_reg_bits union mlx5_ifc_pddr_reg_page_data_auto_bits page_data; };
+ struct mlx5_ifc_mrtc_reg_bits { + u8 time_synced[0x1]; + u8 reserved_at_1[0x1f]; + + u8 reserved_at_20[0x20]; + + u8 time_h[0x20]; + + u8 time_l[0x20]; + }; + union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_bufferx_reg_bits bufferx_reg; struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; @@@ -10137,6 -10480,7 +10496,7 @@@ struct mlx5_ifc_mirc_reg_bits mirc_reg; struct mlx5_ifc_mfrl_reg_bits mfrl_reg; struct mlx5_ifc_mtutc_reg_bits mtutc_reg; + struct mlx5_ifc_mrtc_reg_bits mrtc_reg; u8 reserved_at_0[0x60e0]; };
@@@ -10414,9 -10758,16 +10774,16 @@@ struct mlx5_ifc_dcbx_param_bits u8 reserved_at_a0[0x160]; };
+ enum { + MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY = 0, + MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT, + }; + struct mlx5_ifc_lagc_bits { u8 fdb_selection_mode[0x1]; - u8 reserved_at_1[0x1c]; + u8 reserved_at_1[0x14]; + u8 port_select_mode[0x3]; + u8 reserved_at_18[0x5]; u8 lag_state[0x3];
u8 reserved_at_20[0x14]; @@@ -10630,29 -10981,6 +10997,6 @@@ struct mlx5_ifc_dealloc_memic_out_bits u8 reserved_at_40[0x40]; };
- struct mlx5_ifc_general_obj_in_cmd_hdr_bits { - u8 opcode[0x10]; - u8 uid[0x10]; - - u8 vhca_tunnel_id[0x10]; - u8 obj_type[0x10]; - - u8 obj_id[0x20]; - - u8 reserved_at_60[0x20]; - }; - - struct mlx5_ifc_general_obj_out_cmd_hdr_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 obj_id[0x20]; - - u8 reserved_at_60[0x20]; - }; - struct mlx5_ifc_umem_bits { u8 reserved_at_0[0x80];
diff --combined include/net/cfg80211.h index 27336fc70467,7c9d5db4f0e6..423f97b982ff --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@@ -739,6 -739,22 +739,22 @@@ struct cfg80211_tid_config struct cfg80211_tid_cfg tid_conf[]; };
+ /** + * struct cfg80211_fils_aad - FILS AAD data + * @macaddr: STA MAC address + * @kek: FILS KEK + * @kek_len: FILS KEK length + * @snonce: STA Nonce + * @anonce: AP Nonce + */ + struct cfg80211_fils_aad { + const u8 *macaddr; + const u8 *kek; + u8 kek_len; + const u8 *snonce; + const u8 *anonce; + }; + /** * cfg80211_get_chandef_type - return old channel type from chandef * @chandef: the channel definition @@@ -1040,6 -1056,36 +1056,36 @@@ struct cfg80211_crypto_settings enum nl80211_sae_pwe_mechanism sae_pwe; };
+ /** + * struct cfg80211_mbssid_config - AP settings for multi bssid + * + * @tx_wdev: pointer to the transmitted interface in the MBSSID set + * @index: index of this AP in the multi bssid group. + * @ema: set to true if the beacons should be sent out in EMA mode. + */ + struct cfg80211_mbssid_config { + struct wireless_dev *tx_wdev; + u8 index; + bool ema; + }; + + /** + * struct cfg80211_mbssid_elems - Multiple BSSID elements + * + * @cnt: Number of elements in array %elems. + * + * @elem: Array of multiple BSSID element(s) to be added into Beacon frames. + * @elem.data: Data for multiple BSSID elements. + * @elem.len: Length of data. + */ + struct cfg80211_mbssid_elems { + u8 cnt; + struct { + const u8 *data; + size_t len; + } elem[]; + }; + /** * struct cfg80211_beacon_data - beacon data * @head: head portion of beacon (before TIM IE) @@@ -1058,6 -1104,7 +1104,7 @@@ * @assocresp_ies_len: length of assocresp_ies in octets * @probe_resp_len: length of probe response template (@probe_resp) * @probe_resp: probe response template (AP mode only) + * @mbssid_ies: multiple BSSID elements * @ftm_responder: enable FTM responder functionality; -1 for no change * (which also implies no change in LCI/civic location data) * @lci: Measurement Report element content, starting with Measurement Token @@@ -1075,6 -1122,7 +1122,7 @@@ struct cfg80211_beacon_data const u8 *probe_resp; const u8 *lci; const u8 *civicloc; + struct cfg80211_mbssid_elems *mbssid_ies; s8 ftm_responder;
size_t head_len, tail_len; @@@ -1189,6 -1237,7 +1237,7 @@@ enum cfg80211_ap_settings_flags * @he_oper: HE operation IE (or %NULL if HE isn't enabled) * @fils_discovery: FILS discovery transmission parameters * @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters + * @mbssid_config: AP settings for multiple bssid */ struct cfg80211_ap_settings { struct cfg80211_chan_def chandef; @@@ -1221,6 -1270,7 +1270,7 @@@ struct cfg80211_he_bss_color he_bss_color; struct cfg80211_fils_discovery fils_discovery; struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp; + struct cfg80211_mbssid_config mbssid_config; };
/** @@@ -4018,6 -4068,10 +4068,10 @@@ struct mgmt_frame_regs * @set_sar_specs: Update the SAR (TX power) settings. * * @color_change: Initiate a color change. + * + * @set_fils_aad: Set FILS AAD data to the AP driver so that the driver can use + * those to decrypt (Re)Association Request and encrypt (Re)Association + * Response frame. */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@@ -4348,6 -4402,8 +4402,8 @@@ int (*color_change)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_color_change_settings *params); + int (*set_fils_aad)(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_fils_aad *fils_aad); };
/* @@@ -4981,6 -5037,13 +5037,13 @@@ struct wiphy_iftype_akm_suites * %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes * @sar_capa: SAR control capabilities * @rfkill: a pointer to the rfkill structure + * + * @mbssid_max_interfaces: maximum number of interfaces supported by the driver + * in a multiple BSSID set. This field must be set to a non-zero value + * by the driver to advertise MBSSID support. + * @ema_max_profile_periodicity: maximum profile periodicity supported by + * the driver. Setting this field to a non-zero value indicates that the + * driver supports enhanced multi-BSSID advertisements (EMA AP). */ struct wiphy { struct mutex mtx; @@@ -5125,6 -5188,9 +5188,9 @@@
struct rfkill *rfkill;
+ u8 mbssid_max_interfaces; + u8 ema_max_profile_periodicity; + char priv[] __aligned(NETDEV_ALIGN); };
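Per the new kernel-doc above, a driver advertises multiple-BSSID support by setting wiphy->mbssid_max_interfaces to a non-zero value, and EMA beaconing by additionally setting ema_max_profile_periodicity, both before wiphy_register(). A minimal sketch (the limits chosen are illustrative):

        #include <net/cfg80211.h>

        /* Called during driver setup, before wiphy_register(). */
        static void example_advertise_mbssid(struct wiphy *wiphy)
        {
                wiphy->mbssid_max_interfaces = 8;       /* up to 8 BSSIDs per set */
                wiphy->ema_max_profile_periodicity = 3; /* non-zero: EMA AP supported */
        }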
@@@ -5376,6 -5442,7 +5442,6 @@@ static inline void wiphy_unlock(struct * netdev and may otherwise be used by driver read-only, will be update * by cfg80211 on change_interface * @mgmt_registrations: list of registrations for management frames - * @mgmt_registrations_lock: lock for the list * @mgmt_registrations_need_update: mgmt registrations were updated, * need to propagate the update to the driver * @mtx: mutex used to lock data in this struct, may be used by drivers @@@ -5422,6 -5489,7 +5488,6 @@@ struct wireless_dev u32 identifier;
struct list_head mgmt_registrations; - spinlock_t mgmt_registrations_lock; u8 mgmt_registrations_need_update:1;
struct mutex mtx; @@@ -5490,7 -5558,7 +5556,7 @@@ unsigned long unprot_beacon_reported; };
- static inline u8 *wdev_address(struct wireless_dev *wdev) + static inline const u8 *wdev_address(struct wireless_dev *wdev) { if (wdev->netdev) return wdev->netdev->dev_addr; @@@ -6308,6 -6376,17 +6374,17 @@@ static inline void cfg80211_gen_new_bss u64_to_ether_addr(new_bssid_u64, new_bssid); }
+ /** + * cfg80211_get_ies_channel_number - returns the channel number from ies + * @ie: IEs + * @ielen: length of IEs + * @band: enum nl80211_band of the channel + * + * Returns the channel number, or -1 if none could be determined. + */ + int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen, + enum nl80211_band band); + /** * cfg80211_is_element_inherited - returns if element ID should be inherited * @element: element to check diff --combined include/net/sock.h index 463f390d90b3,ff4e62aa62e5..db95ceaf4d4c --- a/include/net/sock.h +++ b/include/net/sock.h @@@ -259,10 -259,11 +259,11 @@@ struct bpf_local_storage * @sk_rcvbuf: size of receive buffer in bytes * @sk_wq: sock wait queue and async head * @sk_rx_dst: receive input route used by early demux + * @sk_rx_dst_ifindex: ifindex for @sk_rx_dst + * @sk_rx_dst_cookie: cookie for @sk_rx_dst * @sk_dst_cache: destination cache * @sk_dst_pending_confirm: need to confirm neighbour * @sk_policy: flow policy - * @sk_rx_skb_cache: cache copy of recently accessed RX skb * @sk_receive_queue: incoming packets * @sk_wmem_alloc: transmit queue bytes committed * @sk_tsq_flags: TCP Small Queues flags @@@ -270,6 -271,7 +271,7 @@@ * @sk_omem_alloc: "o" is "option" or "other" * @sk_wmem_queued: persistent queue size * @sk_forward_alloc: space allocated forward + * @sk_reserved_mem: space reserved and non-reclaimable for the socket * @sk_napi_id: id of the last napi context to receive data for sk * @sk_ll_usec: usecs to busypoll when there is no data * @sk_allocation: allocation mode @@@ -329,7 -331,6 +331,6 @@@ * @sk_peek_off: current peek_offset value * @sk_send_head: front of stuff to transmit * @tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head] - * @sk_tx_skb_cache: cache copy of recently accessed TX skb * @sk_security: used by security modules * @sk_mark: generic packet mark * @sk_cgrp_data: cgroup data for this cgroup @@@ -394,7 -395,6 +395,6 @@@ struct sock atomic_t sk_drops; int sk_rcvlowat; struct sk_buff_head sk_error_queue; - struct sk_buff *sk_rx_skb_cache; struct sk_buff_head sk_receive_queue; /* * The backlog queue is special, it is always used with @@@ -413,6 -413,7 +413,7 @@@ #define sk_rmem_alloc sk_backlog.rmem_alloc
int sk_forward_alloc; + u32 sk_reserved_mem; #ifdef CONFIG_NET_RX_BUSY_POLL unsigned int sk_ll_usec; /* ===== mostly read cache line ===== */ @@@ -431,6 -432,9 +432,9 @@@ struct xfrm_policy __rcu *sk_policy[2]; #endif struct dst_entry *sk_rx_dst; + int sk_rx_dst_ifindex; + u32 sk_rx_dst_cookie; + struct dst_entry __rcu *sk_dst_cache; atomic_t sk_omem_alloc; int sk_sndbuf; @@@ -443,7 -447,6 +447,6 @@@ struct sk_buff *sk_send_head; struct rb_root tcp_rtx_queue; }; - struct sk_buff *sk_tx_skb_cache; struct sk_buff_head sk_write_queue; __s32 sk_peek_off; int sk_write_pending; @@@ -1208,7 -1211,7 +1211,7 @@@ struct proto #endif
bool (*stream_memory_free)(const struct sock *sk, int wake); - bool (*stream_memory_read)(const struct sock *sk); + bool (*sock_is_readable)(struct sock *sk); /* Memory pressure */ void (*enter_memory_pressure)(struct sock *sk); void (*leave_memory_pressure)(struct sock *sk); @@@ -1237,7 -1240,7 +1240,7 @@@ unsigned int useroffset; /* Usercopy region offset */ unsigned int usersize; /* Usercopy region size */
- struct percpu_counter *orphan_count; + unsigned int __percpu *orphan_count;
struct request_sock_ops *rsk_prot; struct timewait_sock_ops *twsk_prot; @@@ -1518,20 -1521,49 +1521,49 @@@ sk_rmem_schedule(struct sock *sk, struc skb_pfmemalloc(skb); }
+ static inline int sk_unused_reserved_mem(const struct sock *sk) + { + int unused_mem; + + if (likely(!sk->sk_reserved_mem)) + return 0; + + unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued - + atomic_read(&sk->sk_rmem_alloc); + + return unused_mem > 0 ? unused_mem : 0; + } + static inline void sk_mem_reclaim(struct sock *sk) { + int reclaimable; + if (!sk_has_account(sk)) return; - if (sk->sk_forward_alloc >= SK_MEM_QUANTUM) - __sk_mem_reclaim(sk, sk->sk_forward_alloc); + + reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk); + + if (reclaimable >= SK_MEM_QUANTUM) + __sk_mem_reclaim(sk, reclaimable); + } + + static inline void sk_mem_reclaim_final(struct sock *sk) + { + sk->sk_reserved_mem = 0; + sk_mem_reclaim(sk); }
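sk_unused_reserved_mem() keeps memory that was reserved for a socket out of the forward-alloc reclaim paths, so a pre-charged socket does not have to go back to the global accounting under pressure. The sk_reserved_mem field is populated from a socket option in the companion patches; a userspace sketch, assuming an SO_RESERVE_MEM option with the value used by that uAPI change:

#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_RESERVE_MEM
#define SO_RESERVE_MEM 73	/* assumption: value from the companion uAPI patch */
#endif

static int reserve_sock_mem(int fd, int bytes)
{
	/* Pre-charge this much memory to the socket; sk_unused_reserved_mem()
	 * then excludes it from sk_mem_reclaim()/sk_mem_reclaim_partial(). */
	if (setsockopt(fd, SOL_SOCKET, SO_RESERVE_MEM, &bytes, sizeof(bytes)) < 0) {
		perror("setsockopt(SO_RESERVE_MEM)");
		return -1;
	}
	return 0;
}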
static inline void sk_mem_reclaim_partial(struct sock *sk) { + int reclaimable; + if (!sk_has_account(sk)) return; - if (sk->sk_forward_alloc > SK_MEM_QUANTUM) - __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1); + + reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk); + + if (reclaimable > SK_MEM_QUANTUM) + __sk_mem_reclaim(sk, reclaimable - 1); }
static inline void sk_mem_charge(struct sock *sk, int size) @@@ -1543,9 -1575,12 +1575,12 @@@
static inline void sk_mem_uncharge(struct sock *sk, int size) { + int reclaimable; + if (!sk_has_account(sk)) return; sk->sk_forward_alloc += size; + reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
/* Avoid a possible overflow. * TCP send queues can make this happen, if sk_mem_reclaim() @@@ -1554,22 -1589,14 +1589,14 @@@ * If we reach 2 MBytes, reclaim 1 MBytes right now, there is * no need to hold that much forward allocation anyway. */ - if (unlikely(sk->sk_forward_alloc >= 1 << 21)) + if (unlikely(reclaimable >= 1 << 21)) __sk_mem_reclaim(sk, 1 << 20); }
- DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key); static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) { sk_wmem_queued_add(sk, -skb->truesize); sk_mem_uncharge(sk, skb->truesize); - if (static_branch_unlikely(&tcp_tx_skb_cache_key) && - !sk->sk_tx_skb_cache && !skb_cloned(skb)) { - skb_ext_reset(skb); - skb_zcopy_clear(skb, true); - sk->sk_tx_skb_cache = skb; - return; - } __kfree_skb(skb); }
@@@ -1889,10 -1916,8 +1916,8 @@@ static inline void sk_rx_queue_set(stru if (skb_rx_queue_recorded(skb)) { u16 rx_queue = skb_get_rx_queue(skb);
- if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING)) - return; - - sk->sk_rx_queue_mapping = rx_queue; + if (unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue)) + WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue); } #endif } @@@ -1900,15 -1925,19 +1925,19 @@@ static inline void sk_rx_queue_clear(struct sock *sk) { #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING - sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING; + WRITE_ONCE(sk->sk_rx_queue_mapping, NO_QUEUE_MAPPING); #endif }
static inline int sk_rx_queue_get(const struct sock *sk) { #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING - if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING) - return sk->sk_rx_queue_mapping; + if (sk) { + int res = READ_ONCE(sk->sk_rx_queue_mapping); + + if (res != NO_QUEUE_MAPPING) + return res; + } #endif
return -1; @@@ -2388,13 -2417,11 +2417,11 @@@ static inline void sk_stream_moderate_s return;
val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1); + val = max_t(u32, val, sk_unused_reserved_mem(sk));
WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF)); }
- struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, - bool force_schedule); - /** * sk_page_frag - return an appropriate page_frag * @sk: socket @@@ -2608,7 -2635,6 +2635,6 @@@ static inline void skb_setup_tx_timesta &skb_shinfo(skb)->tskey); }
- DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key); /** * sk_eat_skb - Release a skb if it is no longer needed * @sk: socket to eat this skb from @@@ -2620,12 -2646,6 +2646,6 @@@ static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb) { __skb_unlink(skb, &sk->sk_receive_queue); - if (static_branch_unlikely(&tcp_rx_skb_cache_key) && - !sk->sk_rx_skb_cache) { - sk->sk_rx_skb_cache = skb; - skb_orphan(skb); - return; - } __kfree_skb(skb); }
@@@ -2820,10 -2840,8 +2840,15 @@@ void sock_set_sndtimeo(struct sock *sk
int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
+ int sock_get_timeout(long timeo, void *optval, bool old_timeval); + int sock_copy_user_timeval(struct __kernel_sock_timeval *tv, + sockptr_t optval, int optlen, bool old_timeval); + +static inline bool sk_is_readable(struct sock *sk) +{ + if (sk->sk_prot->sock_is_readable) + return sk->sk_prot->sock_is_readable(sk); + return false; +} ++ #endif /* _SOCK_H */ diff --combined include/net/tls.h index 01d2e3744393,adab19a8aed7..07ff59e99cb3 --- a/include/net/tls.h +++ b/include/net/tls.h @@@ -66,7 -66,7 +66,7 @@@ #define MAX_IV_SIZE 16 #define TLS_MAX_REC_SEQ_SIZE 8
- /* For AES-CCM, the full 16-bytes of IV is made of '4' fields of given sizes. + /* For CCM mode, the full 16-bytes of IV is made of '4' fields of given sizes. * * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3] * @@@ -74,6 -74,7 +74,7 @@@ * Hence b0 contains (3 - 1) = 2. */ #define TLS_AES_CCM_IV_B0_BYTE 2 + #define TLS_SM4_CCM_IV_B0_BYTE 2
#define __TLS_INC_STATS(net, field) \ __SNMP_INC_STATS((net)->mib.tls_statistics, field) @@@ -220,6 -221,8 +221,8 @@@ union tls_crypto_context struct tls12_crypto_info_aes_gcm_128 aes_gcm_128; struct tls12_crypto_info_aes_gcm_256 aes_gcm_256; struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305; + struct tls12_crypto_info_sm4_gcm sm4_gcm; + struct tls12_crypto_info_sm4_ccm sm4_ccm; }; };
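With tls12_crypto_info_sm4_gcm and tls12_crypto_info_sm4_ccm added to the crypto-context union, userspace can select the SM4 ciphers through the usual kTLS setsockopt path. A sketch for SM4-GCM transmit offload; it assumes a connected TCP socket, key material negotiated by a userspace handshake, and the SM4 constants from the matching uapi update:

#include <string.h>
#include <sys/socket.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282	/* not exported by all libc headers */
#endif

static int enable_ktls_sm4_gcm_tx(int fd, const unsigned char *key,
				  const unsigned char *iv,
				  const unsigned char *salt,
				  const unsigned char *rec_seq)
{
	struct tls12_crypto_info_sm4_gcm ci;

	memset(&ci, 0, sizeof(ci));
	ci.info.version = TLS_1_3_VERSION;
	ci.info.cipher_type = TLS_CIPHER_SM4_GCM;
	memcpy(ci.key, key, TLS_CIPHER_SM4_GCM_KEY_SIZE);
	memcpy(ci.iv, iv, TLS_CIPHER_SM4_GCM_IV_SIZE);
	memcpy(ci.salt, salt, TLS_CIPHER_SM4_GCM_SALT_SIZE);
	memcpy(ci.rec_seq, rec_seq, TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE);

	if (setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls")) < 0)
		return -1;
	return setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}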
@@@ -375,7 -378,7 +378,7 @@@ void tls_sw_release_resources_rx(struc void tls_sw_free_ctx_rx(struct tls_context *tls_ctx); int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len); -bool tls_sw_stream_read(const struct sock *sk); +bool tls_sw_sock_is_readable(struct sock *sk); ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); diff --combined kernel/bpf/arraymap.c index 447def540544,5e1ccfae916b..c7a5be3bf8be --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@@ -645,7 -645,7 +645,7 @@@ static const struct bpf_iter_seq_info i .seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info), };
- static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn, + static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn, void *callback_ctx, u64 flags) { u32 i, key, num_elems = 0; @@@ -668,9 -668,8 +668,8 @@@ val = array->value + array->elem_size * i; num_elems++; key = i; - ret = BPF_CAST_CALL(callback_fn)((u64)(long)map, - (u64)(long)&key, (u64)(long)val, - (u64)(long)callback_ctx, 0); + ret = callback_fn((u64)(long)map, (u64)(long)&key, + (u64)(long)val, (u64)(long)callback_ctx, 0); /* return value: 0 - continue, 1 - stop and return */ if (ret) break; @@@ -1072,7 -1071,6 +1071,7 @@@ static struct bpf_map *prog_array_map_a INIT_WORK(&aux->work, prog_array_map_clear_deferred); INIT_LIST_HEAD(&aux->poke_progs); mutex_init(&aux->poke_mutex); + spin_lock_init(&aux->owner.lock);
map = array_map_alloc(attr); if (IS_ERR(map)) { diff --combined kernel/bpf/core.c index 6e3ae90ad107,ea8a468dbded..ded9163185d1 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@@ -524,7 -524,6 +524,7 @@@ int bpf_jit_enable __read_mostly = IS int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON); int bpf_jit_harden __read_mostly; long bpf_jit_limit __read_mostly; +long bpf_jit_limit_max __read_mostly;
static void bpf_prog_ksym_set_addr(struct bpf_prog *prog) @@@ -818,8 -817,7 +818,8 @@@ u64 __weak bpf_jit_alloc_exec_limit(voi static int __init bpf_jit_charge_init(void) { /* Only used as heuristic here to derive limit. */ - bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2, + bpf_jit_limit_max = bpf_jit_alloc_exec_limit(); + bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2, PAGE_SIZE), LONG_MAX); return 0; } @@@ -1823,26 -1821,20 +1823,26 @@@ static unsigned int __bpf_prog_ret0_war bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp) { + bool ret; + if (fp->kprobe_override) return false;
- if (!array->aux->type) { + spin_lock(&array->aux->owner.lock); + + if (!array->aux->owner.type) { /* There's no owner yet where we could check for * compatibility. */ - array->aux->type = fp->type; - array->aux->jited = fp->jited; - return true; + array->aux->owner.type = fp->type; + array->aux->owner.jited = fp->jited; + ret = true; + } else { + ret = array->aux->owner.type == fp->type && + array->aux->owner.jited == fp->jited; } - - return array->aux->type == fp->type && - array->aux->jited == fp->jited; + spin_unlock(&array->aux->owner.lock); + return ret; }
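The arraymap.c hunk earlier drops the BPF_CAST_CALL cast in favour of the typed bpf_callback_t when bpf_for_each_array_elem() invokes its callback. From the BPF program side this is the callback handed to the bpf_for_each_map_elem() helper; a program-side sketch, assuming the usual libbpf includes, with hypothetical names:

/* Callback run once per array element; returning 1 stops the walk. */
static long count_nonzero(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
{
	__u64 *nonzero = ctx;

	if (*val)
		(*nonzero)++;
	return 0;	/* 0 = continue, 1 = stop */
}

/* In the program body:
 *	__u64 nonzero = 0;
 *	bpf_for_each_map_elem(&my_array, count_nonzero, &nonzero, 0);
 */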
static int bpf_check_tail_call(const struct bpf_prog *fp) @@@ -2365,6 -2357,11 +2365,11 @@@ const struct bpf_func_proto * __weak bp return NULL; }
+ const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void) + { + return NULL; + } + u64 __weak bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) diff --combined net/batman-adv/bridge_loop_avoidance.c index 17687848daec,7242b32fff80..2ed9496fc41f --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@@ -254,7 -254,7 +254,7 @@@ batadv_claim_hash_find(struct batadv_pr * Return: backbone gateway if found or NULL otherwise */ static struct batadv_bla_backbone_gw * - batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr, + batadv_backbone_hash_find(struct batadv_priv *bat_priv, const u8 *addr, unsigned short vid) { struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; @@@ -336,7 -336,7 +336,7 @@@ batadv_bla_del_backbone_claims(struct b * @vid: the VLAN ID * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...) */ - static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac, + static void batadv_bla_send_claim(struct batadv_priv *bat_priv, const u8 *mac, unsigned short vid, int claimtype) { struct sk_buff *skb; @@@ -488,7 -488,7 +488,7 @@@ static void batadv_bla_loopdetect_repor * Return: the (possibly created) backbone gateway or NULL on error */ static struct batadv_bla_backbone_gw * - batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig, + batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, const u8 *orig, unsigned short vid, bool own_backbone) { struct batadv_bla_backbone_gw *entry; @@@ -926,7 -926,7 +926,7 @@@ static bool batadv_handle_request(struc */ static bool batadv_handle_unclaim(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, - u8 *backbone_addr, u8 *claim_addr, + const u8 *backbone_addr, const u8 *claim_addr, unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw; @@@ -964,7 -964,7 +964,7 @@@ */ static bool batadv_handle_claim(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, - u8 *backbone_addr, u8 *claim_addr, + const u8 *backbone_addr, const u8 *claim_addr, unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw; @@@ -1560,14 -1560,10 +1560,14 @@@ int batadv_bla_init(struct batadv_priv return 0;
bat_priv->bla.claim_hash = batadv_hash_new(128); - bat_priv->bla.backbone_hash = batadv_hash_new(32); + if (!bat_priv->bla.claim_hash) + return -ENOMEM;
- if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash) + bat_priv->bla.backbone_hash = batadv_hash_new(32); + if (!bat_priv->bla.backbone_hash) { + batadv_hash_destroy(bat_priv->bla.claim_hash); return -ENOMEM; + }
batadv_hash_set_lock_class(bat_priv->bla.claim_hash, &batadv_claim_hash_lock_class_key); @@@ -2130,7 -2126,7 +2130,7 @@@ batadv_bla_claim_dump_entry(struct sk_b struct batadv_hard_iface *primary_if, struct batadv_bla_claim *claim) { - u8 *primary_addr = primary_if->net_dev->dev_addr; + const u8 *primary_addr = primary_if->net_dev->dev_addr; u16 backbone_crc; bool is_own; void *hdr; @@@ -2298,7 -2294,7 +2298,7 @@@ batadv_bla_backbone_dump_entry(struct s struct batadv_hard_iface *primary_if, struct batadv_bla_backbone_gw *backbone_gw) { - u8 *primary_addr = primary_if->net_dev->dev_addr; + const u8 *primary_addr = primary_if->net_dev->dev_addr; u16 backbone_crc; bool is_own; int msecs; diff --combined net/core/dev.c index eb3a366bf212,e8754560e641..edeb811c454e --- a/net/core/dev.c +++ b/net/core/dev.c @@@ -140,7 -140,7 +140,7 @@@ #include <linux/if_macvlan.h> #include <linux/errqueue.h> #include <linux/hrtimer.h> - #include <linux/netfilter_ingress.h> + #include <linux/netfilter_netdev.h> #include <linux/crash_dump.h> #include <linux/sctp.h> #include <net/udp_tunnel.h> @@@ -303,6 -303,12 +303,12 @@@ static struct netdev_name_node *netdev_ return NULL; }
+ bool netdev_name_in_use(struct net *net, const char *name) + { + return netdev_name_node_lookup(net, name); + } + EXPORT_SYMBOL(netdev_name_in_use); + int netdev_name_node_alt_create(struct net_device *dev, const char *name) { struct netdev_name_node *name_node; @@@ -1133,7 -1139,7 +1139,7 @@@ static int __dev_alloc_name(struct net }
snprintf(buf, IFNAMSIZ, name, i); - if (!__dev_get_by_name(net, buf)) + if (!netdev_name_in_use(net, buf)) return i;
/* It is possible to run out of possible slots @@@ -1187,7 -1193,7 +1193,7 @@@ static int dev_get_valid_name(struct ne
if (strchr(name, '%')) return dev_alloc_name_ns(net, dev, name); - else if (__dev_get_by_name(net, name)) + else if (netdev_name_in_use(net, name)) return -EEXIST; else if (dev->name != name) strlcpy(dev->name, name, IFNAMSIZ); @@@ -1290,8 -1296,8 +1296,8 @@@ rollback old_assign_type = NET_NAME_RENAMED; goto rollback; } else { - pr_err("%s: name change rollback failed: %d\n", - dev->name, ret); + netdev_err(dev, "name change rollback failed: %d\n", + ret); } }
@@@ -2345,7 -2351,7 +2351,7 @@@ static void netif_setup_tc(struct net_d
/* If TC0 is invalidated disable TC mapping */ if (tc->offset + tc->count > txq) { - pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); + netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); dev->num_tc = 0; return; } @@@ -2356,8 -2362,8 +2362,8 @@@
tc = &dev->tc_to_txq[q]; if (tc->offset + tc->count > txq) { - pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n", - i, q); + netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n", + i, q); netdev_set_prio_tc_map(dev, i, 0); } } @@@ -2921,6 -2927,8 +2927,8 @@@ int netif_set_real_num_tx_queues(struc if (dev->num_tc) netif_setup_tc(dev, txq);
+ dev_qdisc_change_real_num_tx(dev, txq); + dev->real_num_tx_queues = txq;
if (disabling) { @@@ -3163,12 -3171,6 +3171,12 @@@ static u16 skb_tx_hash(const struct net
qoffset = sb_dev->tc_to_txq[tc].offset; qcount = sb_dev->tc_to_txq[tc].count; + if (unlikely(!qcount)) { + net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n", + sb_dev->name, qoffset, tc); + qoffset = 0; + qcount = dev->real_num_tx_queues; + } }
if (skb_rx_queue_recorded(skb)) { @@@ -3414,7 -3416,7 +3422,7 @@@ EXPORT_SYMBOL(__skb_gso_segment) #ifdef CONFIG_BUG static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb) { - pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>"); + netdev_err(dev, "hw csum failure\n"); skb_dump(KERN_ERR, skb, true); dump_stack(); } @@@ -3912,8 -3914,7 +3920,8 @@@ int dev_loopback_xmit(struct net *net, skb_reset_mac_header(skb); __skb_pull(skb, skb_network_offset(skb)); skb->pkt_type = PACKET_LOOPBACK; - skb->ip_summed = CHECKSUM_UNNECESSARY; + if (skb->ip_summed == CHECKSUM_NONE) + skb->ip_summed = CHECKSUM_UNNECESSARY; WARN_ON(!skb_dst(skb)); skb_dst_force(skb); netif_rx_ni(skb); @@@ -3925,6 -3926,7 +3933,7 @@@ EXPORT_SYMBOL(dev_loopback_xmit) static struct sk_buff * sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) { + #ifdef CONFIG_NET_CLS_ACT struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress); struct tcf_result cl_res;
@@@ -3960,6 -3962,7 +3969,7 @@@ default: break; } + #endif /* CONFIG_NET_CLS_ACT */
return skb; } @@@ -4153,13 -4156,20 +4163,20 @@@ static int __dev_queue_xmit(struct sk_b qdisc_pkt_len_init(skb); #ifdef CONFIG_NET_CLS_ACT skb->tc_at_ingress = 0; - # ifdef CONFIG_NET_EGRESS + #endif + #ifdef CONFIG_NET_EGRESS if (static_branch_unlikely(&egress_needed_key)) { + if (nf_hook_egress_active()) { + skb = nf_hook_egress(skb, &rc, dev); + if (!skb) + goto out; + } + nf_skip_egress(skb, true); skb = sch_handle_egress(skb, &rc, dev); if (!skb) goto out; + nf_skip_egress(skb, false); } - # endif #endif /* If device/qdisc don't need skb->dst, release it right now while * its hot in this cpu cache. @@@ -5301,6 -5311,7 +5318,7 @@@ skip_taps if (static_branch_unlikely(&ingress_needed_key)) { bool another = false;
+ nf_skip_egress(skb, true); skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev, &another); if (another) @@@ -5308,6 -5319,7 +5326,7 @@@ if (!skb) goto out;
+ nf_skip_egress(skb, false); if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0) goto out; } @@@ -5844,7 -5856,7 +5863,7 @@@ static void gro_normal_one(struct napi_ gro_normal_list(napi); }
- static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb) + static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb) { struct packet_offload *ptype; __be16 type = skb->protocol; @@@ -5873,12 -5885,11 +5892,11 @@@ if (err) { WARN_ON(&ptype->list == head); kfree_skb(skb); - return NET_RX_SUCCESS; + return; }
out: gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count); - return NET_RX_SUCCESS; }
static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index, @@@ -6905,19 -6916,25 +6923,25 @@@ EXPORT_SYMBOL(netif_napi_add)
void napi_disable(struct napi_struct *n) { + unsigned long val, new; + might_sleep(); set_bit(NAPI_STATE_DISABLE, &n->state);
- while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) - msleep(1); - while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state)) - msleep(1); + do { + val = READ_ONCE(n->state); + if (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) { + usleep_range(20, 200); + continue; + } + + new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC; + new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL); + } while (cmpxchg(&n->state, val, new) != val);
hrtimer_cancel(&n->timer);
- clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state); clear_bit(NAPI_STATE_DISABLE, &n->state); - clear_bit(NAPI_STATE_THREADED, &n->state); } EXPORT_SYMBOL(napi_disable);
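The reworked napi_disable() above keeps the same contract for drivers: it still blocks until any in-flight poll has finished, it just claims the SCHED and NPSVC bits with a single cmpxchg() loop over the whole state word instead of two test_and_set_bit() loops. Typical driver usage is unchanged; a sketch of a stop path with a hypothetical private struct:

static int my_ndo_stop(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	netif_tx_disable(dev);
	napi_disable(&priv->napi);	/* returns only once the current poll is done */
	/* ... quiesce and free hardware resources ... */
	return 0;
}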
@@@ -6995,8 -7012,8 +7019,8 @@@ static int __napi_poll(struct napi_stru }
if (unlikely(work > weight)) - pr_err_once("NAPI poll function %pS returned %d, exceeding its budget of %d.\n", - n->poll, work, weight); + netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n", + n->poll, work, weight);
if (likely(work < weight)) return work; @@@ -8550,8 -8567,7 +8574,7 @@@ static int __dev_set_promiscuity(struc dev->flags &= ~IFF_PROMISC; else { dev->promiscuity -= inc; - pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n", - dev->name); + netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n"); return -EOVERFLOW; } } @@@ -8621,8 -8637,7 +8644,7 @@@ static int __dev_set_allmulti(struct ne dev->flags &= ~IFF_ALLMULTI; else { dev->allmulti -= inc; - pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n", - dev->name); + netdev_warn(dev, "allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n"); return -EOVERFLOW; } } @@@ -9159,14 -9174,11 +9181,11 @@@ int dev_get_port_parent_id(struct net_d }
err = devlink_compat_switch_id_get(dev, ppid); - if (!err || err != -EOPNOTSUPP) + if (!recurse || err != -EOPNOTSUPP) return err;
- if (!recurse) - return -EOPNOTSUPP; - netdev_for_each_lower_dev(dev, lower_dev, iter) { - err = dev_get_port_parent_id(lower_dev, ppid, recurse); + err = dev_get_port_parent_id(lower_dev, ppid, true); if (err) break; if (!first.id_len) @@@ -9910,6 -9922,11 +9929,11 @@@ static netdev_features_t netdev_fix_fea } }
+ if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) { + netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n"); + features &= ~NETIF_F_LRO; + } + if (features & NETIF_F_HW_TLS_TX) { bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) == (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); @@@ -10867,7 -10884,7 +10891,7 @@@ struct net_device *alloc_netdev_mqs(in if (!dev->ethtool_ops) dev->ethtool_ops = &default_ethtool_ops;
- nf_hook_ingress_init(dev); + nf_hook_netdev_init(dev);
return dev;
@@@ -11153,7 -11170,7 +11177,7 @@@ int __dev_change_net_namespace(struct n * we can use it in the destination network namespace. */ err = -EEXIST; - if (__dev_get_by_name(net, dev->name)) { + if (netdev_name_in_use(net, dev->name)) { /* We get here if we can't use the current device name */ if (!pat) goto out; @@@ -11506,7 -11523,7 +11530,7 @@@ static void __net_exit default_device_e
/* Push remaining network devices to init_net */ snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); - if (__dev_get_by_name(&init_net, fb_name)) + if (netdev_name_in_use(&init_net, fb_name)) snprintf(fb_name, IFNAMSIZ, "dev%%d"); err = dev_change_net_namespace(dev, &init_net, fb_name); if (err) { diff --combined net/core/net-sysfs.c index b2e49eb7001d,d6e4e0b43beb..9c01c642cf9e --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@@ -175,6 -175,14 +175,14 @@@ static int change_carrier(struct net_de static ssize_t carrier_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { + struct net_device *netdev = to_net_dev(dev); + + /* The check is also done in change_carrier; this helps returning early + * without hitting the trylock/restart in netdev_store. + */ + if (!netdev->netdev_ops->ndo_change_carrier) + return -EOPNOTSUPP; + return netdev_store(dev, attr, buf, len, change_carrier); }
@@@ -196,6 -204,12 +204,12 @@@ static ssize_t speed_show(struct devic struct net_device *netdev = to_net_dev(dev); int ret = -EINVAL;
+ /* The check is also done in __ethtool_get_link_ksettings; this helps + * returning early without hitting the trylock/restart below. + */ + if (!netdev->ethtool_ops->get_link_ksettings) + return ret; + if (!rtnl_trylock()) return restart_syscall();
@@@ -216,6 -230,12 +230,12 @@@ static ssize_t duplex_show(struct devic struct net_device *netdev = to_net_dev(dev); int ret = -EINVAL;
+ /* The check is also done in __ethtool_get_link_ksettings; this helps + * returning early without hitting the trylock/restart below. + */ + if (!netdev->ethtool_ops->get_link_ksettings) + return ret; + if (!rtnl_trylock()) return restart_syscall();
@@@ -468,6 -488,14 +488,14 @@@ static ssize_t proto_down_store(struct struct device_attribute *attr, const char *buf, size_t len) { + struct net_device *netdev = to_net_dev(dev); + + /* The check is also done in change_proto_down; this helps returning + * early without hitting the trylock/restart in netdev_store. + */ + if (!netdev->netdev_ops->ndo_change_proto_down) + return -EOPNOTSUPP; + return netdev_store(dev, attr, buf, len, change_proto_down); } NETDEVICE_SHOW_RW(proto_down, fmt_dec); @@@ -478,6 -506,12 +506,12 @@@ static ssize_t phys_port_id_show(struc struct net_device *netdev = to_net_dev(dev); ssize_t ret = -EINVAL;
+ /* The check is also done in dev_get_phys_port_id; this helps returning + * early without hitting the trylock/restart below. + */ + if (!netdev->netdev_ops->ndo_get_phys_port_id) + return -EOPNOTSUPP; + if (!rtnl_trylock()) return restart_syscall();
@@@ -500,6 -534,13 +534,13 @@@ static ssize_t phys_port_name_show(stru struct net_device *netdev = to_net_dev(dev); ssize_t ret = -EINVAL;
+ /* The checks are also done in dev_get_phys_port_name; this helps + * returning early without hitting the trylock/restart below. + */ + if (!netdev->netdev_ops->ndo_get_phys_port_name && + !netdev->netdev_ops->ndo_get_devlink_port) + return -EOPNOTSUPP; + if (!rtnl_trylock()) return restart_syscall();
@@@ -522,6 -563,14 +563,14 @@@ static ssize_t phys_switch_id_show(stru struct net_device *netdev = to_net_dev(dev); ssize_t ret = -EINVAL;
+ /* The checks are also done in dev_get_phys_port_name; this helps + * returning early without hitting the trylock/restart below. This works + * because recurse is false when calling dev_get_port_parent_id. + */ + if (!netdev->netdev_ops->ndo_get_port_parent_id && + !netdev->netdev_ops->ndo_get_devlink_port) + return -EOPNOTSUPP; + if (!rtnl_trylock()) return restart_syscall();
@@@ -1226,6 -1275,12 +1275,12 @@@ static ssize_t tx_maxrate_store(struct if (!capable(CAP_NET_ADMIN)) return -EPERM;
+ /* The check is also done later; this helps returning early without + * hitting the trylock/restart below. + */ + if (!dev->netdev_ops->ndo_set_tx_maxrate) + return -EOPNOTSUPP; + err = kstrtou32(buf, 10, &rate); if (err < 0) return err; @@@ -1869,7 -1924,7 +1924,7 @@@ static struct class net_class __ro_afte .get_ownership = net_get_ownership, };
- #ifdef CONFIG_OF_NET + #ifdef CONFIG_OF static int of_dev_node_match(struct device *dev, const void *data) { for (; dev; dev = dev->parent) { @@@ -1973,9 -2028,9 +2028,9 @@@ int netdev_register_kobject(struct net_ int netdev_change_owner(struct net_device *ndev, const struct net *net_old, const struct net *net_new) { + kuid_t old_uid = GLOBAL_ROOT_UID, new_uid = GLOBAL_ROOT_UID; + kgid_t old_gid = GLOBAL_ROOT_GID, new_gid = GLOBAL_ROOT_GID; struct device *dev = &ndev->dev; - kuid_t old_uid, new_uid; - kgid_t old_gid, new_gid; int error;
net_ns_get_ownership(net_old, &old_uid, &old_gid); diff --combined net/core/skbuff.c index fe9358437380,74601bbc56ac..09b8cf8ab234 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@@ -80,7 -80,6 +80,7 @@@ #include <linux/indirect_call_wrapper.h>
#include "datagram.h" +#include "sock_destructor.h"
struct kmem_cache *skbuff_head_cache __ro_after_init; static struct kmem_cache *skbuff_fclone_cache __ro_after_init; @@@ -135,34 -134,31 +135,31 @@@ struct napi_alloc_cache static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache); static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
- static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask, - unsigned int align_mask) + void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) { struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
- return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask); - } - - void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) - { fragsz = SKB_DATA_ALIGN(fragsz);
- return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask); + return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask); } EXPORT_SYMBOL(__napi_alloc_frag_align);
void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) { - struct page_frag_cache *nc; void *data;
fragsz = SKB_DATA_ALIGN(fragsz); if (in_hardirq() || irqs_disabled()) { - nc = this_cpu_ptr(&netdev_alloc_cache); + struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache); + data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask); } else { + struct napi_alloc_cache *nc; + local_bh_disable(); - data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask); + nc = this_cpu_ptr(&napi_alloc_cache); + data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask); local_bh_enable(); } return data; @@@ -398,8 -394,9 +395,9 @@@ struct sk_buff *__alloc_skb(unsigned in { struct kmem_cache *cache; struct sk_buff *skb; - u8 *data; + unsigned int osize; bool pfmemalloc; + u8 *data;
cache = (flags & SKB_ALLOC_FCLONE) ? skbuff_fclone_cache : skbuff_head_cache; @@@ -431,7 -428,8 +429,8 @@@ * Put skb_shared_info exactly at the end of allocated zone, * to allow max possible filling before reallocation. */ - size = SKB_WITH_OVERHEAD(ksize(data)); + osize = ksize(data); + size = SKB_WITH_OVERHEAD(osize); prefetchw(data + size);
/* @@@ -440,7 -438,7 +439,7 @@@ * the tail pointer in struct sk_buff! */ memset(skb, 0, offsetof(struct sk_buff, tail)); - __build_skb_around(skb, data, 0); + __build_skb_around(skb, data, osize); skb->pfmemalloc = pfmemalloc;
if (flags & SKB_ALLOC_FCLONE) { @@@ -1805,39 -1803,30 +1804,39 @@@ EXPORT_SYMBOL(skb_realloc_headroom) struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) { int delta = headroom - skb_headroom(skb); + int osize = skb_end_offset(skb); + struct sock *sk = skb->sk;
if (WARN_ONCE(delta <= 0, "%s is expecting an increase in the headroom", __func__)) return skb;
- /* pskb_expand_head() might crash, if skb is shared */ - if (skb_shared(skb)) { + delta = SKB_DATA_ALIGN(delta); + /* pskb_expand_head() might crash, if skb is shared. */ + if (skb_shared(skb) || !is_skb_wmem(skb)) { struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
- if (likely(nskb)) { - if (skb->sk) - skb_set_owner_w(nskb, skb->sk); - consume_skb(skb); - } else { - kfree_skb(skb); - } + if (unlikely(!nskb)) + goto fail; + + if (sk) + skb_set_owner_w(nskb, sk); + consume_skb(skb); skb = nskb; } - if (skb && - pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) { - kfree_skb(skb); - skb = NULL; + if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) + goto fail; + + if (sk && is_skb_wmem(skb)) { + delta = skb_end_offset(skb) - osize; + refcount_add(delta, &sk->sk_wmem_alloc); + skb->truesize += delta; } return skb; + +fail: + kfree_skb(skb); + return NULL; } EXPORT_SYMBOL(skb_expand_head);
diff --combined net/ipv4/tcp.c index f5c336f8b0c8,d0b848ff5c0f..c262cb3e4f35 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@@ -287,8 -287,8 +287,8 @@@ enum TCP_CMSG_TS = 2 };
- struct percpu_counter tcp_orphan_count; - EXPORT_SYMBOL_GPL(tcp_orphan_count); + DEFINE_PER_CPU(unsigned int, tcp_orphan_count); + EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);
long sysctl_tcp_mem[3] __read_mostly; EXPORT_SYMBOL(sysctl_tcp_mem); @@@ -325,11 -325,6 +325,6 @@@ struct tcp_splice_state unsigned long tcp_memory_pressure __read_mostly; EXPORT_SYMBOL_GPL(tcp_memory_pressure);
- DEFINE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key); - EXPORT_SYMBOL(tcp_rx_skb_cache_key); - - DEFINE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key); - void tcp_enter_memory_pressure(struct sock *sk) { unsigned long val; @@@ -486,7 -481,10 +481,7 @@@ static bool tcp_stream_is_readable(stru { if (tcp_epollin_ready(sk, target)) return true; - - if (sk->sk_prot->stream_memory_read) - return sk->sk_prot->stream_memory_read(sk); - return false; + return sk_is_readable(sk); }
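tcp_stream_is_readable() now falls back to the generic sk_is_readable() wrapper added in the sock.h hunk earlier, which calls the protocol's sock_is_readable() hook (renamed from stream_memory_read and made non-const). A ULP wires the hook up through its struct proto copy, as the TLS hunks later in this merge do; a condensed sketch with hypothetical helper names:

static bool my_ulp_sock_is_readable(struct sock *sk)
{
	/* Hypothetical: report data already queued in the ULP's own context. */
	return my_ulp_has_decrypted_data(sk);
}

static void my_ulp_build_proto(struct proto *prot, const struct proto *base)
{
	*prot = *base;
	prot->sock_is_readable = my_ulp_sock_is_readable;
}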
/* @@@ -644,7 -642,7 +639,7 @@@ int tcp_ioctl(struct sock *sk, int cmd } EXPORT_SYMBOL(tcp_ioctl);
- static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) + void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) { TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; tp->pushed_seq = tp->write_seq; @@@ -655,7 -653,7 +650,7 @@@ static inline bool forced_push(const st return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); }
- static void skb_entail(struct sock *sk, struct sk_buff *skb) + void tcp_skb_entail(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); @@@ -858,30 -856,15 +853,15 @@@ ssize_t tcp_splice_read(struct socket * } EXPORT_SYMBOL(tcp_splice_read);
- struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, - bool force_schedule) + struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, + bool force_schedule) { struct sk_buff *skb;
- if (likely(!size)) { - skb = sk->sk_tx_skb_cache; - if (skb) { - skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); - sk->sk_tx_skb_cache = NULL; - pskb_trim(skb, 0); - INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); - skb_shinfo(skb)->tx_flags = 0; - memset(TCP_SKB_CB(skb), 0, sizeof(struct tcp_skb_cb)); - return skb; - } - } - /* The TCP header must be at least 32-bit aligned. */ - size = ALIGN(size, 4); - if (unlikely(tcp_under_memory_pressure(sk))) sk_mem_reclaim_partial(sk);
- skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); + skb = alloc_skb_fclone(size + MAX_TCP_HEADER, gfp); if (likely(skb)) { bool mem_scheduled;
@@@ -892,7 -875,7 +872,7 @@@ mem_scheduled = sk_wmem_schedule(sk, skb->truesize); } if (likely(mem_scheduled)) { - skb_reserve(skb, sk->sk_prot->max_header); + skb_reserve(skb, MAX_TCP_HEADER); /* * Make sure that we have exactly size bytes * available to the caller, no more, no less. @@@ -952,7 -935,7 +932,7 @@@ int tcp_send_mss(struct sock *sk, int * */ void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb) { - if (skb && !skb->len) { + if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { tcp_unlink_write_queue(skb, sk); if (tcp_write_queue_empty(sk)) tcp_chrono_stop(sk, TCP_CHRONO_BUSY); @@@ -960,8 -943,8 +940,8 @@@ } }
- struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags, - struct page *page, int offset, size_t *size) + static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags, + struct page *page, int offset, size_t *size) { struct sk_buff *skb = tcp_write_queue_tail(sk); struct tcp_sock *tp = tcp_sk(sk); @@@ -974,15 -957,15 +954,15 @@@ new_segment if (!sk_stream_memory_free(sk)) return NULL;
- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, - tcp_rtx_and_write_queues_empty(sk)); + skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation, + tcp_rtx_and_write_queues_empty(sk)); if (!skb) return NULL;
#ifdef CONFIG_TLS_DEVICE skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED); #endif - skb_entail(sk, skb); + tcp_skb_entail(sk, skb); copy = size_goal; }
@@@ -1303,15 -1286,15 +1283,15 @@@ new_segment goto restart; } first_skb = tcp_rtx_and_write_queues_empty(sk); - skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, - first_skb); + skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation, + first_skb); if (!skb) goto wait_for_space;
process_backlog++; skb->ip_summed = CHECKSUM_PARTIAL;
- skb_entail(sk, skb); + tcp_skb_entail(sk, skb); copy = size_goal;
/* All packets are restored as if they have @@@ -2687,11 -2670,36 +2667,36 @@@ void tcp_shutdown(struct sock *sk, int } EXPORT_SYMBOL(tcp_shutdown);
+ int tcp_orphan_count_sum(void) + { + int i, total = 0; + + for_each_possible_cpu(i) + total += per_cpu(tcp_orphan_count, i); + + return max(total, 0); + } + + static int tcp_orphan_cache; + static struct timer_list tcp_orphan_timer; + #define TCP_ORPHAN_TIMER_PERIOD msecs_to_jiffies(100) + + static void tcp_orphan_update(struct timer_list *unused) + { + WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum()); + mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD); + } + + static bool tcp_too_many_orphans(int shift) + { + return READ_ONCE(tcp_orphan_cache) << shift > sysctl_tcp_max_orphans; + } + bool tcp_check_oom(struct sock *sk, int shift) { bool too_many_orphans, out_of_socket_memory;
- too_many_orphans = tcp_too_many_orphans(sk, shift); + too_many_orphans = tcp_too_many_orphans(shift); out_of_socket_memory = tcp_out_of_memory(sk);
if (too_many_orphans) @@@ -2800,7 -2808,7 +2805,7 @@@ adjudge_to_death /* remove backlog if any, without releasing ownership. */ __release_sock(sk);
- percpu_counter_inc(sk->sk_prot->orphan_count); + this_cpu_inc(tcp_orphan_count);
/* Have we already been destroyed by a softirq or backlog? */ if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) @@@ -2917,11 -2925,6 +2922,6 @@@ void tcp_write_queue_purge(struct sock sk_wmem_free_skb(sk, skb); } tcp_rtx_queue_purge(sk); - skb = sk->sk_tx_skb_cache; - if (skb) { - __kfree_skb(skb); - sk->sk_tx_skb_cache = NULL; - } INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); sk_mem_reclaim(sk); tcp_clear_all_retrans_hints(tcp_sk(sk)); @@@ -2958,10 -2961,6 +2958,6 @@@ int tcp_disconnect(struct sock *sk, in
tcp_clear_xmit_timers(sk); __skb_queue_purge(&sk->sk_receive_queue); - if (sk->sk_rx_skb_cache) { - __kfree_skb(sk->sk_rx_skb_cache); - sk->sk_rx_skb_cache = NULL; - } WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); tp->urg_data = 0; tcp_write_queue_purge(sk); @@@ -4502,7 -4501,10 +4498,10 @@@ void __init tcp_init(void sizeof_field(struct sk_buff, cb));
percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL); - percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL); + + timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE); + mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD); + inet_hashinfo_init(&tcp_hashinfo); inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash", thash_entries, 21, /* one slot per 2 MB*/ diff --combined net/mac80211/mesh.c index 5dcfd53a4ab6,a4212a333d61..15ac08d111ea --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@@ -672,7 -672,7 +672,7 @@@ ieee80211_mesh_update_bss_params(struc u8 *ie, u8 ie_len) { struct ieee80211_supported_band *sband; - const u8 *cap; + const struct element *cap; const struct ieee80211_he_operation *he_oper = NULL;
sband = ieee80211_get_sband(sdata); @@@ -687,10 -687,9 +687,10 @@@
sdata->vif.bss_conf.he_support = true;
- cap = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION, ie, ie_len); - if (cap && cap[1] >= ieee80211_he_oper_size(&cap[3])) - he_oper = (void *)(cap + 3); + cap = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ie_len); + if (cap && cap->datalen >= 1 + sizeof(*he_oper) && + cap->datalen >= 1 + ieee80211_he_oper_size(cap->data + 1)) + he_oper = (void *)(cap->data + 1);
if (he_oper) sdata->vif.bss_conf.he_oper.params = @@@ -1247,7 -1246,7 +1247,7 @@@ ieee80211_mesh_rx_probe_req(struct ieee struct sk_buff *presp; struct beacon_data *bcn; struct ieee80211_mgmt *hdr; - struct ieee802_11_elems elems; + struct ieee802_11_elems *elems; size_t baselen; u8 *pos;
@@@ -1256,22 -1255,24 +1256,24 @@@ if (baselen > len) return;
- ieee802_11_parse_elems(pos, len - baselen, false, &elems, mgmt->bssid, - NULL); - - if (!elems.mesh_id) + elems = ieee802_11_parse_elems(pos, len - baselen, false, mgmt->bssid, + NULL); + if (!elems) return;
+ if (!elems->mesh_id) + goto free; + /* 802.11-2012 10.1.4.3.2 */ if ((!ether_addr_equal(mgmt->da, sdata->vif.addr) && !is_broadcast_ether_addr(mgmt->da)) || - elems.ssid_len != 0) - return; + elems->ssid_len != 0) + goto free;
- if (elems.mesh_id_len != 0 && - (elems.mesh_id_len != ifmsh->mesh_id_len || - memcmp(elems.mesh_id, ifmsh->mesh_id, ifmsh->mesh_id_len))) - return; + if (elems->mesh_id_len != 0 && + (elems->mesh_id_len != ifmsh->mesh_id_len || + memcmp(elems->mesh_id, ifmsh->mesh_id, ifmsh->mesh_id_len))) + goto free;
rcu_read_lock(); bcn = rcu_dereference(ifmsh->beacon); @@@ -1295,6 -1296,8 +1297,8 @@@ ieee80211_tx_skb(sdata, presp); out: rcu_read_unlock(); + free: + kfree(elems); }
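These mesh handlers follow the new ieee802_11_parse_elems() convention used throughout this merge: the parser now returns a heap-allocated struct ieee802_11_elems (or NULL), so every exit path after a successful parse must free it, typically through a free: label as above. Condensed, the calling pattern is:

	struct ieee802_11_elems *elems;

	elems = ieee802_11_parse_elems(pos, len, false, bssid, NULL);
	if (!elems)
		return;

	if (!elems->mesh_id)
		goto free;

	/* ... use the parsed elements ... */

free:
	kfree(elems);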
static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, @@@ -1305,7 -1308,7 +1309,7 @@@ { struct ieee80211_local *local = sdata->local; struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - struct ieee802_11_elems elems; + struct ieee802_11_elems *elems; struct ieee80211_channel *channel; size_t baselen; int freq; @@@ -1320,42 -1323,47 +1324,47 @@@ if (baselen > len) return;
- ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, - false, &elems, mgmt->bssid, NULL); + elems = ieee802_11_parse_elems(mgmt->u.probe_resp.variable, + len - baselen, + false, mgmt->bssid, NULL); + if (!elems) + return;
/* ignore non-mesh or secure / unsecure mismatch */ - if ((!elems.mesh_id || !elems.mesh_config) || - (elems.rsn && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) || - (!elems.rsn && sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)) - return; + if ((!elems->mesh_id || !elems->mesh_config) || + (elems->rsn && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) || + (!elems->rsn && sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)) + goto free;
- if (elems.ds_params) - freq = ieee80211_channel_to_frequency(elems.ds_params[0], band); + if (elems->ds_params) + freq = ieee80211_channel_to_frequency(elems->ds_params[0], band); else freq = rx_status->freq;
channel = ieee80211_get_channel(local->hw.wiphy, freq);
if (!channel || channel->flags & IEEE80211_CHAN_DISABLED) - return; + goto free;
- if (mesh_matches_local(sdata, &elems)) { + if (mesh_matches_local(sdata, elems)) { mpl_dbg(sdata, "rssi_threshold=%d,rx_status->signal=%d\n", sdata->u.mesh.mshcfg.rssi_threshold, rx_status->signal); if (!sdata->u.mesh.user_mpm || sdata->u.mesh.mshcfg.rssi_threshold == 0 || sdata->u.mesh.mshcfg.rssi_threshold < rx_status->signal) - mesh_neighbour_update(sdata, mgmt->sa, &elems, + mesh_neighbour_update(sdata, mgmt->sa, elems, rx_status);
if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT && !sdata->vif.csa_active) - ieee80211_mesh_process_chnswitch(sdata, &elems, true); + ieee80211_mesh_process_chnswitch(sdata, elems, true); }
if (ifmsh->sync_ops) - ifmsh->sync_ops->rx_bcn_presp(sdata, - stype, mgmt, &elems, rx_status); + ifmsh->sync_ops->rx_bcn_presp(sdata, stype, mgmt, len, + elems->mesh_config, rx_status); + free: + kfree(elems); }
int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata) @@@ -1447,7 -1455,7 +1456,7 @@@ static void mesh_rx_csa_frame(struct ie struct ieee80211_mgmt *mgmt, size_t len) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; - struct ieee802_11_elems elems; + struct ieee802_11_elems *elems; u16 pre_value; bool fwd_csa = true; size_t baselen; @@@ -1460,33 -1468,37 +1469,37 @@@ pos = mgmt->u.action.u.chan_switch.variable; baselen = offsetof(struct ieee80211_mgmt, u.action.u.chan_switch.variable); - ieee802_11_parse_elems(pos, len - baselen, true, &elems, - mgmt->bssid, NULL); - - if (!mesh_matches_local(sdata, &elems)) + elems = ieee802_11_parse_elems(pos, len - baselen, true, + mgmt->bssid, NULL); + if (!elems) return;
- ifmsh->chsw_ttl = elems.mesh_chansw_params_ie->mesh_ttl; + if (!mesh_matches_local(sdata, elems)) + goto free; + + ifmsh->chsw_ttl = elems->mesh_chansw_params_ie->mesh_ttl; if (!--ifmsh->chsw_ttl) fwd_csa = false;
- pre_value = le16_to_cpu(elems.mesh_chansw_params_ie->mesh_pre_value); + pre_value = le16_to_cpu(elems->mesh_chansw_params_ie->mesh_pre_value); if (ifmsh->pre_value >= pre_value) - return; + goto free;
ifmsh->pre_value = pre_value;
if (!sdata->vif.csa_active && - !ieee80211_mesh_process_chnswitch(sdata, &elems, false)) { + !ieee80211_mesh_process_chnswitch(sdata, elems, false)) { mcsa_dbg(sdata, "Failed to process CSA action frame"); - return; + goto free; }
/* forward or re-broadcast the CSA frame */ if (fwd_csa) { - if (mesh_fwd_csa_frame(sdata, mgmt, len, &elems) < 0) + if (mesh_fwd_csa_frame(sdata, mgmt, len, elems) < 0) mcsa_dbg(sdata, "Failed to forward the CSA frame"); } + free: + kfree(elems); }
static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata, diff --combined net/netfilter/ipvs/ip_vs_ctl.c index 0ff94c66641f,e62b40bd349e..38ed88b89007 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@@ -48,8 -48,6 +48,8 @@@
#include <net/ip_vs.h>
+MODULE_ALIAS_GENL_FAMILY(IPVS_GENL_NAME); + /* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */ static DEFINE_MUTEX(__ip_vs_mutex);
@@@ -2019,6 -2017,12 +2019,12 @@@ static struct ctl_table vs_vars[] = .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "run_estimation", + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, #ifdef CONFIG_IP_VS_DEBUG { .procname = "debug_level", @@@ -4092,6 -4096,8 +4098,8 @@@ static int __net_init ip_vs_control_net tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode; tbl[idx++].data = &ipvs->sysctl_schedule_icmp; tbl[idx++].data = &ipvs->sysctl_ignore_tunneled; + ipvs->sysctl_run_estimation = 1; + tbl[idx++].data = &ipvs->sysctl_run_estimation; #ifdef CONFIG_IP_VS_DEBUG /* Global sysctls must be ro in non-init netns */ if (!net_eq(net, &init_net)) diff --combined net/tls/tls_main.c index 9ab81db8a654,278192ee133e..acfba9f1ba72 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@@ -421,6 -421,88 +421,88 @@@ static int do_tls_getsockopt_conf(struc rc = -EFAULT; break; } + case TLS_CIPHER_AES_CCM_128: { + struct tls12_crypto_info_aes_ccm_128 *aes_ccm_128 = + container_of(crypto_info, + struct tls12_crypto_info_aes_ccm_128, info); + + if (len != sizeof(*aes_ccm_128)) { + rc = -EINVAL; + goto out; + } + lock_sock(sk); + memcpy(aes_ccm_128->iv, + cctx->iv + TLS_CIPHER_AES_CCM_128_SALT_SIZE, + TLS_CIPHER_AES_CCM_128_IV_SIZE); + memcpy(aes_ccm_128->rec_seq, cctx->rec_seq, + TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE); + release_sock(sk); + if (copy_to_user(optval, aes_ccm_128, sizeof(*aes_ccm_128))) + rc = -EFAULT; + break; + } + case TLS_CIPHER_CHACHA20_POLY1305: { + struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305 = + container_of(crypto_info, + struct tls12_crypto_info_chacha20_poly1305, + info); + + if (len != sizeof(*chacha20_poly1305)) { + rc = -EINVAL; + goto out; + } + lock_sock(sk); + memcpy(chacha20_poly1305->iv, + cctx->iv + TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE, + TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE); + memcpy(chacha20_poly1305->rec_seq, cctx->rec_seq, + TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE); + release_sock(sk); + if (copy_to_user(optval, chacha20_poly1305, + sizeof(*chacha20_poly1305))) + rc = -EFAULT; + break; + } + case TLS_CIPHER_SM4_GCM: { + struct tls12_crypto_info_sm4_gcm *sm4_gcm_info = + container_of(crypto_info, + struct tls12_crypto_info_sm4_gcm, info); + + if (len != sizeof(*sm4_gcm_info)) { + rc = -EINVAL; + goto out; + } + lock_sock(sk); + memcpy(sm4_gcm_info->iv, + cctx->iv + TLS_CIPHER_SM4_GCM_SALT_SIZE, + TLS_CIPHER_SM4_GCM_IV_SIZE); + memcpy(sm4_gcm_info->rec_seq, cctx->rec_seq, + TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE); + release_sock(sk); + if (copy_to_user(optval, sm4_gcm_info, sizeof(*sm4_gcm_info))) + rc = -EFAULT; + break; + } + case TLS_CIPHER_SM4_CCM: { + struct tls12_crypto_info_sm4_ccm *sm4_ccm_info = + container_of(crypto_info, + struct tls12_crypto_info_sm4_ccm, info); + + if (len != sizeof(*sm4_ccm_info)) { + rc = -EINVAL; + goto out; + } + lock_sock(sk); + memcpy(sm4_ccm_info->iv, + cctx->iv + TLS_CIPHER_SM4_CCM_SALT_SIZE, + TLS_CIPHER_SM4_CCM_IV_SIZE); + memcpy(sm4_ccm_info->rec_seq, cctx->rec_seq, + TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE); + release_sock(sk); + if (copy_to_user(optval, sm4_ccm_info, sizeof(*sm4_ccm_info))) + rc = -EFAULT; + break; + } default: rc = -EINVAL; } @@@ -524,6 -606,12 +606,12 @@@ static int do_tls_setsockopt_conf(struc case TLS_CIPHER_CHACHA20_POLY1305: optsize = sizeof(struct tls12_crypto_info_chacha20_poly1305); break; + case TLS_CIPHER_SM4_GCM: + optsize = sizeof(struct tls12_crypto_info_sm4_gcm); + break; + case TLS_CIPHER_SM4_CCM: + optsize = sizeof(struct tls12_crypto_info_sm4_ccm); 
+ break; default: rc = -EINVAL; goto err_crypto_info; @@@ -681,12 -769,12 +769,12 @@@ static void build_protos(struct proto p
prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE]; prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg; - prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read; + prot[TLS_BASE][TLS_SW].sock_is_readable = tls_sw_sock_is_readable; prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;
prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE]; prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg; - prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read; + prot[TLS_SW][TLS_SW].sock_is_readable = tls_sw_sock_is_readable; prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;
#ifdef CONFIG_TLS_DEVICE diff --combined net/tls/tls_sw.c index d5d09bd817b7,4147bb2e7057..c5ba3fd50e91 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@@ -498,9 -498,15 +498,15 @@@ static int tls_do_encryption(struct soc int rc, iv_offset = 0;
/* For CCM based ciphers, first byte of IV is a constant */ - if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) { + switch (prot->cipher_type) { + case TLS_CIPHER_AES_CCM_128: rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE; iv_offset = 1; + break; + case TLS_CIPHER_SM4_CCM: + rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE; + iv_offset = 1; + break; }
memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv, @@@ -1457,10 -1463,16 +1463,16 @@@ static int decrypt_internal(struct soc aad = (u8 *)(sgout + n_sgout); iv = aad + prot->aad_size;
- /* For CCM based ciphers, first byte of nonce+iv is always '2' */ - if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) { - iv[0] = 2; + /* For CCM based ciphers, first byte of nonce+iv is a constant */ + switch (prot->cipher_type) { + case TLS_CIPHER_AES_CCM_128: + iv[0] = TLS_AES_CCM_IV_B0_BYTE; + iv_offset = 1; + break; + case TLS_CIPHER_SM4_CCM: + iv[0] = TLS_SM4_CCM_IV_B0_BYTE; iv_offset = 1; + break; }
/* Prepare IV */ @@@ -2026,7 -2038,7 +2038,7 @@@ splice_read_end return copied ? : err; }
-bool tls_sw_stream_read(const struct sock *sk) +bool tls_sw_sock_is_readable(struct sock *sk) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); @@@ -2424,6 -2436,40 +2436,40 @@@ int tls_set_sw_offload(struct sock *sk cipher_name = "rfc7539(chacha20,poly1305)"; break; } + case TLS_CIPHER_SM4_GCM: { + struct tls12_crypto_info_sm4_gcm *sm4_gcm_info; + + sm4_gcm_info = (void *)crypto_info; + nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE; + tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE; + iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE; + iv = sm4_gcm_info->iv; + rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE; + rec_seq = sm4_gcm_info->rec_seq; + keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE; + key = sm4_gcm_info->key; + salt = sm4_gcm_info->salt; + salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE; + cipher_name = "gcm(sm4)"; + break; + } + case TLS_CIPHER_SM4_CCM: { + struct tls12_crypto_info_sm4_ccm *sm4_ccm_info; + + sm4_ccm_info = (void *)crypto_info; + nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE; + tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE; + iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE; + iv = sm4_ccm_info->iv; + rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE; + rec_seq = sm4_ccm_info->rec_seq; + keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE; + key = sm4_ccm_info->key; + salt = sm4_ccm_info->salt; + salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE; + cipher_name = "ccm(sm4)"; + break; + } default: rc = -EINVAL; goto free_priv; diff --combined net/wireless/core.c index aaba847d79eb,45be124a98f1..eb297e1015e0 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@@ -524,7 -524,6 +524,7 @@@ use_default_name INIT_WORK(&rdev->propagate_cac_done_wk, cfg80211_propagate_cac_done_wk); INIT_WORK(&rdev->mgmt_registrations_update_wk, cfg80211_mgmt_registrations_update_wk); + spin_lock_init(&rdev->mgmt_registrations_lock);
#ifdef CONFIG_CFG80211_DEFAULT_PS rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; @@@ -1081,6 -1080,16 +1081,16 @@@ void cfg80211_dev_free(struct cfg80211_ list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list) cfg80211_put_bss(&rdev->wiphy, &scan->pub); mutex_destroy(&rdev->wiphy.mtx); + + /* + * The 'regd' can only be non-NULL if we never finished + * initializing the wiphy and thus never went through the + * unregister path - e.g. in failure scenarios. Thus, it + * cannot have been visible to anyone if non-NULL, so we + * can just free it here. + */ + kfree(rcu_dereference_raw(rdev->wiphy.regd)); + kfree(rdev); }
@@@ -1280,6 -1289,7 +1290,6 @@@ void cfg80211_init_wdev(struct wireless INIT_LIST_HEAD(&wdev->event_list); spin_lock_init(&wdev->event_lock); INIT_LIST_HEAD(&wdev->mgmt_registrations); - spin_lock_init(&wdev->mgmt_registrations_lock); INIT_LIST_HEAD(&wdev->pmsr_list); spin_lock_init(&wdev->pmsr_lock); INIT_WORK(&wdev->pmsr_free_wk, cfg80211_pmsr_free_wk); diff --combined net/wireless/scan.c index adc0d14cfd86,e4f79b23f7f6..22e92be61938 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@@ -383,7 -383,7 +383,7 @@@ static bool is_bss(struct cfg80211_bss const u8 *ssid, size_t ssid_len) { const struct cfg80211_bss_ies *ies; - const u8 *ssidie; + const struct element *ssid_elem;
if (bssid && !ether_addr_equal(a->bssid, bssid)) return false; @@@ -394,12 -394,12 +394,12 @@@ ies = rcu_access_pointer(a->ies); if (!ies) return false; - ssidie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len); - if (!ssidie) + ssid_elem = cfg80211_find_elem(WLAN_EID_SSID, ies->data, ies->len); + if (!ssid_elem) return false; - if (ssidie[1] != ssid_len) + if (ssid_elem->datalen != ssid_len) return false; - return memcmp(ssidie + 2, ssid, ssid_len) == 0; + return memcmp(ssid_elem->data, ssid, ssid_len) == 0; }
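is_bss() now matches the SSID through cfg80211_find_elem(), which returns a struct element and so avoids the open-coded ie[1] / ie + 2 offset arithmetic of cfg80211_find_ie(). The same accessor style works for any IE lookup; a small hypothetical helper:

static int get_ssid(const u8 *ies, size_t ies_len,
		    u8 ssid_out[IEEE80211_MAX_SSID_LEN])
{
	const struct element *elem;

	elem = cfg80211_find_elem(WLAN_EID_SSID, ies, ies_len);
	if (!elem || elem->datalen > IEEE80211_MAX_SSID_LEN)
		return -ENOENT;

	memcpy(ssid_out, elem->data, elem->datalen);
	return elem->datalen;	/* length of the copied SSID */
}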
static int @@@ -418,17 -418,14 +418,17 @@@ cfg80211_add_nontrans_list(struct cfg80 } ssid_len = ssid[1]; ssid = ssid + 2; - rcu_read_unlock();
/* check if nontrans_bss is in the list */ list_for_each_entry(bss, &trans_bss->nontrans_list, nontrans_list) { - if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len)) + if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len)) { + rcu_read_unlock(); return 0; + } }
+ rcu_read_unlock(); + /* add to the list */ list_add_tail(&nontrans_bss->nontrans_list, &trans_bss->nontrans_list); return 0; @@@ -1794,25 -1791,13 +1794,13 @@@ cfg80211_bss_update(struct cfg80211_reg return NULL; }
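cfg80211_get_ies_channel_number(), declared in the cfg80211.h hunk earlier and implemented in the scan.c hunk just below, exposes the IE-based channel lookup that cfg80211_get_bss_channel() uses internally. A usage sketch for a 2.4 GHz frame received on a neighbouring channel; the wrapper function is hypothetical:

static int ies_to_freq(const u8 *ies, size_t ies_len)
{
	int chan;

	chan = cfg80211_get_ies_channel_number(ies, ies_len, NL80211_BAND_2GHZ);
	if (chan < 0)
		return -1;	/* no channel number found in the IEs */

	return ieee80211_channel_to_frequency(chan, NL80211_BAND_2GHZ);
}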
- /* - * Update RX channel information based on the available frame payload - * information. This is mainly for the 2.4 GHz band where frames can be received - * from neighboring channels and the Beacon frames use the DSSS Parameter Set - * element to indicate the current (transmitting) channel, but this might also - * be needed on other bands if RX frequency does not match with the actual - * operating channel of a BSS. - */ - static struct ieee80211_channel * - cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen, - struct ieee80211_channel *channel, - enum nl80211_bss_scan_width scan_width) + int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen, + enum nl80211_band band) { const u8 *tmp; - u32 freq; int channel_number = -1; - struct ieee80211_channel *alt_channel;
- if (channel->band == NL80211_BAND_S1GHZ) { + if (band == NL80211_BAND_S1GHZ) { tmp = cfg80211_find_ie(WLAN_EID_S1G_OPERATION, ie, ielen); if (tmp && tmp[1] >= sizeof(struct ieee80211_s1g_oper_ie)) { struct ieee80211_s1g_oper_ie *s1gop = (void *)(tmp + 2); @@@ -1833,6 -1818,29 +1821,29 @@@ } }
+ return channel_number; + } + EXPORT_SYMBOL(cfg80211_get_ies_channel_number); + + /* + * Update RX channel information based on the available frame payload + * information. This is mainly for the 2.4 GHz band where frames can be received + * from neighboring channels and the Beacon frames use the DSSS Parameter Set + * element to indicate the current (transmitting) channel, but this might also + * be needed on other bands if RX frequency does not match with the actual + * operating channel of a BSS. + */ + static struct ieee80211_channel * + cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen, + struct ieee80211_channel *channel, + enum nl80211_bss_scan_width scan_width) + { + u32 freq; + int channel_number; + struct ieee80211_channel *alt_channel; + + channel_number = cfg80211_get_ies_channel_number(ie, ielen, channel->band); + if (channel_number < 0) { /* No channel information in frame payload */ return channel; @@@ -2075,12 -2083,12 +2086,12 @@@ static void cfg80211_parse_mbssid_data(
if (!non_tx_data) return; - if (!cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen)) + if (!cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, ie, ielen)) return; if (!wiphy->support_mbssid) return; if (wiphy->support_only_he_mbssid && - !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen)) + !cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen)) return;
new_ie = kmalloc(IEEE80211_MAX_DATA_LEN, gfp); @@@ -2447,10 -2455,10 +2458,10 @@@ cfg80211_inform_bss_frame_data(struct w res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt, len, gfp); if (!res || !wiphy->support_mbssid || - !cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen)) + !cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, ie, ielen)) return res; if (wiphy->support_only_he_mbssid && - !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen)) + !cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen)) return res;
non_tx_data.tx_bss = res; diff --combined net/wireless/util.c index a1a99a574984,2991f711491a..5ff1f8726faf --- a/net/wireless/util.c +++ b/net/wireless/util.c @@@ -80,6 -80,7 +80,7 @@@ u32 ieee80211_channel_to_freq_khz(int c return 0; /* not supported */ switch (band) { case NL80211_BAND_2GHZ: + case NL80211_BAND_LC: if (chan == 14) return MHZ_TO_KHZ(2484); else if (chan < 14) @@@ -209,6 -210,7 +210,7 @@@ static void set_mandatory_flags_band(st WARN_ON(want); break; case NL80211_BAND_2GHZ: + case NL80211_BAND_LC: want = 7; for (i = 0; i < sband->n_bitrates; i++) { switch (sband->bitrates[i].bitrate) { @@@ -1028,14 -1030,14 +1030,14 @@@ int cfg80211_change_iface(struct cfg802 !(rdev->wiphy.interface_modes & (1 << ntype))) return -EOPNOTSUPP;
- /* if it's part of a bridge, reject changing type to station/ibss */ - if (netif_is_bridge_port(dev) && - (ntype == NL80211_IFTYPE_ADHOC || - ntype == NL80211_IFTYPE_STATION || - ntype == NL80211_IFTYPE_P2P_CLIENT)) - return -EBUSY; - if (ntype != otype) { + /* if it's part of a bridge, reject changing type to station/ibss */ + if (netif_is_bridge_port(dev) && + (ntype == NL80211_IFTYPE_ADHOC || + ntype == NL80211_IFTYPE_STATION || + ntype == NL80211_IFTYPE_P2P_CLIENT)) + return -EBUSY; + dev->ieee80211_ptr->use_4addr = false; dev->ieee80211_ptr->mesh_id_up_len = 0; wdev_lock(dev->ieee80211_ptr);