The following commit has been merged in the master branch:

commit 735b1fe548e7396a639f641b65c3a89b5d377cbf
Merge: efabfec55eb24459a7a42f5fd05eb9a690d08700 06338ceff92510544a732380dbb2d621bd3775bf
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date: Wed Oct 27 11:11:52 2021 +1100
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
# Conflicts:
#	include/linux/mlx5/fs.h
#	include/net/sock.h
diff --combined Documentation/devicetree/bindings/vendor-prefixes.yaml index fe9615089acb,e2eb0c738f3a..a348fccf3d0a --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@@ -131,6 -131,8 +131,8 @@@ patternProperties description: Asahi Kasei Corp. "^asc,.*": description: All Sensors Corporation + "^asix,.*": + description: ASIX Electronics Corporation "^aspeed,.*": description: ASPEED Technology Inc. "^asus,.*": @@@ -191,8 -193,6 +193,8 @@@ description: B&R Industrial Automation GmbH "^bticino,.*": description: Bticino International + "^calamp,.*": + description: CalAmp Corp. "^calaosystems,.*": description: CALAO Systems SAS "^calxeda,.*": @@@ -337,8 -337,6 +339,8 @@@ description: EBV Elektronik "^eckelmann,.*": description: Eckelmann AG + "^edimax,.*": + description: EDIMAX Technology Co., Ltd "^edt,.*": description: Emerging Display Technologies "^eeti,.*": @@@ -357,8 -355,6 +359,8 @@@ description: Shenzhen Elida Technology Co., Ltd. "^elimo,.*": description: Elimo Engineering Ltd. + "^elpida,.*": + description: Elpida Memory, Inc. "^embest,.*": description: Shenzhen Embest Technology Co., Ltd. "^emlid,.*": @@@ -401,8 -397,6 +403,8 @@@ description: Exar Corporation "^excito,.*": description: Excito + "^exegin,.*": + description: Exegin Technologies Limited "^ezchip,.*": description: EZchip Semiconductor "^facebook,.*": @@@ -517,8 -511,6 +519,8 @@@ description: Hycon Technology Corp. "^hydis,.*": description: Hydis Technologies + "^hynix,.*": + description: SK Hynix Inc. "^hyundai,.*": description: Hyundai Technology "^i2se,.*": @@@ -587,8 -579,6 +589,8 @@@ description: JEDEC Solid State Technology Association "^jesurun,.*": description: Shenzhen Jesurun Electronics Business Dept. + "^jethome,.*": + description: JetHome (IP Sokolov P.A.) "^jianda,.*": description: Jiandangjing Technology Co., Ltd. "^kam,.*": @@@ -1114,8 -1104,6 +1116,8 @@@ description: SparkFun Electronics "^sprd,.*": description: Spreadtrum Communications Inc. + "^ssi,.*": + description: SSI Computer Corp "^sst,.*": description: Silicon Storage Technology, Inc. "^sstar,.*": diff --combined MAINTAINERS index d2d4b2c33a06,975086c5345d..bcf838a430f4 --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -334,7 -334,7 +334,7 @@@ F: drivers/platform/x86/acer-wmi.
ACPI M: "Rafael J. Wysocki" rafael@kernel.org -M: Len Brown lenb@kernel.org +R: Len Brown lenb@kernel.org L: linux-acpi@vger.kernel.org S: Supported W: https://01.org/linux-acpi @@@ -355,7 -355,7 +355,7 @@@ F: tools/power/acpi
ACPI APEI M: "Rafael J. Wysocki" rafael@kernel.org -M: Len Brown lenb@kernel.org +R: Len Brown lenb@kernel.org R: James Morse james.morse@arm.com R: Tony Luck tony.luck@intel.com R: Borislav Petkov bp@alien8.de @@@ -378,6 -378,14 +378,6 @@@ F: drivers/acpi/acpica F: include/acpi/ F: tools/power/acpi/
-ACPI FAN DRIVER -M: Zhang Rui rui.zhang@intel.com -L: linux-acpi@vger.kernel.org -S: Supported -W: https://01.org/linux-acpi -B: https://bugzilla.kernel.org -F: drivers/acpi/fan.c - ACPI FOR ARM64 (ACPI/arm64) M: Lorenzo Pieralisi lorenzo.pieralisi@arm.com M: Hanjun Guo guohanjun@huawei.com @@@ -414,6 -422,14 +414,6 @@@ W: https://01.org/linux-acp B: https://bugzilla.kernel.org F: drivers/acpi/*thermal*
-ACPI VIDEO DRIVER -M: Zhang Rui rui.zhang@intel.com -L: linux-acpi@vger.kernel.org -S: Supported -W: https://01.org/linux-acpi -B: https://bugzilla.kernel.org -F: drivers/acpi/acpi_video.c - ACPI VIOT DRIVER M: Jean-Philippe Brucker jean-philippe@linaro.org L: linux-acpi@vger.kernel.org @@@ -1266,13 -1282,6 +1266,13 @@@ S: Maintaine F: Documentation/devicetree/bindings/iommu/apple,dart.yaml F: drivers/iommu/apple-dart.c
+APPLE PCIE CONTROLLER DRIVER +M: Alyssa Rosenzweig alyssa@rosenzweig.io +M: Marc Zyngier maz@kernel.org +L: linux-pci@vger.kernel.org +S: Maintained +F: drivers/pci/controller/pcie-apple.c + APPLE SMC DRIVER M: Henrik Rydberg rydberg@bitmath.org L: linux-hwmon@vger.kernel.org @@@ -2303,14 -2312,6 +2303,14 @@@ F: arch/arm/boot/dts/nuvoton-wpcm450 F: arch/arm/mach-npcm/wpcm450.c F: drivers/*/*wpcm*
+ARM/NXP S32G ARCHITECTURE +M: Chester Lin clin@suse.com +R: Andreas Färber afaerber@suse.de +R: Matthias Brugger mbrugger@suse.com +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: arch/arm64/boot/dts/freescale/s32g*.dts* + ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT L: openmoko-kernel@lists.openmoko.org (subscribers-only) S: Orphan @@@ -2898,6 -2899,12 +2898,12 @@@ S: Maintaine F: Documentation/hwmon/asc7621.rst F: drivers/hwmon/asc7621.c
+ ASIX AX88796C SPI ETHERNET ADAPTER + M: Łukasz Stelmach l.stelmach@samsung.com + S: Maintained + F: Documentation/devicetree/bindings/net/asix,ax88796c.yaml + F: drivers/net/ethernet/asix/ax88796c_* + ASPEED PINCTRL DRIVERS M: Andrew Jeffery andrew@aj.id.au L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers) @@@ -4435,7 -4442,7 +4441,7 @@@ N: cros_e N: cros-ec
CHRONTEL CH7322 CEC DRIVER -M: Jeff Chase jnchase@google.com +M: Joe Tessler jrt@google.com L: linux-media@vger.kernel.org S: Maintained T: git git://linuxtv.org/media_tree.git @@@ -5457,19 -5464,6 +5463,19 @@@ F: include/net/devlink. F: include/uapi/linux/devlink.h F: net/core/devlink.c
+DH ELECTRONICS IMX6 DHCOM BOARD SUPPORT +M: Christoph Niedermaier cniedermaier@dh-electronics.com +L: kernel@dh-electronics.com +S: Maintained +F: arch/arm/boot/dts/imx6*-dhcom-* + +DH ELECTRONICS STM32MP1 DHCOM/DHCOR BOARD SUPPORT +M: Marek Vasut marex@denx.de +L: kernel@dh-electronics.com +S: Maintained +F: arch/arm/boot/dts/stm32mp1*-dhcom-* +F: arch/arm/boot/dts/stm32mp1*-dhcor-* + DIALOG SEMICONDUCTOR DRIVERS M: Support Opensource support.opensource@diasemi.com S: Supported @@@ -6304,7 -6298,6 +6310,7 @@@ L: linux-tegra@vger.kernel.or S: Supported T: git git://anongit.freedesktop.org/tegra/linux.git F: Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt +F: Documentation/devicetree/bindings/gpu/host1x/ F: drivers/gpu/drm/tegra/ F: drivers/gpu/host1x/ F: include/linux/host1x.h @@@ -7025,7 -7018,6 +7031,6 @@@ F: drivers/net/mdio/fwnode_mdio. F: drivers/net/mdio/of_mdio.c F: drivers/net/pcs/ F: drivers/net/phy/ - F: drivers/of/of_net.c F: include/dt-bindings/net/qca-ar803x.h F: include/linux/*mdio*.h F: include/linux/mdio/*.h @@@ -7037,6 -7029,7 +7042,7 @@@ F: include/linux/platform_data/mdio-gpi F: include/trace/events/mdio.h F: include/uapi/linux/mdio.h F: include/uapi/linux/mii.h + F: net/core/of_net.c
EXFAT FILE SYSTEM M: Namjae Jeon linkinjeon@kernel.org @@@ -8211,7 -8204,7 +8217,7 @@@ T: git git://linuxtv.org/anttip/media_t F: drivers/media/usb/hackrf/
HANTRO VPU CODEC DRIVER -M: Ezequiel Garcia ezequiel@collabora.com +M: Ezequiel Garcia ezequiel@vanguardiasur.com.ar M: Philipp Zabel p.zabel@pengutronix.de L: linux-media@vger.kernel.org L: linux-rockchip@lists.infradead.org @@@ -8679,12 -8672,6 +8685,12 @@@ S: Maintaine T: git git://linuxtv.org/media_tree.git F: drivers/media/i2c/hi556.c
+HYNIX HI846 SENSOR DRIVER +M: Martin Kepplinger martin.kepplinger@puri.sm +L: linux-media@vger.kernel.org +S: Maintained +F: drivers/media/i2c/hi846.c + Hyper-V/Azure CORE AND DRIVERS M: "K. Y. Srinivasan" kys@microsoft.com M: Haiyang Zhang haiyangz@microsoft.com @@@ -10020,7 -10007,6 +10026,7 @@@ JC42.4 TEMPERATURE SENSOR DRIVE M: Guenter Roeck linux@roeck-us.net L: linux-hwmon@vger.kernel.org S: Maintained +F: Documentation/devicetree/bindings/hwmon/jedec,jc42.yaml F: Documentation/hwmon/jc42.rst F: drivers/hwmon/jc42.c
@@@ -10060,7 -10046,6 +10066,7 @@@ F: include/linux/jbd2. JPU V4L2 MEM2MEM DRIVER FOR RENESAS M: Mikhail Ulyanov mikhail.ulyanov@cogentembedded.com L: linux-media@vger.kernel.org +L: linux-renesas-soc@vger.kernel.org S: Maintained F: drivers/media/platform/rcar_jpu.c
@@@ -10915,7 -10900,7 +10921,7 @@@ LM90 HARDWARE MONITOR DRIVE M: Jean Delvare jdelvare@suse.com L: linux-hwmon@vger.kernel.org S: Maintained -F: Documentation/devicetree/bindings/hwmon/lm90.txt +F: Documentation/devicetree/bindings/hwmon/national,lm90.yaml F: Documentation/hwmon/lm90.rst F: drivers/hwmon/lm90.c F: include/dt-bindings/thermal/lm90.h @@@ -11299,6 -11284,7 +11305,6 @@@ F: Documentation/networking/device_driv F: drivers/net/ethernet/marvell/octeontx2/af/
MARVELL PRESTERA ETHERNET SWITCH DRIVER -M: Vadym Kochan vkochan@marvell.com M: Taras Chornyi tchornyi@marvell.com S: Supported W: https://github.com/Marvell-switching/switchdev-prestera @@@ -11699,7 -11685,6 +11705,7 @@@ T: git git://linuxtv.org/media_tree.gi F: Documentation/devicetree/bindings/media/renesas,csi2.yaml F: Documentation/devicetree/bindings/media/renesas,isp.yaml F: Documentation/devicetree/bindings/media/renesas,vin.yaml +F: drivers/media/platform/rcar-isp.c F: drivers/media/platform/rcar-vin/
MEDIA DRIVERS FOR RENESAS - VSP1 @@@ -11845,7 -11830,9 +11851,9 @@@ F: drivers/mmc/host/mtk-sd. MEDIATEK MT76 WIRELESS LAN DRIVER M: Felix Fietkau nbd@nbd.name M: Lorenzo Bianconi lorenzo.bianconi83@gmail.com - R: Ryder Lee ryder.lee@mediatek.com + M: Ryder Lee ryder.lee@mediatek.com + R: Shayne Chen shayne.chen@mediatek.com + R: Sean Wang sean.wang@mediatek.com L: linux-wireless@vger.kernel.org S: Maintained F: drivers/net/wireless/mediatek/mt76/ @@@ -11869,12 -11856,6 +11877,12 @@@ S: Maintaine F: Documentation/devicetree/bindings/i2c/i2c-mt7621.txt F: drivers/i2c/busses/i2c-mt7621.c
+MEDIATEK MT7621 PCIE CONTROLLER DRIVER +M: Sergio Paracuellos sergio.paracuellos@gmail.com +S: Maintained +F: Documentation/devicetree/bindings/pci/mediatek,mt7621-pcie.yaml +F: drivers/pci/controller/pcie-mt7621.c + MEDIATEK MT7621 PHY PCI DRIVER M: Sergio Paracuellos sergio.paracuellos@gmail.com S: Maintained @@@ -11898,14 -11879,6 +11906,14 @@@ M: Sean Wang <sean.wang@mediatek.com S: Maintained F: drivers/char/hw_random/mtk-rng.c
+MEDIATEK SMI DRIVER +M: Yong Wu yong.wu@mediatek.com +L: linux-mediatek@lists.infradead.org (moderated for non-subscribers) +S: Supported +F: Documentation/devicetree/bindings/memory-controllers/mediatek,smi* +F: drivers/memory/mtk-smi.c +F: include/soc/mediatek/smi.h + MEDIATEK SWITCH DRIVER M: Sean Wang sean.wang@mediatek.com M: Landen Chao Landen.Chao@mediatek.com @@@ -12789,7 -12762,6 +12797,7 @@@ M: Laurent Pinchart <laurent.pinchart@i L: linux-media@vger.kernel.org S: Maintained T: git git://linuxtv.org/media_tree.git +F: Documentation/devicetree/bindings/media/i2c/aptina,mt9p031.yaml F: drivers/media/i2c/mt9p031.c F: include/media/i2c/mt9p031.h
@@@ -13084,6 -13056,7 +13092,7 @@@ F: include/linux/dsa F: include/linux/platform_data/dsa.h F: include/net/dsa.h F: net/dsa/ + F: tools/testing/selftests/drivers/net/dsa/
NETWORKING [GENERAL] M: "David S. Miller" davem@davemloft.net @@@ -13842,13 -13815,6 +13851,13 @@@ S: Maintaine T: git git://linuxtv.org/media_tree.git F: drivers/media/i2c/ov13858.c
+OMNIVISION OV13B10 SENSOR DRIVER +M: Arec Kao arec.kao@intel.com +L: linux-media@vger.kernel.org +S: Maintained +T: git git://linuxtv.org/media_tree.git +F: drivers/media/i2c/ov13b10.c + OMNIVISION OV2680 SENSOR DRIVER M: Rui Miguel Silva rmfrfs@gmail.com L: linux-media@vger.kernel.org @@@ -14645,15 -14611,7 +14654,15 @@@ M: Stanimir Varbanov <svarbanov@mm-sol. L: linux-pci@vger.kernel.org L: linux-arm-msm@vger.kernel.org S: Maintained -F: drivers/pci/controller/dwc/*qcom* +F: drivers/pci/controller/dwc/pcie-qcom.c + +PCIE ENDPOINT DRIVER FOR QUALCOMM +M: Manivannan Sadhasivam manivannan.sadhasivam@linaro.org +L: linux-pci@vger.kernel.org +L: linux-arm-msm@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml +F: drivers/pci/controller/dwc/pcie-qcom-ep.c
PCIE DRIVER FOR ROCKCHIP M: Shawn Lin shawn.lin@rock-chips.com @@@ -14910,6 -14868,13 +14919,6 @@@ L: linux-omap@vger.kernel.or S: Maintained F: drivers/pinctrl/pinctrl-single.c
-PIN CONTROLLER - ST SPEAR -M: Viresh Kumar vireshk@kernel.org -L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -S: Maintained -W: http://www.st.com/spear -F: drivers/pinctrl/spear/ - PKTCDVD DRIVER M: linux-block@vger.kernel.org S: Orphan @@@ -14958,6 -14923,7 +14967,6 @@@ S: Maintaine W: http://hwmon.wiki.kernel.org/ W: http://www.roeck-us.net/linux/drivers/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git -F: Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt F: Documentation/devicetree/bindings/hwmon/ltc2978.txt F: Documentation/devicetree/bindings/hwmon/max31785.txt F: Documentation/hwmon/adm1275.rst @@@ -15506,6 -15472,7 +15515,7 @@@ M: ath9k-devel@qca.qualcomm.co L: linux-wireless@vger.kernel.org S: Supported W: https://wireless.wiki.kernel.org/en/users/Drivers/ath9k + F: Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml F: drivers/net/wireless/ath/ath9k/
QUALCOMM CAMERA SUBSYSTEM DRIVER @@@ -15927,6 -15894,12 +15937,12 @@@ L: linux-wireless@vger.kernel.or S: Maintained F: drivers/net/wireless/realtek/rtw88/
+ REALTEK WIRELESS DRIVER (rtw89) + M: Ping-Ke Shih pkshih@realtek.com + L: linux-wireless@vger.kernel.org + S: Maintained + F: drivers/net/wireless/realtek/rtw89/ + REDPINE WIRELESS DRIVER M: Amitkumar Karwar amitkarwar@gmail.com M: Siva Rebbagondla siva8118@gmail.com @@@ -16161,7 -16134,7 +16177,7 @@@ F: include/uapi/linux/rkisp1-config.
ROCKCHIP RASTER 2D GRAPHIC ACCELERATION UNIT DRIVER M: Jacob Chen jacob-chen@iotwrt.com -M: Ezequiel Garcia ezequiel@collabora.com +M: Ezequiel Garcia ezequiel@vanguardiasur.com.ar L: linux-media@vger.kernel.org L: linux-rockchip@lists.infradead.org S: Maintained @@@ -16169,7 -16142,7 +16185,7 @@@ F: Documentation/devicetree/bindings/me F: drivers/media/platform/rockchip/rga/
ROCKCHIP VIDEO DECODER DRIVER -M: Ezequiel Garcia ezequiel@collabora.com +M: Ezequiel Garcia ezequiel@vanguardiasur.com.ar L: linux-media@vger.kernel.org L: linux-rockchip@lists.infradead.org S: Maintained @@@ -17674,17 -17647,21 +17690,17 @@@ W: https://github.com/linux-speakup/spe B: https://github.com/linux-speakup/speakup/issues F: drivers/accessibility/speakup/
-SPEAR CLOCK FRAMEWORK SUPPORT -M: Viresh Kumar vireshk@kernel.org -L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -S: Maintained -W: http://www.st.com/spear -F: drivers/clk/spear/ - -SPEAR PLATFORM SUPPORT +SPEAR PLATFORM/CLOCK/PINCTRL SUPPORT M: Viresh Kumar vireshk@kernel.org M: Shiraz Hashim shiraz.linux.kernel@gmail.com +M: soc@kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained W: http://www.st.com/spear F: arch/arm/boot/dts/spear* F: arch/arm/mach-spear/ +F: drivers/clk/spear/ +F: drivers/pinctrl/spear/
SPI NOR SUBSYSTEM M: Tudor Ambarus tudor.ambarus@microchip.com @@@ -18601,9 -18578,7 +18617,9 @@@ L: linux-pm@vger.kernel.or S: Supported Q: https://patchwork.kernel.org/project/linux-pm/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git thermal +F: Documentation/ABI/testing/sysfs-class-thermal F: Documentation/devicetree/bindings/thermal/ +F: Documentation/driver-api/thermal/ F: drivers/thermal/ F: include/linux/cpu_cooling.h F: include/linux/thermal.h @@@ -20377,7 -20352,6 +20393,7 @@@ X86 ARCHITECTURE (32-BIT AND 64-BIT M: Thomas Gleixner tglx@linutronix.de M: Ingo Molnar mingo@redhat.com M: Borislav Petkov bp@alien8.de +M: Dave Hansen dave.hansen@linux.intel.com M: x86@kernel.org R: "H. Peter Anvin" hpa@zytor.com L: linux-kernel@vger.kernel.org diff --combined drivers/infiniband/hw/mlx4/main.c index 38742e233915,f3fa2fe6a88a..ceca05982f61 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@@ -2105,10 -2105,10 +2105,10 @@@ mlx4_ib_alloc_hw_device_stats(struct ib struct mlx4_ib_dev *dev = to_mdev(ibdev); struct mlx4_ib_diag_counters *diag = dev->diag_counters;
- if (!diag[0].name) + if (!diag[0].descs) return NULL;
- return rdma_alloc_hw_stats_struct(diag[0].name, diag[0].num_counters, + return rdma_alloc_hw_stats_struct(diag[0].descs, diag[0].num_counters, RDMA_HW_STATS_DEFAULT_LIFESPAN); }
@@@ -2118,10 -2118,10 +2118,10 @@@ mlx4_ib_alloc_hw_port_stats(struct ib_d struct mlx4_ib_dev *dev = to_mdev(ibdev); struct mlx4_ib_diag_counters *diag = dev->diag_counters;
- if (!diag[1].name) + if (!diag[1].descs) return NULL;
- return rdma_alloc_hw_stats_struct(diag[1].name, diag[1].num_counters, + return rdma_alloc_hw_stats_struct(diag[1].descs, diag[1].num_counters, RDMA_HW_STATS_DEFAULT_LIFESPAN); }
@@@ -2151,8 -2151,10 +2151,8 @@@ static int mlx4_ib_get_hw_stats(struct }
static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev, - const char ***name, - u32 **offset, - u32 *num, - bool port) + struct rdma_stat_desc **pdescs, + u32 **offset, u32 *num, bool port) { u32 num_counters;
@@@ -2164,46 -2166,46 +2164,46 @@@ if (!port) num_counters += ARRAY_SIZE(diag_device_only);
- *name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL); - if (!*name) + *pdescs = kcalloc(num_counters, sizeof(struct rdma_stat_desc), + GFP_KERNEL); + if (!*pdescs) return -ENOMEM;
*offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL); if (!*offset) - goto err_name; + goto err;
*num = num_counters;
return 0;
-err_name: - kfree(*name); +err: + kfree(*pdescs); return -ENOMEM; }
static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev, - const char **name, - u32 *offset, - bool port) + struct rdma_stat_desc *descs, + u32 *offset, bool port) { int i; int j;
for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) { - name[i] = diag_basic[i].name; + descs[i].name = diag_basic[i].name; offset[i] = diag_basic[i].offset; }
if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) { for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) { - name[j] = diag_ext[i].name; + descs[j].name = diag_ext[i].name; offset[j] = diag_ext[i].offset; } }
if (!port) { for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) { - name[j] = diag_device_only[i].name; + descs[j].name = diag_device_only[i].name; offset[j] = diag_device_only[i].offset; } } @@@ -2231,13 -2233,13 +2231,13 @@@ static int mlx4_ib_alloc_diag_counters( if (i && !per_port) continue;
- ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name, + ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].descs, &diag[i].offset, &diag[i].num_counters, i); if (ret) goto err_alloc;
- mlx4_ib_fill_diag_counters(ibdev, diag[i].name, + mlx4_ib_fill_diag_counters(ibdev, diag[i].descs, diag[i].offset, i); }
@@@ -2247,7 -2249,7 +2247,7 @@@
err_alloc: if (i) { - kfree(diag[i - 1].name); + kfree(diag[i - 1].descs); kfree(diag[i - 1].offset); }
@@@ -2260,7 -2262,7 +2260,7 @@@ static void mlx4_ib_diag_cleanup(struc
for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) { kfree(ibdev->diag_counters[i].offset); - kfree(ibdev->diag_counters[i].name); + kfree(ibdev->diag_counters[i].descs); } }
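The mlx4 hunks above convert the diag counters from a bare array of name strings to an array of struct rdma_stat_desc, whose .name field now carries the string. A minimal sketch of the new allocate-and-fill pattern — the counter names here are made up for illustration, not the driver's real tables:

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

static const char *const demo_counter_names[] = {
	"demo_rx_errors",
	"demo_tx_errors",
};

/* Sketch only: allocate descriptors and point each .name at a string,
 * following the __mlx4_ib_alloc_diag_counters()/fill pattern above. */
static struct rdma_stat_desc *demo_alloc_descs(void)
{
	struct rdma_stat_desc *descs;
	int i;

	descs = kcalloc(ARRAY_SIZE(demo_counter_names), sizeof(*descs),
			GFP_KERNEL);
	if (!descs)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(demo_counter_names); i++)
		descs[i].name = demo_counter_names[i];

	return descs;
}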
@@@ -2273,7 -2275,7 +2273,7 @@@ static void mlx4_ib_update_qps(struct m u64 release_mac = MLX4_IB_INVALID_MAC; struct mlx4_ib_qp *qp;
- new_smac = mlx4_mac_to_u64(dev->dev_addr); + new_smac = ether_addr_to_u64(dev->dev_addr); atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
/* no need for update QP1 and mac registration in non-SRIOV */ diff --combined drivers/infiniband/hw/mlx4/qp.c index 3a1a4ac9dd33,aea4182f33a4..b17d6ebc5b70 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@@ -1099,10 -1099,8 +1099,10 @@@ static int create_qp_common(struct ib_p if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) qp->flags |= MLX4_IB_QP_NETIF; - else + else { + err = -EINVAL; goto err; + } }
err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp); @@@ -1855,7 -1853,7 +1855,7 @@@ static int mlx4_set_path(struct mlx4_ib u16 vlan_id, u8 *smac) { return _mlx4_set_path(dev, &qp->ah_attr, - mlx4_mac_to_u64(smac), + ether_addr_to_u64(smac), vlan_id, path, &mqp->pri, port); } diff --combined drivers/infiniband/hw/mlx5/odp.c index 31596d8c5212,81147d774dd2..91eb615b89ee --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@@ -430,7 -430,7 +430,7 @@@ static struct mlx5_ib_mr *implicit_get_ mr->umem = &odp->umem; mr->ibmr.lkey = mr->mmkey.key; mr->ibmr.rkey = mr->mmkey.key; - mr->mmkey.iova = idx * MLX5_IMR_MTT_SIZE; + mr->ibmr.iova = idx * MLX5_IMR_MTT_SIZE; mr->parent = imr; odp->private = mr;
@@@ -500,7 -500,7 +500,7 @@@ struct mlx5_ib_mr *mlx5_ib_alloc_implic }
imr->ibmr.pd = &pd->ibpd; - imr->mmkey.iova = 0; + imr->ibmr.iova = 0; imr->umem = &umem_odp->umem; imr->ibmr.lkey = imr->mmkey.key; imr->ibmr.rkey = imr->mmkey.key; @@@ -738,7 -738,7 +738,7 @@@ static int pagefault_mr(struct mlx5_ib_ { struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
- if (unlikely(io_virt < mr->mmkey.iova)) + if (unlikely(io_virt < mr->ibmr.iova)) return -EFAULT;
if (mr->umem->is_dmabuf) @@@ -747,7 -747,7 +747,7 @@@ if (!odp->is_implicit_odp) { u64 user_va;
- if (check_add_overflow(io_virt - mr->mmkey.iova, + if (check_add_overflow(io_virt - mr->ibmr.iova, (u64)odp->umem.address, &user_va)) return -EFAULT; if (unlikely(user_va >= ib_umem_end(odp) || @@@ -788,7 -788,7 +788,7 @@@ struct pf_frame int depth; };
-static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key) +static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key) { if (!mmkey) return false; @@@ -797,6 -797,21 +797,6 @@@ return mmkey->key == key; }
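The hunk below deletes get_indirect_num_descs(): the descriptor count now lives directly on struct mlx5_ib_mkey, so the container_of() dispatch into the MW/DEVX wrappers is no longer needed. For reference, a minimal sketch of the container_of() pattern the removed helper relied on — the demo_* types are hypothetical:

#include <linux/kernel.h>
#include <linux/types.h>

struct demo_mkey {
	u32 key;
};

struct demo_mw {
	int ndescs;
	struct demo_mkey mmkey;	/* embedded member */
};

/* Recover the enclosing demo_mw from a pointer to its embedded mmkey. */
static int demo_ndescs(struct demo_mkey *mmkey)
{
	struct demo_mw *mw = container_of(mmkey, struct demo_mw, mmkey);

	return mw->ndescs;
}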
-static int get_indirect_num_descs(struct mlx5_core_mkey *mmkey) -{ - struct mlx5_ib_mw *mw; - struct mlx5_ib_devx_mr *devx_mr; - - if (mmkey->type == MLX5_MKEY_MW) { - mw = container_of(mmkey, struct mlx5_ib_mw, mmkey); - return mw->ndescs; - } - - devx_mr = container_of(mmkey, struct mlx5_ib_devx_mr, - mmkey); - return devx_mr->ndescs; -} - /* * Handle a single data segment in a page-fault WQE or RDMA region. * @@@ -816,11 -831,12 +816,11 @@@ static int pagefault_single_data_segmen { int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0; struct pf_frame *head = NULL, *frame; - struct mlx5_core_mkey *mmkey; + struct mlx5_ib_mkey *mmkey; struct mlx5_ib_mr *mr; struct mlx5_klm *pklm; u32 *out = NULL; size_t offset; - int ndescs;
io_virt += *bytes_committed; bcnt -= *bytes_committed; @@@ -869,6 -885,8 +869,6 @@@ next_mr
case MLX5_MKEY_MW: case MLX5_MKEY_INDIRECT_DEVX: - ndescs = get_indirect_num_descs(mmkey); - if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) { mlx5_ib_dbg(dev, "indirection level exceeded\n"); ret = -EFAULT; @@@ -876,7 -894,7 +876,7 @@@ }
outlen = MLX5_ST_SZ_BYTES(query_mkey_out) + - sizeof(*pklm) * (ndescs - 2); + sizeof(*pklm) * (mmkey->ndescs - 2);
if (outlen > cur_outlen) { kfree(out); @@@ -891,14 -909,14 +891,14 @@@ pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out, bsf0_klm0_pas_mtt0_1);
- ret = mlx5_core_query_mkey(dev->mdev, mmkey, out, outlen); + ret = mlx5_core_query_mkey(dev->mdev, mmkey->key, out, outlen); if (ret) goto end;
offset = io_virt - MLX5_GET64(query_mkey_out, out, memory_key_mkey_entry.start_addr);
- for (i = 0; bcnt && i < ndescs; i++, pklm++) { + for (i = 0; bcnt && i < mmkey->ndescs; i++, pklm++) { if (offset >= be32_to_cpu(pklm->bcount)) { offset -= be32_to_cpu(pklm->bcount); continue; @@@ -1541,6 -1559,7 +1541,7 @@@ int mlx5r_odp_create_eq(struct mlx5_ib_
eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int; param = (struct mlx5_eq_param) { + .irq_index = MLX5_IRQ_EQ_CTRL, .nent = MLX5_IB_NUM_PF_EQE, }; param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT; @@@ -1685,31 -1704,25 +1686,31 @@@ get_prefetchable_mr(struct ib_pd *pd, e u32 lkey) { struct mlx5_ib_dev *dev = to_mdev(pd->device); - struct mlx5_core_mkey *mmkey; struct mlx5_ib_mr *mr = NULL; + struct mlx5_ib_mkey *mmkey;
xa_lock(&dev->odp_mkeys); mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey)); - if (!mmkey || mmkey->key != lkey || mmkey->type != MLX5_MKEY_MR) + if (!mmkey || mmkey->key != lkey) { + mr = ERR_PTR(-ENOENT); goto end; + } + if (mmkey->type != MLX5_MKEY_MR) { + mr = ERR_PTR(-EINVAL); + goto end; + }
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
if (mr->ibmr.pd != pd) { - mr = NULL; + mr = ERR_PTR(-EPERM); goto end; }
/* prefetch with write-access must be supported by the MR */ if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE && !mr->umem->writable) { - mr = NULL; + mr = ERR_PTR(-EPERM); goto end; }
@@@ -1741,7 -1754,7 +1742,7 @@@ static void mlx5_ib_prefetch_mr_work(st destroy_prefetch_work(work); }
-static bool init_prefetch_work(struct ib_pd *pd, +static int init_prefetch_work(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice, u32 pf_flags, struct prefetch_mr_work *work, struct ib_sge *sg_list, u32 num_sge) @@@ -1752,19 -1765,17 +1753,19 @@@ work->pf_flags = pf_flags;
for (i = 0; i < num_sge; ++i) { - work->frags[i].io_virt = sg_list[i].addr; - work->frags[i].length = sg_list[i].length; - work->frags[i].mr = - get_prefetchable_mr(pd, advice, sg_list[i].lkey); - if (!work->frags[i].mr) { + struct mlx5_ib_mr *mr; + + mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey); + if (IS_ERR(mr)) { work->num_sge = i; - return false; + return PTR_ERR(mr); } + work->frags[i].io_virt = sg_list[i].addr; + work->frags[i].length = sg_list[i].length; + work->frags[i].mr = mr; } work->num_sge = num_sge; - return true; + return 0; }
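get_prefetchable_mr() above now encodes the failure reason with ERR_PTR() instead of collapsing every failure to NULL, and init_prefetch_work() forwards it with PTR_ERR(). A minimal sketch of the idiom, with hypothetical demo_* names:

#include <linux/err.h>
#include <linux/types.h>

struct demo_obj;

/* Return a real pointer on success, or an encoded errno on failure. */
static struct demo_obj *demo_lookup(struct demo_obj *obj, bool permitted)
{
	if (!obj)
		return ERR_PTR(-ENOENT);
	if (!permitted)
		return ERR_PTR(-EPERM);
	return obj;
}

static int demo_use(struct demo_obj *obj, bool permitted)
{
	struct demo_obj *o = demo_lookup(obj, permitted);

	if (IS_ERR(o))
		return PTR_ERR(o);	/* caller sees -ENOENT/-EPERM, not a generic -EINVAL */
	/* ... use o ... */
	return 0;
}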
static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd, @@@ -1780,8 -1791,8 +1781,8 @@@ struct mlx5_ib_mr *mr;
mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey); - if (!mr) - return -ENOENT; + if (IS_ERR(mr)) + return PTR_ERR(mr); ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length, &bytes_mapped, pf_flags); if (ret < 0) { @@@ -1801,7 -1812,6 +1802,7 @@@ int mlx5_ib_advise_mr_prefetch(struct i { u32 pf_flags = 0; struct prefetch_mr_work *work; + int rc;
if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH) pf_flags |= MLX5_PF_FLAGS_DOWNGRADE; @@@ -1817,10 -1827,9 +1818,10 @@@ if (!work) return -ENOMEM;
- if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) { + rc = init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge); + if (rc) { destroy_prefetch_work(work); - return -EINVAL; + return rc; } queue_work(system_unbound_wq, &work->work); return 0; diff --combined drivers/net/ethernet/chelsio/cxgb3/common.h index d115ec93296d,a309016f7f8c..ecd025dda8d6 --- a/drivers/net/ethernet/chelsio/cxgb3/common.h +++ b/drivers/net/ethernet/chelsio/cxgb3/common.h @@@ -676,6 -676,8 +676,6 @@@ void t3_link_changed(struct adapter *ad void t3_link_fault(struct adapter *adapter, int port_id); int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc); const struct adapter_info *t3_get_adapter_info(unsigned int board_id); -int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data); -int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data); int t3_seeprom_wp(struct adapter *adapter, int enable); int t3_get_tp_version(struct adapter *adapter, u32 *vers); int t3_check_tpsram_version(struct adapter *adapter); @@@ -708,7 -710,7 +708,7 @@@ int t3_mac_enable(struct cmac *mac, in int t3_mac_disable(struct cmac *mac, int which); int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu); int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev); - int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]); + int t3_mac_set_address(struct cmac *mac, unsigned int idx, const u8 addr[6]); int t3_mac_set_num_ucast(struct cmac *mac, int n); const struct mac_stats *t3_mac_update_stats(struct cmac *mac); int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc); diff --combined drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index e185f5f24849,9cf9e33664e4..bfffcaeee624 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@@ -2036,16 -2036,20 +2036,16 @@@ static int get_eeprom(struct net_devic { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; - int i, err = 0; - - u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL); - if (!buf) - return -ENOMEM; + int cnt;
e->magic = EEPROM_MAGIC; - for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4) - err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]); + cnt = pci_read_vpd(adapter->pdev, e->offset, e->len, data); + if (cnt < 0) + return cnt;
- if (!err) - memcpy(data, buf + e->offset, e->len); - kfree(buf); - return err; + e->len = cnt; + + return 0; }
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, @@@ -2054,6 -2058,7 +2054,6 @@@ struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; u32 aligned_offset, aligned_len; - __le32 *p; u8 *buf; int err;
@@@ -2067,9 -2072,12 +2067,9 @@@ buf = kmalloc(aligned_len, GFP_KERNEL); if (!buf) return -ENOMEM; - err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf); - if (!err && aligned_len > 4) - err = t3_seeprom_read(adapter, - aligned_offset + aligned_len - 4, - (__le32 *) & buf[aligned_len - 4]); - if (err) + err = pci_read_vpd(adapter->pdev, aligned_offset, aligned_len, + buf); + if (err < 0) goto out; memcpy(buf + (eeprom->offset & 3), data, eeprom->len); } else @@@ -2079,13 -2087,17 +2079,13 @@@ if (err) goto out;
- for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) { - err = t3_seeprom_write(adapter, aligned_offset, *p); - aligned_offset += 4; - } - - if (!err) + err = pci_write_vpd(adapter->pdev, aligned_offset, aligned_len, buf); + if (err >= 0) err = t3_seeprom_wp(adapter, 1); out: if (buf != data) kfree(buf); - return err; + return err < 0 ? err : 0; }
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) @@@ -2574,7 -2586,7 +2574,7 @@@ static int cxgb_set_mac_addr(struct net if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + eth_hw_addr_set(dev, addr->sa_data); t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr); if (offload_running(adapter)) write_smt_entry(adapter, pi->port_id); diff --combined drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index cb85c2f21525,53feac8da503..da41eee2f25c --- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@@ -29,6 -29,7 +29,7 @@@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ + #include <linux/etherdevice.h> #include "common.h" #include "regs.h" #include "sge_defs.h" @@@ -595,9 -596,80 +596,9 @@@ struct t3_vpd u32 pad; /* for multiple-of-4 sizing and alignment */ };
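Several hunks in this merge (cxgb3 above, mlx5e further down) replace direct memcpy() into netdev->dev_addr with eth_hw_addr_set(), part of the tree-wide preparation for making dev_addr const. A minimal sketch of the idiom:

#include <linux/etherdevice.h>
#include <linux/socket.h>

/* Sketch: set the MAC through the helper instead of writing
 * netdev->dev_addr directly. */
static int demo_set_mac(struct net_device *dev, const struct sockaddr *saddr)
{
	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(dev, saddr->sa_data);	/* copies ETH_ALEN bytes */
	return 0;
}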
-#define EEPROM_MAX_POLL 40 #define EEPROM_STAT_ADDR 0x4000 #define VPD_BASE 0xc00
-/** - * t3_seeprom_read - read a VPD EEPROM location - * @adapter: adapter to read - * @addr: EEPROM address - * @data: where to store the read data - * - * Read a 32-bit word from a location in VPD EEPROM using the card's PCI - * VPD ROM capability. A zero is written to the flag bit when the - * address is written to the control register. The hardware device will - * set the flag to 1 when 4 bytes have been read into the data register. - */ -int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data) -{ - u16 val; - int attempts = EEPROM_MAX_POLL; - u32 v; - unsigned int base = adapter->params.pci.vpd_cap_addr; - - if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3)) - return -EINVAL; - - pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr); - do { - udelay(10); - pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val); - } while (!(val & PCI_VPD_ADDR_F) && --attempts); - - if (!(val & PCI_VPD_ADDR_F)) { - CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr); - return -EIO; - } - pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v); - *data = cpu_to_le32(v); - return 0; -} - -/** - * t3_seeprom_write - write a VPD EEPROM location - * @adapter: adapter to write - * @addr: EEPROM address - * @data: value to write - * - * Write a 32-bit word to a location in VPD EEPROM using the card's PCI - * VPD ROM capability. - */ -int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data) -{ - u16 val; - int attempts = EEPROM_MAX_POLL; - unsigned int base = adapter->params.pci.vpd_cap_addr; - - if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3)) - return -EINVAL; - - pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA, - le32_to_cpu(data)); - pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR, - addr | PCI_VPD_ADDR_F); - do { - msleep(1); - pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val); - } while ((val & PCI_VPD_ADDR_F) && --attempts); - - if (val & PCI_VPD_ADDR_F) { - CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr); - return -EIO; - } - return 0; -} - /** * t3_seeprom_wp - enable/disable EEPROM write protection * @adapter: the adapter @@@ -607,14 -679,7 +608,14 @@@ */ int t3_seeprom_wp(struct adapter *adapter, int enable) { - return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0); + u32 data = enable ? 0xc : 0; + int ret; + + /* EEPROM_STAT_ADDR is outside VPD area, use pci_write_vpd_any() */ + ret = pci_write_vpd_any(adapter->pdev, EEPROM_STAT_ADDR, sizeof(u32), + &data); + + return ret < 0 ? ret : 0; }
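The cxgb3 hunks above drop the driver's hand-rolled VPD register polling (t3_seeprom_read()/t3_seeprom_write()) in favor of the PCI core's accessors; t3_seeprom_wp() uses pci_write_vpd_any() because EEPROM_STAT_ADDR lies outside the declared VPD area. A minimal usage sketch — the offset and buffer size are illustrative only:

#include <linux/pci.h>

/* Sketch: both accessors return the number of bytes transferred or a
 * negative errno, which is why the converted callers test "< 0". */
static int demo_vpd_roundtrip(struct pci_dev *pdev)
{
	u8 buf[64];
	ssize_t cnt;

	cnt = pci_read_vpd(pdev, 0, sizeof(buf), buf);
	if (cnt < 0)
		return cnt;

	cnt = pci_write_vpd(pdev, 0, cnt, buf);
	return cnt < 0 ? cnt : 0;
}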
static int vpdstrtouint(char *s, u8 len, unsigned int base, unsigned int *val) @@@ -644,22 -709,24 +645,22 @@@ static int vpdstrtou16(char *s, u8 len */ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) { - int i, addr, ret; struct t3_vpd vpd; + u8 base_val = 0; + int addr, ret;
/* * Card information is normally at VPD_BASE but some early cards had * it at 0. */ - ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd); - if (ret) + ret = pci_read_vpd(adapter->pdev, VPD_BASE, 1, &base_val); + if (ret < 0) return ret; - addr = vpd.id_tag == 0x82 ? VPD_BASE : 0; + addr = base_val == PCI_VPD_LRDT_ID_STRING ? VPD_BASE : 0;
- for (i = 0; i < sizeof(vpd); i += 4) { - ret = t3_seeprom_read(adapter, addr + i, - (__le32 *)((u8 *)&vpd + i)); - if (ret) - return ret; - } + ret = pci_read_vpd(adapter->pdev, addr, sizeof(vpd), &vpd); + if (ret < 0) + return ret;
ret = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &p->cclk); if (ret) @@@ -3692,8 -3759,7 +3693,7 @@@ int t3_prep_adapter(struct adapter *ada memcpy(hw_addr, adapter->params.vpd.eth_base, 5); hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
- memcpy(adapter->port[i]->dev_addr, hw_addr, - ETH_ALEN); + eth_hw_addr_set(adapter->port[i], hw_addr); init_link_config(&p->link_config, p->phy.caps); p->phy.ops->power_down(&p->phy, 1);
diff --combined drivers/net/ethernet/intel/ice/ice_ptp.c index d1ef3d48a4b0,a1be0d04a2d0..bf7247c6f58e --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@@ -6,6 -6,252 +6,252 @@@
#define E810_OUT_PROP_DELAY_NS 1
+ static const struct ptp_pin_desc ice_pin_desc_e810t[] = { + /* name idx func chan */ + { "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } }, + { "SMA1", SMA1, PTP_PF_NONE, 1, { 0, } }, + { "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } }, + { "SMA2", SMA2, PTP_PF_NONE, 2, { 0, } }, + { "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } }, + }; + + /** + * ice_get_sma_config_e810t + * @hw: pointer to the hw struct + * @ptp_pins: pointer to the ptp_pin_desc struture + * + * Read the configuration of the SMA control logic and put it into the + * ptp_pin_desc structure + */ + static int + ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins) + { + u8 data, i; + int status; + + /* Read initial pin state */ + status = ice_read_sma_ctrl_e810t(hw, &data); + if (status) + return status; + + /* initialize with defaults */ + for (i = 0; i < NUM_PTP_PINS_E810T; i++) { + snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name), + "%s", ice_pin_desc_e810t[i].name); + ptp_pins[i].index = ice_pin_desc_e810t[i].index; + ptp_pins[i].func = ice_pin_desc_e810t[i].func; + ptp_pins[i].chan = ice_pin_desc_e810t[i].chan; + } + + /* Parse SMA1/UFL1 */ + switch (data & ICE_SMA1_MASK_E810T) { + case ICE_SMA1_MASK_E810T: + default: + ptp_pins[SMA1].func = PTP_PF_NONE; + ptp_pins[UFL1].func = PTP_PF_NONE; + break; + case ICE_SMA1_DIR_EN_E810T: + ptp_pins[SMA1].func = PTP_PF_PEROUT; + ptp_pins[UFL1].func = PTP_PF_NONE; + break; + case ICE_SMA1_TX_EN_E810T: + ptp_pins[SMA1].func = PTP_PF_EXTTS; + ptp_pins[UFL1].func = PTP_PF_NONE; + break; + case 0: + ptp_pins[SMA1].func = PTP_PF_EXTTS; + ptp_pins[UFL1].func = PTP_PF_PEROUT; + break; + } + + /* Parse SMA2/UFL2 */ + switch (data & ICE_SMA2_MASK_E810T) { + case ICE_SMA2_MASK_E810T: + default: + ptp_pins[SMA2].func = PTP_PF_NONE; + ptp_pins[UFL2].func = PTP_PF_NONE; + break; + case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T): + ptp_pins[SMA2].func = PTP_PF_EXTTS; + ptp_pins[UFL2].func = PTP_PF_NONE; + break; + case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T): + ptp_pins[SMA2].func = PTP_PF_PEROUT; + ptp_pins[UFL2].func = PTP_PF_NONE; + break; + case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T): + ptp_pins[SMA2].func = PTP_PF_NONE; + ptp_pins[UFL2].func = PTP_PF_EXTTS; + break; + case ICE_SMA2_DIR_EN_E810T: + ptp_pins[SMA2].func = PTP_PF_PEROUT; + ptp_pins[UFL2].func = PTP_PF_EXTTS; + break; + } + + return 0; + } + + /** + * ice_ptp_set_sma_config_e810t + * @hw: pointer to the hw struct + * @ptp_pins: pointer to the ptp_pin_desc struture + * + * Set the configuration of the SMA control logic based on the configuration in + * num_pins parameter + */ + static int + ice_ptp_set_sma_config_e810t(struct ice_hw *hw, + const struct ptp_pin_desc *ptp_pins) + { + int status; + u8 data; + + /* SMA1 and UFL1 cannot be set to TX at the same time */ + if (ptp_pins[SMA1].func == PTP_PF_PEROUT && + ptp_pins[UFL1].func == PTP_PF_PEROUT) + return -EINVAL; + + /* SMA2 and UFL2 cannot be set to RX at the same time */ + if (ptp_pins[SMA2].func == PTP_PF_EXTTS && + ptp_pins[UFL2].func == PTP_PF_EXTTS) + return -EINVAL; + + /* Read initial pin state value */ + status = ice_read_sma_ctrl_e810t(hw, &data); + if (status) + return status; + + /* Set the right sate based on the desired configuration */ + data &= ~ICE_SMA1_MASK_E810T; + if (ptp_pins[SMA1].func == PTP_PF_NONE && + ptp_pins[UFL1].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled"); + data |= ICE_SMA1_MASK_E810T; + } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS && + ptp_pins[UFL1].func == PTP_PF_NONE) { + 
dev_info(ice_hw_to_dev(hw), "SMA1 RX"); + data |= ICE_SMA1_TX_EN_E810T; + } else if (ptp_pins[SMA1].func == PTP_PF_NONE && + ptp_pins[UFL1].func == PTP_PF_PEROUT) { + /* U.FL 1 TX will always enable SMA 1 RX */ + dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX"); + } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS && + ptp_pins[UFL1].func == PTP_PF_PEROUT) { + dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX"); + } else if (ptp_pins[SMA1].func == PTP_PF_PEROUT && + ptp_pins[UFL1].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA1 TX"); + data |= ICE_SMA1_DIR_EN_E810T; + } + + data &= ~ICE_SMA2_MASK_E810T; + if (ptp_pins[SMA2].func == PTP_PF_NONE && + ptp_pins[UFL2].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled"); + data |= ICE_SMA2_MASK_E810T; + } else if (ptp_pins[SMA2].func == PTP_PF_EXTTS && + ptp_pins[UFL2].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA2 RX"); + data |= (ICE_SMA2_TX_EN_E810T | + ICE_SMA2_UFL2_RX_DIS_E810T); + } else if (ptp_pins[SMA2].func == PTP_PF_NONE && + ptp_pins[UFL2].func == PTP_PF_EXTTS) { + dev_info(ice_hw_to_dev(hw), "UFL2 RX"); + data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T); + } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT && + ptp_pins[UFL2].func == PTP_PF_NONE) { + dev_info(ice_hw_to_dev(hw), "SMA2 TX"); + data |= (ICE_SMA2_DIR_EN_E810T | + ICE_SMA2_UFL2_RX_DIS_E810T); + } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT && + ptp_pins[UFL2].func == PTP_PF_EXTTS) { + dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX"); + data |= ICE_SMA2_DIR_EN_E810T; + } + + return ice_write_sma_ctrl_e810t(hw, data); + } + + /** + * ice_ptp_set_sma_e810t + * @info: the driver's PTP info structure + * @pin: pin index in kernel structure + * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT) + * + * Set the configuration of a single SMA pin + */ + static int + ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin, + enum ptp_pin_function func) + { + struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T]; + struct ice_pf *pf = ptp_info_to_pf(info); + struct ice_hw *hw = &pf->hw; + int err; + + if (pin < SMA1 || func > PTP_PF_PEROUT) + return -EOPNOTSUPP; + + err = ice_get_sma_config_e810t(hw, ptp_pins); + if (err) + return err; + + /* Disable the same function on the other pin sharing the channel */ + if (pin == SMA1 && ptp_pins[UFL1].func == func) + ptp_pins[UFL1].func = PTP_PF_NONE; + if (pin == UFL1 && ptp_pins[SMA1].func == func) + ptp_pins[SMA1].func = PTP_PF_NONE; + + if (pin == SMA2 && ptp_pins[UFL2].func == func) + ptp_pins[UFL2].func = PTP_PF_NONE; + if (pin == UFL2 && ptp_pins[SMA2].func == func) + ptp_pins[SMA2].func = PTP_PF_NONE; + + /* Set up new pin function in the temp table */ + ptp_pins[pin].func = func; + + return ice_ptp_set_sma_config_e810t(hw, ptp_pins); + } + + /** + * ice_verify_pin_e810t + * @info: the driver's PTP info structure + * @pin: Pin index + * @func: Assigned function + * @chan: Assigned channel + * + * Verify if pin supports requested pin function. If the Check pins consistency. 
+ * Reconfigure the SMA logic attached to the given pin to enable its + * desired functionality + */ + static int + ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) + { + /* Don't allow channel reassignment */ + if (chan != ice_pin_desc_e810t[pin].chan) + return -EOPNOTSUPP; + + /* Check if functions are properly assigned */ + switch (func) { + case PTP_PF_NONE: + break; + case PTP_PF_EXTTS: + if (pin == UFL1) + return -EOPNOTSUPP; + break; + case PTP_PF_PEROUT: + if (pin == UFL2 || pin == GNSS) + return -EOPNOTSUPP; + break; + case PTP_PF_PHYSYNC: + return -EOPNOTSUPP; + } + + return ice_ptp_set_sma_e810t(info, pin, func); + } + /** * ice_set_tx_tstamp - Enable or disable Tx timestamping * @pf: The PF pointer to search in @@@ -735,17 -981,34 +981,34 @@@ ice_ptp_gpio_enable_e810(struct ptp_clo { struct ice_pf *pf = ptp_info_to_pf(info); struct ice_perout_channel clk_cfg = {0}; + bool sma_pres = false; unsigned int chan; u32 gpio_pin; int err;
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) + sma_pres = true; + switch (rq->type) { case PTP_CLK_REQ_PEROUT: chan = rq->perout.index; - if (chan == PPS_CLK_GEN_CHAN) + if (sma_pres) { + if (chan == ice_pin_desc_e810t[SMA1].chan) + clk_cfg.gpio_pin = GPIO_20; + else if (chan == ice_pin_desc_e810t[SMA2].chan) + clk_cfg.gpio_pin = GPIO_22; + else + return -1; + } else if (ice_is_e810t(&pf->hw)) { + if (chan == 0) + clk_cfg.gpio_pin = GPIO_20; + else + clk_cfg.gpio_pin = GPIO_22; + } else if (chan == PPS_CLK_GEN_CHAN) { clk_cfg.gpio_pin = PPS_PIN_INDEX; - else + } else { clk_cfg.gpio_pin = chan; + }
clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) + rq->perout.period.nsec); @@@ -757,7 -1020,19 +1020,19 @@@ break; case PTP_CLK_REQ_EXTTS: chan = rq->extts.index; - gpio_pin = chan; + if (sma_pres) { + if (chan < ice_pin_desc_e810t[SMA2].chan) + gpio_pin = GPIO_21; + else + gpio_pin = GPIO_23; + } else if (ice_is_e810t(&pf->hw)) { + if (chan == 0) + gpio_pin = GPIO_21; + else + gpio_pin = GPIO_23; + } else { + gpio_pin = chan; + }
err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin, rq->extts.flags); @@@ -1012,7 -1287,7 +1287,7 @@@ int ice_ptp_set_ts_config(struct ice_p * The timestamp is in ns, so we must convert the result first. */ void - ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring, + ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { u32 ts_high; @@@ -1037,14 -1312,94 +1312,94 @@@ } }
+ /** + * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins + * @pf: pointer to the PF structure + * @info: PTP clock info structure + * + * Disable the OS access to the SMA pins. Called to clear out the OS + * indications of pin support when we fail to setup the E810-T SMA control + * register. + */ + static void + ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) + { + struct device *dev = ice_pf_to_dev(pf); + + dev_warn(dev, "Failed to configure E810-T SMA pin control\n"); + + info->enable = NULL; + info->verify = NULL; + info->n_pins = 0; + info->n_ext_ts = 0; + info->n_per_out = 0; + } + + /** + * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins + * @pf: pointer to the PF structure + * @info: PTP clock info structure + * + * Finish setting up the SMA pins by allocating pin_config, and setting it up + * according to the current status of the SMA. On failure, disable all of the + * extended SMA pin support. + */ + static void + ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) + { + struct device *dev = ice_pf_to_dev(pf); + int err; + + /* Allocate memory for kernel pins interface */ + info->pin_config = devm_kcalloc(dev, info->n_pins, + sizeof(*info->pin_config), GFP_KERNEL); + if (!info->pin_config) { + ice_ptp_disable_sma_pins_e810t(pf, info); + return; + } + + /* Read current SMA status */ + err = ice_get_sma_config_e810t(&pf->hw, info->pin_config); + if (err) + ice_ptp_disable_sma_pins_e810t(pf, info); + } + + /** + * ice_ptp_setup_pins_e810t - Setup PTP pins in sysfs + * @pf: pointer to the PF instance + * @info: PTP clock capabilities + */ + static void + ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) + { + /* Check if SMA controller is in the netlist */ + if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL) && + !ice_is_pca9575_present(&pf->hw)) + ice_clear_feature_support(pf, ICE_F_SMA_CTRL); + + if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { + info->n_ext_ts = N_EXT_TS_E810_NO_SMA; + info->n_per_out = N_PER_OUT_E810T_NO_SMA; + return; + } + + info->n_per_out = N_PER_OUT_E810T; + info->n_ext_ts = N_EXT_TS_E810; + info->n_pins = NUM_PTP_PINS_E810T; + info->verify = ice_verify_pin_e810t; + + /* Complete setup of the SMA pins */ + ice_ptp_setup_sma_pins_e810t(pf, info); + } + /** * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs * @info: PTP clock capabilities */ static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info) { - info->n_per_out = E810_N_PER_OUT; - info->n_ext_ts = E810_N_EXT_TS; + info->n_per_out = N_PER_OUT_E810; + info->n_ext_ts = N_EXT_TS_E810; }
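The E810-T code above plugs SMA pin handling into the kernel's generic PTP pin interface: pin_config describes the pins, and the verify() callback gates which functions userspace may assign. A minimal single-pin sketch — the device, pin name, and policy are hypothetical:

#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>

static struct ptp_pin_desc demo_pins[] = {
	/* name    index  func         chan */
	{ "DEMO0",     0, PTP_PF_NONE,    0 },
};

static int demo_verify(struct ptp_clock_info *info, unsigned int pin,
		       enum ptp_pin_function func, unsigned int chan)
{
	if (func == PTP_PF_PHYSYNC)	/* this pin cannot do PHY sync */
		return -EOPNOTSUPP;
	return 0;
}

static struct ptp_clock_info demo_info = {
	.owner		= THIS_MODULE,
	.name		= "demo-ptp",
	.n_pins		= ARRAY_SIZE(demo_pins),
	.pin_config	= demo_pins,
	.verify		= demo_verify,
};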
/** @@@ -1062,7 -1417,10 +1417,10 @@@ ice_ptp_set_funcs_e810(struct ice_pf *p { info->enable = ice_ptp_gpio_enable_e810;
- ice_ptp_setup_pins_e810(info); + if (ice_is_e810t(&pf->hw)) + ice_ptp_setup_pins_e810t(pf, info); + else + ice_ptp_setup_pins_e810(info); }
/** @@@ -1571,9 -1929,6 +1929,9 @@@ err_kworker */ void ice_ptp_release(struct ice_pf *pf) { + if (!test_bit(ICE_FLAG_PTP, pf->flags)) + return; + /* Disable timestamping for both Tx and Rx */ ice_ptp_cfg_timestamp(pf, false);
diff --combined drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 02db3148240a,da1bec04efff..eae9aa9c0811 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@@ -745,7 -745,7 +745,7 @@@ static int mlx5_fw_tracer_set_mtrc_conf MLX5_SET(mtrc_conf, in, trace_mode, TRACE_TO_MEMORY); MLX5_SET(mtrc_conf, in, log_trace_buffer_size, ilog2(TRACER_BUFFER_PAGE_NUM)); - MLX5_SET(mtrc_conf, in, trace_mkey, tracer->buff.mkey.key); + MLX5_SET(mtrc_conf, in, trace_mkey, tracer->buff.mkey);
err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTRC_CONF, 0, 1); @@@ -1028,7 -1028,7 +1028,7 @@@ int mlx5_fw_tracer_init(struct mlx5_fw_
err_notifier_unregister: mlx5_eq_notifier_unregister(dev, &tracer->nb); - mlx5_core_destroy_mkey(dev, &tracer->buff.mkey); + mlx5_core_destroy_mkey(dev, tracer->buff.mkey); err_dealloc_pd: mlx5_core_dealloc_pd(dev, tracer->buff.pdn); err_cancel_work: @@@ -1051,7 -1051,7 +1051,7 @@@ void mlx5_fw_tracer_cleanup(struct mlx5 if (tracer->owner) mlx5_fw_tracer_ownership_release(tracer);
- mlx5_core_destroy_mkey(tracer->dev, &tracer->buff.mkey); + mlx5_core_destroy_mkey(tracer->dev, tracer->buff.mkey); mlx5_core_dealloc_pd(tracer->dev, tracer->buff.pdn); }
@@@ -1069,7 -1069,6 +1069,6 @@@ void mlx5_fw_tracer_destroy(struct mlx5 mlx5_fw_tracer_clean_saved_traces_array(tracer); mlx5_fw_tracer_free_strings_db(tracer); mlx5_fw_tracer_destroy_log_buf(tracer); - flush_workqueue(tracer->work_queue); destroy_workqueue(tracer->work_queue); kvfree(tracer); } diff --combined drivers/net/ethernet/mellanox/mlx5/core/en.h index 7761daa25f63,a3a4fece0cac..5b023951136d --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@@ -220,8 -220,6 +220,6 @@@ struct mlx5e_umr_wqe struct mlx5_mtt inline_mtts[0]; };
- extern const char mlx5e_self_tests[][ETH_GSTRING_LEN]; - enum mlx5e_priv_flag { MLX5E_PFLAG_RX_CQE_BASED_MODER, MLX5E_PFLAG_TX_CQE_BASED_MODER, @@@ -253,6 -251,9 +251,9 @@@ struct mlx5e_params u16 mode; u8 num_tc; struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; + struct { + struct mlx5e_mqprio_rl *rl; + } channel; } mqprio; bool rx_cqe_compress_def; bool tunneled_offload_en; @@@ -665,7 -666,7 +666,7 @@@ struct mlx5e_rq u8 wq_type; u32 rqn; struct mlx5_core_dev *mdev; - struct mlx5_core_mkey umr_mkey; + u32 umr_mkey; struct mlx5e_dma_info wqe_overflow;
/* XDP read-mostly */ @@@ -879,6 -880,7 +880,7 @@@ struct mlx5e_priv #endif struct mlx5e_scratchpad scratchpad; struct mlx5e_htb htb; + struct mlx5e_mqprio_rl *mqprio_rl; };
struct mlx5e_rx_handlers { @@@ -918,6 -920,7 +920,7 @@@ void mlx5e_fold_sw_stats64(struct mlx5e
void mlx5e_init_l2_addr(struct mlx5e_priv *priv); int mlx5e_self_test_num(struct mlx5e_priv *priv); + int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data); void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, u64 *buf); void mlx5e_set_rx_mode_work(struct work_struct *work); @@@ -1003,7 -1006,8 +1006,8 @@@ int mlx5e_modify_sq(struct mlx5_core_de struct mlx5e_modify_sq_param *p); int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix, struct mlx5e_params *params, struct mlx5e_sq_param *param, - struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid); + struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, + struct mlx5e_sq_stats *sq_stats); void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq); void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq); void mlx5e_free_txqsq(struct mlx5e_txqsq *sq); diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5fce9401ac74,f3dec58026d9..c4f3d648a7b6 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@@ -234,7 -234,8 +234,7 @@@ static int mlx5e_rq_alloc_mpwqe_info(st }
static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, - u64 npages, u8 page_shift, - struct mlx5_core_mkey *umr_mkey, + u64 npages, u8 page_shift, u32 *umr_mkey, dma_addr_t filler_addr) { struct mlx5_mtt *mtt; @@@ -454,7 -455,7 +454,7 @@@ static int mlx5e_alloc_rq(struct mlx5e_ err = mlx5e_create_rq_umr_mkey(mdev, rq); if (err) goto err_rq_drop_page; - rq->mkey_be = cpu_to_be32(rq->umr_mkey.key); + rq->mkey_be = cpu_to_be32(rq->umr_mkey);
err = mlx5e_rq_alloc_mpwqe_info(rq, node); if (err) @@@ -486,7 -487,7 +486,7 @@@ if (err) goto err_rq_frags;
- rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey.key); + rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey); }
if (xsk) { @@@ -573,7 -574,7 +573,7 @@@ err_free_by_rq_type case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: kvfree(rq->mpwqe.info); err_rq_mkey: - mlx5_core_destroy_mkey(mdev, &rq->umr_mkey); + mlx5_core_destroy_mkey(mdev, rq->umr_mkey); err_rq_drop_page: mlx5e_free_mpwqe_rq_drop_page(rq); break; @@@ -606,7 -607,7 +606,7 @@@ static void mlx5e_free_rq(struct mlx5e_ switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: kvfree(rq->mpwqe.info); - mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey); + mlx5_core_destroy_mkey(rq->mdev, rq->umr_mkey); mlx5e_free_mpwqe_rq_drop_page(rq); break; default: /* MLX5_WQ_TYPE_CYCLIC */ @@@ -929,9 -930,10 +929,10 @@@ static int mlx5e_alloc_xdpsq_fifo(struc struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo; int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS; + size_t size;
- xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq, - GFP_KERNEL, numa); + size = array_size(sizeof(*xdpi_fifo->xi), dsegs_per_wq); + xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa); if (!xdpi_fifo->xi) return -ENOMEM;
@@@ -945,10 -947,11 +946,11 @@@ static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa) { int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); + size_t size; int err;
- sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz, - GFP_KERNEL, numa); + size = array_size(sizeof(*sq->db.wqe_info), wq_sz); + sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa); if (!sq->db.wqe_info) return -ENOMEM;
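Both mlx5e allocation hunks above replace open-coded sizeof(x) * n multiplications with array_size() from <linux/overflow.h>, which saturates to SIZE_MAX on overflow so the allocation fails cleanly instead of returning an undersized buffer. A minimal sketch:

#include <linux/mm.h>
#include <linux/overflow.h>

/* Sketch: on multiplication overflow array_size() yields SIZE_MAX and
 * kvzalloc_node() fails, rather than silently allocating too little. */
static void *demo_alloc_array(size_t elem_size, size_t n, int node)
{
	return kvzalloc_node(array_size(elem_size, n), GFP_KERNEL, node);
}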
@@@ -1297,7 -1300,8 +1299,8 @@@ static int mlx5e_set_sq_maxrate(struct
int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix, struct mlx5e_params *params, struct mlx5e_sq_param *param, - struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid) + struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, + struct mlx5e_sq_stats *sq_stats) { struct mlx5e_create_sq_param csp = {}; u32 tx_rate; @@@ -1307,10 -1311,7 +1310,7 @@@ if (err) return err;
- if (qos_queue_group_id) - sq->stats = c->priv->htb.qos_sq_stats[qos_qid]; - else - sq->stats = &c->priv->channel_stats[c->ix].sq[tc]; + sq->stats = sq_stats;
csp.tisn = tisn; csp.tis_lst_sz = 1; @@@ -1704,6 -1705,36 +1704,36 @@@ static void mlx5e_close_tx_cqs(struct m mlx5e_close_cq(&c->sq[tc].cq); }
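mlx5e_mqprio_txq_to_tc(), added in the hunk just below, inverts the tc_to_txq table by checking which TC's [offset, offset + count) window contains the queue; the unsigned subtraction lets a single compare cover both bounds. A worked sketch with a hypothetical 2-TC, 4-channel layout — the struct is redeclared locally for self-containment and mirrors the count/offset pair of the kernel's netdev_tc_txq:

#include <linux/types.h>

struct demo_tc_txq { u16 count; u16 offset; };

static int demo_txq_to_tc(const struct demo_tc_txq *map, int ntc,
			  unsigned int txq)
{
	int tc;

	for (tc = 0; tc < ntc; tc++)
		/* if txq < offset the unsigned difference wraps huge,
		 * so this one compare checks both window bounds */
		if (txq - map[tc].offset < map[tc].count)
			return tc;
	return -1;
}

/* With map = { { .count = 4, .offset = 0 }, { .count = 4, .offset = 4 } }:
 * txq 5 -> 5 - 4 = 1 < 4 -> TC 1; txq 3 -> 3 - 0 = 3 < 4 -> TC 0. */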
+ static int mlx5e_mqprio_txq_to_tc(struct netdev_tc_txq *tc_to_txq, unsigned int txq) + { + int tc; + + for (tc = 0; tc < TC_MAX_QUEUE; tc++) + if (txq - tc_to_txq[tc].offset < tc_to_txq[tc].count) + return tc; + + WARN(1, "Unexpected TCs configuration. No match found for txq %u", txq); + return -ENOENT; + } + + static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix, + u32 *hw_id) + { + int tc; + + if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL || + !params->mqprio.channel.rl) { + *hw_id = 0; + return 0; + } + + tc = mlx5e_mqprio_txq_to_tc(params->mqprio.tc_to_txq, txq_ix); + if (tc < 0) + return tc; + + return mlx5e_mqprio_rl_get_node_hw_id(params->mqprio.channel.rl, tc, hw_id); + } + static int mlx5e_open_sqs(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_channel_param *cparam) @@@ -1712,9 -1743,16 +1742,16 @@@
for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) { int txq_ix = c->ix + tc * params->num_channels; + u32 qos_queue_group_id; + + err = mlx5e_txq_get_qos_node_hw_id(params, txq_ix, &qos_queue_group_id); + if (err) + goto err_close_sqs;
err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, - params, &cparam->txq_sq, &c->sq[tc], tc, 0, 0); + params, &cparam->txq_sq, &c->sq[tc], tc, + qos_queue_group_id, + &c->priv->channel_stats[c->ix].sq[tc]); if (err) goto err_close_sqs; } @@@ -1990,7 -2028,7 +2027,7 @@@ static int mlx5e_open_channel(struct ml c->cpu = cpu; c->pdev = mlx5_core_dma_dev(priv->mdev); c->netdev = priv->netdev; - c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key); + c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey); c->num_tc = mlx5e_get_dcb_num_tc(params); c->xdp = !!params->xdp_prog; c->stats = &priv->channel_stats[ix].ch; @@@ -2339,6 -2377,13 +2376,13 @@@ static int mlx5e_update_netdev_queues(s netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err); goto err_txqs; } + if (priv->mqprio_rl != priv->channels.params.mqprio.channel.rl) { + if (priv->mqprio_rl) { + mlx5e_mqprio_rl_cleanup(priv->mqprio_rl); + mlx5e_mqprio_rl_free(priv->mqprio_rl); + } + priv->mqprio_rl = priv->channels.params.mqprio.channel.rl; + }
return 0;
@@@ -2900,15 -2945,18 +2944,18 @@@ static void mlx5e_params_mqprio_dcb_set { params->mqprio.mode = TC_MQPRIO_MODE_DCB; params->mqprio.num_tc = num_tc; + params->mqprio.channel.rl = NULL; mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc, params->num_channels); }
static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params, - struct tc_mqprio_qopt *qopt) + struct tc_mqprio_qopt *qopt, + struct mlx5e_mqprio_rl *rl) { params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL; params->mqprio.num_tc = qopt->num_tc; + params->mqprio.channel.rl = rl; mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt); }
@@@ -2968,9 -3016,13 +3015,13 @@@ static int mlx5e_mqprio_channel_validat netdev_err(netdev, "Min tx rate is not supported\n"); return -EINVAL; } + if (mqprio->max_rate[i]) { - netdev_err(netdev, "Max tx rate is not supported\n"); - return -EINVAL; + int err; + + err = mlx5e_qos_bytes_rate_check(priv->mdev, mqprio->max_rate[i]); + if (err) + return err; }
if (mqprio->qopt.offset[i] != agg_count) { @@@ -2989,11 -3041,22 +3040,22 @@@ return 0; }
+ static bool mlx5e_mqprio_rate_limit(struct tc_mqprio_qopt_offload *mqprio) + { + int tc; + + for (tc = 0; tc < mqprio->qopt.num_tc; tc++) + if (mqprio->max_rate[tc]) + return true; + return false; + } + static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv, struct tc_mqprio_qopt_offload *mqprio) { mlx5e_fp_preactivate preactivate; struct mlx5e_params new_params; + struct mlx5e_mqprio_rl *rl; bool nch_changed; int err;
@@@ -3001,13 -3064,32 +3063,32 @@@ if (err) return err;
+ rl = NULL; + if (mlx5e_mqprio_rate_limit(mqprio)) { + rl = mlx5e_mqprio_rl_alloc(); + if (!rl) + return -ENOMEM; + err = mlx5e_mqprio_rl_init(rl, priv->mdev, mqprio->qopt.num_tc, + mqprio->max_rate); + if (err) { + mlx5e_mqprio_rl_free(rl); + return err; + } + } + new_params = priv->channels.params; - mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt); + mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt, rl);
nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1; preactivate = nch_changed ? mlx5e_num_channels_changed_ctx : mlx5e_update_netdev_queues_ctx; - return mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true); + err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true); + if (err && rl) { + mlx5e_mqprio_rl_cleanup(rl); + mlx5e_mqprio_rl_free(rl); + } + + return err; }
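Note the ownership pattern in mlx5e_setup_tc_mqprio_channel() above: the rate limiter is allocated and initialized before the switch, handed to the candidate params, and torn down here only when mlx5e_safe_switch_params() fails; on success it becomes part of the active params and is released later by mlx5e_update_netdev_queues() or mlx5e_priv_cleanup(), as seen earlier in this diff. A condensed sketch of that control flow, with trivial stand-ins for the driver helpers:

#include <errno.h>
#include <stdlib.h>

/* Trivial stand-ins for mlx5e_mqprio_rl_{alloc,init,cleanup,free} and
 * mlx5e_safe_switch_params(); stubs only, the point is the control flow. */
struct rl { int hw_id; };
static struct rl *rl_alloc(void) { return calloc(1, sizeof(struct rl)); }
static int rl_init(struct rl *rl) { rl->hw_id = 1; return 0; }
static void rl_cleanup(struct rl *rl) { rl->hw_id = 0; }
static void rl_free(struct rl *rl) { free(rl); }
static int switch_params(struct rl *rl) { (void)rl; return 0; }

static int setup_with_optional_rl(int want_rl)
{
	struct rl *rl = NULL;
	int err;

	if (want_rl) {
		rl = rl_alloc();
		if (!rl)
			return -ENOMEM;
		err = rl_init(rl);
		if (err) {
			rl_free(rl);		/* init failed: nothing to undo */
			return err;
		}
	}

	err = switch_params(rl);
	if (err && rl) {
		rl_cleanup(rl);			/* switch failed: we still own rl */
		rl_free(rl);
	}
	return err;				/* on success the active params own rl */
}

int main(void) { return setup_with_optional_rl(1); }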
static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, @@@ -3225,7 -3307,7 +3306,7 @@@ static int mlx5e_set_mac(struct net_dev return -EADDRNOTAVAIL;
netif_addr_lock_bh(netdev); - ether_addr_copy(netdev->dev_addr, saddr->sa_data); + eth_hw_addr_set(netdev, saddr->sa_data); netif_addr_unlock_bh(netdev);
mlx5e_nic_set_rx_mode(priv); @@@ -4350,13 -4432,17 +4431,17 @@@ void mlx5e_build_nic_params(struct mlx5 static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); + u8 addr[ETH_ALEN];
- mlx5_query_mac_address(priv->mdev, netdev->dev_addr); - if (is_zero_ether_addr(netdev->dev_addr) && + mlx5_query_mac_address(priv->mdev, addr); + if (is_zero_ether_addr(addr) && !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) { eth_hw_addr_random(netdev); mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr); + return; } + + eth_hw_addr_set(netdev, addr); }
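This hunk is part of the tree-wide conversion visible throughout this merge (lan743x, lpc_eth, smc91x, lan78xx and xen-netfront below get the same treatment): netdev->dev_addr is no longer written directly, so drivers read the address into a local buffer and install it with eth_hw_addr_set(). A minimal sketch of the pattern, assuming a hypothetical query_hw_mac() for the device-specific read:

#include <linux/etherdevice.h>

/* query_hw_mac() is hypothetical: a device-specific register read. */
static void query_hw_mac(struct net_device *netdev, u8 *addr);

static void example_set_mac(struct net_device *netdev)
{
	u8 addr[ETH_ALEN];

	query_hw_mac(netdev, addr);		/* read into a local buffer... */
	if (!is_valid_ether_addr(addr)) {
		eth_hw_addr_random(netdev);	/* fall back to a random MAC */
		return;
	}
	eth_hw_addr_set(netdev, addr);		/* ...never write dev_addr directly */
}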
static int mlx5e_vxlan_set_port(struct net_device *netdev, unsigned int table, @@@ -4861,6 -4947,11 +4946,11 @@@ void mlx5e_priv_cleanup(struct mlx5e_pr kfree(priv->htb.qos_sq_stats[i]); kvfree(priv->htb.qos_sq_stats);
+ if (priv->mqprio_rl) { + mlx5e_mqprio_rl_cleanup(priv->mqprio_rl); + mlx5e_mqprio_rl_free(priv->mqprio_rl); + } + memset(priv, 0, sizeof(*priv)); }
diff --combined drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 71a08f84d49d,873efde0d458..386ab9a2d490 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@@ -99,9 -99,6 +99,9 @@@ #define LEFTOVERS_NUM_LEVELS 1 #define LEFTOVERS_NUM_PRIOS 1
+#define RDMA_RX_COUNTERS_PRIO_NUM_LEVELS 1 +#define RDMA_TX_COUNTERS_PRIO_NUM_LEVELS 1 + #define BY_PASS_PRIO_NUM_LEVELS 1 #define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ LEFTOVERS_NUM_PRIOS) @@@ -209,63 -206,34 +209,63 @@@ static struct init_tree_node egress_roo } };
-#define RDMA_RX_BYPASS_PRIO 0 -#define RDMA_RX_KERNEL_PRIO 1 +enum { + RDMA_RX_COUNTERS_PRIO, + RDMA_RX_BYPASS_PRIO, + RDMA_RX_KERNEL_PRIO, +}; + +#define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS +#define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1) +#define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2) + static struct init_tree_node rdma_rx_root_fs = { .type = FS_TYPE_NAMESPACE, - .ar_size = 2, + .ar_size = 3, .children = (struct init_tree_node[]) { + [RDMA_RX_COUNTERS_PRIO] = + ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0, + FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(MLX5_RDMA_RX_NUM_COUNTERS_PRIOS, + RDMA_RX_COUNTERS_PRIO_NUM_LEVELS))), [RDMA_RX_BYPASS_PRIO] = - ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0, + ADD_PRIO(0, RDMA_RX_BYPASS_MIN_LEVEL, 0, FS_CHAINING_CAPS, ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS, BY_PASS_PRIO_NUM_LEVELS))), [RDMA_RX_KERNEL_PRIO] = - ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0, + ADD_PRIO(0, RDMA_RX_KERNEL_MIN_LEVEL, 0, FS_CHAINING_CAPS, ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN, ADD_MULTIPLE_PRIO(1, 1))), } };
+enum { + RDMA_TX_COUNTERS_PRIO, + RDMA_TX_BYPASS_PRIO, +}; + +#define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS +#define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1) + static struct init_tree_node rdma_tx_root_fs = { .type = FS_TYPE_NAMESPACE, - .ar_size = 1, + .ar_size = 2, .children = (struct init_tree_node[]) { - ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0, + [RDMA_TX_COUNTERS_PRIO] = + ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0, + FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS, + RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))), + [RDMA_TX_BYPASS_PRIO] = + ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0, FS_CHAINING_CAPS_RDMA_TX, ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, - ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, + ADD_MULTIPLE_PRIO(RDMA_TX_BYPASS_MIN_LEVEL, BY_PASS_PRIO_NUM_LEVELS))), } }; @@@ -2223,6 -2191,10 +2223,10 @@@ struct mlx5_flow_namespace *mlx5_get_fl if (steering->fdb_root_ns) return &steering->fdb_root_ns->ns; return NULL; + case MLX5_FLOW_NAMESPACE_PORT_SEL: + if (steering->port_sel_root_ns) + return &steering->port_sel_root_ns->ns; + return NULL; case MLX5_FLOW_NAMESPACE_SNIFFER_RX: if (steering->sniffer_rx_root_ns) return &steering->sniffer_rx_root_ns->ns; @@@ -2247,12 -2219,6 +2251,12 @@@ prio = RDMA_RX_KERNEL_PRIO; } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) { root_ns = steering->rdma_tx_root_ns; + } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS) { + root_ns = steering->rdma_rx_root_ns; + prio = RDMA_RX_COUNTERS_PRIO; + } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS) { + root_ns = steering->rdma_tx_root_ns; + prio = RDMA_TX_COUNTERS_PRIO; } else { /* Must be NIC RX */ root_ns = steering->root_ns; prio = type; @@@ -2634,6 -2600,7 +2638,7 @@@ void mlx5_cleanup_fs(struct mlx5_core_d steering->fdb_root_ns = NULL; kfree(steering->fdb_sub_ns); steering->fdb_sub_ns = NULL; + cleanup_root_ns(steering->port_sel_root_ns); cleanup_root_ns(steering->sniffer_rx_root_ns); cleanup_root_ns(steering->sniffer_tx_root_ns); cleanup_root_ns(steering->rdma_rx_root_ns); @@@ -2672,6 -2639,21 +2677,21 @@@ static int init_sniffer_rx_root_ns(stru return PTR_ERR_OR_ZERO(prio); }
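With MLX5_BY_PASS_NUM_REGULAR_PRIOS = 16 (the define appears in the mlx5/device.h hunk later in this diff), the RDMA RX min levels introduced above evaluate to bypass = 16, kernel = 17 and counters = 19; the +2 of headroom over the kernel level is taken as given, the diff does not spell out its rationale. Also note the TX bypass prio count now reuses RDMA_TX_BYPASS_MIN_LEVEL, which is simply MLX5_BY_PASS_NUM_PRIOS under a new name. A compile-time restatement of the RX arithmetic, values copied from the defines above:

#include <assert.h>

#define MLX5_BY_PASS_NUM_REGULAR_PRIOS	16	/* from the device.h hunk below */

#define RDMA_RX_BYPASS_MIN_LEVEL	MLX5_BY_PASS_NUM_REGULAR_PRIOS
#define RDMA_RX_KERNEL_MIN_LEVEL	(RDMA_RX_BYPASS_MIN_LEVEL + 1)
#define RDMA_RX_COUNTERS_MIN_LEVEL	(RDMA_RX_KERNEL_MIN_LEVEL + 2)

/* Counter tables sit at a higher min level than both bypass and kernel,
 * placing them after those namespaces in the RX chain. */
static_assert(RDMA_RX_BYPASS_MIN_LEVEL == 16, "bypass level");
static_assert(RDMA_RX_KERNEL_MIN_LEVEL == 17, "kernel level");
static_assert(RDMA_RX_COUNTERS_MIN_LEVEL == 19, "counters level");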
+ #define PORT_SEL_NUM_LEVELS 3 + static int init_port_sel_root_ns(struct mlx5_flow_steering *steering) + { + struct fs_prio *prio; + + steering->port_sel_root_ns = create_root_ns(steering, FS_FT_PORT_SEL); + if (!steering->port_sel_root_ns) + return -ENOMEM; + + /* Create single prio */ + prio = fs_create_prio(&steering->port_sel_root_ns->ns, 0, + PORT_SEL_NUM_LEVELS); + return PTR_ERR_OR_ZERO(prio); + } + static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering) { int err; @@@ -3058,6 -3040,12 +3078,12 @@@ int mlx5_init_fs(struct mlx5_core_dev * goto err; }
+ if (MLX5_CAP_FLOWTABLE_PORT_SELECTION(dev, ft_support)) { + err = init_port_sel_root_ns(steering); + if (err) + goto err; + } + if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) && MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) { err = init_rdma_rx_root_ns(steering); @@@ -3262,6 -3250,52 +3288,52 @@@ void mlx5_packet_reformat_dealloc(struc } EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
+ int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer) + { + return definer->id; + } + + struct mlx5_flow_definer * + mlx5_create_match_definer(struct mlx5_core_dev *dev, + enum mlx5_flow_namespace_type ns_type, u16 format_id, + u32 *match_mask) + { + struct mlx5_flow_root_namespace *root; + struct mlx5_flow_definer *definer; + int id; + + root = get_root_namespace(dev, ns_type); + if (!root) + return ERR_PTR(-EOPNOTSUPP); + + definer = kzalloc(sizeof(*definer), GFP_KERNEL); + if (!definer) + return ERR_PTR(-ENOMEM); + + definer->ns_type = ns_type; + id = root->cmds->create_match_definer(root, format_id, match_mask); + if (id < 0) { + mlx5_core_warn(root->dev, "Failed to create match definer (%d)\n", id); + kfree(definer); + return ERR_PTR(id); + } + definer->id = id; + return definer; + } + + void mlx5_destroy_match_definer(struct mlx5_core_dev *dev, + struct mlx5_flow_definer *definer) + { + struct mlx5_flow_root_namespace *root; + + root = get_root_namespace(dev, definer->ns_type); + if (WARN_ON(!root)) + return; + + root->cmds->destroy_match_definer(root, definer->id); + kfree(definer); + } + int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_root_namespace *peer_ns) { diff --combined drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 6c67185c05c1,73fed94af09a..3f47d2b3b6e6 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@@ -4,7 -4,7 +4,7 @@@ #ifndef _DR_TYPES_ #define _DR_TYPES_
- #include <linux/mlx5/driver.h> + #include <linux/mlx5/vport.h> #include <linux/refcount.h> #include "fs_core.h" #include "wq.h" @@@ -14,7 -14,6 +14,6 @@@
#define DR_RULE_MAX_STES 18 #define DR_ACTION_MAX_STES 5 - #define WIRE_PORT 0xFFFF #define DR_STE_SVLAN 0x1 #define DR_STE_CVLAN 0x2 #define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4) @@@ -752,9 -751,9 +751,9 @@@ struct mlx5dr_esw_caps struct mlx5dr_cmd_vport_cap { u16 vport_gvmi; u16 vhca_gvmi; + u16 num; u64 icm_address_rx; u64 icm_address_tx; - u32 num; };
struct mlx5dr_roce_cap { @@@ -763,6 -762,11 +762,11 @@@ u8 fl_rc_qp_when_roce_enabled:1; };
+ struct mlx5dr_vports { + struct mlx5dr_cmd_vport_cap esw_manager_caps; + struct xarray vports_caps_xa; + }; + struct mlx5dr_cmd_caps { u16 gvmi; u64 nic_rx_drop_address; @@@ -786,7 -790,6 +790,6 @@@ u8 flex_parser_id_gtpu_first_ext_dw_0; u8 max_ft_level; u16 roce_min_src_udp; - u8 num_esw_ports; u8 sw_format_ver; bool eswitch_manager; bool rx_sw_owner; @@@ -795,11 -798,11 +798,11 @@@ u8 rx_sw_owner_v2:1; u8 tx_sw_owner_v2:1; u8 fdb_sw_owner_v2:1; - u32 num_vports; struct mlx5dr_esw_caps esw_caps; - struct mlx5dr_cmd_vport_cap *vports_caps; + struct mlx5dr_vports vports; bool prio_tag_required; struct mlx5dr_roce_cap roce_caps; + u8 is_ecpf:1; u8 isolate_vl_tc:1; };
@@@ -826,10 -829,6 +829,6 @@@ struct mlx5dr_domain_info struct mlx5dr_cmd_caps caps; };
- struct mlx5dr_domain_cache { - struct mlx5dr_fw_recalc_cs_ft **recalc_cs_ft; - }; - struct mlx5dr_domain { struct mlx5dr_domain *peer_dmn; struct mlx5_core_dev *mdev; @@@ -841,7 -840,7 +840,7 @@@ struct mlx5dr_icm_pool *action_icm_pool; struct mlx5dr_send_ring *send_ring; struct mlx5dr_domain_info info; - struct mlx5dr_domain_cache cache; + struct xarray csum_fts_xa; struct mlx5dr_ste_ctx *ste_ctx; };
@@@ -942,7 -941,7 +941,7 @@@ struct mlx5dr_action_dest_tbl
struct mlx5dr_action_ctr { u32 ctr_id; - u32 offeset; + u32 offset; };
struct mlx5dr_action_vport { @@@ -1102,18 -1101,8 +1101,8 @@@ mlx5dr_ste_htbl_may_grow(struct mlx5dr_ return true; }
- static inline struct mlx5dr_cmd_vport_cap * - mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport) - { - if (!caps->vports_caps || - (vport >= caps->num_vports && vport != WIRE_PORT)) - return NULL; - - if (vport == WIRE_PORT) - vport = caps->num_vports; - - return &caps->vports_caps[vport]; - } + struct mlx5dr_cmd_vport_cap * + mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport);
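The removed inline above indexed a fixed vports_caps array and overloaded the 0xFFFF WIRE_PORT sentinel (both also deleted in this diff); lookups now go through mlx5dr_domain_get_vport_cap(), with struct mlx5dr_vports keeping the eswitch-manager caps out-of-line and every other vport in an xarray keyed by the u16 vport number. A sketch of the lookup shape this implies (the body is assumed, it is not part of this diff):

#include <linux/xarray.h>
#include "dr_types.h"	/* struct definitions from the hunks above */

/* Assumed shape of the out-of-line lookup: the eswitch manager keeps its
 * caps in esw_manager_caps, all other vports live in vports_caps_xa keyed
 * by the u16 vport number (no WIRE_PORT sentinel needed). */
static struct mlx5dr_cmd_vport_cap *
example_get_vport_cap(struct mlx5dr_vports *vports, u16 vport,
		      bool is_esw_manager)
{
	if (is_esw_manager)
		return &vports->esw_manager_caps;

	return xa_load(&vports->vports_caps_xa, vport);
}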
struct mlx5dr_cmd_query_flow_table_details { u8 status; @@@ -1154,7 -1143,7 +1143,7 @@@ int mlx5dr_cmd_set_fte_modify_and_vport u32 table_id, u32 group_id, u32 modify_header_id, - u32 vport_id); + u16 vport_id); int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev, u32 table_type, u32 table_id); @@@ -1275,7 -1264,7 +1264,7 @@@ struct mlx5dr_cq
struct mlx5dr_mr { struct mlx5_core_dev *mdev; - struct mlx5_core_mkey mkey; + u32 mkey; dma_addr_t dma_addr; void *addr; size_t size; @@@ -1372,12 -1361,12 +1361,12 @@@ struct mlx5dr_fw_recalc_cs_ft };
struct mlx5dr_fw_recalc_cs_ft * - mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num); + mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num); void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn, struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft); - int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn, - u32 vport_num, - u64 *rx_icm_addr); + int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn, + u16 vport_num, + u64 *rx_icm_addr); int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn, struct mlx5dr_cmd_flow_destination_hw_info *dest, int num_dest, diff --combined drivers/net/ethernet/microchip/lan743x_main.c index c4f32c7e0db1,03d02403c19e..9d1091237567 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@@ -816,7 -816,7 +816,7 @@@ static int lan743x_mac_init(struct lan7 eth_random_addr(adapter->mac_address); } lan743x_mac_set_address(adapter, adapter->mac_address); - ether_addr_copy(netdev->dev_addr, adapter->mac_address); + eth_hw_addr_set(netdev, adapter->mac_address);
return 0; } @@@ -1743,16 -1743,6 +1743,16 @@@ static int lan743x_tx_ring_init(struct ret = -EINVAL; goto cleanup; } + if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev, + DMA_BIT_MASK(64))) { + if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev, + DMA_BIT_MASK(32))) { + dev_warn(&tx->adapter->pdev->dev, + "lan743x_: No suitable DMA available\n"); + ret = -ENOMEM; + goto cleanup; + } + } ring_allocation_size = ALIGN(tx->ring_size * sizeof(struct lan743x_tx_descriptor), PAGE_SIZE); @@@ -2286,16 -2276,6 +2286,16 @@@ static int lan743x_rx_ring_init(struct ret = -EINVAL; goto cleanup; } + if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev, + DMA_BIT_MASK(64))) { + if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev, + DMA_BIT_MASK(32))) { + dev_warn(&rx->adapter->pdev->dev, + "lan743x_: No suitable DMA available\n"); + ret = -ENOMEM; + goto cleanup; + } + } ring_allocation_size = ALIGN(rx->ring_size * sizeof(struct lan743x_rx_descriptor), PAGE_SIZE); @@@ -2665,7 -2645,7 +2665,7 @@@ static int lan743x_netdev_set_mac_addre ret = eth_prepare_mac_addr_change(netdev, sock_addr); if (ret) return ret; - ether_addr_copy(netdev->dev_addr, sock_addr->sa_data); + eth_hw_addr_set(netdev, sock_addr->sa_data); lan743x_mac_set_address(adapter, sock_addr->sa_data); lan743x_rfe_update_mac_address(adapter); return 0; @@@ -3039,8 -3019,6 +3039,8 @@@ static int lan743x_pm_resume(struct dev if (ret) { netif_err(adapter, probe, adapter->netdev, "lan743x_hardware_init returned %d\n", ret); + lan743x_pci_cleanup(adapter); + return ret; }
/* open netdev when netdev is at running state while resume. diff --combined drivers/net/ethernet/nxp/lpc_eth.c index c910fa2f40a4,a63cc295b979..bc39558fe82b --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@@ -419,7 -419,7 +419,7 @@@ struct netdata_local /* * MAC support functions */ - static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac) + static void __lpc_set_mac(struct netdata_local *pldat, const u8 *mac) { u32 tmp;
@@@ -1015,6 -1015,9 +1015,6 @@@ static int lpc_eth_close(struct net_dev napi_disable(&pldat->napi); netif_stop_queue(ndev);
- if (ndev->phydev) - phy_stop(ndev->phydev); - spin_lock_irqsave(&pldat->lock, flags); __lpc_eth_reset(pldat); netif_carrier_off(ndev); @@@ -1022,8 -1025,6 +1022,8 @@@ writel(0, LPC_ENET_MAC2(pldat->net_base)); spin_unlock_irqrestore(&pldat->lock, flags);
+ if (ndev->phydev) + phy_stop(ndev->phydev); clk_disable_unprepare(pldat->clk);
return 0; @@@ -1092,7 -1093,7 +1092,7 @@@ static int lpc_set_mac_address(struct n
if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; - memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(ndev, addr->sa_data);
spin_lock_irqsave(&pldat->lock, flags);
@@@ -1231,6 -1232,7 +1231,7 @@@ static int lpc_eth_drv_probe(struct pla struct net_device *ndev; dma_addr_t dma_handle; struct resource *res; + u8 addr[ETH_ALEN]; int irq, ret;
/* Setup network interface for RMII or MII mode */ @@@ -1346,10 -1348,11 +1347,11 @@@ pldat->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* Get MAC address from current HW setting (POR state is all zeros) */ - __lpc_get_mac(pldat, ndev->dev_addr); + __lpc_get_mac(pldat, addr); + eth_hw_addr_set(ndev, addr);
if (!is_valid_ether_addr(ndev->dev_addr)) { - of_get_mac_address(np, ndev->dev_addr); + of_get_ethdev_address(np, ndev); } if (!is_valid_ether_addr(ndev->dev_addr)) eth_hw_addr_random(ndev); diff --combined drivers/net/ethernet/smsc/smc91x.c index e4fc6484faa8,a31c159e96ea..96a9400b5737 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@@ -1851,6 -1851,7 +1851,7 @@@ static int smc_probe(struct net_device int retval; unsigned int val, revision_register; const char *version_string; + u8 addr[ETH_ALEN];
DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
@@@ -1922,7 -1923,8 +1923,8 @@@
/* Get the MAC address */ SMC_SELECT_BANK(lp, 1); - SMC_GET_MAC_ADDR(lp, dev->dev_addr); + SMC_GET_MAC_ADDR(lp, addr); + eth_hw_addr_set(dev, addr);
/* now, reset the chip, and put it into a known state */ smc_reset(dev); @@@ -2190,7 -2192,6 +2192,7 @@@ static const struct of_device_id smc91x }; MODULE_DEVICE_TABLE(of, smc91x_match);
+#if defined(CONFIG_GPIOLIB) /** * try_toggle_control_gpio - configure a gpio if it exists * @dev: net device @@@ -2221,15 -2222,6 +2223,15 @@@ static int try_toggle_control_gpio(stru
return 0; } +#else +static int try_toggle_control_gpio(struct device *dev, + struct gpio_desc **desc, + const char *name, int index, + int value, unsigned int nsdelay) +{ + return 0; +} +#endif #endif
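The smc91x hunk above is the standard pattern for optional-subsystem consumers: the real try_toggle_control_gpio() is only built under CONFIG_GPIOLIB, and a stub that reports success keeps the caller free of #ifdefs. The same shape in general form (a sketch; the GPIO name and flags are illustrative):

#include <linux/err.h>
#include <linux/gpio/consumer.h>

#if defined(CONFIG_GPIOLIB)
static int try_toggle_gpio(struct device *dev, const char *name)
{
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get_optional(dev, name, GPIOD_OUT_LOW);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);
	if (gpio)
		gpiod_set_value_cansleep(gpio, 1);
	return 0;
}
#else
/* No GPIOLIB: nothing to toggle, report success so the caller stays
 * free of #ifdefs. */
static int try_toggle_gpio(struct device *dev, const char *name)
{
	return 0;
}
#endif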
/* diff --combined drivers/net/usb/lan78xx.c index 63cd72c5f580,03319fdb5235..f20376c1ef3f --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@@ -1817,7 -1817,7 +1817,7 @@@ static void lan78xx_init_mac_address(st lan78xx_write_reg(dev, MAF_LO(0), addr_lo); lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
- ether_addr_copy(dev->net->dev_addr, addr); + eth_hw_addr_set(dev->net, addr); }
/* MDIO read and write wrappers for phylib */ @@@ -2416,7 -2416,7 +2416,7 @@@ static int lan78xx_set_mac_addr(struct if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL;
- ether_addr_copy(netdev->dev_addr, addr->sa_data); + eth_hw_addr_set(netdev, addr->sa_data);
addr_lo = netdev->dev_addr[0] | netdev->dev_addr[1] << 8 | @@@ -4122,12 -4122,6 +4122,12 @@@ static int lan78xx_probe(struct usb_int
dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
+ /* Reject broken descriptors. */ + if (dev->maxpacket == 0) { + ret = -ENODEV; + goto out4; + } + /* driver requires remote-wakeup capability during autosuspend. */ intf->needs_remote_wakeup = 1;
diff --combined drivers/net/xen-netfront.c index fc41ba95f81d,57437e4b8a94..911f43986a8c --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@@ -1730,10 -1730,6 +1730,10 @@@ static int netfront_resume(struct xenbu
dev_dbg(&dev->dev, "%s\n", dev->nodename);
+ netif_tx_lock_bh(info->netdev); + netif_device_detach(info->netdev); + netif_tx_unlock_bh(info->netdev); + xennet_disconnect_backend(info); return 0; } @@@ -2161,6 -2157,7 +2161,7 @@@ static int talk_to_netback(struct xenbu unsigned int max_queues = 0; struct netfront_queue *queue = NULL; unsigned int num_queues = 1; + u8 addr[ETH_ALEN];
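Detaching under netif_tx_lock_bh() before xennet_disconnect_backend() guarantees no transmit path can touch the rings while they are torn down; the matching netif_device_attach() lands in xennet_connect() further down, right before netif_carrier_on(). The pairing in outline:

#include <linux/netdevice.h>

static void example_resume_detach(struct net_device *netdev)
{
	/* Quiesce the TX path before tearing the rings down: detaching under
	 * the TX lock guarantees no xmit handler is in flight afterwards. */
	netif_tx_lock_bh(netdev);
	netif_device_detach(netdev);
	netif_tx_unlock_bh(netdev);
}

static void example_connect_attach(struct net_device *netdev)
{
	/* Re-enable transmits only once the backend rings exist again. */
	netif_tx_lock_bh(netdev);
	netif_device_attach(netdev);
	netif_tx_unlock_bh(netdev);
	netif_carrier_on(netdev);
}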
info->netdev->irq = 0;
@@@ -2174,11 -2171,12 +2175,12 @@@ "feature-split-event-channels", 0);
/* Read mac addr. */ - err = xen_net_read_mac(dev, info->netdev->dev_addr); + err = xen_net_read_mac(dev, addr); if (err) { xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); goto out_unlocked; } + eth_hw_addr_set(info->netdev, addr);
info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend, "feature-xdp-headroom", 0); @@@ -2353,10 -2351,6 +2355,10 @@@ static int xennet_connect(struct net_de * domain a kick because we've probably just requeued some * packets. */ + netif_tx_lock_bh(np->netdev); + netif_device_attach(np->netdev); + netif_tx_unlock_bh(np->netdev); + netif_carrier_on(np->netdev); for (j = 0; j < num_queues; ++j) { queue = &np->queues[j]; diff --combined drivers/soc/fsl/dpio/dpio-service.c index 4d119e98ce7c,3fd0d0840287..1d2b27e3ea63 --- a/drivers/soc/fsl/dpio/dpio-service.c +++ b/drivers/soc/fsl/dpio/dpio-service.c @@@ -12,6 -12,7 +12,7 @@@ #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> + #include <linux/dim.h> #include <linux/slab.h>
#include "dpio.h" @@@ -28,6 -29,14 +29,14 @@@ struct dpaa2_io spinlock_t lock_notifications; struct list_head notifications; struct device *dev; + + /* Net DIM */ + struct dim rx_dim; + /* protect against concurrent Net DIM updates */ + spinlock_t dim_lock; + u16 event_ctr; + u64 bytes; + u64 frames; };
struct dpaa2_io_store { @@@ -59,7 -68,7 +68,7 @@@ static inline struct dpaa2_io *service_ * potentially being migrated away. */ if (cpu < 0) - cpu = smp_processor_id(); + cpu = raw_smp_processor_id();
/* If a specific cpu was requested, pick it up immediately */ return dpio_by_cpu[cpu]; @@@ -100,6 -109,17 +109,17 @@@ struct dpaa2_io *dpaa2_io_service_selec } EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
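The switch to raw_smp_processor_id() above matters because dpaa2_io_service_select() can run in preemptible context: the checked smp_processor_id() would warn there, while the raw variant is harmless here since, as the comment notes, migrating right after the read only costs portal affinity, not correctness. The idiom:

#include <linux/smp.h>

struct dpaa2_io;	/* only pointers are handled here */

/* Pick a per-CPU object when the caller does not insist on a CPU: the raw_
 * variant skips the preemption check because a stale CPU number is
 * acceptable, any portal can issue the command and affinity is best-effort. */
static struct dpaa2_io *example_select(struct dpaa2_io **by_cpu, int cpu)
{
	if (cpu < 0)
		cpu = raw_smp_processor_id();
	return by_cpu[cpu];
}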
+ static void dpaa2_io_dim_work(struct work_struct *w) + { + struct dim *dim = container_of(w, struct dim, work); + struct dim_cq_moder moder = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + struct dpaa2_io *d = container_of(dim, struct dpaa2_io, rx_dim); + + dpaa2_io_set_irq_coalescing(d, moder.usec); + dim->state = DIM_START_MEASURE; + } + /** * dpaa2_io_create() - create a dpaa2_io object. * @desc: the dpaa2_io descriptor @@@ -114,6 -134,7 +134,7 @@@ struct dpaa2_io *dpaa2_io_create(const struct device *dev) { struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL); + u32 qman_256_cycles_per_ns;
if (!obj) return NULL; @@@ -127,7 -148,15 +148,15 @@@ obj->dpio_desc = *desc; obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena; obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh; + obj->swp_desc.qman_clk = obj->dpio_desc.qman_clk; obj->swp_desc.qman_version = obj->dpio_desc.qman_version; + + /* Compute how many ns elapse over 256 QBMAN clock cycles. This is because + * the interrupt timeout period register needs to be specified in QBMAN + * clock cycles in increments of 256. + */ + qman_256_cycles_per_ns = 256000 / (obj->swp_desc.qman_clk / 1000000); + obj->swp_desc.qman_256_cycles_per_ns = qman_256_cycles_per_ns; obj->swp = qbman_swp_init(&obj->swp_desc);
if (!obj->swp) { @@@ -138,6 -167,7 +167,7 @@@ INIT_LIST_HEAD(&obj->node); spin_lock_init(&obj->lock_mgmt_cmd); spin_lock_init(&obj->lock_notifications); + spin_lock_init(&obj->dim_lock); INIT_LIST_HEAD(&obj->notifications);
/* For now only enable DQRR interrupts */ @@@ -155,6 -185,12 +185,12 @@@
obj->dev = dev;
+ memset(&obj->rx_dim, 0, sizeof(obj->rx_dim)); + INIT_WORK(&obj->rx_dim.work, dpaa2_io_dim_work); + obj->event_ctr = 0; + obj->bytes = 0; + obj->frames = 0; + return obj; }
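Despite its name, qman_256_cycles_per_ns above holds the number of nanoseconds spanned by 256 QBMAN clock cycles. With a hypothetical 700 MHz qman_clk: 256000 / (700000000 / 1000000) = 256000 / 700 = 365 ns per 256-cycle increment, so a 10 us holdoff later converts to (10 * 1000) / 365 = 27 register increments inside qbman_swp_set_irq_coalescing(). The same arithmetic, standalone:

#include <stdio.h>

int main(void)
{
	unsigned int qman_clk = 700000000;	/* assumed 700 MHz, illustration only */
	unsigned int ns_per_256_cycles = 256000 / (qman_clk / 1000000);
	unsigned int irq_holdoff_us = 10;

	/* The same conversion qbman_swp_set_irq_coalescing() performs. */
	unsigned int itp = (irq_holdoff_us * 1000) / ns_per_256_cycles;

	printf("256 cycles = %u ns, 10 us holdoff = %u increments\n",
	       ns_per_256_cycles, itp);	/* prints 365 and 27 */
	return 0;
}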
@@@ -194,6 -230,8 +230,8 @@@ irqreturn_t dpaa2_io_irq(struct dpaa2_i struct qbman_swp *swp; u32 status;
+ obj->event_ctr++; + swp = obj->swp; status = qbman_swp_interrupt_read_status(swp); if (!status) @@@ -462,7 -500,7 +500,7 @@@ int dpaa2_io_service_enqueue_multiple_f qbman_eq_desc_set_no_orp(&ed, 0); qbman_eq_desc_set_fq(&ed, fqid);
- return qbman_swp_enqueue_multiple(d->swp, &ed, fd, 0, nb); + return qbman_swp_enqueue_multiple(d->swp, &ed, fd, NULL, nb); } EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);
@@@ -779,3 -817,82 +817,82 @@@ int dpaa2_io_query_bp_count(struct dpaa return 0; } EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count); + + /** + * dpaa2_io_set_irq_coalescing() - Set new IRQ coalescing values + * @d: the given DPIO object + * @irq_holdoff: interrupt holdoff (timeout) period in us + * + * Return 0 for success, or negative error code on error. + */ + int dpaa2_io_set_irq_coalescing(struct dpaa2_io *d, u32 irq_holdoff) + { + struct qbman_swp *swp = d->swp; + + return qbman_swp_set_irq_coalescing(swp, swp->dqrr.dqrr_size - 1, + irq_holdoff); + } + EXPORT_SYMBOL(dpaa2_io_set_irq_coalescing); + + /** + * dpaa2_io_get_irq_coalescing() - Get the current IRQ coalescing parameters + * @d: the given DPIO object + * @irq_holdoff: interrupt holdoff (timeout) period in us + */ + void dpaa2_io_get_irq_coalescing(struct dpaa2_io *d, u32 *irq_holdoff) + { + struct qbman_swp *swp = d->swp; + + qbman_swp_get_irq_coalescing(swp, NULL, irq_holdoff); + } + EXPORT_SYMBOL(dpaa2_io_get_irq_coalescing); + + /** + * dpaa2_io_set_adaptive_coalescing() - Enable/disable adaptive coalescing + * @d: the given DPIO object + * @use_adaptive_rx_coalesce: adaptive coalescing state + */ + void dpaa2_io_set_adaptive_coalescing(struct dpaa2_io *d, + int use_adaptive_rx_coalesce) + { + d->swp->use_adaptive_rx_coalesce = use_adaptive_rx_coalesce; + } + EXPORT_SYMBOL(dpaa2_io_set_adaptive_coalescing); + + /** + * dpaa2_io_get_adaptive_coalescing() - Query adaptive coalescing state + * @d: the given DPIO object + * + * Return 1 when adaptive coalescing is enabled on the DPIO object and 0 + * otherwise. + */ + int dpaa2_io_get_adaptive_coalescing(struct dpaa2_io *d) + { + return d->swp->use_adaptive_rx_coalesce; + } + EXPORT_SYMBOL(dpaa2_io_get_adaptive_coalescing); + + /** + * dpaa2_io_update_net_dim() - Update Net DIM + * @d: the given DPIO object + * @frames: how many frames have been dequeued by the user since the last call + * @bytes: how many bytes have been dequeued by the user since the last call + */ + void dpaa2_io_update_net_dim(struct dpaa2_io *d, __u64 frames, __u64 bytes) + { + struct dim_sample dim_sample = {}; + + if (!d->swp->use_adaptive_rx_coalesce) + return; + + spin_lock(&d->dim_lock); + + d->bytes += bytes; + d->frames += frames; + + dim_update_sample(d->event_ctr, d->frames, d->bytes, &dim_sample); + net_dim(&d->rx_dim, dim_sample); + + spin_unlock(&d->dim_lock); + } + EXPORT_SYMBOL(dpaa2_io_update_net_dim); diff --combined drivers/soc/fsl/dpio/qbman-portal.c index c4282cc7b391,3474bf5f88d5..1397ec21873a --- a/drivers/soc/fsl/dpio/qbman-portal.c +++ b/drivers/soc/fsl/dpio/qbman-portal.c @@@ -29,6 -29,7 +29,7 @@@ #define QBMAN_CINH_SWP_EQCR_AM_RT 0x980 #define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0 #define QBMAN_CINH_SWP_DQPI 0xa00 + #define QBMAN_CINH_SWP_DQRR_ITR 0xa80 #define QBMAN_CINH_SWP_DCAP 0xac0 #define QBMAN_CINH_SWP_SDQCR 0xb00 #define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40 @@@ -38,6 -39,7 +39,7 @@@ #define QBMAN_CINH_SWP_IER 0xe40 #define QBMAN_CINH_SWP_ISDR 0xe80 #define QBMAN_CINH_SWP_IIR 0xec0 + #define QBMAN_CINH_SWP_ITPR 0xf40
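The exports above plug DPIO software portals into the generic Net DIM framework: a consumer enables adaptive coalescing, feeds per-poll dequeue counts into dpaa2_io_update_net_dim(), and when DIM picks a new profile, dpaa2_io_dim_work() (earlier in this file) applies it through dpaa2_io_set_irq_coalescing(). A hedged sketch of the consumer side inside a hypothetical NAPI-style poll (the channel struct and consume helper are assumptions, not part of this diff):

#include <soc/fsl/dpaa2-io.h>

/* Hypothetical per-channel state; not part of this diff. */
struct example_channel {
	struct dpaa2_io *dpio;
	__u64 frames;
	__u64 bytes;
};

static int example_consume(struct example_channel *ch, int budget); /* dequeues, updates counters */

static int example_poll(struct example_channel *ch, int budget)
{
	int cleaned = example_consume(ch, budget);

	if (cleaned < budget) {
		/* About to re-arm the interrupt: report what this poll cycle
		 * moved so Net DIM can pick a new moderation profile. */
		dpaa2_io_update_net_dim(ch->dpio, ch->frames, ch->bytes);
		ch->frames = 0;
		ch->bytes = 0;
	}
	return cleaned;
}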
/* CENA register offsets */ #define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6)) @@@ -355,6 -357,9 +357,9 @@@ struct qbman_swp *qbman_swp_init(const & p->eqcr.pi_ci_mask; p->eqcr.available = p->eqcr.pi_ring_size;
+ /* Initialize the software portal with an IRQ timeout period of 0 us */ + qbman_swp_set_irq_coalescing(p, p->dqrr.dqrr_size - 1, 0); + return p; }
@@@ -674,9 -679,9 +679,9 @@@ int qbman_swp_enqueue_multiple_direct(s for (i = 0; i < num_enqueued; i++) { p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); /* Skip copying the verb */ - memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); - memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)], - &fd[i], sizeof(*fd)); + memcpy_toio((__iomem void *)&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); + memcpy_toio((__iomem void *)&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)], + &fd[i], sizeof(*fd)); eqcr_pi++; }
@@@ -688,9 -693,9 +693,9 @@@ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); p[0] = cl[0] | s->eqcr.pi_vb; if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) { - struct qbman_eq_desc *d = (struct qbman_eq_desc *)p; + struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p;
- d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) | + eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) | ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK); } eqcr_pi++; @@@ -732,7 -737,8 +737,7 @@@ int qbman_swp_enqueue_multiple_mem_back int i, num_enqueued = 0; unsigned long irq_flags;
- spin_lock(&s->access_spinlock); - local_irq_save(irq_flags); + spin_lock_irqsave(&s->access_spinlock, irq_flags);
half_mask = (s->eqcr.pi_ci_mask>>1); full_mask = s->eqcr.pi_ci_mask; @@@ -743,7 -749,8 +748,7 @@@ s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size, eqcr_ci, s->eqcr.ci); if (!s->eqcr.available) { - local_irq_restore(irq_flags); - spin_unlock(&s->access_spinlock); + spin_unlock_irqrestore(&s->access_spinlock, irq_flags); return 0; } } @@@ -756,9 -763,9 +761,9 @@@ for (i = 0; i < num_enqueued; i++) { p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); /* Skip copying the verb */ - memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); - memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)], - &fd[i], sizeof(*fd)); + memcpy_toio((__iomem void *)&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); + memcpy_toio((__iomem void *)&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)], + &fd[i], sizeof(*fd)); eqcr_pi++; }
@@@ -768,9 -775,9 +773,9 @@@ p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); p[0] = cl[0] | s->eqcr.pi_vb; if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) { - struct qbman_eq_desc *d = (struct qbman_eq_desc *)p; + struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p;
- d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) | + eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) | ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK); } eqcr_pi++; @@@ -782,7 -789,8 +787,7 @@@ dma_wmb(); qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI, (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb); - local_irq_restore(irq_flags); - spin_unlock(&s->access_spinlock); + spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
return num_enqueued; } @@@ -829,9 -837,9 +834,9 @@@ int qbman_swp_enqueue_multiple_desc_dir p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); cl = (uint32_t *)(&d[i]); /* Skip copying the verb */ - memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); - memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)], - &fd[i], sizeof(*fd)); + memcpy_toio((__iomem void *)&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); + memcpy_toio((__iomem void *)&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)], + &fd[i], sizeof(*fd)); eqcr_pi++; }
@@@ -899,9 -907,9 +904,9 @@@ int qbman_swp_enqueue_multiple_desc_mem p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)); cl = (uint32_t *)(&d[i]); /* Skip copying the verb */ - memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); - memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)], - &fd[i], sizeof(*fd)); + memcpy_toio((__iomem void *)&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1); + memcpy_toio((__iomem void *)&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)], + &fd[i], sizeof(*fd)); eqcr_pi++; }
@@@ -1793,3 -1801,56 +1798,56 @@@ u32 qbman_bp_info_num_free_bufs(struct { return le32_to_cpu(a->fill); } + + /** + * qbman_swp_set_irq_coalescing() - Set new IRQ coalescing values + * @p: the software portal object + * @irq_threshold: interrupt threshold + * @irq_holdoff: interrupt holdoff (timeout) period in us + * + * Return 0 for success, or negative error code on error. + */ + int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold, + u32 irq_holdoff) + { + u32 itp, max_holdoff; + + /* Convert irq_holdoff value from usecs to 256 QBMAN clock cycles + * increments. This depends on the QBMAN internal frequency. + */ + itp = (irq_holdoff * 1000) / p->desc->qman_256_cycles_per_ns; + if (itp > 4096) { + max_holdoff = (p->desc->qman_256_cycles_per_ns * 4096) / 1000; + pr_err("irq_holdoff must be <= %uus\n", max_holdoff); + return -EINVAL; + } + + if (irq_threshold >= p->dqrr.dqrr_size) { + pr_err("irq_threshold must be < %u\n", p->dqrr.dqrr_size); + return -EINVAL; + } + + p->irq_threshold = irq_threshold; + p->irq_holdoff = irq_holdoff; + + qbman_write_register(p, QBMAN_CINH_SWP_DQRR_ITR, irq_threshold); + qbman_write_register(p, QBMAN_CINH_SWP_ITPR, itp); + + return 0; + } + + /** + * qbman_swp_get_irq_coalescing() - Get the current IRQ coalescing parameters + * @p: the software portal object + * @irq_threshold: interrupt threshold (an IRQ is generated when there are more + * DQRR entries in the portal than the threshold) + * @irq_holdoff: interrupt holdoff (timeout) period in us + */ + void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold, + u32 *irq_holdoff) + { + if (irq_threshold) + *irq_threshold = p->irq_threshold; + if (irq_holdoff) + *irq_holdoff = p->irq_holdoff; + } diff --combined include/linux/bpf.h index 3db6f6c95489,1c7fd7c4c6d3..e6f5579f9356 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@@ -48,6 -48,7 +48,7 @@@ extern struct idr btf_idr extern spinlock_t btf_idr_lock; extern struct kobject *btf_kobj;
+ typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64); typedef int (*bpf_iter_init_seq_priv_t)(void *private_data, struct bpf_iter_aux_info *aux); typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data); @@@ -142,7 -143,8 +143,8 @@@ struct bpf_map_ops int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env, struct bpf_func_state *caller, struct bpf_func_state *callee); - int (*map_for_each_callback)(struct bpf_map *map, void *callback_fn, + int (*map_for_each_callback)(struct bpf_map *map, + bpf_callback_t callback_fn, void *callback_ctx, u64 flags);
/* BTF name and id of struct allocated by map_alloc */ @@@ -929,11 -931,8 +931,11 @@@ struct bpf_array_aux * stored in the map to make sure that all callers and callees have * the same prog type and JITed flag. */ - enum bpf_prog_type type; - bool jited; + struct { + spinlock_t lock; + enum bpf_prog_type type; + bool jited; + } owner; /* Programs with direct jumps into programs part of this array. */ struct list_head poke_progs; struct bpf_map *map; @@@ -1092,6 -1091,7 +1094,7 @@@ bool bpf_prog_array_compatible(struct b int bpf_prog_calc_tag(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void); + const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void);
typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, unsigned long off, unsigned long len); @@@ -2220,6 -2220,8 +2223,8 @@@ int bpf_arch_text_poke(void *ip, enum b struct btf_id_set; bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
+ #define MAX_BPRINTF_VARARGS 12 + int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, u32 **bin_buf, u32 num_args); void bpf_bprintf_cleanup(void); diff --combined include/linux/filter.h index ef03ff34234d,47f80adbe744..8231a6a257f6 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@@ -360,10 -360,9 +360,9 @@@ static inline bool insn_is_zext(const s .off = 0, \ .imm = TGT })
- /* Function call */ + /* Convert function address to BPF immediate */
- #define BPF_CAST_CALL(x) \ - ((u64 (*)(u64, u64, u64, u64, u64))(x)) + #define BPF_CALL_IMM(x) ((void *)(x) - (void *)__bpf_call_base)
#define BPF_EMIT_CALL(FUNC) \ ((struct bpf_insn) { \ @@@ -371,7 -370,7 +370,7 @@@ .dst_reg = 0, \ .src_reg = 0, \ .off = 0, \ - .imm = ((FUNC) - __bpf_call_base) }) + .imm = BPF_CALL_IMM(FUNC) })
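BPF_CAST_CALL hid a function-pointer cast; BPF_CALL_IMM instead names the conversion call instructions actually need: a helper address stored in the 32-bit insn->imm as an offset from __bpf_call_base. BPF_EMIT_CALL now builds on it, and the interpreter reverses the arithmetic at dispatch time. In outline (the decode half is paraphrased, not quoted from this diff; bpf_user_rnd_u32 is just an example helper):

#include <linux/bpf.h>
#include <linux/filter.h>

static void example_encode_decode(struct bpf_insn *insn)
{
	/* Encode: store the helper as an offset from __bpf_call_base so it
	 * fits the 32-bit immediate. */
	insn->imm = BPF_CALL_IMM(bpf_user_rnd_u32);

	/* Decode: the interpreter adds the base back to recover a callable
	 * address (GNU C function-pointer arithmetic, as the interpreter
	 * itself uses; paraphrased, not quoted from this diff). */
	u64 (*fn)(u64, u64, u64, u64, u64) = __bpf_call_base + insn->imm;
	(void)fn;
}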
/* Raw code statement block */
@@@ -1051,7 -1050,6 +1050,7 @@@ extern int bpf_jit_enable extern int bpf_jit_harden; extern int bpf_jit_kallsyms; extern long bpf_jit_limit; +extern long bpf_jit_limit_max;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
diff --combined include/linux/mlx5/device.h index ed0230ff9422,f8a0bbb42c3b..ed0254b01b71 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@@ -541,19 -541,21 +541,21 @@@ struct mlx5_cmd_layout u8 status_own; };
- enum mlx5_fatal_assert_bit_offsets { - MLX5_RFR_OFFSET = 31, + enum mlx5_rfr_severity_bit_offsets { + MLX5_RFR_BIT_OFFSET = 0x7, };
struct health_buffer { - __be32 assert_var[5]; - __be32 rsvd0[3]; + __be32 assert_var[6]; + __be32 rsvd0[2]; __be32 assert_exit_ptr; __be32 assert_callra; - __be32 rsvd1[2]; + __be32 rsvd1[1]; + __be32 time; __be32 fw_ver; __be32 hw_id; - __be32 rfr; + u8 rfr_severity; + u8 rsvd2[3]; u8 irisc_index; u8 synd; __be16 ext_synd; @@@ -577,7 -579,9 +579,9 @@@ struct mlx5_init_seg __be32 rsvd1[120]; __be32 initializing; struct health_buffer health; - __be32 rsvd2[880]; + __be32 rsvd2[878]; + __be32 cmd_exec_to; + __be32 cmd_q_init_to; __be32 internal_timer_h; __be32 internal_timer_l; __be32 rsvd3[2]; @@@ -1183,6 -1187,7 +1187,7 @@@ enum mlx5_cap_type MLX5_CAP_DEV_EVENT = 0x14, MLX5_CAP_IPSEC, MLX5_CAP_GENERAL_2 = 0x20, + MLX5_CAP_PORT_SELECTION = 0x25, /* NUM OF CAP Types */ MLX5_CAP_NUM }; @@@ -1340,6 -1345,20 +1345,20 @@@ enum mlx5_qcam_feature_groups MLX5_GET(e_switch_cap, \ mdev->caps.hca[MLX5_CAP_ESWITCH]->max, cap)
+ #define MLX5_CAP_PORT_SELECTION(mdev, cap) \ + MLX5_GET(port_selection_cap, \ + mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, cap) + + #define MLX5_CAP_PORT_SELECTION_MAX(mdev, cap) \ + MLX5_GET(port_selection_cap, \ + mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->max, cap) + + #define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \ + MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap) + + #define MLX5_CAP_FLOWTABLE_PORT_SELECTION_MAX(mdev, cap) \ + MLX5_CAP_PORT_SELECTION_MAX(mdev, flow_table_properties_port_selection.cap) + #define MLX5_CAP_ODP(mdev, cap)\ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)
@@@ -1456,8 -1475,6 +1475,8 @@@ static inline u16 mlx5_to_sw_pkey_sz(in return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz; }
+#define MLX5_RDMA_RX_NUM_COUNTERS_PRIOS 2 +#define MLX5_RDMA_TX_NUM_COUNTERS_PRIOS 1 #define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16 #define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16 #define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1 diff --combined include/linux/mlx5/driver.h index 5cc19b9483b9,f617dfbcd9fd..a623ec635947 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@@ -59,15 -59,13 +59,13 @@@
#define MLX5_ADEV_NAME "mlx5_core"
+ #define MLX5_IRQ_EQ_CTRL (U8_MAX) + enum { MLX5_BOARD_ID_LEN = 64, };
enum { - /* one minute for the sake of bringup. Generally, commands must always - * complete and we may need to increase this timeout value - */ - MLX5_CMD_TIMEOUT_MSEC = 60 * 1000, MLX5_CMD_WQ_MAX_NAME = 32, };
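Dropping the hardcoded 60-second MLX5_CMD_TIMEOUT_MSEC ties into the other additions in this merge: the MLX5_REG_DTOR access register below, the struct mlx5_timeouts member added to mlx5_core_dev, the cmd_exec_to/cmd_q_init_to words in the init segment, and the dtor_reg/default_timeout_bits layouts in mlx5_ifc.h, which together let firmware publish per-operation default timeouts instead of the driver assuming one value fits all. A hedged sketch of extracting one entry from a queried DTOR buffer (MLX5_GET field access only; how to_multiplier scales to_value is not spelled out in this diff):

#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

/* out[] is assumed to already hold a successful MLX5_REG_DTOR query. */
static void example_read_pcie_toggle_to(void *out, u32 *val, u8 *mult)
{
	*val = MLX5_GET(dtor_reg, out, pcie_toggle_to.to_value);
	*mult = MLX5_GET(dtor_reg, out, pcie_toggle_to.to_multiplier);
}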
@@@ -136,6 -134,7 +134,7 @@@ enum MLX5_REG_MCIA = 0x9014, MLX5_REG_MFRL = 0x9028, MLX5_REG_MLCR = 0x902b, + MLX5_REG_MRTC = 0x902d, MLX5_REG_MTRC_CAP = 0x9040, MLX5_REG_MTRC_CONF = 0x9041, MLX5_REG_MTRC_STDB = 0x9042, @@@ -154,6 -153,7 +153,7 @@@ MLX5_REG_MIRC = 0x9162, MLX5_REG_SBCAM = 0xB01F, MLX5_REG_RESOURCE_DUMP = 0xC000, + MLX5_REG_DTOR = 0xC00E, };
enum mlx5_qpts_trust_state { @@@ -357,6 -357,22 +357,6 @@@ struct mlx5_core_sig_ctx u32 sigerr_count; };
-enum { - MLX5_MKEY_MR = 1, - MLX5_MKEY_MW, - MLX5_MKEY_INDIRECT_DEVX, -}; - -struct mlx5_core_mkey { - u64 iova; - u64 size; - u32 key; - u32 pd; - u32 type; - struct wait_queue_head wait; - refcount_t usecount; -}; - #define MLX5_24BIT_MASK ((1 << 24) - 1)
enum mlx5_res_type { @@@ -425,6 -441,7 +425,7 @@@ struct mlx5_core_health struct work_struct report_work; struct devlink_health_reporter *fw_reporter; struct devlink_health_reporter *fw_fatal_reporter; + struct delayed_work update_fw_log_ts_work; };
struct mlx5_qp_table { @@@ -637,7 -654,7 +638,7 @@@ struct mlx5e_resources struct mlx5e_hw_objs { u32 pdn; struct mlx5_td td; - struct mlx5_core_mkey mkey; + u32 mkey; struct mlx5_sq_bfreg bfreg; } hw_objs; struct devlink_port dl_port; @@@ -736,6 -753,7 +737,7 @@@ struct mlx5_core_dev u32 qcam[MLX5_ST_SZ_DW(qcam_reg)]; u8 embedded_cpu; } caps; + struct mlx5_timeouts *timeouts; u64 sys_image_guid; phys_addr_t iseg_base; struct mlx5_init_seg __iomem *iseg; @@@ -989,6 -1007,8 +991,6 @@@ void mlx5_cmd_mbox_status(void *out, u bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); -int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); -int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); void mlx5_health_flush(struct mlx5_core_dev *dev); void mlx5_health_cleanup(struct mlx5_core_dev *dev); int mlx5_health_init(struct mlx5_core_dev *dev); @@@ -1006,11 -1026,13 +1008,11 @@@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd gfp_t flags, int npages); void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, struct mlx5_cmd_mailbox *head); -int mlx5_core_create_mkey(struct mlx5_core_dev *dev, - struct mlx5_core_mkey *mkey, - u32 *in, int inlen); -int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, - struct mlx5_core_mkey *mkey); -int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, - u32 *out, int outlen); +int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in, + int inlen); +int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey); +int mlx5_core_query_mkey(struct mlx5_core_dev *dev, u32 mkey, u32 *out, + int outlen); int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); int mlx5_pagealloc_init(struct mlx5_core_dev *dev); @@@ -1222,6 -1244,16 +1224,16 @@@ static inline int mlx5_core_native_port return MLX5_CAP_GEN(dev, native_port_num); }
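struct mlx5_core_mkey (removed above) carried iova/size/pd/type next to the key, but consumers ultimately needed only the 32-bit key itself, which is why the hunks elsewhere in this diff shrink mlx5e_hw_objs.mkey and mlx5dr_mr.mkey to a bare u32 and mlx5e_open_channel now does cpu_to_be32(...hw_objs.mkey) directly. Callers of the reworked prototypes change roughly like this (a sketch; in/inlen are the usual create_mkey_in command buffer):

#include <linux/mlx5/driver.h>

/* Before: mlx5_core_create_mkey(dev, &mkey_struct, in, inlen), with users
 * reading mkey_struct.key. After: the key is the whole object. */
static int example_mkey_roundtrip(struct mlx5_core_dev *dev, u32 *in, int inlen)
{
	u32 mkey;
	int err;

	err = mlx5_core_create_mkey(dev, &mkey, in, inlen);
	if (err)
		return err;

	/* ... use mkey directly, e.g. cpu_to_be32(mkey) in the data path ... */

	return mlx5_core_destroy_mkey(dev, mkey);
}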
+ static inline int mlx5_get_dev_index(struct mlx5_core_dev *dev) + { + int idx = MLX5_CAP_GEN(dev, native_port_num); + + if (idx >= 1 && idx <= MLX5_MAX_PORTS) + return idx - 1; + else + return PCI_FUNC(dev->pdev->devfn); + } + enum { MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, }; @@@ -1230,11 -1262,12 +1242,12 @@@ static inline bool mlx5_is_roce_init_en { struct devlink *devlink = priv_to_devlink(dev); union devlink_param_value val; + int err;
- devlink_param_driverinit_value_get(devlink, - DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, - &val); - return val.vbool; + err = devlink_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, + &val); + return err ? MLX5_CAP_GEN(dev, roce) : val.vbool; }
#endif /* MLX5_DRIVER_H */ diff --combined include/linux/mlx5/fs.h index f2c3da2006d9,7a43fec63a35..90c548aa6389 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@@ -83,8 -83,7 +83,9 @@@ enum mlx5_flow_namespace_type MLX5_FLOW_NAMESPACE_RDMA_RX, MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL, MLX5_FLOW_NAMESPACE_RDMA_TX, + MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS, + MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS, + MLX5_FLOW_NAMESPACE_PORT_SEL, };
enum { @@@ -99,6 -98,7 +100,7 @@@
struct mlx5_pkt_reformat; struct mlx5_modify_hdr; + struct mlx5_flow_definer; struct mlx5_flow_table; struct mlx5_flow_group; struct mlx5_flow_namespace; @@@ -259,6 -259,13 +261,13 @@@ struct mlx5_modify_hdr *mlx5_modify_hea void *modify_actions); void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, struct mlx5_modify_hdr *modify_hdr); + struct mlx5_flow_definer * + mlx5_create_match_definer(struct mlx5_core_dev *dev, + enum mlx5_flow_namespace_type ns_type, u16 format_id, + u32 *match_mask); + void mlx5_destroy_match_definer(struct mlx5_core_dev *dev, + struct mlx5_flow_definer *definer); + int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer);
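The match definer API declared above (backed by the new MLX5_OBJ_TYPE_MATCH_DEFINER general object and the match_definer_format_* layouts in the mlx5_ifc.h hunks below) lets steering describe a fixed match layout once and reference it by ID, for instance via the match_definer_id field that CREATE_FLOW_GROUP gains for HASH_SPLIT group types. A hedged usage sketch (namespace, format 22 and the masked field are illustrative choices, not mandated by this diff):

#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mlx5_ifc.h>

static int example_use_definer(struct mlx5_core_dev *dev)
{
	u32 match_mask[MLX5_ST_SZ_DW(match_definer_format_22)] = {};
	struct mlx5_flow_definer *definer;
	int id;

	/* Illustrative: mask the outer IPv4 source address of format 22. */
	MLX5_SET(match_definer_format_22, match_mask, outer_ip_src_addr, ~0);

	definer = mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_FDB,
					    22, match_mask);
	if (IS_ERR(definer))
		return PTR_ERR(definer);

	id = mlx5_get_match_definer_id(definer);
	/* ... program id into create_flow_group_in.match_definer_id ... */
	(void)id;

	mlx5_destroy_match_definer(dev, definer);
	return 0;
}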
struct mlx5_pkt_reformat_params { int type; diff --combined include/linux/mlx5/mlx5_ifc.h index 9011c4495867,746381eccccf..53483c457093 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@@ -94,6 -94,7 +94,7 @@@ enum enum { MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b, MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d, + MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018, MLX5_OBJ_TYPE_MKEY = 0xff01, MLX5_OBJ_TYPE_QP = 0xff02, MLX5_OBJ_TYPE_PSV = 0xff03, @@@ -342,7 -343,7 +343,7 @@@ struct mlx5_ifc_flow_table_fields_suppo u8 outer_geneve_oam[0x1]; u8 outer_geneve_protocol_type[0x1]; u8 outer_geneve_opt_len[0x1]; - u8 reserved_at_1e[0x1]; + u8 source_vhca_port[0x1]; u8 source_eswitch_port[0x1];
u8 inner_dmac[0x1]; @@@ -393,14 -394,6 +394,14 @@@ u8 metadata_reg_c_0[0x1]; };
+struct mlx5_ifc_flow_table_fields_supported_2_bits { + u8 reserved_at_0[0xe]; + u8 bth_opcode[0x1]; + u8 reserved_at_f[0x11]; + + u8 reserved_at_20[0x60]; +}; + struct mlx5_ifc_flow_table_prop_layout_bits { u8 ft_support[0x1]; u8 reserved_at_1[0x1]; @@@ -547,7 -540,7 +548,7 @@@ struct mlx5_ifc_fte_match_set_misc_bit union mlx5_ifc_gre_key_bits gre_key;
u8 vxlan_vni[0x18]; - u8 reserved_at_b8[0x8]; + u8 bth_opcode[0x8];
u8 geneve_vni[0x18]; u8 reserved_at_d8[0x7]; @@@ -764,15 -757,7 +765,15 @@@ struct mlx5_ifc_flow_table_nic_cap_bit
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
- u8 reserved_at_e00[0x1200]; + u8 reserved_at_e00[0x700]; + + struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive_rdma; + + u8 reserved_at_1580[0x280]; + + struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_transmit_rdma; + + u8 reserved_at_1880[0x780];
u8 sw_steering_nic_rx_action_drop_icm_address[0x40];
@@@ -783,6 -768,18 +784,18 @@@ u8 reserved_at_20c0[0x5f40]; };
+ struct mlx5_ifc_port_selection_cap_bits { + u8 reserved_at_0[0x10]; + u8 port_select_flow_table[0x1]; + u8 reserved_at_11[0xf]; + + u8 reserved_at_20[0x1e0]; + + struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_port_selection; + + u8 reserved_at_400[0x7c00]; + }; + enum { MLX5_FDB_TO_VPORT_REG_C_0 = 0x01, MLX5_FDB_TO_VPORT_REG_C_1 = 0x02, @@@ -1322,7 -1319,8 +1335,8 @@@ struct mlx5_ifc_cmd_hca_cap_bits u8 vhca_resource_manager[0x1];
u8 hca_cap_2[0x1]; - u8 reserved_at_21[0x2]; + u8 reserved_at_21[0x1]; + u8 dtor[0x1]; u8 event_on_vhca_state_teardown_request[0x1]; u8 event_on_vhca_state_in_use[0x1]; u8 event_on_vhca_state_active[0x1]; @@@ -1530,7 -1528,8 +1544,8 @@@ u8 uar_4k[0x1]; u8 reserved_at_241[0x9]; u8 uar_sz[0x6]; - u8 reserved_at_248[0x2]; + u8 port_selection_cap[0x1]; + u8 reserved_at_248[0x1]; u8 umem_uid_0[0x1]; u8 reserved_at_250[0x5]; u8 log_pg_sz[0x8]; @@@ -1603,7 -1602,8 +1618,8 @@@ u8 log_max_tis_per_sq[0x5];
u8 ext_stride_num_range[0x1]; - u8 reserved_at_3a1[0x2]; + u8 roce_rw_supported[0x1]; + u8 reserved_at_3a2[0x1]; u8 log_max_stride_sz_rq[0x5]; u8 reserved_at_3a8[0x3]; u8 log_min_stride_sz_rq[0x5]; @@@ -1732,7 -1732,7 +1748,7 @@@ u8 flex_parser_id_outer_first_mpls_over_gre[0x4]; u8 flex_parser_id_outer_first_mpls_over_udp_label[0x4];
- u8 reserved_at_6e0[0x10]; + u8 max_num_match_definer[0x10]; u8 sf_base_id[0x10];
u8 flex_parser_id_gtpu_dw_2[0x4]; @@@ -1747,7 -1747,7 +1763,7 @@@
u8 reserved_at_760[0x20]; u8 vhca_tunnel_commands[0x40]; - u8 reserved_at_7c0[0x40]; + u8 match_definer_format_supported[0x40]; };
struct mlx5_ifc_cmd_hca_cap_2_bits { @@@ -1766,6 -1766,7 +1782,7 @@@ enum mlx5_flow_destination_type MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1, MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2, MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6, + MLX5_FLOW_DESTINATION_TYPE_UPLINK = 0x8,
MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99, MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100, @@@ -2823,6 -2824,40 +2840,40 @@@ struct mlx5_ifc_dropped_packet_logged_b u8 reserved_at_0[0xe0]; };
+ struct mlx5_ifc_default_timeout_bits { + u8 to_multiplier[0x3]; + u8 reserved_at_3[0x9]; + u8 to_value[0x14]; + }; + + struct mlx5_ifc_dtor_reg_bits { + u8 reserved_at_0[0x20]; + + struct mlx5_ifc_default_timeout_bits pcie_toggle_to; + + u8 reserved_at_40[0x60]; + + struct mlx5_ifc_default_timeout_bits health_poll_to; + + struct mlx5_ifc_default_timeout_bits full_crdump_to; + + struct mlx5_ifc_default_timeout_bits fw_reset_to; + + struct mlx5_ifc_default_timeout_bits flush_on_err_to; + + struct mlx5_ifc_default_timeout_bits pci_sync_update_to; + + struct mlx5_ifc_default_timeout_bits tear_down_to; + + struct mlx5_ifc_default_timeout_bits fsm_reactivate_to; + + struct mlx5_ifc_default_timeout_bits reclaim_pages_to; + + struct mlx5_ifc_default_timeout_bits reclaim_vfs_pages_to; + + u8 reserved_at_1c0[0x40]; + }; + enum { MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1, MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2, @@@ -3144,6 -3179,7 +3195,7 @@@ union mlx5_ifc_hca_cap_union_bits struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap; struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap; struct mlx5_ifc_e_switch_cap_bits e_switch_cap; + struct mlx5_ifc_port_selection_cap_bits port_selection_cap; struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap; struct mlx5_ifc_qos_cap_bits qos_cap; struct mlx5_ifc_debug_cap_bits debug_cap; @@@ -4113,13 -4149,19 +4165,19 @@@ struct mlx5_ifc_health_buffer_bits
u8 assert_callra[0x20];
- u8 reserved_at_140[0x40]; + u8 reserved_at_140[0x20]; + + u8 time[0x20];
u8 fw_version[0x20];
u8 hw_id[0x20];
- u8 reserved_at_1c0[0x20]; + u8 rfr[0x1]; + u8 reserved_at_1c1[0x3]; + u8 valid[0x1]; + u8 severity[0x3]; + u8 reserved_at_1c8[0x18];
u8 irisc_index[0x8]; u8 synd[0x8]; @@@ -5632,6 -5674,236 +5690,236 @@@ struct mlx5_ifc_query_fte_in_bits u8 reserved_at_120[0xe0]; };
+ struct mlx5_ifc_match_definer_format_0_bits { + u8 reserved_at_0[0x100]; + + u8 metadata_reg_c_0[0x20]; + + u8 metadata_reg_c_1[0x20]; + + u8 outer_dmac_47_16[0x20]; + + u8 outer_dmac_15_0[0x10]; + u8 outer_ethertype[0x10]; + + u8 reserved_at_180[0x1]; + u8 sx_sniffer[0x1]; + u8 functional_lb[0x1]; + u8 outer_ip_frag[0x1]; + u8 outer_qp_type[0x2]; + u8 outer_encap_type[0x2]; + u8 port_number[0x2]; + u8 outer_l3_type[0x2]; + u8 outer_l4_type[0x2]; + u8 outer_first_vlan_type[0x2]; + u8 outer_first_vlan_prio[0x3]; + u8 outer_first_vlan_cfi[0x1]; + u8 outer_first_vlan_vid[0xc]; + + u8 outer_l4_type_ext[0x4]; + u8 reserved_at_1a4[0x2]; + u8 outer_ipsec_layer[0x2]; + u8 outer_l2_type[0x2]; + u8 force_lb[0x1]; + u8 outer_l2_ok[0x1]; + u8 outer_l3_ok[0x1]; + u8 outer_l4_ok[0x1]; + u8 outer_second_vlan_type[0x2]; + u8 outer_second_vlan_prio[0x3]; + u8 outer_second_vlan_cfi[0x1]; + u8 outer_second_vlan_vid[0xc]; + + u8 outer_smac_47_16[0x20]; + + u8 outer_smac_15_0[0x10]; + u8 inner_ipv4_checksum_ok[0x1]; + u8 inner_l4_checksum_ok[0x1]; + u8 outer_ipv4_checksum_ok[0x1]; + u8 outer_l4_checksum_ok[0x1]; + u8 inner_l3_ok[0x1]; + u8 inner_l4_ok[0x1]; + u8 outer_l3_ok_duplicate[0x1]; + u8 outer_l4_ok_duplicate[0x1]; + u8 outer_tcp_cwr[0x1]; + u8 outer_tcp_ece[0x1]; + u8 outer_tcp_urg[0x1]; + u8 outer_tcp_ack[0x1]; + u8 outer_tcp_psh[0x1]; + u8 outer_tcp_rst[0x1]; + u8 outer_tcp_syn[0x1]; + u8 outer_tcp_fin[0x1]; + }; + + struct mlx5_ifc_match_definer_format_22_bits { + u8 reserved_at_0[0x100]; + + u8 outer_ip_src_addr[0x20]; + + u8 outer_ip_dest_addr[0x20]; + + u8 outer_l4_sport[0x10]; + u8 outer_l4_dport[0x10]; + + u8 reserved_at_160[0x1]; + u8 sx_sniffer[0x1]; + u8 functional_lb[0x1]; + u8 outer_ip_frag[0x1]; + u8 outer_qp_type[0x2]; + u8 outer_encap_type[0x2]; + u8 port_number[0x2]; + u8 outer_l3_type[0x2]; + u8 outer_l4_type[0x2]; + u8 outer_first_vlan_type[0x2]; + u8 outer_first_vlan_prio[0x3]; + u8 outer_first_vlan_cfi[0x1]; + u8 outer_first_vlan_vid[0xc]; + + u8 metadata_reg_c_0[0x20]; + + u8 outer_dmac_47_16[0x20]; + + u8 outer_smac_47_16[0x20]; + + u8 outer_smac_15_0[0x10]; + u8 outer_dmac_15_0[0x10]; + }; + + struct mlx5_ifc_match_definer_format_23_bits { + u8 reserved_at_0[0x100]; + + u8 inner_ip_src_addr[0x20]; + + u8 inner_ip_dest_addr[0x20]; + + u8 inner_l4_sport[0x10]; + u8 inner_l4_dport[0x10]; + + u8 reserved_at_160[0x1]; + u8 sx_sniffer[0x1]; + u8 functional_lb[0x1]; + u8 inner_ip_frag[0x1]; + u8 inner_qp_type[0x2]; + u8 inner_encap_type[0x2]; + u8 port_number[0x2]; + u8 inner_l3_type[0x2]; + u8 inner_l4_type[0x2]; + u8 inner_first_vlan_type[0x2]; + u8 inner_first_vlan_prio[0x3]; + u8 inner_first_vlan_cfi[0x1]; + u8 inner_first_vlan_vid[0xc]; + + u8 tunnel_header_0[0x20]; + + u8 inner_dmac_47_16[0x20]; + + u8 inner_smac_47_16[0x20]; + + u8 inner_smac_15_0[0x10]; + u8 inner_dmac_15_0[0x10]; + }; + + struct mlx5_ifc_match_definer_format_29_bits { + u8 reserved_at_0[0xc0]; + + u8 outer_ip_dest_addr[0x80]; + + u8 outer_ip_src_addr[0x80]; + + u8 outer_l4_sport[0x10]; + u8 outer_l4_dport[0x10]; + + u8 reserved_at_1e0[0x20]; + }; + + struct mlx5_ifc_match_definer_format_30_bits { + u8 reserved_at_0[0xa0]; + + u8 outer_ip_dest_addr[0x80]; + + u8 outer_ip_src_addr[0x80]; + + u8 outer_dmac_47_16[0x20]; + + u8 outer_smac_47_16[0x20]; + + u8 outer_smac_15_0[0x10]; + u8 outer_dmac_15_0[0x10]; + }; + + struct mlx5_ifc_match_definer_format_31_bits { + u8 reserved_at_0[0xc0]; + + u8 inner_ip_dest_addr[0x80]; + + u8 inner_ip_src_addr[0x80]; + + u8 inner_l4_sport[0x10]; + u8 inner_l4_dport[0x10]; + + 
u8 reserved_at_1e0[0x20]; + }; + + struct mlx5_ifc_match_definer_format_32_bits { + u8 reserved_at_0[0xa0]; + + u8 inner_ip_dest_addr[0x80]; + + u8 inner_ip_src_addr[0x80]; + + u8 inner_dmac_47_16[0x20]; + + u8 inner_smac_47_16[0x20]; + + u8 inner_smac_15_0[0x10]; + u8 inner_dmac_15_0[0x10]; + }; + + struct mlx5_ifc_match_definer_bits { + u8 modify_field_select[0x40]; + + u8 reserved_at_40[0x40]; + + u8 reserved_at_80[0x10]; + u8 format_id[0x10]; + + u8 reserved_at_a0[0x160]; + + u8 match_mask[16][0x20]; + }; + + struct mlx5_ifc_general_obj_in_cmd_hdr_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 vhca_tunnel_id[0x10]; + u8 obj_type[0x10]; + + u8 obj_id[0x20]; + + u8 reserved_at_60[0x20]; + }; + + struct mlx5_ifc_general_obj_out_cmd_hdr_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 obj_id[0x20]; + + u8 reserved_at_60[0x20]; + }; + + struct mlx5_ifc_create_match_definer_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; + + struct mlx5_ifc_match_definer_bits obj_context; + }; + + struct mlx5_ifc_create_match_definer_out_bits { + struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; + }; + enum { MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, @@@ -7585,7 -7857,7 +7873,7 @@@ struct mlx5_ifc_dealloc_uar_out_bits
struct mlx5_ifc_dealloc_uar_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10];
u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@@ -8105,6 -8377,11 +8393,11 @@@ struct mlx5_ifc_create_flow_group_out_b u8 reserved_at_60[0x20]; };
+ enum { + MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_TCAM_SUBTABLE = 0x0, + MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT = 0x1, + }; + enum { MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, @@@ -8126,7 -8403,9 +8419,9 @@@ struct mlx5_ifc_create_flow_group_in_bi u8 reserved_at_60[0x20];
u8 table_type[0x8]; - u8 reserved_at_88[0x18]; + u8 reserved_at_88[0x4]; + u8 group_type[0x4]; + u8 reserved_at_90[0x10];
u8 reserved_at_a0[0x8]; u8 table_id[0x18]; @@@ -8141,7 -8420,10 +8436,10 @@@
u8 end_flow_index[0x20];
- u8 reserved_at_140[0xa0]; + u8 reserved_at_140[0x10]; + u8 match_definer_id[0x10]; + + u8 reserved_at_160[0x80];
u8 reserved_at_1e0[0x18]; u8 match_criteria_enable[0x8]; @@@ -8432,7 -8714,7 +8730,7 @@@ struct mlx5_ifc_alloc_uar_out_bits
struct mlx5_ifc_alloc_uar_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10];
u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@@ -10076,6 -10358,17 +10374,17 @@@ struct mlx5_ifc_pddr_reg_bits union mlx5_ifc_pddr_reg_page_data_auto_bits page_data; };
+ struct mlx5_ifc_mrtc_reg_bits { + u8 time_synced[0x1]; + u8 reserved_at_1[0x1f]; + + u8 reserved_at_20[0x20]; + + u8 time_h[0x20]; + + u8 time_l[0x20]; + }; + union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_bufferx_reg_bits bufferx_reg; struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; @@@ -10137,6 -10430,7 +10446,7 @@@ struct mlx5_ifc_mirc_reg_bits mirc_reg; struct mlx5_ifc_mfrl_reg_bits mfrl_reg; struct mlx5_ifc_mtutc_reg_bits mtutc_reg; + struct mlx5_ifc_mrtc_reg_bits mrtc_reg; u8 reserved_at_0[0x60e0]; };
@@@ -10414,9 -10708,16 +10724,16 @@@ struct mlx5_ifc_dcbx_param_bits u8 reserved_at_a0[0x160]; };
+ enum { + MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY = 0, + MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT, + }; + struct mlx5_ifc_lagc_bits { u8 fdb_selection_mode[0x1]; - u8 reserved_at_1[0x1c]; + u8 reserved_at_1[0x14]; + u8 port_select_mode[0x3]; + u8 reserved_at_18[0x5]; u8 lag_state[0x3];
u8 reserved_at_20[0x14]; @@@ -10630,29 -10931,6 +10947,6 @@@ struct mlx5_ifc_dealloc_memic_out_bits u8 reserved_at_40[0x40]; };
- struct mlx5_ifc_general_obj_in_cmd_hdr_bits { - u8 opcode[0x10]; - u8 uid[0x10]; - - u8 vhca_tunnel_id[0x10]; - u8 obj_type[0x10]; - - u8 obj_id[0x20]; - - u8 reserved_at_60[0x20]; - }; - - struct mlx5_ifc_general_obj_out_cmd_hdr_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 obj_id[0x20]; - - u8 reserved_at_60[0x20]; - }; - struct mlx5_ifc_umem_bits { u8 reserved_at_0[0x80];
diff --combined include/net/cfg80211.h index 27336fc70467,7c9d5db4f0e6..423f97b982ff --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@@ -739,6 -739,22 +739,22 @@@ struct cfg80211_tid_config struct cfg80211_tid_cfg tid_conf[]; };
+ /** + * struct cfg80211_fils_aad - FILS AAD data + * @macaddr: STA MAC address + * @kek: FILS KEK + * @kek_len: FILS KEK length + * @snonce: STA Nonce + * @anonce: AP Nonce + */ + struct cfg80211_fils_aad { + const u8 *macaddr; + const u8 *kek; + u8 kek_len; + const u8 *snonce; + const u8 *anonce; + }; + /** * cfg80211_get_chandef_type - return old channel type from chandef * @chandef: the channel definition @@@ -1040,6 -1056,36 +1056,36 @@@ struct cfg80211_crypto_settings enum nl80211_sae_pwe_mechanism sae_pwe; };
+ /**
+ * struct cfg80211_mbssid_config - AP settings for multi bssid
+ *
+ * @tx_wdev: pointer to the transmitted interface in the MBSSID set
+ * @index: index of this AP in the multi bssid group.
+ * @ema: set to true if the beacons should be sent out in EMA mode.
+ */
+ struct cfg80211_mbssid_config {
+ struct wireless_dev *tx_wdev;
+ u8 index;
+ bool ema;
+ };
+
+ /**
+ * struct cfg80211_mbssid_elems - Multiple BSSID elements
+ *
+ * @cnt: Number of elements in array %elems.
+ *
+ * @elem: Array of multiple BSSID element(s) to be added into Beacon frames.
+ * @elem.data: Data for multiple BSSID elements.
+ * @elem.len: Length of data.
+ */
+ struct cfg80211_mbssid_elems {
+ u8 cnt;
+ struct {
+ const u8 *data;
+ size_t len;
+ } elem[];
+ };
+
/**
* struct cfg80211_beacon_data - beacon data
* @head: head portion of beacon (before TIM IE)
@@@ -1058,6 -1104,7 +1104,7 @@@
* @assocresp_ies_len: length of assocresp_ies in octets
* @probe_resp_len: length of probe response template (@probe_resp)
* @probe_resp: probe response template (AP mode only)
+ * @mbssid_ies: multiple BSSID elements
* @ftm_responder: enable FTM responder functionality; -1 for no change
* (which also implies no change in LCI/civic location data)
* @lci: Measurement Report element content, starting with Measurement Token
@@@ -1075,6 -1122,7 +1122,7 @@@ struct cfg80211_beacon_data
const u8 *probe_resp;
const u8 *lci;
const u8 *civicloc;
+ struct cfg80211_mbssid_elems *mbssid_ies;
s8 ftm_responder;
size_t head_len, tail_len;
@@@ -1189,6 -1237,7 +1237,7 @@@ enum cfg80211_ap_settings_flags
* @he_oper: HE operation IE (or %NULL if HE isn't enabled)
* @fils_discovery: FILS discovery transmission parameters
* @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
+ * @mbssid_config: AP settings for multiple bssid
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@@ -1221,6 -1270,7 +1270,7 @@@
struct cfg80211_he_bss_color he_bss_color;
struct cfg80211_fils_discovery fils_discovery;
struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
+ struct cfg80211_mbssid_config mbssid_config;
};
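struct cfg80211_mbssid_elems above ends in a flexible array member, so callers size a single allocation for the header plus cnt element slots. A self-contained sketch of that allocation pattern, with plain malloc standing in for the kernel allocators and made-up element bytes:

    #include <stdio.h>
    #include <stdlib.h>

    struct mbssid_elems {
        unsigned char cnt;
        struct {
            const unsigned char *data;
            size_t len;
        } elem[];               /* flexible array member, as in the diff */
    };

    int main(void)
    {
        static const unsigned char ie0[] = { 0x0b, 0x01, 0x02 };
        static const unsigned char ie1[] = { 0x0b, 0x03 };
        unsigned char cnt = 2;

        /* One allocation covers the header plus cnt array slots. */
        struct mbssid_elems *e = malloc(sizeof(*e) + cnt * sizeof(e->elem[0]));
        if (!e)
            return 1;

        e->cnt = cnt;
        e->elem[0].data = ie0;
        e->elem[0].len = sizeof(ie0);
        e->elem[1].data = ie1;
        e->elem[1].len = sizeof(ie1);

        for (unsigned i = 0; i < e->cnt; i++)
            printf("elem %u: %zu bytes\n", i, e->elem[i].len);

        free(e);
        return 0;
    }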
/**
@@@ -4018,6 -4068,10 +4068,10 @@@ struct mgmt_frame_regs
* @set_sar_specs: Update the SAR (TX power) settings.
*
* @color_change: Initiate a color change.
+ *
+ * @set_fils_aad: Set FILS AAD data to the AP driver so that the driver can use
+ * those to decrypt (Re)Association Request and encrypt (Re)Association
+ * Response frame.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@@ -4348,6 -4402,8 +4402,8 @@@
int (*color_change)(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_color_change_settings *params);
+ int (*set_fils_aad)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_fils_aad *fils_aad);
};
/*
@@@ -4981,6 -5037,13 +5037,13 @@@ struct wiphy_iftype_akm_suites
* %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes
* @sar_capa: SAR control capabilities
* @rfkill: a pointer to the rfkill structure
+ *
+ * @mbssid_max_interfaces: maximum number of interfaces supported by the driver
+ * in a multiple BSSID set. This field must be set to a non-zero value
+ * by the driver to advertise MBSSID support.
+ * @ema_max_profile_periodicity: maximum profile periodicity supported by
+ * the driver. Setting this field to a non-zero value indicates that the
+ * driver supports enhanced multi-BSSID advertisements (EMA AP).
*/
struct wiphy {
struct mutex mtx;
@@@ -5125,6 -5188,9 +5188,9 @@@
struct rfkill *rfkill;
+ u8 mbssid_max_interfaces;
+ u8 ema_max_profile_periodicity;
+
char priv[] __aligned(NETDEV_ALIGN);
};
@@@ -5376,6 -5442,7 +5442,6 @@@ static inline void wiphy_unlock(struct
* netdev and may otherwise be used by driver read-only, will be update
* by cfg80211 on change_interface
* @mgmt_registrations: list of registrations for management frames
- * @mgmt_registrations_lock: lock for the list
* @mgmt_registrations_need_update: mgmt registrations were updated,
* need to propagate the update to the driver
* @mtx: mutex used to lock data in this struct, may be used by drivers
@@@ -5422,6 -5489,7 +5488,6 @@@ struct wireless_dev
u32 identifier;
struct list_head mgmt_registrations;
- spinlock_t mgmt_registrations_lock;
u8 mgmt_registrations_need_update:1;
struct mutex mtx;
@@@ -5490,7 -5558,7 +5556,7 @@@
unsigned long unprot_beacon_reported;
};
- static inline u8 *wdev_address(struct wireless_dev *wdev)
+ static inline const u8 *wdev_address(struct wireless_dev *wdev)
{
if (wdev->netdev)
return wdev->netdev->dev_addr;
@@@ -6308,6 -6376,17 +6374,17 @@@ static inline void cfg80211_gen_new_bss
u64_to_ether_addr(new_bssid_u64, new_bssid);
}
+ /**
+ * cfg80211_get_ies_channel_number - returns the channel number from ies
+ * @ie: IEs
+ * @ielen: length of IEs
+ * @band: enum nl80211_band of the channel
+ *
+ * Returns the channel number, or -1 if none could be determined.
+ */
+ int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
+ enum nl80211_band band);
+
/**
* cfg80211_is_element_inherited - returns if element ID should be inherited
* @element: element to check
diff --combined include/net/sock.h
index 463f390d90b3,ff4e62aa62e5..db95ceaf4d4c
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@@ -259,10 -259,11 +259,11 @@@ struct bpf_local_storage
* @sk_rcvbuf: size of receive buffer in bytes
* @sk_wq: sock wait queue and async head
* @sk_rx_dst: receive input route used by early demux
+ * @sk_rx_dst_ifindex: ifindex for @sk_rx_dst
+ * @sk_rx_dst_cookie: cookie for @sk_rx_dst
* @sk_dst_cache: destination cache
* @sk_dst_pending_confirm: need to confirm neighbour
* @sk_policy: flow policy
- * @sk_rx_skb_cache: cache copy of recently accessed RX skb
* @sk_receive_queue: incoming packets
* @sk_wmem_alloc: transmit queue bytes committed
* @sk_tsq_flags: TCP Small Queues flags
@@@ -270,6 -271,7 +271,7 @@@
* @sk_omem_alloc: "o" is "option" or "other"
* @sk_wmem_queued: persistent queue size
* @sk_forward_alloc: space allocated forward
+ * @sk_reserved_mem: space reserved and non-reclaimable for the socket
* @sk_napi_id: id of the last napi context to receive data for sk
* @sk_ll_usec: usecs to busypoll when there is no data
* @sk_allocation: allocation mode
@@@ -329,7 -331,6 +331,6 @@@
* @sk_peek_off: current peek_offset value
* @sk_send_head: front of stuff to transmit
* @tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head]
- * @sk_tx_skb_cache: cache copy of recently accessed TX skb
* @sk_security: used by security modules
* @sk_mark: generic packet mark
* @sk_cgrp_data: cgroup data for this cgroup
@@@ -394,7 -395,6 +395,6 @@@ struct sock
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
- struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
/*
* The backlog queue is special, it is always used with
@@@ -413,6 -413,7 +413,7 @@@
#define sk_rmem_alloc sk_backlog.rmem_alloc
int sk_forward_alloc;
+ u32 sk_reserved_mem;
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sk_ll_usec;
/* ===== mostly read cache line ===== */
@@@ -431,6 -432,9 +432,9 @@@
struct xfrm_policy __rcu *sk_policy[2];
#endif
struct dst_entry *sk_rx_dst;
+ int sk_rx_dst_ifindex;
+ u32 sk_rx_dst_cookie;
+
struct dst_entry __rcu *sk_dst_cache;
atomic_t sk_omem_alloc;
int sk_sndbuf;
@@@ -443,7 -447,6 +447,6 @@@
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
};
- struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
@@@ -1208,7 -1211,7 +1211,7 @@@ struct proto
#endif
bool (*stream_memory_free)(const struct sock *sk, int wake);
- bool (*stream_memory_read)(const struct sock *sk);
+ bool (*sock_is_readable)(struct sock *sk);
/* Memory pressure */
void (*enter_memory_pressure)(struct sock *sk);
void (*leave_memory_pressure)(struct sock *sk);
@@@ -1237,7 -1240,7 +1240,7 @@@
unsigned int useroffset; /* Usercopy region offset */
unsigned int usersize; /* Usercopy region size */
- struct percpu_counter *orphan_count;
+ unsigned int __percpu *orphan_count;
struct request_sock_ops *rsk_prot;
struct timewait_sock_ops *twsk_prot;
@@@ -1518,20 -1521,49 +1521,49 @@@ sk_rmem_schedule(struct sock *sk, struc
skb_pfmemalloc(skb);
}
+ static inline int sk_unused_reserved_mem(const struct sock *sk)
+ {
+ int unused_mem;
+
+ if (likely(!sk->sk_reserved_mem))
+ return 0;
+
+ unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued -
+ atomic_read(&sk->sk_rmem_alloc);
+
+ return unused_mem > 0 ? unused_mem : 0;
+ }
+
static inline void sk_mem_reclaim(struct sock *sk)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
- if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk, sk->sk_forward_alloc);
+
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
+
+ if (reclaimable >= SK_MEM_QUANTUM)
+ __sk_mem_reclaim(sk, reclaimable);
+ }
+
+ static inline void sk_mem_reclaim_final(struct sock *sk)
+ {
+ sk->sk_reserved_mem = 0;
+ sk_mem_reclaim(sk);
}
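The helpers above pin down the reservation arithmetic: the unused part of sk_reserved_mem is whatever the send and receive queues have not yet consumed, clamped at zero, and only forward-allocated memory beyond that unused reservation is reclaimable. The same arithmetic in a standalone sketch with invented byte counts:

    #include <stdio.h>

    /* Invented stand-ins for the socket fields the helpers read. */
    struct sock_mem {
        int reserved_mem;       /* sk_reserved_mem */
        int wmem_queued;        /* sk_wmem_queued */
        int rmem_alloc;         /* sk_rmem_alloc */
        int forward_alloc;      /* sk_forward_alloc */
    };

    static int unused_reserved_mem(const struct sock_mem *sk)
    {
        int unused;

        if (!sk->reserved_mem)
            return 0;

        unused = sk->reserved_mem - sk->wmem_queued - sk->rmem_alloc;
        return unused > 0 ? unused : 0;
    }

    int main(void)
    {
        struct sock_mem sk = {
            .reserved_mem = 1 << 20,        /* 1 MiB reserved */
            .wmem_queued = 300000,
            .rmem_alloc = 200000,
            .forward_alloc = 700000,
        };
        /* Only what exceeds the still-unused reservation may be reclaimed. */
        int reclaimable = sk.forward_alloc - unused_reserved_mem(&sk);

        printf("unused %d reclaimable %d\n",
               unused_reserved_mem(&sk), reclaimable);
        return 0;
    }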
static inline void sk_mem_reclaim_partial(struct sock *sk)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
- if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
- __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
+
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
+
+ if (reclaimable > SK_MEM_QUANTUM)
+ __sk_mem_reclaim(sk, reclaimable - 1);
}
static inline void sk_mem_charge(struct sock *sk, int size)
@@@ -1543,9 -1575,12 +1575,12 @@@
static inline void sk_mem_uncharge(struct sock *sk, int size)
{
+ int reclaimable;
+
if (!sk_has_account(sk))
return;
sk->sk_forward_alloc += size;
+ reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
/* Avoid a possible overflow.
* TCP send queues can make this happen, if sk_mem_reclaim()
@@@ -1554,22 -1589,14 +1589,14 @@@
* If we reach 2 MBytes, reclaim 1 MBytes right now, there is
* no need to hold that much forward allocation anyway.
*/
- if (unlikely(sk->sk_forward_alloc >= 1 << 21))
+ if (unlikely(reclaimable >= 1 << 21))
__sk_mem_reclaim(sk, 1 << 20);
}
- DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
sk_wmem_queued_add(sk, -skb->truesize);
sk_mem_uncharge(sk, skb->truesize);
- if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
- !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
- skb_ext_reset(skb);
- skb_zcopy_clear(skb, true);
- sk->sk_tx_skb_cache = skb;
- return;
- }
__kfree_skb(skb);
}
@@@ -1889,10 -1916,8 +1916,8 @@@ static inline void sk_rx_queue_set(stru
if (skb_rx_queue_recorded(skb)) {
u16 rx_queue = skb_get_rx_queue(skb);
- if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
- return;
-
- sk->sk_rx_queue_mapping = rx_queue;
+ if (unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
+ WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue);
}
#endif
}
@@@ -1900,15 -1925,19 +1925,19 @@@
static inline void sk_rx_queue_clear(struct sock *sk)
{
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
- sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
+ WRITE_ONCE(sk->sk_rx_queue_mapping, NO_QUEUE_MAPPING);
#endif
}
static inline int sk_rx_queue_get(const struct sock *sk)
{
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
- if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
- return sk->sk_rx_queue_mapping;
+ if (sk) {
+ int res = READ_ONCE(sk->sk_rx_queue_mapping);
+
+ if (res != NO_QUEUE_MAPPING)
+ return res;
+ }
#endif
return -1;
@@@ -2388,13 -2417,11 +2417,11 @@@ static inline void sk_stream_moderate_s
return;
val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
+ val = max_t(u32, val, sk_unused_reserved_mem(sk));
WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
}
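sk_stream_moderate_sndbuf() now floors the moderated value at the unclaimed reservation before the global minimum is applied. A compact sketch of that clamping order (the 4608-byte floor is illustrative only, not the exact SOCK_MIN_SNDBUF definition):

    #include <stdio.h>

    #define MIN_SNDBUF 4608         /* illustrative floor only */

    static unsigned int moderate_sndbuf(unsigned int sndbuf,
                                        unsigned int wmem_queued,
                                        unsigned int unused_reserved)
    {
        unsigned int val = sndbuf < wmem_queued / 2 ? sndbuf : wmem_queued / 2;

        if (val < unused_reserved)      /* never undercut the reservation */
            val = unused_reserved;
        if (val < MIN_SNDBUF)           /* nor the global minimum */
            val = MIN_SNDBUF;
        return val;
    }

    int main(void)
    {
        printf("%u\n", moderate_sndbuf(1 << 20, 1 << 16, 1 << 17));
        return 0;
    }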
- struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
- bool force_schedule);
-
/**
* sk_page_frag - return an appropriate page_frag
* @sk: socket
@@@ -2608,7 -2635,6 +2635,6 @@@ static inline void skb_setup_tx_timesta
&skb_shinfo(skb)->tskey);
}
- DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
/**
* sk_eat_skb - Release a skb if it is no longer needed
* @sk: socket to eat this skb from
@@@ -2620,12 -2646,6 +2646,6 @@@
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
__skb_unlink(skb, &sk->sk_receive_queue);
- if (static_branch_unlikely(&tcp_rx_skb_cache_key) &&
- !sk->sk_rx_skb_cache) {
- sk->sk_rx_skb_cache = skb;
- skb_orphan(skb);
- return;
- }
__kfree_skb(skb);
}
@@@ -2820,10 -2840,8 +2840,15 @@@ void sock_set_sndtimeo(struct sock *sk
int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len);
+ int sock_get_timeout(long timeo, void *optval, bool old_timeval);
+ int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
+ sockptr_t optval, int optlen, bool old_timeval);
+
+static inline bool sk_is_readable(struct sock *sk)
+{
+ if (sk->sk_prot->sock_is_readable)
+ return sk->sk_prot->sock_is_readable(sk);
+ return false;
+}
++
#endif /* _SOCK_H */
diff --combined include/net/tls.h
index 01d2e3744393,adab19a8aed7..07ff59e99cb3
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@@ -66,7 -66,7 +66,7 @@@
#define MAX_IV_SIZE 16
#define TLS_MAX_REC_SEQ_SIZE 8
- /* For AES-CCM, the full 16-bytes of IV is made of '4' fields of given sizes.
+ /* For CCM mode, the full 16-bytes of IV is made of '4' fields of given sizes.
*
* IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
*
@@@ -74,6 -74,7 +74,7 @@@
* Hence b0 contains (3 - 1) = 2.
*/
#define TLS_AES_CCM_IV_B0_BYTE 2
+ #define TLS_SM4_CCM_IV_B0_BYTE 2
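The layout in the comment above, IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3] with b0 = 2 for the 3-byte length field, now applies to both AES-CCM and SM4-CCM. A userspace sketch assembling that 16-byte block from made-up salt and record-sequence bytes:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define CCM_IV_B0_BYTE 2        /* L' = L - 1 with a 3-byte length field */

    /* Assemble the 16-byte CCM b0/IV block described above. Salt and
     * record sequence values here are invented for illustration.
     */
    int main(void)
    {
        const uint8_t salt[4] = { 0xde, 0xad, 0xbe, 0xef };
        const uint8_t rec_seq[8] = { 0, 0, 0, 0, 0, 0, 0, 1 };
        uint8_t iv[16];

        iv[0] = CCM_IV_B0_BYTE;
        memcpy(&iv[1], salt, sizeof(salt));       /* implicit nonce */
        memcpy(&iv[5], rec_seq, sizeof(rec_seq)); /* explicit nonce */
        memset(&iv[13], 0, 3);                    /* length, filled in later */

        for (size_t i = 0; i < sizeof(iv); i++)
            printf("%02x%s", iv[i], i == 15 ? "\n" : " ");
        return 0;
    }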
#define __TLS_INC_STATS(net, field) \
__SNMP_INC_STATS((net)->mib.tls_statistics, field)
@@@ -220,6 -221,8 +221,8 @@@ union tls_crypto_context
struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305;
+ struct tls12_crypto_info_sm4_gcm sm4_gcm;
+ struct tls12_crypto_info_sm4_ccm sm4_ccm;
};
};
@@@ -375,7 -378,7 +378,7 @@@ void tls_sw_release_resources_rx(struc
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len);
-bool tls_sw_stream_read(const struct sock *sk);
+bool tls_sw_sock_is_readable(struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags);
diff --combined kernel/bpf/arraymap.c
index 447def540544,5e1ccfae916b..c7a5be3bf8be
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@@ -645,7 -645,7 +645,7 @@@ static const struct bpf_iter_seq_info i
.seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info),
};
- static int bpf_for_each_array_elem(struct bpf_map *map, void *callback_fn,
+ static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
void *callback_ctx, u64 flags)
{
u32 i, key, num_elems = 0;
@@@ -668,9 -668,8 +668,8 @@@
val = array->value + array->elem_size * i;
num_elems++;
key = i;
- ret = BPF_CAST_CALL(callback_fn)((u64)(long)map,
- (u64)(long)&key, (u64)(long)val,
- (u64)(long)callback_ctx, 0);
+ ret = callback_fn((u64)(long)map, (u64)(long)&key,
+ (u64)(long)val, (u64)(long)callback_ctx, 0);
/* return value: 0 - continue, 1 - stop and return */
if (ret)
break;
@@@ -1072,7 -1071,6 +1071,7 @@@ static struct bpf_map *prog_array_map_a
INIT_WORK(&aux->work, prog_array_map_clear_deferred);
INIT_LIST_HEAD(&aux->poke_progs);
mutex_init(&aux->poke_mutex);
+ spin_lock_init(&aux->owner.lock);
map = array_map_alloc(attr);
if (IS_ERR(map)) {
diff --combined kernel/bpf/core.c
index 6e3ae90ad107,ea8a468dbded..ded9163185d1
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@@ -524,7 -524,6 +524,7 @@@ int bpf_jit_enable __read_mostly = IS
int bpf_jit_kallsyms __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_DEFAULT_ON);
int bpf_jit_harden __read_mostly;
long bpf_jit_limit __read_mostly;
+long bpf_jit_limit_max __read_mostly;
static void bpf_prog_ksym_set_addr(struct bpf_prog *prog)
@@@ -818,8 -817,7 +818,8 @@@ u64 __weak bpf_jit_alloc_exec_limit(voi
static int __init bpf_jit_charge_init(void)
{
/* Only used as heuristic here to derive limit. */
- bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
+ bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
+ bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
PAGE_SIZE), LONG_MAX);
return 0;
}
@@@ -1823,26 -1821,20 +1823,26 @@@ static unsigned int __bpf_prog_ret0_war
bool bpf_prog_array_compatible(struct bpf_array *array,
const struct bpf_prog *fp)
{
+ bool ret;
+
if (fp->kprobe_override)
return false;
- if (!array->aux->type) {
+ spin_lock(&array->aux->owner.lock);
+
+ if (!array->aux->owner.type) {
/* There's no owner yet where we could check for
* compatibility.
*/
- array->aux->type = fp->type;
- array->aux->jited = fp->jited;
- return true;
+ array->aux->owner.type = fp->type;
+ array->aux->owner.jited = fp->jited;
+ ret = true;
+ } else {
+ ret = array->aux->owner.type == fp->type &&
+ array->aux->owner.jited == fp->jited;
}
-
- return array->aux->type == fp->type &&
- array->aux->jited == fp->jited;
+ spin_unlock(&array->aux->owner.lock);
+ return ret;
}
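The rewritten bpf_prog_array_compatible() lets the first program claim the array's (type, jited) owner pair and forces later programs to match it, with both paths now serialized by owner.lock. A toy userspace analogue of that claim-or-check pattern, using a pthread mutex where the kernel uses a spinlock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Field names mirror the kernel's owner struct, but this is not kernel
     * code; an explicit "set" flag stands in for the type-zero convention.
     */
    struct owner {
        pthread_mutex_t lock;
        bool set;
        int type;
        bool jited;
    };

    static bool claim_or_check(struct owner *o, int type, bool jited)
    {
        bool ok;

        pthread_mutex_lock(&o->lock);
        if (!o->set) {
            o->set = true;          /* first claimant records the pair */
            o->type = type;
            o->jited = jited;
            ok = true;
        } else {
            ok = o->type == type && o->jited == jited;
        }
        pthread_mutex_unlock(&o->lock);
        return ok;
    }

    int main(void)
    {
        struct owner o = { .lock = PTHREAD_MUTEX_INITIALIZER };

        printf("%d %d %d\n", claim_or_check(&o, 1, true),
               claim_or_check(&o, 1, true), claim_or_check(&o, 2, false));
        return 0;
    }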
static int bpf_check_tail_call(const struct bpf_prog *fp)
@@@ -2365,6 -2357,11 +2365,11 @@@ const struct bpf_func_proto * __weak bp
return NULL;
}
+ const struct bpf_func_proto * __weak bpf_get_trace_vprintk_proto(void)
+ {
+ return NULL;
+ }
+
u64 __weak bpf_event_output(struct bpf_map *map, u64 flags, void *meta,
u64 meta_size, void *ctx, u64 ctx_size,
bpf_ctx_copy_t ctx_copy)
diff --combined net/batman-adv/bridge_loop_avoidance.c
index 17687848daec,7242b32fff80..2ed9496fc41f
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@@ -254,7 -254,7 +254,7 @@@ batadv_claim_hash_find(struct batadv_pr
* Return: backbone gateway if found or NULL otherwise
*/
static struct batadv_bla_backbone_gw *
- batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
+ batadv_backbone_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
unsigned short vid)
{
struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
@@@ -336,7 -336,7 +336,7 @@@ batadv_bla_del_backbone_claims(struct b
* @vid: the VLAN ID
* @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
*/
- static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
+ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, const u8 *mac,
unsigned short vid, int claimtype)
{
struct sk_buff *skb;
@@@ -488,7 -488,7 +488,7 @@@ static void batadv_bla_loopdetect_repor
* Return: the (possibly created) backbone gateway or NULL on error
*/
static struct batadv_bla_backbone_gw *
- batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
+ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, const u8 *orig,
unsigned short vid, bool own_backbone)
{
struct batadv_bla_backbone_gw *entry;
@@@ -926,7 -926,7 +926,7 @@@ static bool batadv_handle_request(struc
*/
static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
- u8 *backbone_addr, u8 *claim_addr,
+ const u8 *backbone_addr, const u8 *claim_addr,
unsigned short vid)
{
struct batadv_bla_backbone_gw *backbone_gw;
@@@ -964,7 -964,7 +964,7 @@@
*/
static bool batadv_handle_claim(struct batadv_priv *bat_priv,
struct batadv_hard_iface *primary_if,
- u8 *backbone_addr, u8 *claim_addr,
+ const u8 *backbone_addr, const u8 *claim_addr,
unsigned short vid)
{
struct batadv_bla_backbone_gw *backbone_gw;
@@@ -1560,14 -1560,10 +1560,14 @@@ int batadv_bla_init(struct batadv_priv
return 0;
bat_priv->bla.claim_hash = batadv_hash_new(128);
- bat_priv->bla.backbone_hash = batadv_hash_new(32);
+ if (!bat_priv->bla.claim_hash)
+ return -ENOMEM;
- if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
+ bat_priv->bla.backbone_hash = batadv_hash_new(32);
+ if (!bat_priv->bla.backbone_hash) {
+ batadv_hash_destroy(bat_priv->bla.claim_hash);
return -ENOMEM;
+ }
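The batadv_bla_init() fix replaces one combined failure test with staged allocation and rollback: the backbone hash is attempted only after the claim hash succeeded, and the claim hash is destroyed if the second allocation fails. The same shape in a self-contained sketch (table_new/table_destroy are invented stand-ins for batadv_hash_new/batadv_hash_destroy):

    #include <stdlib.h>

    struct table { int dummy; };

    static struct table *table_new(void) { return malloc(sizeof(struct table)); }
    static void table_destroy(struct table *t) { free(t); }

    /* Allocate two tables; on failure of the second, undo the first so the
     * error path leaks nothing.
     */
    static int bla_init(struct table **claim, struct table **backbone)
    {
        *claim = table_new();
        if (!*claim)
            return -1;

        *backbone = table_new();
        if (!*backbone) {
            table_destroy(*claim);
            *claim = NULL;
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct table *claim, *backbone;

        if (bla_init(&claim, &backbone))
            return 1;
        table_destroy(backbone);
        table_destroy(claim);
        return 0;
    }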
batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
&batadv_claim_hash_lock_class_key);
@@@ -2130,7 -2126,7 +2130,7 @@@ batadv_bla_claim_dump_entry(struct sk_b
struct batadv_hard_iface *primary_if,
struct batadv_bla_claim *claim)
{
- u8 *primary_addr = primary_if->net_dev->dev_addr;
+ const u8 *primary_addr = primary_if->net_dev->dev_addr;
u16 backbone_crc;
bool is_own;
void *hdr;
@@@ -2298,7 -2294,7 +2298,7 @@@ batadv_bla_backbone_dump_entry(struct s
struct batadv_hard_iface *primary_if,
struct batadv_bla_backbone_gw *backbone_gw)
{
- u8 *primary_addr = primary_if->net_dev->dev_addr;
+ const u8 *primary_addr = primary_if->net_dev->dev_addr;
u16 backbone_crc;
bool is_own;
int msecs;
diff --combined net/core/dev.c
index eb3a366bf212,4e3d19a06de4..a58b02e22d69
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@@ -140,7 -140,7 +140,7 @@@
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
- #include <linux/netfilter_ingress.h>
+ #include <linux/netfilter_netdev.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
@@@ -303,6 -303,12 +303,12 @@@ static struct netdev_name_node *netdev_
return NULL;
}
+ bool netdev_name_in_use(struct net *net, const char *name)
+ {
+ return netdev_name_node_lookup(net, name);
+ }
+ EXPORT_SYMBOL(netdev_name_in_use);
+
int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
struct netdev_name_node *name_node;
@@@ -1133,7 -1139,7 +1139,7 @@@ static int __dev_alloc_name(struct net
}
snprintf(buf, IFNAMSIZ, name, i);
- if (!__dev_get_by_name(net, buf))
+ if (!netdev_name_in_use(net, buf))
return i;
/* It is possible to run out of possible slots
@@@ -1187,7 -1193,7 +1193,7 @@@ static int dev_get_valid_name(struct ne
if (strchr(name, '%'))
return dev_alloc_name_ns(net, dev, name);
- else if (__dev_get_by_name(net, name))
+ else if (netdev_name_in_use(net, name))
return -EEXIST;
else if (dev->name != name)
strlcpy(dev->name, name, IFNAMSIZ);
@@@ -1290,8 -1296,8 +1296,8 @@@ rollback
old_assign_type = NET_NAME_RENAMED;
goto rollback;
} else {
- pr_err("%s: name change rollback failed: %d\n",
- dev->name, ret);
+ netdev_err(dev, "name change rollback failed: %d\n",
+ ret);
}
}
@@@ -2345,7 -2351,7 +2351,7 @@@ static void netif_setup_tc(struct net_d
/* If TC0 is invalidated disable TC mapping */
if (tc->offset + tc->count > txq) {
- pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
+ netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
dev->num_tc = 0;
return;
}
@@@ -2356,8 -2362,8 +2362,8 @@@
tc = &dev->tc_to_txq[q];
if (tc->offset + tc->count > txq) {
- pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
- i, q);
+ netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
+ i, q);
netdev_set_prio_tc_map(dev, i, 0);
}
}
@@@ -2921,6 -2927,8 +2927,8 @@@ int netif_set_real_num_tx_queues(struc
if (dev->num_tc)
netif_setup_tc(dev, txq);
+ dev_qdisc_change_real_num_tx(dev, txq);
+
dev->real_num_tx_queues = txq;
if (disabling) {
@@@ -3163,12 -3171,6 +3171,12 @@@ static u16 skb_tx_hash(const struct net
qoffset = sb_dev->tc_to_txq[tc].offset;
qcount = sb_dev->tc_to_txq[tc].count;
+ if (unlikely(!qcount)) {
+ net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
+ sb_dev->name, qoffset, tc);
+ qoffset = 0;
+ qcount = dev->real_num_tx_queues;
+ }
}
if (skb_rx_queue_recorded(skb)) {
@@@ -3414,7 -3416,7 +3422,7 @@@ EXPORT_SYMBOL(__skb_gso_segment)
#ifdef CONFIG_BUG
static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{
- pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
+ netdev_err(dev, "hw csum failure\n");
skb_dump(KERN_ERR, skb, true);
dump_stack();
}
@@@ -3912,8 -3914,7 +3920,8 @@@ int dev_loopback_xmit(struct net *net,
skb_reset_mac_header(skb);
__skb_pull(skb, skb_network_offset(skb));
skb->pkt_type = PACKET_LOOPBACK;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (skb->ip_summed == CHECKSUM_NONE)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
WARN_ON(!skb_dst(skb));
skb_dst_force(skb);
netif_rx_ni(skb);
@@@ -3925,6 -3926,7 +3933,7 @@@ EXPORT_SYMBOL(dev_loopback_xmit)
static struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
+ #ifdef CONFIG_NET_CLS_ACT
struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
struct tcf_result cl_res;
@@@ -3960,6 -3962,7 +3969,7 @@@
default:
break;
}
+ #endif /* CONFIG_NET_CLS_ACT */
return skb;
}
@@@ -4153,13 -4156,20 +4163,20 @@@ static int __dev_queue_xmit(struct sk_b
qdisc_pkt_len_init(skb);
#ifdef CONFIG_NET_CLS_ACT
skb->tc_at_ingress = 0;
- # ifdef CONFIG_NET_EGRESS
+ #endif
+ #ifdef CONFIG_NET_EGRESS
if (static_branch_unlikely(&egress_needed_key)) {
+ if (nf_hook_egress_active()) {
+ skb = nf_hook_egress(skb, &rc, dev);
+ if (!skb)
+ goto out;
+ }
+ nf_skip_egress(skb, true);
skb = sch_handle_egress(skb, &rc, dev);
if (!skb)
goto out;
+ nf_skip_egress(skb, false);
}
- # endif
#endif
/* If device/qdisc don't need skb->dst, release it right now while
* its hot in this cpu cache.
@@@ -5301,6 -5311,7 +5318,7 @@@ skip_taps
if (static_branch_unlikely(&ingress_needed_key)) {
bool another = false;
+ nf_skip_egress(skb, true);
skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev, &another);
if (another)
@@@ -5308,6 -5319,7 +5326,7 @@@
if (!skb)
goto out;
+ nf_skip_egress(skb, false);
if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
goto out;
}
@@@ -5844,7 -5856,7 +5863,7 @@@ static void gro_normal_one(struct napi_
gro_normal_list(napi);
}
- static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
+ static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
struct packet_offload *ptype;
__be16 type = skb->protocol;
@@@ -5873,12 -5885,11 +5892,11 @@@
if (err) {
WARN_ON(&ptype->list == head);
kfree_skb(skb);
- return NET_RX_SUCCESS;
+ return;
}
out:
gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
- return NET_RX_SUCCESS;
}
static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
@@@ -6905,19 -6916,25 +6923,25 @@@ EXPORT_SYMBOL(netif_napi_add)
void napi_disable(struct napi_struct *n)
{
+ unsigned long val, new;
+
might_sleep();
set_bit(NAPI_STATE_DISABLE, &n->state);
- while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
- msleep(1);
- while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
- msleep(1);
+ do {
+ val = READ_ONCE(n->state);
+ if (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) {
+ usleep_range(20, 200);
+ continue;
+ }
+
+ new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC;
+ new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
+ } while (cmpxchg(&n->state, val, new) != val);
hrtimer_cancel(&n->timer);
- clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
clear_bit(NAPI_STATE_DISABLE, &n->state);
- clear_bit(NAPI_STATE_THREADED, &n->state);
}
EXPORT_SYMBOL(napi_disable);
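napi_disable() now collapses the two test_and_set_bit() loops into a single cmpxchg() loop that installs both busy bits and drops the mode bits in one atomic step. A userspace sketch of the same compare-and-swap shape with C11 atomics (bit names invented; the kernel sleeps briefly between retries):

    #include <stdatomic.h>
    #include <stdio.h>

    #define STATE_SCHED     (1UL << 0)
    #define STATE_NPSVC     (1UL << 1)
    #define STATE_THREADED  (1UL << 2)

    static void disable_state(_Atomic unsigned long *state)
    {
        unsigned long val = atomic_load(state);
        unsigned long new;

        for (;;) {
            if (val & (STATE_SCHED | STATE_NPSVC)) {
                /* the kernel sleeps briefly here before rereading */
                val = atomic_load(state);
                continue;
            }
            /* set both busy bits and clear the mode bit in one CAS */
            new = (val | STATE_SCHED | STATE_NPSVC) & ~STATE_THREADED;
            if (atomic_compare_exchange_weak(state, &val, new))
                break;
        }
    }

    int main(void)
    {
        _Atomic unsigned long state = STATE_THREADED;

        disable_state(&state);
        printf("state: %#lx\n", atomic_load(&state));
        return 0;
    }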
@@@ -6995,8 -7012,8 +7019,8 @@@ static int __napi_poll(struct napi_stru
}
if (unlikely(work > weight))
- pr_err_once("NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
- n->poll, work, weight);
+ netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
+ n->poll, work, weight);
if (likely(work < weight))
return work;
@@@ -8550,8 -8567,7 +8574,7 @@@ static int __dev_set_promiscuity(struc
dev->flags &= ~IFF_PROMISC;
else {
dev->promiscuity -= inc;
- pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
- dev->name);
+ netdev_warn(dev, "promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n");
return -EOVERFLOW;
}
}
@@@ -8621,8 -8637,7 +8644,7 @@@ static int __dev_set_allmulti(struct ne
dev->flags &= ~IFF_ALLMULTI;
else {
dev->allmulti -= inc;
- pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
- dev->name);
+ netdev_warn(dev, "allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n");
return -EOVERFLOW;
}
}
@@@ -9159,14 -9174,11 +9181,11 @@@ int dev_get_port_parent_id(struct net_d
}
err = devlink_compat_switch_id_get(dev, ppid);
- if (!err || err != -EOPNOTSUPP)
+ if (!recurse || err != -EOPNOTSUPP)
return err;
- if (!recurse)
- return -EOPNOTSUPP;
-
netdev_for_each_lower_dev(dev, lower_dev, iter) {
- err = dev_get_port_parent_id(lower_dev, ppid, recurse);
+ err = dev_get_port_parent_id(lower_dev, ppid, true);
if (err)
break;
if (!first.id_len)
@@@ -10867,7 -10879,7 +10886,7 @@@ struct net_device *alloc_netdev_mqs(in
if (!dev->ethtool_ops)
dev->ethtool_ops = &default_ethtool_ops;
- nf_hook_ingress_init(dev);
+ nf_hook_netdev_init(dev);
return dev;
@@@ -11153,7 -11165,7 +11172,7 @@@ int __dev_change_net_namespace(struct n
* we can use it in the destination network namespace.
*/
err = -EEXIST;
- if (__dev_get_by_name(net, dev->name)) {
+ if (netdev_name_in_use(net, dev->name)) {
/* We get here if we can't use the current device name */
if (!pat)
goto out;
@@@ -11506,7 -11518,7 +11525,7 @@@ static void __net_exit default_device_e
/* Push remaining network devices to init_net */
snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
- if (__dev_get_by_name(&init_net, fb_name))
+ if (netdev_name_in_use(&init_net, fb_name))
snprintf(fb_name, IFNAMSIZ, "dev%%d");
err = dev_change_net_namespace(dev, &init_net, fb_name);
if (err) {
diff --combined net/core/net-sysfs.c
index b2e49eb7001d,d6e4e0b43beb..9c01c642cf9e
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@@ -175,6 -175,14 +175,14 @@@ static int change_carrier(struct net_de
static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
+ struct net_device *netdev = to_net_dev(dev);
+
+ /* The check is also done in change_carrier; this helps returning early
+ * without hitting the trylock/restart in netdev_store.
+ */
+ if (!netdev->netdev_ops->ndo_change_carrier)
+ return -EOPNOTSUPP;
+
return netdev_store(dev, attr, buf, len, change_carrier);
}
@@@ -196,6 -204,12 +204,12 @@@ static ssize_t speed_show(struct devic
struct net_device *netdev = to_net_dev(dev);
int ret = -EINVAL;
+ /* The check is also done in __ethtool_get_link_ksettings; this helps
+ * returning early without hitting the trylock/restart below.
+ */
+ if (!netdev->ethtool_ops->get_link_ksettings)
+ return ret;
+
if (!rtnl_trylock())
return restart_syscall();
@@@ -216,6 -230,12 +230,12 @@@ static ssize_t duplex_show(struct devic
struct net_device *netdev = to_net_dev(dev);
int ret = -EINVAL;
+ /* The check is also done in __ethtool_get_link_ksettings; this helps
+ * returning early without hitting the trylock/restart below.
+ */
+ if (!netdev->ethtool_ops->get_link_ksettings)
+ return ret;
+
if (!rtnl_trylock())
return restart_syscall();
@@@ -468,6 -488,14 +488,14 @@@ static ssize_t proto_down_store(struct
struct device_attribute *attr,
const char *buf, size_t len)
{
+ struct net_device *netdev = to_net_dev(dev);
+
+ /* The check is also done in change_proto_down; this helps returning
+ * early without hitting the trylock/restart in netdev_store.
+ */
+ if (!netdev->netdev_ops->ndo_change_proto_down)
+ return -EOPNOTSUPP;
+
return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);
@@@ -478,6 -506,12 +506,12 @@@ static ssize_t phys_port_id_show(struc
struct net_device *netdev = to_net_dev(dev);
ssize_t ret = -EINVAL;
+ /* The check is also done in dev_get_phys_port_id; this helps returning
+ * early without hitting the trylock/restart below.
+ */
+ if (!netdev->netdev_ops->ndo_get_phys_port_id)
+ return -EOPNOTSUPP;
+
if (!rtnl_trylock())
return restart_syscall();
@@@ -500,6 -534,13 +534,13 @@@ static ssize_t phys_port_name_show(stru
struct net_device *netdev = to_net_dev(dev);
ssize_t ret = -EINVAL;
+ /* The checks are also done in dev_get_phys_port_name; this helps
+ * returning early without hitting the trylock/restart below.
+ */
+ if (!netdev->netdev_ops->ndo_get_phys_port_name &&
+ !netdev->netdev_ops->ndo_get_devlink_port)
+ return -EOPNOTSUPP;
+
if (!rtnl_trylock())
return restart_syscall();
@@@ -522,6 -563,14 +563,14 @@@ static ssize_t phys_switch_id_show(stru
struct net_device *netdev = to_net_dev(dev);
ssize_t ret = -EINVAL;
+ /* The checks are also done in dev_get_phys_port_name; this helps
+ * returning early without hitting the trylock/restart below. This works
+ * because recurse is false when calling dev_get_port_parent_id.
+ */
+ if (!netdev->netdev_ops->ndo_get_port_parent_id &&
+ !netdev->netdev_ops->ndo_get_devlink_port)
+ return -EOPNOTSUPP;
+
if (!rtnl_trylock())
return restart_syscall();
@@@ -1226,6 -1275,12 +1275,12 @@@ static ssize_t tx_maxrate_store(struct
if (!capable(CAP_NET_ADMIN))
return -EPERM;
+ /* The check is also done later; this helps returning early without
+ * hitting the trylock/restart below.
+ */
+ if (!dev->netdev_ops->ndo_set_tx_maxrate)
+ return -EOPNOTSUPP;
+
err = kstrtou32(buf, 10, &rate);
if (err < 0)
return err;
@@@ -1869,7 -1924,7 +1924,7 @@@ static struct class net_class __ro_afte
.get_ownership = net_get_ownership,
};
- #ifdef CONFIG_OF_NET
+ #ifdef CONFIG_OF
static int of_dev_node_match(struct device *dev, const void *data)
{
for (; dev; dev = dev->parent) {
@@@ -1973,9 -2028,9 +2028,9 @@@ int netdev_register_kobject(struct net_
int netdev_change_owner(struct net_device *ndev, const struct net *net_old,
const struct net *net_new)
{
+ kuid_t old_uid = GLOBAL_ROOT_UID, new_uid = GLOBAL_ROOT_UID;
+ kgid_t old_gid = GLOBAL_ROOT_GID, new_gid = GLOBAL_ROOT_GID;
struct device *dev = &ndev->dev;
- kuid_t old_uid, new_uid;
- kgid_t old_gid, new_gid;
int error;
net_ns_get_ownership(net_old, &old_uid, &old_gid);
diff --combined net/core/skbuff.c
index fe9358437380,74601bbc56ac..09b8cf8ab234
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@@ -80,7 -80,6 +80,7 @@@
#include <linux/indirect_call_wrapper.h>
#include "datagram.h" +#include "sock_destructor.h"
struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
@@@ -135,34 -134,31 +135,31 @@@ struct napi_alloc_cache
static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
- static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask,
- unsigned int align_mask)
+ void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
- return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask);
- }
-
- void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
- {
fragsz = SKB_DATA_ALIGN(fragsz);
- return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
+ return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
}
EXPORT_SYMBOL(__napi_alloc_frag_align);
void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
- struct page_frag_cache *nc;
void *data;
fragsz = SKB_DATA_ALIGN(fragsz);
if (in_hardirq() || irqs_disabled()) {
- nc = this_cpu_ptr(&netdev_alloc_cache);
+ struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
+
data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
} else {
+ struct napi_alloc_cache *nc;
+
local_bh_disable();
- data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
+ nc = this_cpu_ptr(&napi_alloc_cache);
+ data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
local_bh_enable();
}
return data;
@@@ -398,8 -394,9 +395,9 @@@ struct sk_buff *__alloc_skb(unsigned in
{
struct kmem_cache *cache;
struct sk_buff *skb;
- u8 *data;
+ unsigned int osize;
bool pfmemalloc;
+ u8 *data;
cache = (flags & SKB_ALLOC_FCLONE) ? skbuff_fclone_cache : skbuff_head_cache; @@@ -431,7 -428,8 +429,8 @@@ * Put skb_shared_info exactly at the end of allocated zone, * to allow max possible filling before reallocation. */ - size = SKB_WITH_OVERHEAD(ksize(data)); + osize = ksize(data); + size = SKB_WITH_OVERHEAD(osize); prefetchw(data + size);
/*
@@@ -440,7 -438,7 +439,7 @@@
* the tail pointer in struct sk_buff!
*/
memset(skb, 0, offsetof(struct sk_buff, tail));
- __build_skb_around(skb, data, 0);
+ __build_skb_around(skb, data, osize);
skb->pfmemalloc = pfmemalloc;
if (flags & SKB_ALLOC_FCLONE) { @@@ -1805,39 -1803,30 +1804,39 @@@ EXPORT_SYMBOL(skb_realloc_headroom) struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) { int delta = headroom - skb_headroom(skb); + int osize = skb_end_offset(skb); + struct sock *sk = skb->sk;
if (WARN_ONCE(delta <= 0, "%s is expecting an increase in the headroom", __func__)) return skb;
- /* pskb_expand_head() might crash, if skb is shared */
- if (skb_shared(skb)) {
+ delta = SKB_DATA_ALIGN(delta);
+ /* pskb_expand_head() might crash, if skb is shared. */
+ if (skb_shared(skb) || !is_skb_wmem(skb)) {
struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
- if (likely(nskb)) {
- if (skb->sk)
- skb_set_owner_w(nskb, skb->sk);
- consume_skb(skb);
- } else {
- kfree_skb(skb);
- }
+ if (unlikely(!nskb))
+ goto fail;
+
+ if (sk)
+ skb_set_owner_w(nskb, sk);
+ consume_skb(skb);
skb = nskb;
}
- if (skb &&
- pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
- kfree_skb(skb);
- skb = NULL;
+ if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC))
+ goto fail;
+
+ if (sk && is_skb_wmem(skb)) {
+ delta = skb_end_offset(skb) - osize;
+ refcount_add(delta, &sk->sk_wmem_alloc);
+ skb->truesize += delta;
}
return skb;
+
+fail:
+ kfree_skb(skb);
+ return NULL;
}
EXPORT_SYMBOL(skb_expand_head);
diff --combined net/ipv4/tcp.c
index f5c336f8b0c8,d0b848ff5c0f..c262cb3e4f35
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@@ -287,8 -287,8 +287,8 @@@ enum
TCP_CMSG_TS = 2
};
- struct percpu_counter tcp_orphan_count;
- EXPORT_SYMBOL_GPL(tcp_orphan_count);
+ DEFINE_PER_CPU(unsigned int, tcp_orphan_count);
+ EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);
long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);
@@@ -325,11 -325,6 +325,6 @@@ struct tcp_splice_state
unsigned long tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL_GPL(tcp_memory_pressure);
- DEFINE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
- EXPORT_SYMBOL(tcp_rx_skb_cache_key);
-
- DEFINE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
-
void tcp_enter_memory_pressure(struct sock *sk)
{
unsigned long val;
@@@ -486,7 -481,10 +481,7 @@@ static bool tcp_stream_is_readable(stru
{
if (tcp_epollin_ready(sk, target))
return true;
-
- if (sk->sk_prot->stream_memory_read)
- return sk->sk_prot->stream_memory_read(sk);
- return false;
+ return sk_is_readable(sk);
}
/*
@@@ -644,7 -642,7 +639,7 @@@ int tcp_ioctl(struct sock *sk, int cmd
}
EXPORT_SYMBOL(tcp_ioctl);
- static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
+ void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
tp->pushed_seq = tp->write_seq;
@@@ -655,7 -653,7 +650,7 @@@ static inline bool forced_push(const st
return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
- static void skb_entail(struct sock *sk, struct sk_buff *skb)
+ void tcp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
@@@ -858,30 -856,15 +853,15 @@@ ssize_t tcp_splice_read(struct socket *
}
EXPORT_SYMBOL(tcp_splice_read);
- struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
- bool force_schedule)
+ struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
+ bool force_schedule)
{
struct sk_buff *skb;
- if (likely(!size)) {
- skb = sk->sk_tx_skb_cache;
- if (skb) {
- skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
- sk->sk_tx_skb_cache = NULL;
- pskb_trim(skb, 0);
- INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
- skb_shinfo(skb)->tx_flags = 0;
- memset(TCP_SKB_CB(skb), 0, sizeof(struct tcp_skb_cb));
- return skb;
- }
- }
- /* The TCP header must be at least 32-bit aligned. */
- size = ALIGN(size, 4);
-
if (unlikely(tcp_under_memory_pressure(sk)))
sk_mem_reclaim_partial(sk);
- skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
+ skb = alloc_skb_fclone(size + MAX_TCP_HEADER, gfp);
if (likely(skb)) {
bool mem_scheduled;
@@@ -892,7 -875,7 +872,7 @@@
mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
}
if (likely(mem_scheduled)) {
- skb_reserve(skb, sk->sk_prot->max_header);
+ skb_reserve(skb, MAX_TCP_HEADER);
/*
* Make sure that we have exactly size bytes
* available to the caller, no more, no less.
@@@ -952,7 -935,7 +932,7 @@@ int tcp_send_mss(struct sock *sk, int *
*/
void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
{
- if (skb && !skb->len) {
+ if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
tcp_unlink_write_queue(skb, sk);
if (tcp_write_queue_empty(sk))
tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
@@@ -960,8 -943,8 +940,8 @@@
}
}
- struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
- struct page *page, int offset, size_t *size)
+ static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
+ struct page *page, int offset, size_t *size)
{
struct sk_buff *skb = tcp_write_queue_tail(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@@ -974,15 -957,15 +954,15 @@@ new_segment
if (!sk_stream_memory_free(sk))
return NULL;
- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
- tcp_rtx_and_write_queues_empty(sk));
+ skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation,
+ tcp_rtx_and_write_queues_empty(sk));
if (!skb)
return NULL;
#ifdef CONFIG_TLS_DEVICE
skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
#endif
- skb_entail(sk, skb);
+ tcp_skb_entail(sk, skb);
copy = size_goal;
}
@@@ -1303,15 -1286,15 +1283,15 @@@ new_segment
goto restart;
}
first_skb = tcp_rtx_and_write_queues_empty(sk);
- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
- first_skb);
+ skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation,
+ first_skb);
if (!skb)
goto wait_for_space;
process_backlog++;
skb->ip_summed = CHECKSUM_PARTIAL;
- skb_entail(sk, skb);
+ tcp_skb_entail(sk, skb);
copy = size_goal;
/* All packets are restored as if they have
@@@ -2687,11 -2670,36 +2667,36 @@@ void tcp_shutdown(struct sock *sk, int
}
EXPORT_SYMBOL(tcp_shutdown);
+ int tcp_orphan_count_sum(void)
+ {
+ int i, total = 0;
+
+ for_each_possible_cpu(i)
+ total += per_cpu(tcp_orphan_count, i);
+
+ return max(total, 0);
+ }
+
+ static int tcp_orphan_cache;
+ static struct timer_list tcp_orphan_timer;
+ #define TCP_ORPHAN_TIMER_PERIOD msecs_to_jiffies(100)
+
+ static void tcp_orphan_update(struct timer_list *unused)
+ {
+ WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum());
+ mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);
+ }
+
+ static bool tcp_too_many_orphans(int shift)
+ {
+ return READ_ONCE(tcp_orphan_cache) << shift > sysctl_tcp_max_orphans;
+ }
+
bool tcp_check_oom(struct sock *sk, int shift)
{
bool too_many_orphans, out_of_socket_memory;
- too_many_orphans = tcp_too_many_orphans(sk, shift);
+ too_many_orphans = tcp_too_many_orphans(shift);
out_of_socket_memory = tcp_out_of_memory(sk);
if (too_many_orphans)
@@@ -2800,7 -2808,7 +2805,7 @@@ adjudge_to_death
/* remove backlog if any, without releasing ownership. */
__release_sock(sk);
- percpu_counter_inc(sk->sk_prot->orphan_count);
+ this_cpu_inc(tcp_orphan_count);
/* Have we already been destroyed by a softirq or backlog? */
if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
@@@ -2917,11 -2925,6 +2922,6 @@@ void tcp_write_queue_purge(struct sock
sk_wmem_free_skb(sk, skb);
}
tcp_rtx_queue_purge(sk);
- skb = sk->sk_tx_skb_cache;
- if (skb) {
- __kfree_skb(skb);
- sk->sk_tx_skb_cache = NULL;
- }
INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
sk_mem_reclaim(sk);
tcp_clear_all_retrans_hints(tcp_sk(sk));
@@@ -2958,10 -2961,6 +2958,6 @@@ int tcp_disconnect(struct sock *sk, in
tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
- if (sk->sk_rx_skb_cache) {
- __kfree_skb(sk->sk_rx_skb_cache);
- sk->sk_rx_skb_cache = NULL;
- }
WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
tp->urg_data = 0;
tcp_write_queue_purge(sk);
@@@ -4502,7 -4501,10 +4498,10 @@@ void __init tcp_init(void
sizeof_field(struct sk_buff, cb));
percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL); - percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL); + + timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE); + mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD); + inet_hashinfo_init(&tcp_hashinfo); inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash", thash_entries, 21, /* one slot per 2 MB*/ diff --combined net/mac80211/mesh.c index 5dcfd53a4ab6,a4212a333d61..15ac08d111ea --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@@ -672,7 -672,7 +672,7 @@@ ieee80211_mesh_update_bss_params(struc u8 *ie, u8 ie_len) { struct ieee80211_supported_band *sband; - const u8 *cap; + const struct element *cap; const struct ieee80211_he_operation *he_oper = NULL;
sband = ieee80211_get_sband(sdata);
@@@ -687,10 -687,9 +687,10 @@@
sdata->vif.bss_conf.he_support = true;
- cap = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION, ie, ie_len);
- if (cap && cap[1] >= ieee80211_he_oper_size(&cap[3]))
- he_oper = (void *)(cap + 3);
+ cap = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ie_len);
+ if (cap && cap->datalen >= 1 + sizeof(*he_oper) &&
+ cap->datalen >= 1 + ieee80211_he_oper_size(cap->data + 1))
+ he_oper = (void *)(cap->data + 1);
if (he_oper)
sdata->vif.bss_conf.he_oper.params =
@@@ -1247,7 -1246,7 +1247,7 @@@ ieee80211_mesh_rx_probe_req(struct ieee
struct sk_buff *presp;
struct beacon_data *bcn;
struct ieee80211_mgmt *hdr;
- struct ieee802_11_elems elems;
+ struct ieee802_11_elems *elems;
size_t baselen;
u8 *pos;
@@@ -1256,22 -1255,24 +1256,24 @@@
if (baselen > len)
return;
- ieee802_11_parse_elems(pos, len - baselen, false, &elems, mgmt->bssid,
- NULL);
-
- if (!elems.mesh_id)
+ elems = ieee802_11_parse_elems(pos, len - baselen, false, mgmt->bssid,
+ NULL);
+ if (!elems)
return;
+ if (!elems->mesh_id)
+ goto free;
+
/* 802.11-2012 10.1.4.3.2 */
if ((!ether_addr_equal(mgmt->da, sdata->vif.addr) &&
!is_broadcast_ether_addr(mgmt->da)) ||
- elems.ssid_len != 0)
- return;
+ elems->ssid_len != 0)
+ goto free;
- if (elems.mesh_id_len != 0 && - (elems.mesh_id_len != ifmsh->mesh_id_len || - memcmp(elems.mesh_id, ifmsh->mesh_id, ifmsh->mesh_id_len))) - return; + if (elems->mesh_id_len != 0 && + (elems->mesh_id_len != ifmsh->mesh_id_len || + memcmp(elems->mesh_id, ifmsh->mesh_id, ifmsh->mesh_id_len))) + goto free;
rcu_read_lock();
bcn = rcu_dereference(ifmsh->beacon);
@@@ -1295,6 -1296,8 +1297,8 @@@
ieee80211_tx_skb(sdata, presp);
out:
rcu_read_unlock();
+ free:
+ kfree(elems);
}
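Since ieee802_11_parse_elems() now returns a heap allocation, each early return in callers like the probe-request handler above becomes a jump to a single kfree() label. A toy version of that allocate, check, free flow (all names and fields here are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct elems {
        int mesh_id_len;
    };

    static struct elems *parse_elems(const unsigned char *buf, size_t len)
    {
        struct elems *e = calloc(1, sizeof(*e));

        if (e && len)
            e->mesh_id_len = buf[0];
        return e;
    }

    static void rx_frame(const unsigned char *buf, size_t len)
    {
        struct elems *elems = parse_elems(buf, len);

        if (!elems)
            return;
        if (!elems->mesh_id_len)
            goto free;      /* every early exit funnels through free */

        printf("mesh id len %d\n", elems->mesh_id_len);
    free:
        free(elems);
    }

    int main(void)
    {
        const unsigned char frame[] = { 3 };

        rx_frame(frame, sizeof(frame));
        return 0;
    }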
static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
@@@ -1305,7 -1308,7 +1309,7 @@@
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- struct ieee802_11_elems elems;
+ struct ieee802_11_elems *elems;
struct ieee80211_channel *channel;
size_t baselen;
int freq;
@@@ -1320,42 -1323,47 +1324,47 @@@
if (baselen > len)
return;
- ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
- false, &elems, mgmt->bssid, NULL);
+ elems = ieee802_11_parse_elems(mgmt->u.probe_resp.variable,
+ len - baselen,
+ false, mgmt->bssid, NULL);
+ if (!elems)
+ return;
/* ignore non-mesh or secure / unsecure mismatch */
- if ((!elems.mesh_id || !elems.mesh_config) ||
- (elems.rsn && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) ||
- (!elems.rsn && sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE))
- return;
+ if ((!elems->mesh_id || !elems->mesh_config) ||
+ (elems->rsn && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) ||
+ (!elems->rsn && sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE))
+ goto free;
- if (elems.ds_params)
- freq = ieee80211_channel_to_frequency(elems.ds_params[0], band);
+ if (elems->ds_params)
+ freq = ieee80211_channel_to_frequency(elems->ds_params[0], band);
else
freq = rx_status->freq;
channel = ieee80211_get_channel(local->hw.wiphy, freq);
if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
- return;
+ goto free;
- if (mesh_matches_local(sdata, &elems)) { + if (mesh_matches_local(sdata, elems)) { mpl_dbg(sdata, "rssi_threshold=%d,rx_status->signal=%d\n", sdata->u.mesh.mshcfg.rssi_threshold, rx_status->signal); if (!sdata->u.mesh.user_mpm || sdata->u.mesh.mshcfg.rssi_threshold == 0 || sdata->u.mesh.mshcfg.rssi_threshold < rx_status->signal) - mesh_neighbour_update(sdata, mgmt->sa, &elems, + mesh_neighbour_update(sdata, mgmt->sa, elems, rx_status);
if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT &&
!sdata->vif.csa_active)
- ieee80211_mesh_process_chnswitch(sdata, &elems, true);
+ ieee80211_mesh_process_chnswitch(sdata, elems, true);
}
if (ifmsh->sync_ops)
- ifmsh->sync_ops->rx_bcn_presp(sdata,
- stype, mgmt, &elems, rx_status);
+ ifmsh->sync_ops->rx_bcn_presp(sdata, stype, mgmt, len,
+ elems->mesh_config, rx_status);
+ free:
+ kfree(elems);
}
int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata)
@@@ -1447,7 -1455,7 +1456,7 @@@ static void mesh_rx_csa_frame(struct ie
struct ieee80211_mgmt *mgmt, size_t len)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- struct ieee802_11_elems elems;
+ struct ieee802_11_elems *elems;
u16 pre_value;
bool fwd_csa = true;
size_t baselen;
@@@ -1460,33 -1468,37 +1469,37 @@@
pos = mgmt->u.action.u.chan_switch.variable;
baselen = offsetof(struct ieee80211_mgmt,
u.action.u.chan_switch.variable);
- ieee802_11_parse_elems(pos, len - baselen, true, &elems,
- mgmt->bssid, NULL);
-
- if (!mesh_matches_local(sdata, &elems))
+ elems = ieee802_11_parse_elems(pos, len - baselen, true,
+ mgmt->bssid, NULL);
+ if (!elems)
return;
+ if (!mesh_matches_local(sdata, elems))
+ goto free;
+
- ifmsh->chsw_ttl = elems.mesh_chansw_params_ie->mesh_ttl;
+ ifmsh->chsw_ttl = elems->mesh_chansw_params_ie->mesh_ttl;
if (!--ifmsh->chsw_ttl)
fwd_csa = false;
- pre_value = le16_to_cpu(elems.mesh_chansw_params_ie->mesh_pre_value);
+ pre_value = le16_to_cpu(elems->mesh_chansw_params_ie->mesh_pre_value);
if (ifmsh->pre_value >= pre_value)
- return;
+ goto free;
ifmsh->pre_value = pre_value;
if (!sdata->vif.csa_active && - !ieee80211_mesh_process_chnswitch(sdata, &elems, false)) { + !ieee80211_mesh_process_chnswitch(sdata, elems, false)) { mcsa_dbg(sdata, "Failed to process CSA action frame"); - return; + goto free; }
/* forward or re-broadcast the CSA frame */
if (fwd_csa) {
- if (mesh_fwd_csa_frame(sdata, mgmt, len, &elems) < 0)
+ if (mesh_fwd_csa_frame(sdata, mgmt, len, elems) < 0)
mcsa_dbg(sdata, "Failed to forward the CSA frame");
}
+ free:
+ kfree(elems);
}
static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
diff --combined net/netfilter/ipvs/ip_vs_ctl.c
index 0ff94c66641f,e62b40bd349e..38ed88b89007
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@@ -48,8 -48,6 +48,8 @@@
#include <net/ip_vs.h>
+MODULE_ALIAS_GENL_FAMILY(IPVS_GENL_NAME);
+
/* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */
static DEFINE_MUTEX(__ip_vs_mutex);
@@@ -2019,6 -2017,12 +2019,12 @@@ static struct ctl_table vs_vars[] =
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "run_estimation",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
#ifdef CONFIG_IP_VS_DEBUG
{
.procname = "debug_level",
@@@ -4092,6 -4096,8 +4098,8 @@@ static int __net_init ip_vs_control_net
tbl[idx++].data = &ipvs->sysctl_conn_reuse_mode;
tbl[idx++].data = &ipvs->sysctl_schedule_icmp;
tbl[idx++].data = &ipvs->sysctl_ignore_tunneled;
+ ipvs->sysctl_run_estimation = 1;
+ tbl[idx++].data = &ipvs->sysctl_run_estimation;
#ifdef CONFIG_IP_VS_DEBUG
/* Global sysctls must be ro in non-init netns */
if (!net_eq(net, &init_net))
diff --combined net/tls/tls_main.c
index 9ab81db8a654,278192ee133e..acfba9f1ba72
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@@ -421,6 -421,88 +421,88 @@@ static int do_tls_getsockopt_conf(struc
rc = -EFAULT;
break;
}
+ case TLS_CIPHER_AES_CCM_128: {
+ struct tls12_crypto_info_aes_ccm_128 *aes_ccm_128 =
+ container_of(crypto_info,
+ struct tls12_crypto_info_aes_ccm_128, info);
+
+ if (len != sizeof(*aes_ccm_128)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ lock_sock(sk);
+ memcpy(aes_ccm_128->iv,
+ cctx->iv + TLS_CIPHER_AES_CCM_128_SALT_SIZE,
+ TLS_CIPHER_AES_CCM_128_IV_SIZE);
+ memcpy(aes_ccm_128->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE);
+ release_sock(sk);
+ if (copy_to_user(optval, aes_ccm_128, sizeof(*aes_ccm_128)))
+ rc = -EFAULT;
+ break;
+ }
+ case TLS_CIPHER_CHACHA20_POLY1305: {
+ struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305 =
+ container_of(crypto_info,
+ struct tls12_crypto_info_chacha20_poly1305,
+ info);
+
+ if (len != sizeof(*chacha20_poly1305)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ lock_sock(sk);
+ memcpy(chacha20_poly1305->iv,
+ cctx->iv + TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE,
+ TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE);
+ memcpy(chacha20_poly1305->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE);
+ release_sock(sk);
+ if (copy_to_user(optval, chacha20_poly1305,
+ sizeof(*chacha20_poly1305)))
+ rc = -EFAULT;
+ break;
+ }
+ case TLS_CIPHER_SM4_GCM: {
+ struct tls12_crypto_info_sm4_gcm *sm4_gcm_info =
+ container_of(crypto_info,
+ struct tls12_crypto_info_sm4_gcm, info);
+
+ if (len != sizeof(*sm4_gcm_info)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ lock_sock(sk);
+ memcpy(sm4_gcm_info->iv,
+ cctx->iv + TLS_CIPHER_SM4_GCM_SALT_SIZE,
+ TLS_CIPHER_SM4_GCM_IV_SIZE);
+ memcpy(sm4_gcm_info->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE);
+ release_sock(sk);
+ if (copy_to_user(optval, sm4_gcm_info, sizeof(*sm4_gcm_info)))
+ rc = -EFAULT;
+ break;
+ }
+ case TLS_CIPHER_SM4_CCM: {
+ struct tls12_crypto_info_sm4_ccm *sm4_ccm_info =
+ container_of(crypto_info,
+ struct tls12_crypto_info_sm4_ccm, info);
+
+ if (len != sizeof(*sm4_ccm_info)) {
+ rc = -EINVAL;
+ goto out;
+ }
+ lock_sock(sk);
+ memcpy(sm4_ccm_info->iv,
+ cctx->iv + TLS_CIPHER_SM4_CCM_SALT_SIZE,
+ TLS_CIPHER_SM4_CCM_IV_SIZE);
+ memcpy(sm4_ccm_info->rec_seq, cctx->rec_seq,
+ TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE);
+ release_sock(sk);
+ if (copy_to_user(optval, sm4_ccm_info, sizeof(*sm4_ccm_info)))
+ rc = -EFAULT;
+ break;
+ }
default:
rc = -EINVAL;
}
@@@ -524,6 -606,12 +606,12 @@@ static int do_tls_setsockopt_conf(struc
case TLS_CIPHER_CHACHA20_POLY1305:
optsize = sizeof(struct tls12_crypto_info_chacha20_poly1305);
break;
+ case TLS_CIPHER_SM4_GCM:
+ optsize = sizeof(struct tls12_crypto_info_sm4_gcm);
+ break;
+ case TLS_CIPHER_SM4_CCM:
+ optsize = sizeof(struct tls12_crypto_info_sm4_ccm);
+ break;
default:
rc = -EINVAL;
goto err_crypto_info;
@@@ -681,12 -769,12 +769,12 @@@ static void build_protos(struct proto p
prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
- prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
+ prot[TLS_BASE][TLS_SW].sock_is_readable = tls_sw_sock_is_readable;
prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;
prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
- prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
+ prot[TLS_SW][TLS_SW].sock_is_readable = tls_sw_sock_is_readable;
prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;
#ifdef CONFIG_TLS_DEVICE
diff --combined net/tls/tls_sw.c
index d5d09bd817b7,4147bb2e7057..c5ba3fd50e91
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@@ -498,9 -498,15 +498,15 @@@ static int tls_do_encryption(struct soc
int rc, iv_offset = 0;
/* For CCM based ciphers, first byte of IV is a constant */
- if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
+ switch (prot->cipher_type) {
+ case TLS_CIPHER_AES_CCM_128:
rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
iv_offset = 1;
+ break;
+ case TLS_CIPHER_SM4_CCM:
+ rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
+ iv_offset = 1;
+ break;
}
memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv, @@@ -1457,10 -1463,16 +1463,16 @@@ static int decrypt_internal(struct soc aad = (u8 *)(sgout + n_sgout); iv = aad + prot->aad_size;
- /* For CCM based ciphers, first byte of nonce+iv is always '2' */
- if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
- iv[0] = 2;
+ /* For CCM based ciphers, first byte of nonce+iv is a constant */
+ switch (prot->cipher_type) {
+ case TLS_CIPHER_AES_CCM_128:
+ iv[0] = TLS_AES_CCM_IV_B0_BYTE;
+ iv_offset = 1;
+ break;
+ case TLS_CIPHER_SM4_CCM:
+ iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
iv_offset = 1;
+ break;
}
/* Prepare IV */
@@@ -2026,7 -2038,7 +2038,7 @@@ splice_read_end
return copied ? : err;
}
-bool tls_sw_stream_read(const struct sock *sk)
+bool tls_sw_sock_is_readable(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
@@@ -2424,6 -2436,40 +2436,40 @@@ int tls_set_sw_offload(struct sock *sk
cipher_name = "rfc7539(chacha20,poly1305)";
break;
}
+ case TLS_CIPHER_SM4_GCM: {
+ struct tls12_crypto_info_sm4_gcm *sm4_gcm_info;
+
+ sm4_gcm_info = (void *)crypto_info;
+ nonce_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
+ tag_size = TLS_CIPHER_SM4_GCM_TAG_SIZE;
+ iv_size = TLS_CIPHER_SM4_GCM_IV_SIZE;
+ iv = sm4_gcm_info->iv;
+ rec_seq_size = TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE;
+ rec_seq = sm4_gcm_info->rec_seq;
+ keysize = TLS_CIPHER_SM4_GCM_KEY_SIZE;
+ key = sm4_gcm_info->key;
+ salt = sm4_gcm_info->salt;
+ salt_size = TLS_CIPHER_SM4_GCM_SALT_SIZE;
+ cipher_name = "gcm(sm4)";
+ break;
+ }
+ case TLS_CIPHER_SM4_CCM: {
+ struct tls12_crypto_info_sm4_ccm *sm4_ccm_info;
+
+ sm4_ccm_info = (void *)crypto_info;
+ nonce_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
+ tag_size = TLS_CIPHER_SM4_CCM_TAG_SIZE;
+ iv_size = TLS_CIPHER_SM4_CCM_IV_SIZE;
+ iv = sm4_ccm_info->iv;
+ rec_seq_size = TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE;
+ rec_seq = sm4_ccm_info->rec_seq;
+ keysize = TLS_CIPHER_SM4_CCM_KEY_SIZE;
+ key = sm4_ccm_info->key;
+ salt = sm4_ccm_info->salt;
+ salt_size = TLS_CIPHER_SM4_CCM_SALT_SIZE;
+ cipher_name = "ccm(sm4)";
+ break;
+ }
default:
rc = -EINVAL;
goto free_priv;
diff --combined net/wireless/core.c
index aaba847d79eb,45be124a98f1..eb297e1015e0
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@@ -524,7 -524,6 +524,7 @@@ use_default_name
INIT_WORK(&rdev->propagate_cac_done_wk, cfg80211_propagate_cac_done_wk);
INIT_WORK(&rdev->mgmt_registrations_update_wk,
cfg80211_mgmt_registrations_update_wk);
+ spin_lock_init(&rdev->mgmt_registrations_lock);

diff --combined net/wireless/core.c
index aaba847d79eb,45be124a98f1..eb297e1015e0
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@@ -524,7 -524,6 +524,7 @@@ use_default_name
 	INIT_WORK(&rdev->propagate_cac_done_wk, cfg80211_propagate_cac_done_wk);
 	INIT_WORK(&rdev->mgmt_registrations_update_wk,
 		  cfg80211_mgmt_registrations_update_wk);
+	spin_lock_init(&rdev->mgmt_registrations_lock);

 #ifdef CONFIG_CFG80211_DEFAULT_PS
 	rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@@ -1081,6 -1080,16 +1081,16 @@@ void cfg80211_dev_free(struct cfg80211_
 	list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
 		cfg80211_put_bss(&rdev->wiphy, &scan->pub);
 	mutex_destroy(&rdev->wiphy.mtx);
+
+	/*
+	 * The 'regd' can only be non-NULL if we never finished
+	 * initializing the wiphy and thus never went through the
+	 * unregister path - e.g. in failure scenarios. Thus, it
+	 * cannot have been visible to anyone if non-NULL, so we
+	 * can just free it here.
+	 */
+	kfree(rcu_dereference_raw(rdev->wiphy.regd));
+
 	kfree(rdev);
 }

@@@ -1280,6 -1289,7 +1290,6 @@@ void cfg80211_init_wdev(struct wireless
 	INIT_LIST_HEAD(&wdev->event_list);
 	spin_lock_init(&wdev->event_lock);
 	INIT_LIST_HEAD(&wdev->mgmt_registrations);
-	spin_lock_init(&wdev->mgmt_registrations_lock);
 	INIT_LIST_HEAD(&wdev->pmsr_list);
 	spin_lock_init(&wdev->pmsr_lock);
 	INIT_WORK(&wdev->pmsr_free_wk, cfg80211_pmsr_free_wk);

diff --combined net/wireless/scan.c
index adc0d14cfd86,e4f79b23f7f6..22e92be61938
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@@ -383,7 -383,7 +383,7 @@@ static bool is_bss(struct cfg80211_bss
 		   const u8 *ssid, size_t ssid_len)
 {
 	const struct cfg80211_bss_ies *ies;
-	const u8 *ssidie;
+	const struct element *ssid_elem;

 	if (bssid && !ether_addr_equal(a->bssid, bssid))
 		return false;
@@@ -394,12 -394,12 +394,12 @@@
 	ies = rcu_access_pointer(a->ies);
 	if (!ies)
 		return false;
-	ssidie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
-	if (!ssidie)
+	ssid_elem = cfg80211_find_elem(WLAN_EID_SSID, ies->data, ies->len);
+	if (!ssid_elem)
 		return false;
-	if (ssidie[1] != ssid_len)
+	if (ssid_elem->datalen != ssid_len)
 		return false;
-	return memcmp(ssidie + 2, ssid, ssid_len) == 0;
+	return memcmp(ssid_elem->data, ssid, ssid_len) == 0;
 }
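
The is_bss() conversion above is part of a wider cfg80211 move from raw IE
pointers to struct element, which names the length/payload split instead of
relying on byte offsets. A standalone illustration of the two access styles;
the element definition mirrors include/linux/ieee80211.h:

    #include <stdint.h>
    #include <string.h>
    #include <stddef.h>

    struct element {
            uint8_t id;
            uint8_t datalen;
            uint8_t data[];
    };

    /* old style: ie[0] is the ID, ie[1] the length, ie + 2 the payload */
    static int ssid_matches_raw(const uint8_t *ie, const uint8_t *ssid, size_t len)
    {
            return ie[1] == len && memcmp(ie + 2, ssid, len) == 0;
    }

    /* new style: named fields make the same checks self-describing */
    static int ssid_matches_elem(const struct element *elem,
                                 const uint8_t *ssid, size_t len)
    {
            return elem->datalen == len && memcmp(elem->data, ssid, len) == 0;
    }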

 static int
@@@ -418,17 -418,14 +418,17 @@@ cfg80211_add_nontrans_list(struct cfg80
 	}
 	ssid_len = ssid[1];
 	ssid = ssid + 2;
-	rcu_read_unlock();

 	/* check if nontrans_bss is in the list */
 	list_for_each_entry(bss, &trans_bss->nontrans_list, nontrans_list) {
-		if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len))
+		if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len)) {
+			rcu_read_unlock();
 			return 0;
+		}
 	}

+	rcu_read_unlock();
+
 	/* add to the list */
 	list_add_tail(&nontrans_bss->nontrans_list, &trans_bss->nontrans_list);
 	return 0;
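
The three scan.c hunks above fix an RCU lifetime bug: ssid points into the
RCU-protected ies blob, so the read-side critical section must cover every
dereference of it, including the list walk. Reduced kernel-style pseudocode of
the corrected shape (not the literal cfg80211 code):

    rcu_read_lock();
    ies = rcu_dereference(trans_bss->ies);
    ssid = find_ssid(ies);              /* points into 'ies' */

    list_for_each_entry(bss, &trans_bss->nontrans_list, nontrans_list) {
            if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len)) {
                    rcu_read_unlock();  /* last use of 'ssid' on this path */
                    return 0;
            }
    }

    rcu_read_unlock();                  /* ...and on the fall-through path */
    list_add_tail(&nontrans_bss->nontrans_list, &trans_bss->nontrans_list);
    return 0;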

@@@ -1794,25 -1791,13 +1794,13 @@@ cfg80211_bss_update(struct cfg80211_reg
 	return NULL;
 }

-/*
- * Update RX channel information based on the available frame payload
- * information. This is mainly for the 2.4 GHz band where frames can be received
- * from neighboring channels and the Beacon frames use the DSSS Parameter Set
- * element to indicate the current (transmitting) channel, but this might also
- * be needed on other bands if RX frequency does not match with the actual
- * operating channel of a BSS.
- */
-static struct ieee80211_channel *
-cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
-			 struct ieee80211_channel *channel,
-			 enum nl80211_bss_scan_width scan_width)
+int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
+				    enum nl80211_band band)
 {
 	const u8 *tmp;
-	u32 freq;
 	int channel_number = -1;
-	struct ieee80211_channel *alt_channel;

-	if (channel->band == NL80211_BAND_S1GHZ) {
+	if (band == NL80211_BAND_S1GHZ) {
 		tmp = cfg80211_find_ie(WLAN_EID_S1G_OPERATION, ie, ielen);
 		if (tmp && tmp[1] >= sizeof(struct ieee80211_s1g_oper_ie)) {
 			struct ieee80211_s1g_oper_ie *s1gop = (void *)(tmp + 2);
@@@ -1833,6 -1818,29 +1821,29 @@@
 	}

+	return channel_number;
+}
+EXPORT_SYMBOL(cfg80211_get_ies_channel_number);
+
+/*
+ * Update RX channel information based on the available frame payload
+ * information. This is mainly for the 2.4 GHz band where frames can be received
+ * from neighboring channels and the Beacon frames use the DSSS Parameter Set
+ * element to indicate the current (transmitting) channel, but this might also
+ * be needed on other bands if RX frequency does not match with the actual
+ * operating channel of a BSS.
+ */
+static struct ieee80211_channel *
+cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
+			 struct ieee80211_channel *channel,
+			 enum nl80211_bss_scan_width scan_width)
+{
+	u32 freq;
+	int channel_number;
+	struct ieee80211_channel *alt_channel;
+
+	channel_number = cfg80211_get_ies_channel_number(ie, ielen, channel->band);
+
 	if (channel_number < 0) {
 		/* No channel information in frame payload */
 		return channel;
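
Since cfg80211_get_ies_channel_number() is now exported, drivers can reuse the
same IE-derived channel lookup. A hypothetical caller, written against the
three-argument signature introduced above; ieee80211_frequency_to_channel() is
the existing cfg80211 helper:

    static bool rx_channel_matches(struct ieee80211_channel *rx_chan,
                                   const u8 *ies, size_t ies_len)
    {
            int chan = cfg80211_get_ies_channel_number(ies, ies_len,
                                                       rx_chan->band);

            /* a negative result means the IEs carry no channel information,
             * so fall back to trusting the radio's reported RX channel */
            if (chan < 0)
                    return true;

            return chan == ieee80211_frequency_to_channel(rx_chan->center_freq);
    }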

@@@ -2075,12 -2083,12 +2086,12 @@@ static void cfg80211_parse_mbssid_data(
 	if (!non_tx_data)
 		return;
-	if (!cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
+	if (!cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
 		return;
 	if (!wiphy->support_mbssid)
 		return;
 	if (wiphy->support_only_he_mbssid &&
-	    !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
+	    !cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
 		return;

 	new_ie = kmalloc(IEEE80211_MAX_DATA_LEN, gfp);
@@@ -2447,10 -2455,10 +2458,10 @@@ cfg80211_inform_bss_frame_data(struct w
 	res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt,
 						    len, gfp);
 	if (!res || !wiphy->support_mbssid ||
-	    !cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
+	    !cfg80211_find_elem(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
 		return res;
 	if (wiphy->support_only_he_mbssid &&
-	    !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
+	    !cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
 		return res;

 	non_tx_data.tx_bss = res;

diff --combined net/wireless/util.c
index a1a99a574984,2991f711491a..5ff1f8726faf
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@@ -80,6 -80,7 +80,7 @@@ u32 ieee80211_channel_to_freq_khz(int c
 		return 0; /* not supported */
 	switch (band) {
 	case NL80211_BAND_2GHZ:
+	case NL80211_BAND_LC:
 		if (chan == 14)
 			return MHZ_TO_KHZ(2484);
 		else if (chan < 14)
@@@ -209,6 -210,7 +210,7 @@@ static void set_mandatory_flags_band(st
 		WARN_ON(want);
 		break;
 	case NL80211_BAND_2GHZ:
+	case NL80211_BAND_LC:
 		want = 7;
 		for (i = 0; i < sband->n_bitrates; i++) {
 			switch (sband->bitrates[i].bitrate) {
@@@ -1028,14 -1030,14 +1030,14 @@@ int cfg80211_change_iface(struct cfg802
 	    !(rdev->wiphy.interface_modes & (1 << ntype)))
 		return -EOPNOTSUPP;

-	/* if it's part of a bridge, reject changing type to station/ibss */
-	if (netif_is_bridge_port(dev) &&
-	    (ntype == NL80211_IFTYPE_ADHOC ||
-	     ntype == NL80211_IFTYPE_STATION ||
-	     ntype == NL80211_IFTYPE_P2P_CLIENT))
-		return -EBUSY;
-
 	if (ntype != otype) {
+		/* if it's part of a bridge, reject changing type to station/ibss */
+		if (netif_is_bridge_port(dev) &&
+		    (ntype == NL80211_IFTYPE_ADHOC ||
+		     ntype == NL80211_IFTYPE_STATION ||
+		     ntype == NL80211_IFTYPE_P2P_CLIENT))
+			return -EBUSY;
+
 		dev->ieee80211_ptr->use_4addr = false;
 		dev->ieee80211_ptr->mesh_id_up_len = 0;

 		wdev_lock(dev->ieee80211_ptr);
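
For reference, the 2.4 GHz mapping that the new NL80211_BAND_LC case shares
with NL80211_BAND_2GHZ in ieee80211_channel_to_freq_khz() reduces to the usual
formula; a standalone restatement, not the kernel function itself:

    static int chan_to_freq_mhz_2ghz(int chan)
    {
            if (chan == 14)
                    return 2484;            /* channel 14 sits off the 5 MHz grid */
            if (chan >= 1 && chan < 14)
                    return 2407 + chan * 5; /* channels 1..13: 2412..2472 MHz */
            return 0;                       /* not supported */
    }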