The following commit has been merged in the master branch:

commit 9db993cef1a8bafb15f5b6eb5cf983a9e80746df
Merge: 18eeb22f3088827f3245c6efbc66f0f290ef0032 89212e160b81e778f829b89743570665810e3b13
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Tue Jun 15 10:58:18 2021 +1000

    Merge remote-tracking branch 'net-next/master'
diff --combined MAINTAINERS index 8f47d37efd95,183cc61e2dc0..bfda616dc973 --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -1452,14 -1452,6 +1452,14 @@@ S: Odd Fixe F: drivers/amba/ F: include/linux/amba/bus.h
+ARM PRIMECELL PL35X SMC DRIVER +M: Miquel Raynal <miquel.raynal@bootlin.com> +M: Naga Sureshkumar Relli nagasure@xilinx.com +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: Documentation/devicetree/bindings/mtd/arm,pl353-smc.yaml +F: drivers/memory/pl353-smc.c + ARM PRIMECELL CLCD PL110 DRIVER M: Russell King linux@armlinux.org.uk S: Odd Fixes @@@ -1818,7 -1810,6 +1818,7 @@@ F: Documentation/devicetree/bindings/ar F: Documentation/devicetree/bindings/net/cortina,gemini-ethernet.txt F: Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt F: Documentation/devicetree/bindings/rtc/faraday,ftrtc010.txt +F: arch/arm/boot/dts/gemini* F: arch/arm/mach-gemini/ F: drivers/net/ethernet/cortina/ F: drivers/pinctrl/pinctrl-gemini.c @@@ -2445,12 -2436,9 +2445,12 @@@ F: drivers/*/*/*s3c24 F: drivers/*/*s3c24* F: drivers/*/*s3c64xx* F: drivers/*/*s5pv210* +F: drivers/clocksource/samsung_pwm_timer.c F: drivers/memory/samsung/ +F: drivers/pwm/pwm-samsung.c F: drivers/soc/samsung/ F: drivers/tty/serial/samsung* +F: include/clocksource/samsung_pwm.h F: include/linux/platform_data/*s3c* F: include/linux/serial_s3c.h F: include/linux/soc/samsung/ @@@ -3748,6 -3736,8 +3748,6 @@@ F: drivers/gpio/gpio-bcm-kona. BROADCOM NETXTREME-E ROCE DRIVER M: Selvin Xavier selvin.xavier@broadcom.com M: Devesh Sharma devesh.sharma@broadcom.com -M: Somnath Kotur somnath.kotur@broadcom.com -M: Sriharsha Basavapatna sriharsha.basavapatna@broadcom.com M: Naresh Kumar PBS nareshkumar.pbs@broadcom.com L: linux-rdma@vger.kernel.org S: Supported @@@ -3887,7 -3877,6 +3887,7 @@@ L: linux-btrfs@vger.kernel.or S: Maintained W: http://btrfs.wiki.kernel.org/ Q: http://patchwork.kernel.org/project/linux-btrfs/list/ +C: irc://irc.libera.chat/btrfs T: git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git F: Documentation/filesystems/btrfs.rst F: fs/btrfs/ @@@ -4549,7 -4538,7 +4549,7 @@@ F: include/linux/clk F: include/linux/of_clk.h X: drivers/clk/clkdev.c
-COMMON INTERNET FILE SYSTEM (CIFS) +COMMON INTERNET FILE SYSTEM CLIENT (CIFS) M: Steve French sfrench@samba.org L: linux-cifs@vger.kernel.org L: samba-technical@lists.samba.org (moderated for non-subscribers) @@@ -4559,16 -4548,6 +4559,16 @@@ T: git git://git.samba.org/sfrench/cifs F: Documentation/admin-guide/cifs/ F: fs/cifs/
+COMMON INTERNET FILE SYSTEM SERVER (CIFSD) +M: Namjae Jeon namjae.jeon@samsung.com +M: Sergey Senozhatsky sergey.senozhatsky@gmail.com +M: Steve French sfrench@samba.org +M: Hyunchul Lee hyc.lee@gmail.com +L: linux-cifs@vger.kernel.org +L: linux-cifsd-devel@lists.sourceforge.net +S: Maintained +F: fs/cifsd/ + COMPACTPCI HOTPLUG CORE M: Scott Murray scott@spiteful.org L: linux-pci@vger.kernel.org @@@ -5209,13 -5188,6 +5209,13 @@@ W: https://linuxtv.or T: git git://linuxtv.org/media_tree.git F: drivers/media/platform/sti/delta
+DELTA DPS920AB PSU DRIVER +M: Robert Marko robert.marko@sartura.hr +L: linux-hwmon@vger.kernel.org +S: Maintained +F: Documentation/hwmon/dps920ab.rst +F: drivers/hwmon/pmbus/dps920ab.c + DENALI NAND DRIVER L: linux-mtd@lists.infradead.org S: Orphan @@@ -6839,6 -6811,8 +6839,8 @@@ F: Documentation/devicetree/bindings/ne F: Documentation/devicetree/bindings/net/qca,ar803x.yaml F: Documentation/networking/phy.rst F: drivers/net/mdio/ + F: drivers/net/mdio/acpi_mdio.c + F: drivers/net/mdio/fwnode_mdio.c F: drivers/net/mdio/of_mdio.c F: drivers/net/pcs/ F: drivers/net/phy/ @@@ -6973,7 -6947,6 +6975,7 @@@ F: net/core/failover. FANOTIFY M: Jan Kara jack@suse.cz R: Amir Goldstein amir73il@gmail.com +R: Matthew Bobrowski repnop@google.com L: linux-fsdevel@vger.kernel.org S: Maintained F: fs/notify/fanotify/ @@@ -7105,13 -7078,6 +7107,13 @@@ F: include/linux/firewire. F: include/uapi/linux/firewire*.h F: tools/firewire/
+FIRMWARE FRAMEWORK FOR ARMV8-A +M: Sudeep Holla sudeep.holla@arm.com +L: linux-arm-kernel@lists.infradead.org +S: Maintained +F: drivers/firmware/arm_ffa/ +F: include/linux/arm_ffa.h + FIRMWARE LOADER (request_firmware) M: Luis Chamberlain mcgrof@kernel.org L: linux-kernel@vger.kernel.org @@@ -9175,14 -9141,6 +9177,14 @@@ F: drivers/net/ethernet/intel/* F: include/linux/avf/virtchnl.h F: include/linux/net/intel/iidc.h
+INTEL ETHERNET PROTOCOL DRIVER FOR RDMA +M: Mustafa Ismail mustafa.ismail@intel.com +M: Shiraz Saleem shiraz.saleem@intel.com +L: linux-rdma@vger.kernel.org +S: Supported +F: drivers/infiniband/hw/irdma/ +F: include/uapi/rdma/irdma-abi.h + INTEL FRAMEBUFFER DRIVER (excluding 810 and 815) M: Maik Broemme mbroemme@libmpq.org L: linux-fbdev@vger.kernel.org @@@ -9416,6 -9374,14 +9418,6 @@@ L: linux-pm@vger.kernel.or S: Supported F: drivers/cpufreq/intel_pstate.c
-INTEL RDMA RNIC DRIVER -M: Faisal Latif faisal.latif@intel.com -M: Shiraz Saleem shiraz.saleem@intel.com -L: linux-rdma@vger.kernel.org -S: Supported -F: drivers/infiniband/hw/i40iw/ -F: include/uapi/rdma/i40iw-abi.h - INTEL SCU DRIVERS M: Mika Westerberg mika.westerberg@linux.intel.com S: Maintained @@@ -9487,6 -9453,13 +9489,13 @@@ L: Dell.Client.Kernel@dell.co S: Maintained F: drivers/platform/x86/intel-wmi-thunderbolt.c
+ INTEL WWAN IOSM DRIVER + M: M Chetan Kumar m.chetan.kumar@intel.com + M: Intel Corporation linuxwwan@intel.com + L: netdev@vger.kernel.org + S: Maintained + F: drivers/net/wwan/iosm/ + INTEL(R) TRACE HUB M: Alexander Shishkin alexander.shishkin@linux.intel.com S: Supported @@@ -11327,7 -11300,6 +11336,7 @@@ F: include/media/imx.
MEDIA DRIVERS FOR FREESCALE IMX7 M: Rui Miguel Silva rmfrfs@gmail.com +M: Laurent Pinchart laurent.pinchart@ideasonboard.com L: linux-media@vger.kernel.org S: Maintained T: git git://linuxtv.org/media_tree.git @@@ -11437,7 -11409,6 +11446,7 @@@ L: linux-renesas-soc@vger.kernel.or S: Supported T: git git://linuxtv.org/media_tree.git F: Documentation/devicetree/bindings/media/renesas,csi2.yaml +F: Documentation/devicetree/bindings/media/renesas,isp.yaml F: Documentation/devicetree/bindings/media/renesas,vin.yaml F: drivers/media/platform/rcar-vin/
@@@ -11842,7 -11813,6 +11851,7 @@@ T: git git://git.kernel.org/pub/scm/lin F: Documentation/devicetree/bindings/memory-controllers/ F: drivers/memory/ F: include/dt-bindings/memory/ +F: include/memory/
MEMORY FREQUENCY SCALING DRIVERS FOR NVIDIA TEGRA M: Dmitry Osipenko digetx@gmail.com @@@ -12028,13 -11998,11 +12037,13 @@@ MICROCHIP ISC DRIVE M: Eugen Hristev eugen.hristev@microchip.com L: linux-media@vger.kernel.org S: Supported -F: Documentation/devicetree/bindings/media/atmel-isc.txt +F: Documentation/devicetree/bindings/media/atmel,isc.yaml +F: Documentation/devicetree/bindings/media/microchip,xisc.yaml F: drivers/media/platform/atmel/atmel-isc-base.c F: drivers/media/platform/atmel/atmel-isc-regs.h F: drivers/media/platform/atmel/atmel-isc.h F: drivers/media/platform/atmel/atmel-sama5d2-isc.c +F: drivers/media/platform/atmel/atmel-sama7g5-isc.c F: include/linux/atmel-isc-media.h
MICROCHIP ISI DRIVER @@@ -12428,6 -12396,12 +12437,12 @@@ F: Documentation/userspace-api/media/dr F: drivers/media/pci/meye/ F: include/uapi/linux/meye.h
+ MOTORCOMM PHY DRIVER + M: Peter Geis pgwipeout@gmail.com + L: netdev@vger.kernel.org + S: Maintained + F: drivers/net/phy/motorcomm.c + MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD S: Orphan F: Documentation/driver-api/serial/moxa-smartio.rst @@@ -12642,7 -12616,7 +12657,7 @@@ S: Orpha F: drivers/net/ethernet/natsemi/natsemi.c
NCR 5380 SCSI DRIVERS -M: Finn Thain fthain@telegraphics.com.au +M: Finn Thain fthain@linux-m68k.org M: Michael Schmitz schmitzmic@gmail.com L: linux-scsi@vger.kernel.org S: Maintained @@@ -13114,7 -13088,7 +13129,7 @@@ F: Documentation/filesystems/ntfs.rs F: fs/ntfs/
NUBUS SUBSYSTEM -M: Finn Thain fthain@telegraphics.com.au +M: Finn Thain fthain@linux-m68k.org L: linux-m68k@lists.linux-m68k.org S: Maintained F: arch/*/include/asm/nubus.h @@@ -13236,6 -13210,7 +13251,7 @@@ M: Vladimir Oltean <olteanv@gmail.com L: linux-kernel@vger.kernel.org S: Maintained F: drivers/net/dsa/sja1105 + F: drivers/net/pcs/pcs-xpcs-nxp.c
NXP TDA998X DRM DRIVER M: Russell King linux@armlinux.org.uk @@@ -13395,6 -13370,12 +13411,6 @@@ L: linux-omap@vger.kernel.or S: Maintained F: arch/arm/mach-omap2/omap_hwmod*data*
-OMAP HWMOD DATA FOR OMAP4-BASED DEVICES -M: Benoît Cousson bcousson@baylibre.com -L: linux-omap@vger.kernel.org -S: Maintained -F: arch/arm/mach-omap2/omap_hwmod_44xx_data.c - OMAP HWMOD SUPPORT M: Benoît Cousson bcousson@baylibre.com M: Paul Walmsley paul@pwsan.com @@@ -13407,7 -13388,7 +13423,7 @@@ M: Vignesh R <vigneshr@ti.com L: linux-omap@vger.kernel.org L: linux-i2c@vger.kernel.org S: Maintained -F: Documentation/devicetree/bindings/i2c/i2c-omap.txt +F: Documentation/devicetree/bindings/i2c/ti,omap4-i2c.yaml F: drivers/i2c/busses/i2c-omap.c
OMAP IMAGING SUBSYSTEM (OMAP3 ISP and OMAP4 ISS) @@@ -14153,7 -14134,6 +14169,7 @@@ F: drivers/pci/controller/pci-v3-semi. PCI ENDPOINT SUBSYSTEM M: Kishon Vijay Abraham I kishon@ti.com M: Lorenzo Pieralisi lorenzo.pieralisi@arm.com +R: Krzysztof Wilczyński kw@linux.com L: linux-pci@vger.kernel.org S: Supported F: Documentation/PCI/endpoint/* @@@ -14202,7 -14182,6 +14218,7 @@@ F: drivers/pci/controller/pci-xgene-msi PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS M: Lorenzo Pieralisi lorenzo.pieralisi@arm.com R: Rob Herring robh@kernel.org +R: Krzysztof Wilczyński kw@linux.com L: linux-pci@vger.kernel.org S: Supported Q: http://patchwork.ozlabs.org/project/linux-pci/list/ @@@ -14362,12 -14341,10 +14378,12 @@@ PER-CPU MEMORY ALLOCATO M: Dennis Zhou dennis@kernel.org M: Tejun Heo tj@kernel.org M: Christoph Lameter cl@linux.com +L: linux-mm@kvack.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu.git F: arch/*/include/asm/percpu.h F: include/linux/percpu*.h +F: lib/percpu*.c F: mm/percpu*.c
PER-TASK DELAY ACCOUNTING @@@ -15620,9 -15597,8 +15636,9 @@@ F: drivers/clk/renesas
RENESAS EMEV2 I2C DRIVER M: Wolfram Sang wsa+renesas@sang-engineering.com +L: linux-renesas-soc@vger.kernel.org S: Supported -F: Documentation/devicetree/bindings/i2c/renesas,iic-emev2.txt +F: Documentation/devicetree/bindings/i2c/renesas,iic-emev2.yaml F: drivers/i2c/busses/i2c-emev2.c
RENESAS ETHERNET DRIVERS @@@ -15642,9 -15618,8 +15658,9 @@@ F: drivers/iio/adc/rcar-gyroadc.
RENESAS R-CAR I2C DRIVERS M: Wolfram Sang wsa+renesas@sang-engineering.com +L: linux-renesas-soc@vger.kernel.org S: Supported -F: Documentation/devicetree/bindings/i2c/renesas,i2c.txt +F: Documentation/devicetree/bindings/i2c/renesas,rcar-i2c.yaml F: Documentation/devicetree/bindings/i2c/renesas,iic.txt F: drivers/i2c/busses/i2c-rcar.c F: drivers/i2c/busses/i2c-sh_mobile.c @@@ -15660,9 -15635,8 +15676,9 @@@ F: drivers/thermal/rcar_thermal.
RENESAS RIIC DRIVER M: Chris Brandt chris.brandt@renesas.com +L: linux-renesas-soc@vger.kernel.org S: Supported -F: Documentation/devicetree/bindings/i2c/renesas,riic.txt +F: Documentation/devicetree/bindings/i2c/renesas,riic.yaml F: drivers/i2c/busses/i2c-riic.c
RENESAS USB PHY DRIVER @@@ -17042,13 -17016,6 +17058,13 @@@ S: Maintaine F: drivers/ssb/ F: include/linux/ssb/
+SONY IMX208 SENSOR DRIVER +M: Sakari Ailus sakari.ailus@linux.intel.com +L: linux-media@vger.kernel.org +S: Maintained +T: git git://linuxtv.org/media_tree.git +F: drivers/media/i2c/imx208.c + SONY IMX214 SENSOR DRIVER M: Ricardo Ribalda ribalda@kernel.org L: linux-media@vger.kernel.org @@@ -17717,6 -17684,7 +17733,7 @@@ M: Jose Abreu <Jose.Abreu@synopsys.com L: netdev@vger.kernel.org S: Supported F: drivers/net/pcs/pcs-xpcs.c + F: drivers/net/pcs/pcs-xpcs.h F: include/linux/pcs/pcs-xpcs.h
SYNOPSYS DESIGNWARE I2C DRIVER @@@ -17726,6 -17694,7 +17743,6 @@@ R: Mika Westerberg <mika.westerberg@lin L: linux-i2c@vger.kernel.org S: Maintained F: drivers/i2c/busses/i2c-designware-* -F: include/linux/platform_data/i2c-designware.h
SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER M: Jaehoon Chung jh80.chung@samsung.com @@@ -18917,13 -18886,6 +18934,13 @@@ S: Maintaine F: drivers/usb/host/isp116x* F: include/linux/usb/isp116x.h
+USB ISP1760 DRIVER +M: Rui Miguel Silva rui.silva@linaro.org +L: linux-usb@vger.kernel.org +S: Maintained +F: drivers/usb/isp1760/* +F: Documentation/devicetree/bindings/usb/nxp,isp1760.yaml + USB LAN78XX ETHERNET DRIVER M: Woojung Huh woojung.huh@microchip.com M: UNGLinuxDriver@microchip.com @@@ -19638,7 -19600,6 +19655,7 @@@ S: Maintaine T: git git://git.kernel.org/pub/scm/linux/kernel/git/pmladek/printk.git F: Documentation/core-api/printk-formats.rst F: lib/test_printf.c +F: lib/test_scanf.c F: lib/vsprintf.c
VT1211 HARDWARE MONITOR DRIVER @@@ -20069,7 -20030,6 +20086,7 @@@ F: arch/x86/xen/*swiotlb F: drivers/xen/*swiotlb*
XFS FILESYSTEM +C: irc://irc.oftc.net/xfs M: Darrick J. Wong djwong@kernel.org M: linux-xfs@vger.kernel.org L: linux-xfs@vger.kernel.org diff --combined arch/arm64/boot/dts/rockchip/rk3308.dtsi index 4fca2c4a5322,b815ce73e5c6..a185901aba9a --- a/arch/arm64/boot/dts/rockchip/rk3308.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3308.dtsi @@@ -164,7 -164,7 +164,7 @@@
grf: grf@ff000000 { compatible = "rockchip,rk3308-grf", "syscon", "simple-mfd"; - reg = <0x0 0xff000000 0x0 0x10000>; + reg = <0x0 0xff000000 0x0 0x08000>;
reboot-mode { compatible = "syscon-reboot-mode"; @@@ -177,42 -177,6 +177,42 @@@ }; };
+ usb2phy_grf: syscon@ff008000 { + compatible = "rockchip,rk3308-usb2phy-grf", "syscon", "simple-mfd"; + reg = <0x0 0xff008000 0x0 0x4000>; + #address-cells = <1>; + #size-cells = <1>; + + u2phy: usb2phy@100 { + compatible = "rockchip,rk3308-usb2phy"; + reg = <0x100 0x10>; + assigned-clocks = <&cru USB480M>; + assigned-clock-parents = <&u2phy>; + clocks = <&cru SCLK_USBPHY_REF>; + clock-names = "phyclk"; + clock-output-names = "usb480m_phy"; + #clock-cells = <0>; + status = "disabled"; + + u2phy_otg: otg-port { + interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "otg-bvalid", "otg-id", + "linestate"; + #phy-cells = <0>; + status = "disabled"; + }; + + u2phy_host: host-port { + interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "linestate"; + #phy-cells = <0>; + status = "disabled"; + }; + }; + }; + detect_grf: syscon@ff00b000 { compatible = "rockchip,rk3308-detect-grf", "syscon", "simple-mfd"; reg = <0x0 0xff00b000 0x0 0x1000>; @@@ -615,42 -579,6 +615,42 @@@ status = "disabled"; };
+ usb20_otg: usb@ff400000 { + compatible = "rockchip,rk3308-usb", "rockchip,rk3066-usb", + "snps,dwc2"; + reg = <0x0 0xff400000 0x0 0x40000>; + interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cru HCLK_OTG>; + clock-names = "otg"; + dr_mode = "otg"; + g-np-tx-fifo-size = <16>; + g-rx-fifo-size = <280>; + g-tx-fifo-size = <256 128 128 64 32 16>; + phys = <&u2phy_otg>; + phy-names = "usb2-phy"; + status = "disabled"; + }; + + usb_host_ehci: usb@ff440000 { + compatible = "generic-ehci"; + reg = <0x0 0xff440000 0x0 0x10000>; + interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cru HCLK_HOST>, <&cru HCLK_HOST_ARB>, <&u2phy>; + phys = <&u2phy_host>; + phy-names = "usb"; + status = "disabled"; + }; + + usb_host_ohci: usb@ff450000 { + compatible = "generic-ohci"; + reg = <0x0 0xff450000 0x0 0x10000>; + interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&cru HCLK_HOST>, <&cru HCLK_HOST_ARB>, <&u2phy>; + phys = <&u2phy_host>; + phy-names = "usb"; + status = "disabled"; + }; + sdmmc: mmc@ff480000 { compatible = "rockchip,rk3308-dw-mshc", "rockchip,rk3288-dw-mshc"; reg = <0x0 0xff480000 0x0 0x4000>; @@@ -709,6 -637,28 +709,28 @@@ status = "disabled"; };
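A note for board integrators: every USB node added above (the PHY ports and the three controllers) comes in with status = "disabled", so nothing probes until a board .dts opts in. A minimal, hypothetical board-level sketch using the labels defined in the hunks above; real boards would add regulators and pinctrl as needed:

        &u2phy {
                status = "okay";
        };

        &u2phy_otg {
                status = "okay";
        };

        &usb20_otg {
                dr_mode = "host";       /* hypothetical: "peripheral" or the default "otg" also work for dwc2 */
                status = "okay";
        };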
+ gmac: ethernet@ff4e0000 { + compatible = "rockchip,rk3308-gmac"; + reg = <0x0 0xff4e0000 0x0 0x10000>; + interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "macirq"; + clocks = <&cru SCLK_MAC>, <&cru SCLK_MAC_RX_TX>, + <&cru SCLK_MAC_RX_TX>, <&cru SCLK_MAC_REF>, + <&cru SCLK_MAC>, <&cru ACLK_MAC>, + <&cru PCLK_MAC>, <&cru SCLK_MAC_RMII>; + clock-names = "stmmaceth", "mac_clk_rx", + "mac_clk_tx", "clk_mac_ref", + "clk_mac_refout", "aclk_mac", + "pclk_mac", "clk_mac_speed"; + phy-mode = "rmii"; + pinctrl-names = "default"; + pinctrl-0 = <&rmii_pins &mac_refclk_12ma>; + resets = <&cru SRST_MAC_A>; + reset-names = "stmmaceth"; + rockchip,grf = <&grf>; + status = "disabled"; + }; + cru: clock-controller@ff500000 { compatible = "rockchip,rk3308-cru"; reg = <0x0 0xff500000 0x0 0x1000>; diff --combined drivers/infiniband/hw/mlx5/fs.c index 18ee2f293825,941adf5cf3d0..5fbc0a8454b9 --- a/drivers/infiniband/hw/mlx5/fs.c +++ b/drivers/infiniband/hw/mlx5/fs.c @@@ -1194,8 -1194,9 +1194,8 @@@ static struct ib_flow *mlx5_ib_create_f goto free_ucmd; }
- if (flow_attr->port > dev->num_ports || - (flow_attr->flags & - ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS))) { + if (flow_attr->flags & + ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS)) { err = -EINVAL; goto free_ucmd; } @@@ -2133,12 -2134,6 +2133,12 @@@ static int UVERBS_HANDLER(MLX5_IB_METHO if (err) goto end;
+ if (obj->ns_type == MLX5_FLOW_NAMESPACE_FDB && + mlx5_eswitch_mode(dev->mdev) != MLX5_ESWITCH_OFFLOADS) { + err = -EINVAL; + goto end; + } + uobj->object = obj; obj->mdev = dev->mdev; atomic_set(&obj->usecnt, 0); @@@ -2285,6 -2280,7 +2285,7 @@@ static int mlx5_ib_flow_action_create_p u8 ft_type, u8 dv_prt, void *in, size_t len) { + struct mlx5_pkt_reformat_params reformat_params; enum mlx5_flow_namespace_type namespace; u8 prm_prt; int ret; @@@ -2297,9 -2293,13 +2298,13 @@@ if (ret) return ret;
+ memset(&reformat_params, 0, sizeof(reformat_params)); + reformat_params.type = prm_prt; + reformat_params.size = len; + reformat_params.data = in; maction->flow_action_raw.pkt_reformat = - mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len, - in, namespace); + mlx5_packet_reformat_alloc(dev->mdev, &reformat_params, + namespace); if (IS_ERR(maction->flow_action_raw.pkt_reformat)) { ret = PTR_ERR(maction->flow_action_raw.pkt_reformat); return ret; diff --combined drivers/net/ethernet/amazon/ena/ena_netdev.c index 52571486705e,3bb0e66b2c7e..edaf37823c50 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@@ -35,9 -35,6 +35,6 @@@ MODULE_LICENSE("GPL")
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \ NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR) - static int debug = -1; - module_param(debug, int, 0); - MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static struct ena_aenq_handlers aenq_handlers;
@@@ -89,6 -86,12 +86,12 @@@ static void ena_increase_stat(u64 *stat u64_stats_update_end(syncp); }
+ static void ena_ring_tx_doorbell(struct ena_ring *tx_ring) + { + ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); + ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp); + } + static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct ena_adapter *adapter = netdev_priv(dev); @@@ -147,7 -150,7 +150,7 @@@ static int ena_xmit_common(struct net_d netif_dbg(adapter, tx_queued, dev, "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n", ring->qid); - ena_com_write_sq_doorbell(ring->ena_com_io_sq); + ena_ring_tx_doorbell(ring); }
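A quick note on the ena doorbell rework running through this file: ena_ring_tx_doorbell() folds the doorbell write and the doorbell statistic bump into one helper, so the flush points stop open-coding the pair. The before/after shape, using only names from the hunks in this file:

        /* before: every flush point wrote the doorbell and bumped the
         * per-ring stat by hand
         */
        ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
        ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp);

        /* after: one call, shared by ena_xmit_common(), ena_xdp_xmit_frame(),
         * ena_xdp_xmit() and ena_start_xmit(); the required memory barrier
         * sits inside ena_com_write_sq_doorbell()
         */
        ena_ring_tx_doorbell(tx_ring);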
/* prepare the packet's descriptors to dma engine */ @@@ -197,7 -200,6 +200,6 @@@ static int ena_xdp_io_poll(struct napi_ int ret;
xdp_ring = ena_napi->xdp_ring; - xdp_ring->first_interrupt = ena_napi->first_interrupt;
xdp_budget = budget;
@@@ -229,6 -231,7 +231,7 @@@ xdp_ring->tx_stats.napi_comp += napi_comp_call; xdp_ring->tx_stats.tx_poll++; u64_stats_update_end(&xdp_ring->syncp); + xdp_ring->tx_stats.last_napi_jiffies = jiffies;
return ret; } @@@ -236,48 -239,36 +239,48 @@@ static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring, struct ena_tx_buffer *tx_info, struct xdp_frame *xdpf, - void **push_hdr, - u32 *push_len) + struct ena_com_tx_ctx *ena_tx_ctx) { struct ena_adapter *adapter = xdp_ring->adapter; struct ena_com_buf *ena_buf; - dma_addr_t dma = 0; + int push_len = 0; + dma_addr_t dma; + void *data; u32 size;
tx_info->xdpf = xdpf; + data = tx_info->xdpf->data; size = tx_info->xdpf->len; - ena_buf = tx_info->bufs;
- /* llq push buffer */ - *push_len = min_t(u32, size, xdp_ring->tx_max_header_size); - *push_hdr = tx_info->xdpf->data; + if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { + /* Designate part of the packet for LLQ */ + push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
- if (size - *push_len > 0) { + ena_tx_ctx->push_header = data; + + size -= push_len; + data += push_len; + } + + ena_tx_ctx->header_len = push_len; + + if (size > 0) { dma = dma_map_single(xdp_ring->dev, - *push_hdr + *push_len, - size - *push_len, + data, + size, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(xdp_ring->dev, dma))) goto error_report_dma_error;
- tx_info->map_linear_data = 1; - tx_info->num_of_bufs = 1; - } + tx_info->map_linear_data = 0;
- ena_buf->paddr = dma; - ena_buf->len = size; + ena_buf = tx_info->bufs; + ena_buf->paddr = dma; + ena_buf->len = size; + + ena_tx_ctx->ena_bufs = ena_buf; + ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1; + }
return 0;
@@@ -286,6 -277,10 +289,6 @@@ error_report_dma_error &xdp_ring->syncp); netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
- xdp_return_frame_rx_napi(tx_info->xdpf); - tx_info->xdpf = NULL; - tx_info->num_of_bufs = 0; - return -EINVAL; }
@@@ -297,6 -292,8 +300,6 @@@ static int ena_xdp_xmit_frame(struct en struct ena_com_tx_ctx ena_tx_ctx = {}; struct ena_tx_buffer *tx_info; u16 next_to_use, req_id; - void *push_hdr; - u32 push_len; int rc;
next_to_use = xdp_ring->next_to_use; @@@ -304,11 -301,15 +307,11 @@@ tx_info = &xdp_ring->tx_buffer_info[req_id]; tx_info->num_of_bufs = 0;
- rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len); + rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx); if (unlikely(rc)) return rc;
- ena_tx_ctx.ena_bufs = tx_info->bufs; - ena_tx_ctx.push_header = push_hdr; - ena_tx_ctx.num_bufs = tx_info->num_of_bufs; ena_tx_ctx.req_id = req_id; - ena_tx_ctx.header_len = push_len;
rc = ena_xmit_common(dev, xdp_ring, @@@ -318,14 -319,12 +321,12 @@@ xdpf->len); if (rc) goto error_unmap_dma; - /* trigger the dma engine. ena_com_write_sq_doorbell() - * has a mb + + /* trigger the dma engine. ena_ring_tx_doorbell() + * calls a memory barrier inside it. */ - if (flags & XDP_XMIT_FLUSH) { - ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq); - ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1, - &xdp_ring->syncp); - } + if (flags & XDP_XMIT_FLUSH) + ena_ring_tx_doorbell(xdp_ring);
return rc;
@@@ -366,11 -365,8 +367,8 @@@ static int ena_xdp_xmit(struct net_devi }
/* Ring doorbell to make device aware of the packets */ - if (flags & XDP_XMIT_FLUSH) { - ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq); - ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1, - &xdp_ring->syncp); - } + if (flags & XDP_XMIT_FLUSH) + ena_ring_tx_doorbell(xdp_ring);
spin_unlock(&xdp_ring->xdp_tx_lock);
@@@ -385,7 -381,6 +383,6 @@@ static int ena_xdp_execute(struct ena_r u32 verdict = XDP_PASS; struct xdp_frame *xdpf; u64 *xdp_stat; - int qid;
rcu_read_lock(); xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog); @@@ -406,8 -401,7 +403,7 @@@ }
/* Find xmit queue */ - qid = rx_ring->qid + rx_ring->adapter->num_io_queues; - xdp_ring = &rx_ring->adapter->tx_ring[qid]; + xdp_ring = rx_ring->xdp_ring;
/* The XDP queues are shared between XDP_TX and XDP_REDIRECT */ spin_lock(&xdp_ring->xdp_tx_lock); @@@ -534,7 -528,7 +530,7 @@@ static void ena_xdp_exchange_program_rx rx_ring->rx_headroom = XDP_PACKET_HEADROOM; } else { ena_xdp_unregister_rxq_info(rx_ring); - rx_ring->rx_headroom = 0; + rx_ring->rx_headroom = NET_SKB_PAD; } } } @@@ -683,7 -677,6 +679,6 @@@ static void ena_init_io_rings_common(st ring->ena_dev = adapter->ena_dev; ring->per_napi_packets = 0; ring->cpu = 0; - ring->first_interrupt = false; ring->no_interrupt_event_cnt = 0; u64_stats_init(&ring->syncp); } @@@ -726,7 -719,9 +721,9 @@@ static void ena_init_io_rings(struct en rxr->smoothed_interval = ena_com_get_nonadaptive_moderation_interval_rx(ena_dev); rxr->empty_rx_queue = 0; + rxr->rx_headroom = NET_SKB_PAD; adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + rxr->xdp_ring = &adapter->tx_ring[i + adapter->num_io_queues]; } } } @@@ -980,47 -975,66 +977,66 @@@ static void ena_free_all_io_rx_resource ena_free_rx_resources(adapter, i); }
- static int ena_alloc_rx_page(struct ena_ring *rx_ring, - struct ena_rx_buffer *rx_info, gfp_t gfp) + static struct page *ena_alloc_map_page(struct ena_ring *rx_ring, + dma_addr_t *dma) { - int headroom = rx_ring->rx_headroom; - struct ena_com_buf *ena_buf; struct page *page; - dma_addr_t dma;
- /* restore page offset value in case it has been changed by device */ - rx_info->page_offset = headroom; - - /* if previous allocated page is not used */ - if (unlikely(rx_info->page)) - return 0; - - page = alloc_page(gfp); - if (unlikely(!page)) { + /* This would allocate the page on the same NUMA node the executing code + * is running on. + */ + page = dev_alloc_page(); + if (!page) { ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, &rx_ring->syncp); - return -ENOMEM; + return ERR_PTR(-ENOSPC); }
/* To enable NIC-side port-mirroring, AKA SPAN port, * we make the buffer readable from the nic as well */ - dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE, - DMA_BIDIRECTIONAL); - if (unlikely(dma_mapping_error(rx_ring->dev, dma))) { + *dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) { ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1, &rx_ring->syncp); - __free_page(page); - return -EIO; + return ERR_PTR(-EIO); } + + return page; + } + + static int ena_alloc_rx_buffer(struct ena_ring *rx_ring, + struct ena_rx_buffer *rx_info) + { + int headroom = rx_ring->rx_headroom; + struct ena_com_buf *ena_buf; + struct page *page; + dma_addr_t dma; + int tailroom; + + /* restore page offset value in case it has been changed by device */ + rx_info->page_offset = headroom; + + /* if previous allocated page is not used */ + if (unlikely(rx_info->page)) + return 0; + + /* We handle DMA here */ + page = ena_alloc_map_page(rx_ring, &dma); + if (unlikely(IS_ERR(page))) + return PTR_ERR(page); + netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, "Allocate page %p, rx_info %p\n", page, rx_info);
+ tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + rx_info->page = page; ena_buf = &rx_info->ena_buf; ena_buf->paddr = dma + headroom; - ena_buf->len = ENA_PAGE_SIZE - headroom; + ena_buf->len = ENA_PAGE_SIZE - headroom - tailroom;
return 0; } @@@ -1067,8 -1081,7 +1083,7 @@@ static int ena_refill_rx_bufs(struct en
rx_info = &rx_ring->rx_buffer_info[req_id];
- rc = ena_alloc_rx_page(rx_ring, rx_info, - GFP_ATOMIC | __GFP_COMP); + rc = ena_alloc_rx_buffer(rx_ring, rx_info); if (unlikely(rc < 0)) { netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, "Failed to allocate buffer for rx queue %d\n", @@@ -1386,21 -1399,23 +1401,23 @@@ static int ena_clean_tx_irq(struct ena_ return tx_pkts; }
- static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags) + static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag) { struct sk_buff *skb;
- if (frags) - skb = napi_get_frags(rx_ring->napi); - else + if (!first_frag) skb = netdev_alloc_skb_ip_align(rx_ring->netdev, rx_ring->rx_copybreak); + else + skb = build_skb(first_frag, ENA_PAGE_SIZE);
if (unlikely(!skb)) { ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1, &rx_ring->syncp); + netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, - "Failed to allocate skb. frags: %d\n", frags); + "Failed to allocate skb. first_frag %s\n", + first_frag ? "provided" : "not provided"); return NULL; }
@@@ -1412,10 -1427,12 +1429,12 @@@ static struct sk_buff *ena_rx_skb(struc u32 descs, u16 *next_to_clean) { - struct sk_buff *skb; struct ena_rx_buffer *rx_info; u16 len, req_id, buf = 0; - void *va; + struct sk_buff *skb; + void *page_addr; + u32 page_offset; + void *data_addr;
len = ena_bufs[buf].len; req_id = ena_bufs[buf].req_id; @@@ -1433,12 -1450,14 +1452,14 @@@ rx_info, rx_info->page);
/* save virt address of first buffer */ - va = page_address(rx_info->page) + rx_info->page_offset; + page_addr = page_address(rx_info->page); + page_offset = rx_info->page_offset; + data_addr = page_addr + page_offset;
- prefetch(va); + prefetch(data_addr);
if (len <= rx_ring->rx_copybreak) { - skb = ena_alloc_skb(rx_ring, false); + skb = ena_alloc_skb(rx_ring, NULL); if (unlikely(!skb)) return NULL;
@@@ -1451,7 -1470,7 +1472,7 @@@ dma_unmap_addr(&rx_info->ena_buf, paddr), len, DMA_FROM_DEVICE); - skb_copy_to_linear_data(skb, va, len); + skb_copy_to_linear_data(skb, data_addr, len); dma_sync_single_for_device(rx_ring->dev, dma_unmap_addr(&rx_info->ena_buf, paddr), len, @@@ -1465,16 -1484,18 +1486,18 @@@ return skb; }
- skb = ena_alloc_skb(rx_ring, true); + ena_unmap_rx_buff(rx_ring, rx_info); + + skb = ena_alloc_skb(rx_ring, page_addr); if (unlikely(!skb)) return NULL;
- do { - ena_unmap_rx_buff(rx_ring, rx_info); - - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, - rx_info->page_offset, len, ENA_PAGE_SIZE); + /* Populate skb's linear part */ + skb_reserve(skb, page_offset); + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ do { netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, "RX skb updated. len %d. data_len %d\n", skb->len, skb->data_len); @@@ -1493,6 -1514,12 +1516,12 @@@ req_id = ena_bufs[buf].req_id;
rx_info = &rx_ring->rx_buffer_info[req_id]; + + ena_unmap_rx_buff(rx_ring, rx_info); + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, + rx_info->page_offset, len, ENA_PAGE_SIZE); + } while (1);
return skb; @@@ -1705,14 -1732,12 +1734,12 @@@ static int ena_clean_rx_irq(struct ena_
skb_record_rx_queue(skb, rx_ring->qid);
- if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) { - total_len += rx_ring->ena_bufs[0].len; + if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) rx_copybreak_pkt++; - napi_gro_receive(napi, skb); - } else { - total_len += skb->len; - napi_gro_frags(napi); - } + + total_len += skb->len; + + napi_gro_receive(napi, skb);
res_budget--; } while (likely(res_budget)); @@@ -1924,9 -1949,6 +1951,6 @@@ static int ena_io_poll(struct napi_stru tx_ring = ena_napi->tx_ring; rx_ring = ena_napi->rx_ring;
- tx_ring->first_interrupt = ena_napi->first_interrupt; - rx_ring->first_interrupt = ena_napi->first_interrupt; - tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || @@@ -1981,6 -2003,8 +2005,8 @@@ tx_ring->tx_stats.tx_poll++; u64_stats_update_end(&tx_ring->syncp);
+ tx_ring->tx_stats.last_napi_jiffies = jiffies; + return ret; }
@@@ -2005,7 -2029,8 +2031,8 @@@ static irqreturn_t ena_intr_msix_io(in { struct ena_napi *ena_napi = data;
- ena_napi->first_interrupt = true; + /* Used to check HW health */ + WRITE_ONCE(ena_napi->first_interrupt, true);
WRITE_ONCE(ena_napi->interrupts_masked, true); smp_wmb(); /* write interrupts_masked before calling napi */ @@@ -3091,14 -3116,11 +3118,11 @@@ static netdev_tx_t ena_start_xmit(struc } }
- if (netif_xmit_stopped(txq) || !netdev_xmit_more()) { - /* trigger the dma engine. ena_com_write_sq_doorbell() - * has a mb + if (netif_xmit_stopped(txq) || !netdev_xmit_more()) + /* trigger the dma engine. ena_ring_tx_doorbell() + * calls a memory barrier inside it. */ - ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); - ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, - &tx_ring->syncp); - } + ena_ring_tx_doorbell(tx_ring);
return NETDEV_TX_OK;
@@@ -3348,7 -3370,7 +3372,7 @@@ static int ena_set_queues_placement_pol
llq_feature_mask = 1 << ENA_ADMIN_LLQ; if (!(ena_dev->supported_features & llq_feature_mask)) { - dev_err(&pdev->dev, + dev_warn(&pdev->dev, "LLQ is not supported Fallback to host mode policy.\n"); ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; return 0; @@@ -3659,7 -3681,9 +3683,9 @@@ static void ena_fw_reset_device(struct static int check_for_rx_interrupt_queue(struct ena_adapter *adapter, struct ena_ring *rx_ring) { - if (likely(rx_ring->first_interrupt)) + struct ena_napi *ena_napi = container_of(rx_ring->napi, struct ena_napi, napi); + + if (likely(READ_ONCE(ena_napi->first_interrupt))) return 0;
if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) @@@ -3683,6 -3707,10 +3709,10 @@@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, struct ena_ring *tx_ring) { + struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi); + unsigned int time_since_last_napi; + unsigned int missing_tx_comp_to; + bool is_tx_comp_time_expired; struct ena_tx_buffer *tx_buf; unsigned long last_jiffies; u32 missed_tx = 0; @@@ -3696,8 -3724,10 +3726,10 @@@ /* no pending Tx at this location */ continue;
- if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies + - 2 * adapter->missing_tx_completion_to))) { + is_tx_comp_time_expired = time_is_before_jiffies(last_jiffies + + 2 * adapter->missing_tx_completion_to); + + if (unlikely(!READ_ONCE(ena_napi->first_interrupt) && is_tx_comp_time_expired)) { /* If after graceful period interrupt is still not * received, we schedule a reset */ @@@ -3710,12 -3740,17 +3742,17 @@@ return -EIO; }
- if (unlikely(time_is_before_jiffies(last_jiffies + - adapter->missing_tx_completion_to))) { - if (!tx_buf->print_once) + is_tx_comp_time_expired = time_is_before_jiffies(last_jiffies + + adapter->missing_tx_completion_to); + + if (unlikely(is_tx_comp_time_expired)) { + if (!tx_buf->print_once) { + time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies); + missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to); netif_notice(adapter, tx_err, adapter->netdev, - "Found a Tx that wasn't completed on time, qid %d, index %d.\n", - tx_ring->qid, i); + "Found a Tx that wasn't completed on time, qid %d, index %d. %u usecs have passed since last napi execution. Missing Tx timeout value %u msecs\n", + tx_ring->qid, i, time_since_last_napi, missing_tx_comp_to); + }
tx_buf->print_once = 1; missed_tx++; @@@ -4246,7 -4281,7 +4283,7 @@@ static int ena_probe(struct pci_dev *pd adapter->ena_dev = ena_dev; adapter->netdev = netdev; adapter->pdev = pdev; - adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + adapter->msg_enable = DEFAULT_MSG_ENABLE;
ena_dev->net_device = netdev;
diff --combined drivers/net/ethernet/atheros/alx/main.c index 7748b276e5fd,45e380f3b065..11ef1fbe7aee --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@@ -1,5 -1,5 +1,5 @@@ /* - * Copyright (c) 2013 Johannes Berg johannes@sipsolutions.net + * Copyright (c) 2013, 2021 Johannes Berg johannes@sipsolutions.net * * This file is free software: you may copy, redistribute and/or modify it * under the terms of the GNU General Public License as published by the @@@ -1091,8 -1091,9 +1091,9 @@@ static int alx_init_sw(struct alx_priv ALX_MAC_CTRL_RXFC_EN | ALX_MAC_CTRL_TXFC_EN | 7 << ALX_MAC_CTRL_PRMBLEN_SHIFT; + mutex_init(&alx->mtx);
- return err; + return 0; }
@@@ -1122,6 -1123,8 +1123,8 @@@ static void alx_halt(struct alx_priv *a { struct alx_hw *hw = &alx->hw;
+ lockdep_assert_held(&alx->mtx); + alx_netif_stop(alx); hw->link_speed = SPEED_UNKNOWN; hw->duplex = DUPLEX_UNKNOWN; @@@ -1147,6 -1150,8 +1150,8 @@@ static void alx_configure(struct alx_pr
static void alx_activate(struct alx_priv *alx) { + lockdep_assert_held(&alx->mtx); + /* hardware setting lost, restore it */ alx_reinit_rings(alx); alx_configure(alx); @@@ -1161,7 -1166,7 +1166,7 @@@
static void alx_reinit(struct alx_priv *alx) { - ASSERT_RTNL(); + lockdep_assert_held(&alx->mtx);
alx_halt(alx); alx_activate(alx); @@@ -1249,6 -1254,8 +1254,8 @@@ out_disable_adv_intr
static void __alx_stop(struct alx_priv *alx) { + lockdep_assert_held(&alx->mtx); + alx_free_irq(alx);
cancel_work_sync(&alx->link_check_wk); @@@ -1284,6 -1291,8 +1291,8 @@@ static void alx_check_link(struct alx_p int old_speed; int err;
+ lockdep_assert_held(&alx->mtx); + /* clear PHY internal interrupt status, otherwise the main * interrupt status will be asserted forever */ @@@ -1338,12 -1347,24 +1347,24 @@@ reset
static int alx_open(struct net_device *netdev) { - return __alx_open(netdev_priv(netdev), false); + struct alx_priv *alx = netdev_priv(netdev); + int ret; + + mutex_lock(&alx->mtx); + ret = __alx_open(alx, false); + mutex_unlock(&alx->mtx); + + return ret; }
static int alx_stop(struct net_device *netdev) { - __alx_stop(netdev_priv(netdev)); + struct alx_priv *alx = netdev_priv(netdev); + + mutex_lock(&alx->mtx); + __alx_stop(alx); + mutex_unlock(&alx->mtx); + return 0; }
@@@ -1353,18 -1374,18 +1374,18 @@@ static void alx_link_check(struct work_
alx = container_of(work, struct alx_priv, link_check_wk);
- rtnl_lock(); + mutex_lock(&alx->mtx); alx_check_link(alx); - rtnl_unlock(); + mutex_unlock(&alx->mtx); }
static void alx_reset(struct work_struct *work) { struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);
- rtnl_lock(); + mutex_lock(&alx->mtx); alx_reinit(alx); - rtnl_unlock(); + mutex_unlock(&alx->mtx); }
static int alx_tpd_req(struct sk_buff *skb) @@@ -1771,6 -1792,8 +1792,8 @@@ static int alx_probe(struct pci_dev *pd goto out_unmap; }
+ mutex_lock(&alx->mtx); + alx_reset_pcie(hw);
phy_configured = alx_phy_configured(hw); @@@ -1781,7 -1804,7 +1804,7 @@@ err = alx_reset_mac(hw); if (err) { dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err); - goto out_unmap; + goto out_unlock; }
/* setup link to put it in a known good starting state */ @@@ -1791,7 -1814,7 +1814,7 @@@ dev_err(&pdev->dev, "failed to configure PHY speed/duplex (err=%d)\n", err); - goto out_unmap; + goto out_unlock; } }
@@@ -1824,9 -1847,11 +1847,11 @@@ if (!alx_get_phy_info(hw)) { dev_err(&pdev->dev, "failed to identify PHY\n"); err = -EIO; - goto out_unmap; + goto out_unlock; }
+ mutex_unlock(&alx->mtx); + INIT_WORK(&alx->link_check_wk, alx_link_check); INIT_WORK(&alx->reset_wk, alx_reset); netif_carrier_off(netdev); @@@ -1843,13 -1868,14 +1868,15 @@@
return 0;
+ out_unlock: + mutex_unlock(&alx->mtx); out_unmap: iounmap(hw->hw_addr); out_free_netdev: free_netdev(netdev); out_pci_release: pci_release_mem_regions(pdev); + pci_disable_pcie_error_reporting(pdev); out_pci_disable: pci_disable_device(pdev); return err; @@@ -1870,6 -1896,8 +1897,8 @@@ static void alx_remove(struct pci_dev * pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev);
+ mutex_destroy(&alx->mtx); + free_netdev(alx->dev); }
@@@ -1881,7 -1909,11 +1910,11 @@@ static int alx_suspend(struct device *d if (!netif_running(alx->dev)) return 0; netif_device_detach(alx->dev); + + mutex_lock(&alx->mtx); __alx_stop(alx); + mutex_unlock(&alx->mtx); + return 0; }
@@@ -1891,20 -1923,23 +1924,23 @@@ static int alx_resume(struct device *de struct alx_hw *hw = &alx->hw; int err;
+ mutex_lock(&alx->mtx); alx_reset_phy(hw);
- if (!netif_running(alx->dev)) - return 0; + if (!netif_running(alx->dev)) { + err = 0; + goto unlock; + }
- rtnl_lock(); err = __alx_open(alx, true); - rtnl_unlock(); if (err) - return err; + goto unlock;
netif_device_attach(alx->dev);
- return 0; + unlock: + mutex_unlock(&alx->mtx); + return err; }
static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); @@@ -1923,7 -1958,7 +1959,7 @@@ static pci_ers_result_t alx_pci_error_d
dev_info(&pdev->dev, "pci error detected\n");
- rtnl_lock(); + mutex_lock(&alx->mtx);
if (netif_running(netdev)) { netif_device_detach(netdev); @@@ -1935,7 -1970,7 +1971,7 @@@ else pci_disable_device(pdev);
- rtnl_unlock(); + mutex_unlock(&alx->mtx);
return rc; } @@@ -1948,7 -1983,7 +1984,7 @@@ static pci_ers_result_t alx_pci_error_s
dev_info(&pdev->dev, "pci error slot reset\n");
- rtnl_lock(); + mutex_lock(&alx->mtx);
if (pci_enable_device(pdev)) { dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n"); @@@ -1961,7 -1996,7 +1997,7 @@@ if (!alx_reset_mac(hw)) rc = PCI_ERS_RESULT_RECOVERED; out: - rtnl_unlock(); + mutex_unlock(&alx->mtx);
return rc; } @@@ -1973,14 -2008,14 +2009,14 @@@ static void alx_pci_error_resume(struc
dev_info(&pdev->dev, "pci error resume\n");
- rtnl_lock(); + mutex_lock(&alx->mtx);
if (netif_running(netdev)) { alx_activate(alx); netif_device_attach(netdev); }
- rtnl_unlock(); + mutex_unlock(&alx->mtx); }
static const struct pci_error_handlers alx_err_handlers = { diff --combined drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 762113a04dde,6479ceedc352..9a2b166d651e --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@@ -3894,7 -3894,6 +3894,6 @@@ static const struct net_device_ops cxgb .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan, .ndo_set_vf_link_state = cxgb4_mgmt_set_vf_link_state, }; - #endif
static void cxgb4_mgmt_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) @@@ -3909,6 -3908,7 +3908,7 @@@ static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = { .get_drvinfo = cxgb4_mgmt_get_drvinfo, }; + #endif
static void notify_fatal_err(struct work_struct *work) { @@@ -4424,8 -4424,10 +4424,8 @@@ static int adap_init0_phy(struct adapte
/* Load PHY Firmware onto adapter. */ - spin_lock_bh(&adap->win0_lock); ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version, (u8 *)phyf->data, phyf->size); - spin_unlock_bh(&adap->win0_lock); if (ret < 0) dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n", -ret); diff --combined drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index a0555f4d76fc,ae3ad99fbd06..444af17a8113 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@@ -3060,19 -3060,16 +3060,19 @@@ int t4_read_flash(struct adapter *adapt * @addr: the start address to write * @n: length of data to write in bytes * @data: the data to write + * @byte_oriented: whether to store data as bytes or as words * * Writes up to a page of data (256 bytes) to the serial flash starting * at the given address. All the data must be written to the same page. + * If @byte_oriented is set the write data is stored as byte stream + * (i.e. matches what on disk), otherwise in big-endian. */ static int t4_write_flash(struct adapter *adapter, unsigned int addr, - unsigned int n, const u8 *data) + unsigned int n, const u8 *data, bool byte_oriented) { - int ret; - u32 buf[64]; unsigned int i, c, left, val, offset = addr & 0xff; + u32 buf[64]; + int ret;
if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE) return -EINVAL; @@@ -3083,14 -3080,10 +3083,14 @@@ (ret = sf1_write(adapter, 4, 1, 1, val)) != 0) goto unlock;
- for (left = n; left; left -= c) { + for (left = n; left; left -= c, data += c) { c = min(left, 4U); - for (val = 0, i = 0; i < c; ++i) - val = (val << 8) + *data++; + for (val = 0, i = 0; i < c; ++i) { + if (byte_oriented) + val = (val << 8) + data[i]; + else + val = (val << 8) + data[c - i - 1]; + }
ret = sf1_write(adapter, c, c != left, 1, val); if (ret) @@@ -3103,8 -3096,7 +3103,8 @@@ t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
/* Read the page to verify the write succeeded */ - ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); + ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, + byte_oriented); if (ret) return ret;
@@@ -3700,7 -3692,7 +3700,7 @@@ int t4_load_fw(struct adapter *adap, co */ memcpy(first_page, fw_data, SF_PAGE_SIZE); ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff); - ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page); + ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, true); if (ret) goto out;
@@@ -3708,14 -3700,14 +3708,14 @@@ for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { addr += SF_PAGE_SIZE; fw_data += SF_PAGE_SIZE; - ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data); + ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, true); if (ret) goto out; }
- ret = t4_write_flash(adap, - fw_start + offsetof(struct fw_hdr, fw_ver), - sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver); + ret = t4_write_flash(adap, fw_start + offsetof(struct fw_hdr, fw_ver), + sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, + true); out: if (ret) dev_err(adap->pdev_dev, "firmware download failed, error %d\n", @@@ -3820,11 -3812,9 +3820,11 @@@ int t4_load_phy_fw(struct adapter *adap /* Copy the supplied PHY Firmware image to the adapter memory location * allocated by the adapter firmware. */ + spin_lock_bh(&adap->win0_lock); ret = t4_memory_rw(adap, win, mtype, maddr, phy_fw_size, (__be32 *)phy_fw_data, T4_MEMORY_WRITE); + spin_unlock_bh(&adap->win0_lock); if (ret) return ret;
@@@ -6993,7 -6983,7 +6993,7 @@@ int t4_fw_bye(struct adapter *adap, uns }
/** - * t4_init_cmd - ask FW to initialize the device + * t4_early_init - ask FW to initialize the device * @adap: the adapter * @mbox: mailbox to use for the FW command * @@@ -10218,7 -10208,7 +10218,7 @@@ int t4_load_cfg(struct adapter *adap, c n = size - i; else n = SF_PAGE_SIZE; - ret = t4_write_flash(adap, addr, n, cfg_data); + ret = t4_write_flash(adap, addr, n, cfg_data, true); if (ret) goto out;
@@@ -10234,7 -10224,7 +10234,7 @@@ out }
/** - * t4_set_vf_mac - Set MAC address for the specified VF + * t4_set_vf_mac_acl - Set MAC address for the specified VF * @adapter: The adapter * @vf: one of the VFs instantiated by the specified PF * @naddr: the number of MAC addresses @@@ -10687,14 -10677,13 +10687,14 @@@ int t4_load_boot(struct adapter *adap, for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { addr += SF_PAGE_SIZE; boot_data += SF_PAGE_SIZE; - ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data); + ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, + false); if (ret) goto out; }
ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, - (const u8 *)header); + (const u8 *)header, false);
out: if (ret) @@@ -10769,7 -10758,7 +10769,7 @@@ int t4_load_bootcfg(struct adapter *ada for (i = 0; i < size; i += SF_PAGE_SIZE) { n = min_t(u32, size - i, SF_PAGE_SIZE);
- ret = t4_write_flash(adap, addr, n, cfg_data); + ret = t4_write_flash(adap, addr, n, cfg_data, false); if (ret) goto out;
@@@ -10781,8 -10770,7 +10781,8 @@@ for (i = 0; i < npad; i++) { u8 data = 0;
- ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data); + ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data, + false); if (ret) goto out; } diff --combined drivers/net/ethernet/intel/ice/ice_lib.c index 6d360aeae596,a46aba5e9c12..069ff7a5d1a6 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@@ -169,12 -169,13 +169,13 @@@ static void ice_vsi_set_num_qs(struct i
switch (vsi->type) { case ICE_VSI_PF: - vsi->alloc_txq = min3(pf->num_lan_msix, - ice_get_avail_txq_count(pf), - (u16)num_online_cpus()); if (vsi->req_txq) { vsi->alloc_txq = vsi->req_txq; vsi->num_txq = vsi->req_txq; + } else { + vsi->alloc_txq = min3(pf->num_lan_msix, + ice_get_avail_txq_count(pf), + (u16)num_online_cpus()); }
pf->num_lan_tx = vsi->alloc_txq; @@@ -183,12 -184,13 +184,13 @@@ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { vsi->alloc_rxq = 1; } else { - vsi->alloc_rxq = min3(pf->num_lan_msix, - ice_get_avail_rxq_count(pf), - (u16)num_online_cpus()); if (vsi->req_rxq) { vsi->alloc_rxq = vsi->req_rxq; vsi->num_rxq = vsi->req_rxq; + } else { + vsi->alloc_rxq = min3(pf->num_lan_msix, + ice_get_avail_rxq_count(pf), + (u16)num_online_cpus()); } }
@@@ -1296,6 -1298,7 +1298,7 @@@ static int ice_vsi_alloc_rings(struct i ring->reg_idx = vsi->txq_map[i]; ring->ring_active = false; ring->vsi = vsi; + ring->tx_tstamps = &pf->ptp.port.tx; ring->dev = dev; ring->count = vsi->num_tx_desc; WRITE_ONCE(vsi->tx_rings[i], ring); @@@ -1673,9 -1676,11 +1676,11 @@@ void ice_vsi_cfg_frame_size(struct ice_ * @pf_q: index of the Rx queue in the PF's queue space * @rxdid: flexible descriptor RXDID * @prio: priority for the RXDID for this queue + * @ena_ts: true to enable timestamp and false to disable timestamp */ void - ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio) + ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio, + bool ena_ts) { int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
@@@ -1690,9 -1695,40 +1695,40 @@@ regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) & QRXFLXP_CNTXT_RXDID_PRIO_M;
+ if (ena_ts) + /* Enable TimeSync on this queue */ + regval |= QRXFLXP_CNTXT_TS_M; + wr32(hw, QRXFLXP_CNTXT(pf_q), regval); }
+ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx) + { + if (q_idx >= vsi->num_rxq) + return -EINVAL; + + return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]); + } + + int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 q_idx) + { + struct ice_aqc_add_tx_qgrp *qg_buf; + int err; + + if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx]) + return -EINVAL; + + qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL); + if (!qg_buf) + return -ENOMEM; + + qg_buf->num_txqs = 1; + + err = ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf); + kfree(qg_buf); + return err; + } + /** * ice_vsi_cfg_rxqs - Configure the VSI for Rx * @vsi: the VSI being configured @@@ -1710,15 -1746,11 +1746,11 @@@ int ice_vsi_cfg_rxqs(struct ice_vsi *vs ice_vsi_cfg_frame_size(vsi); setup_rings: /* set up individual rings */ - for (i = 0; i < vsi->num_rxq; i++) { - int err; + ice_for_each_rxq(vsi, i) { + int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
- err = ice_setup_rx_ctx(vsi->rx_rings[i]); - if (err) { - dev_err(ice_pf_to_dev(vsi->back), "ice_setup_rx_ctx failed for RxQ %d, err %d\n", - i, err); + if (err) return err; - } }
return 0; @@@ -1728,13 -1760,12 +1760,13 @@@ * ice_vsi_cfg_txqs - Configure the VSI for Tx * @vsi: the VSI being configured * @rings: Tx ring array to be configured + * @count: number of Tx ring array elements * * Return 0 on success and a negative value on error * Configure the Tx VSI for operation. */ static int -ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings) +ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, u16 count) { struct ice_aqc_add_tx_qgrp *qg_buf; u16 q_idx = 0; @@@ -1746,7 -1777,7 +1778,7 @@@
qg_buf->num_txqs = 1;
- for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) { + for (q_idx = 0; q_idx < count; q_idx++) { err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf); if (err) goto err_cfg_txqs; @@@ -1766,7 -1797,7 +1798,7 @@@ err_cfg_txqs */ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi) { - return ice_vsi_cfg_txqs(vsi, vsi->tx_rings); + return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq); }
/** @@@ -1781,7 -1812,7 +1813,7 @@@ int ice_vsi_cfg_xdp_txqs(struct ice_vs int ret; int i;
- ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings); + ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq); if (ret) return ret;
@@@ -2021,18 -2052,17 +2053,18 @@@ int ice_vsi_stop_all_rx_rings(struct ic * @rst_src: reset source * @rel_vmvf_num: Relative ID of VF/VM * @rings: Tx ring array to be stopped + * @count: number of Tx ring array elements */ static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, - u16 rel_vmvf_num, struct ice_ring **rings) + u16 rel_vmvf_num, struct ice_ring **rings, u16 count) { u16 q_idx;
if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) return -EINVAL;
- for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) { + for (q_idx = 0; q_idx < count; q_idx++) { struct ice_txq_meta txq_meta = { }; int status;
@@@ -2060,7 -2090,7 +2092,7 @@@ in ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, u16 rel_vmvf_num) { - return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings); + return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq); }
/** @@@ -2069,7 -2099,7 +2101,7 @@@ */ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi) { - return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings); + return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq); }
/** @@@ -2228,7 -2258,7 +2260,7 @@@ void ice_cfg_sw_lldp(struct ice_vsi *vs }
if (status) - dev_err(dev, "Fail %s %s LLDP rule on VSI %i error: %s\n", + dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %s\n", create ? "adding" : "removing", tx ? "TX" : "RX", vsi->vsi_num, ice_stat_str(status)); } @@@ -3207,6 -3237,34 +3239,34 @@@ bool ice_is_reset_in_progress(unsigned test_bit(ICE_GLOBR_REQ, state); }
+ /** + * ice_wait_for_reset - Wait for driver to finish reset and rebuild + * @pf: pointer to the PF structure + * @timeout: length of time to wait, in jiffies + * + * Wait (sleep) for a short time until the driver finishes cleaning up from + * a device reset. The caller must be able to sleep. Use this to delay + * operations that could fail while the driver is cleaning up after a device + * reset. + * + * Returns 0 on success, -EBUSY if the reset is not finished within the + * timeout, and -ERESTARTSYS if the thread was interrupted. + */ + int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout) + { + long ret; + + ret = wait_event_interruptible_timeout(pf->reset_wait_queue, + !ice_is_reset_in_progress(pf->state), + timeout); + if (ret < 0) + return ret; + else if (!ret) + return -EBUSY; + else + return 0; + } + #ifdef CONFIG_DCB /** * ice_vsi_update_q_map - update our copy of the VSI info with new queue map @@@ -3341,13 -3399,22 +3401,22 @@@ int ice_status_to_errno(enum ice_statu case ICE_ERR_DOES_NOT_EXIST: return -ENOENT; case ICE_ERR_OUT_OF_RANGE: - return -ENOTTY; + case ICE_ERR_AQ_ERROR: + case ICE_ERR_AQ_TIMEOUT: + case ICE_ERR_AQ_EMPTY: + case ICE_ERR_AQ_FW_CRITICAL: + return -EIO; case ICE_ERR_PARAM: + case ICE_ERR_INVAL_SIZE: return -EINVAL; case ICE_ERR_NO_MEMORY: return -ENOMEM; case ICE_ERR_MAX_LIMIT: return -EAGAIN; + case ICE_ERR_RESET_ONGOING: + return -EBUSY; + case ICE_ERR_AQ_FULL: + return -ENOSPC; default: return -EINVAL; } diff --combined drivers/net/ethernet/intel/ice/ice_main.c index 2d2a28c0a7ac,96276533822e..37caccfd85e1 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@@ -471,6 -471,9 +471,9 @@@ ice_prepare_for_reset(struct ice_pf *pf /* disable the VSIs and their queues that are not already DOWN */ ice_pf_dis_all_vsi(pf, false);
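The kernel-doc above spells out the contract of ice_wait_for_reset(); a hypothetical caller that wants to block briefly until the rebuild finishes could look like this (the 10 s timeout is illustrative, not taken from the driver):

        /* Hypothetical: wait up to 10 s for the PF to finish rebuilding
         * before touching queues; -EBUSY/-ERESTARTSYS per the kernel-doc.
         */
        err = ice_wait_for_reset(pf, 10 * HZ);
        if (err) {
                dev_dbg(ice_pf_to_dev(pf), "reset still in progress: %d\n", err);
                return err;
        }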
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) + ice_ptp_release(pf); + if (hw->port_info) ice_sched_clear_port(hw->port_info);
@@@ -503,6 -506,7 +506,7 @@@ static void ice_do_reset(struct ice_pf clear_bit(ICE_PFR_REQ, pf->state); clear_bit(ICE_CORER_REQ, pf->state); clear_bit(ICE_GLOBR_REQ, pf->state); + wake_up(&pf->reset_wait_queue); return; }
@@@ -515,6 -519,7 +519,7 @@@ ice_rebuild(pf, reset_type); clear_bit(ICE_PREPARED_FOR_RESET, pf->state); clear_bit(ICE_PFR_REQ, pf->state); + wake_up(&pf->reset_wait_queue); ice_reset_all_vfs(pf, true); } } @@@ -565,6 -570,7 +570,7 @@@ static void ice_reset_subtask(struct ic clear_bit(ICE_PFR_REQ, pf->state); clear_bit(ICE_CORER_REQ, pf->state); clear_bit(ICE_GLOBR_REQ, pf->state); + wake_up(&pf->reset_wait_queue); ice_reset_all_vfs(pf, true); }
@@@ -861,6 -867,38 +867,38 @@@ static void ice_set_dflt_mib(struct ice kfree(lldpmib); }
+ /** + * ice_check_module_power + * @pf: pointer to PF struct + * @link_cfg_err: bitmap from the link info structure + * + * check module power level returned by a previous call to aq_get_link_info + * and print error messages if module power level is not supported + */ + static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err) + { + /* if module power level is supported, clear the flag */ + if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT | + ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) { + clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); + return; + } + + /* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the + * above block didn't clear this bit, there's nothing to do + */ + if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags)) + return; + + if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) { + dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n"); + set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); + } else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) { + dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n"); + set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags); + } + } + /** * ice_link_event - process the link event * @pf: PF that the link event is associated with @@@ -896,6 -934,8 +934,8 @@@ ice_link_event(struct ice_pf *pf, struc pi->lport, ice_stat_str(status), ice_aq_str(pi->hw->adminq.sq_last_status));
+ ice_check_module_power(pf, pi->phy.link_info.link_cfg_err); + /* Check if the link state is up after updating link info, and treat * this event as an UP event since the link is actually UP now. */ @@@ -1194,6 -1234,10 +1234,10 @@@ static int __ice_clean_ctrlq(struct ice cq = &hw->adminq; qtype = "Admin"; break; + case ICE_CTL_Q_SB: + cq = &hw->sbq; + qtype = "Sideband"; + break; case ICE_CTL_Q_MAILBOX: cq = &hw->mailboxq; qtype = "Mailbox"; @@@ -1367,6 -1411,34 +1411,34 @@@ static void ice_clean_mailboxq_subtask( ice_flush(hw); }
+ /** + * ice_clean_sbq_subtask - clean the Sideband Queue rings + * @pf: board private structure + */ + static void ice_clean_sbq_subtask(struct ice_pf *pf) + { + struct ice_hw *hw = &pf->hw; + + /* Nothing to do here if sideband queue is not supported */ + if (!ice_is_sbq_supported(hw)) { + clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); + return; + } + + if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state)) + return; + + if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB)) + return; + + clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); + + if (ice_ctrlq_pending(hw, &hw->sbq)) + __ice_clean_ctrlq(pf, ICE_CTL_Q_SB); + + ice_flush(hw); + } + /** * ice_service_task_schedule - schedule the service task to wake up * @pf: board private structure @@@ -2010,6 -2082,8 +2082,8 @@@ static void ice_check_media_subtask(str if (err) return;
+ ice_check_module_power(pf, pi->phy.link_info.link_cfg_err); + if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) ice_init_phy_user_cfg(pi); @@@ -2067,6 -2141,7 +2141,7 @@@ static void ice_service_task(struct wor
ice_process_vflr_event(pf); ice_clean_mailboxq_subtask(pf); + ice_clean_sbq_subtask(pf); ice_sync_arfs_fltrs(pf); ice_flush_fdir_ctx(pf);
@@@ -2082,6 -2157,7 +2157,7 @@@ test_bit(ICE_VFLR_EVENT_PENDING, pf->state) || test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) || test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) || + test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) || test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state)) mod_timer(&pf->serv_tmr, jiffies); } @@@ -2100,6 -2176,10 +2176,10 @@@ static void ice_set_ctrlq_len(struct ic hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN; hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN; hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN; + hw->sbq.num_rq_entries = ICE_SBQ_LEN; + hw->sbq.num_sq_entries = ICE_SBQ_LEN; + hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN; + hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN; }
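ice_clean_sbq_subtask() above follows the driver's established pending-bit pattern: bail out early when the queue is unsupported or idle, drain, clear the bit, then drain once more to close the race with an interrupt that fired after the last drain. A condensed sketch of that shape; drain_leaves_work() and queue_has_work() are hypothetical stand-ins for __ice_clean_ctrlq() and ice_ctrlq_pending():

#include <linux/bitops.h>

#define EVT_PENDING 0

bool drain_leaves_work(void);   /* hypothetical: nonzero while messages remain */
bool queue_has_work(void);      /* hypothetical: ring still non-empty */

static void example_event_subtask(unsigned long *state)
{
        if (!test_bit(EVT_PENDING, state))
                return;

        if (drain_leaves_work())
                return;         /* keep the bit set; service task runs again */

        clear_bit(EVT_PENDING, state);

        /* An IRQ may have posted new events between the last drain and
         * clear_bit(); catch that window with one more check.
         */
        if (queue_has_work())
                drain_leaves_work();
}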
/** @@@ -2561,20 -2641,6 +2641,20 @@@ ice_xdp_setup_prog(struct ice_vsi *vsi return (ret || xdp_ring_err) ? -ENOMEM : 0; }
+/** + * ice_xdp_safe_mode - XDP handler for safe mode + * @dev: netdevice + * @xdp: XDP command + */ +static int ice_xdp_safe_mode(struct net_device __always_unused *dev, + struct netdev_bpf *xdp) +{ + NL_SET_ERR_MSG_MOD(xdp->extack, + "Please provide working DDP firmware package in order to use XDP\n" + "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst"); + return -EOPNOTSUPP; +} + /** * ice_xdp - implements XDP handler * @dev: netdevice @@@ -2654,6 -2720,7 +2734,7 @@@ static irqreturn_t ice_misc_intr(int __ dev = ice_pf_to_dev(pf); set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state); + set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
oicr = rd32(hw, PFINT_OICR); ena_mask = rd32(hw, PFINT_OICR_ENA); @@@ -2725,6 -2792,11 +2806,11 @@@ } }
+ if (oicr & PFINT_OICR_TSYN_TX_M) { + ena_mask &= ~PFINT_OICR_TSYN_TX_M; + ice_ptp_process_ts(pf); + } + #define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M) if (oicr & ICE_AUX_CRIT_ERR) { struct iidc_event *event; @@@ -2775,6 -2847,9 +2861,9 @@@ static void ice_dis_ctrlq_interrupts(st wr32(hw, PFINT_MBX_CTL, rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
+ wr32(hw, PFINT_SB_CTL, + rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M); + /* disable Control queue Interrupt causes */ wr32(hw, PFINT_OICR_CTL, rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M); @@@ -2829,6 -2904,11 +2918,11 @@@ static void ice_ena_ctrlq_interrupts(st PFINT_MBX_CTL_CAUSE_ENA_M); wr32(hw, PFINT_MBX_CTL, val);
+ /* This enables Sideband queue Interrupt causes */ + val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) | + PFINT_SB_CTL_CAUSE_ENA_M); + wr32(hw, PFINT_SB_CTL, val); + ice_flush(hw); }
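The PFINT_SB_CTL handling added above mirrors the existing mailbox register: a read-modify-write to clear the cause-enable bit on disable, and a vector-index-plus-enable write on enable. A generic sketch of the same recipe; EX_CTL and its masks are invented for illustration, not real ice registers:

#include <linux/bits.h>
#include <linux/io.h>

#define EX_CTL          0x100           /* invented register offset */
#define EX_CAUSE_ENA    BIT(30)
#define EX_MSIX_INDX_M  GENMASK(10, 0)

static void ex_dis_cause(void __iomem *base)
{
        /* read-modify-write: clear only the cause-enable bit */
        writel(readl(base + EX_CTL) & ~EX_CAUSE_ENA, base + EX_CTL);
}

static void ex_ena_cause(void __iomem *base, u32 vector)
{
        /* route the cause to an MSI-X vector, then enable it */
        writel((vector & EX_MSIX_INDX_M) | EX_CAUSE_ENA, base + EX_CTL);
        readl(base + EX_CTL);   /* read back to flush the posted write */
}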
@@@ -3292,6 -3372,9 +3386,9 @@@ static void ice_deinit_pf(struct ice_p bitmap_free(pf->avail_rxqs); pf->avail_rxqs = NULL; } + + if (pf->ptp.clock) + ptp_clock_unregister(pf->ptp.clock); }
/** @@@ -3338,6 -3421,10 +3435,10 @@@ static void ice_set_pf_caps(struct ice_ func_caps->fd_fltr_best_effort); }
+ clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); + if (func_caps->common_cap.ieee_1588) + set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); + pf->max_pf_txqs = func_caps->common_cap.num_txq; pf->max_pf_rxqs = func_caps->common_cap.num_rxq; } @@@ -3357,6 -3444,8 +3458,8 @@@ static int ice_init_pf(struct ice_pf *p spin_lock_init(&pf->aq_wait_lock); init_waitqueue_head(&pf->aq_wait_queue);
+ init_waitqueue_head(&pf->reset_wait_queue); + /* setup service timer and periodic service task */ timer_setup(&pf->serv_tmr, ice_service_timer, 0); pf->serv_tmr_period = HZ; @@@ -4278,6 -4367,8 +4381,8 @@@ ice_probe(struct pci_dev *pdev, const s
ice_init_link_dflt_override(pf->hw.port_info);
+ ice_check_module_power(pf, pf->hw.port_info->phy.link_info.link_cfg_err); + /* if media available, initialize PHY settings */ if (pf->hw.port_info->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { @@@ -4316,6 -4407,8 +4421,8 @@@ }
/* initialize DDP driven features */ + if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) + ice_ptp_init(pf);
/* Note: Flow director init failure is non-fatal to load */ if (ice_init_fdir(pf)) @@@ -4483,6 -4576,8 +4590,8 @@@ static void ice_remove(struct pci_dev *
mutex_destroy(&(&pf->hw)->fdir_fltr_lock); ice_deinit_lag(pf); + if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) + ice_ptp_release(pf); if (!ice_is_safe_mode(pf)) ice_remove_arfs(pf); ice_setup_mc_magic_wake(pf); @@@ -6227,6 -6322,12 +6336,12 @@@ static void ice_rebuild(struct ice_pf *
ice_clear_pxe_mode(hw);
+ ret = ice_init_nvm(hw); + if (ret) { + dev_err(dev, "ice_init_nvm failed %s\n", ice_stat_str(ret)); + goto err_init_ctrlq; + } + ret = ice_get_caps(hw); if (ret) { dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret)); @@@ -6268,6 -6369,13 +6383,13 @@@ if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) ice_dcb_rebuild(pf);
+ /* If the PF previously had PTP enabled, PTP init needs to happen before + * the VSI rebuild; otherwise, the PTP link status events fail. + */ + if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) + ice_ptp_init(pf); + /* rebuild PF VSI */ err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF); if (err) { @@@ -6416,6 -6524,27 +6538,27 @@@ event_after return err; }
+ /** + * ice_do_ioctl - Access the hwtstamp interface + * @netdev: network interface device structure + * @ifr: interface request data + * @cmd: ioctl command + */ + static int ice_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) + { + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = np->vsi->back; + + switch (cmd) { + case SIOCGHWTSTAMP: + return ice_ptp_get_ts_config(pf, ifr); + case SIOCSHWTSTAMP: + return ice_ptp_set_ts_config(pf, ifr); + default: + return -EOPNOTSUPP; + } + } + /** * ice_aq_str - convert AQ err code to a string * @aq_err: the AQ error code to convert @@@ -6932,6 -7061,8 +7075,8 @@@ int ice_open_internal(struct net_devic return -EIO; }
+ ice_check_module_power(pf, pi->phy.link_info.link_cfg_err); + /* Set PHY if there is media, otherwise, turn off PHY */ if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); @@@ -7051,7 -7182,6 +7196,7 @@@ static const struct net_device_ops ice_ .ndo_change_mtu = ice_change_mtu, .ndo_get_stats64 = ice_get_stats64, .ndo_tx_timeout = ice_tx_timeout, + .ndo_bpf = ice_xdp_safe_mode, };
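The ice_do_ioctl() handler wired up through .ndo_do_ioctl below serves the standard SIOCGHWTSTAMP/SIOCSHWTSTAMP requests. A user-space sketch of driving it, assuming a PTP-capable ice interface; "eth0" is a placeholder name and error handling is minimal:

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct hwtstamp_config cfg = {
                .tx_type   = HWTSTAMP_TX_ON,
                .rx_filter = HWTSTAMP_FILTER_ALL,
        };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&cfg;

        /* SIOCSHWTSTAMP reaches ice_do_ioctl() -> ice_ptp_set_ts_config() */
        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                perror("SIOCSHWTSTAMP");

        close(fd);
        return 0;
}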
static const struct net_device_ops ice_netdev_ops = { @@@ -7065,6 -7195,7 +7210,7 @@@ .ndo_change_mtu = ice_change_mtu, .ndo_get_stats64 = ice_get_stats64, .ndo_set_tx_maxrate = ice_set_tx_maxrate, + .ndo_do_ioctl = ice_do_ioctl, .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, .ndo_set_vf_mac = ice_set_vf_mac, .ndo_get_vf_config = ice_get_vf_cfg, diff --combined drivers/net/ethernet/lantiq_xrx200.c index 0e10d8aeffe1,27df06ed355e..62fa5009e565 --- a/drivers/net/ethernet/lantiq_xrx200.c +++ b/drivers/net/ethernet/lantiq_xrx200.c @@@ -352,8 -352,8 +352,8 @@@ static irqreturn_t xrx200_dma_irq(int i struct xrx200_chan *ch = ptr;
if (napi_schedule_prep(&ch->napi)) { - __napi_schedule(&ch->napi); ltq_dma_disable_irq(&ch->dma); + __napi_schedule(&ch->napi); }
ltq_dma_ack_irq(&ch->dma); @@@ -436,7 -436,6 +436,6 @@@ static int xrx200_probe(struct platform { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - struct resource *res; struct xrx200_priv *priv; struct net_device *net_dev; int err; @@@ -456,13 -455,7 +455,7 @@@ net_dev->max_mtu = XRX200_DMA_DATA_LEN;
/* load the memory ranges */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "failed to get resources\n"); - return -ENOENT; - } - - priv->pmac_reg = devm_ioremap_resource(dev, res); + priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(priv->pmac_reg)) return PTR_ERR(priv->pmac_reg);
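devm_platform_get_and_ioremap_resource() used above folds the platform_get_resource() + devm_ioremap_resource() pair into one call that also logs failures. A probe sketch of the same pattern; struct foo_priv and foo_probe() are illustrative names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

struct foo_priv {
        void __iomem *regs;
};

static int foo_probe(struct platform_device *pdev)
{
        struct foo_priv *priv;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /* Pass a struct resource ** as the last argument if the raw
         * resource is needed; NULL mirrors the xrx200 call above.
         */
        priv->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
        if (IS_ERR(priv->regs))
                return PTR_ERR(priv->regs);     /* failure already logged */

        return 0;
}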
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index 85eaadc989df,f0b98f5b2a92..059799e4f483 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@@ -94,9 -94,13 +94,9 @@@ void mlx5e_rep_update_flows(struct mlx5
ASSERT_RTNL();
- /* wait for encap to be fully initialized */ - wait_for_completion(&e->res_ready); - mutex_lock(&esw->offloads.encap_tbl_lock); encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID); - if (e->compl_result < 0 || (encap_connected == neigh_connected && - ether_addr_equal(e->h_dest, ha))) + if (encap_connected == neigh_connected && ether_addr_equal(e->h_dest, ha)) goto unlock;
mlx5e_take_all_encap_flows(e, &flow_list); @@@ -613,7 -617,7 +613,7 @@@ static bool mlx5e_restore_skb(struct sk struct mlx5e_tc_update_priv *tc_priv) { struct mlx5e_priv *priv = netdev_priv(skb->dev); - u32 tunnel_id = reg_c1 >> ESW_TUN_OFFSET; + u32 tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;
if (chain) { struct mlx5_rep_uplink_priv *uplink_priv; diff --combined drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 490131e06efb,0dfd51d2d178..2e846b741280 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@@ -120,6 -120,7 +120,7 @@@ void mlx5e_tc_encap_flows_add(struct ml struct list_head *flow_list) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_pkt_reformat_params reformat_params; struct mlx5_esw_flow_attr *esw_attr; struct mlx5_flow_handle *rule; struct mlx5_flow_attr *attr; @@@ -130,9 -131,12 +131,12 @@@ if (e->flags & MLX5_ENCAP_ENTRY_NO_ROUTE) return;
+ memset(&reformat_params, 0, sizeof(reformat_params)); + reformat_params.type = e->reformat_type; + reformat_params.size = e->encap_size; + reformat_params.data = e->encap_header; e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, - e->reformat_type, - e->encap_size, e->encap_header, + &reformat_params, MLX5_FLOW_NAMESPACE_FDB); if (IS_ERR(e->pkt_reformat)) { mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n", @@@ -251,12 -255,9 +255,12 @@@ static void mlx5e_take_all_route_decap_ mlx5e_take_tmp_flow(flow, flow_list, 0); }
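Both reformat call sites in this file now pass their arguments through struct mlx5_pkt_reformat_params instead of a growing argument list. A minimal sketch of the fill-and-call pattern, using only the fields this diff touches (anything else stays zeroed by the memset); ex_alloc_encap_reformat() is an illustrative wrapper, not driver code:

static struct mlx5_pkt_reformat *
ex_alloc_encap_reformat(struct mlx5_core_dev *mdev,
                        struct mlx5e_encap_entry *e)
{
        struct mlx5_pkt_reformat_params reformat_params;

        memset(&reformat_params, 0, sizeof(reformat_params));
        reformat_params.type = e->reformat_type;
        reformat_params.size = e->encap_size;
        reformat_params.data = e->encap_header;

        return mlx5_packet_reformat_alloc(mdev, &reformat_params,
                                          MLX5_FLOW_NAMESPACE_FDB);
}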
+typedef bool (match_cb)(struct mlx5e_encap_entry *); + static struct mlx5e_encap_entry * -mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe, - struct mlx5e_encap_entry *e) +mlx5e_get_next_matching_encap(struct mlx5e_neigh_hash_entry *nhe, + struct mlx5e_encap_entry *e, + match_cb match) { struct mlx5e_encap_entry *next = NULL;
@@@ -291,7 -292,7 +295,7 @@@ retry /* wait for encap to be fully initialized */ wait_for_completion(&next->res_ready); /* continue searching if encap entry is not in valid state after completion */ - if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) { + if (!match(next)) { e = next; goto retry; } @@@ -299,30 -300,6 +303,30 @@@ return next; }
+static bool mlx5e_encap_valid(struct mlx5e_encap_entry *e) +{ + return e->flags & MLX5_ENCAP_ENTRY_VALID; +} + +static struct mlx5e_encap_entry * +mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe, + struct mlx5e_encap_entry *e) +{ + return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_valid); +} + +static bool mlx5e_encap_initialized(struct mlx5e_encap_entry *e) +{ + return e->compl_result >= 0; +} + +struct mlx5e_encap_entry * +mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe, + struct mlx5e_encap_entry *e) +{ + return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_initialized); +} + void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) { struct mlx5e_neigh *m_neigh = &nhe->m_neigh; @@@ -839,6 -816,7 +843,7 @@@ int mlx5e_attach_decap(struct mlx5e_pri { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr; + struct mlx5_pkt_reformat_params reformat_params; struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5e_decap_entry *d; struct mlx5e_decap_key key; @@@ -880,10 -858,12 +885,12 @@@ hash_add_rcu(esw->offloads.decap_tbl, &d->hlist, hash_key); mutex_unlock(&esw->offloads.decap_tbl_lock);
+ memset(&reformat_params, 0, sizeof(reformat_params)); + reformat_params.type = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2; + reformat_params.size = sizeof(parse_attr->eth); + reformat_params.data = &parse_attr->eth; d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, - MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2, - sizeof(parse_attr->eth), - &parse_attr->eth, + &reformat_params, MLX5_FLOW_NAMESPACE_FDB); if (IS_ERR(d->pkt_reformat)) { err = PTR_ERR(d->pkt_reformat); diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_main.c index d26b8ed51195,59ee28156603..dea2c4e01e8b --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@@ -91,12 -91,16 +91,16 @@@ void mlx5e_update_carrier(struct mlx5e_ { struct mlx5_core_dev *mdev = priv->mdev; u8 port_state; + bool up;
port_state = mlx5_query_vport_state(mdev, MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT, 0);
- if (port_state == VPORT_STATE_UP) { + up = port_state == VPORT_STATE_UP; + if (up == netif_carrier_ok(priv->netdev)) + netif_carrier_event(priv->netdev); + if (up) { netdev_info(priv->netdev, "Link up\n"); netif_carrier_on(priv->netdev); } else { @@@ -853,7 -857,7 +857,7 @@@ int mlx5e_open_rq(struct mlx5e_params * if (err) goto err_destroy_rq;
- if (mlx5e_is_tls_on(rq->priv) && !mlx5_accel_is_ktls_device(mdev)) + if (mlx5e_is_tls_on(rq->priv) && !mlx5e_accel_is_ktls_device(mdev)) __set_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state); /* must be FPGA */
if (MLX5_CAP_ETH(mdev, cqe_checksum_full)) @@@ -2705,6 -2709,8 +2709,6 @@@ static int mlx5e_update_netdev_queues(s nch = priv->channels.params.num_channels; ntc = priv->channels.params.num_tc; num_rxqs = nch * priv->profile->rq_groups; - if (priv->channels.params.ptp_rx) - num_rxqs++;
mlx5e_netdev_set_tcs(netdev, nch, ntc);
@@@ -4661,12 -4667,10 +4665,10 @@@ void mlx5e_build_nic_params(struct mlx5 params->log_sq_size = is_kdump_kernel() ? MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE : MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE, - MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe)); + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE, mlx5e_tx_mpwqe_supported(mdev));
/* XDP SQ */ - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE, - MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe)); + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE, mlx5e_tx_mpwqe_supported(mdev));
/* set CQE compression */ params->rx_cqe_compress_def = false; @@@ -4822,15 -4826,22 +4824,15 @@@ static void mlx5e_build_nic_netdev(stru }
if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) { - netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; - netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; - netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; - netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; + netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL; }
if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) { - netdev->hw_features |= NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM; - netdev->hw_enc_features |= NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM; - netdev->gso_partial_features |= NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM; + netdev->hw_features |= NETIF_F_GSO_GRE; + netdev->hw_enc_features |= NETIF_F_GSO_GRE; + netdev->gso_partial_features |= NETIF_F_GSO_GRE; }
if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) { diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index d4b0f270b6bb,cf4558e12325..8d84d0712c20 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@@ -83,17 -83,17 +83,17 @@@ struct mlx5e_tc_attr_to_reg_mapping mlx [CHAIN_TO_REG] = { .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0, .moffset = 0, - .mlen = 2, + .mlen = 16, }, [VPORT_TO_REG] = { .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0, - .moffset = 2, - .mlen = 2, + .moffset = 16, + .mlen = 16, }, [TUNNEL_TO_REG] = { .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1, - .moffset = 1, - .mlen = ((ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS) / 8), + .moffset = 8, + .mlen = ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS, .soffset = MLX5_BYTE_OFF(fte_match_param, misc_parameters_2.metadata_reg_c_1), }, @@@ -110,7 -110,7 +110,7 @@@ [NIC_CHAIN_TO_REG] = { .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B, .moffset = 0, - .mlen = 2, + .mlen = 16, }, [NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct, }; @@@ -128,23 -128,46 +128,46 @@@ static void mlx5e_put_flow_tunnel_id(st void mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec, enum mlx5e_tc_attr_to_reg type, - u32 data, + u32 val, u32 mask) { + void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval; int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset; + int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset; int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen; - void *headers_c = spec->match_criteria; - void *headers_v = spec->match_value; - void *fmask, *fval; + u32 max_mask = GENMASK(match_len - 1, 0); + __be32 curr_mask_be, curr_val_be; + u32 curr_mask, curr_val;
fmask = headers_c + soffset; fval = headers_v + soffset;
- mask = (__force u32)(cpu_to_be32(mask)) >> (32 - (match_len * 8)); - data = (__force u32)(cpu_to_be32(data)) >> (32 - (match_len * 8)); + memcpy(&curr_mask_be, fmask, 4); + memcpy(&curr_val_be, fval, 4); + + curr_mask = be32_to_cpu(curr_mask_be); + curr_val = be32_to_cpu(curr_val_be); + + /* move to correct offset */ + WARN_ON(mask > max_mask); + mask <<= moffset; + val <<= moffset; + max_mask <<= moffset; + + /* zero val and mask */ + curr_mask &= ~max_mask; + curr_val &= ~max_mask;
- memcpy(fmask, &mask, match_len); - memcpy(fval, &data, match_len); + /* add current to mask */ + curr_mask |= mask; + curr_val |= val; + + /* back to be32 and write */ + curr_mask_be = cpu_to_be32(curr_mask); + curr_val_be = cpu_to_be32(curr_val); + + memcpy(fmask, &curr_mask_be, 4); + memcpy(fval, &curr_val_be, 4);
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; } @@@ -152,23 -175,28 +175,28 @@@ void mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec, enum mlx5e_tc_attr_to_reg type, - u32 *data, + u32 *val, u32 *mask) { + void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval; int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset; + int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset; int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen; - void *headers_c = spec->match_criteria; - void *headers_v = spec->match_value; - void *fmask, *fval; + u32 max_mask = GENMASK(match_len - 1, 0); + __be32 curr_mask_be, curr_val_be; + u32 curr_mask, curr_val;
fmask = headers_c + soffset; fval = headers_v + soffset;
- memcpy(mask, fmask, match_len); - memcpy(data, fval, match_len); + memcpy(&curr_mask_be, fmask, 4); + memcpy(&curr_val_be, fval, 4); + + curr_mask = be32_to_cpu(curr_mask_be); + curr_val = be32_to_cpu(curr_val_be);
- *mask = be32_to_cpu((__force __be32)(*mask << (32 - (match_len * 8)))); - *data = be32_to_cpu((__force __be32)(*data << (32 - (match_len * 8)))); + *mask = (curr_mask >> moffset) & max_mask; + *val = (curr_val >> moffset) & max_mask; }
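With the mapping table now expressed in bit offsets and bit widths, the match helpers above reduce to plain mask arithmetic on a 32-bit register image. A self-contained sketch of the packing step with illustrative values; pack_field() is not a function in the driver:

#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/types.h>

/* Insert an mlen-bit value at bit offset moffset of a 32-bit register
 * image, as the helpers above do on curr_mask/curr_val.
 */
static u32 pack_field(u32 reg, u32 val, int moffset, int mlen)
{
        u32 max_mask = GENMASK(mlen - 1, 0);    /* e.g. 0x7ff for mlen=11 */

        WARN_ON(val > max_mask);
        reg &= ~(max_mask << moffset);          /* clear the field */
        reg |= (val & max_mask) << moffset;     /* insert the value */
        return reg;
}

/* e.g. pack_field(0, 0x5a, 8, 11) == 0x5a00; unpacking reverses it:
 * (0x5a00 >> 8) & GENMASK(10, 0) == 0x5a
 */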
int @@@ -192,13 -220,13 +220,13 @@@ mlx5e_tc_match_to_reg_set_and_get_id(st (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);
/* Firmware has a 5-bit length field and 0 means 32 bits */ - if (mlen == 4) + if (mlen == 32) mlen = 0;
MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET); MLX5_SET(set_action_in, modact, field, mfield); - MLX5_SET(set_action_in, modact, offset, moffset * 8); - MLX5_SET(set_action_in, modact, length, mlen * 8); + MLX5_SET(set_action_in, modact, offset, moffset); + MLX5_SET(set_action_in, modact, length, mlen); MLX5_SET(set_action_in, modact, data, data); err = mod_hdr_acts->num_actions; mod_hdr_acts->num_actions++; @@@ -296,13 -324,13 +324,13 @@@ void mlx5e_tc_match_to_reg_mod_hdr_chan modact = mod_hdr_acts->actions + (act_id * MLX5_MH_ACT_SZ);
/* Firmware has a 5-bit length field and 0 means 32 bits */ - if (mlen == 4) + if (mlen == 32) mlen = 0;
MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET); MLX5_SET(set_action_in, modact, field, mfield); - MLX5_SET(set_action_in, modact, offset, moffset * 8); - MLX5_SET(set_action_in, modact, length, mlen * 8); + MLX5_SET(set_action_in, modact, offset, moffset); + MLX5_SET(set_action_in, modact, length, mlen); MLX5_SET(set_action_in, modact, data, data); }
@@@ -4765,7 -4793,7 +4793,7 @@@ static void mlx5e_tc_hairpin_update_dea list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) { wait_for_completion(&hpe->res_ready); if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id) - hpe->hp->pair->peer_gone = true; + mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);
mlx5e_hairpin_put(priv, hpe); } @@@ -5105,7 -5133,7 +5133,7 @@@ bool mlx5e_tc_update_skb(struct mlx5_cq
tc_skb_ext->chain = chain;
- zone_restore_id = (reg_b >> REG_MAPPING_SHIFT(NIC_ZONE_RESTORE_TO_REG)) & + zone_restore_id = (reg_b >> REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) & ESW_ZONE_ID_MASK;
if (!mlx5e_tc_ct_restore_flow(tc->ct, skb, diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 17027536efba,721093b55acc..f7cbeb0b66d2 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@@ -129,7 -129,7 +129,7 @@@ struct tunnel_match_enc_opts */ #define TUNNEL_INFO_BITS 12 #define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0) - #define ENC_OPTS_BITS 12 + #define ENC_OPTS_BITS 11 #define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0) #define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS) #define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0) @@@ -178,9 -178,6 +178,9 @@@ void mlx5e_take_all_encap_flows(struct void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
struct mlx5e_neigh_hash_entry; +struct mlx5e_encap_entry * +mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe, + struct mlx5e_encap_entry *e); void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
void mlx5e_tc_reoffload_flows_work(struct work_struct *work); @@@ -201,10 -198,10 +201,10 @@@ enum mlx5e_tc_attr_to_reg
struct mlx5e_tc_attr_to_reg_mapping { int mfield; /* rewrite field */ - int moffset; /* offset of mfield */ - int mlen; /* bytes to rewrite/match */ + int moffset; /* bit offset of mfield */ + int mlen; /* bits to rewrite/match */
- int soffset; /* offset of spec for match */ + int soffset; /* byte offset of spec for match */ };
extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[]; diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 320fe0cda917,669ff58107e4..c63d78eda606 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@@ -32,6 -32,7 +32,6 @@@
#include <linux/tcp.h> #include <linux/if_vlan.h> -#include <linux/ptp_classify.h> #include <net/geneve.h> #include <net/dsfield.h> #include "en.h" @@@ -66,6 -67,24 +66,6 @@@ static inline int mlx5e_get_dscp_up(str } #endif
-static bool mlx5e_use_ptpsq(struct sk_buff *skb) -{ - struct flow_keys fk; - - if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) - return false; - - if (fk.basic.n_proto == htons(ETH_P_1588)) - return true; - - if (fk.basic.n_proto != htons(ETH_P_IP) && - fk.basic.n_proto != htons(ETH_P_IPV6)) - return false; - - return (fk.basic.ip_proto == IPPROTO_UDP && - fk.ports.dst == htons(PTP_EV_PORT)); -} - static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb) { struct mlx5e_priv *priv = netdev_priv(dev); @@@ -126,9 -145,9 +126,9 @@@ u16 mlx5e_select_queue(struct net_devic }
ptp_channel = READ_ONCE(priv->channels.ptp); - if (unlikely(ptp_channel) && - test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) && - mlx5e_use_ptpsq(skb)) + if (unlikely(ptp_channel && + test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) && + mlx5e_use_ptpsq(skb))) return mlx5e_select_ptpsq(dev, skb);
txq_ix = netdev_pick_tx(dev, skb, NULL); @@@ -687,16 -706,12 +687,12 @@@ void mlx5e_tx_mpwqe_ensure_complete(str mlx5e_tx_mpwqe_session_complete(sq); }
- static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq, + static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_accel_tx_state *accel, struct mlx5_wqe_eth_seg *eseg, u16 ihs) { - if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg, ihs))) - return false; - + mlx5e_accel_tx_eseg(priv, skb, eseg, ihs); mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg); - - return true; }
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) @@@ -725,10 -740,7 +721,7 @@@ if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) { struct mlx5_wqe_eth_seg eseg = {};
- if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg, - attr.ihs))) - return NETDEV_TX_OK; - + mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg, attr.ihs); mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more()); return NETDEV_TX_OK; } @@@ -743,9 -755,7 +736,7 @@@ /* May update the WQE, but may not post other WQEs. */ mlx5e_accel_tx_finish(sq, wqe, &accel, (struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl)); - if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs))) - return NETDEV_TX_OK; - + mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs); mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
return NETDEV_TX_OK; diff --combined drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index 9737565cd8d4,0e2b73731117..b2aa6c93c3a1 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@@ -26,6 -26,7 +26,7 @@@ enum mlx5dr_action_reformat_type DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2, DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2, DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3, + DR_ACTION_REFORMAT_TYP_INSERT_HDR, };
struct mlx5dr_match_parameters { @@@ -105,6 -106,8 +106,8 @@@ mlx5dr_action_create_flow_counter(u32 c struct mlx5dr_action * mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn, enum mlx5dr_action_reformat_type reformat_type, + u8 reformat_param_0, + u8 reformat_param_1, size_t data_sz, void *data);
@@@ -124,11 -127,10 +127,11 @@@ int mlx5dr_action_destroy(struct mlx5dr static inline bool mlx5dr_is_supported(struct mlx5_core_dev *dev) { - return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) || - (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) && - (MLX5_CAP_GEN(dev, steering_format_version) <= - MLX5_STEERING_FORMAT_CONNECTX_6DX)); + return MLX5_CAP_GEN(dev, roce) && + (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) || + (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) && + (MLX5_CAP_GEN(dev, steering_format_version) <= + MLX5_STEERING_FORMAT_CONNECTX_6DX))); }
/* buddy functions & structure */ diff --combined drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index 85f0ce285146,677a53f65008..0998dcc9cac0 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@@ -149,22 -149,27 +149,27 @@@ mlxsw_thermal_module_trips_reset(struc
static int mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core, - struct mlxsw_thermal_module *tz) + struct mlxsw_thermal_module *tz, + int crit_temp, int emerg_temp) { - int crit_temp, emerg_temp; int err;
- err = mlxsw_env_module_temp_thresholds_get(core, tz->module, - SFP_TEMP_HIGH_WARN, - &crit_temp); - if (err) - return err; + /* Do not try to query temperature thresholds directly from the module's + * EEPROM if we got valid thresholds from MTMP. + */ + if (!emerg_temp || !crit_temp) { + err = mlxsw_env_module_temp_thresholds_get(core, tz->module, + SFP_TEMP_HIGH_WARN, + &crit_temp); + if (err) + return err;
- err = mlxsw_env_module_temp_thresholds_get(core, tz->module, - SFP_TEMP_HIGH_ALARM, - &emerg_temp); - if (err) - return err; + err = mlxsw_env_module_temp_thresholds_get(core, tz->module, + SFP_TEMP_HIGH_ALARM, + &emerg_temp); + if (err) + return err; + }
if (crit_temp > emerg_temp) { dev_warn(dev, "%s : Critical threshold %d is above emergency threshold %d\n", @@@ -281,7 -286,7 +286,7 @@@ static int mlxsw_thermal_get_temp(struc dev_err(dev, "Failed to query temp sensor\n"); return err; } - mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL); + mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL, NULL, NULL); if (temp > 0) mlxsw_thermal_tz_score_update(thermal, tzdev, thermal->trips, temp); @@@ -420,36 -425,57 +425,57 @@@ static int mlxsw_thermal_module_unbind( return err; }
- static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev, - int *p_temp) + static void + mlxsw_thermal_module_temp_and_thresholds_get(struct mlxsw_core *core, + u16 sensor_index, int *p_temp, + int *p_crit_temp, + int *p_emerg_temp) { - struct mlxsw_thermal_module *tz = tzdev->devdata; - struct mlxsw_thermal *thermal = tz->parent; - struct device *dev = thermal->bus_info->dev; char mtmp_pl[MLXSW_REG_MTMP_LEN]; - int temp; int err;
- /* Read module temperature. */ - mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + - tz->module, false, false); - err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl); + /* Read module temperature and thresholds. */ + mlxsw_reg_mtmp_pack(mtmp_pl, sensor_index, false, false); + err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl); if (err) { - /* Do not return error - in case of broken module's sensor - * it will cause error message flooding. + /* Set temperature and thresholds to zero to avoid passing + * uninitialized data back to the caller. */ - temp = 0; - *p_temp = (int) temp; - return 0; + *p_temp = 0; + *p_crit_temp = 0; + *p_emerg_temp = 0; + + return; } - mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL); + mlxsw_reg_mtmp_unpack(mtmp_pl, p_temp, NULL, p_crit_temp, p_emerg_temp, + NULL); + } + + static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev, + int *p_temp) + { + struct mlxsw_thermal_module *tz = tzdev->devdata; + struct mlxsw_thermal *thermal = tz->parent; + int temp, crit_temp, emerg_temp; + struct device *dev; + u16 sensor_index; + int err; + + dev = thermal->bus_info->dev; + sensor_index = MLXSW_REG_MTMP_MODULE_INDEX_MIN + tz->module; + + /* Read module temperature and thresholds. */ + mlxsw_thermal_module_temp_and_thresholds_get(thermal->core, + sensor_index, &temp, + &crit_temp, &emerg_temp); *p_temp = temp;
if (!temp) return 0;
/* Update trip points. */ - err = mlxsw_thermal_module_trips_update(dev, thermal->core, tz); + err = mlxsw_thermal_module_trips_update(dev, thermal->core, tz, + crit_temp, emerg_temp); if (!err && temp > 0) mlxsw_thermal_tz_score_update(thermal, tzdev, tz->trips, temp);
@@@ -560,7 -586,7 +586,7 @@@ static int mlxsw_thermal_gearbox_temp_g if (err) return err;
- mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL); + mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL, NULL, NULL); if (temp > 0) mlxsw_thermal_tz_score_update(thermal, tzdev, tz->trips, temp);
@@@ -693,8 -719,7 +719,8 @@@ mlxsw_thermal_module_tz_init(struct mlx MLXSW_THERMAL_TRIP_MASK, module_tz, &mlxsw_thermal_module_ops, - NULL, 0, 0); + NULL, 0, + module_tz->parent->polling_delay); if (IS_ERR(module_tz->tzdev)) { err = PTR_ERR(module_tz->tzdev); return err; @@@ -717,7 -742,10 +743,10 @@@ mlxsw_thermal_module_init(struct devic struct mlxsw_thermal *thermal, u8 module) { struct mlxsw_thermal_module *module_tz; + int dummy_temp, crit_temp, emerg_temp; + u16 sensor_index;
+ sensor_index = MLXSW_REG_MTMP_MODULE_INDEX_MIN + module; module_tz = &thermal->tz_module_arr[module]; /* Skip if parent is already set (case of port split). */ if (module_tz->parent) @@@ -728,8 -756,12 +757,12 @@@ sizeof(thermal->trips)); /* Initialize all trip point. */ mlxsw_thermal_module_trips_reset(module_tz); + /* Read module temperature and thresholds. */ + mlxsw_thermal_module_temp_and_thresholds_get(core, sensor_index, &dummy_temp, + &crit_temp, &emerg_temp); /* Update trip point according to the module data. */ - return mlxsw_thermal_module_trips_update(dev, core, module_tz); + return mlxsw_thermal_module_trips_update(dev, core, module_tz, + crit_temp, emerg_temp); }
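The init path above now feeds MTMP-provided thresholds into mlxsw_thermal_module_trips_update(), which falls back to the module EEPROM only when MTMP returned zeros. A condensed sketch of that selection logic; the ex_* names are hypothetical:

struct ex_module;
int ex_eeprom_thresholds(struct ex_module *mod, int *crit, int *emerg);
int ex_apply_trips(struct ex_module *mod, int crit, int emerg);

/* 'crit'/'emerg' arrive from the MTMP unpack and are zero when the
 * register carried no thresholds.
 */
static int ex_update_trips(struct ex_module *mod, int crit, int emerg)
{
        int err;

        if (!crit || !emerg) {
                /* fall back to the (slow) module EEPROM read */
                err = ex_eeprom_thresholds(mod, &crit, &emerg);
                if (err)
                        return err;
        }

        return ex_apply_trips(mod, crit, emerg);
}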
static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz) @@@ -816,8 -848,7 +849,8 @@@ mlxsw_thermal_gearbox_tz_init(struct ml MLXSW_THERMAL_TRIP_MASK, gearbox_tz, &mlxsw_thermal_gearbox_ops, - NULL, 0, 0); + NULL, 0, + gearbox_tz->parent->polling_delay); if (IS_ERR(gearbox_tz->tzdev)) return PTR_ERR(gearbox_tz->tzdev);
diff --combined drivers/net/ethernet/mellanox/mlxsw/reg.h index 2bc5a9003c6d,5304309ecb9d..93f1db3927af --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@@ -3907,7 -3907,7 +3907,7 @@@ MLXSW_ITEM32(reg, qeec, max_shaper_bs, #define MLXSW_REG_QEEC_HIGHEST_SHAPER_BS 25 #define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1 5 #define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2 11 -#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 5 +#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 11
static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port, enum mlxsw_reg_qeec_hr hr, u8 index, @@@ -8305,6 -8305,8 +8305,8 @@@ enum MLXSW_REG_RECR2_TCP_UDP_EN_IPV4 = 7, /* Enable TCP/UDP header fields if packet is IPv6 */ MLXSW_REG_RECR2_TCP_UDP_EN_IPV6 = 8, + + __MLXSW_REG_RECR2_HEADER_CNT, };
/* reg_recr2_outer_header_enables @@@ -8339,6 -8341,8 +8341,8 @@@ enum MLXSW_REG_RECR2_TCP_UDP_SPORT = 74, /* TCP/UDP Destination Port */ MLXSW_REG_RECR2_TCP_UDP_DPORT = 75, + + __MLXSW_REG_RECR2_FIELD_CNT, };
/* reg_recr2_outer_header_fields_enable @@@ -8347,47 -8351,47 +8351,47 @@@ */ MLXSW_ITEM_BIT_ARRAY(reg, recr2, outer_header_fields_enable, 0x14, 0x14, 1);
- static inline void mlxsw_reg_recr2_ipv4_sip_enable(char *payload) - { - int i; - - for (i = MLXSW_REG_RECR2_IPV4_SIP0; i <= MLXSW_REG_RECR2_IPV4_SIP3; i++) - mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i, - true); - } - - static inline void mlxsw_reg_recr2_ipv4_dip_enable(char *payload) - { - int i; - - for (i = MLXSW_REG_RECR2_IPV4_DIP0; i <= MLXSW_REG_RECR2_IPV4_DIP3; i++) - mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i, - true); - } - - static inline void mlxsw_reg_recr2_ipv6_sip_enable(char *payload) - { - int i = MLXSW_REG_RECR2_IPV6_SIP0_7; - - mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i, true); - - i = MLXSW_REG_RECR2_IPV6_SIP8; - for (; i <= MLXSW_REG_RECR2_IPV6_SIP15; i++) - mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i, - true); - } - - static inline void mlxsw_reg_recr2_ipv6_dip_enable(char *payload) - { - int i = MLXSW_REG_RECR2_IPV6_DIP0_7; + /* reg_recr2_inner_header_enables + * Bit mask where each bit enables a specific inner layer to be included in the + * hash calculation. Same values as reg_recr2_outer_header_enables. + * Access: RW + */ + MLXSW_ITEM_BIT_ARRAY(reg, recr2, inner_header_enables, 0x2C, 0x04, 1);
- mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i, true); + enum { + /* Inner IPv4 Source IP */ + MLXSW_REG_RECR2_INNER_IPV4_SIP0 = 3, + MLXSW_REG_RECR2_INNER_IPV4_SIP3 = 6, + /* Inner IPv4 Destination IP */ + MLXSW_REG_RECR2_INNER_IPV4_DIP0 = 7, + MLXSW_REG_RECR2_INNER_IPV4_DIP3 = 10, + /* Inner IP Protocol */ + MLXSW_REG_RECR2_INNER_IPV4_PROTOCOL = 11, + /* Inner IPv6 Source IP */ + MLXSW_REG_RECR2_INNER_IPV6_SIP0_7 = 12, + MLXSW_REG_RECR2_INNER_IPV6_SIP8 = 20, + MLXSW_REG_RECR2_INNER_IPV6_SIP15 = 27, + /* Inner IPv6 Destination IP */ + MLXSW_REG_RECR2_INNER_IPV6_DIP0_7 = 28, + MLXSW_REG_RECR2_INNER_IPV6_DIP8 = 36, + MLXSW_REG_RECR2_INNER_IPV6_DIP15 = 43, + /* Inner IPv6 Next Header */ + MLXSW_REG_RECR2_INNER_IPV6_NEXT_HEADER = 44, + /* Inner IPv6 Flow Label */ + MLXSW_REG_RECR2_INNER_IPV6_FLOW_LABEL = 45, + /* Inner TCP/UDP Source Port */ + MLXSW_REG_RECR2_INNER_TCP_UDP_SPORT = 46, + /* Inner TCP/UDP Destination Port */ + MLXSW_REG_RECR2_INNER_TCP_UDP_DPORT = 47, + + __MLXSW_REG_RECR2_INNER_FIELD_CNT, + };
- i = MLXSW_REG_RECR2_IPV6_DIP8; - for (; i <= MLXSW_REG_RECR2_IPV6_DIP15; i++) - mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i, - true); - } + /* reg_recr2_inner_header_fields_enable + * Inner packet fields to enable for ECMP hash subject to inner_header_enables. + * Access: RW + */ + MLXSW_ITEM_BIT_ARRAY(reg, recr2, inner_header_fields_enable, 0x30, 0x08, 1);
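The new inner_header_fields_enable bit array is driven the same way the removed outer-field helpers drove theirs: loop over the enum range and set each bit. A sketch for the inner IPv4 source and destination fields, assuming the setter generated by the MLXSW_ITEM_BIT_ARRAY() macro above:

static void ex_recr2_inner_ipv4_enable(char *payload)
{
        int i;

        /* covers SIP0..SIP3 and DIP0..DIP3, which are contiguous */
        for (i = MLXSW_REG_RECR2_INNER_IPV4_SIP0;
             i <= MLXSW_REG_RECR2_INNER_IPV4_DIP3; i++)
                mlxsw_reg_recr2_inner_header_fields_enable_set(payload, i,
                                                               true);
}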
static inline void mlxsw_reg_recr2_pack(char *payload, u32 seed) { @@@ -9459,6 -9463,14 +9463,14 @@@ MLXSW_ITEM32(reg, mtmp, sensor_index, 0 ((s16)((GENMASK(15, 0) + (v_) + 1) \ * 125)); })
+ /* reg_mtmp_max_operational_temperature + * The highest temperature in the nominal operational range. Reading is in + * 0.125 Celsius degrees units. + * In case of a module, this is the SFF critical temperature threshold. + * Access: RO + */ + MLXSW_ITEM32(reg, mtmp, max_operational_temperature, 0x04, 16, 16); + /* reg_mtmp_temperature * Temperature reading from the sensor. Reading is in 0.125 Celsius * degrees units. @@@ -9537,7 -9549,9 +9549,9 @@@ static inline void mlxsw_reg_mtmp_pack( }
static inline void mlxsw_reg_mtmp_unpack(char *payload, int *p_temp, - int *p_max_temp, char *sensor_name) + int *p_max_temp, int *p_temp_hi, + int *p_max_oper_temp, + char *sensor_name) { s16 temp;
@@@ -9549,6 -9563,14 +9563,14 @@@ temp = mlxsw_reg_mtmp_max_temperature_get(payload); *p_max_temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp); } + if (p_temp_hi) { + temp = mlxsw_reg_mtmp_temperature_threshold_hi_get(payload); + *p_temp_hi = MLXSW_REG_MTMP_TEMP_TO_MC(temp); + } + if (p_max_oper_temp) { + temp = mlxsw_reg_mtmp_max_operational_temperature_get(payload); + *p_max_oper_temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp); + } if (sensor_name) mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name); } diff --combined drivers/net/ethernet/neterion/vxge/vxge-config.c index b47d74743f5a,38a273c4d593..a3204a7ef750 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c @@@ -3784,7 -3784,6 +3784,7 @@@ vxge_hw_rts_rth_data0_data1_get(u32 j, VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN | VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA( itable[j]); + return; default: return; } @@@ -4885,7 -4884,7 +4885,7 @@@ vpath_open_exit1 }
/** - * vxge_hw_vpath_rx_doorbell_post - Close the handle got from previous vpath + * vxge_hw_vpath_rx_doorbell_init - Close the handle got from previous vpath * (vpath) open * @vp: Handle got from previous vpath open * diff --combined drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index ab1e0fcccabb,6556b5381ce8..13d8eb43a485 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@@ -126,24 -126,24 +126,24 @@@ static void rmnet_get_stats64(struct ne struct rtnl_link_stats64 *s) { struct rmnet_priv *priv = netdev_priv(dev); - struct rmnet_vnd_stats total_stats; + struct rmnet_vnd_stats total_stats = { }; struct rmnet_pcpu_stats *pcpu_ptr; + struct rmnet_vnd_stats snapshot; unsigned int cpu, start;
- memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats)); - for_each_possible_cpu(cpu) { pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
do { start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp); - total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts; - total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes; - total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts; - total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes; + snapshot = pcpu_ptr->stats; /* struct assignment */ } while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
- total_stats.tx_drops += pcpu_ptr->stats.tx_drops; + total_stats.rx_pkts += snapshot.rx_pkts; + total_stats.rx_bytes += snapshot.rx_bytes; + total_stats.tx_pkts += snapshot.tx_pkts; + total_stats.tx_bytes += snapshot.tx_bytes; + total_stats.tx_drops += snapshot.tx_drops; }
s->rx_packets = total_stats.rx_pkts; @@@ -166,6 -166,7 +166,7 @@@ static const struct net_device_ops rmne
static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = { "Checksum ok", + "Bad IPv4 header checksum", "Checksum valid bit not set", "Checksum validation failed", "Checksum error bad buffer", @@@ -174,6 -175,7 +175,7 @@@ "Checksum skipped on ip fragment", "Checksum skipped", "Checksum computed in software", + "Checksum computed in hardware", };
static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf) @@@ -354,4 -356,4 +356,4 @@@ int rmnet_vnd_update_dev_mtu(struct rmn }
return 0; -} +} diff --combined drivers/net/ethernet/smsc/smc91x.c index bc19db2dbafb,813ea941b91a..e4fc6484faa8 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@@ -671,19 -671,19 +671,19 @@@ smc_hard_start_xmit(struct sk_buff *skb status = SMC_GET_INT(lp); if (status & IM_ALLOC_INT) { SMC_ACK_INT(lp, IM_ALLOC_INT); - break; + break; } - } while (--poll_count); + } while (--poll_count);
smc_special_unlock(&lp->lock, flags);
lp->pending_tx_skb = skb; - if (!poll_count) { + if (!poll_count) { /* oh well, wait until the chip finds memory later */ netif_stop_queue(dev); DBG(2, dev, "TX memory allocation deferred.\n"); SMC_ENABLE_INT(lp, IM_ALLOC_INT); - } else { + } else { /* * Allocation succeeded: push packet to the chip's own memory * immediately. @@@ -1790,7 -1790,7 +1790,7 @@@ static int smc_findirq(struct smc_loca SMC_SET_INT_MASK(lp, IM_ALLOC_INT);
/* - * Allocate 512 bytes of memory. Note that the chip was just + * Allocate 512 bytes of memory. Note that the chip was just * reset so all the memory is available */ SMC_SET_MMU_CMD(lp, MC_ALLOC | 1); @@@ -1998,8 -1998,8 +1998,8 @@@ static int smc_probe(struct net_device
/* Grab the IRQ */ retval = request_irq(dev->irq, smc_interrupt, irq_flags, dev->name, dev); - if (retval) - goto err_out; + if (retval) + goto err_out;
#ifdef CONFIG_ARCH_PXA # ifdef SMC_USE_PXA_DMA @@@ -2190,7 -2190,6 +2190,7 @@@ static const struct of_device_id smc91x }; MODULE_DEVICE_TABLE(of, smc91x_match);
+#if defined(CONFIG_GPIOLIB) /** * try_toggle_control_gpio - configure a gpio if it exists * @dev: net device @@@ -2221,15 -2220,6 +2221,15 @@@ static int try_toggle_control_gpio(stru
return 0; } +#else +static int try_toggle_control_gpio(struct device *dev, + struct gpio_desc **desc, + const char *name, int index, + int value, unsigned int nsdelay) +{ + return 0; +} +#endif #endif
/* diff --combined drivers/net/mhi/net.c index b806f2f8f859,64af1e518484..bbbbf6ccff0c --- a/drivers/net/mhi/net.c +++ b/drivers/net/mhi/net.c @@@ -11,6 -11,7 +11,7 @@@ #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/u64_stats_sync.h> + #include <linux/wwan.h>
#include "mhi.h"
@@@ -18,6 -19,12 +19,12 @@@ #define MHI_NET_MAX_MTU 0xffff #define MHI_NET_DEFAULT_MTU 0x4000
+ /* When set to false, the default netdev (link 0) is not created, and it's up + * to the user to create the link (via wwan rtnetlink). + */ + static bool create_default_iface = true; + module_param(create_default_iface, bool, 0); + struct mhi_device_info { const char *netname; const struct mhi_net_proto *proto; @@@ -49,7 -56,7 +56,7 @@@ static int mhi_ndo_stop(struct net_devi return 0; }
-static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev) { struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); const struct mhi_net_proto *proto = mhi_netdev->proto; @@@ -295,32 -302,33 +302,33 @@@ static void mhi_net_rx_refill_work(stru schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2); }
- static struct device_type wwan_type = { - .name = "wwan", - }; - - static int mhi_net_probe(struct mhi_device *mhi_dev, - const struct mhi_device_id *id) + static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id, + struct netlink_ext_ack *extack) { - const struct mhi_device_info *info = (struct mhi_device_info *)id->driver_data; - struct device *dev = &mhi_dev->dev; + const struct mhi_device_info *info; + struct mhi_device *mhi_dev = ctxt; struct mhi_net_dev *mhi_netdev; - struct net_device *ndev; int err;
- ndev = alloc_netdev(sizeof(*mhi_netdev), info->netname, - NET_NAME_PREDICTABLE, mhi_net_setup); - if (!ndev) - return -ENOMEM; + info = (struct mhi_device_info *)mhi_dev->id->driver_data; + + /* For now we only support one link (link context 0); the driver must be + * reworked to break the 1:1 relationship for net MBIM and to forward the + * setup call to rmnet (QMAP) otherwise. + */ + if (if_id != 0) + return -EINVAL; + + if (dev_get_drvdata(&mhi_dev->dev)) + return -EBUSY;
mhi_netdev = netdev_priv(ndev); - dev_set_drvdata(dev, mhi_netdev); + + dev_set_drvdata(&mhi_dev->dev, mhi_netdev); mhi_netdev->ndev = ndev; mhi_netdev->mdev = mhi_dev; mhi_netdev->skbagg_head = NULL; mhi_netdev->proto = info->proto; - SET_NETDEV_DEV(ndev, &mhi_dev->dev); - SET_NETDEV_DEVTYPE(ndev, &wwan_type);
INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work); u64_stats_init(&mhi_netdev->stats.rx_syncp); @@@ -334,7 -342,10 +342,10 @@@ /* Number of transfer descriptors determines size of the queue */ mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
- err = register_netdev(ndev); + if (extack) + err = register_netdevice(ndev); + else + err = register_netdev(ndev); if (err) goto out_err;
@@@ -347,23 -358,89 +358,89 @@@ return 0;
out_err_proto: - unregister_netdev(ndev); + unregister_netdevice(ndev); out_err: free_netdev(ndev); return err; }
- static void mhi_net_remove(struct mhi_device *mhi_dev) + static void mhi_net_dellink(void *ctxt, struct net_device *ndev, + struct list_head *head) { - struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev); + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); + struct mhi_device *mhi_dev = ctxt;
- unregister_netdev(mhi_netdev->ndev); + if (head) + unregister_netdevice_queue(ndev, head); + else + unregister_netdev(ndev);
- mhi_unprepare_from_transfer(mhi_netdev->mdev); + mhi_unprepare_from_transfer(mhi_dev);
kfree_skb(mhi_netdev->skbagg_head);
- free_netdev(mhi_netdev->ndev); + dev_set_drvdata(&mhi_dev->dev, NULL); + } + + const struct wwan_ops mhi_wwan_ops = { + .owner = THIS_MODULE, + .priv_size = sizeof(struct mhi_net_dev), + .setup = mhi_net_setup, + .newlink = mhi_net_newlink, + .dellink = mhi_net_dellink, + }; + + static int mhi_net_probe(struct mhi_device *mhi_dev, + const struct mhi_device_id *id) + { + const struct mhi_device_info *info = (struct mhi_device_info *)id->driver_data; + struct mhi_controller *cntrl = mhi_dev->mhi_cntrl; + struct net_device *ndev; + int err; + + err = wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_wwan_ops, mhi_dev); + if (err) + return err; + + if (!create_default_iface) + return 0; + + /* Create a default interface which is used as either RMNET real-dev, + * MBIM link 0 or ip link 0 + */ + ndev = alloc_netdev(sizeof(struct mhi_net_dev), info->netname, + NET_NAME_PREDICTABLE, mhi_net_setup); + if (!ndev) { + err = -ENOMEM; + goto err_unregister; + } + + SET_NETDEV_DEV(ndev, &mhi_dev->dev); + + err = mhi_net_newlink(mhi_dev, ndev, 0, NULL); + if (err) + goto err_release; + + return 0; + + err_release: + free_netdev(ndev); + err_unregister: + wwan_unregister_ops(&cntrl->mhi_dev->dev); + + return err; + } + + static void mhi_net_remove(struct mhi_device *mhi_dev) + { + struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev); + struct mhi_controller *cntrl = mhi_dev->mhi_cntrl; + + /* rtnetlink takes care of removing remaining links */ + wwan_unregister_ops(&cntrl->mhi_dev->dev); + + if (create_default_iface) + mhi_net_dellink(mhi_dev, mhi_netdev->ndev, NULL); }
static const struct mhi_device_info mhi_hwip0 = { diff --combined drivers/net/vrf.c index 28a6c4cfe9b8,07eaef5e73c2..452822f88214 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@@ -274,7 -274,7 +274,7 @@@ vrf_map_register_dev(struct net_device int res;
/* we pre-allocate elements used in the spin-locked section (so that we - * keep the spinlock as short as possibile). + * keep the spinlock as short as possible). */ new_me = vrf_map_elem_alloc(GFP_KERNEL); if (!new_me) @@@ -1183,6 -1183,9 +1183,6 @@@ static int vrf_dev_init(struct net_devi
dev->flags = IFF_MASTER | IFF_NOARP;
- /* MTU is irrelevant for VRF device; set to 64k similar to lo */ - dev->mtu = 64 * 1024; - /* similarly, oper state is irrelevant; set to up to avoid confusion */ dev->operstate = IF_OPER_UP; netdev_lockdep_set_classes(dev); @@@ -1682,8 -1685,7 +1682,8 @@@ static void vrf_setup(struct net_devic * which breaks networking. */ dev->min_mtu = IPV6_MIN_MTU; - dev->max_mtu = ETH_MAX_MTU; + dev->max_mtu = IP6_MAX_MTU; + dev->mtu = dev->max_mtu; }
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], diff --combined include/linux/acpi.h index 4d7f1783744f,6ace3a0f1415..b4b5744c6c58 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@@ -132,7 -132,6 +132,7 @@@ enum acpi_address_range_id union acpi_subtable_headers { struct acpi_subtable_header common; struct acpi_hmat_structure hmat; + struct acpi_prmt_module_header prmt; };
typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table); @@@ -551,7 -550,6 +551,7 @@@ acpi_status acpi_run_osc(acpi_handle ha #define OSC_SB_OSLPI_SUPPORT 0x00000100 #define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000 #define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00002000 +#define OSC_SB_PRM_SUPPORT 0x00020000 #define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000
extern bool osc_sb_apei_support_acked; @@@ -668,9 -666,7 +668,9 @@@ extern bool acpi_driver_match_device(st const struct device_driver *drv); int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); int acpi_device_modalias(struct device *, char *, int); -void acpi_walk_dep_device_list(acpi_handle handle); +int acpi_walk_dep_device_list(acpi_handle handle, + int (*callback)(struct acpi_dep_data *, void *), + void *data);
struct platform_device *acpi_create_platform_device(struct acpi_device *, struct property_entry *); @@@ -714,6 -710,8 +714,8 @@@ static inline u64 acpi_arch_get_root_po } #endif
+ int acpi_get_local_address(acpi_handle handle, u32 *addr); + #else /* !CONFIG_ACPI */
#define acpi_disabled 1 @@@ -769,7 -767,7 +771,7 @@@ static inline bool is_acpi_device_node( return false; }
-static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode) +static inline struct acpi_device *to_acpi_device_node(const struct fwnode_handle *fwnode) { return NULL; } @@@ -779,12 -777,12 +781,12 @@@ static inline bool is_acpi_data_node(co return false; }
-static inline struct acpi_data_node *to_acpi_data_node(struct fwnode_handle *fwnode) +static inline struct acpi_data_node *to_acpi_data_node(const struct fwnode_handle *fwnode) { return NULL; }
-static inline bool acpi_data_node_match(struct fwnode_handle *fwnode, +static inline bool acpi_data_node_match(const struct fwnode_handle *fwnode, const char *name) { return false; @@@ -915,7 -913,7 +917,7 @@@ acpi_create_platform_device(struct acpi return NULL; }
-static inline bool acpi_dma_supported(struct acpi_device *adev) +static inline bool acpi_dma_supported(const struct acpi_device *adev) { return false; } @@@ -969,6 -967,11 +971,11 @@@ static inline struct acpi_device *acpi_ return NULL; }
+ static inline int acpi_get_local_address(acpi_handle handle, u32 *addr) + { + return -ENODEV; + } + #endif /* !CONFIG_ACPI */
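This stub completes the usual compile-out convention, also visible in the smc91x CONFIG_GPIOLIB hunk earlier: a real prototype when the option is enabled, a static inline returning an error otherwise, so callers never need #ifdef guards. A minimal sketch with an invented CONFIG_FOO/foo_frob() pair:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/types.h>

#ifdef CONFIG_FOO
int foo_frob(struct device *dev, u32 *out);
#else
static inline int foo_frob(struct device *dev, u32 *out)
{
        return -ENODEV; /* feature compiled out; callers just see an error */
}
#endif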
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC diff --combined include/linux/mm_types.h index 8f0fb62e8975,ed6862eacb52..862f88a8c28a --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@@ -96,6 -96,13 +96,13 @@@ struct page unsigned long private; }; struct { /* page_pool used by netstack */ + /** + * @pp_magic: magic value to avoid recycling non + * page_pool allocated pages. + */ + unsigned long pp_magic; + struct page_pool *pp; + unsigned long _pp_mapping_pad; /** * @dma_addr: might require a 64-bit value on * 32-bit architectures. @@@ -445,6 -452,13 +452,6 @@@ struct mm_struct */ atomic_t has_pinned;
- /** - * @write_protect_seq: Locked when any thread is write - * protecting pages mapped by this mm to enforce a later COW, - * for instance during page table copying for fork(). - */ - seqcount_t write_protect_seq; - #ifdef CONFIG_MMU atomic_long_t pgtables_bytes; /* PTE page table pages */ #endif @@@ -453,18 -467,6 +460,18 spinlock_t page_table_lock; /* Protects page tables and some * counters */ + /* + * With some kernel configs, the offset of mmap_lock inside + * 'mm_struct' is 0x120, which is close to optimal: its two hot + * fields, 'count' and 'owner', sit in two different cachelines, + * and when mmap_lock is highly contended both fields are + * accessed frequently, so the current layout helps to reduce + * cache bouncing. + * + * So please be careful when adding new fields before mmap_lock, + * which can easily push the two fields into one cacheline. + */ struct rw_semaphore mmap_lock;
struct list_head mmlist; /* List of maybe swapped mm's. These @@@ -485,15 -487,7 +492,15 @@@ unsigned long stack_vm; /* VM_STACK */ unsigned long def_flags;
+ /** + * @write_protect_seq: Locked when any thread is write + * protecting pages mapped by this mm to enforce a later COW, + * for instance during page table copying for fork(). + */ + seqcount_t write_protect_seq; + spinlock_t arg_lock; /* protect the below fields */ + unsigned long start_code, end_code, start_data, end_data; unsigned long start_brk, brk, start_stack; unsigned long arg_start, arg_end, env_start, env_end; diff --combined include/net/sock.h index 7a7058f4f265,9b341c2c924f..ced2fc965ec7 --- a/include/net/sock.h +++ b/include/net/sock.h @@@ -1934,8 -1934,7 +1934,8 @@@ static inline u32 net_tx_rndhash(void
static inline void sk_set_txhash(struct sock *sk) { - sk->sk_txhash = net_tx_rndhash(); + /* This pairs with READ_ONCE() in skb_set_hash_from_sk() */ + WRITE_ONCE(sk->sk_txhash, net_tx_rndhash()); }
static inline bool sk_rethink_txhash(struct sock *sk) @@@ -2207,12 -2206,9 +2207,12 @@@ static inline void sock_poll_wait(struc
static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk) { - if (sk->sk_txhash) { + /* This pairs with WRITE_ONCE() in sk_set_txhash() */ + u32 txhash = READ_ONCE(sk->sk_txhash); + + if (txhash) { skb->l4_hash = 1; - skb->hash = sk->sk_txhash; + skb->hash = txhash; } }
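The sk_txhash change pairs a WRITE_ONCE() store with a READ_ONCE() load and keeps the loaded value in a local, so both uses see one consistent snapshot even when another CPU rewrites the field concurrently. A minimal sketch of that annotation pattern on an invented structure, not the sock code itself:

#include <linux/compiler.h>
#include <linux/types.h>

struct ex_flow {
        u32 hash;       /* written and read locklessly */
};

static void ex_set_hash(struct ex_flow *f, u32 h)
{
        WRITE_ONCE(f->hash, h);         /* pairs with READ_ONCE() below */
}

static bool ex_use_hash(const struct ex_flow *f, u32 *out)
{
        u32 h = READ_ONCE(f->hash);     /* load once, use twice */

        if (!h)
                return false;
        *out = h;
        return true;
}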
@@@ -2270,13 -2266,8 +2270,13 @@@ struct sk_buff *sock_dequeue_err_skb(st static inline int sock_error(struct sock *sk) { int err; - if (likely(!sk->sk_err)) + + /* Avoid an atomic operation for the common case. + * This is racy since another cpu/thread can change sk_err under us. + */ + if (likely(data_race(!sk->sk_err))) return 0; + err = xchg(&sk->sk_err, 0); return -err; } @@@ -2752,6 -2743,9 +2752,9 @@@ static inline bool sk_dev_equal_l3scope void sock_def_readable(struct sock *sk);
int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk); + void sock_set_timestamp(struct sock *sk, int optname, bool valbool); + int sock_set_timestamping(struct sock *sk, int optname, int val); + void sock_enable_timestamps(struct sock *sk); void sock_no_linger(struct sock *sk); void sock_set_keepalive(struct sock *sk); diff --combined net/9p/trans_virtio.c index 7bcaa46165fe,2bbd7dce0f1d..490a4c900339 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@@ -99,7 -99,7 +99,7 @@@ static unsigned int rest_of_page(void * * @client: client instance * * This reclaims a channel by freeing its resources and - * reseting its inuse flag. + * resetting its inuse flag. * */
@@@ -463,7 -463,7 +463,7 @@@ req_retry_pinned * For example TREAD have 11. * 11 is the read/write header = PDU Header(7) + IO Size (4). * Arrange in such a way that server places header in the - * alloced memory and payload onto the user buffer. + * allocated memory and payload onto the user buffer. */ in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, req->rc.sdata, in_hdr_len); @@@ -610,7 -610,7 +610,7 @@@ static int p9_virtio_probe(struct virti chan->vc_wq = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL); if (!chan->vc_wq) { err = -ENOMEM; - goto out_free_tag; + goto out_remove_file; } init_waitqueue_head(chan->vc_wq); chan->ring_bufs_avail = 1; @@@ -628,8 -628,6 +628,8 @@@
return 0;
+out_remove_file: + sysfs_remove_file(&vdev->dev.kobj, &dev_attr_mount_tag.attr); out_free_tag: kfree(tag); out_free_vq: @@@ -762,7 -760,7 +762,7 @@@ static struct p9_trans_module p9_virtio .cancelled = p9_virtio_cancelled, /* * We leave one entry for input and one entry for response - * headers. We also skip one more entry to accomodate, address + * headers. We also skip one more entry to accommodate, address * that are not at page boundary, that can result in an extra * page in zero copy. */ diff --combined net/batman-adv/bat_iv_ogm.c index fc8be49010b9,680def809838..12022378f892 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@@ -409,10 -409,8 +409,10 @@@ static void batadv_iv_ogm_emit(struct b if (WARN_ON(!forw_packet->if_outgoing)) return;
- if (WARN_ON(forw_packet->if_outgoing->soft_iface != soft_iface)) + if (forw_packet->if_outgoing->soft_iface != soft_iface) { + pr_warn("%s: soft interface switch for queued OGM\n", __func__); return; + }
if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE) return; @@@ -1851,6 -1849,8 +1851,8 @@@ batadv_iv_ogm_orig_dump_subentry(struc orig_node->orig) || nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN, neigh_node->addr) || + nla_put_string(msg, BATADV_ATTR_HARD_IFNAME, + neigh_node->if_incoming->net_dev->name) || nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX, neigh_node->if_incoming->net_dev->ifindex) || nla_put_u8(msg, BATADV_ATTR_TQ, tq_avg) || @@@ -2080,6 -2080,8 +2082,8 @@@ batadv_iv_ogm_neigh_dump_neigh(struct s
if (nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN, hardif_neigh->addr) || + nla_put_string(msg, BATADV_ATTR_HARD_IFNAME, + hardif_neigh->if_incoming->net_dev->name) || nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX, hardif_neigh->if_incoming->net_dev->ifindex) || nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, @@@ -2461,6 -2463,8 +2465,8 @@@ static int batadv_iv_gw_dump_entry(stru router->addr) || nla_put_string(msg, BATADV_ATTR_HARD_IFNAME, router->if_incoming->net_dev->name) || + nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX, + router->if_incoming->net_dev->ifindex) || nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_DOWN, gw_node->bandwidth_down) || nla_put_u32(msg, BATADV_ATTR_BANDWIDTH_UP, diff --combined net/bluetooth/smp.c index 7dd51da73845,93144e0c7efa..4d93c6c32a71 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@@ -40,7 -40,7 +40,7 @@@ ((struct smp_dev *)((struct l2cap_chan *)((hdev)->smp_data))->data)
/* Low-level debug macros to be used for stuff that we don't want - * accidentially in dmesg, i.e. the values of the various crypto keys + * accidentally in dmesg, i.e. the values of the various crypto keys * and the inputs & outputs of crypto functions. */ #ifdef DEBUG @@@ -560,7 -560,7 +560,7 @@@ int smp_generate_oob(struct hci_dev *hd return err;
/* This is unlikely, but we need to check that - * we didn't accidentially generate a debug key. + * we didn't accidentally generate a debug key. */ if (crypto_memneq(smp->local_pk, debug_pk, 64)) break; @@@ -1902,7 -1902,7 +1902,7 @@@ static u8 sc_send_public_key(struct smp return SMP_UNSPECIFIED;
/* This is unlikely, but we need to check that - * we didn't accidentially generate a debug key. + * we didn't accidentally generate a debug key. */ if (crypto_memneq(smp->local_pk, debug_pk, 64)) break; @@@ -3229,7 -3229,7 +3229,7 @@@ static inline struct l2cap_chan *smp_ne { struct l2cap_chan *chan;
- bt_dev_dbg(pchan->conn->hcon->hdev, "pchan %p", pchan); + BT_DBG("pchan %p", pchan);
chan = l2cap_chan_create(); if (!chan) @@@ -3250,7 -3250,7 +3250,7 @@@ */ atomic_set(&chan->nesting, L2CAP_NESTING_SMP);
- bt_dev_dbg(pchan->conn->hcon->hdev, "created chan %p", chan); + BT_DBG("created chan %p", chan);
return chan; } @@@ -3354,7 -3354,7 +3354,7 @@@ static void smp_del_chan(struct l2cap_c { struct smp_dev *smp;
- bt_dev_dbg(chan->conn->hcon->hdev, "chan %p", chan); + BT_DBG("chan %p", chan);
smp = chan->data; if (smp) { diff --combined net/bridge/br_private.h index e013d33f1c7c,ec661130c2d0..a684d0cfc58c --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@@ -90,8 -90,8 +90,8 @@@ struct bridge_mcast_stats #endif
struct br_tunnel_info { - __be64 tunnel_id; - struct metadata_dst *tunnel_dst; + __be64 tunnel_id; + struct metadata_dst __rcu *tunnel_dst; };
/* private vlan flags */ @@@ -307,16 -307,18 +307,18 @@@ struct net_bridge_port
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING struct bridge_mcast_own_query ip4_own_query; + struct timer_list ip4_mc_router_timer; + struct hlist_node ip4_rlist; #if IS_ENABLED(CONFIG_IPV6) struct bridge_mcast_own_query ip6_own_query; + struct timer_list ip6_mc_router_timer; + struct hlist_node ip6_rlist; #endif /* IS_ENABLED(CONFIG_IPV6) */ u32 multicast_eht_hosts_limit; u32 multicast_eht_hosts_cnt; unsigned char multicast_router; struct bridge_mcast_stats __percpu *mcast_stats; - struct timer_list multicast_router_timer; struct hlist_head mglist; - struct hlist_node rlist; #endif
#ifdef CONFIG_SYSFS @@@ -449,14 -451,16 +451,16 @@@ struct net_bridge
struct hlist_head mcast_gc_list; struct hlist_head mdb_list; - struct hlist_head router_list;
- struct timer_list multicast_router_timer; + struct hlist_head ip4_mc_router_list; + struct timer_list ip4_mc_router_timer; struct bridge_mcast_other_query ip4_other_query; struct bridge_mcast_own_query ip4_own_query; struct bridge_mcast_querier ip4_querier; struct bridge_mcast_stats __percpu *mcast_stats; #if IS_ENABLED(CONFIG_IPV6) + struct hlist_head ip6_mc_router_list; + struct timer_list ip6_mc_router_timer; struct bridge_mcast_other_query ip6_other_query; struct bridge_mcast_own_query ip6_own_query; struct bridge_mcast_querier ip6_querier; @@@ -864,11 -868,58 +868,58 @@@ static inline bool br_group_is_l2(cons #define mlock_dereference(X, br) \ rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
- static inline bool br_multicast_is_router(struct net_bridge *br) + static inline struct hlist_node * + br_multicast_get_first_rport_node(struct net_bridge *b, struct sk_buff *skb) { + #if IS_ENABLED(CONFIG_IPV6) + if (skb->protocol == htons(ETH_P_IPV6)) + return rcu_dereference(hlist_first_rcu(&b->ip6_mc_router_list)); + #endif + return rcu_dereference(hlist_first_rcu(&b->ip4_mc_router_list)); + } + + static inline struct net_bridge_port * + br_multicast_rport_from_node_skb(struct hlist_node *rp, struct sk_buff *skb) { + #if IS_ENABLED(CONFIG_IPV6) + if (skb->protocol == htons(ETH_P_IPV6)) + return hlist_entry_safe(rp, struct net_bridge_port, ip6_rlist); + #endif + return hlist_entry_safe(rp, struct net_bridge_port, ip4_rlist); + } + + static inline bool br_ip4_multicast_is_router(struct net_bridge *br) + { + return timer_pending(&br->ip4_mc_router_timer); + } + + static inline bool br_ip6_multicast_is_router(struct net_bridge *br) { - return br->multicast_router == 2 || - (br->multicast_router == 1 && - timer_pending(&br->multicast_router_timer)); + #if IS_ENABLED(CONFIG_IPV6) + return timer_pending(&br->ip6_mc_router_timer); + #else + return false; + #endif + } + + static inline bool + br_multicast_is_router(struct net_bridge *br, struct sk_buff *skb) + { + switch (br->multicast_router) { + case MDB_RTR_TYPE_PERM: + return true; + case MDB_RTR_TYPE_TEMP_QUERY: + if (skb) { + if (skb->protocol == htons(ETH_P_IP)) + return br_ip4_multicast_is_router(br); + else if (skb->protocol == htons(ETH_P_IPV6)) + return br_ip6_multicast_is_router(br); + } else { + return br_ip4_multicast_is_router(br) || + br_ip6_multicast_is_router(br); + } + fallthrough; + default: + return false; + } }
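Splitting the router state per address family means a forwarding-path caller with a packet in hand checks only the relevant family, while control-path callers pass a NULL skb and get the OR of both. An illustrative sketch of how the new rport helpers compose on a flood path (simplified; the caller is assumed to hold rcu_read_lock()):

	static void demo_flood_to_rports(struct net_bridge *br, struct sk_buff *skb)
	{
		struct net_bridge_port *port;
		struct hlist_node *rp;

		/* Picks the ip4 or ip6 router list based on skb->protocol */
		rp = br_multicast_get_first_rport_node(br, skb);
		while ((port = br_multicast_rport_from_node_skb(rp, skb))) {
			/* ... clone and queue skb on this router port ... */
			rp = rcu_dereference(hlist_next_rcu(rp));
		}
	}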
static inline bool @@@ -1017,7 -1068,8 +1068,8 @@@ static inline void br_multicast_flood(s { }
- static inline bool br_multicast_is_router(struct net_bridge *br) + static inline bool br_multicast_is_router(struct net_bridge *br, + struct sk_buff *skb) { return false; } diff --combined net/core/neighbour.c index bf774575ad71,2b2f333bcdfe..53e85c70c6e5 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@@ -238,7 -238,6 +238,7 @@@ static int neigh_forced_gc(struct neigh
write_lock(&n->lock); if ((n->nud_state == NUD_FAILED) || + (n->nud_state == NUD_NOARP) || (tbl->is_multicast && tbl->is_multicast(n->primary_key)) || time_after(tref, n->updated)) @@@ -3142,7 -3141,7 +3142,7 @@@ static struct pneigh_entry *pneigh_get_ struct net *net = seq_file_net(seq); struct neigh_table *tbl = state->tbl; struct pneigh_entry *pn = NULL; - int bucket = state->bucket; + int bucket;
state->flags |= NEIGH_SEQ_IS_PNEIGH; for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) { diff --combined net/core/netpoll.c index 9c49a38fa315,0a6b04714558..edfc0f8011f8 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@@ -36,6 -36,7 +36,7 @@@ #include <net/ip6_checksum.h> #include <asm/unaligned.h> #include <trace/events/napi.h> + #include <linux/kconfig.h>
/* * We maintain a small pool of fully-sized skbs, to make sure the @@@ -389,7 -390,8 +390,8 @@@ void netpoll_send_udp(struct netpoll *n static atomic_t ip_ident; struct ipv6hdr *ip6h;
- WARN_ON_ONCE(!irqs_disabled()); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + WARN_ON_ONCE(!irqs_disabled());
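Since IS_ENABLED() collapses to a compile-time 0/1, the warning above is compiled out entirely on PREEMPT_RT kernels, where this path may legitimately run with hard interrupts enabled, while non-RT builds keep exactly the check they had before. The idiom in isolation, as a sketch:

	static void demo_assert_hardirq_context(void)
	{
		/* Constant-folded: on CONFIG_PREEMPT_RT=y builds the branch
		 * and the WARN_ON_ONCE() vanish from the object code.
		 */
		if (!IS_ENABLED(CONFIG_PREEMPT_RT))
			WARN_ON_ONCE(!irqs_disabled());
	}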
udp_len = len + sizeof(*udph); if (np->ipv6) @@@ -428,7 -430,7 +430,7 @@@ ip6h = ipv6_hdr(skb);
/* ip6h->version = 6; ip6h->priority = 0; */ - put_unaligned(0x60, (unsigned char *)ip6h); + *(unsigned char *)ip6h = 0x60; ip6h->flow_lbl[0] = 0; ip6h->flow_lbl[1] = 0; ip6h->flow_lbl[2] = 0; @@@ -456,7 -458,7 +458,7 @@@ iph = ip_hdr(skb);
/* iph->version = 4; iph->ihl = 5; */ - put_unaligned(0x45, (unsigned char *)iph); + *(unsigned char *)iph = 0x45; iph->tos = 0; put_unaligned(htons(ip_len), &(iph->tot_len)); iph->id = htons(atomic_inc_return(&ip_ident)); diff --combined net/core/rtnetlink.c index ec931b080156,5baa86bca876..745965e49f78 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@@ -9,7 -9,7 +9,7 @@@ * Authors: Alexey Kuznetsov, kuznet@ms2.inr.ac.ru * * Fixes: - * Vitaly E. Lavrov RTA_OK arithmetics was wrong. + * Vitaly E. Lavrov RTA_OK arithmetic was wrong. */
#include <linux/bitops.h> @@@ -234,7 -234,7 +234,7 @@@ unlock * @msgtype: rtnetlink message type * @doit: Function pointer called for each request message * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message - * @flags: rtnl_link_flags to modifiy behaviour of doit/dumpit functions + * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions * * Like rtnl_register, but for use by removable modules. */ @@@ -254,7 -254,7 +254,7 @@@ EXPORT_SYMBOL_GPL(rtnl_register_module) * @msgtype: rtnetlink message type * @doit: Function pointer called for each request message * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message - * @flags: rtnl_link_flags to modifiy behaviour of doit/dumpit functions + * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions * * Registers the specified function pointers (at least one of them has * to be non-NULL) to be called whenever a request message for the @@@ -376,12 -376,12 +376,12 @@@ int __rtnl_link_register(struct rtnl_li if (rtnl_link_ops_get(ops->kind)) return -EEXIST;
- /* The check for setup is here because if ops + /* The check for alloc/setup is here because if ops * does not have that filled up, it is not possible * to use the ops for creating device. So do not * fill up dellink as well. That disables rtnl_dellink. */ - if (ops->setup && !ops->dellink) + if ((ops->alloc || ops->setup) && !ops->dellink) ops->dellink = unregister_netdevice_queue;
list_add_tail(&ops->list, &link_ops); @@@ -543,7 -543,9 +543,9 @@@ static const struct rtnl_af_ops *rtnl_a { const struct rtnl_af_ops *ops;
- list_for_each_entry_rcu(ops, &rtnl_af_ops, list) { + ASSERT_RTNL(); + + list_for_each_entry(ops, &rtnl_af_ops, list) { if (ops->family == family) return ops; } @@@ -1819,6 -1821,16 +1821,16 @@@ static int rtnl_fill_ifinfo(struct sk_b if (rtnl_fill_prop_list(skb, dev)) goto nla_put_failure;
+ if (dev->dev.parent && + nla_put_string(skb, IFLA_PARENT_DEV_NAME, + dev_name(dev->dev.parent))) + goto nla_put_failure; + + if (dev->dev.parent && dev->dev.parent->bus && + nla_put_string(skb, IFLA_PARENT_DEV_BUS_NAME, + dev->dev.parent->bus->name)) + goto nla_put_failure; + nlmsg_end(skb, nlh); return 0;
@@@ -1878,6 -1890,7 +1890,7 @@@ static const struct nla_policy ifla_pol [IFLA_PERM_ADDRESS] = { .type = NLA_REJECT }, [IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED }, [IFLA_NEW_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1), + [IFLA_PARENT_DEV_NAME] = { .type = NLA_NUL_STRING }, };
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { @@@ -2274,27 -2287,18 +2287,18 @@@ static int validate_linkmsg(struct net_ nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { const struct rtnl_af_ops *af_ops;
- rcu_read_lock(); af_ops = rtnl_af_lookup(nla_type(af)); - if (!af_ops) { - rcu_read_unlock(); + if (!af_ops) return -EAFNOSUPPORT; - }
- if (!af_ops->set_link_af) { - rcu_read_unlock(); + if (!af_ops->set_link_af) return -EOPNOTSUPP; - }
if (af_ops->validate_link_af) { err = af_ops->validate_link_af(dev, af); - if (err < 0) { - rcu_read_unlock(); + if (err < 0) return err; - } } - - rcu_read_unlock(); } }
@@@ -2574,7 -2578,7 +2578,7 @@@ static int do_set_proto_down(struct net if (nl_proto_down) { proto_down = nla_get_u8(nl_proto_down);
- /* Dont turn off protodown if there are active reasons */ + /* Don't turn off protodown if there are active reasons */ if (!proto_down && dev->proto_down_reason) { NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons"); return -EBUSY; @@@ -2868,17 -2872,12 +2872,12 @@@ static int do_setlink(const struct sk_b nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) { const struct rtnl_af_ops *af_ops;
- rcu_read_lock(); - BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
err = af_ops->set_link_af(dev, af, extack); - if (err < 0) { - rcu_read_unlock(); + if (err < 0) goto errout; - }
- rcu_read_unlock(); status |= DO_SETLINK_NOTIFY; } } @@@ -3177,8 -3176,17 +3176,17 @@@ struct net_device *rtnl_create_link(str return ERR_PTR(-EINVAL); }
- dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type, - ops->setup, num_tx_queues, num_rx_queues); + if (ops->alloc) { + dev = ops->alloc(tb, ifname, name_assign_type, + num_tx_queues, num_rx_queues); + if (IS_ERR(dev)) + return dev; + } else { + dev = alloc_netdev_mqs(ops->priv_size, ifname, + name_assign_type, ops->setup, + num_tx_queues, num_rx_queues); + } + if (!dev) return ERR_PTR(-ENOMEM);
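With the new ops->alloc() hook, a link type can size and allocate its own net_device from the netlink attributes instead of relying on the generic priv_size/setup path; NULL and ERR_PTR() returns are both handled by the caller above. A hedged sketch of such an ops, using hypothetical 'demo' names:

	struct demo_priv {
		int placeholder;
	};

	static void demo_setup(struct net_device *dev)
	{
		/* ether_setup(dev), netdev ops wiring, etc. */
	}

	static struct net_device *demo_alloc(struct nlattr *tb[],
					     const char *ifname,
					     unsigned char name_assign_type,
					     unsigned int num_tx_queues,
					     unsigned int num_rx_queues)
	{
		/* tb[] may be inspected here to choose sizes up front */
		return alloc_netdev_mqs(sizeof(struct demo_priv), ifname,
					name_assign_type, demo_setup,
					num_tx_queues, num_rx_queues);
	}

	static struct rtnl_link_ops demo_link_ops __read_mostly = {
		.kind	= "demo",
		.alloc	= demo_alloc,
		/* .dellink is filled in by __rtnl_link_register() if unset */
	};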
@@@ -3411,7 -3419,7 +3419,7 @@@ replay return -EOPNOTSUPP; }
- if (!ops->setup) + if (!ops->alloc && !ops->setup) return -EOPNOTSUPP;
if (!ifname[0]) { @@@ -4842,12 -4850,10 +4850,12 @@@ static int rtnl_bridge_notify(struct ne if (err < 0) goto errout;
- if (!skb->len) { - err = -EINVAL; + /* Notification info is only filled for bridge ports, not the bridge + * device itself. Therefore, a zero notification length is valid and + * should not result in an error. + */ + if (!skb->len) goto errout; - }
rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC); return 0; diff --combined net/core/skbuff.c index bbc3b4b62032,a0b1d4847efe..2531ac4ffa69 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@@ -70,6 -70,7 +70,7 @@@ #include <net/xfrm.h> #include <net/mpls.h> #include <net/mptcp.h> + #include <net/page_pool.h>
#include <linux/uaccess.h> #include <trace/events/skb.h> @@@ -645,10 -646,13 +646,13 @@@ static void skb_free_head(struct sk_buf { unsigned char *head = skb->head;
- if (skb->head_frag) + if (skb->head_frag) { + if (skb_pp_recycle(skb, head)) + return; skb_free_frag(head); - else + } else { kfree(head); + } }
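The recycling test in the new head-freeing branch has to be cheap and conservative: only skbs explicitly marked by the driver are considered, and the page itself must still carry the page_pool signature. A sketch consistent with the skb_pp_recycle() call site above (the exact helper body lives elsewhere in this series):

	static bool demo_try_recycle_head(struct sk_buff *skb, void *head)
	{
		/* Bail out cheaply: pool support compiled out, or the
		 * driver never marked this skb as page_pool backed.
		 */
		if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
			return false;
		/* Verifies page->pp_magic and, on a match, returns the
		 * page to its originating pool instead of the allocator.
		 */
		return page_pool_return_skb_page(virt_to_head_page(head));
	}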
static void skb_release_data(struct sk_buff *skb) @@@ -664,7 -668,7 +668,7 @@@ skb_zcopy_clear(skb, true);
for (i = 0; i < shinfo->nr_frags; i++) - __skb_frag_unref(&shinfo->frags[i]); + __skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);
if (shinfo->frag_list) kfree_skb_list(shinfo->frag_list); @@@ -1046,6 -1050,7 +1050,7 @@@ static struct sk_buff *__skb_clone(stru n->nohdr = 0; n->peeked = 0; C(pfmemalloc); + C(pp_recycle); n->destructor = NULL; C(tail); C(end); @@@ -1253,7 -1258,6 +1258,7 @@@ static void __msg_zerocopy_callback(str struct sock *sk = skb->sk; struct sk_buff_head *q; unsigned long flags; + bool is_zerocopy; u32 lo, hi; u16 len;
@@@ -1268,7 -1272,6 +1273,7 @@@ len = uarg->len; lo = uarg->id; hi = uarg->id + len - 1; + is_zerocopy = uarg->zerocopy;
serr = SKB_EXT_ERR(skb); memset(serr, 0, sizeof(*serr)); @@@ -1276,7 -1279,7 +1281,7 @@@ serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY; serr->ee.ee_data = hi; serr->ee.ee_info = lo; - if (!uarg->zerocopy) + if (!is_zerocopy) serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
q = &sk->sk_error_queue; @@@ -3497,7 -3500,7 +3502,7 @@@ int skb_shift(struct sk_buff *tgt, stru fragto = &skb_shinfo(tgt)->frags[merge];
skb_frag_size_add(fragto, skb_frag_size(fragfrom)); - __skb_frag_unref(fragfrom); + __skb_frag_unref(fragfrom, skb->pp_recycle); }
/* Reposition in the original skb */ @@@ -5287,6 -5290,13 +5292,13 @@@ bool skb_try_coalesce(struct sk_buff *t if (skb_cloned(to)) return false;
+ /* The page pool signature of struct page will eventually figure out + * which pages can be recycled or not but for now let's prohibit slab + * allocated and page_pool allocated SKBs from being coalesced. + */ + if (to->pp_recycle != from->pp_recycle) + return false; + if (len <= skb_tailroom(to)) { if (len) BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); diff --combined net/ipv4/af_inet.c index 2f94d221c00e,750f388a4a68..54648181dd56 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@@ -318,7 -318,7 +318,7 @@@ lookup_protocol
WARN_ON(!answer_prot->slab);
- err = -ENOBUFS; + err = -ENOMEM; sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, kern); if (!sk) goto out; @@@ -575,7 -575,7 +575,7 @@@ int inet_dgram_connect(struct socket *s return err; }
- if (!inet_sk(sk)->inet_num && inet_autobind(sk)) + if (data_race(!inet_sk(sk)->inet_num) && inet_autobind(sk)) return -EAGAIN; return sk->sk_prot->connect(sk, uaddr, addr_len); } @@@ -803,7 -803,7 +803,7 @@@ int inet_send_prepare(struct sock *sk sock_rps_record_flow(sk);
/* We may need to bind the socket. */ - if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind && + if (data_race(!inet_sk(sk)->inet_num) && !sk->sk_prot->no_autobind && inet_autobind(sk)) return -EAGAIN;
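data_race(), used here and in the sock_error() change earlier, documents that the lockless read is a deliberate heuristic: a stale inet_num only costs an extra trip into inet_autobind(), which rechecks under lock_sock() before acting. The annotate-then-recheck idiom in isolation, with illustrative naming:

	static bool demo_needs_autobind(struct sock *sk)
	{
		/* Lockless peek: a stale answer is harmless because the
		 * slow path retakes lock_sock() and rechecks inet_num
		 * before actually binding.
		 */
		return data_race(!inet_sk(sk)->inet_num);
	}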
@@@ -1720,7 -1720,6 +1720,6 @@@ EXPORT_SYMBOL_GPL(snmp_fold_field64) #ifdef CONFIG_IP_MULTICAST static const struct net_protocol igmp_protocol = { .handler = igmp_rcv, - .netns_ok = 1, }; #endif
@@@ -1733,7 -1732,6 +1732,6 @@@ static struct net_protocol tcp_protoco .handler = tcp_v4_rcv, .err_handler = tcp_v4_err, .no_policy = 1, - .netns_ok = 1, .icmp_strict_tag_validation = 1, };
@@@ -1746,14 -1744,12 +1744,12 @@@ static struct net_protocol udp_protoco .handler = udp_rcv, .err_handler = udp_err, .no_policy = 1, - .netns_ok = 1, };
static const struct net_protocol icmp_protocol = { .handler = icmp_rcv, .err_handler = icmp_err, .no_policy = 1, - .netns_ok = 1, };
static __net_init int ipv4_mib_init_net(struct net *net) diff --combined net/ipv4/cipso_ipv4.c index e0480c6cebaa,d6e3a92841e3..099259fc826a --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@@ -187,8 -187,7 +187,7 @@@ static int __init cipso_v4_cache_init(v * cipso_v4_cache_invalidate - Invalidates the current CIPSO cache * * Description: - * Invalidates and frees any entries in the CIPSO cache. Returns zero on - * success and negative values on failure. + * Invalidates and frees any entries in the CIPSO cache. * */ void cipso_v4_cache_invalidate(void) @@@ -472,7 -471,6 +471,7 @@@ void cipso_v4_doi_free(struct cipso_v4_ kfree(doi_def->map.std->lvl.local); kfree(doi_def->map.std->cat.cipso); kfree(doi_def->map.std->cat.local); + kfree(doi_def->map.std); break; } kfree(doi_def); diff --combined net/ipv4/devinet.c index 1c6429c353a9,50deeff48c8b..73721a4448bd --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@@ -1955,7 -1955,7 +1955,7 @@@ static int inet_validate_link_af(const struct nlattr *a, *tb[IFLA_INET_MAX+1]; int err, rem;
- if (dev && !__in_dev_get_rcu(dev)) + if (dev && !__in_dev_get_rtnl(dev)) return -EAFNOSUPPORT;
err = nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, @@@ -1981,7 -1981,7 +1981,7 @@@ static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla, struct netlink_ext_ack *extack) { - struct in_device *in_dev = __in_dev_get_rcu(dev); + struct in_device *in_dev = __in_dev_get_rtnl(dev); struct nlattr *a, *tb[IFLA_INET_MAX+1]; int rem;
@@@ -1989,7 -1989,7 +1989,7 @@@ return -EAFNOSUPPORT;
if (nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0) - BUG(); + return -EINVAL;
if (tb[IFLA_INET_CONF]) { nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) diff --combined net/ipv4/route.c index 6a36ac98476f,a4c477475f4c..66aacb939d3e --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@@ -1906,13 -1906,128 +1906,128 @@@ out hash_keys->addrs.v4addrs.dst = key_iph->daddr; }
+ static u32 fib_multipath_custom_hash_outer(const struct net *net, + const struct sk_buff *skb, + bool *p_has_inner) + { + u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields; + struct flow_keys keys, hash_keys; + + if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK)) + return 0; + + memset(&hash_keys, 0, sizeof(hash_keys)); + skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP); + + hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; + if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) + hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src; + if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) + hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst; + if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO) + hash_keys.basic.ip_proto = keys.basic.ip_proto; + if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT) + hash_keys.ports.src = keys.ports.src; + if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT) + hash_keys.ports.dst = keys.ports.dst; + + *p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION); + return flow_hash_from_keys(&hash_keys); + } + + static u32 fib_multipath_custom_hash_inner(const struct net *net, + const struct sk_buff *skb, + bool has_inner) + { + u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields; + struct flow_keys keys, hash_keys; + + /* We assume the packet carries an encapsulation, but if none was + * encountered during dissection of the outer flow, then there is no + * point in calling the flow dissector again. + */ + if (!has_inner) + return 0; + + if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK)) + return 0; + + memset(&hash_keys, 0, sizeof(hash_keys)); + skb_flow_dissect_flow_keys(skb, &keys, 0); + + if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION)) + return 0; + + if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; + if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) + hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src; + if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) + hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst; + } else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; + if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) + hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src; + if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) + hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst; + if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL) + hash_keys.tags.flow_label = keys.tags.flow_label; + } + + if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO) + hash_keys.basic.ip_proto = keys.basic.ip_proto; + if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT) + hash_keys.ports.src = keys.ports.src; + if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT) + hash_keys.ports.dst = keys.ports.dst; + + return flow_hash_from_keys(&hash_keys); + } + + static u32 fib_multipath_custom_hash_skb(const struct net *net, + const struct sk_buff *skb) + { + u32 mhash, mhash_inner; + bool has_inner = true; + + mhash = fib_multipath_custom_hash_outer(net, skb, &has_inner); + mhash_inner = fib_multipath_custom_hash_inner(net, skb, has_inner); + + return jhash_2words(mhash, mhash_inner, 0); + } +
+ static u32 fib_multipath_custom_hash_fl4(const struct net *net, + const struct flowi4 *fl4) + { + u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields; + struct flow_keys hash_keys; + + if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK)) + return 0; + + memset(&hash_keys, 0, sizeof(hash_keys)); + hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; + if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) + hash_keys.addrs.v4addrs.src = fl4->saddr; + if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) + hash_keys.addrs.v4addrs.dst = fl4->daddr; + if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO) + hash_keys.basic.ip_proto = fl4->flowi4_proto; + if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT) + hash_keys.ports.src = fl4->fl4_sport; + if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT) + hash_keys.ports.dst = fl4->fl4_dport; + + return flow_hash_from_keys(&hash_keys); + } + /* if skb is set it will be used and fl4 can be NULL */ int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4, const struct sk_buff *skb, struct flow_keys *flkeys) { u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0; struct flow_keys hash_keys; - u32 mhash; + u32 mhash = 0;
switch (net->ipv4.sysctl_fib_multipath_hash_policy) { case 0: @@@ -1924,6 -2039,7 +2039,7 @@@ hash_keys.addrs.v4addrs.src = fl4->saddr; hash_keys.addrs.v4addrs.dst = fl4->daddr; } + mhash = flow_hash_from_keys(&hash_keys); break; case 1: /* skb is currently provided only when forwarding */ @@@ -1957,6 -2073,7 +2073,7 @@@ hash_keys.ports.dst = fl4->fl4_dport; hash_keys.basic.ip_proto = fl4->flowi4_proto; } + mhash = flow_hash_from_keys(&hash_keys); break; case 2: memset(&hash_keys, 0, sizeof(hash_keys)); @@@ -1987,9 -2104,15 +2104,15 @@@ hash_keys.addrs.v4addrs.src = fl4->saddr; hash_keys.addrs.v4addrs.dst = fl4->daddr; } + mhash = flow_hash_from_keys(&hash_keys); + break; + case 3: + if (skb) + mhash = fib_multipath_custom_hash_skb(net, skb); + else + mhash = fib_multipath_custom_hash_fl4(net, fl4); break; } - mhash = flow_hash_from_keys(&hash_keys);
if (multipath_hash) mhash = jhash_2words(mhash, multipath_hash, 0); @@@ -2056,19 -2179,6 +2179,19 @@@ martian_source return err; }
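Policy 3 above derives the hash purely from the administrator-chosen bitmask in net.ipv4.fib_multipath_hash_fields, so the same forwarding path can pin flows per host pair or spread them per 4-tuple depending on configuration. A runnable userspace toy illustrating mask-driven field selection (the bit positions follow the documented layout, src-IP = bit 0 through dst-port = bit 5; the hash itself is a stand-in, not flow_hash_from_keys()):

	#include <stdint.h>
	#include <stdio.h>

	#define HASH_FIELD_SRC_IP   (1u << 0)
	#define HASH_FIELD_DST_IP   (1u << 1)
	#define HASH_FIELD_SRC_PORT (1u << 4)
	#define HASH_FIELD_DST_PORT (1u << 5)

	struct flow {
		uint32_t saddr, daddr;
		uint16_t sport, dport;
	};

	static uint32_t toy_hash(const struct flow *f, uint32_t fields)
	{
		uint32_t h = 0;

		if (fields & HASH_FIELD_SRC_IP)
			h ^= f->saddr * 2654435761u;	/* multiplicative mix */
		if (fields & HASH_FIELD_DST_IP)
			h ^= f->daddr * 2246822519u;
		if (fields & HASH_FIELD_SRC_PORT)
			h ^= (uint32_t)f->sport * 40503u;
		if (fields & HASH_FIELD_DST_PORT)
			h ^= (uint32_t)f->dport * 31u;
		return h;
	}

	int main(void)
	{
		struct flow a = { 0x0a000001, 0x0a000002, 1111, 80 };
		struct flow b = { 0x0a000001, 0x0a000002, 2222, 80 };

		/* 0x03 = addresses only: both flows pick the same nexthop */
		printf("%u %u\n", toy_hash(&a, 0x03), toy_hash(&b, 0x03));
		/* 0x33 adds ports: the flows can now spread across paths */
		printf("%u %u\n", toy_hash(&a, 0x33), toy_hash(&b, 0x33));
		return 0;
	}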
+/* get device for dst_alloc with local routes */ +static struct net_device *ip_rt_get_dev(struct net *net, + const struct fib_result *res) +{ + struct fib_nh_common *nhc = res->fi ? res->nhc : NULL; + struct net_device *dev = NULL; + + if (nhc) + dev = l3mdev_master_dev_rcu(nhc->nhc_dev); + + return dev ? : net->loopback_dev; +} + /* * NOTE. We drop all the packets that has local source * addresses, because every properly looped back packet @@@ -2225,7 -2335,7 +2348,7 @@@ local_input } }
- rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev, + rth = rt_dst_alloc(ip_rt_get_dev(net, res), flags | RTCF_LOCAL, res->type, IN_DEV_ORCONF(in_dev, NOPOLICY), false); if (!rth) diff --combined net/ipv6/addrconf.c index 701eb82acd1c,048570900fdf..3bf685fe64b9 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@@ -5827,7 -5827,7 +5827,7 @@@ static int inet6_set_link_af(struct net return -EAFNOSUPPORT;
if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0) - BUG(); + return -EINVAL;
if (tb[IFLA_INET6_TOKEN]) { err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]), @@@ -6903,10 -6903,10 +6903,10 @@@ static const struct ctl_table addrconf_ .proc_handler = proc_dointvec, }, { - .procname = "addr_gen_mode", - .data = &ipv6_devconf.addr_gen_mode, - .maxlen = sizeof(int), - .mode = 0644, + .procname = "addr_gen_mode", + .data = &ipv6_devconf.addr_gen_mode, + .maxlen = sizeof(int), + .mode = 0644, .proc_handler = addrconf_sysctl_addr_gen_mode, }, { diff --combined net/mptcp/protocol.c index 632350018fb6,993095089990..ff1c779ae945 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@@ -39,10 -39,15 +39,15 @@@ struct mptcp_skb_cb u64 map_seq; u64 end_seq; u32 offset; + u8 has_rxtstamp:1; };
#define MPTCP_SKB_CB(__skb) ((struct mptcp_skb_cb *)&((__skb)->cb[0]))
+ enum { + MPTCP_CMSG_TS = BIT(0), + }; + static struct percpu_counter mptcp_sockets_allocated;
static void __mptcp_destroy_sock(struct sock *sk); @@@ -272,6 -277,7 +277,7 @@@ static bool __mptcp_move_skb(struct mpt struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); struct sock *sk = (struct sock *)msk; struct sk_buff *tail; + bool has_rxtstamp;
__skb_unlink(skb, &ssk->sk_receive_queue);
@@@ -280,15 -286,15 +286,17 @@@
/* try to fetch required memory from subflow */ if (!sk_rmem_schedule(sk, skb, skb->truesize)) { - if (ssk->sk_forward_alloc < skb->truesize) - goto drop; - __sk_mem_reclaim(ssk, skb->truesize); - if (!sk_rmem_schedule(sk, skb, skb->truesize)) + int amount = sk_mem_pages(skb->truesize) << SK_MEM_QUANTUM_SHIFT; + + if (ssk->sk_forward_alloc < amount) goto drop; + + ssk->sk_forward_alloc -= amount; + sk->sk_forward_alloc += amount; }
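The rewritten reclaim moves forward-allocated memory between sockets in whole SK_MEM_QUANTUM (page-sized) units: sk_mem_pages() rounds skb->truesize up to pages, that many bytes are debited from the subflow and credited to the msk, and the earlier reclaim-then-retry dance disappears. A worked sketch of the rounding, assuming the usual 4 KiB quantum:

	static int demo_mem_amount(unsigned int truesize)
	{
		/* truesize = 1300 -> sk_mem_pages() = 1 page -> 4096 bytes
		 * are transferred, so the msk is guaranteed to have at
		 * least truesize available for this skb.
		 */
		return ((truesize + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT)
			<< SK_MEM_QUANTUM_SHIFT;
	}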
+ has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp; + /* the skb map_seq accounts for the skb offset: * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq * value @@@ -296,6 -302,7 +304,7 @@@ MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow); MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len; MPTCP_SKB_CB(skb)->offset = offset; + MPTCP_SKB_CB(skb)->has_rxtstamp = has_rxtstamp;
if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) { /* in sequence */ @@@ -670,22 -677,18 +679,22 @@@ static bool __mptcp_ofo_queue(struct mp /* In most cases we will be able to lock the mptcp socket. If its already * owned, we need to defer to the work queue to avoid ABBA deadlock. */ -static void move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk) +static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk) { struct sock *sk = (struct sock *)msk; unsigned int moved = 0;
if (inet_sk_state_load(sk) == TCP_CLOSE) - return; - - mptcp_data_lock(sk); + return false;
__mptcp_move_skbs_from_subflow(msk, ssk, &moved); __mptcp_ofo_queue(msk); + if (unlikely(ssk->sk_err)) { + if (!sock_owned_by_user(sk)) + __mptcp_error_report(sk); + else + set_bit(MPTCP_ERROR_REPORT, &msk->flags); + }
/* If the moves have caught up with the DATA_FIN sequence number * it's time to ack the DATA_FIN and change socket state, but @@@ -694,7 -697,7 +703,7 @@@ */ if (mptcp_pending_data_fin(sk, NULL)) mptcp_schedule_work(sk); - mptcp_data_unlock(sk); + return moved > 0; }
void mptcp_data_ready(struct sock *sk, struct sock *ssk) @@@ -702,6 -705,7 +711,6 @@@ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); struct mptcp_sock *msk = mptcp_sk(sk); int sk_rbuf, ssk_rbuf; - bool wake;
/* The peer can send data while we are shutting down this * subflow at msk destruction time, but we must avoid enqueuing @@@ -710,22 -714,28 +719,22 @@@ if (unlikely(subflow->disposable)) return;
- /* move_skbs_to_msk below can legitly clear the data_avail flag, - * but we will need later to properly woke the reader, cache its - * value - */ - wake = subflow->data_avail == MPTCP_SUBFLOW_DATA_AVAIL; - if (wake) - set_bit(MPTCP_DATA_READY, &msk->flags); - ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf); sk_rbuf = READ_ONCE(sk->sk_rcvbuf); if (unlikely(ssk_rbuf > sk_rbuf)) sk_rbuf = ssk_rbuf;
- /* over limit? can't append more skbs to msk */ + /* over limit? can't append more skbs to msk; also, no need to wake up */ if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf) - goto wake; - - move_skbs_to_msk(msk, ssk); + return;
-wake: - if (wake) + /* Wake up the reader only for in-sequence data */ + mptcp_data_lock(sk); + if (move_skbs_to_msk(msk, ssk)) { + set_bit(MPTCP_DATA_READY, &msk->flags); sk->sk_data_ready(sk); + } + mptcp_data_unlock(sk); }
static bool mptcp_do_flush_join_list(struct mptcp_sock *msk) @@@ -857,7 -867,7 +866,7 @@@ static struct sock *mptcp_subflow_recv_ sock_owned_by_me(sk);
mptcp_for_each_subflow(msk, subflow) { - if (subflow->data_avail) + if (READ_ONCE(subflow->data_avail)) return mptcp_subflow_tcp_sock(subflow); }
@@@ -1770,7 -1780,9 +1779,9 @@@ static void mptcp_wait_data(struct soc
static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk, struct msghdr *msg, - size_t len, int flags) + size_t len, int flags, + struct scm_timestamping_internal *tss, + int *cmsg_flags) { struct sk_buff *skb, *tmp; int copied = 0; @@@ -1790,6 -1802,11 +1801,11 @@@ } }
+ if (MPTCP_SKB_CB(skb)->has_rxtstamp) { + tcp_update_recv_tstamps(skb, tss); + *cmsg_flags |= MPTCP_CMSG_TS; + } + copied += count;
if (count < data_len) { @@@ -1954,9 -1971,6 +1970,9 @@@ static bool __mptcp_move_skbs(struct mp done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved); mptcp_data_unlock(sk); tcp_cleanup_rbuf(ssk, moved); + + if (unlikely(ssk->sk_err)) + __mptcp_error_report(sk); unlock_sock_fast(ssk, slowpath); } while (!done);
@@@ -1980,7 -1994,8 +1996,8 @@@ static int mptcp_recvmsg(struct sock *s int nonblock, int flags, int *addr_len) { struct mptcp_sock *msk = mptcp_sk(sk); - int copied = 0; + struct scm_timestamping_internal tss; + int copied = 0, cmsg_flags = 0; int target; long timeo;
@@@ -2002,7 -2017,7 +2019,7 @@@ while (copied < len) { int bytes_read;
- bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied, flags); + bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied, flags, &tss, &cmsg_flags); if (unlikely(bytes_read < 0)) { if (!copied) copied = bytes_read; @@@ -2083,6 -2098,11 +2100,11 @@@ set_bit(MPTCP_DATA_READY, &msk->flags); } out_err: + if (cmsg_flags && copied >= 0) { + if (cmsg_flags & MPTCP_CMSG_TS) + tcp_recv_timestamp(msg, sk, &tss); + } + pr_debug("msk=%p data_ready=%d rx queue empty=%d copied=%d", msk, test_bit(MPTCP_DATA_READY, &msk->flags), skb_queue_empty_lockless(&sk->sk_receive_queue), copied); diff --combined net/mptcp/protocol.h index 385796f0ef19,89f6b73783d5..454f55e875f9 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@@ -362,6 -362,7 +362,6 @@@ mptcp_subflow_rsk(const struct request_ enum mptcp_data_avail { MPTCP_SUBFLOW_NODATA, MPTCP_SUBFLOW_DATA_AVAIL, - MPTCP_SUBFLOW_OOO_DATA };
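With the has_rxtstamp plumbing above, an MPTCP socket can hand receive timestamps to userspace through the same cmsg interface as plain TCP. A hedged userspace sketch (standard socket API; the fd is assumed to come from socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP), and error handling is trimmed):

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <stdio.h>
	#include <string.h>
	#include <time.h>

	#ifndef SCM_TIMESTAMPNS
	#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
	#endif

	static void demo_recv_with_tstamp(int fd)
	{
		char data[2048], ctrl[CMSG_SPACE(sizeof(struct timespec))];
		struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
		};
		struct cmsghdr *cm;
		int one = 1;

		setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPNS, &one, sizeof(one));
		if (recvmsg(fd, &msg, 0) < 0)
			return;
		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
			if (cm->cmsg_level == SOL_SOCKET &&
			    cm->cmsg_type == SCM_TIMESTAMPNS) {
				struct timespec ts;

				memcpy(&ts, CMSG_DATA(cm), sizeof(ts));
				printf("rx at %lld.%09ld\n",
				       (long long)ts.tv_sec, ts.tv_nsec);
			}
		}
	}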
struct mptcp_delegated_action { @@@ -626,6 -627,8 +626,8 @@@ static inline void mptcp_write_space(st
void mptcp_destroy_common(struct mptcp_sock *msk);
+ #define MPTCP_TOKEN_MAX_RETRIES 4 + void __init mptcp_token_init(void); static inline void mptcp_token_init_request(struct request_sock *req) { diff --combined net/mptcp/subflow.c index be1de4084196,33956337c46b..8889804d29c7 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@@ -162,7 -162,7 +162,7 @@@ static int subflow_check_req(struct req }
if (mp_opt.mp_capable && listener->request_mptcp) { - int err, retries = 4; + int err, retries = MPTCP_TOKEN_MAX_RETRIES;
subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq; again: @@@ -430,15 -430,15 +430,15 @@@ static void subflow_finish_connect(stru goto do_reset; }
+ if (!mptcp_finish_join(sk)) + goto do_reset; + subflow_generate_hmac(subflow->local_key, subflow->remote_key, subflow->local_nonce, subflow->remote_nonce, hmac); memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);
- if (!mptcp_finish_join(sk)) - goto do_reset; - subflow->mp_join = 1; MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
@@@ -784,10 -784,10 +784,10 @@@ static u64 expand_seq(u64 old_seq, u16 return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32)); }
-static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn) +static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn) { - WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d", - ssn, subflow->map_subflow_seq, subflow->map_data_len); + pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d", + ssn, subflow->map_subflow_seq, subflow->map_data_len); }
static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb) @@@ -812,13 -812,13 +812,13 @@@ static bool validate_mapping(struct soc /* Mapping covers data later in the subflow stream, * currently unsupported. */ - warn_bad_map(subflow, ssn); + dbg_bad_map(subflow, ssn); return false; } if (unlikely(!before(ssn, subflow->map_subflow_seq + subflow->map_data_len))) { /* Mapping does covers past subflow data, invalid */ - warn_bad_map(subflow, ssn + skb->len); + dbg_bad_map(subflow, ssn); return false; } return true; @@@ -1000,7 -1000,7 +1000,7 @@@ static bool subflow_check_data_avail(st struct sk_buff *skb;
if (!skb_peek(&ssk->sk_receive_queue)) - subflow->data_avail = 0; + WRITE_ONCE(subflow->data_avail, 0); if (subflow->data_avail) return true;
@@@ -1039,13 -1039,18 +1039,13 @@@ ack_seq = mptcp_subflow_get_mapped_dsn(subflow); pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack, ack_seq); - if (ack_seq == old_ack) { - subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL; - break; - } else if (after64(ack_seq, old_ack)) { - subflow->data_avail = MPTCP_SUBFLOW_OOO_DATA; - break; + if (unlikely(before64(ack_seq, old_ack))) { + mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq); + continue; }
- /* only accept in-sequence mapping. Old values are spurious - * retransmission - */ - mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq); + WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL); + break; } return true;
@@@ -1060,11 -1065,12 +1060,11 @@@ fallback * subflow_error_report() will introduce the appropriate barriers */ ssk->sk_err = EBADMSG; - ssk->sk_error_report(ssk); tcp_set_state(ssk, TCP_CLOSE); subflow->reset_transient = 0; subflow->reset_reason = MPTCP_RST_EMPTCP; tcp_send_active_reset(ssk, GFP_ATOMIC); - subflow->data_avail = 0; + WRITE_ONCE(subflow->data_avail, 0); return false; }
@@@ -1074,7 -1080,7 +1074,7 @@@ subflow->map_seq = READ_ONCE(msk->ack_seq); subflow->map_data_len = skb->len; subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset; - subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL; + WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL); return true; }
@@@ -1086,7 -1092,7 +1086,7 @@@ bool mptcp_subflow_data_available(struc if (subflow->map_valid && mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) { subflow->map_valid = 0; - subflow->data_avail = 0; + WRITE_ONCE(subflow->data_avail, 0);
pr_debug("Done with mapping: seq=%u data_len=%u", subflow->map_subflow_seq, @@@ -1114,6 -1120,41 +1114,6 @@@ void mptcp_space(const struct sock *ssk *full_space = tcp_full_space(sk); }
-static void subflow_data_ready(struct sock *sk) -{ - struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); - u16 state = 1 << inet_sk_state_load(sk); - struct sock *parent = subflow->conn; - struct mptcp_sock *msk; - - msk = mptcp_sk(parent); - if (state & TCPF_LISTEN) { - /* MPJ subflow are removed from accept queue before reaching here, - * avoid stray wakeups - */ - if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue)) - return; - - set_bit(MPTCP_DATA_READY, &msk->flags); - parent->sk_data_ready(parent); - return; - } - - WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable && - !subflow->mp_join && !(state & TCPF_CLOSE)); - - if (mptcp_subflow_data_available(sk)) - mptcp_data_ready(parent, sk); -} - -static void subflow_write_space(struct sock *ssk) -{ - struct sock *sk = mptcp_subflow_ctx(ssk)->conn; - - mptcp_propagate_sndbuf(sk, ssk); - mptcp_write_space(sk); -} - void __mptcp_error_report(struct sock *sk) { struct mptcp_subflow_context *subflow; @@@ -1154,43 -1195,6 +1154,43 @@@ static void subflow_error_report(struc mptcp_data_unlock(sk); }
+static void subflow_data_ready(struct sock *sk) +{ + struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); + u16 state = 1 << inet_sk_state_load(sk); + struct sock *parent = subflow->conn; + struct mptcp_sock *msk; + + msk = mptcp_sk(parent); + if (state & TCPF_LISTEN) { + /* MPJ subflow are removed from accept queue before reaching here, + * avoid stray wakeups + */ + if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue)) + return; + + set_bit(MPTCP_DATA_READY, &msk->flags); + parent->sk_data_ready(parent); + return; + } + + WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable && + !subflow->mp_join && !(state & TCPF_CLOSE)); + + if (mptcp_subflow_data_available(sk)) + mptcp_data_ready(parent, sk); + else if (unlikely(sk->sk_err)) + subflow_error_report(sk); +} + +static void subflow_write_space(struct sock *ssk) +{ + struct sock *sk = mptcp_subflow_ctx(ssk)->conn; + + mptcp_propagate_sndbuf(sk, ssk); + mptcp_write_space(sk); +} + static struct inet_connection_sock_af_ops * subflow_default_af_ops(struct sock *sk) { @@@ -1501,8 -1505,6 +1501,8 @@@ static void subflow_state_change(struc */ if (mptcp_subflow_data_available(sk)) mptcp_data_ready(parent, sk); + else if (unlikely(sk->sk_err)) + subflow_error_report(sk);
subflow_sched_work_if_closed(mptcp_sk(parent), sk);
diff --combined net/netfilter/nf_tables_api.c index bf4d6ec9fc55,f20f6ae0e215..d6214242fe7f --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@@ -862,10 -862,9 +862,9 @@@ static int nft_netlink_dump_start_rcu(s static int nf_tables_gettable(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const nla[]) { - const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh); struct netlink_ext_ack *extack = info->extack; u8 genmask = nft_genmask_cur(info->net); - int family = nfmsg->nfgen_family; + u8 family = info->nfmsg->nfgen_family; const struct nft_table *table; struct net *net = info->net; struct sk_buff *skb2; @@@ -1068,10 -1067,9 +1067,9 @@@ static int nf_tables_newtable(struct sk const struct nlattr * const nla[]) { struct nftables_pernet *nft_net = nft_pernet(info->net); - const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh); struct netlink_ext_ack *extack = info->extack; u8 genmask = nft_genmask_next(info->net); - int family = nfmsg->nfgen_family; + u8 family = info->nfmsg->nfgen_family; struct net *net = info->net; const struct nlattr *attr; struct nft_table *table; @@@ -1263,10 -1261,9 +1261,9 @@@ out static int nf_tables_deltable(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const nla[]) { - const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh); struct netlink_ext_ack *extack = info->extack; u8 genmask = nft_genmask_next(info->net); - int family = nfmsg->nfgen_family; + u8 family = info->nfmsg->nfgen_family; struct net *net = info->net; const struct nlattr *attr; struct nft_table *table; @@@ -1636,10 -1633,9 +1633,9 @@@ done static int nf_tables_getchain(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const nla[]) { - const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh); struct netlink_ext_ack *extack = info->extack; u8 genmask = nft_genmask_cur(info->net); - int family = nfmsg->nfgen_family; + u8 family = info->nfmsg->nfgen_family; const struct nft_chain *chain; struct net *net = info->net; struct nft_table *table; @@@ -2015,11 -2011,12 +2011,12 @@@ static void nft_basechain_hook_init(str const struct nft_chain_hook *hook, struct nft_chain *chain) { - ops->pf = family; - ops->hooknum = hook->num; - ops->priority = hook->priority; - ops->priv = chain; - ops->hook = hook->type->hooks[ops->hooknum]; + ops->pf = family; + ops->hooknum = hook->num; + ops->priority = hook->priority; + ops->priv = chain; + ops->hook = hook->type->hooks[ops->hooknum]; + ops->hook_ops_type = NF_HOOK_OP_NF_TABLES; }
static int nft_basechain_init(struct nft_base_chain *basechain, u8 family, @@@ -2371,10 -2368,9 +2368,9 @@@ static int nf_tables_newchain(struct sk const struct nlattr * const nla[]) { struct nftables_pernet *nft_net = nft_pernet(info->net); - const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh); struct netlink_ext_ack *extack = info->extack; u8 genmask = nft_genmask_next(info->net); - int family = nfmsg->nfgen_family; + u8 family = info->nfmsg->nfgen_family; struct nft_chain *chain = NULL; struct net *net = info->net; const struct nlattr *attr; @@@ -2469,10 -2465,9 +2465,9 @@@ static int nf_tables_delchain(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const nla[]) { - const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh); struct netlink_ext_ack *extack = info->extack; u8 genmask = nft_genmask_next(info->net); - int family = nfmsg->nfgen_family; + u8 family = info->nfmsg->nfgen_family; struct net *net = info->net; const struct nlattr *attr; struct nft_table *table; @@@ -3096,10 -3091,9 +3091,9 @@@ static int nf_tables_dump_rules_done(st static int nf_tables_getrule(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const nla[]) { - const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh); struct netlink_ext_ack *extack = info->extack; u8 genmask = nft_genmask_cur(info->net); - int family = nfmsg->nfgen_family; + u8 family = info->nfmsg->nfgen_family; const struct nft_chain *chain; const struct nft_rule *rule; struct net *net = info->net; @@@ -3237,13 -3231,12 +3231,12 @@@ static int nf_tables_newrule(struct sk_ const struct nlattr * const nla[]) { struct nftables_pernet *nft_net = nft_pernet(info->net); - const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh); struct netlink_ext_ack *extack = info->extack; unsigned int size, i, n, ulen = 0, usize = 0; u8 genmask = nft_genmask_next(info->net); struct nft_rule *rule, *old_rule = NULL; struct nft_expr_info *expr_info = NULL; - int family = nfmsg->nfgen_family; + u8 family = info->nfmsg->nfgen_family; struct net *net = info->net; struct nft_flow_rule *flow; struct nft_userdata *udata; @@@ -3477,15 -3470,15 +3470,15 @@@ static struct nft_rule *nft_rule_lookup static int nf_tables_delrule(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const nla[]) { - const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh); struct netlink_ext_ack *extack = info->extack; - int family = nfmsg->nfgen_family, err = 0; u8 genmask = nft_genmask_next(info->net); + u8 family = info->nfmsg->nfgen_family; struct nft_chain *chain = NULL; struct net *net = info->net; struct nft_table *table; struct nft_rule *rule; struct nft_ctx ctx; + int err = 0;
table = nft_table_lookup(net, nla[NFTA_RULE_TABLE], family, genmask, NETLINK_CB(skb).portid); @@@ -3665,30 -3658,6 +3658,6 @@@ static const struct nla_policy nft_set_ [NFTA_SET_DESC_CONCAT] = { .type = NLA_NESTED }, };
- static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net, - const struct sk_buff *skb, - const struct nlmsghdr *nlh, - const struct nlattr * const nla[], - struct netlink_ext_ack *extack, - u8 genmask, u32 nlpid) - { - const struct nfgenmsg *nfmsg = nlmsg_data(nlh); - int family = nfmsg->nfgen_family; - struct nft_table *table = NULL; - - if (nla[NFTA_SET_TABLE] != NULL) { - table = nft_table_lookup(net, nla[NFTA_SET_TABLE], family, - genmask, nlpid); - if (IS_ERR(table)) { - NL_SET_BAD_ATTR(extack, nla[NFTA_SET_TABLE]); - return PTR_ERR(table); - } - } - - nft_ctx_init(ctx, net, skb, nlh, family, table, NULL, nla); - return 0; - } - static struct nft_set *nft_set_lookup(const struct nft_table *table, const struct nlattr *nla, u8 genmask) { @@@ -4068,20 -4037,26 +4037,26 @@@ static int nf_tables_dump_sets_done(str static int nf_tables_getset(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const nla[]) { - const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh); struct netlink_ext_ack *extack = info->extack; u8 genmask = nft_genmask_cur(info->net); + u8 family = info->nfmsg->nfgen_family; + struct nft_table *table = NULL; struct net *net = info->net; const struct nft_set *set; struct sk_buff *skb2; struct nft_ctx ctx; int err;
- /* Verify existence before starting dump */ - err = nft_ctx_init_from_setattr(&ctx, net, skb, info->nlh, nla, extack, - genmask, 0); - if (err < 0) - return err; + if (nla[NFTA_SET_TABLE]) { + table = nft_table_lookup(net, nla[NFTA_SET_TABLE], family, + genmask, 0); + if (IS_ERR(table)) { + NL_SET_BAD_ATTR(extack, nla[NFTA_SET_TABLE]); + return PTR_ERR(table); + } + } + + nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
if (info->nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { @@@ -4096,12 -4071,12 +4071,12 @@@ }
/* Only accept unspec with dump */ - if (nfmsg->nfgen_family == NFPROTO_UNSPEC) + if (info->nfmsg->nfgen_family == NFPROTO_UNSPEC) return -EAFNOSUPPORT; if (!nla[NFTA_SET_TABLE]) return -EINVAL;
- set = nft_set_lookup(ctx.table, nla[NFTA_SET_NAME], genmask); + set = nft_set_lookup(table, nla[NFTA_SET_NAME], genmask); if (IS_ERR(set)) return PTR_ERR(set);
@@@ -4189,11 -4164,10 +4164,10 @@@ static int nf_tables_set_desc_parse(str static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const nla[]) { - const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh); u32 ktype, dtype, flags, policy, gc_int, objtype; struct netlink_ext_ack *extack = info->extack; u8 genmask = nft_genmask_next(info->net); - int family = nfmsg->nfgen_family; + u8 family = info->nfmsg->nfgen_family; const struct nft_set_ops *ops; struct nft_expr *expr = NULL; struct net *net = info->net; @@@ -4364,45 -4338,13 +4338,45 @@@ err = nf_tables_set_alloc_name(&ctx, set, name); kfree(name); if (err < 0) - goto err_set_alloc_name; + goto err_set_name; + + udata = NULL; + if (udlen) { + udata = set->data + size; + nla_memcpy(udata, nla[NFTA_SET_USERDATA], udlen); + } + + INIT_LIST_HEAD(&set->bindings); + INIT_LIST_HEAD(&set->catchall_list); + set->table = table; + write_pnet(&set->net, net); + set->ops = ops; + set->ktype = ktype; + set->klen = desc.klen; + set->dtype = dtype; + set->objtype = objtype; + set->dlen = desc.dlen; + set->flags = flags; + set->size = desc.size; + set->policy = policy; + set->udlen = udlen; + set->udata = udata; + set->timeout = timeout; + set->gc_int = gc_int; + + set->field_count = desc.field_count; + for (i = 0; i < desc.field_count; i++) + set->field_len[i] = desc.field_len[i]; + + err = ops->init(set, &desc, nla); + if (err < 0) + goto err_set_init;
if (nla[NFTA_SET_EXPR]) { expr = nft_set_elem_expr_alloc(&ctx, set, nla[NFTA_SET_EXPR]); if (IS_ERR(expr)) { err = PTR_ERR(expr); - goto err_set_alloc_name; + goto err_set_expr_alloc; } set->exprs[0] = expr; set->num_exprs++; @@@ -4413,44 -4355,75 +4387,44 @@@
if (!(flags & NFT_SET_EXPR)) { err = -EINVAL; - goto err_set_alloc_name; + goto err_set_expr_alloc; } i = 0; nla_for_each_nested(tmp, nla[NFTA_SET_EXPRESSIONS], left) { if (i == NFT_SET_EXPR_MAX) { err = -E2BIG; - goto err_set_init; + goto err_set_expr_alloc; } if (nla_type(tmp) != NFTA_LIST_ELEM) { err = -EINVAL; - goto err_set_init; + goto err_set_expr_alloc; } expr = nft_set_elem_expr_alloc(&ctx, set, tmp); if (IS_ERR(expr)) { err = PTR_ERR(expr); - goto err_set_init; + goto err_set_expr_alloc; } set->exprs[i++] = expr; set->num_exprs++; } }
- udata = NULL; - if (udlen) { - udata = set->data + size; - nla_memcpy(udata, nla[NFTA_SET_USERDATA], udlen); - } - - INIT_LIST_HEAD(&set->bindings); - INIT_LIST_HEAD(&set->catchall_list); - set->table = table; - write_pnet(&set->net, net); - set->ops = ops; - set->ktype = ktype; - set->klen = desc.klen; - set->dtype = dtype; - set->objtype = objtype; - set->dlen = desc.dlen; - set->flags = flags; - set->size = desc.size; - set->policy = policy; - set->udlen = udlen; - set->udata = udata; - set->timeout = timeout; - set->gc_int = gc_int; set->handle = nf_tables_alloc_handle(table);
- set->field_count = desc.field_count; - for (i = 0; i < desc.field_count; i++) - set->field_len[i] = desc.field_len[i]; - - err = ops->init(set, &desc, nla); - if (err < 0) - goto err_set_init; - err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set); if (err < 0) - goto err_set_trans; + goto err_set_expr_alloc;
list_add_tail_rcu(&set->list, &table->sets); table->use++; return 0;
-err_set_trans: - ops->destroy(set); -err_set_init: +err_set_expr_alloc: for (i = 0; i < set->num_exprs; i++) nft_expr_destroy(&ctx, set->exprs[i]); -err_set_alloc_name: + + ops->destroy(set); +err_set_init: kfree(set->name); err_set_name: kvfree(set); @@@ -4494,31 -4467,31 +4468,31 @@@ static void nft_set_destroy(const struc static int nf_tables_delset(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const nla[]) { - const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh); struct netlink_ext_ack *extack = info->extack; u8 genmask = nft_genmask_next(info->net); + u8 family = info->nfmsg->nfgen_family; struct net *net = info->net; const struct nlattr *attr; + struct nft_table *table; struct nft_set *set; struct nft_ctx ctx; - int err;
- if (nfmsg->nfgen_family == NFPROTO_UNSPEC) + if (info->nfmsg->nfgen_family == NFPROTO_UNSPEC) return -EAFNOSUPPORT; - if (nla[NFTA_SET_TABLE] == NULL) - return -EINVAL;
- err = nft_ctx_init_from_setattr(&ctx, net, skb, info->nlh, nla, extack, - genmask, NETLINK_CB(skb).portid); - if (err < 0) - return err; + table = nft_table_lookup(net, nla[NFTA_SET_TABLE], family, + genmask, NETLINK_CB(skb).portid); + if (IS_ERR(table)) { + NL_SET_BAD_ATTR(extack, nla[NFTA_SET_TABLE]); + return PTR_ERR(table); + }
if (nla[NFTA_SET_HANDLE]) { attr = nla[NFTA_SET_HANDLE]; - set = nft_set_lookup_byhandle(ctx.table, attr, genmask); + set = nft_set_lookup_byhandle(table, attr, genmask); } else { attr = nla[NFTA_SET_NAME]; - set = nft_set_lookup(ctx.table, attr, genmask); + set = nft_set_lookup(table, attr, genmask); }
if (IS_ERR(set)) { @@@ -4532,6 -4505,8 +4506,8 @@@ return -EBUSY; }
+ nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla); + return nft_delset(&ctx, set); }
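Dropping nft_ctx_init_from_setattr()/nft_ctx_init_from_elemattr() in favour of open-coded lookups keeps the same two steps at each call site, now with the family taken from info->nfmsg: look up the table (tagging the offending attribute in extack on failure), then initialize the context once the object is known to exist. The resulting pattern, condensed:

	static int demo_get_set_ctx(struct net *net, struct sk_buff *skb,
				    const struct nfnl_info *info,
				    const struct nlattr * const nla[],
				    u8 family, u8 genmask, struct nft_ctx *ctx)
	{
		struct nft_table *table;

		table = nft_table_lookup(net, nla[NFTA_SET_TABLE], family,
					 genmask, NETLINK_CB(skb).portid);
		if (IS_ERR(table)) {
			NL_SET_BAD_ATTR(info->extack, nla[NFTA_SET_TABLE]);
			return PTR_ERR(table);
		}

		nft_ctx_init(ctx, net, skb, info->nlh, family, table, NULL, nla);
		return 0;
	}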
@@@ -4733,28 -4708,6 +4709,6 @@@ static const struct nla_policy nft_set_ [NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 }, };
- static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, struct net *net, - const struct sk_buff *skb, - const struct nlmsghdr *nlh, - const struct nlattr * const nla[], - struct netlink_ext_ack *extack, - u8 genmask, u32 nlpid) - { - const struct nfgenmsg *nfmsg = nlmsg_data(nlh); - int family = nfmsg->nfgen_family; - struct nft_table *table; - - table = nft_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], family, - genmask, nlpid); - if (IS_ERR(table)) { - NL_SET_BAD_ATTR(extack, nla[NFTA_SET_ELEM_LIST_TABLE]); - return PTR_ERR(table); - } - - nft_ctx_init(ctx, net, skb, nlh, family, table, NULL, nla); - return 0; - } - static int nft_set_elem_expr_dump(struct sk_buff *skb, const struct nft_set *set, const struct nft_set_ext *ext) @@@ -5212,21 -5165,27 +5166,27 @@@ static int nf_tables_getsetelem(struct { struct netlink_ext_ack *extack = info->extack; u8 genmask = nft_genmask_cur(info->net); + u8 family = info->nfmsg->nfgen_family; struct net *net = info->net; + struct nft_table *table; struct nft_set *set; struct nlattr *attr; struct nft_ctx ctx; int rem, err = 0;
- err = nft_ctx_init_from_elemattr(&ctx, net, skb, info->nlh, nla, extack, - genmask, NETLINK_CB(skb).portid); - if (err < 0) - return err; + table = nft_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], family, + genmask, NETLINK_CB(skb).portid); + if (IS_ERR(table)) { + NL_SET_BAD_ATTR(extack, nla[NFTA_SET_ELEM_LIST_TABLE]); + return PTR_ERR(table); + }
- set = nft_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], genmask); + set = nft_set_lookup(table, nla[NFTA_SET_ELEM_LIST_SET], genmask); if (IS_ERR(set)) return PTR_ERR(set);
+ nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla); + if (info->nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { .start = nf_tables_dump_set_start, @@@ -5995,8 -5954,10 +5955,10 @@@ static int nf_tables_newsetelem(struct struct nftables_pernet *nft_net = nft_pernet(info->net); struct netlink_ext_ack *extack = info->extack; u8 genmask = nft_genmask_next(info->net); + u8 family = info->nfmsg->nfgen_family; struct net *net = info->net; const struct nlattr *attr; + struct nft_table *table; struct nft_set *set; struct nft_ctx ctx; int rem, err; @@@ -6004,12 -5965,14 +5966,14 @@@ if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) return -EINVAL;
- err = nft_ctx_init_from_elemattr(&ctx, net, skb, info->nlh, nla, extack, - genmask, NETLINK_CB(skb).portid); - if (err < 0) - return err; + table = nft_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], family, + genmask, NETLINK_CB(skb).portid); + if (IS_ERR(table)) { + NL_SET_BAD_ATTR(extack, nla[NFTA_SET_ELEM_LIST_TABLE]); + return PTR_ERR(table); + }
- set = nft_set_lookup_global(net, ctx.table, nla[NFTA_SET_ELEM_LIST_SET], + set = nft_set_lookup_global(net, table, nla[NFTA_SET_ELEM_LIST_SET], nla[NFTA_SET_ELEM_LIST_SET_ID], genmask); if (IS_ERR(set)) return PTR_ERR(set); @@@ -6017,6 -5980,8 +5981,8 @@@ if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT) return -EBUSY;
+ 	nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
+ 
  	nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
  		err = nft_add_set_elem(&ctx, set, attr, info->nlh->nlmsg_flags);
  		if (err < 0)
@@@ -6024,7 -5989,7 +5990,7 @@@
  	}
  	if (nft_net->validate_state == NFT_VALIDATE_DO)
- 		return nft_table_validate(net, ctx.table);
+ 		return nft_table_validate(net, table);
  	return 0;
  }
@@@ -6262,23 -6227,29 +6228,29 @@@ static int nf_tables_delsetelem(struct 
  {
  	struct netlink_ext_ack *extack = info->extack;
  	u8 genmask = nft_genmask_next(info->net);
+ 	u8 family = info->nfmsg->nfgen_family;
  	struct net *net = info->net;
  	const struct nlattr *attr;
+ 	struct nft_table *table;
  	struct nft_set *set;
  	struct nft_ctx ctx;
  	int rem, err = 0;
- 	err = nft_ctx_init_from_elemattr(&ctx, net, skb, info->nlh, nla, extack,
- 					 genmask, NETLINK_CB(skb).portid);
- 	if (err < 0)
- 		return err;
+ 	table = nft_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], family,
+ 				 genmask, NETLINK_CB(skb).portid);
+ 	if (IS_ERR(table)) {
+ 		NL_SET_BAD_ATTR(extack, nla[NFTA_SET_ELEM_LIST_TABLE]);
+ 		return PTR_ERR(table);
+ 	}
- 	set = nft_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
+ 	set = nft_set_lookup(table, nla[NFTA_SET_ELEM_LIST_SET], genmask);
  	if (IS_ERR(set))
  		return PTR_ERR(set);
  	if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
  		return -EBUSY;
+ 	nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
+ 
  	if (!nla[NFTA_SET_ELEM_LIST_ELEMENTS])
  		return nft_set_flush(&ctx, set, genmask);
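For readers unfamiliar with the iterator used in the newsetelem hunk above: `rem` is the running byte count that nla_for_each_nested() maintains while walking the attributes nested inside NFTA_SET_ELEM_LIST_ELEMENTS. As defined in include/net/netlink.h (paraphrased from memory; check your tree), the macro expands along these lines:

	/* nla_for_each_nested(pos, nla, rem) is roughly: */
	for (pos = nla_data(nla), rem = nla_len(nla);
	     nla_ok(pos, rem);
	     pos = nla_next(pos, &rem))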
@@@ -6546,11 -6517,10 +6518,10 @@@ err_free_trans
  static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info,
  			    const struct nlattr * const nla[])
  {
- 	const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
  	struct netlink_ext_ack *extack = info->extack;
  	u8 genmask = nft_genmask_next(info->net);
+ 	u8 family = info->nfmsg->nfgen_family;
  	const struct nft_object_type *type;
- 	int family = nfmsg->nfgen_family;
  	struct net *net = info->net;
  	struct nft_table *table;
  	struct nft_object *obj;
@@@ -6802,10 -6772,9 +6773,9 @@@ static int nf_tables_dump_obj_done(stru
  static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
  			    const struct nlattr * const nla[])
  {
- 	const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
  	struct netlink_ext_ack *extack = info->extack;
  	u8 genmask = nft_genmask_cur(info->net);
- 	int family = nfmsg->nfgen_family;
+ 	u8 family = info->nfmsg->nfgen_family;
  	const struct nft_table *table;
  	struct net *net = info->net;
  	struct nft_object *obj;
@@@ -6892,10 -6861,9 +6862,9 @@@ static void nft_obj_destroy(const struc
  static int nf_tables_delobj(struct sk_buff *skb, const struct nfnl_info *info,
  			    const struct nlattr * const nla[])
  {
- 	const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
  	struct netlink_ext_ack *extack = info->extack;
  	u8 genmask = nft_genmask_next(info->net);
- 	int family = nfmsg->nfgen_family;
+ 	u8 family = info->nfmsg->nfgen_family;
  	struct net *net = info->net;
  	const struct nlattr *attr;
  	struct nft_table *table;
@@@ -7323,12 -7291,11 +7292,11 @@@ static int nf_tables_newflowtable(struc
  				  const struct nfnl_info *info,
  				  const struct nlattr * const nla[])
  {
- 	const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
  	struct netlink_ext_ack *extack = info->extack;
  	struct nft_flowtable_hook flowtable_hook;
  	u8 genmask = nft_genmask_next(info->net);
+ 	u8 family = info->nfmsg->nfgen_family;
  	const struct nf_flowtable_type *type;
- 	int family = nfmsg->nfgen_family;
  	struct nft_flowtable *flowtable;
  	struct nft_hook *hook, *next;
  	struct net *net = info->net;
@@@ -7512,10 -7479,9 +7480,9 @@@ static int nf_tables_delflowtable(struc
  				  const struct nfnl_info *info,
  				  const struct nlattr * const nla[])
  {
- 	const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
  	struct netlink_ext_ack *extack = info->extack;
  	u8 genmask = nft_genmask_next(info->net);
- 	int family = nfmsg->nfgen_family;
+ 	u8 family = info->nfmsg->nfgen_family;
  	struct nft_flowtable *flowtable;
  	struct net *net = info->net;
  	const struct nlattr *attr;
@@@ -7707,9 -7673,8 +7674,8 @@@ static int nf_tables_getflowtable(struc
  				  const struct nfnl_info *info,
  				  const struct nlattr * const nla[])
  {
- 	const struct nfgenmsg *nfmsg = nlmsg_data(info->nlh);
  	u8 genmask = nft_genmask_cur(info->net);
- 	int family = nfmsg->nfgen_family;
+ 	u8 family = info->nfmsg->nfgen_family;
  	struct nft_flowtable *flowtable;
  	const struct nft_table *table;
  	struct net *net = info->net;
diff --combined net/packet/af_packet.c
index 8491b7a5467c,71dd6b910f7c..5dafb6044f0a
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@@ -1656,7 -1656,6 +1656,7 @@@ static int fanout_add(struct sock *sk, 
  	case PACKET_FANOUT_ROLLOVER:
  		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
  			return -EINVAL;
+ 		break;
  	case PACKET_FANOUT_HASH:
  	case PACKET_FANOUT_LB:
  	case PACKET_FANOUT_CPU:
@@@ -3035,13 -3034,10 +3035,13 @@@ static int packet_sendmsg(struct socke
  	struct sock *sk = sock->sk;
  	struct packet_sock *po = pkt_sk(sk);
- 	if (po->tx_ring.pg_vec)
+ 	/* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
+ 	 * tpacket_snd() will redo the check safely.
+ 	 */
+ 	if (data_race(po->tx_ring.pg_vec))
  		return tpacket_snd(po, msg);
- 	else
- 		return packet_snd(sock, msg, len);
+ 
+ 	return packet_snd(sock, msg, len);
  }
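Two details in the af_packet hunks deserve a note. First, the lone break added in fanout_add() stops PACKET_FANOUT_ROLLOVER from falling through into the case labels below it; since those labels appear to share an empty body, the change looks behavior-neutral and mainly documents intent while silencing implicit-fallthrough warnings. Second, data_race() in packet_sendmsg() is the KCSAN annotation from <linux/compiler.h>: it declares a racy access intentional so the race detector stays quiet, without adding any ordering or atomicity. The general shape of the idiom, with illustrative names rather than code from this patch:

	/* Lockless fast-path check: the read may observe a stale value,
	 * which is safe only because the slow path re-validates the
	 * condition under the real lock. */
	if (data_race(ring->pg_vec))		/* intentional racy read */
		return locked_send_path(sk);	/* re-checks under the lock */

	return plain_send_path(sk);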
  /*
@@@ -3933,12 -3929,9 +3933,9 @@@ packet_setsockopt(struct socket *sock, 
  		return -EFAULT;
  	lock_sock(sk);
- 	if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
- 		ret = -EBUSY;
- 	} else {
+ 	if (!po->rx_ring.pg_vec && !po->tx_ring.pg_vec)
  		po->tp_tx_has_off = !!val;
- 		ret = 0;
- 	}
+ 
  	release_sock(sk);
  	return 0;
  }
diff --combined net/tipc/link.c
index 1b7a487c8841,5b6181277cc5..cf586840caeb
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@@ -654,7 -654,6 +654,7 @@@ int tipc_link_fsm_evt(struct tipc_link 
  		break;
  	case LINK_FAILOVER_BEGIN_EVT:
  		l->state = LINK_FAILINGOVER;
+ 		break;
  	case LINK_FAILURE_EVT:
  	case LINK_RESET_EVT:
  	case LINK_ESTABLISH_EVT:
@@@ -913,7 -912,7 +913,7 @@@ static int link_schedule_user(struct ti
  	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
  			      dnode, l->addr, dport, 0, 0);
  	if (!skb)
- 		return -ENOBUFS;
+ 		return -ENOMEM;
  	msg_set_dest_droppable(buf_msg(skb), true);
  	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
  	skb_queue_tail(&l->wakeupq, skb);
@@@ -1031,7 -1030,7 +1031,7 @@@ void tipc_link_reset(struct tipc_link *
   *
   * Consumes the buffer chain.
   * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
-  * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
+  * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS or -ENOMEM
   */
  int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
  		   struct sk_buff_head *xmitq)
@@@ -1089,7 -1088,7 +1089,7 @@@
  			if (!_skb) {
  				kfree_skb(skb);
  				__skb_queue_purge(list);
- 				return -ENOBUFS;
+ 				return -ENOMEM;
  			}
  			__skb_queue_tail(transmq, skb);
  			tipc_link_set_skb_retransmit_time(skb, l);
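On the tipc side, the -ENOBUFS returns that follow a failed skb allocation (the !skb and !_skb checks above) become -ENOMEM, and the kernel-doc Return: line for tipc_link_xmit() is extended to match. This follows the usual kernel convention: -ENOMEM for allocator failure, -ENOBUFS for exhausted buffer or queue space. A hedged caller-side reading (the split below is an interpretation of that convention, not something the hunks spell out):

	rc = tipc_link_xmit(l, &list, &xmitq);
	if (rc == -ENOMEM) {
		/* an skb allocation inside the xmit path failed;
		 * whether to retry under memory pressure is left
		 * to the caller */
	} else if (rc == -ENOBUFS || rc == -ELINKCONG) {
		/* link-level backpressure: queue limit or congestion */
	}

The break added in tipc_link_fsm_evt() mirrors the fanout_add() fix above: LINK_FAILOVER_BEGIN_EVT now stops after setting LINK_FAILINGOVER instead of falling through into the grouped failure/reset event labels (whose shared body lies outside this hunk).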