The following commit has been merged in the master branch:

commit 3537c4769a8e4f818783afcfdf6c3376b59c2acd
Merge: 8a2f83685a5015c8cedb271cbd6092a8a04a34f6 deecae7d96843fceebae06445b3f4bf8cceca31a
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Fri Aug 27 11:37:37 2021 +1000

    Merge remote-tracking branch 'net-next/master'
diff --combined Documentation/admin-guide/kernel-parameters.txt
index 267d8488de3f,ee0569a040c6..e817420d1658
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@@ -380,9 -380,6 +380,9 @@@
 	arm64.nopauth	[ARM64] Unconditionally disable Pointer Authentication
 			support

+	arm64.nomte	[ARM64] Unconditionally disable Memory Tagging Extension
+			support
+
 	ataflop=	[HW,M68k]

 	atarimouse=	[HW,MOUSE] Atari Mouse

@@@ -4170,15 -4167,6 +4170,15 @@@
 			Format: <bool> (1/Y/y=enable, 0/N/n=disable)
 			default: disabled

+	printk.console_no_auto_verbose=
+			Disable console loglevel raise on oops, panic
+			or lockdep-detected issues (only if lock debug is on).
+			With an exception to setups with low baudrate on
+			serial console, keeping this 0 is a good choice
+			in order to provide more debug information.
+			Format: <bool>
+			default: 0 (auto_verbose is enabled)
+
 	printk.devkmsg={on,off,ratelimit}
 			Control writing to /dev/kmsg.
 			on - unlimited logging to /dev/kmsg from userspace

@@@ -4957,8 -4945,6 +4957,6 @@@
 	sa1100ir	[NET]
 			See drivers/net/irda/sa1100_ir.c.

-	sbni=		[NET] Granch SBNI12 leased line adapter
-
 	sched_verbose	[KNL] Enables verbose scheduler debug messages.
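(The printk.console_no_auto_verbose entry added above is a plain boolean module parameter in the printk namespace. As a minimal, hypothetical sketch of how such a knob is typically declared — simplified, not the exact kernel/printk/ code:

    #include <linux/moduleparam.h>

    /* Hypothetical sketch of a boot-time bool knob like the one documented
     * above; for built-in code the "printk." prefix comes from the module
     * name, so it is set as printk.console_no_auto_verbose=1 on the kernel
     * command line.
     */
    static bool console_no_auto_verbose;
    module_param(console_no_auto_verbose, bool, 0644);
    MODULE_PARM_DESC(console_no_auto_verbose,
    		 "disable raising the console loglevel on oops/panic");

With mode 0644 the value can also be flipped at runtime via /sys/module/printk/parameters/console_no_auto_verbose.)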
schedstats= [KNL,X86] Enable or disable scheduled statistics. diff --combined MAINTAINERS index 87f560eafa50,06e39d3eba93..1508b59d9c01 --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -1488,7 -1488,7 +1488,7 @@@ M: Miquel Raynal <miquel.raynal@bootlin M: Naga Sureshkumar Relli nagasure@xilinx.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -F: Documentation/devicetree/bindings/mtd/arm,pl353-smc.yaml +F: Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml F: drivers/memory/pl353-smc.c
ARM PRIMECELL CLCD PL110 DRIVER @@@ -2010,12 -2010,10 +2010,12 @@@ M: Krzysztof Halasa <khalasa@piap.pl L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/arm/intel-ixp4xx.yaml +F: Documentation/devicetree/bindings/bus/intel,ixp4xx-expansion-bus-controller.yaml F: Documentation/devicetree/bindings/gpio/intel,ixp4xx-gpio.txt F: Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml F: Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml F: arch/arm/mach-ixp4xx/ +F: drivers/bus/intel-ixp4xx-eb.c F: drivers/clocksource/timer-ixp4xx.c F: drivers/crypto/ixp4xx_crypto.c F: drivers/gpio/gpio-ixp4xx.c @@@ -2309,14 -2307,14 +2309,14 @@@ N: oxna
ARM/PALM TREO SUPPORT M: Tomas Cech sleep_walker@suse.com -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained W: http://hackndev.com F: arch/arm/mach-pxa/palmtreo.*
ARM/PALMTX,PALMT5,PALMLD,PALMTE2,PALMTC SUPPORT M: Marek Vasut marek.vasut@gmail.com -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained W: http://hackndev.com F: arch/arm/mach-pxa/include/mach/palmld.h @@@ -2330,7 -2328,7 +2330,7 @@@ F: arch/arm/mach-pxa/palmtx.
ARM/PALMZ72 SUPPORT M: Sergey Lapin slapin@ossfans.org -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained W: http://hackndev.com F: arch/arm/mach-pxa/palmz72.* @@@ -2500,7 -2498,7 +2500,7 @@@ N: s5pv21
ARM/SAMSUNG S5P SERIES 2D GRAPHICS ACCELERATION (G2D) SUPPORT M: Andrzej Hajda a.hajda@samsung.com -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-media@vger.kernel.org S: Maintained F: drivers/media/platform/s5p-g2d/ @@@ -2517,14 -2515,14 +2517,14 @@@ ARM/SAMSUNG S5P SERIES JPEG CODEC SUPPO M: Andrzej Pietrasiewicz andrzejtp2010@gmail.com M: Jacek Anaszewski jacek.anaszewski@gmail.com M: Sylwester Nawrocki s.nawrocki@samsung.com -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-media@vger.kernel.org S: Maintained F: drivers/media/platform/s5p-jpeg/
ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT M: Andrzej Hajda <a.hajda@samsung.com> -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-media@vger.kernel.org S: Maintained F: drivers/media/platform/s5p-mfc/ @@@ -3199,7 -3197,7 +3199,7 @@@ S: Maintaine W: https://www.open-mesh.org/ Q: https://patchwork.open-mesh.org/project/batman/list/ B: https://www.open-mesh.org/projects/batman-adv/issues - C: irc://chat.freenode.net/batman + C: ircs://irc.hackint.org/batadv T: git https://git.open-mesh.org/linux-merge.git F: Documentation/networking/batman-adv.rst F: include/uapi/linux/batadv_packet.h @@@ -3542,7 -3540,7 +3542,7 @@@ BROADCOM BCM5301X ARM ARCHITECTUR M: Hauke Mehrtens <hauke@hauke-m.de> M: Rafał Miłecki <zajec5@gmail.com> M: bcm-kernel-feedback-list@broadcom.com -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/boot/dts/bcm470* F: arch/arm/boot/dts/bcm5301* @@@ -3552,7 -3550,7 +3552,7 @@@ F: arch/arm/mach-bcm/bcm_5301x. BROADCOM BCM53573 ARM ARCHITECTURE M: Rafał Miłecki <rafal@milecki.pl> L: bcm-kernel-feedback-list@broadcom.com -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/boot/dts/bcm47189* F: arch/arm/boot/dts/bcm53573* @@@ -3868,16 -3866,6 +3868,16 @@@ L: bcm-kernel-feedback-list@broadcom.co S: Maintained F: drivers/mtd/nand/raw/brcmnand/
+BROADCOM STB PCIE DRIVER +M: Jim Quinlan jim2101024@gmail.com +M: Nicolas Saenz Julienne nsaenz@kernel.org +M: Florian Fainelli f.fainelli@gmail.com +M: bcm-kernel-feedback-list@broadcom.com +L: linux-pci@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml +F: drivers/pci/controller/pcie-brcmstb.c + BROADCOM SYSTEMPORT ETHERNET DRIVER M: Florian Fainelli f.fainelli@gmail.com L: bcm-kernel-feedback-list@broadcom.com @@@ -4510,10 -4498,9 +4510,10 @@@ L: clang-built-linux@googlegroups.co S: Supported W: https://clangbuiltlinux.github.io/ B: https://github.com/ClangBuiltLinux/linux/issues -C: irc://chat.freenode.net/clangbuiltlinux +C: irc://irc.libera.chat/clangbuiltlinux F: Documentation/kbuild/llvm.rst F: include/linux/compiler-clang.h +F: scripts/Makefile.clang F: scripts/clang-tools/ K: \b(?i:clang|llvm)\b
@@@ -4623,7 -4610,7 +4623,7 @@@ F: include/linux/clk F: include/linux/of_clk.h X: drivers/clk/clkdev.c
-COMMON INTERNET FILE SYSTEM (CIFS) +COMMON INTERNET FILE SYSTEM CLIENT (CIFS) M: Steve French sfrench@samba.org L: linux-cifs@vger.kernel.org L: samba-technical@lists.samba.org (moderated for non-subscribers) @@@ -4632,7 -4619,6 +4632,7 @@@ W: http://linux-cifs.samba.org T: git git://git.samba.org/sfrench/cifs-2.6.git F: Documentation/admin-guide/cifs/ F: fs/cifs/ +F: fs/cifs_common/
COMPACTPCI HOTPLUG CORE M: Scott Murray scott@spiteful.org @@@ -4848,7 -4834,7 +4848,7 @@@ CPUIDLE DRIVER - ARM BIG LITTL M: Lorenzo Pieralisi lorenzo.pieralisi@arm.com M: Daniel Lezcano daniel.lezcano@linaro.org L: linux-pm@vger.kernel.org -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git F: drivers/cpuidle/cpuidle-big_little.c @@@ -4868,14 -4854,14 +4868,14 @@@ CPUIDLE DRIVER - ARM PSC M: Lorenzo Pieralisi lorenzo.pieralisi@arm.com M: Sudeep Holla sudeep.holla@arm.com L: linux-pm@vger.kernel.org -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported F: drivers/cpuidle/cpuidle-psci.c
CPUIDLE DRIVER - ARM PSCI PM DOMAIN M: Ulf Hansson ulf.hansson@linaro.org L: linux-pm@vger.kernel.org -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported F: drivers/cpuidle/cpuidle-psci.h F: drivers/cpuidle/cpuidle-psci-domain.c @@@ -5698,6 -5684,7 +5698,7 @@@ DPAA2 ETHERNET SWITCH DRIVE M: Ioana Ciornei ioana.ciornei@nxp.com L: netdev@vger.kernel.org S: Maintained + F: Documentation/networking/device_drivers/ethernet/freescale/dpaa2/switch-driver.rst F: drivers/net/ethernet/freescale/dpaa2/dpaa2-switch* F: drivers/net/ethernet/freescale/dpaa2/dpsw*
@@@ -6815,6 -6802,7 +6816,6 @@@ F: Documentation/admin-guide/media/em28 F: drivers/media/usb/em28xx/
EMBEDDED LINUX -M: Paul Gortmaker paul.gortmaker@windriver.com M: Matt Mackall mpm@selenic.com M: David Woodhouse dwmw2@infradead.org L: linux-embedded@vger.kernel.org @@@ -6917,6 -6905,12 +6918,12 @@@ M: Mark Einon <mark.einon@gmail.com S: Odd Fixes F: drivers/net/ethernet/agere/
+ ETAS ES58X CAN/USB DRIVER + M: Vincent Mailhol mailhol.vincent@wanadoo.fr + L: linux-can@vger.kernel.org + S: Maintained + F: drivers/net/can/usb/etas_es58x/ + ETHERNET BRIDGE M: Roopa Prabhu roopa@nvidia.com M: Nikolay Aleksandrov nikolay@nvidia.com @@@ -6958,7 -6952,7 +6965,7 @@@ F: include/uapi/linux/mdio. F: include/uapi/linux/mii.h
EXFAT FILE SYSTEM -M: Namjae Jeon namjae.jeon@samsung.com +M: Namjae Jeon linkinjeon@kernel.org M: Sungjong Seo sj1557.seo@samsung.com L: linux-fsdevel@vger.kernel.org S: Maintained @@@ -7209,7 -7203,7 +7216,7 @@@ F: tools/firewire
FIRMWARE FRAMEWORK FOR ARMV8-A M: Sudeep Holla sudeep.holla@arm.com -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: drivers/firmware/arm_ffa/ F: include/linux/arm_ffa.h @@@ -7388,7 -7382,7 +7395,7 @@@ F: include/linux/platform_data/video-im
FREESCALE IMX DDR PMU DRIVER M: Frank Li Frank.li@nxp.com -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/admin-guide/perf/imx-ddr.rst F: Documentation/devicetree/bindings/perf/fsl-imx-ddr.yaml @@@ -7480,7 -7474,7 +7487,7 @@@ F: drivers/tty/serial/ucc_uart. FREESCALE SOC DRIVERS M: Li Yang leoyang.li@nxp.com L: linuxppc-dev@lists.ozlabs.org -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/misc/fsl,dpaa2-console.yaml F: Documentation/devicetree/bindings/soc/fsl/ @@@ -8408,7 -8402,7 +8415,7 @@@ F: drivers/crypto/hisilicon/sgl. F: drivers/crypto/hisilicon/zip/
HISILICON ROCE DRIVER -M: Lijun Ou oulijun@huawei.com +M: Wenpeng Liang liangwenpeng@huawei.com M: Weihang Li liweihang@huawei.com L: linux-rdma@vger.kernel.org S: Maintained @@@ -9762,11 -9756,6 +9769,6 @@@ M: David Sterba <dsterba@suse.com S: Odd Fixes F: drivers/tty/ipwireless/
- IPX NETWORK LAYER - L: netdev@vger.kernel.org - S: Obsolete - F: include/uapi/linux/ipx.h - IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY) M: Marc Zyngier maz@kernel.org S: Maintained @@@ -10065,7 -10054,6 +10067,7 @@@ F: fs/autofs KERNEL BUILD + files below scripts/ (unless maintained elsewhere) M: Masahiro Yamada masahiroy@kernel.org M: Michal Marek michal.lkml@markovi.net +R: Nick Desaulniers ndesaulniers@google.com L: linux-kbuild@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git @@@ -10117,17 -10105,6 +10119,17 @@@ T: git git://git.kernel.org/pub/scm/lin F: Documentation/dev-tools/kselftest* F: tools/testing/selftests/
+KERNEL SMB3 SERVER (KSMBD) +M: Namjae Jeon linkinjeon@kernel.org +M: Sergey Senozhatsky senozhatsky@chromium.org +M: Steve French sfrench@samba.org +M: Hyunchul Lee hyc.lee@gmail.com +L: linux-cifs@vger.kernel.org +S: Maintained +T: git git://git.samba.org/ksmbd.git +F: fs/cifs_common/ +F: fs/ksmbd/ + KERNEL UNIT TESTING FRAMEWORK (KUnit) M: Brendan Higgins brendanhiggins@google.com L: linux-kselftest@vger.kernel.org @@@ -10413,6 -10390,7 +10415,7 @@@ F: net/core/skmsg. F: net/core/sock_map.c F: net/ipv4/tcp_bpf.c F: net/ipv4/udp_bpf.c + F: net/unix/unix_bpf.c
LANDLOCK SECURITY MODULE M: Mickaël Salaün <mic@digikod.net> @@@ -11055,6 -11033,18 +11058,18 @@@ F: drivers/mailbox/arm_mhuv2. F: include/linux/mailbox/arm_mhuv2_message.h F: Documentation/devicetree/bindings/mailbox/arm,mhuv2.yaml
+ MANAGEMENT COMPONENT TRANSPORT PROTOCOL (MCTP) + M: Jeremy Kerr jk@codeconstruct.com.au + M: Matt Johnston matt@codeconstruct.com.au + L: netdev@vger.kernel.org + S: Maintained + F: Documentation/networking/mctp.rst + F: drivers/net/mctp/ + F: include/net/mctp.h + F: include/net/mctpdevice.h + F: include/net/netns/mctp.h + F: net/mctp/ + MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7 M: Michael Kerrisk mtk.manpages@gmail.com L: linux-man@vger.kernel.org @@@ -11065,7 -11055,7 +11080,7 @@@ MARDUK (CREATOR CI40) DEVICE TREE SUPPO M: Rahul Bedarkar rahulbedarkar89@gmail.com L: linux-mips@vger.kernel.org S: Maintained -F: arch/mips/boot/dts/img/pistachio_marduk.dts +F: arch/mips/boot/dts/img/pistachio*
MARVELL 88E6XXX ETHERNET SWITCH FABRIC DRIVER M: Andrew Lunn andrew@lunn.ch @@@ -11117,7 -11107,7 +11132,7 @@@ F: drivers/net/wireless/marvell/liberta
MARVELL MACCHIATOBIN SUPPORT M: Russell King linux@armlinux.org.uk -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts
@@@ -11352,6 -11342,12 +11367,12 @@@ W: https://linuxtv.or T: git git://linuxtv.org/media_tree.git F: drivers/media/radio/radio-maxiradio*
+ MAXLINEAR ETHERNET PHY DRIVER + M: Xu Liang lxu@maxlinear.com + L: netdev@vger.kernel.org + S: Supported + F: drivers/net/phy/mxl-gpy.c + MCBA MICROCHIP CAN BUS ANALYZER TOOL DRIVER R: Yasushi SHOJI yashi@spacecubics.com L: linux-can@vger.kernel.org @@@ -12668,7 -12664,6 +12689,7 @@@ M: Laurent Pinchart <laurent.pinchart@i L: linux-media@vger.kernel.org S: Maintained T: git git://linuxtv.org/media_tree.git +F: Documentation/devicetree/bindings/media/i2c/aptina,mt9p031.yaml F: drivers/media/i2c/mt9p031.c F: include/media/i2c/mt9p031.h
@@@ -13279,15 -13274,6 +13300,15 @@@ T: git git://git.kernel.org/pub/scm/lin F: Documentation/filesystems/ntfs.rst F: fs/ntfs/
+NTFS3 FILESYSTEM +M: Konstantin Komarov almaz.alexandrovich@paragon-software.com +L: ntfs3@lists.linux.dev +S: Supported +W: http://www.paragon-software.com/ +T: git https://github.com/Paragon-Software-Group/linux-ntfs3.git +F: Documentation/filesystems/ntfs3.rst +F: fs/ntfs3/ + NUBUS SUBSYSTEM M: Finn Thain fthain@linux-m68k.org L: linux-m68k@lists.linux-m68k.org @@@ -13721,13 -13707,6 +13742,13 @@@ S: Maintaine T: git git://linuxtv.org/media_tree.git F: drivers/media/i2c/ov13858.c
+OMNIVISION OV13B10 SENSOR DRIVER +M: Arec Kao arec.kao@intel.com +L: linux-media@vger.kernel.org +S: Maintained +T: git git://linuxtv.org/media_tree.git +F: drivers/media/i2c/ov13b10.c + OMNIVISION OV2680 SENSOR DRIVER M: Rui Miguel Silva rmfrfs@gmail.com L: linux-media@vger.kernel.org @@@ -13822,15 -13801,6 +13843,15 @@@ T: git git://linuxtv.org/media_tree.gi F: Documentation/devicetree/bindings/media/i2c/ov8856.yaml F: drivers/media/i2c/ov8856.c
+OMNIVISION OV9282 SENSOR DRIVER +M: Paul J. Murphy paul.j.murphy@intel.com +M: Daniele Alessandrelli daniele.alessandrelli@intel.com +L: linux-media@vger.kernel.org +S: Maintained +T: git git://linuxtv.org/media_tree.git +F: Documentation/devicetree/bindings/media/i2c/ovti,ov9282.yaml +F: drivers/media/i2c/ov9282.c + OMNIVISION OV9640 SENSOR DRIVER M: Petr Cvek petrcvekcz@gmail.com L: linux-media@vger.kernel.org @@@ -13921,6 -13891,12 +13942,12 @@@ F: Documentation/devicetree F: arch/*/boot/dts/ F: include/dt-bindings/
+ OPENCOMPUTE PTP CLOCK DRIVER + M: Jonathan Lemon jonathan.lemon@gmail.com + L: netdev@vger.kernel.org + S: Maintained + F: drivers/ptp/ptp_ocp.c + OPENCORES I2C BUS DRIVER M: Peter Korsgaard peter@korsgaard.com M: Andrew Lunn andrew@lunn.ch @@@ -14193,7 -14169,7 +14220,7 @@@ F: drivers/pci/controller/pcie-altera. PCI DRIVER FOR APPLIEDMICRO XGENE M: Toan Le toan@os.amperecomputing.com L: linux-pci@vger.kernel.org -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/pci/xgene-pci.txt F: drivers/pci/controller/pci-xgene.c @@@ -14201,7 -14177,7 +14228,7 @@@ PCI DRIVER FOR ARM VERSATILE PLATFORM M: Rob Herring robh@kernel.org L: linux-pci@vger.kernel.org -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/pci/versatile.yaml F: drivers/pci/controller/pci-versatile.c @@@ -14209,7 -14185,7 +14236,7 @@@ PCI DRIVER FOR ARMADA 8K M: Thomas Petazzoni thomas.petazzoni@bootlin.com L: linux-pci@vger.kernel.org -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/pci/pci-armada8k.txt F: drivers/pci/controller/dwc/pcie-armada8k.c @@@ -14227,7 -14203,7 +14254,7 @@@ M: Mingkai Hu <mingkai.hu@nxp.com M: Roy Zang roy.zang@nxp.com L: linuxppc-dev@lists.ozlabs.org L: linux-pci@vger.kernel.org -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: drivers/pci/controller/dwc/*layerscape*
@@@ -14307,7 -14283,7 +14334,7 @@@ F: drivers/pci/controller/pci-tegra. PCI DRIVER FOR NXP LAYERSCAPE GEN4 CONTROLLER M: Hou Zhiqiang Zhiqiang.Hou@nxp.com L: linux-pci@vger.kernel.org -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/pci/layerscape-pcie-gen4.txt F: drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c @@@ -14341,7 -14317,7 +14368,7 @@@ PCI DRIVER FOR TI DRA7XX/J721 M: Kishon Vijay Abraham I kishon@ti.com L: linux-omap@vger.kernel.org L: linux-pci@vger.kernel.org -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported F: Documentation/devicetree/bindings/pci/ti-pci.txt F: drivers/pci/controller/cadence/pci-j721e.c @@@ -14397,7 -14373,7 +14424,7 @@@ F: drivers/pci/controller/pcie-altera-m PCI MSI DRIVER FOR APPLIEDMICRO XGENE M: Toan Le toan@os.amperecomputing.com L: linux-pci@vger.kernel.org -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/pci/xgene-pci-msi.txt F: drivers/pci/controller/pci-xgene-msi.c @@@ -14481,20 -14457,6 +14508,20 @@@ S: Maintaine F: Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt F: drivers/pci/controller/dwc/pcie-histb.c
+PCIE DRIVER FOR INTEL KEEM BAY +M: Srikanth Thokala srikanth.thokala@intel.com +L: linux-pci@vger.kernel.org +S: Supported +F: Documentation/devicetree/bindings/pci/intel,keembay-pcie* +F: drivers/pci/controller/dwc/pcie-keembay.c + +PCIE DRIVER FOR INTEL LGM GW SOC +M: Rahul Tanwar rtanwar@maxlinear.com +L: linux-pci@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml +F: drivers/pci/controller/dwc/pcie-intel-gw.c + PCIE DRIVER FOR MEDIATEK M: Ryder Lee ryder.lee@mediatek.com M: Jianjun Wang jianjun.wang@mediatek.com @@@ -14767,6 -14729,14 +14794,6 @@@ S: Maintaine W: http://www.st.com/spear F: drivers/pinctrl/spear/
-PISTACHIO SOC SUPPORT -M: James Hartley james.hartley@sondrel.com -L: linux-mips@vger.kernel.org -S: Odd Fixes -F: arch/mips/boot/dts/img/pistachio* -F: arch/mips/configs/pistachio*_defconfig -F: arch/mips/pistachio/ - PKTCDVD DRIVER M: linux-block@vger.kernel.org S: Orphan @@@ -14901,7 -14871,7 +14928,7 @@@ F: include/linux/dtpm. POWER STATE COORDINATION INTERFACE (PSCI) M: Mark Rutland mark.rutland@arm.com M: Lorenzo Pieralisi lorenzo.pieralisi@arm.com -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: drivers/firmware/psci/ F: include/linux/psci.h @@@ -14983,18 -14953,6 +15010,11 @@@ S: Maintaine F: include/linux/printk.h F: kernel/printk/
+PRINTK INDEXING +R: Chris Down chris@chrisdown.name +S: Maintained +F: kernel/printk/index.c + - PRISM54 WIRELESS DRIVER - M: Luis Chamberlain mcgrof@kernel.org - L: linux-wireless@vger.kernel.org - S: Obsolete - W: https://wireless.wiki.kernel.org/en/users/Drivers/p54 - F: drivers/net/wireless/intersil/prism54/ - PROC FILESYSTEM L: linux-kernel@vger.kernel.org L: linux-fsdevel@vger.kernel.org @@@ -15433,7 -15391,7 +15453,7 @@@ F: arch/hexagon
QUALCOMM HIDMA DRIVER M: Sinan Kaya okaya@kernel.org -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-arm-msm@vger.kernel.org L: dmaengine@vger.kernel.org S: Supported @@@ -16518,12 -16476,6 +16538,12 @@@ F: drivers/phy/samsung/phy-s5pv210-usb2 F: drivers/phy/samsung/phy-samsung-usb2.c F: drivers/phy/samsung/phy-samsung-usb2.h
+SANCLOUD BEAGLEBONE ENHANCED DEVICE TREE +M: Paul Barker paul.barker@sancloud.com +R: Marc Murphy marc.murphy@sancloud.com +S: Supported +F: arch/arm/boot/dts/am335x-sancloud* + SC1200 WDT DRIVER M: Zwane Mwaikambo zwanem@gmail.com S: Maintained @@@ -17129,7 -17081,7 +17149,7 @@@ SECURE MONITOR CALL(SMC) CALLING CONVEN M: Mark Rutland mark.rutland@arm.com M: Lorenzo Pieralisi lorenzo.pieralisi@arm.com M: Sudeep Holla sudeep.holla@arm.com -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: drivers/firmware/smccc/ F: include/linux/arm-smccc.h @@@ -17246,7 -17198,7 +17266,7 @@@ F: drivers/media/pci/solo6x10
SOFTWARE DELEGATED EXCEPTION INTERFACE (SDEI) M: James Morse james.morse@arm.com -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/arm/firmware/sdei.txt F: drivers/firmware/arm_sdei.c @@@ -17361,15 -17313,6 +17381,15 @@@ T: git git://linuxtv.org/media_tree.gi F: Documentation/devicetree/bindings/media/i2c/sony,imx334.yaml F: drivers/media/i2c/imx334.c
+SONY IMX335 SENSOR DRIVER +M: Paul J. Murphy paul.j.murphy@intel.com +M: Daniele Alessandrelli daniele.alessandrelli@intel.com +L: linux-media@vger.kernel.org +S: Maintained +T: git git://linuxtv.org/media_tree.git +F: Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml +F: drivers/media/i2c/imx335.c + SONY IMX355 SENSOR DRIVER M: Tianshu Qiu tian.shu.qiu@intel.com L: linux-media@vger.kernel.org @@@ -17377,15 -17320,6 +17397,15 @@@ S: Maintaine T: git git://linuxtv.org/media_tree.git F: drivers/media/i2c/imx355.c
+SONY IMX412 SENSOR DRIVER +M: Paul J. Murphy paul.j.murphy@intel.com +M: Daniele Alessandrelli daniele.alessandrelli@intel.com +L: linux-media@vger.kernel.org +S: Maintained +T: git git://linuxtv.org/media_tree.git +F: Documentation/devicetree/bindings/media/i2c/sony,imx412.yaml +F: drivers/media/i2c/imx412.c + SONY MEMORYSTICK SUBSYSTEM M: Maxim Levitsky maximlevitsky@gmail.com M: Alex Dubov oakad@yahoo.com @@@ -18032,7 -17966,7 +18052,7 @@@ F: drivers/mfd/syscon. SYSTEM CONTROL & POWER/MANAGEMENT INTERFACE (SCPI/SCMI) Message Protocol drivers M: Sudeep Holla sudeep.holla@arm.com R: Cristian Marussi cristian.marussi@arm.com -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/firmware/arm,sc[mp]i.yaml F: drivers/clk/clk-sc[mp]i.c @@@ -18043,7 -17977,6 +18063,7 @@@ F: drivers/regulator/scmi-regulator. F: drivers/reset/reset-scmi.c F: include/linux/sc[mp]i_protocol.h F: include/trace/events/scmi.h +F: include/uapi/linux/virtio_scmi.h
SYSTEM RESET/SHUTDOWN DRIVERS M: Sebastian Reichel sre@kernel.org @@@ -18405,7 -18338,7 +18425,7 @@@ TEXAS INSTRUMENTS' SYSTEM CONTROL INTER M: Nishanth Menon nm@ti.com M: Tero Kristo kristo@kernel.org M: Santosh Shilimkar ssantosh@kernel.org -L: linux-arm-kernel@lists.infradead.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: Documentation/devicetree/bindings/arm/keystone/ti,k3-sci-common.yaml F: Documentation/devicetree/bindings/arm/keystone/ti,sci.txt @@@ -18462,7 -18395,6 +18482,7 @@@ F: drivers/thermal F: include/linux/cpu_cooling.h F: include/linux/thermal.h F: include/uapi/linux/thermal.h +F: tools/thermal/
THERMAL DRIVER FOR AMLOGIC SOCS M: Guillaume La Roque glaroque@baylibre.com @@@ -18895,14 -18827,6 +18915,14 @@@ F: arch/x86/mm/testmmiotrace. F: include/linux/mmiotrace.h F: kernel/trace/trace_mmiotrace.c
+TRADITIONAL CHINESE DOCUMENTATION +M: Hu Haowen src.res@email.cn +L: linux-doc-tw-discuss@lists.sourceforge.net +S: Maintained +W: https://github.com/srcres258/linux-doc +T: git git://github.com/srcres258/linux-doc.git doc-zh-tw +F: Documentation/translations/zh_TW/ + TRIVIAL PATCHES M: Jiri Kosina trivial@kernel.org S: Maintained @@@ -19812,15 -19736,6 +19832,15 @@@ S: Maintaine F: include/uapi/linux/virtio_snd.h F: sound/virtio/*
+VIRTIO I2C DRIVER +M: Jie Deng jie.deng@intel.com +M: Viresh Kumar viresh.kumar@linaro.org +L: linux-i2c@vger.kernel.org +L: virtualization@lists.linux-foundation.org +S: Maintained +F: drivers/i2c/busses/i2c-virtio.c +F: include/uapi/linux/virtio_i2c.h + VIRTUAL BOX GUEST DEVICE DRIVER M: Hans de Goede hdegoede@redhat.com M: Arnd Bergmann arnd@arndb.de diff --combined arch/arm64/boot/dts/freescale/imx8mm.dtsi index e956dcf4b208,1608a48495b6..2f632e8ca388 --- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi @@@ -192,9 -192,10 +192,9 @@@ };
pmu { - compatible = "arm,armv8-pmuv3"; + compatible = "arm,cortex-a53-pmu"; interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>; - interrupt-affinity = <&A53_0>, <&A53_1>, <&A53_2>, <&A53_3>; };
timer { @@@ -240,7 -241,6 +240,7 @@@ };
usbphynop1: usbphynop1 { + #phy-cells = <0>; compatible = "usb-nop-xceiv"; clocks = <&clk IMX8MM_CLK_USB_PHY_REF>; assigned-clocks = <&clk IMX8MM_CLK_USB_PHY_REF>; @@@ -249,7 -249,6 +249,7 @@@ };
usbphynop2: usbphynop2 { + #phy-cells = <0>; compatible = "usb-nop-xceiv"; clocks = <&clk IMX8MM_CLK_USB_PHY_REF>; assigned-clocks = <&clk IMX8MM_CLK_USB_PHY_REF>; @@@ -921,7 -920,7 +921,7 @@@ };
fec1: ethernet@30be0000 { - compatible = "fsl,imx8mm-fec", "fsl,imx6sx-fec"; + compatible = "fsl,imx8mm-fec", "fsl,imx8mq-fec", "fsl,imx6sx-fec"; reg = <0x30be0000 0x10000>; interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>, @@@ -969,7 -968,7 +969,7 @@@ clock-names = "usb1_ctrl_root_clk"; assigned-clocks = <&clk IMX8MM_CLK_USB_BUS>; assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_500M>; - fsl,usbphy = <&usbphynop1>; + phys = <&usbphynop1>; fsl,usbmisc = <&usbmisc1 0>; status = "disabled"; }; @@@ -988,7 -987,7 +988,7 @@@ clock-names = "usb1_ctrl_root_clk"; assigned-clocks = <&clk IMX8MM_CLK_USB_BUS>; assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_500M>; - fsl,usbphy = <&usbphynop2>; + phys = <&usbphynop2>; fsl,usbmisc = <&usbmisc2 0>; status = "disabled"; }; diff --combined arch/arm64/boot/dts/freescale/imx8mn.dtsi index 2d154a3bf0ea,e6de293865b0..da6c942fb7f9 --- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi @@@ -190,6 -190,7 +190,6 @@@ compatible = "arm,cortex-a53-pmu"; interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>; - interrupt-affinity = <&A53_0>, <&A53_1>, <&A53_2>, <&A53_3>; };
psci { @@@ -922,7 -923,7 +922,7 @@@ };
fec1: ethernet@30be0000 { - compatible = "fsl,imx8mn-fec", "fsl,imx6sx-fec"; + compatible = "fsl,imx8mn-fec", "fsl,imx8mq-fec", "fsl,imx6sx-fec"; reg = <0x30be0000 0x10000>; interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>, @@@ -970,7 -971,7 +970,7 @@@ clock-names = "usb1_ctrl_root_clk"; assigned-clocks = <&clk IMX8MN_CLK_USB_BUS>; assigned-clock-parents = <&clk IMX8MN_SYS_PLL2_500M>; - fsl,usbphy = <&usbphynop1>; + phys = <&usbphynop1>; fsl,usbmisc = <&usbmisc1 0>; status = "disabled"; }; @@@ -1038,7 -1039,6 +1038,7 @@@ };
usbphynop1: usbphynop1 { + #phy-cells = <0>; compatible = "usb-nop-xceiv"; clocks = <&clk IMX8MN_CLK_USB_PHY_REF>; assigned-clocks = <&clk IMX8MN_CLK_USB_PHY_REF>; diff --combined drivers/net/can/usb/esd_usb2.c index 95ae740fc311,7370981e9b34..c6068a251fbe --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@@ -224,8 -224,8 +224,8 @@@ static void esd_usb2_rx_event(struct es if (id == ESD_EV_CAN_ERROR_EXT) { u8 state = msg->msg.rx.data[0]; u8 ecc = msg->msg.rx.data[1]; - u8 txerr = msg->msg.rx.data[2]; - u8 rxerr = msg->msg.rx.data[3]; + u8 rxerr = msg->msg.rx.data[2]; + u8 txerr = msg->msg.rx.data[3];
skb = alloc_can_err_skb(priv->netdev, &cf); if (skb == NULL) { @@@ -476,7 -476,7 +476,7 @@@ static void esd_usb2_write_bulk_callbac netif_trans_update(netdev); }
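The esd_usb2_rx_event() hunk above swaps which payload bytes feed the RX and TX error counters: byte 2 carries the RX count and byte 3 the TX count, not the other way around. A reduced sketch of why the order matters, using the standard socketcan error-frame layout (data[6] = TX counter, data[7] = RX counter); this is a simplified illustration, not the full driver logic:

    #include <linux/can.h>

    /* Sketch: feed the device's counters into a socketcan error frame. */
    static void fill_err_counters(struct can_frame *cf, const u8 *rx_data)
    {
    	u8 rxerr = rx_data[2];	/* byte 2: RX error counter */
    	u8 txerr = rx_data[3];	/* byte 3: TX error counter */

    	cf->data[6] = txerr;	/* socketcan convention: TX count */
    	cf->data[7] = rxerr;	/* socketcan convention: RX count */
    }

With the old byte order the two counters were reported swapped, misattributing bus problems to the wrong direction.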
- static ssize_t show_firmware(struct device *d, + static ssize_t firmware_show(struct device *d, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(d); @@@ -487,9 -487,9 +487,9 @@@ (dev->version >> 8) & 0xf, dev->version & 0xff); } - static DEVICE_ATTR(firmware, 0444, show_firmware, NULL); + static DEVICE_ATTR_RO(firmware);
- static ssize_t show_hardware(struct device *d, + static ssize_t hardware_show(struct device *d, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(d); @@@ -500,9 -500,9 +500,9 @@@ (dev->version >> 24) & 0xf, (dev->version >> 16) & 0xff); } - static DEVICE_ATTR(hardware, 0444, show_hardware, NULL); + static DEVICE_ATTR_RO(hardware);
- static ssize_t show_nets(struct device *d, + static ssize_t nets_show(struct device *d, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(d); @@@ -510,7 -510,7 +510,7 @@@
return sprintf(buf, "%d", dev->net_count); } - static DEVICE_ATTR(nets, 0444, show_nets, NULL); + static DEVICE_ATTR_RO(nets);
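The three sysfs hunks above are why the callbacks were renamed from show_firmware() and friends to firmware_show() and so on: DEVICE_ATTR_RO(name) derives both the attribute object and its show routine from the attribute name. Roughly, simplified from include/linux/device.h and include/linux/sysfs.h (the real macros add const and ownership details):

    /* Simplified expansion of the read-only device attribute macro. */
    #define DEVICE_ATTR_RO(_name)					\
    	struct device_attribute dev_attr_##_name =		\
    		__ATTR(_name, 0444, _name##_show, NULL)

    /* So DEVICE_ATTR_RO(nets) requires a function named nets_show() and
     * produces the same dev_attr_nets object that the old open-coded
     * DEVICE_ATTR(nets, 0444, show_nets, NULL) did.
     */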
static int esd_usb2_send_msg(struct esd_usb2 *dev, struct esd_usb2_msg *msg) { diff --combined drivers/net/dsa/hirschmann/hellcreek.c index 7062db6a083c,3faff95fd49f..542cfc4ccb08 --- a/drivers/net/dsa/hirschmann/hellcreek.c +++ b/drivers/net/dsa/hirschmann/hellcreek.c @@@ -1345,6 -1345,7 +1345,7 @@@ static int hellcreek_setup(struct dsa_s * filtering setups are not supported. */ ds->vlan_filtering_is_global = true; + ds->needs_standalone_vlan_filtering = true;
/* Intercept _all_ PTP multicast traffic */ ret = hellcreek_setup_fdb(hellcreek); @@@ -1472,6 -1473,9 +1473,6 @@@ static void hellcreek_setup_gcl(struct u16 data; u8 gates;
- cur++; - next++; - if (i == schedule->num_entries) gates = initial->gate_mask ^ cur->gate_mask; @@@ -1500,9 -1504,6 +1501,9 @@@ (initial->gate_mask << TR_GCLCMD_INIT_GATE_STATES_SHIFT); hellcreek_write(hellcreek, data, TR_GCLCMD); + + cur++; + next++; } }
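The two hellcreek_setup_gcl() hunks above move the cur++/next++ cursor advance from the top of the loop body to the bottom, so each gate-control entry is consumed before the cursors move on. Reduced to the idiom (hypothetical names, not the driver's exact code):

    /* Before: cursors advanced before the entry was used,
     * so programming started at the wrong entry.
     */
    for (i = 1; i <= n; i++) {
    	cur++; next++;
    	program_entry(cur, next);
    }

    /* After: consume the current entry first, then advance. */
    for (i = 1; i <= n; i++) {
    	program_entry(cur, next);
    	cur++; next++;
    }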
@@@ -1550,7 -1551,7 +1551,7 @@@ static bool hellcreek_schedule_startabl /* Calculate difference to admin base time */ base_time_ns = ktime_to_ns(hellcreek_port->current_schedule->base_time);
- return base_time_ns - current_ns < (s64)8 * NSEC_PER_SEC; + return base_time_ns - current_ns < (s64)4 * NSEC_PER_SEC; }
static void hellcreek_start_schedule(struct hellcreek *hellcreek, int port) diff --combined drivers/net/ethernet/broadcom/bnx2.c index 5749b27e0cda,a705e2615307..8c83973adca5 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@@ -2730,7 -2730,7 +2730,7 @@@ bnx2_alloc_rx_page(struct bnx2 *bp, str if (!page) return -ENOMEM; mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (dma_mapping_error(&bp->pdev->dev, mapping)) { __free_page(page); return -EIO; @@@ -2753,7 -2753,7 +2753,7 @@@ bnx2_free_rx_page(struct bnx2 *bp, stru return;
dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping), - PAGE_SIZE, PCI_DMA_FROMDEVICE); + PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(page); rx_pg->page = NULL; @@@ -2775,7 -2775,7 +2775,7 @@@ bnx2_alloc_rx_data(struct bnx2 *bp, str mapping = dma_map_single(&bp->pdev->dev, get_l2_fhdr(data), bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (dma_mapping_error(&bp->pdev->dev, mapping)) { kfree(data); return -EIO; @@@ -2881,7 -2881,7 +2881,7 @@@ bnx2_tx_int(struct bnx2 *bp, struct bnx }
dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), - skb_headlen(skb), PCI_DMA_TODEVICE); + skb_headlen(skb), DMA_TO_DEVICE);
tx_buf->skb = NULL; last = tx_buf->nr_frags; @@@ -2895,7 -2895,7 +2895,7 @@@ dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_frag_size(&skb_shinfo(skb)->frags[i]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); }
sw_cons = BNX2_NEXT_TX_BD(sw_cons); @@@ -3003,7 -3003,7 +3003,7 @@@ bnx2_reuse_rx_data(struct bnx2 *bp, str
dma_sync_single_for_device(&bp->pdev->dev, dma_unmap_addr(cons_rx_buf, mapping), - BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE); + BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, DMA_FROM_DEVICE);
rxr->rx_prod_bseq += bp->rx_buf_use_size;
@@@ -3044,7 -3044,7 +3044,7 @@@ error }
dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); skb = build_skb(data, 0); if (!skb) { kfree(data); @@@ -3110,7 -3110,7 +3110,7 @@@ }
dma_unmap_page(&bp->pdev->dev, mapping_old, - PAGE_SIZE, PCI_DMA_FROMDEVICE); + PAGE_SIZE, DMA_FROM_DEVICE);
frag_size -= frag_len; skb->data_len += frag_len; @@@ -3180,7 -3180,7 +3180,7 @@@ bnx2_rx_int(struct bnx2 *bp, struct bnx
dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE);
next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons)); next_rx_buf = &rxr->rx_buf_ring[next_ring_idx]; @@@ -5449,7 -5449,7 +5449,7 @@@ bnx2_free_tx_skbs(struct bnx2 *bp dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_headlen(skb), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE);
tx_buf->skb = NULL;
@@@ -5460,7 -5460,7 +5460,7 @@@ dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_frag_size(&skb_shinfo(skb)->frags[k]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } dev_kfree_skb(skb); } @@@ -5491,7 -5491,7 +5491,7 @@@ bnx2_free_rx_skbs(struct bnx2 *bp dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE);
rx_buf->data = NULL;
@@@ -5843,7 -5843,7 +5843,7 @@@ bnx2_run_loopback(struct bnx2 *bp, int packet[i] = (unsigned char) (i & 0xff);
map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); if (dma_mapping_error(&bp->pdev->dev, map)) { dev_kfree_skb(skb); return -EIO; @@@ -5882,7 -5882,7 +5882,7 @@@
udelay(5);
- dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE); + dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE); dev_kfree_skb(skb);
if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod) @@@ -5901,7 -5901,7 +5901,7 @@@
dma_sync_single_for_cpu(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), - bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); + bp->rx_buf_use_size, DMA_FROM_DEVICE);
if (rx_hdr->l2_fhdr_status & (L2_FHDR_ERRORS_BAD_CRC | @@@ -6660,7 -6660,8 +6660,8 @@@ bnx2_start_xmit(struct sk_buff *skb, st } else mss = 0;
- mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); + mapping = dma_map_single(&bp->pdev->dev, skb->data, len, + DMA_TO_DEVICE); if (dma_mapping_error(&bp->pdev->dev, mapping)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@@ -6741,7 -6742,7 +6742,7 @@@ dma_error tx_buf = &txr->tx_buf_ring[ring_prod]; tx_buf->skb = NULL; dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), - skb_headlen(skb), PCI_DMA_TODEVICE); + skb_headlen(skb), DMA_TO_DEVICE);
/* unmap remaining mapped pages */ for (i = 0; i < last_frag; i++) { @@@ -6750,7 -6751,7 +6751,7 @@@ tx_buf = &txr->tx_buf_ring[ring_prod]; dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_frag_size(&skb_shinfo(skb)->frags[i]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); }
dev_kfree_skb_any(skb); @@@ -7241,8 -7242,10 +7242,10 @@@ bnx2_set_eeprom(struct net_device *dev return rc; }
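All of the bnx2.c hunks above — and the analogous bnxt and tg3 hunks later in this diff — are the same mechanical conversion: the long-deprecated pci_* DMA wrappers and PCI_DMA_* direction flags become the generic DMA API on &pdev->dev with enum dma_data_direction. In isolation, the pattern the drivers converge on looks like this (hypothetical helper names, a sketch rather than driver code):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    /* Sketch of the generic-API mapping idiom. */
    static int map_for_tx(struct pci_dev *pdev, void *buf, size_t len,
    		      dma_addr_t *mapping)
    {
    	*mapping = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
    	if (dma_mapping_error(&pdev->dev, *mapping))
    		return -EIO;	/* never hand a failed mapping to hardware */
    	return 0;
    }

    static void unmap_for_tx(struct pci_dev *pdev, dma_addr_t mapping,
    			 size_t len)
    {
    	dma_unmap_single(&pdev->dev, mapping, len, DMA_TO_DEVICE);
    }

Behavior is unchanged; PCI_DMA_TODEVICE and PCI_DMA_FROMDEVICE were already thin aliases for DMA_TO_DEVICE and DMA_FROM_DEVICE.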
- static int - bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) + static int bnx2_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bnx2 *bp = netdev_priv(dev);
@@@ -7263,8 -7266,10 +7266,10 @@@ return 0; }
- static int - bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) + static int bnx2_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bnx2 *bp = netdev_priv(dev);
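The two coalesce hunks above track an ethtool_ops interface change: get_coalesce and set_coalesce now also receive a struct kernel_ethtool_coalesce and a netlink extack. A driver that does not use the new arguments only needs the wider signature; a minimal hedged sketch with a made-up driver name:

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    /* Sketch: hypothetical driver, extra parameters accepted but unused. */
    static int foo_get_coalesce(struct net_device *dev,
    			    struct ethtool_coalesce *coal,
    			    struct kernel_ethtool_coalesce *kernel_coal,
    			    struct netlink_ext_ack *extack)
    {
    	coal->rx_coalesce_usecs = 18;	/* made-up value for the sketch */
    	return 0;
    }

    static const struct ethtool_ops foo_ethtool_ops = {
    	.get_coalesce	= foo_get_coalesce,
    };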
@@@ -8033,9 -8038,9 +8038,9 @@@ bnx2_get_pci_speed(struct bnx2 *bp static void bnx2_read_vpd_fw_ver(struct bnx2 *bp) { + unsigned int len; int rc, i, j; u8 *data; - unsigned int block_end, rosize, len;
#define BNX2_VPD_NVRAM_OFFSET 0x300 #define BNX2_VPD_LEN 128 @@@ -8052,21 -8057,38 +8057,21 @@@ for (i = 0; i < BNX2_VPD_LEN; i += 4) swab32s((u32 *)&data[i]);
- i = pci_vpd_find_tag(data, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA); - if (i < 0) - goto vpd_done; - - rosize = pci_vpd_lrdt_size(&data[i]); - i += PCI_VPD_LRDT_TAG_SIZE; - block_end = i + rosize; - - if (block_end > BNX2_VPD_LEN) - goto vpd_done; - - j = pci_vpd_find_info_keyword(data, i, rosize, - PCI_VPD_RO_KEYWORD_MFR_ID); + j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN, + PCI_VPD_RO_KEYWORD_MFR_ID, &len); if (j < 0) goto vpd_done;
- len = pci_vpd_info_field_size(&data[j]); - - j += PCI_VPD_INFO_FLD_HDR_SIZE; - if (j + len > block_end || len != 4 || - memcmp(&data[j], "1028", 4)) + if (len != 4 || memcmp(&data[j], "1028", 4)) goto vpd_done;
- j = pci_vpd_find_info_keyword(data, i, rosize, - PCI_VPD_RO_KEYWORD_VENDOR0); + j = pci_vpd_find_ro_info_keyword(data, BNX2_VPD_LEN, + PCI_VPD_RO_KEYWORD_VENDOR0, + &len); if (j < 0) goto vpd_done;
- len = pci_vpd_info_field_size(&data[j]); - - j += PCI_VPD_INFO_FLD_HDR_SIZE; - if (j + len > block_end || len > BNX2_MAX_VER_SLEN) + if (len > BNX2_MAX_VER_SLEN) goto vpd_done;
memcpy(bp->fw_version, &data[j], len); @@@ -8202,15 -8224,15 +8207,15 @@@ bnx2_init_board(struct pci_dev *pdev, s persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
/* Configure DMA attributes. */ - if (pci_set_dma_mask(pdev, dma_mask) == 0) { + if (dma_set_mask(&pdev->dev, dma_mask) == 0) { dev->features |= NETIF_F_HIGHDMA; - rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask); + rc = dma_set_coherent_mask(&pdev->dev, persist_dma_mask); if (rc) { dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed, aborting\n"); goto err_out_unmap; } - } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { + } else if ((rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) { dev_err(&pdev->dev, "System does not support DMA, aborting\n"); goto err_out_unmap; } @@@ -8524,7 -8546,7 +8529,7 @@@ static const struct net_device_ops bnx2 .ndo_stop = bnx2_close, .ndo_get_stats64 = bnx2_get_stats64, .ndo_set_rx_mode = bnx2_set_rx_mode, - .ndo_do_ioctl = bnx2_ioctl, + .ndo_eth_ioctl = bnx2_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = bnx2_change_mac_addr, .ndo_change_mtu = bnx2_change_mtu, diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index b6d908956e54,6d98134913cd..ae87296ae1ff --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@@ -12189,35 -12189,86 +12189,35 @@@ static int bnx2x_get_hwinfo(struct bnx2
static void bnx2x_read_fwinfo(struct bnx2x *bp) { - int cnt, i, block_end, rodi; - char vpd_start[BNX2X_VPD_LEN+1]; - char str_id_reg[VENDOR_ID_LEN+1]; - char str_id_cap[VENDOR_ID_LEN+1]; - char *vpd_data; - char *vpd_extended_data = NULL; - u8 len; - - cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start); - memset(bp->fw_ver, 0, sizeof(bp->fw_ver)); - - if (cnt < BNX2X_VPD_LEN) - goto out_not_found; - - /* VPD RO tag should be first tag after identifier string, hence - * we should be able to find it in first BNX2X_VPD_LEN chars - */ - i = pci_vpd_find_tag(vpd_start, BNX2X_VPD_LEN, PCI_VPD_LRDT_RO_DATA); - if (i < 0) - goto out_not_found; - - block_end = i + PCI_VPD_LRDT_TAG_SIZE + - pci_vpd_lrdt_size(&vpd_start[i]); - - i += PCI_VPD_LRDT_TAG_SIZE; - - if (block_end > BNX2X_VPD_LEN) { - vpd_extended_data = kmalloc(block_end, GFP_KERNEL); - if (vpd_extended_data == NULL) - goto out_not_found; - - /* read rest of vpd image into vpd_extended_data */ - memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN); - cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN, - block_end - BNX2X_VPD_LEN, - vpd_extended_data + BNX2X_VPD_LEN); - if (cnt < (block_end - BNX2X_VPD_LEN)) - goto out_not_found; - vpd_data = vpd_extended_data; - } else - vpd_data = vpd_start; + char str_id[VENDOR_ID_LEN + 1]; + unsigned int vpd_len, kw_len; + u8 *vpd_data; + int rodi;
- /* now vpd_data holds full vpd content in both cases */ - - rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, - PCI_VPD_RO_KEYWORD_MFR_ID); - if (rodi < 0) - goto out_not_found; + memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
- len = pci_vpd_info_field_size(&vpd_data[rodi]); + vpd_data = pci_vpd_alloc(bp->pdev, &vpd_len); + if (IS_ERR(vpd_data)) + return;
- if (len != VENDOR_ID_LEN) + rodi = pci_vpd_find_ro_info_keyword(vpd_data, vpd_len, + PCI_VPD_RO_KEYWORD_MFR_ID, &kw_len); + if (rodi < 0 || kw_len != VENDOR_ID_LEN) goto out_not_found;
- rodi += PCI_VPD_INFO_FLD_HDR_SIZE; - /* vendor specific info */ - snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL); - snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL); - if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) || - !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) { - - rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end, - PCI_VPD_RO_KEYWORD_VENDOR0); - if (rodi >= 0) { - len = pci_vpd_info_field_size(&vpd_data[rodi]); - - rodi += PCI_VPD_INFO_FLD_HDR_SIZE; - - if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) { - memcpy(bp->fw_ver, &vpd_data[rodi], len); - bp->fw_ver[len] = ' '; - } + snprintf(str_id, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL); + if (!strncasecmp(str_id, &vpd_data[rodi], VENDOR_ID_LEN)) { + rodi = pci_vpd_find_ro_info_keyword(vpd_data, vpd_len, + PCI_VPD_RO_KEYWORD_VENDOR0, + &kw_len); + if (rodi >= 0 && kw_len < sizeof(bp->fw_ver)) { + memcpy(bp->fw_ver, &vpd_data[rodi], kw_len); + bp->fw_ver[kw_len] = ' '; } - kfree(vpd_extended_data); - return; } out_not_found: - kfree(vpd_extended_data); - return; + kfree(vpd_data); }
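The bnx2 and bnx2x rewrites above (and the bnxt one further down) drop the hand-rolled VPD tag walking in favor of the PCI core helpers: pci_vpd_alloc() reads the whole VPD image, and pci_vpd_find_ro_info_keyword() returns the offset and length of a read-only keyword in one call, with the bounds checking done centrally. The consumer pattern, reduced to a sketch (copy_vpd_keyword is a hypothetical helper):

    #include <linux/err.h>
    #include <linux/pci.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    /* Sketch: look up one read-only VPD keyword and copy it out. */
    static void copy_vpd_keyword(struct pci_dev *pdev, const char *kw,
    			     char *out, size_t outsz)
    {
    	unsigned int vpd_len, kw_len;
    	u8 *vpd;
    	int pos;

    	vpd = pci_vpd_alloc(pdev, &vpd_len);	/* reads the full VPD image */
    	if (IS_ERR(vpd))
    		return;

    	pos = pci_vpd_find_ro_info_keyword(vpd, vpd_len, kw, &kw_len);
    	if (pos >= 0 && kw_len < outsz)
    		memcpy(out, &vpd[pos], kw_len);

    	kfree(vpd);
    }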
static void bnx2x_set_modes_bitmap(struct bnx2x *bp) @@@ -12997,7 -13048,7 +12997,7 @@@ static const struct net_device_ops bnx2 .ndo_set_rx_mode = bnx2x_set_rx_mode, .ndo_set_mac_address = bnx2x_change_mac_addr, .ndo_validate_addr = bnx2x_validate_addr, - .ndo_do_ioctl = bnx2x_ioctl, + .ndo_eth_ioctl = bnx2x_ioctl, .ndo_change_mtu = bnx2x_change_mtu, .ndo_fix_features = bnx2x_fix_features, .ndo_set_features = bnx2x_set_features, diff --combined drivers/net/ethernet/broadcom/bnxt/bnxt.c index ad52c5d1354e,ee66d410c82c..eecd91ff840a --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@@ -278,6 -278,8 +278,8 @@@ static const u16 bnxt_async_events_arr[ ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION, ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG, ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST, + ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP, + ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT, };
static struct workqueue_struct *bnxt_pf_wq; @@@ -670,7 -672,7 +672,7 @@@ tx_dma_error prod = txr->tx_prod; tx_buf = &txr->tx_buf_ring[prod]; dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), - skb_headlen(skb), PCI_DMA_TODEVICE); + skb_headlen(skb), DMA_TO_DEVICE); prod = NEXT_TX(prod);
/* unmap remaining mapped pages */ @@@ -679,7 -681,7 +681,7 @@@ tx_buf = &txr->tx_buf_ring[prod]; dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_frag_size(&skb_shinfo(skb)->frags[i]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); }
tx_free: @@@ -718,7 -720,7 +720,7 @@@ static void bnxt_tx_int(struct bnxt *bp }
dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), - skb_headlen(skb), PCI_DMA_TODEVICE); + skb_headlen(skb), DMA_TO_DEVICE); last = tx_buf->nr_frags;
for (j = 0; j < last; j++) { @@@ -728,7 -730,7 +730,7 @@@ &pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_frag_size(&skb_shinfo(skb)->frags[j]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { if (bp->flags & BNXT_FLAG_CHIP_P5) { @@@ -901,7 -903,7 +903,7 @@@ static inline int bnxt_alloc_rx_page(st }
mapping = dma_map_page_attrs(&pdev->dev, page, offset, - BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE, + BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE, DMA_ATTR_WEAK_ORDERING); if (dma_mapping_error(&pdev->dev, mapping)) { __free_page(page); @@@ -1141,7 -1143,7 +1143,7 @@@ static struct sk_buff *bnxt_rx_pages(st }
dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, - PCI_DMA_FROMDEVICE, + DMA_FROM_DEVICE, DMA_ATTR_WEAK_ORDERING);
skb->data_len += frag_len; @@@ -2074,6 -2076,19 +2076,19 @@@ static u16 bnxt_agg_ring_id_to_grp_idx( return INVALID_HW_RING_ID; }
+ static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) + { + switch (BNXT_EVENT_ERROR_REPORT_TYPE(data1)) { + case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL: + netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n", + BNXT_EVENT_INVALID_SIGNAL_DATA(data2)); + break; + default: + netdev_err(bp->dev, "FW reported unknown error type\n"); + break; + } + } + #define BNXT_GET_EVENT_PORT(data) \ ((data) & \ ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) @@@ -2234,6 -2249,14 +2249,14 @@@ static int bnxt_async_event_process(str } goto async_event_process_exit; } + case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: { + bnxt_ptp_pps_event(bp, data1, data2); + goto async_event_process_exit; + } + case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: { + bnxt_event_error_report(bp, data1, data2); + goto async_event_process_exit; + } default: goto async_event_process_exit; } @@@ -2690,7 -2713,7 +2713,7 @@@ static void bnxt_free_tx_skbs(struct bn dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), dma_unmap_len(tx_buf, len), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); xdp_return_frame(tx_buf->xdpf); tx_buf->action = 0; tx_buf->xdpf = NULL; @@@ -2715,7 -2738,7 +2738,7 @@@ dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_headlen(skb), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE);
last = tx_buf->nr_frags; j += 2; @@@ -2727,7 -2750,7 +2750,7 @@@ dma_unmap_page( &pdev->dev, dma_unmap_addr(tx_buf, mapping), - skb_frag_size(frag), PCI_DMA_TODEVICE); + skb_frag_size(frag), DMA_TO_DEVICE); } dev_kfree_skb(skb); } @@@ -2794,7 -2817,7 +2817,7 @@@ skip_rx_tpa_free continue;
dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, - BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE, + BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE, DMA_ATTR_WEAK_ORDERING);
rx_agg_buf->page = NULL; @@@ -3176,6 -3199,58 +3199,58 @@@ static int bnxt_alloc_tx_rings(struct b return 0; }
+ static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr) + { + kfree(cpr->cp_desc_ring); + cpr->cp_desc_ring = NULL; + kfree(cpr->cp_desc_mapping); + cpr->cp_desc_mapping = NULL; + } + + static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n) + { + cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL); + if (!cpr->cp_desc_ring) + return -ENOMEM; + cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping), + GFP_KERNEL); + if (!cpr->cp_desc_mapping) + return -ENOMEM; + return 0; + } + + static void bnxt_free_all_cp_arrays(struct bnxt *bp) + { + int i; + + if (!bp->bnapi) + return; + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + + if (!bnapi) + continue; + bnxt_free_cp_arrays(&bnapi->cp_ring); + } + } + + static int bnxt_alloc_all_cp_arrays(struct bnxt *bp) + { + int i, n = bp->cp_nr_pages; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + int rc; + + if (!bnapi) + continue; + rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n); + if (rc) + return rc; + } + return 0; + } + static void bnxt_free_cp_rings(struct bnxt *bp) { int i; @@@ -3203,6 -3278,7 +3278,7 @@@ if (cpr2) { ring = &cpr2->cp_ring_struct; bnxt_free_ring(bp, &ring->ring_mem); + bnxt_free_cp_arrays(cpr2); kfree(cpr2); cpr->cp_ring_arr[j] = NULL; } @@@ -3221,6 -3297,12 +3297,12 @@@ static struct bnxt_cp_ring_info *bnxt_a if (!cpr) return NULL;
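The new bnxt_alloc_cp_arrays()/bnxt_free_cp_arrays() pair above follows a common kernel idiom: the allocator may fail after the first kcalloc(), and the matching free routine is safe to call on a partially initialized structure because kfree(NULL) is a no-op and each pointer is reset after freeing. The idiom in isolation (hypothetical structure and names):

    #include <linux/slab.h>

    struct two_arrays { u32 *a; u64 *b; };

    static void free_pair(struct two_arrays *p)
    {
    	kfree(p->a); p->a = NULL;	/* kfree(NULL) is a no-op */
    	kfree(p->b); p->b = NULL;
    }

    static int alloc_pair(struct two_arrays *p, int n)
    {
    	p->a = kcalloc(n, sizeof(*p->a), GFP_KERNEL);
    	if (!p->a)
    		return -ENOMEM;
    	p->b = kcalloc(n, sizeof(*p->b), GFP_KERNEL);
    	if (!p->b)
    		return -ENOMEM;	/* caller unwinds via free_pair() */
    	return 0;
    }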
+ rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages); + if (rc) { + bnxt_free_cp_arrays(cpr); + kfree(cpr); + return NULL; + } ring = &cpr->cp_ring_struct; rmem = &ring->ring_mem; rmem->nr_pages = bp->cp_nr_pages; @@@ -3231,6 -3313,7 +3313,7 @@@ rc = bnxt_alloc_ring(bp, rmem); if (rc) { bnxt_free_ring(bp, rmem); + bnxt_free_cp_arrays(cpr); kfree(cpr); cpr = NULL; } @@@ -3663,9 -3746,15 +3746,15 @@@ void bnxt_set_ring_params(struct bnxt * if (jumbo_factor > agg_factor) agg_factor = jumbo_factor; } - agg_ring_size = ring_size * agg_factor; + if (agg_factor) { + if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) { + ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA; + netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n", + bp->rx_ring_size, ring_size); + bp->rx_ring_size = ring_size; + } + agg_ring_size = ring_size * agg_factor;
- if (agg_ring_size) { bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, RX_DESC_CNT); if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { @@@ -4266,6 -4355,7 +4355,7 @@@ static void bnxt_free_mem(struct bnxt * bnxt_free_tx_rings(bp); bnxt_free_rx_rings(bp); bnxt_free_cp_rings(bp); + bnxt_free_all_cp_arrays(bp); bnxt_free_ntp_fltrs(bp, irq_re_init); if (irq_re_init) { bnxt_free_ring_stats(bp); @@@ -4386,6 -4476,10 +4476,10 @@@ static int bnxt_alloc_mem(struct bnxt * goto alloc_mem_err; }
+ rc = bnxt_alloc_all_cp_arrays(bp); + if (rc) + goto alloc_mem_err; + bnxt_init_ring_struct(bp);
rc = bnxt_alloc_rx_rings(bp); @@@ -7531,9 -7625,14 +7625,14 @@@ static int __bnxt_hwrm_ptp_qcfg(struct rc = -ENODEV; goto no_ptp; } - return 0; + rc = bnxt_ptp_init(bp); + if (!rc) + return 0; + + netdev_warn(bp->dev, "PTP initialization failed.\n");
no_ptp: + bnxt_ptp_clear(bp); kfree(ptp); bp->ptp_cfg = NULL; return rc; @@@ -7576,6 -7675,8 +7675,8 @@@ static int __bnxt_hwrm_func_qcaps(struc flags_ext = le32_to_cpu(resp->flags_ext); if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED) bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; + if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED)) + bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
bp->tx_push_thresh = 0; if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && @@@ -7613,6 -7714,7 +7714,7 @@@ if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) { __bnxt_hwrm_ptp_qcfg(bp); } else { + bnxt_ptp_clear(bp); kfree(bp->ptp_cfg); bp->ptp_cfg = NULL; } @@@ -10316,15 -10418,9 +10418,9 @@@ static int bnxt_open(struct net_device if (rc) return rc;
- if (bnxt_ptp_init(bp)) { - netdev_warn(dev, "PTP initialization failed.\n"); - kfree(bp->ptp_cfg); - bp->ptp_cfg = NULL; - } rc = __bnxt_open_nic(bp, true, true); if (rc) { bnxt_hwrm_if_change(bp, false); - bnxt_ptp_clear(bp); } else { if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { @@@ -10415,7 -10511,6 +10511,6 @@@ static int bnxt_close(struct net_devic { struct bnxt *bp = netdev_priv(dev);
- bnxt_ptp_clear(bp); bnxt_hwmon_close(bp); bnxt_close_nic(bp, true, true); bnxt_hwrm_shutdown_link(bp); @@@ -11405,7 -11500,6 +11500,6 @@@ static void bnxt_fw_reset_close(struct bnxt_clear_int_mode(bp); pci_disable_device(bp->pdev); } - bnxt_ptp_clear(bp); __bnxt_close_nic(bp, true, false); bnxt_vf_reps_free(bp); bnxt_clear_int_mode(bp); @@@ -11441,13 -11535,20 +11535,20 @@@ static bool is_bnxt_fw_ok(struct bnxt * static void bnxt_force_fw_reset(struct bnxt *bp) { struct bnxt_fw_health *fw_health = bp->fw_health; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; u32 wait_dsecs;
if (!test_bit(BNXT_STATE_OPEN, &bp->state) || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) return;
- set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + if (ptp) { + spin_lock_bh(&ptp->ptp_lock); + set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + spin_unlock_bh(&ptp->ptp_lock); + } else { + set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + } bnxt_fw_reset_close(bp); wait_dsecs = fw_health->master_func_wait_dsecs; if (fw_health->master) { @@@ -11503,9 -11604,16 +11604,16 @@@ void bnxt_fw_reset(struct bnxt *bp bnxt_rtnl_lock_sp(bp); if (test_bit(BNXT_STATE_OPEN, &bp->state) && !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; int n = 0, tmo;
- set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + if (ptp) { + spin_lock_bh(&ptp->ptp_lock); + set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + spin_unlock_bh(&ptp->ptp_lock); + } else { + set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + } if (bp->pf.active_vfs && !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) n = bnxt_get_registered_vfs(bp); @@@ -12177,6 -12285,7 +12285,7 @@@ static void bnxt_fw_reset_task(struct w bnxt_reenable_sriov(bp); bnxt_vf_reps_alloc(bp); bnxt_vf_reps_open(bp); + bnxt_ptp_reapply_pps(bp); bnxt_dl_health_recovery_done(bp); bnxt_dl_health_status_update(bp, true); rtnl_unlock(); @@@ -12708,7 -12817,7 +12817,7 @@@ static const struct net_device_ops bnxt .ndo_stop = bnxt_close, .ndo_get_stats64 = bnxt_get_stats64, .ndo_set_rx_mode = bnxt_set_rx_mode, - .ndo_do_ioctl = bnxt_ioctl, + .ndo_eth_ioctl = bnxt_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = bnxt_change_mac_addr, .ndo_change_mtu = bnxt_change_mtu, @@@ -12747,6 -12856,7 +12856,7 @@@ static void bnxt_remove_one(struct pci_ if (BNXT_PF(bp)) devlink_port_type_clear(&bp->dl_port);
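Both reset entry points above now set BNXT_STATE_IN_FW_RESET under ptp->ptp_lock when PTP is active. The point is the reader side: a timestamp path that samples the PHC under the same spinlock can then never observe the device mid-reset. A hedged sketch of such a reader (read_phc() is a hypothetical stand-in, not a bnxt function):

    /* Sketch: reader that must not race with reset entry. */
    spin_lock_bh(&ptp->ptp_lock);
    if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
    	ns = read_phc(bp);	/* hypothetical PHC read helper */
    spin_unlock_bh(&ptp->ptp_lock);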
+ bnxt_ptp_clear(bp); pci_disable_pcie_error_reporting(pdev); unregister_netdev(dev); clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); @@@ -13061,35 -13171,66 +13171,35 @@@ static int bnxt_init_mac_addr(struct bn return rc; }
-#define BNXT_VPD_LEN 512 static void bnxt_vpd_read_info(struct bnxt *bp) { struct pci_dev *pdev = bp->pdev; - int i, len, pos, ro_size, size; - ssize_t vpd_size; + unsigned int vpd_size, kw_len; + int pos, size; u8 *vpd_data;
- vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL); - if (!vpd_data) + vpd_data = pci_vpd_alloc(pdev, &vpd_size); + if (IS_ERR(vpd_data)) { + pci_warn(pdev, "Unable to read VPD\n"); return; - - vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data); - if (vpd_size <= 0) { - netdev_err(bp->dev, "Unable to read VPD\n"); - goto exit; - } - - i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA); - if (i < 0) { - netdev_err(bp->dev, "VPD READ-Only not found\n"); - goto exit; - } - - i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA); - if (i < 0) { - netdev_err(bp->dev, "VPD READ-Only not found\n"); - goto exit; }
- ro_size = pci_vpd_lrdt_size(&vpd_data[i]); - i += PCI_VPD_LRDT_TAG_SIZE; - if (i + ro_size > vpd_size) - goto exit; - - pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size, - PCI_VPD_RO_KEYWORD_PARTNO); + pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, + PCI_VPD_RO_KEYWORD_PARTNO, &kw_len); if (pos < 0) goto read_sn;
- len = pci_vpd_info_field_size(&vpd_data[pos]); - pos += PCI_VPD_INFO_FLD_HDR_SIZE; - if (len + pos > vpd_size) - goto read_sn; - - size = min(len, BNXT_VPD_FLD_LEN - 1); + size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1); memcpy(bp->board_partno, &vpd_data[pos], size);
read_sn: - pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size, - PCI_VPD_RO_KEYWORD_SERIALNO); + pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, + PCI_VPD_RO_KEYWORD_SERIALNO, + &kw_len); if (pos < 0) goto exit;
- len = pci_vpd_info_field_size(&vpd_data[pos]); - pos += PCI_VPD_INFO_FLD_HDR_SIZE; - if (len + pos > vpd_size) - goto exit; - - size = min(len, BNXT_VPD_FLD_LEN - 1); + size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1); memcpy(bp->board_serialno, &vpd_data[pos], size); exit: kfree(vpd_data); @@@ -13334,6 -13475,7 +13444,7 @@@ init_err_pci_clean bnxt_free_hwrm_short_cmd_req(bp); bnxt_free_hwrm_resources(bp); bnxt_ethtool_free(bp); + bnxt_ptp_clear(bp); kfree(bp->ptp_cfg); bp->ptp_cfg = NULL; kfree(bp->fw_health); diff --combined drivers/net/ethernet/broadcom/tg3.c index 8b08c1d47b7b,8a238e349e02..5e0e0e70d801 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@@ -6564,10 -6564,8 +6564,8 @@@ static void tg3_tx(struct tg3_napi *tna skb_tstamp_tx(skb, &timestamp); }
- pci_unmap_single(tp->pdev, - dma_unmap_addr(ri, mapping), - skb_headlen(skb), - PCI_DMA_TODEVICE); + dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), + skb_headlen(skb), DMA_TO_DEVICE);
ri->skb = NULL;
@@@ -6584,10 -6582,10 +6582,10 @@@ if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) tx_bug = 1;
- pci_unmap_page(tp->pdev, + dma_unmap_page(&tp->pdev->dev, dma_unmap_addr(ri, mapping), skb_frag_size(&skb_shinfo(skb)->frags[i]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE);
while (ri->fragmented) { ri->fragmented = false; @@@ -6646,8 -6644,8 +6644,8 @@@ static void tg3_rx_data_free(struct tg if (!ri->data) return;
- pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping), - map_sz, PCI_DMA_FROMDEVICE); + dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz, + DMA_FROM_DEVICE); tg3_frag_free(skb_size <= PAGE_SIZE, ri->data); ri->data = NULL; } @@@ -6711,11 -6709,9 +6709,9 @@@ static int tg3_alloc_rx_data(struct tg if (!data) return -ENOMEM;
- mapping = pci_map_single(tp->pdev, - data + TG3_RX_OFFSET(tp), - data_size, - PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) { + mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp), + data_size, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) { tg3_frag_free(skb_size <= PAGE_SIZE, data); return -EIO; } @@@ -6882,8 -6878,8 +6878,8 @@@ static int tg3_rx(struct tg3_napi *tnap if (skb_size < 0) goto drop_it;
- pci_unmap_single(tp->pdev, dma_addr, skb_size, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size, + DMA_FROM_DEVICE);
/* Ensure that the update to the data happens * after the usage of the old DMA mapping. @@@ -6908,11 -6904,13 +6904,13 @@@ goto drop_it_no_recycle;
skb_reserve(skb, TG3_RAW_IP_ALIGN); - pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len, + DMA_FROM_DEVICE); memcpy(skb->data, data + TG3_RX_OFFSET(tp), len); - pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&tp->pdev->dev, dma_addr, + len, DMA_FROM_DEVICE); }
skb_put(skb, len); @@@ -7762,10 -7760,8 +7760,8 @@@ static void tg3_tx_skb_unmap(struct tg3 skb = txb->skb; txb->skb = NULL;
- pci_unmap_single(tnapi->tp->pdev, - dma_unmap_addr(txb, mapping), - skb_headlen(skb), - PCI_DMA_TODEVICE); + dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping), + skb_headlen(skb), DMA_TO_DEVICE);
while (txb->fragmented) { txb->fragmented = false; @@@ -7779,9 -7775,9 +7775,9 @@@ entry = NEXT_TX(entry); txb = &tnapi->tx_buffers[entry];
- pci_unmap_page(tnapi->tp->pdev, + dma_unmap_page(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping), - skb_frag_size(frag), PCI_DMA_TODEVICE); + skb_frag_size(frag), DMA_TO_DEVICE);
while (txb->fragmented) { txb->fragmented = false; @@@ -7816,10 -7812,10 +7812,10 @@@ static int tigon3_dma_hwbug_workaround( ret = -1; } else { /* New SKB is guaranteed to be linear. */ - new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, - PCI_DMA_TODEVICE); + new_addr = dma_map_single(&tp->pdev->dev, new_skb->data, + new_skb->len, DMA_TO_DEVICE); /* Make sure the mapping succeeded */ - if (pci_dma_mapping_error(tp->pdev, new_addr)) { + if (dma_mapping_error(&tp->pdev->dev, new_addr)) { dev_kfree_skb_any(new_skb); ret = -1; } else { @@@ -8043,8 -8039,9 +8039,9 @@@ static netdev_tx_t tg3_start_xmit(struc
len = skb_headlen(skb);
- mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(tp->pdev, mapping)) + mapping = dma_map_single(&tp->pdev->dev, skb->data, len, + DMA_TO_DEVICE); + if (dma_mapping_error(&tp->pdev->dev, mapping)) goto drop;
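Every pci_map_*/pci_unmap_*/pci_dma_sync_* call in the tg3 hunks maps one-to-one onto the generic DMA API: pass &pdev->dev instead of the struct pci_dev, and an enum dma_data_direction instead of the PCI_DMA_* flag. The TX-side idiom, sketched in isolation:

	dma_addr_t mapping;

	/* map the linear part of the skb for device reads */
	mapping = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
				 DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, mapping))
		goto drop;		/* mapping failed: drop the packet */

	/* ... post 'mapping' to a hardware descriptor, then on completion: */
	dma_unmap_single(&pdev->dev, mapping, skb_headlen(skb), DMA_TO_DEVICE);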
@@@ -12791,7 -12788,7 +12788,7 @@@ static void tg3_get_ethtool_stats(struc memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats)); }
-static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen) +static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen) { int i; __be32 *buf; @@@ -12825,11 -12822,15 +12822,11 @@@ offset = TG3_NVM_VPD_OFF; len = TG3_NVM_VPD_LEN; } - } else { - len = TG3_NVM_PCI_VPD_MAX_LEN; - }
- buf = kmalloc(len, GFP_KERNEL); - if (buf == NULL) - return NULL; + buf = kmalloc(len, GFP_KERNEL); + if (!buf) + return NULL;
- if (magic == TG3_EEPROM_MAGIC) { for (i = 0; i < len; i += 4) { /* The data is in little-endian format in NVRAM. * Use the big-endian read routines to preserve @@@ -12840,9 -12841,12 +12837,9 @@@ } *vpdlen = len; } else { - ssize_t cnt; - - cnt = pci_read_vpd(tp->pdev, 0, len, (u8 *)buf); - if (cnt < 0) - goto error; - *vpdlen = cnt; + buf = pci_vpd_alloc(tp->pdev, vpdlen); + if (IS_ERR(buf)) + return NULL; }
return buf; @@@ -12864,10 -12868,9 +12861,10 @@@ error
static int tg3_test_nvram(struct tg3 *tp) { - u32 csum, magic, len; + u32 csum, magic; __be32 *buf; int i, j, k, err = 0, size; + unsigned int len;
if (tg3_flag(tp, NO_NVRAM)) return 0; @@@ -13010,10 -13013,33 +13007,10 @@@ if (!buf) return -ENOMEM;
- i = pci_vpd_find_tag((u8 *)buf, len, PCI_VPD_LRDT_RO_DATA); - if (i > 0) { - j = pci_vpd_lrdt_size(&((u8 *)buf)[i]); - if (j < 0) - goto out; - - if (i + PCI_VPD_LRDT_TAG_SIZE + j > len) - goto out; - - i += PCI_VPD_LRDT_TAG_SIZE; - j = pci_vpd_find_info_keyword((u8 *)buf, i, j, - PCI_VPD_RO_KEYWORD_CHKSUM); - if (j > 0) { - u8 csum8 = 0; - - j += PCI_VPD_INFO_FLD_HDR_SIZE; - - for (i = 0; i <= j; i++) - csum8 += ((u8 *)buf)[i]; - - if (csum8) - goto out; - } - } - - err = 0; - + err = pci_vpd_check_csum(buf, len); + /* go on if no checksum found */ + if (err == 1) + err = 0; out: kfree(buf); return err; @@@ -13470,8 -13496,8 +13467,8 @@@ static int tg3_run_loopback(struct tg3 for (i = data_off; i < tx_len; i++) tx_data[i] = (u8) (i & 0xff);
- map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(tp->pdev, map)) { + map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE); + if (dma_mapping_error(&tp->pdev->dev, map)) { dev_kfree_skb(skb); return -EIO; } @@@ -13569,8 -13595,8 +13566,8 @@@ } else goto out;
- pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len, + DMA_FROM_DEVICE);
rx_data += TG3_RX_OFFSET(tp); for (i = data_off; i < rx_len; i++, val++) { @@@ -14011,7 -14037,10 +14008,10 @@@ static int tg3_ioctl(struct net_device return -EOPNOTSUPP; }
- static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) + static int tg3_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct tg3 *tp = netdev_priv(dev);
@@@ -14019,7 -14048,10 +14019,10 @@@ return 0; }
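The two extra parameters added to tg3_get_coalesce()/tg3_set_coalesce() (and to the mvneta handlers below) come from the ethtool core now passing a kernel-only struct kernel_ethtool_coalesce plus a netlink extack into every coalesce callback; a driver that supports none of the new kernel-side fields simply ignores both. Sketch with a hypothetical driver:

static int foo_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct foo_priv *priv = netdev_priv(dev);	/* hypothetical private struct */

	ec->rx_coalesce_usecs = priv->rx_usecs;	/* legacy fields still live in *ec */
	return 0;
}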
- static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) + static int tg3_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct tg3 *tp = netdev_priv(dev); u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; @@@ -14261,7 -14293,7 +14264,7 @@@ static const struct net_device_ops tg3_ .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = tg3_set_rx_mode, .ndo_set_mac_address = tg3_set_mac_addr, - .ndo_do_ioctl = tg3_ioctl, + .ndo_eth_ioctl = tg3_ioctl, .ndo_tx_timeout = tg3_tx_timeout, .ndo_change_mtu = tg3_change_mtu, .ndo_fix_features = tg3_fix_features, @@@ -15592,36 -15624,64 +15595,36 @@@ skip_phy_reset static void tg3_read_vpd(struct tg3 *tp) { u8 *vpd_data; - unsigned int block_end, rosize, len; - u32 vpdlen; - int j, i = 0; + unsigned int len, vpdlen; + int i;
vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen); if (!vpd_data) goto out_no_vpd;
- i = pci_vpd_find_tag(vpd_data, vpdlen, PCI_VPD_LRDT_RO_DATA); + i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen, + PCI_VPD_RO_KEYWORD_MFR_ID, &len); if (i < 0) - goto out_not_found; - - rosize = pci_vpd_lrdt_size(&vpd_data[i]); - block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize; - i += PCI_VPD_LRDT_TAG_SIZE; + goto partno;
- if (block_end > vpdlen) - goto out_not_found; - - j = pci_vpd_find_info_keyword(vpd_data, i, rosize, - PCI_VPD_RO_KEYWORD_MFR_ID); - if (j > 0) { - len = pci_vpd_info_field_size(&vpd_data[j]); - - j += PCI_VPD_INFO_FLD_HDR_SIZE; - if (j + len > block_end || len != 4 || - memcmp(&vpd_data[j], "1028", 4)) - goto partno; - - j = pci_vpd_find_info_keyword(vpd_data, i, rosize, - PCI_VPD_RO_KEYWORD_VENDOR0); - if (j < 0) - goto partno; + if (len != 4 || memcmp(vpd_data + i, "1028", 4)) + goto partno;
- len = pci_vpd_info_field_size(&vpd_data[j]); - - j += PCI_VPD_INFO_FLD_HDR_SIZE; - if (j + len > block_end) - goto partno; + i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen, + PCI_VPD_RO_KEYWORD_VENDOR0, &len); + if (i < 0) + goto partno;
- if (len >= sizeof(tp->fw_ver)) - len = sizeof(tp->fw_ver) - 1; - memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); - snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, - &vpd_data[j]); - } + memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); + snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
partno: - i = pci_vpd_find_info_keyword(vpd_data, i, rosize, - PCI_VPD_RO_KEYWORD_PARTNO); + i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen, + PCI_VPD_RO_KEYWORD_PARTNO, &len); if (i < 0) goto out_not_found;
- len = pci_vpd_info_field_size(&vpd_data[i]); - - i += PCI_VPD_INFO_FLD_HDR_SIZE; - if (len > TG3_BPN_SIZE || - (len + i) > vpdlen) + if (len > TG3_BPN_SIZE) goto out_not_found;
memcpy(tp->board_part_number, &vpd_data[i], len); @@@ -17698,11 -17758,11 +17701,11 @@@ static int tg3_init_one(struct pci_dev
/* Configure DMA attributes. */ if (dma_mask > DMA_BIT_MASK(32)) { - err = pci_set_dma_mask(pdev, dma_mask); + err = dma_set_mask(&pdev->dev, dma_mask); if (!err) { features |= NETIF_F_HIGHDMA; - err = pci_set_consistent_dma_mask(pdev, - persist_dma_mask); + err = dma_set_coherent_mask(&pdev->dev, + persist_dma_mask); if (err < 0) { dev_err(&pdev->dev, "Unable to obtain 64 bit " "DMA for consistent allocations\n"); @@@ -17711,7 -17771,7 +17714,7 @@@ } } if (err || dma_mask == DMA_BIT_MASK(32)) { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); diff --combined drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index ac821c5532a4,c6fe0f2a4d0e..f6396ac64006 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@@ -526,7 -526,7 +526,7 @@@ static void octeon_destroy_resources(st oct->irq_name_storage = NULL; } /* Soft reset the octeon device before exiting */ - if (oct->pci_dev->reset_fn) + if (!pcie_reset_flr(oct->pci_dev, PCI_RESET_PROBE)) octeon_pci_flr(oct); else cn23xx_vf_ask_pf_to_do_flr(oct); @@@ -843,7 -843,7 +843,7 @@@ static void free_netsgbuf(void *buf while (frags--) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
- pci_unmap_page((lio->oct_dev)->pci_dev, + dma_unmap_page(&lio->oct_dev->pci_dev->dev, g->sg[(i >> 2)].ptr[(i & 3)], skb_frag_size(frag), DMA_TO_DEVICE); i++; @@@ -887,7 -887,7 +887,7 @@@ static void free_netsgbuf_with_resp(voi while (frags--) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
- pci_unmap_page((lio->oct_dev)->pci_dev, + dma_unmap_page(&lio->oct_dev->pci_dev->dev, g->sg[(i >> 2)].ptr[(i & 3)], skb_frag_size(frag), DMA_TO_DEVICE); i++; @@@ -1889,7 -1889,7 +1889,7 @@@ static const struct net_device_ops lion .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, .ndo_change_mtu = liquidio_change_mtu, - .ndo_do_ioctl = liquidio_ioctl, + .ndo_eth_ioctl = liquidio_ioctl, .ndo_fix_features = liquidio_fix_features, .ndo_set_features = liquidio_set_features, }; diff --combined drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 710cb00ce3a3,efa6c98d7459..0d9cda4ab303 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@@ -3872,7 -3872,7 +3872,7 @@@ static const struct net_device_ops cxgb .ndo_set_mac_address = cxgb_set_mac_addr, .ndo_set_features = cxgb_set_features, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = cxgb_ioctl, + .ndo_eth_ioctl = cxgb_ioctl, .ndo_change_mtu = cxgb_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = cxgb_netpoll, @@@ -4008,7 -4008,7 +4008,7 @@@ static void adap_free_hma_mem(struct ad
if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) { dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl, - adapter->hma.sgt->nents, PCI_DMA_BIDIRECTIONAL); + adapter->hma.sgt->nents, DMA_BIDIRECTIONAL); adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG; }
@@@ -5068,7 -5068,6 +5068,7 @@@ static int adap_init0(struct adapter *a ret = -ENOMEM; goto bye; } + bitmap_zero(adap->sge.blocked_fl, adap->sge.egr_sz); #endif
params[0] = FW_PARAM_PFVF(CLIP_START); @@@ -6163,8 -6162,7 +6163,7 @@@ static void print_port_info(const struc --bufp; sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
- netdev_info(dev, "%s: Chelsio %s (%s) %s\n", - dev->name, adap->params.vpd.id, adap->name, buf); + netdev_info(dev, "Chelsio %s %s\n", adap->params.vpd.id, buf); }
/* @@@ -6688,16 -6686,10 +6687,10 @@@ static int init_one(struct pci_dev *pde return 0; }
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { highdma = true; - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " - "coherent allocations\n"); - goto out_free_adapter; - } } else { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "no usable DMA configuration\n"); goto out_free_adapter; @@@ -6789,11 -6781,13 +6782,11 @@@
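The cxgb4 hunk above folds the pci_set_dma_mask()/pci_set_consistent_dma_mask() pair into dma_set_mask_and_coherent(), which sets the streaming and coherent masks together. The usual 64-bit-with-32-bit-fallback probe sequence then reads, as a sketch:

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		/* no 64-bit DMA: fall back to a 32-bit mask */
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			return err;
		}
	}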
setup_memwin(adapter); err = adap_init0(adapter, 0); -#ifdef CONFIG_DEBUG_FS - bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz); -#endif - setup_memwin_rdma(adapter); if (err) goto out_unmap_bar;
+ setup_memwin_rdma(adapter); + /* configure SGE_STAT_CFG_A to read WC stats */ if (!is_t4(adapter->params.chip)) t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) | diff --combined drivers/net/ethernet/cirrus/Kconfig index d8af9e64dd1e,dac1764ba740..c51b5e043c5e --- a/drivers/net/ethernet/cirrus/Kconfig +++ b/drivers/net/ethernet/cirrus/Kconfig @@@ -6,7 -6,7 +6,7 @@@ config NET_VENDOR_CIRRUS bool "Cirrus devices" default y - depends on ISA || EISA || ARM || MAC || COMPILE_TEST + depends on ISA || EISA || ARM || MAC help If you have a network (Ethernet) card belonging to this class, say Y.
@@@ -18,9 -18,16 +18,16 @@@ if NET_VENDOR_CIRRUS
config CS89x0 - tristate "CS89x0 support" - depends on ISA || EISA || ARM + tristate + + config CS89x0_ISA + tristate "CS89x0 ISA driver support" + depends on HAS_IOPORT_MAP + depends on ISA depends on !PPC32 + depends on CS89x0_PLATFORM=n + select NETDEV_LEGACY_INIT + select CS89x0 help Support for CS89x0 chipset based Ethernet cards. If you have a network (Ethernet) card of this type, say Y and read the file @@@ -30,15 -37,15 +37,15 @@@ will be called cs89x0.
config CS89x0_PLATFORM - bool "CS89x0 platform driver support" if HAS_IOPORT_MAP - default !HAS_IOPORT_MAP - depends on CS89x0 + tristate "CS89x0 platform driver support" + depends on ARM || COMPILE_TEST + select CS89x0 help - Say Y to compile the cs89x0 driver as a platform driver. This - makes this driver suitable for use on certain evaluation boards - such as the iMX21ADS. + Say Y to compile the cs89x0 platform driver. This makes this driver + suitable for use on certain evaluation boards such as the iMX21ADS.
- If you are unsure, say N. + To compile this driver as a module, choose M here. The module + will be called cs89x0.
config EP93XX_ETH tristate "EP93xx Ethernet support" diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index eb748aa35952,13042f1cac6f..444c46241afc --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@@ -169,17 -169,19 +169,19 @@@ static bool hclge_is_special_opcode(u1 /* these commands have several descriptors, * and use the first one to save opcode and return value */ - u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT, - HCLGE_OPC_STATS_32_BIT, - HCLGE_OPC_STATS_MAC, - HCLGE_OPC_STATS_MAC_ALL, - HCLGE_OPC_QUERY_32_BIT_REG, - HCLGE_OPC_QUERY_64_BIT_REG, - HCLGE_QUERY_CLEAR_MPF_RAS_INT, - HCLGE_QUERY_CLEAR_PF_RAS_INT, - HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT, - HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT, - HCLGE_QUERY_ALL_ERR_INFO}; + static const u16 spec_opcode[] = { + HCLGE_OPC_STATS_64_BIT, + HCLGE_OPC_STATS_32_BIT, + HCLGE_OPC_STATS_MAC, + HCLGE_OPC_STATS_MAC_ALL, + HCLGE_OPC_QUERY_32_BIT_REG, + HCLGE_OPC_QUERY_64_BIT_REG, + HCLGE_QUERY_CLEAR_MPF_RAS_INT, + HCLGE_QUERY_CLEAR_PF_RAS_INT, + HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT, + HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT, + HCLGE_QUERY_ALL_ERR_INFO + }; int i;
for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { @@@ -573,13 -575,9 +575,13 @@@ static void hclge_cmd_uninit_regs(struc
void hclge_cmd_uninit(struct hclge_dev *hdev) { + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + /* wait to ensure that the firmware completes the possible left + * over commands. + */ + msleep(HCLGE_CMDQ_CLEAR_WAIT_TIME); spin_lock_bh(&hdev->hw.cmq.csq.lock); spin_lock(&hdev->hw.cmq.crq.lock); - set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); hclge_cmd_uninit_regs(&hdev->hw); spin_unlock(&hdev->hw.cmq.crq.lock); spin_unlock_bh(&hdev->hw.cmq.csq.lock); diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index ac70d49e205d,8e5be127909b..53872c7b2940 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@@ -9,7 -9,6 +9,7 @@@ #include "hnae3.h"
#define HCLGE_CMDQ_TX_TIMEOUT 30000 +#define HCLGE_CMDQ_CLEAR_WAIT_TIME 200 #define HCLGE_DESC_DATA_LEN 6
struct hclge_dev; @@@ -271,9 -270,6 +271,9 @@@ enum hclge_opcode_type /* Led command */ HCLGE_OPC_LED_STATUS_CFG = 0xB000,
+ /* clear hardware resource command */ + HCLGE_OPC_CLEAR_HW_RESOURCE = 0x700B, + /* NCL config command */ HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
@@@ -320,6 -316,9 +320,9 @@@ /* PHY command */ HCLGE_OPC_PHY_LINK_KSETTING = 0x7025, HCLGE_OPC_PHY_REG = 0x7026, + + /* Query link diagnosis info command */ + HCLGE_OPC_QUERY_LINK_DIAGNOSIS = 0x702A, };
#define HCLGE_TQP_REG_OFFSET 0x80000 diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 03ae122f1c9a,f6882090d38e..1b6bb0d71fcb --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@@ -23,6 -23,7 +23,7 @@@ #include "hclge_tm.h" #include "hclge_err.h" #include "hnae3.h" + #include "hclge_devlink.h"
#define HCLGE_NAME "hclge" #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset))) @@@ -1550,7 -1551,6 +1551,7 @@@ static int hclge_configure(struct hclge hdev->tm_info.hw_pfc_map = 0; hdev->wanted_umv_size = cfg.umv_space; hdev->tx_spare_buf_size = cfg.tx_spare_buf_size; + hdev->gro_en = true; if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF) set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
@@@ -1619,7 -1619,7 +1620,7 @@@ static int hclge_config_tso(struct hclg return hclge_cmd_send(&hdev->hw, &desc, 1); }
-static int hclge_config_gro(struct hclge_dev *hdev, bool en) +static int hclge_config_gro(struct hclge_dev *hdev) { struct hclge_cfg_gro_status_cmd *req; struct hclge_desc desc; @@@ -1631,7 -1631,7 +1632,7 @@@ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false); req = (struct hclge_cfg_gro_status_cmd *)desc.data;
- req->gro_en = en ? 1 : 0; + req->gro_en = hdev->gro_en ? 1 : 0;
ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) @@@ -1814,6 -1814,7 +1815,7 @@@ static int hclge_vport_setup(struct hcl nic->pdev = hdev->pdev; nic->ae_algo = &ae_algo; nic->numa_node_mask = hdev->numa_node_mask; + nic->kinfo.io_base = hdev->hw.io_base;
ret = hclge_knic_setup(vport, num_tqps, hdev->num_tx_desc, hdev->num_rx_desc); @@@ -2953,12 -2954,12 +2955,12 @@@ static void hclge_update_link_status(st }
if (state != hdev->hw.mac.link) { + hdev->hw.mac.link = state; client->ops->link_status_change(handle, state); hclge_config_mac_tnl_int(hdev, state); if (rclient && rclient->ops->link_status_change) rclient->ops->link_status_change(rhandle, state);
- hdev->hw.mac.link = state; hclge_push_link_status(hdev); }
@@@ -3789,6 -3790,12 +3791,12 @@@ static void hclge_do_reset(struct hclge }
switch (hdev->reset_type) { + case HNAE3_IMP_RESET: + dev_info(&pdev->dev, "IMP reset requested\n"); + val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); + hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val); + break; case HNAE3_GLOBAL_RESET: dev_info(&pdev->dev, "global reset requested\n"); val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); @@@ -10074,11 -10081,7 +10082,11 @@@ static int hclge_init_vlan_config(struc static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, bool writen_to_tbl) { - struct hclge_vport_vlan_cfg *vlan; + struct hclge_vport_vlan_cfg *vlan, *tmp; + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) + if (vlan->vlan_id == vlan_id) + return;
vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); if (!vlan) @@@ -11448,28 -11451,6 +11456,28 @@@ static void hclge_clear_resetting_state } }
+static int hclge_clear_hw_resource(struct hclge_dev *hdev) +{ + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + /* This new command is only supported by new firmware; it will + * fail with older firmware. Error value -EOPNOTSUPP can only be + * returned by older firmware running this command, so to keep the + * code backward compatible we override this value and return + * success. + */ + if (ret && ret != -EOPNOTSUPP) { + dev_err(&hdev->pdev->dev, + "failed to clear hw resource, ret = %d\n", ret); + return ret; + } + return 0; +} + static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev) { if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) @@@ -11509,20 -11490,20 +11517,24 @@@ static int hclge_init_ae_dev(struct hna if (ret) goto out;
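hclge_clear_hw_resource() above is the standard shape for an optional firmware command: send it unconditionally and treat the old firmware's "unknown opcode" error as success. The pattern in isolation, with hypothetical names:

static int foo_try_optional_fw_cmd(struct foo_dev *fdev)
{
	int ret = foo_fw_send_cmd(fdev, FOO_OPC_OPTIONAL);	/* hypothetical */

	/* older firmware rejects the opcode; that must not fail probe */
	if (ret == -EOPNOTSUPP)
		return 0;
	return ret;
}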
+ ret = hclge_devlink_init(hdev); + if (ret) + goto err_pci_uninit; + /* Firmware command queue initialize */ ret = hclge_cmd_queue_init(hdev); if (ret) - goto err_pci_uninit; + goto err_devlink_uninit;
/* Firmware command initialize */ ret = hclge_cmd_init(hdev); if (ret) goto err_cmd_uninit;
+ ret = hclge_clear_hw_resource(hdev); + if (ret) + goto err_cmd_uninit; + ret = hclge_get_cap(hdev); if (ret) goto err_cmd_uninit; @@@ -11587,7 -11568,7 +11599,7 @@@ goto err_mdiobus_unreg; }
- ret = hclge_config_gro(hdev, true); + ret = hclge_config_gro(hdev); if (ret) goto err_mdiobus_unreg;
@@@ -11689,6 -11670,8 +11701,8 @@@ err_msi_uninit pci_free_irq_vectors(pdev); err_cmd_uninit: hclge_cmd_uninit(hdev); + err_devlink_uninit: + hclge_devlink_uninit(hdev); err_pci_uninit: pcim_iounmap(pdev, hdev->hw.io_base); pci_clear_master(pdev); @@@ -11968,7 -11951,7 +11982,7 @@@ static int hclge_reset_ae_dev(struct hn return ret; }
- ret = hclge_config_gro(hdev, true); + ret = hclge_config_gro(hdev); if (ret) return ret;
@@@ -12079,6 -12062,7 +12093,7 @@@ static void hclge_uninit_ae_dev(struct
hclge_cmd_uninit(hdev); hclge_misc_irq_uninit(hdev); + hclge_devlink_uninit(hdev); hclge_pci_uninit(hdev); mutex_destroy(&hdev->vport_lock); hclge_uninit_vport_vlan_table(hdev); @@@ -12702,15 -12686,8 +12717,15 @@@ static int hclge_gro_en(struct hnae3_ha { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + bool gro_en_old = hdev->gro_en; + int ret;
- return hclge_config_gro(hdev, enable); + hdev->gro_en = enable; + ret = hclge_config_gro(hdev); + if (ret) + hdev->gro_en = gro_en_old; + + return ret; }
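hclge_gro_en() (and its hclgevf counterpart further down) now caches the requested GRO state in the device structure before programming the hardware, so reset paths can replay it from hdev->gro_en, and rolls the cache back if the hardware rejects it. The generic cache-apply-rollback shape, sketched with hypothetical names:

static int foo_set_feature(struct foo_dev *fdev, bool enable)
{
	bool old = fdev->feature_en;
	int ret;

	fdev->feature_en = enable;	/* cache first so a reset replays it */
	ret = foo_apply_feature(fdev);	/* programs HW from fdev->feature_en */
	if (ret)
		fdev->feature_en = old;	/* roll back on failure */
	return ret;
}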
static void hclge_sync_promisc_mode(struct hclge_dev *hdev) @@@ -12867,6 -12844,29 +12882,29 @@@ static int hclge_get_module_eeprom(stru return 0; }
+ static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle, + u32 *status_code) + { + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + int ret; + + if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) + return -EOPNOTSUPP; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to query link diagnosis info, ret = %d\n", ret); + return ret; + } + + *status_code = le32_to_cpu(desc.data[0]); + return 0; + } + static const struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, @@@ -12967,6 -12967,7 +13005,7 @@@ .set_tx_hwts_info = hclge_ptp_set_tx_info, .get_rx_hwts = hclge_ptp_get_rx_hwts, .get_ts_info = hclge_ptp_get_ts_info, + .get_link_diagnosis_info = hclge_get_link_diagnosis_info, };
static struct hnae3_ae_algo ae_algo = { diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index e446b839a371,ada5c68f2851..b6c1153945e5 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@@ -8,6 -8,7 +8,7 @@@ #include <linux/phy.h> #include <linux/if_vlan.h> #include <linux/kfifo.h> + #include <net/devlink.h>
#include "hclge_cmd.h" #include "hclge_ptp.h" @@@ -193,6 -194,7 +194,7 @@@ enum HLCGE_PORT_TYPE #define HCLGE_VECTOR0_IMP_CMDQ_ERR_B 4U #define HCLGE_VECTOR0_IMP_RD_POISON_B 5U #define HCLGE_VECTOR0_ALL_MSIX_ERR_B 6U + #define HCLGE_TRIGGER_IMP_RESET_B 7U
#define HCLGE_MAC_DEFAULT_FRAME \ (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN) @@@ -927,7 -929,6 +929,7 @@@ struct hclge_dev unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)]; enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type; u8 fd_en; + bool gro_en;
u16 wanted_umv_size; /* max available unicast mac vlan space */ @@@ -944,6 -945,7 +946,7 @@@ cpumask_t affinity_mask; struct irq_affinity_notify affinity_notify; struct hclge_ptp *ptp; + struct devlink *devlink; };
/* VPort level vlan tag configuration for TX direction */ diff --combined drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 938654778979,ff651739f16b..60588b194fe7 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@@ -8,6 -8,7 +8,7 @@@ #include "hclgevf_main.h" #include "hclge_mbx.h" #include "hnae3.h" + #include "hclgevf_devlink.h"
#define HCLGEVF_NAME "hclgevf"
@@@ -506,10 -507,10 +507,10 @@@ void hclgevf_update_link_status(struct link_state = test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; if (link_state != hdev->hw.mac.link) { + hdev->hw.mac.link = link_state; client->ops->link_status_change(handle, !!link_state); if (rclient && rclient->ops->link_status_change) rclient->ops->link_status_change(rhandle, !!link_state); - hdev->hw.mac.link = link_state; }
clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state); @@@ -538,6 -539,7 +539,7 @@@ static int hclgevf_set_handle_info(stru nic->pdev = hdev->pdev; nic->numa_node_mask = hdev->numa_node_mask; nic->flags |= HNAE3_SUPPORT_VF; + nic->kinfo.io_base = hdev->hw.io_base;
ret = hclgevf_knic_setup(hdev); if (ret) @@@ -2487,8 -2489,6 +2489,8 @@@ static int hclgevf_configure(struct hcl { int ret;
+ hdev->gro_en = true; + ret = hclgevf_get_basic_info(hdev); if (ret) return ret; @@@ -2551,7 -2551,7 +2553,7 @@@ static int hclgevf_init_roce_base_info( return 0; }
-static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) +static int hclgevf_config_gro(struct hclgevf_dev *hdev) { struct hclgevf_cfg_gro_status_cmd *req; struct hclgevf_desc desc; @@@ -2564,7 -2564,7 +2566,7 @@@ false); req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
- req->gro_en = en ? 1 : 0; + req->gro_en = hdev->gro_en ? 1 : 0;
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (ret) @@@ -3310,7 -3310,7 +3312,7 @@@ static int hclgevf_reset_hdev(struct hc return ret; }
- ret = hclgevf_config_gro(hdev, true); + ret = hclgevf_config_gro(hdev); if (ret) return ret;
@@@ -3339,6 -3339,10 +3341,10 @@@ static int hclgevf_init_hdev(struct hcl if (ret) return ret;
+ ret = hclgevf_devlink_init(hdev); + if (ret) + goto err_devlink_init; + ret = hclgevf_cmd_queue_init(hdev); if (ret) goto err_cmd_queue_init; @@@ -3391,7 -3395,7 +3397,7 @@@ if (ret) goto err_config;
- ret = hclgevf_config_gro(hdev, true); + ret = hclgevf_config_gro(hdev); if (ret) goto err_config;
@@@ -3443,6 -3447,8 +3449,8 @@@ err_misc_irq_init err_cmd_init: hclgevf_cmd_uninit(hdev); err_cmd_queue_init: + hclgevf_devlink_uninit(hdev); + err_devlink_init: hclgevf_pci_uninit(hdev); clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); return ret; @@@ -3464,6 -3470,7 +3472,7 @@@ static void hclgevf_uninit_hdev(struct }
hclgevf_cmd_uninit(hdev); + hclgevf_devlink_uninit(hdev); hclgevf_pci_uninit(hdev); hclgevf_uninit_mac_list(hdev); } @@@ -3640,15 -3647,8 +3649,15 @@@ void hclgevf_update_speed_duplex(struc static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + bool gro_en_old = hdev->gro_en; + int ret;
- return hclgevf_config_gro(hdev, enable); + hdev->gro_en = enable; + ret = hclgevf_config_gro(hdev); + if (ret) + hdev->gro_en = gro_en_old; + + return ret; }
static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, diff --combined drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index e8013be055f8,6f222a3a0bf2..73e8bb5efc30 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@@ -6,6 -6,7 +6,7 @@@ #include <linux/fs.h> #include <linux/if_vlan.h> #include <linux/types.h> + #include <net/devlink.h> #include "hclge_mbx.h" #include "hclgevf_cmd.h" #include "hnae3.h" @@@ -310,8 -311,6 +311,8 @@@ struct hclgevf_dev u16 *vector_status; int *vector_irq;
+ bool gro_en; + unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
struct hclgevf_mac_table_cfg mac_table; @@@ -332,6 -331,8 +333,8 @@@ u32 flag; unsigned long serv_processed_cnt; unsigned long last_serv_processed; + + struct devlink *devlink; };
static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev) diff --combined drivers/net/ethernet/intel/e1000e/ich8lan.c index a80336c4319b,2f97c9f5611d..60c582a16821 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@@ -321,6 -321,7 +321,7 @@@ static s32 e1000_init_phy_workarounds_p case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: if (e1000_phy_is_accessible_pchlan(hw)) break;
@@@ -466,6 -467,7 +467,7 @@@ static s32 e1000_init_phy_params_pchlan case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: /* In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again. */ @@@ -711,6 -713,7 +713,7 @@@ static s32 e1000_init_mac_params_ich8la case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: case e1000_pchlan: /* check management mode */ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; @@@ -1006,8 -1009,6 +1009,8 @@@ static s32 e1000_platform_pm_pch_lpt(st { u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) | link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND; + u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */ + u16 lat_enc_d = 0; /* latency decoded */ u16 lat_enc = 0; /* latency encoded */
if (link) { @@@ -1061,17 -1062,7 +1064,17 @@@ E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop); max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);
- if (lat_enc > max_ltr_enc) + lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) * + (1U << (E1000_LTRV_SCALE_FACTOR * + ((lat_enc & E1000_LTRV_SCALE_MASK) + >> E1000_LTRV_SCALE_SHIFT))); + + max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) * + (1U << (E1000_LTRV_SCALE_FACTOR * + ((max_ltr_enc & E1000_LTRV_SCALE_MASK) + >> E1000_LTRV_SCALE_SHIFT))); + + if (lat_enc_d > max_ltr_enc_d) lat_enc = max_ltr_enc; }
@@@ -1278,9 -1269,11 +1281,11 @@@ static s32 e1000_disable_ulp_lpt_lp(str usleep_range(10000, 11000); } if (firmware_bug) - e_warn("ULP_CONFIG_DONE took %dmsec. This is a firmware bug\n", i * 10); + e_warn("ULP_CONFIG_DONE took %d msec. This is a firmware bug\n", + i * 10); else - e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10); + e_dbg("ULP_CONFIG_DONE cleared after %d msec\n", + i * 10);
if (force) { mac_reg = er32(H2ME); @@@ -1675,6 -1668,7 +1680,7 @@@ static s32 e1000_get_variants_ich8lan(s case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: rc = e1000_init_phy_params_pchlan(hw); break; default: @@@ -2130,6 -2124,7 +2136,7 @@@ static s32 e1000_sw_lcd_config_ich8lan( case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; break; default: @@@ -3174,6 -3169,7 +3181,7 @@@ static s32 e1000_valid_nvm_bank_detect_ case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: bank1_offset = nvm->flash_bank_size; act_offset = E1000_ICH_NVM_SIG_WORD;
@@@ -4113,6 -4109,7 +4121,7 @@@ static s32 e1000_validate_nvm_checksum_ case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: word = NVM_COMPAT; valid_csum_mask = NVM_COMPAT_VALID_CSUM; break; @@@ -4127,17 -4124,13 +4136,17 @@@ return ret_val;
if (!(data & valid_csum_mask)) { - data |= valid_csum_mask; - ret_val = e1000_write_nvm(hw, word, 1, &data); - if (ret_val) - return ret_val; - ret_val = e1000e_update_nvm_checksum(hw); - if (ret_val) - return ret_val; + e_dbg("NVM Checksum Invalid\n"); + + if (hw->mac.type < e1000_pch_cnp) { + data |= valid_csum_mask; + ret_val = e1000_write_nvm(hw, word, 1, &data); + if (ret_val) + return ret_val; + ret_val = e1000e_update_nvm_checksum(hw); + if (ret_val) + return ret_val; + } }
return e1000e_validate_nvm_checksum_generic(hw); diff --combined drivers/net/ethernet/intel/e1000e/ich8lan.h index e757896287eb,9b145f6248a8..d6a092e5ee74 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@@ -41,12 -41,15 +41,15 @@@ #define E1000_FWSM_WLOCK_MAC_MASK 0x0380 #define E1000_FWSM_WLOCK_MAC_SHIFT 7 #define E1000_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */ + #define E1000_EXFWSM_DPG_EXIT_DONE 0x00000001
/* Shared Receive Address Registers */ #define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8)) #define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8))
#define E1000_H2ME 0x05B50 /* Host to ME */ + #define E1000_H2ME_START_DPG 0x00000001 /* indicate the ME of DPG */ + #define E1000_H2ME_EXIT_DPG 0x00000002 /* indicate the ME exit DPG */ #define E1000_H2ME_ULP 0x00000800 /* ULP Indication Bit */ #define E1000_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */
@@@ -274,11 -277,8 +277,11 @@@
/* Latency Tolerance Reporting */ #define E1000_LTRV 0x000F8 +#define E1000_LTRV_VALUE_MASK 0x000003FF #define E1000_LTRV_SCALE_MAX 5 #define E1000_LTRV_SCALE_FACTOR 5 +#define E1000_LTRV_SCALE_SHIFT 10 +#define E1000_LTRV_SCALE_MASK 0x00001C00 #define E1000_LTRV_REQ_SHIFT 15 #define E1000_LTRV_NOSNOOP_SHIFT 16 #define E1000_LTRV_SEND (1 << 30) diff --combined drivers/net/ethernet/intel/ice/ice_devlink.c index 7fe6e8ea39f0,8c863d64930b..14afce82ef63 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@@ -42,9 -42,7 +42,9 @@@ static int ice_info_pba(struct ice_pf *
status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf)); if (status) - return -EIO; + /* We failed to locate the PBA, so just skip this entry */ + dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n", + ice_stat_str(status));
return 0; } @@@ -477,7 -475,7 +477,7 @@@ struct ice_pf *ice_allocate_pf(struct d { struct devlink *devlink;
- devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf)); + devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf), dev); if (!devlink) return NULL;
@@@ -504,7 -502,7 +504,7 @@@ int ice_devlink_register(struct ice_pf struct device *dev = ice_pf_to_dev(pf); int err;
- err = devlink_register(devlink, dev); + err = devlink_register(devlink); if (err) { dev_err(dev, "devlink registration failed: %d\n", err); return err; diff --combined drivers/net/ethernet/intel/igc/igc_main.c index ed2d66bc2d6c,db1c63e8802a..c6c075a637ea --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@@ -12,6 -12,8 +12,8 @@@ #include <net/pkt_sched.h> #include <linux/bpf_trace.h> #include <net/xdp_sock_drv.h> + #include <linux/pci.h> + #include <net/ipv6.h>
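The two ice hunks above track the devlink core change that binds the backing struct device at allocation time rather than at registration, so the core knows the device as early as possible. The new call sequence, sketched with a hypothetical ops table and private struct:

	struct devlink *devlink;

	/* the device is now passed to devlink_alloc()... */
	devlink = devlink_alloc(&foo_devlink_ops, sizeof(struct foo_priv), dev);
	if (!devlink)
		return -ENOMEM;

	/* ...and devlink_register() no longer takes it */
	err = devlink_register(devlink);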
#include "igc.h" @@@ -149,9 -151,6 +151,9 @@@ static void igc_release_hw_control(stru struct igc_hw *hw = &adapter->hw; u32 ctrl_ext;
+ if (!pci_device_is_present(adapter->pdev)) + return; + /* Let firmware take over control of h/w */ ctrl_ext = rd32(IGC_CTRL_EXT); wr32(IGC_CTRL_EXT, @@@ -3078,11 -3077,320 +3080,320 @@@ static void igc_del_etype_filter(struc etype); }
+ static int igc_flex_filter_select(struct igc_adapter *adapter, + struct igc_flex_filter *input, + u32 *fhft) + { + struct igc_hw *hw = &adapter->hw; + u8 fhft_index; + u32 fhftsl; + + if (input->index >= MAX_FLEX_FILTER) { + dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n"); + return -EINVAL; + } + + /* Indirect table select register */ + fhftsl = rd32(IGC_FHFTSL); + fhftsl &= ~IGC_FHFTSL_FTSL_MASK; + switch (input->index) { + case 0 ... 7: + fhftsl |= 0x00; + break; + case 8 ... 15: + fhftsl |= 0x01; + break; + case 16 ... 23: + fhftsl |= 0x02; + break; + case 24 ... 31: + fhftsl |= 0x03; + break; + } + wr32(IGC_FHFTSL, fhftsl); + + /* Normalize index down to host table register */ + fhft_index = input->index % 8; + + *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) : + IGC_FHFT_EXT(fhft_index - 4); + + return 0; + } + + static int igc_write_flex_filter_ll(struct igc_adapter *adapter, + struct igc_flex_filter *input) + { + struct device *dev = &adapter->pdev->dev; + struct igc_hw *hw = &adapter->hw; + u8 *data = input->data; + u8 *mask = input->mask; + u32 queuing; + u32 fhft; + u32 wufc; + int ret; + int i; + + /* Length has to be aligned to 8. Otherwise the filter will fail. Bail + * out early to avoid surprises later. + */ + if (input->length % 8 != 0) { + dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n"); + return -EINVAL; + } + + /* Select corresponding flex filter register and get base for host table. */ + ret = igc_flex_filter_select(adapter, input, &fhft); + if (ret) + return ret; + + /* When adding a filter globally disable flex filter feature. That is + * recommended within the datasheet. + */ + wufc = rd32(IGC_WUFC); + wufc &= ~IGC_WUFC_FLEX_HQ; + wr32(IGC_WUFC, wufc); + + /* Configure filter */ + queuing = input->length & IGC_FHFT_LENGTH_MASK; + queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK; + queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK; + + if (input->immediate_irq) + queuing |= IGC_FHFT_IMM_INT; + + if (input->drop) + queuing |= IGC_FHFT_DROP; + + wr32(fhft + 0xFC, queuing); + + /* Write data (128 byte) and mask (128 bit) */ + for (i = 0; i < 16; ++i) { + const size_t data_idx = i * 8; + const size_t row_idx = i * 16; + u32 dw0 = + (data[data_idx + 0] << 0) | + (data[data_idx + 1] << 8) | + (data[data_idx + 2] << 16) | + (data[data_idx + 3] << 24); + u32 dw1 = + (data[data_idx + 4] << 0) | + (data[data_idx + 5] << 8) | + (data[data_idx + 6] << 16) | + (data[data_idx + 7] << 24); + u32 tmp; + + /* Write row: dw0, dw1 and mask */ + wr32(fhft + row_idx, dw0); + wr32(fhft + row_idx + 4, dw1); + + /* mask is only valid for MASK(7, 0) */ + tmp = rd32(fhft + row_idx + 8); + tmp &= ~GENMASK(7, 0); + tmp |= mask[i]; + wr32(fhft + row_idx + 8, tmp); + } + + /* Enable filter. */ + wufc |= IGC_WUFC_FLEX_HQ; + if (input->index > 8) { + /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. 
*/ + u32 wufc_ext = rd32(IGC_WUFC_EXT); + + wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8)); + + wr32(IGC_WUFC_EXT, wufc_ext); + } else { + wufc |= (IGC_WUFC_FLX0 << input->index); + } + wr32(IGC_WUFC, wufc); + + dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n", + input->index); + + return 0; + } + + static void igc_flex_filter_add_field(struct igc_flex_filter *flex, + const void *src, unsigned int offset, + size_t len, const void *mask) + { + int i; + + /* data */ + memcpy(&flex->data[offset], src, len); + + /* mask */ + for (i = 0; i < len; ++i) { + const unsigned int idx = i + offset; + const u8 *ptr = mask; + + if (mask) { + if (ptr[i] & 0xff) + flex->mask[idx / 8] |= BIT(idx % 8); + + continue; + } + + flex->mask[idx / 8] |= BIT(idx % 8); + } + } + + static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter) + { + struct igc_hw *hw = &adapter->hw; + u32 wufc, wufc_ext; + int i; + + wufc = rd32(IGC_WUFC); + wufc_ext = rd32(IGC_WUFC_EXT); + + for (i = 0; i < MAX_FLEX_FILTER; i++) { + if (i < 8) { + if (!(wufc & (IGC_WUFC_FLX0 << i))) + return i; + } else { + if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8)))) + return i; + } + } + + return -ENOSPC; + } + + static bool igc_flex_filter_in_use(struct igc_adapter *adapter) + { + struct igc_hw *hw = &adapter->hw; + u32 wufc, wufc_ext; + + wufc = rd32(IGC_WUFC); + wufc_ext = rd32(IGC_WUFC_EXT); + + if (wufc & IGC_WUFC_FILTER_MASK) + return true; + + if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK) + return true; + + return false; + } + + static int igc_add_flex_filter(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) + { + struct igc_flex_filter flex = { }; + struct igc_nfc_filter *filter = &rule->filter; + unsigned int eth_offset, user_offset; + int ret, index; + bool vlan; + + index = igc_find_avail_flex_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + /* Construct the flex filter: + * -> dest_mac [6] + * -> src_mac [6] + * -> tpid [2] + * -> vlan tci [2] + * -> ether type [2] + * -> user data [8] + * -> = 26 bytes => 32 length + */ + flex.index = index; + flex.length = 32; + flex.rx_queue = rule->action; + + vlan = rule->filter.vlan_tci || rule->filter.vlan_etype; + eth_offset = vlan ? 16 : 12; + user_offset = vlan ? 18 : 14; + + /* Add destination MAC */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_flex_filter_add_field(&flex, &filter->dst_addr, 0, + ETH_ALEN, NULL); + + /* Add source MAC */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_flex_filter_add_field(&flex, &filter->src_addr, 6, + ETH_ALEN, NULL); + + /* Add VLAN etype */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) + igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12, + sizeof(filter->vlan_etype), + NULL); + + /* Add VLAN TCI */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) + igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14, + sizeof(filter->vlan_tci), NULL); + + /* Add Ether type */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + __be16 etype = cpu_to_be16(filter->etype); + + igc_flex_filter_add_field(&flex, &etype, eth_offset, + sizeof(etype), NULL); + } + + /* Add user data */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) + igc_flex_filter_add_field(&flex, &filter->user_data, + user_offset, + sizeof(filter->user_data), + filter->user_mask); + + /* Add it down to the hardware and enable it. 
*/ + ret = igc_write_flex_filter_ll(adapter, &flex); + if (ret) + return ret; + + filter->flex_index = index; + + return 0; + } + + static void igc_del_flex_filter(struct igc_adapter *adapter, + u16 reg_index) + { + struct igc_hw *hw = &adapter->hw; + u32 wufc; + + /* Just disable the filter. The filter table itself is kept + * intact. Another flex_filter_add() should override the "old" data + * then. + */ + if (reg_index > 8) { + u32 wufc_ext = rd32(IGC_WUFC_EXT); + + wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8)); + wr32(IGC_WUFC_EXT, wufc_ext); + } else { + wufc = rd32(IGC_WUFC); + + wufc &= ~(IGC_WUFC_FLX0 << reg_index); + wr32(IGC_WUFC, wufc); + } + + if (igc_flex_filter_in_use(adapter)) + return; + + /* No filters are in use, we may disable flex filters */ + wufc = rd32(IGC_WUFC); + wufc &= ~IGC_WUFC_FLEX_HQ; + wr32(IGC_WUFC, wufc); + } + static int igc_enable_nfc_rule(struct igc_adapter *adapter, - const struct igc_nfc_rule *rule) + struct igc_nfc_rule *rule) { int err;
+ if (rule->flex) { + return igc_add_flex_filter(adapter, rule); + } + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { err = igc_add_etype_filter(adapter, rule->filter.etype, rule->action); @@@ -3119,6 -3427,11 +3430,11 @@@ static void igc_disable_nfc_rule(struct igc_adapter *adapter, const struct igc_nfc_rule *rule) { + if (rule->flex) { + igc_del_flex_filter(adapter, rule->filter.flex_index); + return; + } + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) igc_del_etype_filter(adapter, rule->filter.etype);
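igc_flex_filter_add_field() above packs a byte-granular match mask into the hardware's one-bit-per-matched-byte mask array via mask[idx / 8] |= BIT(idx % 8). A worked example of that packing in isolation:

	/* mark bytes 14 and 15 (a 2-byte field at offset 14) as "must match" */
	u8 mask[16] = { 0 };
	unsigned int offset = 14, len = 2, i;

	for (i = 0; i < len; i++) {
		unsigned int idx = offset + i;

		mask[idx / 8] |= BIT(idx % 8);
	}
	/* mask[1] is now 0xc0: bits 6 and 7 cover bytes 14 and 15 */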
@@@ -4452,29 -4765,26 +4768,29 @@@ void igc_down(struct igc_adapter *adapt
igc_ptp_suspend(adapter);
- /* disable receives in the hardware */ - rctl = rd32(IGC_RCTL); - wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); - /* flush and sleep below */ - + if (pci_device_is_present(adapter->pdev)) { + /* disable receives in the hardware */ + rctl = rd32(IGC_RCTL); + wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); + /* flush and sleep below */ + } /* set trans_start so we don't get spurious watchdogs during reset */ netif_trans_update(netdev);
netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev);
- /* disable transmits in the hardware */ - tctl = rd32(IGC_TCTL); - tctl &= ~IGC_TCTL_EN; - wr32(IGC_TCTL, tctl); - /* flush both disables and wait for them to finish */ - wrfl(); - usleep_range(10000, 20000); + if (pci_device_is_present(adapter->pdev)) { + /* disable transmits in the hardware */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_EN; + wr32(IGC_TCTL, tctl); + /* flush both disables and wait for them to finish */ + wrfl(); + usleep_range(10000, 20000);
- igc_irq_disable(adapter); + igc_irq_disable(adapter); + }
adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
@@@ -4817,6 -5127,7 +5133,7 @@@ static irqreturn_t igc_msix_ring(int ir */ static int igc_request_msix(struct igc_adapter *adapter) { + unsigned int num_q_vectors = adapter->num_q_vectors; int i = 0, err = 0, vector = 0, free_vector = 0; struct net_device *netdev = adapter->netdev;
@@@ -4825,7 -5136,13 +5142,13 @@@ if (err) goto err_out;
- for (i = 0; i < adapter->num_q_vectors; i++) { + if (num_q_vectors > MAX_Q_VECTORS) { + num_q_vectors = MAX_Q_VECTORS; + dev_warn(&adapter->pdev->dev, + "The number of queue vectors (%d) is higher than max allowed (%d)\n", + adapter->num_q_vectors, MAX_Q_VECTORS); + } + for (i = 0; i < num_q_vectors; i++) { struct igc_q_vector *q_vector = adapter->q_vector[i];
vector++; @@@ -4904,20 -5221,12 +5227,12 @@@ bool igc_has_link(struct igc_adapter *a * false until the igc_check_for_link establishes link * for copper adapters ONLY */ - switch (hw->phy.media_type) { - case igc_media_type_copper: - if (!hw->mac.get_link_status) - return true; - hw->mac.ops.check_for_link(hw); - link_active = !hw->mac.get_link_status; - break; - default: - case igc_media_type_unknown: - break; - } + if (!hw->mac.get_link_status) + return true; + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status;
- if (hw->mac.type == igc_i225 && - hw->phy.id == I225_I_PHY_ID) { + if (hw->mac.type == igc_i225) { if (!netif_carrier_ok(adapter->netdev)) { adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { @@@ -5005,7 -5314,9 +5320,9 @@@ static void igc_watchdog_task(struct wo adapter->tx_timeout_factor = 14; break; case SPEED_100: - /* maybe add some timeout factor ? */ + case SPEED_1000: + case SPEED_2500: + adapter->tx_timeout_factor = 7; break; }
@@@ -5495,7 -5806,7 +5812,7 @@@ static bool validate_schedule(struct ig if (e->command != TC_TAPRIO_CMD_SET_GATES) return false;
- for (i = 0; i < IGC_MAX_TX_QUEUES; i++) { + for (i = 0; i < adapter->num_tx_queues; i++) { if (e->gate_mask & BIT(i)) queue_uses[i]++;
@@@ -5552,7 -5863,7 +5869,7 @@@ static int igc_save_qbv_schedule(struc
end_time += e->interval;
- for (i = 0; i < IGC_MAX_TX_QUEUES; i++) { + for (i = 0; i < adapter->num_tx_queues; i++) { struct igc_ring *ring = adapter->tx_ring[i];
if (!(e->gate_mask & BIT(i))) @@@ -5704,7 -6015,7 +6021,7 @@@ static const struct net_device_ops igc_ .ndo_fix_features = igc_fix_features, .ndo_set_features = igc_set_features, .ndo_features_check = igc_features_check, - .ndo_do_ioctl = igc_ioctl, + .ndo_eth_ioctl = igc_ioctl, .ndo_setup_tc = igc_setup_tc, .ndo_bpf = igc_bpf, .ndo_xdp_xmit = igc_xdp_xmit, @@@ -5865,6 -6176,10 +6182,10 @@@ static int igc_probe(struct pci_dev *pd
pci_enable_pcie_error_reporting(pdev);
+ err = pci_enable_ptm(pdev, NULL); + if (err < 0) + dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n"); + pci_set_master(pdev);
err = -ENOMEM; diff --combined drivers/net/ethernet/intel/igc/igc_ptp.c index 4ae19c6a3247,f6848181cdbd..0f021909b430 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@@ -9,6 -9,8 +9,8 @@@ #include <linux/ptp_classify.h> #include <linux/clocksource.h> #include <linux/ktime.h> + #include <linux/delay.h> + #include <linux/iopoll.h>
#define INCVALUE_MASK 0x7fffffff #define ISGN 0x80000000 @@@ -16,6 -18,9 +18,9 @@@ #define IGC_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9) #define IGC_PTP_TX_TIMEOUT (HZ * 15)
+ #define IGC_PTM_STAT_SLEEP 2 + #define IGC_PTM_STAT_TIMEOUT 100 + /* SYSTIM read access for I225 */ void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts) { @@@ -752,6 -757,147 +757,147 @@@ int igc_ptp_get_ts_config(struct net_de -EFAULT : 0; }
+ /* The two conditions below must be met for cross timestamping via + * PCIe PTM: + * + * 1. We have a way to convert the timestamps in the PTM messages + * to something related to the system clocks (right now, only + * X86 systems with support for the Always Running Timer allow that); + * + * 2. We have PTM enabled in the path from the device to the PCIe root port. + */ + static bool igc_is_crosststamp_supported(struct igc_adapter *adapter) + { + return IS_ENABLED(CONFIG_X86_TSC) ? pcie_ptm_enabled(adapter->pdev) : false; + } + + static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp) + { + #if IS_ENABLED(CONFIG_X86_TSC) + return convert_art_ns_to_tsc(tstamp); + #else + return (struct system_counterval_t) { }; + #endif + } + + static void igc_ptm_log_error(struct igc_adapter *adapter, u32 ptm_stat) + { + struct net_device *netdev = adapter->netdev; + + switch (ptm_stat) { + case IGC_PTM_STAT_RET_ERR: + netdev_err(netdev, "PTM Error: Root port timeout\n"); + break; + case IGC_PTM_STAT_BAD_PTM_RES: + netdev_err(netdev, "PTM Error: Bad response, PTM Response Data expected\n"); + break; + case IGC_PTM_STAT_T4M1_OVFL: + netdev_err(netdev, "PTM Error: T4 minus T1 overflow\n"); + break; + case IGC_PTM_STAT_ADJUST_1ST: + netdev_err(netdev, "PTM Error: 1588 timer adjusted during first PTM cycle\n"); + break; + case IGC_PTM_STAT_ADJUST_CYC: + netdev_err(netdev, "PTM Error: 1588 timer adjusted during non-first PTM cycle\n"); + break; + default: + netdev_err(netdev, "PTM Error: Unknown error (%#x)\n", ptm_stat); + break; + } + } + + static int igc_phc_get_syncdevicetime(ktime_t *device, + struct system_counterval_t *system, + void *ctx) + { + u32 stat, t2_curr_h, t2_curr_l, ctrl; + struct igc_adapter *adapter = ctx; + struct igc_hw *hw = &adapter->hw; + int err, count = 100; + ktime_t t1, t2_curr; + + /* Get a snapshot of system clocks to use as historic value. */ + ktime_get_snapshot(&adapter->snapshot); + + do { + /* Doing this in a loop because in the event of a + * badly timed (ha!) system clock adjustment, we may + * get PTM errors from the PCI root, but these errors + * are transitory. Repeating the process returns valid + * data eventually. + */ + + /* To "manually" start the PTM cycle we need to clear and + * then set again the TRIG bit. + */ + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + ctrl |= IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + + /* The cycle only starts "for real" when software notifies + * that it has read the registers; this is done by setting the + * VALID bit. + */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + + err = readx_poll_timeout(rd32, IGC_PTM_STAT, stat, + stat, IGC_PTM_STAT_SLEEP, + IGC_PTM_STAT_TIMEOUT); + if (err < 0) { + netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n"); + return err; + } + + if ((stat & IGC_PTM_STAT_VALID) == IGC_PTM_STAT_VALID) + break; + + if (stat & ~IGC_PTM_STAT_VALID) { + /* An error occurred, log it. */ + igc_ptm_log_error(adapter, stat); + /* The STAT register is write-1-to-clear (W1C), + * so write the previous error status to clear it.
+ */ + wr32(IGC_PTM_STAT, stat); + continue; + } + } while (--count); + + if (!count) { + netdev_err(adapter->netdev, "Exceeded number of tries for PTM cycle\n"); + return -ETIMEDOUT; + } + + t1 = ktime_set(rd32(IGC_PTM_T1_TIM0_H), rd32(IGC_PTM_T1_TIM0_L)); + + t2_curr_l = rd32(IGC_PTM_CURR_T2_L); + t2_curr_h = rd32(IGC_PTM_CURR_T2_H); + + /* FIXME: When the register that tells the endianness of the + * PTM registers are implemented, check them here and add the + * appropriate conversion. + */ + t2_curr_h = swab32(t2_curr_h); + + t2_curr = ((s64)t2_curr_h << 32 | t2_curr_l); + + *device = t1; + *system = igc_device_tstamp_to_system(t2_curr); + + return 0; + } + + static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *cts) + { + struct igc_adapter *adapter = container_of(ptp, struct igc_adapter, + ptp_caps); + + return get_device_system_crosststamp(igc_phc_get_syncdevicetime, + adapter, &adapter->snapshot, cts); + } + /** * igc_ptp_init - Initialize PTP functionality * @adapter: Board private structure @@@ -788,6 -934,11 +934,11 @@@ void igc_ptp_init(struct igc_adapter *a adapter->ptp_caps.n_per_out = IGC_N_PEROUT; adapter->ptp_caps.n_pins = IGC_N_SDP; adapter->ptp_caps.verify = igc_ptp_verify_pin; + + if (!igc_is_crosststamp_supported(adapter)) + break; + + adapter->ptp_caps.getcrosststamp = igc_ptp_getcrosststamp; break; default: adapter->ptp_clock = NULL; @@@ -849,8 -1000,7 +1000,8 @@@ void igc_ptp_suspend(struct igc_adapte adapter->ptp_tx_skb = NULL; clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
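The PTM cycle polling above is built on readx_poll_timeout() from <linux/iopoll.h>, which re-invokes a read accessor until a condition becomes true or a timeout elapses. The idiom in isolation, against a hypothetical status register and READY bit:

	u32 stat;
	int err;

	/* rd32(FOO_STATUS_REG) is re-read every 10 us; returns -ETIMEDOUT
	 * if the READY bit has not set within 1000 us.
	 */
	err = readx_poll_timeout(rd32, FOO_STATUS_REG, stat,
				 stat & FOO_STATUS_READY, 10, 1000);
	if (err)
		return err;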
- igc_ptp_time_save(adapter); + if (pci_device_is_present(adapter->pdev)) + igc_ptp_time_save(adapter); }
/** @@@ -879,7 -1029,9 +1030,9 @@@ void igc_ptp_stop(struct igc_adapter *a void igc_ptp_reset(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; + u32 cycle_ctrl, ctrl; unsigned long flags; + u32 timadj;
/* reset the tstamp_config */ igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); @@@ -888,12 -1040,38 +1041,38 @@@
switch (adapter->hw.mac.type) { case igc_i225: + timadj = rd32(IGC_TIMADJ); + timadj |= IGC_TIMADJ_ADJUST_METH; + wr32(IGC_TIMADJ, timadj); + wr32(IGC_TSAUXC, 0x0); wr32(IGC_TSSDP, 0x0); wr32(IGC_TSIM, IGC_TSICR_INTERRUPTS | (adapter->pps_sys_wrap_on ? IGC_TSICR_SYS_WRAP : 0)); wr32(IGC_IMS, IGC_IMS_TS); + + if (!igc_is_crosststamp_supported(adapter)) + break; + + wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT); + wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT); + + cycle_ctrl = IGC_PTM_CYCLE_CTRL_CYC_TIME(IGC_PTM_CYC_TIME_DEFAULT); + + wr32(IGC_PTM_CYCLE_CTRL, cycle_ctrl); + + ctrl = IGC_PTM_CTRL_EN | + IGC_PTM_CTRL_START_NOW | + IGC_PTM_CTRL_SHRT_CYC(IGC_PTM_SHORT_CYC_DEFAULT) | + IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT) | + IGC_PTM_CTRL_TRIG; + + wr32(IGC_PTM_CTRL, ctrl); + + /* Force the first cycle to run. */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + break; default: /* No work to do. */ diff --combined drivers/net/ethernet/marvell/mvneta.c index de32e5b49053,0e6d40701862..9d460a270601 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@@ -105,7 -105,7 +105,7 @@@ #define MVNETA_VLAN_PRIO_TO_RXQ 0x2440 #define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3)) #define MVNETA_PORT_STATUS 0x2444 -#define MVNETA_TX_IN_PRGRS BIT(1) +#define MVNETA_TX_IN_PRGRS BIT(0) #define MVNETA_TX_FIFO_EMPTY BIT(8) #define MVNETA_RX_MIN_FRAME_SIZE 0x247c /* Only exists on Armada XP and Armada 370 */ @@@ -2327,7 -2327,7 +2327,7 @@@ mvneta_swbm_build_skb(struct mvneta_por if (!skb) return ERR_PTR(-ENOMEM);
- skb_mark_for_recycle(skb, virt_to_page(xdp->data), pool); + skb_mark_for_recycle(skb);
skb_reserve(skb, xdp->data - xdp->data_hard_start); skb_put(skb, xdp->data_end - xdp->data); @@@ -2339,10 -2339,6 +2339,6 @@@ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, skb_frag_page(frag), skb_frag_off(frag), skb_frag_size(frag), PAGE_SIZE); - /* We don't need to reset pp_recycle here. It's already set, so - * just mark fragments for recycling. - */ - page_pool_store_mem_info(skb_frag_page(frag), pool); }
return skb; @@@ -2666,7 -2662,7 +2662,7 @@@ static int mvneta_tx_tso(struct sk_buf return 0;
if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) { - pr_info("*** Is this even possible???!?!?\n"); + pr_info("*** Is this even possible?\n"); return 0; }
@@@ -3832,12 -3828,20 +3828,20 @@@ static void mvneta_validate(struct phyl struct mvneta_port *pp = netdev_priv(ndev); __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- /* We only support QSGMII, SGMII, 802.3z and RGMII modes */ - if (state->interface != PHY_INTERFACE_MODE_NA && - state->interface != PHY_INTERFACE_MODE_QSGMII && - state->interface != PHY_INTERFACE_MODE_SGMII && - !phy_interface_mode_is_8023z(state->interface) && - !phy_interface_mode_is_rgmii(state->interface)) { + /* We only support QSGMII, SGMII, 802.3z and RGMII modes. + * When in 802.3z mode, we must have AN enabled: + * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... + * When <PortType> = 1 (1000BASE-X) this field must be set to 1." + */ + if (phy_interface_mode_is_8023z(state->interface)) { + if (!phylink_test(state->advertising, Autoneg)) { + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + return; + } + } else if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != PHY_INTERFACE_MODE_QSGMII && + state->interface != PHY_INTERFACE_MODE_SGMII && + !phy_interface_mode_is_rgmii(state->interface)) { bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); return; } @@@ -4496,8 -4500,11 +4500,11 @@@ static int mvneta_ethtool_nway_reset(st }
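The new 802.3z branch above enforces a hardware rule: in 1000BASE-X the port must run in-band autonegotiation, so a configuration that selects an 802.3z interface without advertising Autoneg is rejected outright. Reduced to its core, the guard in a phylink validate() callback looks like this (same helpers as in the hunk above):

if (phy_interface_mode_is_8023z(state->interface) &&
    !phylink_test(state->advertising, Autoneg)) {
        /* 1000BASE-X without in-band AN is not supported:
         * clearing the mask tells phylink nothing can link up.
         */
        bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
        return;
}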
/* Set interrupt coalescing for ethtools */ - static int mvneta_ethtool_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *c) + static int + mvneta_ethtool_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mvneta_port *pp = netdev_priv(dev); int queue; @@@ -4520,8 -4527,11 +4527,11 @@@ }
/* get coalescing for ethtools */ - static int mvneta_ethtool_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *c) + static int + mvneta_ethtool_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mvneta_port *pp = netdev_priv(dev);
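Both coalesce hooks are converted here to the extended ethtool API: the callbacks gain a kernel-only struct kernel_ethtool_coalesce for attributes that never cross to userspace, plus a netlink extack for precise error reporting. A hedged sketch of the new callback shape (MY_MAX_USECS is a hypothetical driver limit):

static int my_set_coalesce(struct net_device *dev,
                           struct ethtool_coalesce *ec,
                           struct kernel_ethtool_coalesce *kernel_coal,
                           struct netlink_ext_ack *extack)
{
        if (ec->rx_coalesce_usecs > MY_MAX_USECS) {
                NL_SET_ERR_MSG_MOD(extack, "rx-usecs out of range");
                return -EINVAL;
        }
        /* ... program the hardware ... */
        return 0;
}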
@@@ -4986,7 -4996,7 +4996,7 @@@ static const struct net_device_ops mvne .ndo_change_mtu = mvneta_change_mtu, .ndo_fix_features = mvneta_fix_features, .ndo_get_stats64 = mvneta_get_stats64, - .ndo_do_ioctl = mvneta_ioctl, + .ndo_eth_ioctl = mvneta_ioctl, .ndo_bpf = mvneta_xdp, .ndo_xdp_xmit = mvneta_xdp_xmit, .ndo_setup_tc = mvneta_setup_tc, diff --combined drivers/net/ethernet/qlogic/qed/qed_main.c index 6bb9ec98a12b,6871d892eabf..15ef59aa34ff --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@@ -49,11 -49,10 +49,10 @@@ #define QED_NVM_CFG_MAX_ATTRS 50
static char version[] = - "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; + "QLogic FastLinQ 4xxxx Core Module qed\n";
MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module"); MODULE_LICENSE("GPL"); - MODULE_VERSION(DRV_MODULE_VERSION);
#define FW_FILE_VERSION \ __stringify(FW_MAJOR_VERSION) "." \ @@@ -616,12 -615,7 +615,12 @@@ static int qed_enable_msix(struct qed_d rc = cnt; }
- if (rc > 0) { + /* For VFs, we should return with an error in case we didn't get the + * exact number of msix vectors as we requested. + * Not doing that will lead to a crash when starting queues for + * this VF. + */ + if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) { /* MSI-x configuration was achieved */ int_params->out.int_mode = QED_INT_MODE_MSIX; int_params->out.num_vectors = rc; @@@ -1221,6 -1215,10 +1220,10 @@@ static void qed_slowpath_task(struct wo
if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC, &hwfn->slowpath_task_flags)) { + /* skip qed_db_rec_handler during recovery/unload */ + if (hwfn->cdev->recov_in_prog || !hwfn->slowpath_wq_active) + goto out; + qed_db_rec_handler(hwfn, ptt); if (hwfn->periodic_db_rec_count--) qed_slowpath_delayed_work(hwfn, @@@ -1228,6 -1226,7 +1231,7 @@@ QED_PERIODIC_DB_REC_INTERVAL); }
+ out: qed_ptt_release(hwfn, ptt); }
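The PF/VF split in the MSI-X path above exists because a range-based MSI-X allocation may legally grant fewer vectors than requested; a VF whose queue setup assumes the full count must treat a partial grant as failure. With the generic allocation API the same all-or-nothing requirement can be expressed by pinning min and max (sketch, not the qed code):

/* min_vecs == max_vecs makes the allocation all-or-nothing, which is
 * what a VF needs when it cannot operate on a partial vector grant.
 */
int nvec = pci_alloc_irq_vectors(pdev, want, want, PCI_IRQ_MSIX);

if (nvec < 0)
        return nvec;    /* exactly 'want' vectors were not available */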
diff --combined drivers/net/ethernet/qlogic/qede/qede_main.c index 1c7f9ed6f1c1,4877cb88c31a..9837bdb89cd4 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@@ -39,12 -39,8 +39,8 @@@ #include "qede.h" #include "qede_ptp.h"
- static char version[] = - "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n"; - MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver"); MODULE_LICENSE("GPL"); - MODULE_VERSION(DRV_MODULE_VERSION);
static uint debug; module_param(debug, uint, 0); @@@ -258,7 -254,7 +254,7 @@@ int __init qede_init(void { int ret;
- pr_info("qede_init: %s\n", version); + pr_info("qede init: QLogic FastLinQ 4xxxx Ethernet Driver qede\n");
qede_forced_speed_maps_init();
@@@ -644,7 -640,7 +640,7 @@@ static const struct net_device_ops qede .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = qede_change_mtu, - .ndo_do_ioctl = qede_ioctl, + .ndo_eth_ioctl = qede_ioctl, .ndo_tx_timeout = qede_tx_timeout, #ifdef CONFIG_QED_SRIOV .ndo_set_vf_mac = qede_set_vf_mac, @@@ -1157,10 -1153,6 +1153,6 @@@ static int __qede_probe(struct pci_dev /* Start the Slowpath-process */ memset(&sp_params, 0, sizeof(sp_params)); sp_params.int_mode = QED_INT_MODE_MSIX; - sp_params.drv_major = QEDE_MAJOR_VERSION; - sp_params.drv_minor = QEDE_MINOR_VERSION; - sp_params.drv_rev = QEDE_REVISION_VERSION; - sp_params.drv_eng = QEDE_ENGINEERING_VERSION; strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE); rc = qed_ops->common->slowpath_start(cdev, &sp_params); if (rc) { @@@ -1874,7 -1866,6 +1866,7 @@@ static void qede_sync_free_irqs(struct }
edev->int_info.used_cnt = 0; + edev->int_info.msix_cnt = 0; }
static int qede_req_msix_irqs(struct qede_dev *edev) @@@ -1907,6 -1898,12 +1899,12 @@@ &edev->fp_array[i]); if (rc) { DP_ERR(edev, "Request fp %d irq failed\n", i); + #ifdef CONFIG_RFS_ACCEL + if (edev->ndev->rx_cpu_rmap) + free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap); + + edev->ndev->rx_cpu_rmap = NULL; + #endif qede_sync_free_irqs(edev); return rc; } @@@ -2299,6 -2296,15 +2297,15 @@@ static void qede_unload(struct qede_de
rc = qede_stop_queues(edev); if (rc) { + #ifdef CONFIG_RFS_ACCEL + if (edev->dev_info.common.b_arfs_capable) { + qede_poll_for_freeing_arfs_filters(edev); + if (edev->ndev->rx_cpu_rmap) + free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap); + + edev->ndev->rx_cpu_rmap = NULL; + } + #endif qede_sync_free_irqs(edev); goto out; } @@@ -2428,6 -2434,7 +2435,6 @@@ static int qede_load(struct qede_dev *e goto out; err4: qede_sync_free_irqs(edev); - memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info)); err3: qede_napi_disable_remove(edev); err2: @@@ -2628,8 -2635,10 +2635,10 @@@ static void qede_generic_hw_err_handler "Generic sleepable HW error handling started - err_flags 0x%lx\n", edev->err_flags);
- if (edev->devlink) + if (edev->devlink) { + DP_NOTICE(edev, "Reporting fatal error to devlink\n"); edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type); + }
clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
@@@ -2651,6 -2660,8 +2660,8 @@@ static void qede_set_hw_err_flags(struc
case QED_HW_ERR_FW_ASSERT:
set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
+ /* Mark this error as recoverable and start recovery */
+ set_bit(QEDE_ERR_IS_RECOVERABLE, &err_flags);
break;
default: diff --combined drivers/net/ethernet/sfc/efx.c index 8b3237b923d6,a295e2621cf3..43ef4f529028 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@@ -591,7 -591,7 +591,7 @@@ static const struct net_device_ops efx_ .ndo_tx_timeout = efx_watchdog, .ndo_start_xmit = efx_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = efx_ioctl, + .ndo_eth_ioctl = efx_ioctl, .ndo_change_mtu = efx_change_mtu, .ndo_set_mac_address = efx_set_mac_address, .ndo_set_rx_mode = efx_set_rx_mode, @@@ -900,36 -900,74 +900,36 @@@ static void efx_pci_remove(struct pci_d
/* NIC VPD information * Called during probe to display the part number of the - * installed NIC. VPD is potentially very large but this should - * always appear within the first 512 bytes. + * installed NIC. */ -#define SFC_VPD_LEN 512 static void efx_probe_vpd_strings(struct efx_nic *efx) { struct pci_dev *dev = efx->pci_dev; - char vpd_data[SFC_VPD_LEN]; - ssize_t vpd_size; - int ro_start, ro_size, i, j; - - /* Get the vpd data from the device */ - vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data); - if (vpd_size <= 0) { - netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n"); - return; - } - - /* Get the Read only section */ - ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA); - if (ro_start < 0) { - netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n"); - return; - } - - ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]); - j = ro_size; - i = ro_start + PCI_VPD_LRDT_TAG_SIZE; - if (i + j > vpd_size) - j = vpd_size - i; - - /* Get the Part number */ - i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN"); - if (i < 0) { - netif_err(efx, drv, efx->net_dev, "Part number not found\n"); - return; - } + unsigned int vpd_size, kw_len; + u8 *vpd_data; + int start;
- j = pci_vpd_info_field_size(&vpd_data[i]); - i += PCI_VPD_INFO_FLD_HDR_SIZE; - if (i + j > vpd_size) { - netif_err(efx, drv, efx->net_dev, "Incomplete part number\n"); + vpd_data = pci_vpd_alloc(dev, &vpd_size); + if (IS_ERR(vpd_data)) { + pci_warn(dev, "Unable to read VPD\n"); return; }
- netif_info(efx, drv, efx->net_dev, - "Part Number : %.*s\n", j, &vpd_data[i]); - - i = ro_start + PCI_VPD_LRDT_TAG_SIZE; - j = ro_size; - i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN"); - if (i < 0) { - netif_err(efx, drv, efx->net_dev, "Serial number not found\n"); - return; - } - - j = pci_vpd_info_field_size(&vpd_data[i]); - i += PCI_VPD_INFO_FLD_HDR_SIZE; - if (i + j > vpd_size) { - netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n"); - return; - } + start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, + PCI_VPD_RO_KEYWORD_PARTNO, &kw_len); + if (start < 0) + pci_err(dev, "Part number not found or incomplete\n"); + else + pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start);
- efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL); - if (!efx->vpd_sn) - return; + start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, + PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len); + if (start < 0) + pci_err(dev, "Serial number not found or incomplete\n"); + else + efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
- snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]); + kfree(vpd_data); }
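The rewrite above replaces the driver's hand-rolled VPD walking with the PCI core helpers, which size the buffer and locate read-only keywords internally. A minimal consumer following the same shape (hedged sketch, error handling trimmed):

unsigned int vpd_size, kw_len;
u8 *vpd_data;
int start;

vpd_data = pci_vpd_alloc(pdev, &vpd_size);      /* reads the whole VPD */
if (IS_ERR(vpd_data))
        return;

start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
                                     PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
if (start >= 0)
        pci_info(pdev, "Part Number: %.*s\n", kw_len, vpd_data + start);

kfree(vpd_data);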
diff --combined drivers/net/ethernet/sfc/falcon/efx.c index d7912c716bbf,c177ea0f301e..423bdf81200f --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@@ -2219,7 -2219,7 +2219,7 @@@ static const struct net_device_ops ef4_ .ndo_tx_timeout = ef4_watchdog, .ndo_start_xmit = ef4_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = ef4_ioctl, + .ndo_eth_ioctl = ef4_ioctl, .ndo_change_mtu = ef4_change_mtu, .ndo_set_mac_address = ef4_set_mac_address, .ndo_set_rx_mode = ef4_set_rx_mode, @@@ -2780,36 -2780,75 +2780,36 @@@ static void ef4_pci_remove(struct pci_d };
/* NIC VPD information - * Called during probe to display the part number of the - * installed NIC. VPD is potentially very large but this should - * always appear within the first 512 bytes. + * Called during probe to display the part number of the installed NIC. */ -#define SFC_VPD_LEN 512 static void ef4_probe_vpd_strings(struct ef4_nic *efx) { struct pci_dev *dev = efx->pci_dev; - char vpd_data[SFC_VPD_LEN]; - ssize_t vpd_size; - int ro_start, ro_size, i, j; - - /* Get the vpd data from the device */ - vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data); - if (vpd_size <= 0) { - netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n"); - return; - } - - /* Get the Read only section */ - ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA); - if (ro_start < 0) { - netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n"); - return; - } - - ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]); - j = ro_size; - i = ro_start + PCI_VPD_LRDT_TAG_SIZE; - if (i + j > vpd_size) - j = vpd_size - i; - - /* Get the Part number */ - i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN"); - if (i < 0) { - netif_err(efx, drv, efx->net_dev, "Part number not found\n"); - return; - } + unsigned int vpd_size, kw_len; + u8 *vpd_data; + int start;
- j = pci_vpd_info_field_size(&vpd_data[i]); - i += PCI_VPD_INFO_FLD_HDR_SIZE; - if (i + j > vpd_size) { - netif_err(efx, drv, efx->net_dev, "Incomplete part number\n"); + vpd_data = pci_vpd_alloc(dev, &vpd_size); + if (IS_ERR(vpd_data)) { + pci_warn(dev, "Unable to read VPD\n"); return; }
- netif_info(efx, drv, efx->net_dev, - "Part Number : %.*s\n", j, &vpd_data[i]); - - i = ro_start + PCI_VPD_LRDT_TAG_SIZE; - j = ro_size; - i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN"); - if (i < 0) { - netif_err(efx, drv, efx->net_dev, "Serial number not found\n"); - return; - } - - j = pci_vpd_info_field_size(&vpd_data[i]); - i += PCI_VPD_INFO_FLD_HDR_SIZE; - if (i + j > vpd_size) { - netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n"); - return; - } + start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, + PCI_VPD_RO_KEYWORD_PARTNO, &kw_len); + if (start < 0) + pci_warn(dev, "Part number not found or incomplete\n"); + else + pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start);
- efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL); - if (!efx->vpd_sn) - return; + start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, + PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len); + if (start < 0) + pci_warn(dev, "Serial number not found or incomplete\n"); + else + efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
- snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]); + kfree(vpd_data); }
diff --combined drivers/net/ethernet/smsc/Kconfig index 6571a6a90c1a,72e42a868346..30573013d2ed --- a/drivers/net/ethernet/smsc/Kconfig +++ b/drivers/net/ethernet/smsc/Kconfig @@@ -23,6 -23,7 +23,7 @@@ config SMC919 tristate "SMC 9194 support" depends on ISA select CRC32 + select NETDEV_LEGACY_INIT help This is support for the SMC9xxx based Ethernet cards. Choose this option if you have a DELL laptop with the docking station, or @@@ -37,6 -38,7 +38,6 @@@ config SMC91 tristate "SMC 91C9x/91C1xxx support" select CRC32 select MII - depends on !OF || GPIOLIB depends on ARM || ARM64 || ATARI_ETHERNAT || COLDFIRE || \ MIPS || NIOS2 || SUPERH || XTENSA || H8300 || COMPILE_TEST help diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index fa90bcdf4e45,7b3fcf558603..ed0cd3920171 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@@ -2500,6 -2500,7 +2500,7 @@@ static int stmmac_tx_clean(struct stmma } else { priv->dev->stats.tx_packets++; priv->xstats.tx_pkt_n++; + priv->xstats.txq_stats[queue].tx_pkt_n++; } if (skb) stmmac_get_tx_hwtstamp(priv, p, skb); @@@ -4914,10 -4915,6 +4915,10 @@@ read_again
prefetch(np);
+ /* Ensure a valid XSK buffer before proceed */ + if (!buf->xdp) + break; + if (priv->extend_desc) stmmac_rx_extended_status(priv, &priv->dev->stats, &priv->xstats, @@@ -4938,6 -4935,10 +4939,6 @@@ continue; }
- /* Ensure a valid XSK buffer before proceed */ - if (!buf->xdp) - break; - /* XSK pool expects RX frame 1:1 mapped to XSK buffer */ if (likely(status & rx_not_ls)) { xsk_buff_free(buf->xdp); @@@ -5000,6 -5001,9 +5001,9 @@@
stmmac_finalize_xdp_rx(priv, xdp_status);
+ priv->xstats.rx_pkt_n += count; + priv->xstats.rxq_stats[queue].rx_pkt_n += count; + if (xsk_uses_need_wakeup(rx_q->xsk_pool)) { if (failure || stmmac_rx_dirty(priv, queue) > 0) xsk_set_rx_need_wakeup(rx_q->xsk_pool); @@@ -5287,6 -5291,7 +5291,7 @@@ drain_data stmmac_rx_refill(priv, queue);
priv->xstats.rx_pkt_n += count; + priv->xstats.rxq_stats[queue].rx_pkt_n += count;
return count; } @@@ -6451,7 -6456,7 +6456,7 @@@ static const struct net_device_ops stmm .ndo_set_features = stmmac_set_features, .ndo_set_rx_mode = stmmac_set_rx_mode, .ndo_tx_timeout = stmmac_tx_timeout, - .ndo_do_ioctl = stmmac_ioctl, + .ndo_eth_ioctl = stmmac_ioctl, .ndo_setup_tc = stmmac_setup_tc, .ndo_select_queue = stmmac_select_queue, #ifdef CONFIG_NET_POLL_CONTROLLER diff --combined drivers/net/mhi_net.c index e60e38c1f09d,975f7f9bdf4c..d127eb6e9257 --- a/drivers/net/mhi_net.c +++ b/drivers/net/mhi_net.c @@@ -11,28 -11,42 +11,42 @@@ #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/u64_stats_sync.h> - #include <linux/wwan.h> - - #include "mhi.h"
#define MHI_NET_MIN_MTU ETH_MIN_MTU #define MHI_NET_MAX_MTU 0xffff #define MHI_NET_DEFAULT_MTU 0x4000
- /* When set to false, the default netdev (link 0) is not created, and it's up - * to user to create the link (via wwan rtnetlink). - */ - static bool create_default_iface = true; - module_param(create_default_iface, bool, 0); + struct mhi_net_stats { + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t rx_errors; + u64_stats_t tx_packets; + u64_stats_t tx_bytes; + u64_stats_t tx_errors; + u64_stats_t tx_dropped; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync rx_syncp; + }; + + struct mhi_net_dev { + struct mhi_device *mdev; + struct net_device *ndev; + struct sk_buff *skbagg_head; + struct sk_buff *skbagg_tail; + struct delayed_work rx_refill; + struct mhi_net_stats stats; + u32 rx_queue_sz; + int msg_enable; + unsigned int mru; + };
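The u64_stats_t counters and per-direction syncp members introduced above follow the standard lockless-statistics recipe: the datapath brackets updates with u64_stats_update_begin()/end(), and readers loop until they observe a consistent snapshot. Both sides, as used later in this file:

/* writer, in the rx path */
u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
u64_stats_inc(&mhi_netdev->stats.rx_packets);
u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
u64_stats_update_end(&mhi_netdev->stats.rx_syncp);

/* reader, in ndo_get_stats64 */
unsigned int start;

do {
        start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.rx_syncp);
        stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
        stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
} while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start));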
struct mhi_device_info { const char *netname; - const struct mhi_net_proto *proto; };
static int mhi_ndo_open(struct net_device *ndev) { - struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev); + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
/* Feed the rx buffer pool */ schedule_delayed_work(&mhi_netdev->rx_refill, 0); @@@ -47,7 -61,7 +61,7 @@@
static int mhi_ndo_stop(struct net_device *ndev) { - struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev); + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
netif_stop_queue(ndev); netif_carrier_off(ndev); @@@ -58,17 -72,10 +72,10 @@@
static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev) { - struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev); - const struct mhi_net_proto *proto = mhi_netdev->proto; + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); struct mhi_device *mdev = mhi_netdev->mdev; int err;
- if (proto && proto->tx_fixup) { - skb = proto->tx_fixup(mhi_netdev, skb); - if (unlikely(!skb)) - goto exit_drop; - } - err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT); if (unlikely(err)) { net_err_ratelimited("%s: Failed to queue TX buf (%d)\n", @@@ -93,7 -100,7 +100,7 @@@ exit_drop static void mhi_ndo_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *stats) { - struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev); + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev); unsigned int start;
do { @@@ -101,8 -108,6 +108,6 @@@ stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets); stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes); stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors); - stats->rx_dropped = u64_stats_read(&mhi_netdev->stats.rx_dropped); - stats->rx_length_errors = u64_stats_read(&mhi_netdev->stats.rx_length_errors); } while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start));
do { @@@ -165,7 -170,6 +170,6 @@@ static void mhi_net_dl_callback(struct struct mhi_result *mhi_res) { struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev); - const struct mhi_net_proto *proto = mhi_netdev->proto; struct sk_buff *skb = mhi_res->buf_addr; int free_desc_count;
@@@ -205,11 -209,6 +209,6 @@@ mhi_netdev->skbagg_head = NULL; }
- u64_stats_update_begin(&mhi_netdev->stats.rx_syncp); - u64_stats_inc(&mhi_netdev->stats.rx_packets); - u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len); - u64_stats_update_end(&mhi_netdev->stats.rx_syncp); - switch (skb->data[0] & 0xf0) { case 0x40: skb->protocol = htons(ETH_P_IP); @@@ -222,10 -221,11 +221,11 @@@ break; }
- if (proto && proto->rx) - proto->rx(mhi_netdev, skb); - else - netif_rx(skb); + u64_stats_update_begin(&mhi_netdev->stats.rx_syncp); + u64_stats_inc(&mhi_netdev->stats.rx_packets); + u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len); + u64_stats_update_end(&mhi_netdev->stats.rx_syncp); + netif_rx(skb); }
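Since this is a raw-IP link with no Ethernet header, the only way to set skb->protocol is to inspect the IP version nibble in the first payload byte, which is what the switch above does: 0x4x marks IPv4 and 0x6x marks IPv6. The idiom in isolation, with the reasoning spelled out:

/* The first byte of an IP packet carries the version in its high
 * nibble. With no link-layer header, that nibble is the only hint
 * available for protocol classification.
 */
switch (skb->data[0] & 0xf0) {
case 0x40:
        skb->protocol = htons(ETH_P_IP);
        break;
case 0x60:
        skb->protocol = htons(ETH_P_IPV6);
        break;
default:
        /* neither IPv4 nor IPv6: count an rx error and drop */
        break;
}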
/* Refill if RX buffers queue becomes low */ @@@ -248,7 -248,6 +248,6 @@@ static void mhi_net_ul_callback(struct
u64_stats_update_begin(&mhi_netdev->stats.tx_syncp); if (unlikely(mhi_res->transaction_status)) { - /* MHI layer stopping/resetting the UL channel */ if (mhi_res->transaction_status == -ENOTCONN) { u64_stats_update_end(&mhi_netdev->stats.tx_syncp); @@@ -302,78 -301,47 +301,47 @@@ static void mhi_net_rx_refill_work(stru schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2); }
- static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id, - struct netlink_ext_ack *extack) + static int mhi_net_newlink(struct mhi_device *mhi_dev, struct net_device *ndev) { - const struct mhi_device_info *info; - struct mhi_device *mhi_dev = ctxt; struct mhi_net_dev *mhi_netdev; int err;
- info = (struct mhi_device_info *)mhi_dev->id->driver_data; - - /* For now we only support one link (link context 0), driver must be - * reworked to break 1:1 relationship for net MBIM and to forward setup - * call to rmnet(QMAP) otherwise. - */ - if (if_id != 0) - return -EINVAL; - - if (dev_get_drvdata(&mhi_dev->dev)) - return -EBUSY; - - mhi_netdev = wwan_netdev_drvpriv(ndev); + mhi_netdev = netdev_priv(ndev);
dev_set_drvdata(&mhi_dev->dev, mhi_netdev); mhi_netdev->ndev = ndev; mhi_netdev->mdev = mhi_dev; mhi_netdev->skbagg_head = NULL; - mhi_netdev->proto = info->proto; + mhi_netdev->mru = mhi_dev->mhi_cntrl->mru;
INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work); u64_stats_init(&mhi_netdev->stats.rx_syncp); u64_stats_init(&mhi_netdev->stats.tx_syncp);
/* Start MHI channels */ - err = mhi_prepare_for_transfer(mhi_dev, 0); + err = mhi_prepare_for_transfer(mhi_dev); if (err) goto out_err;
/* Number of transfer descriptors determines size of the queue */ mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
- if (extack) - err = register_netdevice(ndev); - else - err = register_netdev(ndev); + err = register_netdev(ndev); if (err) - goto out_err; - - if (mhi_netdev->proto) { - err = mhi_netdev->proto->init(mhi_netdev); - if (err) - goto out_err_proto; - } + return err;
return 0;
- out_err_proto: - unregister_netdevice(ndev); out_err: free_netdev(ndev); return err; }
- static void mhi_net_dellink(void *ctxt, struct net_device *ndev, - struct list_head *head) + static void mhi_net_dellink(struct mhi_device *mhi_dev, struct net_device *ndev) { - struct mhi_net_dev *mhi_netdev = wwan_netdev_drvpriv(ndev); - struct mhi_device *mhi_dev = ctxt; + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
- if (head) - unregister_netdevice_queue(ndev, head); - else - unregister_netdev(ndev); + unregister_netdev(ndev);
mhi_unprepare_from_transfer(mhi_dev);
@@@ -382,65 -350,34 +350,34 @@@ dev_set_drvdata(&mhi_dev->dev, NULL); }
- static const struct wwan_ops mhi_wwan_ops = { - .priv_size = sizeof(struct mhi_net_dev), - .setup = mhi_net_setup, - .newlink = mhi_net_newlink, - .dellink = mhi_net_dellink, - }; - static int mhi_net_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id) { const struct mhi_device_info *info = (struct mhi_device_info *)id->driver_data; - struct mhi_controller *cntrl = mhi_dev->mhi_cntrl; struct net_device *ndev; int err;
- err = wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_wwan_ops, mhi_dev, - WWAN_NO_DEFAULT_LINK); - if (err) - return err; - - if (!create_default_iface) - return 0; - - /* Create a default interface which is used as either RMNET real-dev, - * MBIM link 0 or ip link 0) - */ ndev = alloc_netdev(sizeof(struct mhi_net_dev), info->netname, NET_NAME_PREDICTABLE, mhi_net_setup); - if (!ndev) { - err = -ENOMEM; - goto err_unregister; - } + if (!ndev) + return -ENOMEM;
SET_NETDEV_DEV(ndev, &mhi_dev->dev);
- err = mhi_net_newlink(mhi_dev, ndev, 0, NULL); - if (err) - goto err_release; + err = mhi_net_newlink(mhi_dev, ndev); + if (err) { + free_netdev(ndev); + return err; + }
return 0; - - err_release: - free_netdev(ndev); - err_unregister: - wwan_unregister_ops(&cntrl->mhi_dev->dev); - - return err; }
static void mhi_net_remove(struct mhi_device *mhi_dev) { struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev); - struct mhi_controller *cntrl = mhi_dev->mhi_cntrl; - - /* WWAN core takes care of removing remaining links */ - wwan_unregister_ops(&cntrl->mhi_dev->dev);
- if (create_default_iface) - mhi_net_dellink(mhi_dev, mhi_netdev->ndev, NULL); + mhi_net_dellink(mhi_dev, mhi_netdev->ndev); }
static const struct mhi_device_info mhi_hwip0 = { @@@ -451,18 -388,11 +388,11 @@@ static const struct mhi_device_info mhi .netname = "mhi_swip%d", };
- static const struct mhi_device_info mhi_hwip0_mbim = { - .netname = "mhi_mbim%d", - .proto = &proto_mbim, - }; - static const struct mhi_device_id mhi_net_id_table[] = { /* Hardware accelerated data PATH (to modem IPA), protocol agnostic */ { .chan = "IP_HW0", .driver_data = (kernel_ulong_t)&mhi_hwip0 }, /* Software data PATH (to modem CPU) */ { .chan = "IP_SW0", .driver_data = (kernel_ulong_t)&mhi_swip0 }, - /* Hardware accelerated data PATH (to modem IPA), MBIM protocol */ - { .chan = "IP_HW0_MBIM", .driver_data = (kernel_ulong_t)&mhi_hwip0_mbim }, {} }; MODULE_DEVICE_TABLE(mhi, mhi_net_id_table); diff --combined drivers/net/usb/asix_devices.c index dc87e8caf954,cb01897c7a5d..30821f6a6d7a --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@@ -197,7 -197,7 +197,7 @@@ static const struct net_device_ops ax88 .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = asix_ioctl, + .ndo_eth_ioctl = asix_ioctl, .ndo_set_rx_mode = ax88172_set_multicast, };
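The .ndo_do_ioctl to .ndo_eth_ioctl conversions running through this merge reflect the split of the old catch-all ioctl hook: standard MII and hardware-timestamping ioctls (SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG, SIOCSHWTSTAMP) now arrive through .ndo_eth_ioctl. For phylib drivers the handler can be the stock helper, as the ax88772 ops above show; a sketch of the ops entry:

static const struct net_device_ops my_netdev_ops = {
        /* ... */
        /* phy_do_ioctl_running() forwards the MII ioctls to the
         * attached PHY, but only while the interface is running.
         */
        .ndo_eth_ioctl = phy_do_ioctl_running,
};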
@@@ -354,23 -354,24 +354,23 @@@ out static int ax88772_hw_reset(struct usbnet *dev, int in_pm) { struct asix_data *data = (struct asix_data *)&dev->data; - int ret, embd_phy; + struct asix_common_private *priv = dev->driver_priv; u16 rx_ctl; + int ret;
ret = asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_2 | AX_GPIO_GPO2EN, 5, in_pm); if (ret < 0) goto out;
- embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0); - - ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, + ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, priv->embd_phy, 0, 0, NULL, in_pm); if (ret < 0) { netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret); goto out; }
- if (embd_phy) { + if (priv->embd_phy) { ret = asix_sw_reset(dev, AX_SWRESET_IPPD, in_pm); if (ret < 0) goto out; @@@ -448,16 -449,17 +448,16 @@@ out static int ax88772a_hw_reset(struct usbnet *dev, int in_pm) { struct asix_data *data = (struct asix_data *)&dev->data; - int ret, embd_phy; + struct asix_common_private *priv = dev->driver_priv; u16 rx_ctl, phy14h, phy15h, phy16h; u8 chipcode = 0; + int ret;
ret = asix_write_gpio(dev, AX_GPIO_RSE, 5, in_pm); if (ret < 0) goto out;
- embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0); - - ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy | + ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, priv->embd_phy | AX_PHYSEL_SSEN, 0, 0, NULL, in_pm); if (ret < 0) { netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret); @@@ -587,7 -589,7 +587,7 @@@ static const struct net_device_ops ax88 .ndo_get_stats64 = dev_get_tstats64, .ndo_set_mac_address = asix_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_rx_mode = asix_set_multicast, };
@@@ -681,6 -683,12 +681,6 @@@ static int ax88772_init_phy(struct usbn struct asix_common_private *priv = dev->driver_priv; int ret;
- ret = asix_read_phy_addr(dev, true); - if (ret < 0) - return ret; - - priv->phy_addr = ret; - snprintf(priv->phy_name, sizeof(priv->phy_name), PHY_ID_FMT, priv->mdio->id, priv->phy_addr);
@@@ -706,14 -714,7 +706,13 @@@ static int ax88772_bind(struct usbnet * u8 buf[ETH_ALEN] = {0}, chipcode = 0; struct asix_common_private *priv; int ret, i; - u32 phyid;
+ priv = devm_kzalloc(&dev->udev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + dev->driver_priv = priv; + usbnet_get_endpoints(dev, intf);
/* Maybe the boot loader passed the MAC address via device tree */ @@@ -749,13 -750,6 +748,13 @@@ dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */ dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */
+ ret = asix_read_phy_addr(dev, true); + if (ret < 0) + return ret; + + priv->phy_addr = ret; + priv->embd_phy = ((priv->phy_addr & 0x1f) == 0x10); + asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0); chipcode &= AX_CHIPCODE_MASK;
@@@ -767,10 -761,6 +766,6 @@@ return ret; }
- /* Read PHYID register *AFTER* the PHY was reset properly */ - phyid = asix_get_phyid(dev); - netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid); - /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */ if (dev->driver_info->flags & FLAG_FRAMING_AX) { /* hard_mtu is still the default - the device does not support @@@ -778,6 -768,12 +773,6 @@@ dev->rx_urb_size = 2048; }
- priv = devm_kzalloc(&dev->udev->dev, sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - dev->driver_priv = priv; - priv->presvd_phy_bmcr = 0; priv->presvd_phy_advertise = 0; if (chipcode == AX_AX88772_CHIPCODE) { @@@ -816,12 -812,6 +811,12 @@@ static void ax88772_unbind(struct usbne asix_rx_fixup_common_free(dev->driver_priv); }
+static void ax88178_unbind(struct usbnet *dev, struct usb_interface *intf) +{ + asix_rx_fixup_common_free(dev->driver_priv); + kfree(dev->driver_priv); +} + static const struct ethtool_ops ax88178_ethtool_ops = { .get_drvinfo = asix_get_drvinfo, .get_link = asix_get_link, @@@ -1105,7 -1095,7 +1100,7 @@@ static const struct net_device_ops ax88 .ndo_set_mac_address = asix_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = asix_set_multicast, - .ndo_do_ioctl = asix_ioctl, + .ndo_eth_ioctl = asix_ioctl, .ndo_change_mtu = ax88178_change_mtu, };
@@@ -1220,6 -1210,7 +1215,7 @@@ static const struct driver_info ax88772 .unbind = ax88772_unbind, .status = asix_status, .reset = ax88772_reset, + .stop = ax88772_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET, .rx_fixup = asix_rx_fixup_common, @@@ -1230,7 -1221,7 +1226,7 @@@ static const struct driver_info ax88178_info = { .description = "ASIX AX88178 USB 2.0 Ethernet", .bind = ax88178_bind, - .unbind = ax88772_unbind, + .unbind = ax88178_unbind, .status = asix_status, .link_reset = ax88178_link_reset, .reset = ax88178_reset, diff --combined drivers/net/usb/pegasus.c index 9f9dd0de33cb,36dafcb3d04a..6a92a3fef75e --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@@ -446,7 -446,7 +446,7 @@@ static int enable_net_traffic(struct ne write_mii_word(pegasus, 0, 0x1b, &auxmode); }
- return 0; + return ret; fail: netif_dbg(pegasus, drv, pegasus->net, "%s failed\n", __func__); return ret; @@@ -835,7 -835,7 +835,7 @@@ static int pegasus_open(struct net_devi if (!pegasus->rx_skb) goto exit;
- res = set_registers(pegasus, EthID, 6, net->dev_addr); + set_registers(pegasus, EthID, 6, net->dev_addr);
usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, usb_rcvbulkpipe(pegasus->usb, 1), @@@ -1001,7 -1001,8 +1001,8 @@@ static const struct ethtool_ops ops = .set_link_ksettings = pegasus_set_link_ksettings, };
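pegasus lands on the other half of that ioctl split: .ndo_siocdevprivate receives the SIOCDEVPRIVATE..SIOCDEVPRIVATE+15 range together with an explicit user pointer, so drivers no longer reinterpret struct ifreq behind the core's back. The handler shape (sketch with hypothetical names):

static int my_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
                             void __user *data, int cmd)
{
        switch (cmd) {
        case SIOCDEVPRIVATE:
                /* legacy users may still pass data in ifr->ifr_ifru;
                 * new interfaces should copy_from_user() via 'data'.
                 */
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}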
- static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
+ static int pegasus_siocdevprivate(struct net_device *net, struct ifreq *rq,
+ void __user *udata, int cmd)
{
__u16 *data = (__u16 *) &rq->ifr_ifru;
pegasus_t *pegasus = netdev_priv(net);
@@@ -1269,7 -1270,7 +1270,7 @@@ static int pegasus_resume(struct usb_in
static const struct net_device_ops pegasus_netdev_ops = {
.ndo_open = pegasus_open,
.ndo_stop = pegasus_close,
- .ndo_do_ioctl = pegasus_ioctl,
+ .ndo_siocdevprivate = pegasus_siocdevprivate,
.ndo_start_xmit = pegasus_start_xmit,
.ndo_set_rx_mode = pegasus_set_multicast,
.ndo_tx_timeout = pegasus_tx_timeout,
diff --combined drivers/net/wwan/mhi_wwan_mbim.c
index 000000000000,377529bbf124..71bf9b4f769f
mode 000000,100644..100644
--- a/drivers/net/wwan/mhi_wwan_mbim.c
+++ b/drivers/net/wwan/mhi_wwan_mbim.c
@@@ -1,0 -1,658 +1,658 @@@
+ // SPDX-License-Identifier: GPL-2.0-or-later
+ /* MHI MBIM Network driver - Network/MBIM over MHI bus
+ *
+ * Copyright (C) 2021 Linaro Ltd loic.poulain@linaro.org
+ *
+ * This driver copies some code from cdc_ncm, which is:
+ * Copyright (C) ST-Ericsson 2010-2012
+ * and cdc_mbim, which is:
+ * Copyright (c) 2012 Smith Micro Software, Inc.
+ * Copyright (c) 2012 Bjørn Mork bjorn@mork.no
+ *
+ */
+
+ #include <linux/ethtool.h>
+ #include <linux/if_arp.h>
+ #include <linux/if_vlan.h>
+ #include <linux/ip.h>
+ #include <linux/mhi.h>
+ #include <linux/mii.h>
+ #include <linux/mod_devicetable.h>
+ #include <linux/module.h>
+ #include <linux/netdevice.h>
+ #include <linux/skbuff.h>
+ #include <linux/u64_stats_sync.h>
+ #include <linux/usb.h>
+ #include <linux/usb/cdc.h>
+ #include <linux/usb/usbnet.h>
+ #include <linux/usb/cdc_ncm.h>
+ #include <linux/wwan.h>
+
+ /* 3500 allows us to optimize skb allocation, the skbs will basically fit in
+ * one 4K page. Large MBIM packets will simply be split over several MHI
+ * transfers and chained by the MHI net layer (zerocopy). 
+ */ + #define MHI_DEFAULT_MRU 3500 + + #define MHI_MBIM_DEFAULT_MTU 1500 + #define MHI_MAX_BUF_SZ 0xffff + + #define MBIM_NDP16_SIGN_MASK 0x00ffffff + + #define MHI_MBIM_LINK_HASH_SIZE 8 + #define LINK_HASH(session) ((session) % MHI_MBIM_LINK_HASH_SIZE) + + struct mhi_mbim_link { + struct mhi_mbim_context *mbim; + struct net_device *ndev; + unsigned int session; + + /* stats */ + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t rx_errors; + u64_stats_t tx_packets; + u64_stats_t tx_bytes; + u64_stats_t tx_errors; + u64_stats_t tx_dropped; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync rx_syncp; + + struct hlist_node hlnode; + }; + + struct mhi_mbim_context { + struct mhi_device *mdev; + struct sk_buff *skbagg_head; + struct sk_buff *skbagg_tail; + unsigned int mru; + u32 rx_queue_sz; + u16 rx_seq; + u16 tx_seq; + struct delayed_work rx_refill; + spinlock_t tx_lock; + struct hlist_head link_list[MHI_MBIM_LINK_HASH_SIZE]; + }; + + struct mbim_tx_hdr { + struct usb_cdc_ncm_nth16 nth16; + struct usb_cdc_ncm_ndp16 ndp16; + struct usb_cdc_ncm_dpe16 dpe16[2]; + } __packed; + + static struct mhi_mbim_link *mhi_mbim_get_link_rcu(struct mhi_mbim_context *mbim, + unsigned int session) + { + struct mhi_mbim_link *link; + + hlist_for_each_entry_rcu(link, &mbim->link_list[LINK_HASH(session)], hlnode) { + if (link->session == session) + return link; + } + + return NULL; + } + + static struct sk_buff *mbim_tx_fixup(struct sk_buff *skb, unsigned int session, + u16 tx_seq) + { + unsigned int dgram_size = skb->len; + struct usb_cdc_ncm_nth16 *nth16; + struct usb_cdc_ncm_ndp16 *ndp16; + struct mbim_tx_hdr *mbim_hdr; + + /* Only one NDP is sent, containing the IP packet (no aggregation) */ + + /* Ensure we have enough headroom for crafting MBIM header */ + if (skb_cow_head(skb, sizeof(struct mbim_tx_hdr))) { + dev_kfree_skb_any(skb); + return NULL; + } + + mbim_hdr = skb_push(skb, sizeof(struct mbim_tx_hdr)); + + /* Fill NTB header */ + nth16 = &mbim_hdr->nth16; + nth16->dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN); + nth16->wHeaderLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16)); + nth16->wSequence = cpu_to_le16(tx_seq); + nth16->wBlockLength = cpu_to_le16(skb->len); + nth16->wNdpIndex = cpu_to_le16(sizeof(struct usb_cdc_ncm_nth16)); + + /* Fill the unique NDP */ + ndp16 = &mbim_hdr->ndp16; + ndp16->dwSignature = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN | (session << 24)); + ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + + sizeof(struct usb_cdc_ncm_dpe16) * 2); + ndp16->wNextNdpIndex = 0; + + /* Datagram follows the mbim header */ + ndp16->dpe16[0].wDatagramIndex = cpu_to_le16(sizeof(struct mbim_tx_hdr)); + ndp16->dpe16[0].wDatagramLength = cpu_to_le16(dgram_size); + + /* null termination */ + ndp16->dpe16[1].wDatagramIndex = 0; + ndp16->dpe16[1].wDatagramLength = 0; + + return skb; + } + + static netdev_tx_t mhi_mbim_ndo_xmit(struct sk_buff *skb, struct net_device *ndev) + { + struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev); + struct mhi_mbim_context *mbim = link->mbim; + unsigned long flags; + int err = -ENOMEM; + + /* Serialize MHI channel queuing and MBIM seq */ + spin_lock_irqsave(&mbim->tx_lock, flags); + + skb = mbim_tx_fixup(skb, link->session, mbim->tx_seq); + if (unlikely(!skb)) + goto exit_unlock; + + err = mhi_queue_skb(mbim->mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT); + + if (mhi_queue_is_full(mbim->mdev, DMA_TO_DEVICE)) + netif_stop_queue(ndev); + + if (!err) + mbim->tx_seq++; + + exit_unlock: + spin_unlock_irqrestore(&mbim->tx_lock, 
flags); + + if (unlikely(err)) { + net_err_ratelimited("%s: Failed to queue TX buf (%d)\n", + ndev->name, err); + dev_kfree_skb_any(skb); + goto exit_drop; + } + + return NETDEV_TX_OK; + + exit_drop: + u64_stats_update_begin(&link->tx_syncp); + u64_stats_inc(&link->tx_dropped); + u64_stats_update_end(&link->tx_syncp); + + return NETDEV_TX_OK; + } + + static int mbim_rx_verify_nth16(struct mhi_mbim_context *mbim, struct sk_buff *skb) + { + struct usb_cdc_ncm_nth16 *nth16; + int len; + + if (skb->len < sizeof(struct usb_cdc_ncm_nth16) + + sizeof(struct usb_cdc_ncm_ndp16)) { + net_err_ratelimited("frame too short\n"); + return -EINVAL; + } + + nth16 = (struct usb_cdc_ncm_nth16 *)skb->data; + + if (nth16->dwSignature != cpu_to_le32(USB_CDC_NCM_NTH16_SIGN)) { + net_err_ratelimited("invalid NTH16 signature <%#010x>\n", + le32_to_cpu(nth16->dwSignature)); + return -EINVAL; + } + + /* No limit on the block length, except the size of the data pkt */ + len = le16_to_cpu(nth16->wBlockLength); + if (len > skb->len) { + net_err_ratelimited("NTB does not fit into the skb %u/%u\n", + len, skb->len); + return -EINVAL; + } + + if (mbim->rx_seq + 1 != le16_to_cpu(nth16->wSequence) && + (mbim->rx_seq || le16_to_cpu(nth16->wSequence)) && + !(mbim->rx_seq == 0xffff && !le16_to_cpu(nth16->wSequence))) { + net_err_ratelimited("sequence number glitch prev=%d curr=%d\n", + mbim->rx_seq, le16_to_cpu(nth16->wSequence)); + } + mbim->rx_seq = le16_to_cpu(nth16->wSequence); + + return le16_to_cpu(nth16->wNdpIndex); + } + + static int mbim_rx_verify_ndp16(struct sk_buff *skb, struct usb_cdc_ncm_ndp16 *ndp16) + { + int ret; + + if (le16_to_cpu(ndp16->wLength) < USB_CDC_NCM_NDP16_LENGTH_MIN) { + net_err_ratelimited("invalid DPT16 length <%u>\n", + le16_to_cpu(ndp16->wLength)); + return -EINVAL; + } + + ret = ((le16_to_cpu(ndp16->wLength) - sizeof(struct usb_cdc_ncm_ndp16)) + / sizeof(struct usb_cdc_ncm_dpe16)); + ret--; /* Last entry is always a NULL terminator */ + + if (sizeof(struct usb_cdc_ncm_ndp16) + + ret * sizeof(struct usb_cdc_ncm_dpe16) > skb->len) { + net_err_ratelimited("Invalid nframes = %d\n", ret); + return -EINVAL; + } + + return ret; + } + + static void mhi_mbim_rx(struct mhi_mbim_context *mbim, struct sk_buff *skb) + { + int ndpoffset; + + /* Check NTB header and retrieve first NDP offset */ + ndpoffset = mbim_rx_verify_nth16(mbim, skb); + if (ndpoffset < 0) { + net_err_ratelimited("mbim: Incorrect NTB header\n"); + goto error; + } + + /* Process each NDP */ + while (1) { + struct usb_cdc_ncm_ndp16 ndp16; + struct usb_cdc_ncm_dpe16 dpe16; + struct mhi_mbim_link *link; + int nframes, n, dpeoffset; + unsigned int session; + + if (skb_copy_bits(skb, ndpoffset, &ndp16, sizeof(ndp16))) { + net_err_ratelimited("mbim: Incorrect NDP offset (%u)\n", + ndpoffset); + goto error; + } + + /* Check NDP header and retrieve number of datagrams */ + nframes = mbim_rx_verify_ndp16(skb, &ndp16); + if (nframes < 0) { + net_err_ratelimited("mbim: Incorrect NDP16\n"); + goto error; + } + + /* Only IP data type supported, no DSS in MHI context */ + if ((ndp16.dwSignature & cpu_to_le32(MBIM_NDP16_SIGN_MASK)) + != cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN)) { + net_err_ratelimited("mbim: Unsupported NDP type\n"); + goto next_ndp; + } + + session = (le32_to_cpu(ndp16.dwSignature) & ~MBIM_NDP16_SIGN_MASK) >> 24; + + rcu_read_lock(); + + link = mhi_mbim_get_link_rcu(mbim, session); + if (!link) { + net_err_ratelimited("mbim: bad packet session (%u)\n", session); + goto unlock; + } + + /* de-aggregate and deliver IP packets */ + 
dpeoffset = ndpoffset + sizeof(struct usb_cdc_ncm_ndp16); + for (n = 0; n < nframes; n++, dpeoffset += sizeof(dpe16)) { + u16 dgram_offset, dgram_len; + struct sk_buff *skbn; + + if (skb_copy_bits(skb, dpeoffset, &dpe16, sizeof(dpe16))) + break; + + dgram_offset = le16_to_cpu(dpe16.wDatagramIndex); + dgram_len = le16_to_cpu(dpe16.wDatagramLength); + + if (!dgram_offset || !dgram_len) + break; /* null terminator */ + + skbn = netdev_alloc_skb(link->ndev, dgram_len); + if (!skbn) + continue; + + skb_put(skbn, dgram_len); + skb_copy_bits(skb, dgram_offset, skbn->data, dgram_len); + + switch (skbn->data[0] & 0xf0) { + case 0x40: + skbn->protocol = htons(ETH_P_IP); + break; + case 0x60: + skbn->protocol = htons(ETH_P_IPV6); + break; + default: + net_err_ratelimited("%s: unknown protocol\n", + link->ndev->name); + dev_kfree_skb_any(skbn); + u64_stats_update_begin(&link->rx_syncp); + u64_stats_inc(&link->rx_errors); + u64_stats_update_end(&link->rx_syncp); + continue; + } + + u64_stats_update_begin(&link->rx_syncp); + u64_stats_inc(&link->rx_packets); + u64_stats_add(&link->rx_bytes, skbn->len); + u64_stats_update_end(&link->rx_syncp); + + netif_rx(skbn); + } + unlock: + rcu_read_unlock(); + next_ndp: + /* Other NDP to process? */ + ndpoffset = (int)le16_to_cpu(ndp16.wNextNdpIndex); + if (!ndpoffset) + break; + } + + /* free skb */ + dev_consume_skb_any(skb); + return; + error: + dev_kfree_skb_any(skb); + } + + static struct sk_buff *mhi_net_skb_agg(struct mhi_mbim_context *mbim, + struct sk_buff *skb) + { + struct sk_buff *head = mbim->skbagg_head; + struct sk_buff *tail = mbim->skbagg_tail; + + /* This is non-paged skb chaining using frag_list */ + if (!head) { + mbim->skbagg_head = skb; + return skb; + } + + if (!skb_shinfo(head)->frag_list) + skb_shinfo(head)->frag_list = skb; + else + tail->next = skb; + + head->len += skb->len; + head->data_len += skb->len; + head->truesize += skb->truesize; + + mbim->skbagg_tail = skb; + + return mbim->skbagg_head; + } + + static void mhi_net_rx_refill_work(struct work_struct *work) + { + struct mhi_mbim_context *mbim = container_of(work, struct mhi_mbim_context, + rx_refill.work); + struct mhi_device *mdev = mbim->mdev; + int err; + + while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) { + struct sk_buff *skb = alloc_skb(MHI_DEFAULT_MRU, GFP_KERNEL); + + if (unlikely(!skb)) + break; + + err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, + MHI_DEFAULT_MRU, MHI_EOT); + if (unlikely(err)) { + kfree_skb(skb); + break; + } + + /* Do not hog the CPU if rx buffers are consumed faster than + * queued (unlikely). 
+ */ + cond_resched(); + } + + /* If we're still starved of rx buffers, reschedule later */ + if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mbim->rx_queue_sz) + schedule_delayed_work(&mbim->rx_refill, HZ / 2); + } + + static void mhi_mbim_dl_callback(struct mhi_device *mhi_dev, + struct mhi_result *mhi_res) + { + struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev); + struct sk_buff *skb = mhi_res->buf_addr; + int free_desc_count; + + free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE); + + if (unlikely(mhi_res->transaction_status)) { + switch (mhi_res->transaction_status) { + case -EOVERFLOW: + /* Packet has been split over multiple transfers */ + skb_put(skb, mhi_res->bytes_xferd); + mhi_net_skb_agg(mbim, skb); + break; + case -ENOTCONN: + /* MHI layer stopping/resetting the DL channel */ + dev_kfree_skb_any(skb); + return; + default: + /* Unknown error, simply drop */ + dev_kfree_skb_any(skb); + } + } else { + skb_put(skb, mhi_res->bytes_xferd); + + if (mbim->skbagg_head) { + /* Aggregate the final fragment */ + skb = mhi_net_skb_agg(mbim, skb); + mbim->skbagg_head = NULL; + } + + mhi_mbim_rx(mbim, skb); + } + + /* Refill if RX buffers queue becomes low */ + if (free_desc_count >= mbim->rx_queue_sz / 2) + schedule_delayed_work(&mbim->rx_refill, 0); + } + + static void mhi_mbim_ndo_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats) + { + struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev); + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(&link->rx_syncp); + stats->rx_packets = u64_stats_read(&link->rx_packets); + stats->rx_bytes = u64_stats_read(&link->rx_bytes); + stats->rx_errors = u64_stats_read(&link->rx_errors); + } while (u64_stats_fetch_retry_irq(&link->rx_syncp, start)); + + do { + start = u64_stats_fetch_begin_irq(&link->tx_syncp); + stats->tx_packets = u64_stats_read(&link->tx_packets); + stats->tx_bytes = u64_stats_read(&link->tx_bytes); + stats->tx_errors = u64_stats_read(&link->tx_errors); + stats->tx_dropped = u64_stats_read(&link->tx_dropped); + } while (u64_stats_fetch_retry_irq(&link->tx_syncp, start)); + } + + static void mhi_mbim_ul_callback(struct mhi_device *mhi_dev, + struct mhi_result *mhi_res) + { + struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev); + struct sk_buff *skb = mhi_res->buf_addr; + struct net_device *ndev = skb->dev; + struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev); + + /* Hardware has consumed the buffer, so free the skb (which is not + * freed by the MHI stack) and perform accounting. + */ + dev_consume_skb_any(skb); + + u64_stats_update_begin(&link->tx_syncp); + if (unlikely(mhi_res->transaction_status)) { + /* MHI layer stopping/resetting the UL channel */ + if (mhi_res->transaction_status == -ENOTCONN) { + u64_stats_update_end(&link->tx_syncp); + return; + } + + u64_stats_inc(&link->tx_errors); + } else { + u64_stats_inc(&link->tx_packets); + u64_stats_add(&link->tx_bytes, mhi_res->bytes_xferd); + } + u64_stats_update_end(&link->tx_syncp); + + if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mbim->mdev, DMA_TO_DEVICE)) + netif_wake_queue(ndev); + } + + static int mhi_mbim_ndo_open(struct net_device *ndev) + { + struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev); + + /* Feed the MHI rx buffer pool */ + schedule_delayed_work(&link->mbim->rx_refill, 0); + + /* Carrier is established via out-of-band channel (e.g. 
qmi) */ + netif_carrier_on(ndev); + + netif_start_queue(ndev); + + return 0; + } + + static int mhi_mbim_ndo_stop(struct net_device *ndev) + { + netif_stop_queue(ndev); + netif_carrier_off(ndev); + + return 0; + } + + static const struct net_device_ops mhi_mbim_ndo = { + .ndo_open = mhi_mbim_ndo_open, + .ndo_stop = mhi_mbim_ndo_stop, + .ndo_start_xmit = mhi_mbim_ndo_xmit, + .ndo_get_stats64 = mhi_mbim_ndo_get_stats64, + }; + + static int mhi_mbim_newlink(void *ctxt, struct net_device *ndev, u32 if_id, + struct netlink_ext_ack *extack) + { + struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev); + struct mhi_mbim_context *mbim = ctxt; + + link->session = if_id; + link->mbim = mbim; + link->ndev = ndev; + u64_stats_init(&link->rx_syncp); + u64_stats_init(&link->tx_syncp); + + rcu_read_lock(); + if (mhi_mbim_get_link_rcu(mbim, if_id)) { + rcu_read_unlock(); + return -EEXIST; + } + rcu_read_unlock(); + + /* Already protected by RTNL lock */ + hlist_add_head_rcu(&link->hlnode, &mbim->link_list[LINK_HASH(if_id)]); + + return register_netdevice(ndev); + } + + static void mhi_mbim_dellink(void *ctxt, struct net_device *ndev, + struct list_head *head) + { + struct mhi_mbim_link *link = wwan_netdev_drvpriv(ndev); + + hlist_del_init_rcu(&link->hlnode); + synchronize_rcu(); + + unregister_netdevice_queue(ndev, head); + } + + static void mhi_mbim_setup(struct net_device *ndev) + { + ndev->header_ops = NULL; /* No header */ + ndev->type = ARPHRD_RAWIP; + ndev->needed_headroom = sizeof(struct mbim_tx_hdr); + ndev->hard_header_len = 0; + ndev->addr_len = 0; + ndev->flags = IFF_POINTOPOINT | IFF_NOARP; + ndev->netdev_ops = &mhi_mbim_ndo; + ndev->mtu = MHI_MBIM_DEFAULT_MTU; + ndev->min_mtu = ETH_MIN_MTU; + ndev->max_mtu = MHI_MAX_BUF_SZ - ndev->needed_headroom; + ndev->tx_queue_len = 1000; + } + + static const struct wwan_ops mhi_mbim_wwan_ops = { + .priv_size = sizeof(struct mhi_mbim_link), + .setup = mhi_mbim_setup, + .newlink = mhi_mbim_newlink, + .dellink = mhi_mbim_dellink, + }; + + static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id) + { + struct mhi_controller *cntrl = mhi_dev->mhi_cntrl; + struct mhi_mbim_context *mbim; + int err; + + mbim = devm_kzalloc(&mhi_dev->dev, sizeof(*mbim), GFP_KERNEL); + if (!mbim) + return -ENOMEM; + + spin_lock_init(&mbim->tx_lock); + dev_set_drvdata(&mhi_dev->dev, mbim); + mbim->mdev = mhi_dev; + mbim->mru = mhi_dev->mhi_cntrl->mru ? 
mhi_dev->mhi_cntrl->mru : MHI_DEFAULT_MRU; + + INIT_DELAYED_WORK(&mbim->rx_refill, mhi_net_rx_refill_work); + + /* Start MHI channels */ - err = mhi_prepare_for_transfer(mhi_dev, 0); ++ err = mhi_prepare_for_transfer(mhi_dev); + if (err) + return err; + + /* Number of transfer descriptors determines size of the queue */ + mbim->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE); + + /* Register wwan link ops with MHI controller representing WWAN instance */ + return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, 0); + } + + static void mhi_mbim_remove(struct mhi_device *mhi_dev) + { + struct mhi_mbim_context *mbim = dev_get_drvdata(&mhi_dev->dev); + struct mhi_controller *cntrl = mhi_dev->mhi_cntrl; + + mhi_unprepare_from_transfer(mhi_dev); + cancel_delayed_work_sync(&mbim->rx_refill); + wwan_unregister_ops(&cntrl->mhi_dev->dev); + kfree_skb(mbim->skbagg_head); + dev_set_drvdata(&mhi_dev->dev, NULL); + } + + static const struct mhi_device_id mhi_mbim_id_table[] = { + /* Hardware accelerated data PATH (to modem IPA), MBIM protocol */ + { .chan = "IP_HW0_MBIM", .driver_data = 0 }, + {} + }; + MODULE_DEVICE_TABLE(mhi, mhi_mbim_id_table); + + static struct mhi_driver mhi_mbim_driver = { + .probe = mhi_mbim_probe, + .remove = mhi_mbim_remove, + .dl_xfer_cb = mhi_mbim_dl_callback, + .ul_xfer_cb = mhi_mbim_ul_callback, + .id_table = mhi_mbim_id_table, + .driver = { + .name = "mhi_wwan_mbim", + .owner = THIS_MODULE, + }, + }; + + module_mhi_driver(mhi_mbim_driver); + + MODULE_AUTHOR("Loic Poulain loic.poulain@linaro.org"); + MODULE_DESCRIPTION("Network/MBIM over MHI"); + MODULE_LICENSE("GPL v2"); diff --combined drivers/pci/pci.h index 78557d2c6612,2f52110cac97..1cce56c2aea0 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@@ -33,32 -33,10 +33,32 @@@ enum pci_mmap_api int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vmai, enum pci_mmap_api mmap_api);
-int pci_probe_reset_function(struct pci_dev *dev); +bool pci_reset_supported(struct pci_dev *dev); +void pci_init_reset_methods(struct pci_dev *dev); int pci_bridge_secondary_bus_reset(struct pci_dev *dev); int pci_bus_error_reset(struct pci_dev *dev);
+struct pci_cap_saved_data { + u16 cap_nr; + bool cap_extended; + unsigned int size; + u32 data[]; +}; + +struct pci_cap_saved_state { + struct hlist_node next; + struct pci_cap_saved_data cap; +}; + +void pci_allocate_cap_save_buffers(struct pci_dev *dev); +void pci_free_cap_save_buffers(struct pci_dev *dev); +int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size); +int pci_add_ext_cap_save_buffer(struct pci_dev *dev, + u16 cap, unsigned int size); +struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap); +struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, + u16 cap); + #define PCI_PM_D2_DELAY 200 /* usec; see PCIe r4.0, sec 5.9.1 */ #define PCI_PM_D3HOT_WAIT 10 /* msec */ #define PCI_PM_D3COLD_WAIT 100 /* msec */ @@@ -122,6 -100,8 +122,6 @@@ void pci_pm_init(struct pci_dev *dev) void pci_ea_init(struct pci_dev *dev); void pci_msi_init(struct pci_dev *dev); void pci_msix_init(struct pci_dev *dev); -void pci_allocate_cap_save_buffers(struct pci_dev *dev); -void pci_free_cap_save_buffers(struct pci_dev *dev); bool pci_bridge_d3_possible(struct pci_dev *dev); void pci_bridge_d3_update(struct pci_dev *dev); void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev); @@@ -617,28 -597,20 +617,25 @@@ static inline void pcie_ecrc_get_policy
#ifdef CONFIG_PCIE_PTM void pci_ptm_init(struct pci_dev *dev); - int pci_enable_ptm(struct pci_dev *dev, u8 *granularity); #else static inline void pci_ptm_init(struct pci_dev *dev) { } - static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity) - { return -EINVAL; } #endif
struct pci_dev_reset_methods { u16 vendor; u16 device; - int (*reset)(struct pci_dev *dev, int probe); + int (*reset)(struct pci_dev *dev, bool probe); +}; + +struct pci_reset_fn_method { + int (*reset_fn)(struct pci_dev *pdev, bool probe); + char *name; };
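The int-to-bool change in these reset hooks encodes a long-standing convention: when probe is true the method must only report whether it could reset the device without touching it, and the reset itself runs when probe is false. A quirk-style method then reads (sketch):

static int my_dev_reset(struct pci_dev *dev, bool probe)
{
        if (probe)
                return 0;       /* reset is supported, do nothing yet */

        /* ... perform the device-specific reset sequence ... */
        return 0;
}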
#ifdef CONFIG_PCI_QUIRKS -int pci_dev_specific_reset(struct pci_dev *dev, int probe); +int pci_dev_specific_reset(struct pci_dev *dev, bool probe); #else -static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe) +static inline int pci_dev_specific_reset(struct pci_dev *dev, bool probe) { return -ENOTTY; } @@@ -726,15 -698,7 +723,15 @@@ static inline int pci_aer_raw_clear_sta #ifdef CONFIG_ACPI int pci_acpi_program_hp_params(struct pci_dev *dev); extern const struct attribute_group pci_dev_acpi_attr_group; +void pci_set_acpi_fwnode(struct pci_dev *dev); +int pci_dev_acpi_reset(struct pci_dev *dev, bool probe); #else +static inline int pci_dev_acpi_reset(struct pci_dev *dev, bool probe) +{ + return -ENOTTY; +} + +static inline void pci_set_acpi_fwnode(struct pci_dev *dev) {} static inline int pci_acpi_program_hp_params(struct pci_dev *dev) { return -ENODEV; @@@ -745,6 -709,4 +742,6 @@@ extern const struct attribute_group aspm_ctrl_attr_group; #endif
+extern const struct attribute_group pci_dev_reset_method_attr_group; + #endif /* DRIVERS_PCI_H */ diff --combined drivers/pci/pcie/ptm.c index 4810faa67f52,8a4ad974c5ac..368a254e3124 --- a/drivers/pci/pcie/ptm.c +++ b/drivers/pci/pcie/ptm.c @@@ -60,8 -60,10 +60,8 @@@ void pci_save_ptm_state(struct pci_dev return;
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM); - if (!save_state) { - pci_err(dev, "no suspend buffer for PTM\n"); + if (!save_state) return; - }
cap = (u16 *)&save_state->cap.data[0]; pci_read_config_word(dev, ptm + PCI_PTM_CTRL, cap); @@@ -202,3 -204,12 +202,12 @@@ int pci_enable_ptm(struct pci_dev *dev return 0; } EXPORT_SYMBOL(pci_enable_ptm); + + bool pcie_ptm_enabled(struct pci_dev *dev) + { + if (!dev) + return false; + + return dev->ptm_enabled; + } + EXPORT_SYMBOL(pcie_ptm_enabled); diff --combined drivers/s390/net/qeth_core_main.c index f96755a0a261,5b973f377504..41ca6273b750 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@@ -57,8 -57,7 +57,7 @@@ struct qeth_dbf_info qeth_dbf[QETH_DBF_ }; EXPORT_SYMBOL_GPL(qeth_dbf);
- struct kmem_cache *qeth_core_header_cache; - EXPORT_SYMBOL_GPL(qeth_core_header_cache); + static struct kmem_cache *qeth_core_header_cache; static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct device *qeth_core_root_dev; @@@ -101,8 -100,6 +100,6 @@@ static const char *qeth_get_cardname(st return " OSD Express"; case QETH_CARD_TYPE_IQD: return " HiperSockets"; - case QETH_CARD_TYPE_OSN: - return " OSN QDIO"; case QETH_CARD_TYPE_OSM: return " OSM QDIO"; case QETH_CARD_TYPE_OSX: @@@ -157,8 -154,6 +154,6 @@@ const char *qeth_get_cardname_short(str } case QETH_CARD_TYPE_IQD: return "HiperSockets"; - case QETH_CARD_TYPE_OSN: - return "OSN"; case QETH_CARD_TYPE_OSM: return "OSM_1000"; case QETH_CARD_TYPE_OSX: @@@ -431,6 -426,13 +426,13 @@@ static enum iucv_tx_notify qeth_compute return n; }
+ static void qeth_put_cmd(struct qeth_cmd_buffer *iob) + { + if (refcount_dec_and_test(&iob->ref_count)) { + kfree(iob->data); + kfree(iob); + } + } static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len, void *data) { @@@ -499,12 -501,11 +501,11 @@@ static void qeth_dequeue_cmd(struct qet spin_unlock_irq(&card->lock); }
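Folding qeth_put_cmd() into this file keeps the command-buffer lifetime rules local; the mechanism is the usual refcount_t pattern in which the final put frees the object. For contrast, the acquire side would look like this (sketch, assuming iob->ref_count was initialised with refcount_set(&iob->ref_count, 1)):

/* take an extra reference while the command sits on a wait queue */
refcount_inc(&iob->ref_count);

/* ... each owner later drops its reference; the last one frees: */
if (refcount_dec_and_test(&iob->ref_count)) {
        kfree(iob->data);
        kfree(iob);
}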
- void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason) + static void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason) { iob->rc = reason; complete(&iob->done); } - EXPORT_SYMBOL_GPL(qeth_notify_cmd);
static void qeth_flush_local_addrs4(struct qeth_card *card) { @@@ -781,10 -782,7 +782,7 @@@ static struct qeth_ipa_cmd *qeth_check_ QETH_CARD_TEXT(card, 5, "chkipad");
if (IS_IPA_REPLY(cmd)) { - if (cmd->hdr.command != IPA_CMD_SETCCID && - cmd->hdr.command != IPA_CMD_DELCCID && - cmd->hdr.command != IPA_CMD_MODCCID && - cmd->hdr.command != IPA_CMD_SET_DIAG_ASS) + if (cmd->hdr.command != IPA_CMD_SET_DIAG_ASS) qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card); return cmd; } @@@ -819,8 -817,6 +817,6 @@@ if (card->discipline->control_event_handler(card, cmd)) return cmd; return NULL; - case IPA_CMD_MODCCID: - return cmd; case IPA_CMD_REGISTER_LOCAL_ADDR: if (cmd->hdr.prot_version == QETH_PROT_IPV4) qeth_add_local_addrs4(card, &cmd->data.local_addrs4); @@@ -877,15 -873,6 +873,6 @@@ static int qeth_check_idx_response(stru return 0; }
- void qeth_put_cmd(struct qeth_cmd_buffer *iob) - { - if (refcount_dec_and_test(&iob->ref_count)) { - kfree(iob->data); - kfree(iob); - } - } - EXPORT_SYMBOL_GPL(qeth_put_cmd); - static void qeth_release_buffer_cb(struct qeth_card *card, struct qeth_cmd_buffer *iob, unsigned int data_length) @@@ -899,9 -886,9 +886,9 @@@ static void qeth_cancel_cmd(struct qeth qeth_put_cmd(iob); }
- struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel, - unsigned int length, unsigned int ccws, - long timeout) + static struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel, + unsigned int length, + unsigned int ccws, long timeout) { struct qeth_cmd_buffer *iob;
@@@ -927,7 -914,6 +914,6 @@@ iob->length = length; return iob; } - EXPORT_SYMBOL_GPL(qeth_alloc_cmd);
static void qeth_issue_next_read_cb(struct qeth_card *card, struct qeth_cmd_buffer *iob, @@@ -958,11 -944,6 +944,6 @@@ cmd = qeth_check_ipa_data(card, cmd); if (!cmd) goto out; - if (IS_OSN(card) && card->osn_info.assist_cb && - cmd->hdr.command != IPA_CMD_STARTLAN) { - card->osn_info.assist_cb(card->dev, cmd); - goto out; - } }
/* match against pending cmd requests */ @@@ -1835,7 -1816,7 +1816,7 @@@ static enum qeth_discipline_id qeth_enf { enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
- if (IS_OSM(card) || IS_OSN(card)) + if (IS_OSM(card)) disc = QETH_DISCIPLINE_LAYER2; else if (IS_VM_NIC(card)) disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 : @@@ -1885,7 -1866,6 +1866,6 @@@ static void qeth_idx_init(struct qeth_c card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD; break; case QETH_CARD_TYPE_OSD: - case QETH_CARD_TYPE_OSN: card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD; break; default: @@@ -2442,9 -2422,7 +2422,7 @@@ static int qeth_ulp_enable_cb(struct qe
static u8 qeth_mpc_select_prot_type(struct qeth_card *card) { - if (IS_OSN(card)) - return QETH_PROT_OSN2; - return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP; + return IS_LAYER2(card) ? QETH_MPC_PROT_L2 : QETH_MPC_PROT_L3; }
static int qeth_ulp_enable(struct qeth_card *card) @@@ -3000,10 -2978,8 +2978,8 @@@ static void qeth_ipa_finalize_cmd(struc __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++; }
- void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, - u16 cmd_length, - bool (*match)(struct qeth_cmd_buffer *iob, - struct qeth_cmd_buffer *reply)) + static void qeth_prepare_ipa_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob, u16 cmd_length) { u8 prot_type = qeth_mpc_select_prot_type(card); u16 total_length = iob->length; @@@ -3011,7 -2987,6 +2987,6 @@@ qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length, iob->data); iob->finalize = qeth_ipa_finalize_cmd; - iob->match = match;
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2); @@@ -3022,7 -2997,6 +2997,6 @@@ &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2); } - EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob, struct qeth_cmd_buffer *reply) @@@ -3046,7 -3020,8 +3020,8 @@@ struct qeth_cmd_buffer *qeth_ipa_alloc_ if (!iob) return NULL;
- qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply); + qeth_prepare_ipa_cmd(card, iob, data_length); + iob->match = qeth_ipa_match_reply;
hdr = &__ipa_cmd(iob)->hdr; hdr->command = cmd_code; @@@ -3804,10 -3779,14 +3779,10 @@@ static void qeth_qdio_output_handler(st unsigned long card_ptr) { struct qeth_card *card = (struct qeth_card *) card_ptr; - struct net_device *dev = card->dev;
- QETH_CARD_TEXT(card, 6, "qdouhdl"); - if (qdio_error & QDIO_ERROR_FATAL) { - QETH_CARD_TEXT(card, 2, "achkcond"); - netif_tx_stop_all_queues(dev); - qeth_schedule_recovery(card); - } + QETH_CARD_TEXT(card, 2, "achkcond"); + netif_tx_stop_all_queues(card->dev); + qeth_schedule_recovery(card); }
/** @@@ -3890,7 -3869,8 +3865,8 @@@ static int qeth_get_elements_for_frags( * Returns the number of pages, and thus QDIO buffer elements, needed to map the * skb's data (both its linear part and paged fragments). */ - unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset) + static unsigned int qeth_count_elements(struct sk_buff *skb, + unsigned int data_offset) { unsigned int elements = qeth_get_elements_for_frags(skb); addr_t end = (addr_t)skb->data + skb_headlen(skb); @@@ -3900,7 -3880,6 +3876,6 @@@ elements += qeth_get_elements_for_range(start, end); return elements; } - EXPORT_SYMBOL_GPL(qeth_count_elements);
#define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \ MAX_TCP_HEADER) @@@ -4188,10 -4167,11 +4163,11 @@@ static int __qeth_xmit(struct qeth_car return 0; }
- int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, - struct sk_buff *skb, struct qeth_hdr *hdr, - unsigned int offset, unsigned int hd_len, - int elements_needed) + static int qeth_do_send_packet(struct qeth_card *card, + struct qeth_qdio_out_q *queue, + struct sk_buff *skb, struct qeth_hdr *hdr, + unsigned int offset, unsigned int hd_len, + unsigned int elements_needed) { unsigned int start_index = queue->next_buf_to_fill; struct qeth_qdio_out_buffer *buffer; @@@ -4271,7 -4251,6 +4247,6 @@@ out netif_tx_start_queue(txq); return rc; } - EXPORT_SYMBOL_GPL(qeth_do_send_packet);
static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, unsigned int payload_len, struct sk_buff *skb, @@@ -4550,7 -4529,6 +4525,6 @@@ static int qeth_mdio_read(struct net_de case MII_BMCR: /* Basic mode control register */ rc = BMCR_FULLDPLX; if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) && - (card->info.link_type != QETH_LINK_TYPE_OSN) && (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) && (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH)) rc |= BMCR_SPEED100; @@@ -5262,10 -5240,6 +5236,6 @@@ static struct ccw_device_id qeth_ids[] .driver_info = QETH_CARD_TYPE_OSD}, {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05), .driver_info = QETH_CARD_TYPE_IQD}, - #ifdef CONFIG_QETH_OSN - {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06), - .driver_info = QETH_CARD_TYPE_OSN}, - #endif {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03), .driver_info = QETH_CARD_TYPE_OSM}, #ifdef CONFIG_QETH_OSX @@@ -5624,14 -5598,6 +5594,6 @@@ static void qeth_receive_skb(struct qet bool is_cso;
switch (hdr->hdr.l2.id) { - case QETH_HEADER_TYPE_OSN: - skb_push(skb, sizeof(*hdr)); - skb_copy_to_linear_data(skb, hdr, sizeof(*hdr)); - QETH_CARD_STAT_ADD(card, rx_bytes, skb->len); - QETH_CARD_STAT_INC(card, rx_packets); - - card->osn_info.data_cb(skb); - return; #if IS_ENABLED(CONFIG_QETH_L3) case QETH_HEADER_TYPE_LAYER3: qeth_l3_rebuild_skb(card, skb, hdr); @@@ -5746,16 -5712,6 +5708,6 @@@ next_packet linear_len = sizeof(struct iphdr); headroom = ETH_HLEN; break; - case QETH_HEADER_TYPE_OSN: - skb_len = hdr->hdr.osn.pdu_length; - if (!IS_OSN(card)) { - QETH_CARD_STAT_INC(card, rx_dropped_notsupp); - goto walk_packet; - } - - linear_len = skb_len; - headroom = sizeof(struct qeth_hdr); - break; default: if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL) QETH_CARD_STAT_INC(card, rx_frame_errors); @@@ -5773,8 -5729,7 +5725,7 @@@
use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) || (skb_len > READ_ONCE(priv->rx_copybreak) && - !atomic_read(&card->force_alloc_skb) && - !IS_OSN(card)); + !atomic_read(&card->force_alloc_skb));
if (use_rx_sg) { /* QETH_CQ_ENABLED only: */ @@@ -6331,14 -6286,9 +6282,9 @@@ void qeth_remove_discipline(struct qeth card->discipline = NULL; }
- const struct device_type qeth_generic_devtype = { + static const struct device_type qeth_generic_devtype = { .name = "qeth_generic", }; - EXPORT_SYMBOL_GPL(qeth_generic_devtype); - - static const struct device_type qeth_osn_devtype = { - .name = "qeth_osn", - };
#define DBF_NAME_LEN 20
@@@ -6421,10 -6371,6 +6367,6 @@@ static struct net_device *qeth_alloc_ne case QETH_CARD_TYPE_OSM: dev = alloc_etherdev(sizeof(*priv)); break; - case QETH_CARD_TYPE_OSN: - dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN, - ether_setup); - break; default: dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1); } @@@ -6438,23 -6384,19 +6380,19 @@@
dev->ml_priv = card; dev->watchdog_timeo = QETH_TX_TIMEOUT; - dev->min_mtu = IS_OSN(card) ? 64 : 576; + dev->min_mtu = 576; /* initialized when device first goes online: */ dev->max_mtu = 0; dev->mtu = 0; SET_NETDEV_DEV(dev, &card->gdev->dev); netif_carrier_off(dev);
- if (IS_OSN(card)) { - dev->ethtool_ops = &qeth_osn_ethtool_ops; - } else { - dev->ethtool_ops = &qeth_ethtool_ops; - dev->priv_flags &= ~IFF_TX_SKB_SHARING; - dev->hw_features |= NETIF_F_SG; - dev->vlan_features |= NETIF_F_SG; - if (IS_IQD(card)) - dev->features |= NETIF_F_SG; - } + dev->ethtool_ops = &qeth_ethtool_ops; + dev->priv_flags &= ~IFF_TX_SKB_SHARING; + dev->hw_features |= NETIF_F_SG; + dev->vlan_features |= NETIF_F_SG; + if (IS_IQD(card)) + dev->features |= NETIF_F_SG;
return dev; } @@@ -6517,10 -6459,7 +6455,7 @@@ static int qeth_core_probe_device(struc if (rc) goto err_chp_desc;
- if (IS_OSN(card)) - gdev->dev.groups = qeth_osn_dev_groups; - else - gdev->dev.groups = qeth_dev_groups; + gdev->dev.groups = qeth_dev_groups;
enforced_disc = qeth_enforce_discipline(card); switch (enforced_disc) { @@@ -6534,8 -6473,6 +6469,6 @@@ if (rc) goto err_setup_disc;
- gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype : - card->discipline->devtype; break; }
@@@ -6653,36 -6590,42 +6586,42 @@@ static struct ccwgroup_driver qeth_core .shutdown = qeth_core_shutdown, };
- struct qeth_card *qeth_get_card_by_busid(char *bus_id) - { - struct ccwgroup_device *gdev; - struct qeth_card *card; - - gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id); - if (!gdev) - return NULL; - - card = dev_get_drvdata(&gdev->dev); - put_device(&gdev->dev); - return card; - } - EXPORT_SYMBOL_GPL(qeth_get_card_by_busid); - - int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) + int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd) { struct qeth_card *card = dev->ml_priv; - struct mii_ioctl_data *mii_data; int rc = 0;
switch (cmd) { case SIOC_QETH_ADP_SET_SNMP_CONTROL: - rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); + rc = qeth_snmp_command(card, data); break; case SIOC_QETH_GET_CARD_TYPE: if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) && !IS_VM_NIC(card)) return 1; return 0; + case SIOC_QETH_QUERY_OAT: + rc = qeth_query_oat_command(card, data); + break; + default: + if (card->discipline->do_ioctl) + rc = card->discipline->do_ioctl(dev, rq, data, cmd); + else + rc = -EOPNOTSUPP; + } + if (rc) + QETH_CARD_TEXT_(card, 2, "ioce%x", rc); + return rc; + } + EXPORT_SYMBOL_GPL(qeth_siocdevprivate); + + int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) + { + struct qeth_card *card = dev->ml_priv; + struct mii_ioctl_data *mii_data; + int rc = 0; + + switch (cmd) { case SIOCGMIIPHY: mii_data = if_mii(rq); mii_data->phy_id = 0; @@@ -6695,14 -6638,8 +6634,8 @@@ mii_data->val_out = qeth_mdio_read(dev, mii_data->phy_id, mii_data->reg_num); break; - case SIOC_QETH_QUERY_OAT: - rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data); - break; default: - if (card->discipline->do_ioctl) - rc = card->discipline->do_ioctl(dev, rq, cmd); - else - rc = -EOPNOTSUPP; + return -EOPNOTSUPP; } if (rc) QETH_CARD_TEXT_(card, 2, "ioce%x", rc); diff --combined include/linux/filter.h index c1711c9f9439,1797e8506929..a8108890629b --- a/include/linux/filter.h +++ b/include/linux/filter.h @@@ -5,6 -5,8 +5,6 @@@ #ifndef __LINUX_FILTER_H__ #define __LINUX_FILTER_H__
-#include <stdarg.h> - #include <linux/atomic.h> #include <linux/refcount.h> #include <linux/compat.h> @@@ -572,7 -574,8 +572,8 @@@ struct bpf_prog kprobe_override:1, /* Do we override a kprobe? */ has_callchain_buf:1, /* callchain buffer allocated? */ enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ - call_get_stack:1; /* Do we call bpf_get_stack() or bpf_get_stackid() */ + call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */ + call_get_func_ip:1; /* Do we call get_func_ip() */ enum bpf_prog_type type; /* Type of BPF program */ enum bpf_attach_type expected_attach_type; /* For some prog types */ u32 len; /* Number of filter blocks */ @@@ -773,6 -776,10 +774,10 @@@ static inline u32 bpf_prog_run_clear_cb
DECLARE_BPF_DISPATCHER(xdp)
+ DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key); + + u32 xdp_master_redirect(struct xdp_buff *xdp); + static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, struct xdp_buff *xdp) { @@@ -780,7 -787,14 +785,14 @@@ * under local_bh_disable(), which provides the needed RCU protection * for accessing map entries. */ - return __BPF_PROG_RUN(prog, xdp, BPF_DISPATCHER_FUNC(xdp)); + u32 act = __BPF_PROG_RUN(prog, xdp, BPF_DISPATCHER_FUNC(xdp)); + + if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) { + if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev)) + act = xdp_master_redirect(xdp); + } + + return act; }
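Note: the static key above keeps bpf_prog_run_xdp() branch-free on systems with no bond devices; only when bonding enables the key does the XDP_TX-on-slave case pay for the netif_is_bond_slave() test. A minimal sketch of how a driver might toggle such a key (the function name and call site are assumptions, not the actual bonding code):

    /* sketch: count XDP-enabled bond devices into the static key */
    static void sketch_bond_xdp_toggle(bool xdp_attached)
    {
            if (xdp_attached)
                    static_branch_inc(&bpf_master_redirect_enabled_key);
            else
                    static_branch_dec(&bpf_master_redirect_enabled_key);
    }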
void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog); diff --combined include/linux/memcontrol.h index 24797929d8a1,f0ee30881ca9..20151c4f1e0e --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@@ -612,15 -612,12 +612,15 @@@ static inline bool mem_cgroup_disabled( return !cgroup_subsys_enabled(memory_cgrp_subsys); }
-static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, - struct mem_cgroup *memcg, - bool in_low_reclaim) +static inline void mem_cgroup_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg, + unsigned long *min, + unsigned long *low) { + *min = *low = 0; + if (mem_cgroup_disabled()) - return 0; + return;
/* * There is no reclaim protection applied to a targeted reclaim. @@@ -656,10 -653,13 +656,10 @@@ * */ if (root == memcg) - return 0; - - if (in_low_reclaim) - return READ_ONCE(memcg->memory.emin); + return;
- return max(READ_ONCE(memcg->memory.emin), - READ_ONCE(memcg->memory.elow)); + *min = READ_ONCE(memcg->memory.emin); + *low = READ_ONCE(memcg->memory.elow); }
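Note: with the signature change, callers receive both protection thresholds in one call instead of selecting one of them via the old in_low_reclaim flag. A hedged caller sketch (illustrative, not the actual vmscan logic):

    /* sketch: consuming both emin and elow from one call */
    static bool sketch_is_protected(struct mem_cgroup *root,
                                    struct mem_cgroup *memcg,
                                    unsigned long usage)
    {
            unsigned long min, low;

            mem_cgroup_protection(root, memcg, &min, &low);
            /* usage <= min is hard protection; usage <= low is soft */
            return usage <= min || usage <= low;
    }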
void mem_cgroup_calculate_protection(struct mem_cgroup *root, @@@ -1147,12 -1147,11 +1147,12 @@@ static inline void memcg_memory_event_m { }
-static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, - struct mem_cgroup *memcg, - bool in_low_reclaim) +static inline void mem_cgroup_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg, + unsigned long *min, + unsigned long *low) { - return 0; + *min = *low = 0; }
static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root, @@@ -1582,7 -1581,8 +1582,8 @@@ static inline void mem_cgroup_flush_for #endif /* CONFIG_CGROUP_WRITEBACK */
struct sock; - bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); + bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages, + gfp_t gfp_mask); void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); #ifdef CONFIG_MEMCG extern struct static_key_false memcg_sockets_enabled_key; diff --combined include/linux/mhi.h index 944aa3aa3035,c493a80cb453..beb918328eef --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@@ -356,6 -356,7 +356,7 @@@ struct mhi_controller_config * @fbc_download: MHI host needs to do complete image transfer (optional) * @wake_set: Device wakeup set flag * @irq_flags: irq flags passed to request_irq (optional) + * @mru: the default MRU for the MHI device * * Fields marked as (required) need to be populated by the controller driver * before calling mhi_register_controller(). For the fields marked as (optional) @@@ -448,6 -449,7 +449,7 @@@ struct mhi_controller bool fbc_download; bool wake_set; unsigned long irq_flags; + u32 mru; };
/** @@@ -719,8 -721,13 +721,8 @@@ void mhi_device_put(struct mhi_device * * host and device execution environments match and * channels are in a DISABLED state. * @mhi_dev: Device associated with the channels - * @flags: MHI channel flags */ -int mhi_prepare_for_transfer(struct mhi_device *mhi_dev, - unsigned int flags); - -/* Automatically allocate and queue inbound buffers */ -#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0) +int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
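Note: with the MHI_CH_INBOUND_ALLOC_BUFS flag and the flags parameter gone, client drivers call the function with the device alone; automatic inbound-buffer allocation is no longer requested through this entry point. A minimal caller sketch (function name assumed):

    /* sketch: preparing channels from a client driver */
    static int sketch_client_probe(struct mhi_device *mhi_dev,
                                   const struct mhi_device_id *id)
    {
            return mhi_prepare_for_transfer(mhi_dev);
    }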
/** * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer. diff --combined include/linux/pci.h index 7ed95f11c6bd,947430637cac..76e3a9254a3d --- a/include/linux/pci.h +++ b/include/linux/pci.h @@@ -49,12 -49,6 +49,12 @@@ PCI_STATUS_SIG_TARGET_ABORT | \ PCI_STATUS_PARITY)
+/* Number of reset methods used in pci_reset_fn_methods array in pci.c */ +#define PCI_NUM_RESET_METHODS 7 + +#define PCI_RESET_PROBE true +#define PCI_RESET_DO_RESET false + /* * The PCI interface treats multi-function devices as independent * devices. The slot/function address of each device is encoded @@@ -294,14 -288,21 +294,14 @@@ enum pci_bus_speed enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev); enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
-struct pci_cap_saved_data { - u16 cap_nr; - bool cap_extended; - unsigned int size; - u32 data[]; -}; - -struct pci_cap_saved_state { - struct hlist_node next; - struct pci_cap_saved_data cap; +struct pci_vpd { + struct mutex lock; + unsigned int len; + u8 cap; };
struct irq_affinity; struct pcie_link_state; -struct pci_vpd; struct pci_sriov; struct pci_p2pdma; struct rcec_ea; @@@ -332,7 -333,6 +332,7 @@@ struct pci_dev struct rcec_ea *rcec_ea; /* RCEC cached endpoint association */ struct pci_dev *rcec; /* Associated RCEC device */ #endif + u32 devcap; /* PCIe Device Capabilities */ u8 pcie_cap; /* PCIe capability offset */ u8 msi_cap; /* MSI capability offset */ u8 msix_cap; /* MSI-X capability offset */ @@@ -427,6 -427,7 +427,6 @@@ unsigned int state_saved:1; unsigned int is_physfn:1; unsigned int is_virtfn:1; - unsigned int reset_fn:1; unsigned int is_hotplug_bridge:1; unsigned int shpc_managed:1; /* SHPC owned by shpchp */ unsigned int is_thunderbolt:1; /* Thunderbolt controller */ @@@ -472,7 -473,7 +472,7 @@@ #ifdef CONFIG_PCI_MSI const struct attribute_group **msi_irq_groups; #endif - struct pci_vpd *vpd; + struct pci_vpd vpd; #ifdef CONFIG_PCIE_DPC u16 dpc_cap; unsigned int dpc_rp_extensions:1; @@@ -504,9 -505,6 +504,9 @@@ char *driver_override; /* Driver name to force a match */
unsigned long priv_flags; /* Private flags for the PCI driver */ + + /* These methods index pci_reset_fn_methods[] */ + u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */ };
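Note: reset_methods[] stores indices into the pci_reset_fn_methods[] table in pci.c, ordered by priority, and every reset method now takes a bool probe instead of an int. With the PCI_RESET_PROBE / PCI_RESET_DO_RESET names from earlier in this header, the convention looks roughly like this (a sketch, not code from this series):

    /* sketch: probe for support first, then perform the reset */
    static int sketch_try_flr(struct pci_dev *pdev)
    {
            if (pcie_reset_flr(pdev, PCI_RESET_PROBE))
                    return -ENOTTY; /* FLR not supported */
            return pcie_reset_flr(pdev, PCI_RESET_DO_RESET);
    }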
static inline struct pci_dev *pci_physfn(struct pci_dev *dev) @@@ -528,16 -526,6 +528,16 @@@ static inline int pci_channel_offline(s return (pdev->error_state != pci_channel_io_normal); }
+/* + * Currently in ACPI spec, for each PCI host bridge, PCI Segment + * Group number is limited to a 16-bit value, therefore (int)-1 is + * not a valid PCI domain number, and can be used as a sentinel + * value indicating ->domain_nr is not set by the driver (and + * CONFIG_PCI_DOMAINS_GENERIC=y archs will set it with + * pci_bus_find_domain_nr()). + */ +#define PCI_DOMAIN_NR_NOT_SET (-1) + struct pci_host_bridge { struct device dev; struct pci_bus *bus; /* Root bus */ @@@ -545,7 -533,6 +545,7 @@@ struct pci_ops *child_ops; void *sysdata; int busnr; + int domain_nr; struct list_head windows; /* resource_entry */ struct list_head dma_ranges; /* dma ranges resource list */ u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */ @@@ -1241,7 -1228,7 +1241,7 @@@ u32 pcie_bandwidth_available(struct pci enum pci_bus_speed *speed, enum pcie_link_width *width); void pcie_print_link_status(struct pci_dev *dev); -bool pcie_has_flr(struct pci_dev *dev); +int pcie_reset_flr(struct pci_dev *dev, bool probe); int pcie_flr(struct pci_dev *dev); int __pci_reset_function_locked(struct pci_dev *dev); int pci_reset_function(struct pci_dev *dev); @@@ -1291,6 -1278,12 +1291,6 @@@ int pci_load_saved_state(struct pci_de struct pci_saved_state *state); int pci_load_and_free_saved_state(struct pci_dev *dev, struct pci_saved_state **state); -struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap); -struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, - u16 cap); -int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size); -int pci_add_ext_cap_save_buffer(struct pci_dev *dev, - u16 cap, unsigned int size); int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state); int pci_set_power_state(struct pci_dev *dev, pci_power_t state); pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); @@@ -1627,6 -1620,16 +1627,16 @@@ static inline bool pci_aer_available(vo
bool pci_ats_disabled(void);
+ #ifdef CONFIG_PCIE_PTM + int pci_enable_ptm(struct pci_dev *dev, u8 *granularity); + bool pcie_ptm_enabled(struct pci_dev *dev); + #else + static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity) + { return -EINVAL; } + static inline bool pcie_ptm_enabled(struct pci_dev *dev) + { return false; } + #endif + void pci_cfg_access_lock(struct pci_dev *dev); bool pci_cfg_access_trylock(struct pci_dev *dev); void pci_cfg_access_unlock(struct pci_dev *dev); @@@ -1747,9 -1750,8 +1757,9 @@@ static inline void pci_disable_device(s static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; } static inline int pci_assign_resource(struct pci_dev *dev, int i) { return -EBUSY; } -static inline int __pci_register_driver(struct pci_driver *drv, - struct module *owner) +static inline int __must_check __pci_register_driver(struct pci_driver *drv, + struct module *owner, + const char *mod_name) { return 0; } static inline int pci_register_driver(struct pci_driver *drv) { return 0; } @@@ -1889,7 -1891,9 +1899,7 @@@ int pci_iobar_pfn(struct pci_dev *pdev #define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end) #define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags) #define pci_resource_len(dev,bar) \ - ((pci_resource_start((dev), (bar)) == 0 && \ - pci_resource_end((dev), (bar)) == \ - pci_resource_start((dev), (bar))) ? 0 : \ + ((pci_resource_end((dev), (bar)) == 0) ? 0 : \ \ (pci_resource_end((dev), (bar)) - \ pci_resource_start((dev), (bar)) + 1)) @@@ -2331,15 -2335,6 +2341,15 @@@ static inline u8 pci_vpd_info_field_siz return info_field[2]; }
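Note: the stubs in the PTM block above let consumers compile out cleanly when CONFIG_PCIE_PTM is off. A consumer sketch for the pair of calls (function name illustrative):

    /* sketch: enabling and checking Precision Time Measurement */
    static void sketch_setup_ptm(struct pci_dev *pdev)
    {
            u8 granularity = 0;

            if (!pci_enable_ptm(pdev, &granularity))
                    pci_info(pdev, "PTM enabled, granularity %u\n",
                             granularity);
            if (!pcie_ptm_enabled(pdev))
                    pci_info(pdev, "PTM off, using software timestamps\n");
    }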
+/** + * pci_vpd_alloc - Allocate buffer and read VPD into it + * @dev: PCI device + * @size: pointer to field where VPD length is returned + * + * Returns pointer to allocated buffer or an ERR_PTR in case of failure + */ +void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size); + /** * pci_vpd_find_tag - Locates the Resource Data Type tag provided * @buf: Pointer to buffered vpd data @@@ -2364,28 -2359,6 +2374,28 @@@ int pci_vpd_find_tag(const u8 *buf, uns int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, unsigned int len, const char *kw);
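Note: paired with pci_vpd_find_ro_info_keyword() documented just below, this lets a driver replace open-coded pci_read_vpd() loops with one allocation and one lookup. A hedged sketch (keyword choice and function name are illustrative):

    /* sketch: fetching the "SN" (serial number) RO keyword */
    static int sketch_read_vpd_sn(struct pci_dev *pdev)
    {
            unsigned int size, kw_len;
            void *vpd = pci_vpd_alloc(pdev, &size);
            int off;

            if (IS_ERR(vpd))
                    return PTR_ERR(vpd);
            off = pci_vpd_find_ro_info_keyword(vpd, size, "SN", &kw_len);
            /* on success, kw_len bytes of data start at vpd + off */
            kfree(vpd);
            return off < 0 ? off : 0;
    }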
+/** + * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section + * @buf: Pointer to buffered VPD data + * @len: The length of the buffer area in which to search + * @kw: The keyword to search for + * @size: Pointer to field where length of found keyword data is returned + * + * Returns the index of the information field keyword data or -ENOENT if + * not found. + */ +int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len, + const char *kw, unsigned int *size); + +/** + * pci_vpd_check_csum - Check VPD checksum + * @buf: Pointer to buffered VPD data + * @len: VPD size + * + * Returns 1 if VPD has no checksum, otherwise 0 or an errno + */ +int pci_vpd_check_csum(const void *buf, unsigned int len); + /* PCI <-> OF binding helpers */ #ifdef CONFIG_OF struct device_node; diff --combined kernel/bpf/verifier.c index 49f07e2bf23b,e5f2b23bb7c9..9134aedfdb7d --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@@ -255,6 -255,7 +255,7 @@@ struct bpf_call_arg_meta int mem_size; u64 msize_max_value; int ref_obj_id; + int map_uid; int func_id; struct btf *btf; u32 btf_id; @@@ -734,6 -735,10 +735,10 @@@ static void print_verifier_state(struc if (state->refs[i].id) verbose(env, ",%d", state->refs[i].id); } + if (state->in_callback_fn) + verbose(env, " cb"); + if (state->in_async_callback_fn) + verbose(env, " async_cb"); verbose(env, "\n"); }
@@@ -1135,6 -1140,10 +1140,10 @@@ static void mark_ptr_not_null_reg(struc if (map->inner_map_meta) { reg->type = CONST_PTR_TO_MAP; reg->map_ptr = map->inner_map_meta; + /* transfer reg's id which is unique for every map_lookup_elem + * as UID of the inner map. + */ + reg->map_uid = reg->id; } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) { reg->type = PTR_TO_XDP_SOCK; } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP || @@@ -1522,6 -1531,54 +1531,54 @@@ static void init_func_state(struct bpf_ init_reg_state(env, state); }
+ /* Similar to push_stack(), but for async callbacks */ + static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env, + int insn_idx, int prev_insn_idx, + int subprog) + { + struct bpf_verifier_stack_elem *elem; + struct bpf_func_state *frame; + + elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); + if (!elem) + goto err; + + elem->insn_idx = insn_idx; + elem->prev_insn_idx = prev_insn_idx; + elem->next = env->head; + elem->log_pos = env->log.len_used; + env->head = elem; + env->stack_size++; + if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { + verbose(env, + "The sequence of %d jumps is too complex for async cb.\n", + env->stack_size); + goto err; + } + /* Unlike push_stack() do not copy_verifier_state(). + * The caller state doesn't matter. + * This is async callback. It starts in a fresh stack. + * Initialize it similar to do_check_common(). + */ + elem->st.branches = 1; + frame = kzalloc(sizeof(*frame), GFP_KERNEL); + if (!frame) + goto err; + init_func_state(env, frame, + BPF_MAIN_FUNC /* callsite */, + 0 /* frameno within this callchain */, + subprog /* subprog number within this prog */); + elem->st.frame[0] = frame; + return &elem->st; + err: + free_verifier_state(env->cur_state, true); + env->cur_state = NULL; + /* pop all elements and return */ + while (!pop_stack(env, NULL, NULL, false)); + return NULL; + } + + enum reg_arg_type { SRC_OP, /* register is used as source operand */ DST_OP, /* register is used as destination operand */ @@@ -3217,6 -3274,15 +3274,15 @@@ static int check_map_access(struct bpf_ return -EACCES; } } + if (map_value_has_timer(map)) { + u32 t = map->timer_off; + + if (reg->smin_value + off < t + sizeof(struct bpf_timer) && + t < reg->umax_value + off + size) { + verbose(env, "bpf_timer cannot be accessed directly by load/store\n"); + return -EACCES; + } + } return err; }
@@@ -3619,6 -3685,8 +3685,8 @@@ process_func continue_func: subprog_end = subprog[idx + 1].start; for (; i < subprog_end; i++) { + int next_insn; + if (!bpf_pseudo_call(insn + i) && !bpf_pseudo_func(insn + i)) continue; /* remember insn and function to return to */ @@@ -3626,13 -3694,22 +3694,22 @@@ ret_prog[frame] = idx;
/* find the callee */ - i = i + insn[i].imm + 1; - idx = find_subprog(env, i); + next_insn = i + insn[i].imm + 1; + idx = find_subprog(env, next_insn); if (idx < 0) { WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", - i); + next_insn); return -EFAULT; } + if (subprog[idx].is_async_cb) { + if (subprog[idx].has_tail_call) { + verbose(env, "verifier bug. subprog has tail_call and async cb\n"); + return -EFAULT; + } + /* async callbacks don't increase bpf prog stack size */ + continue; + } + i = next_insn;
if (subprog[idx].has_tail_call) tail_call_reachable = true; @@@ -4634,6 -4711,54 +4711,54 @@@ static int process_spin_lock(struct bpf return 0; }
+ static int process_timer_func(struct bpf_verifier_env *env, int regno, + struct bpf_call_arg_meta *meta) + { + struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; + bool is_const = tnum_is_const(reg->var_off); + struct bpf_map *map = reg->map_ptr; + u64 val = reg->var_off.value; + + if (!is_const) { + verbose(env, + "R%d doesn't have constant offset. bpf_timer has to be at the constant offset\n", + regno); + return -EINVAL; + } + if (!map->btf) { + verbose(env, "map '%s' has to have BTF in order to use bpf_timer\n", + map->name); + return -EINVAL; + } + if (!map_value_has_timer(map)) { + if (map->timer_off == -E2BIG) + verbose(env, + "map '%s' has more than one 'struct bpf_timer'\n", + map->name); + else if (map->timer_off == -ENOENT) + verbose(env, + "map '%s' doesn't have 'struct bpf_timer'\n", + map->name); + else + verbose(env, + "map '%s' is not a struct type or bpf_timer is mangled\n", + map->name); + return -EINVAL; + } + if (map->timer_off != val + reg->off) { + verbose(env, "off %lld doesn't point to 'struct bpf_timer' that is at %d\n", + val + reg->off, map->timer_off); + return -EINVAL; + } + if (meta->map_ptr) { + verbose(env, "verifier bug. Two map pointers in a timer helper\n"); + return -EFAULT; + } + meta->map_uid = reg->map_uid; + meta->map_ptr = map; + return 0; + } + static bool arg_type_is_mem_ptr(enum bpf_arg_type type) { return type == ARG_PTR_TO_MEM || @@@ -4766,6 -4891,7 +4891,7 @@@ static const struct bpf_reg_types percp static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } }; static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } }; static const struct bpf_reg_types const_str_ptr_types = { .types = { PTR_TO_MAP_VALUE } }; + static const struct bpf_reg_types timer_types = { .types = { PTR_TO_MAP_VALUE } };
static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = { [ARG_PTR_TO_MAP_KEY] = &map_key_value_types, @@@ -4797,6 -4923,7 +4923,7 @@@ [ARG_PTR_TO_FUNC] = &func_ptr_types, [ARG_PTR_TO_STACK_OR_NULL] = &stack_ptr_types, [ARG_PTR_TO_CONST_STR] = &const_str_ptr_types, + [ARG_PTR_TO_TIMER] = &timer_types, };
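Note: ARG_PTR_TO_TIMER and process_timer_func() gate the bpf_timer helper family; the timer must sit at the constant map->timer_off offset inside a BTF-described map value. For orientation, a minimal BPF-side sketch of the API being verified (map layout, section name and constants are illustrative, not from this patch):

    /* sketch: BPF C using the bpf_timer helpers these checks verify */
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    struct elem {
            struct bpf_timer timer; /* must match map->timer_off */
    };

    struct {
            __uint(type, BPF_MAP_TYPE_ARRAY);
            __uint(max_entries, 1);
            __type(key, int);
            __type(value, struct elem);
    } timer_map SEC(".maps");

    static int timer_cb(void *map, int *key, struct elem *val)
    {
            return 0; /* async callbacks must return 0 */
    }

    SEC("fentry/bpf_fentry_test1") /* attach point illustrative */
    int start_timer(void *ctx)
    {
            int key = 0;
            struct elem *val = bpf_map_lookup_elem(&timer_map, &key);

            if (!val)
                    return 0;
            bpf_timer_init(&val->timer, &timer_map, 1 /* CLOCK_MONOTONIC */);
            bpf_timer_set_callback(&val->timer, timer_cb);
            bpf_timer_start(&val->timer, 1000000 /* ns */, 0);
            return 0;
    }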
static int check_reg_type(struct bpf_verifier_env *env, u32 regno, @@@ -4926,7 -5053,29 +5053,29 @@@ skip_type_check
if (arg_type == ARG_CONST_MAP_PTR) { /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ + if (meta->map_ptr) { + /* Use map_uid (which is unique id of inner map) to reject: + * inner_map1 = bpf_map_lookup_elem(outer_map, key1) + * inner_map2 = bpf_map_lookup_elem(outer_map, key2) + * if (inner_map1 && inner_map2) { + * timer = bpf_map_lookup_elem(inner_map1); + * if (timer) + * // mismatch would have been allowed + * bpf_timer_init(timer, inner_map2); + * } + * + * Comparing map_ptr is enough to distinguish normal and outer maps. + */ + if (meta->map_ptr != reg->map_ptr || + meta->map_uid != reg->map_uid) { + verbose(env, + "timer pointer in R1 map_uid=%d doesn't match map pointer in R2 map_uid=%d\n", + meta->map_uid, reg->map_uid); + return -EINVAL; + } + } meta->map_ptr = reg->map_ptr; + meta->map_uid = reg->map_uid; } else if (arg_type == ARG_PTR_TO_MAP_KEY) { /* bpf_map_xxx(..., map_ptr, ..., key) call: * check that [key, key + map->key_size) are within @@@ -4978,6 -5127,9 +5127,9 @@@ verbose(env, "verifier internal error\n"); return -EFAULT; } + } else if (arg_type == ARG_PTR_TO_TIMER) { + if (process_timer_func(env, regno, meta)) + return -EACCES; } else if (arg_type == ARG_PTR_TO_FUNC) { meta->subprogno = reg->subprogno; } else if (arg_type_is_mem_ptr(arg_type)) { @@@ -5150,6 -5302,8 +5302,6 @@@ static int check_map_func_compatibility case BPF_MAP_TYPE_RINGBUF: if (func_id != BPF_FUNC_ringbuf_output && func_id != BPF_FUNC_ringbuf_reserve && - func_id != BPF_FUNC_ringbuf_submit && - func_id != BPF_FUNC_ringbuf_discard && func_id != BPF_FUNC_ringbuf_query) goto error; break; @@@ -5258,12 -5412,6 +5410,12 @@@ if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) goto error; break; + case BPF_FUNC_ringbuf_output: + case BPF_FUNC_ringbuf_reserve: + case BPF_FUNC_ringbuf_query: + if (map->map_type != BPF_MAP_TYPE_RINGBUF) + goto error; + break; case BPF_FUNC_get_stackid: if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) goto error; @@@ -5597,6 -5745,31 +5749,31 @@@ static int __check_func_call(struct bpf } }
+ if (insn->code == (BPF_JMP | BPF_CALL) && + insn->imm == BPF_FUNC_timer_set_callback) { + struct bpf_verifier_state *async_cb; + + /* there is no real recursion here. timer callbacks are async */ + env->subprog_info[subprog].is_async_cb = true; + async_cb = push_async_cb(env, env->subprog_info[subprog].start, + *insn_idx, subprog); + if (!async_cb) + return -EFAULT; + callee = async_cb->frame[0]; + callee->async_entry_cnt = caller->async_entry_cnt + 1; + + /* Convert bpf_timer_set_callback() args into timer callback args */ + err = set_callee_state_cb(env, caller, callee, *insn_idx); + if (err) + return err; + + clear_caller_saved_regs(env, caller->regs); + mark_reg_unknown(env, caller->regs, BPF_REG_0); + caller->regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; + /* continue with next insn after call */ + return 0; + } + callee = kzalloc(sizeof(*callee), GFP_KERNEL); if (!callee) return -ENOMEM; @@@ -5724,6 -5897,35 +5901,35 @@@ static int set_map_elem_callback_state( return 0; }
+ static int set_timer_callback_state(struct bpf_verifier_env *env, + struct bpf_func_state *caller, + struct bpf_func_state *callee, + int insn_idx) + { + struct bpf_map *map_ptr = caller->regs[BPF_REG_1].map_ptr; + + /* bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn); + * callback_fn(struct bpf_map *map, void *key, void *value); + */ + callee->regs[BPF_REG_1].type = CONST_PTR_TO_MAP; + __mark_reg_known_zero(&callee->regs[BPF_REG_1]); + callee->regs[BPF_REG_1].map_ptr = map_ptr; + + callee->regs[BPF_REG_2].type = PTR_TO_MAP_KEY; + __mark_reg_known_zero(&callee->regs[BPF_REG_2]); + callee->regs[BPF_REG_2].map_ptr = map_ptr; + + callee->regs[BPF_REG_3].type = PTR_TO_MAP_VALUE; + __mark_reg_known_zero(&callee->regs[BPF_REG_3]); + callee->regs[BPF_REG_3].map_ptr = map_ptr; + + /* unused */ + __mark_reg_not_init(env, &callee->regs[BPF_REG_4]); + __mark_reg_not_init(env, &callee->regs[BPF_REG_5]); + callee->in_async_callback_fn = true; + return 0; + } + static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) { struct bpf_verifier_state *state = env->cur_state; @@@ -5937,6 -6139,29 +6143,29 @@@ static int check_bpf_snprintf_call(stru return err; }
+ static int check_get_func_ip(struct bpf_verifier_env *env) + { + enum bpf_attach_type eatype = env->prog->expected_attach_type; + enum bpf_prog_type type = resolve_prog_type(env->prog); + int func_id = BPF_FUNC_get_func_ip; + + if (type == BPF_PROG_TYPE_TRACING) { + if (eatype != BPF_TRACE_FENTRY && eatype != BPF_TRACE_FEXIT && + eatype != BPF_MODIFY_RETURN) { + verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n", + func_id_name(func_id), func_id); + return -ENOTSUPP; + } + return 0; + } else if (type == BPF_PROG_TYPE_KPROBE) { + return 0; + } + + verbose(env, "func %s#%d not supported for program type %d\n", + func_id_name(func_id), func_id, type); + return -ENOTSUPP; + } + static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx_p) { @@@ -6051,6 -6276,13 +6280,13 @@@ return -EINVAL; }
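Note: check_get_func_ip() limits the helper to fentry/fexit/fmod_ret tracing programs and kprobes. From the program side, usage might look like this (attach point and includes are illustrative):

    /* sketch: reading the traced function's address */
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    SEC("fentry/bpf_fentry_test1")
    int BPF_PROG(on_entry)
    {
            __u64 ip = bpf_get_func_ip(ctx);

            bpf_printk("entered %llx", ip);
            return 0;
    }

For BPF_PROG_TYPE_TRACING the call is later inlined by do_misc_fixups() into a single load from ctx - 8, as seen further down in this file.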
+ if (func_id == BPF_FUNC_timer_set_callback) { + err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, + set_timer_callback_state); + if (err < 0) + return -EINVAL; + } + if (func_id == BPF_FUNC_snprintf) { err = check_bpf_snprintf_call(env, regs); if (err < 0) @@@ -6086,6 -6318,7 +6322,7 @@@ return -EINVAL; } regs[BPF_REG_0].map_ptr = meta.map_ptr; + regs[BPF_REG_0].map_uid = meta.map_uid; if (fn->ret_type == RET_PTR_TO_MAP_VALUE) { regs[BPF_REG_0].type = PTR_TO_MAP_VALUE; if (map_value_has_spin_lock(meta.map_ptr)) @@@ -6207,6 -6440,12 +6444,12 @@@ if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack) env->prog->call_get_stack = true;
+ if (func_id == BPF_FUNC_get_func_ip) { + if (check_get_func_ip(env)) + return -ENOTSUPP; + env->prog->call_get_func_ip = true; + } + if (changes_data) clear_all_pkt_pointers(env); return 0; @@@ -9087,7 -9326,8 +9330,8 @@@ static int check_return_code(struct bpf struct tnum range = tnum_range(0, 1); enum bpf_prog_type prog_type = resolve_prog_type(env->prog); int err; - const bool is_subprog = env->cur_state->frame[0]->subprogno; + struct bpf_func_state *frame = env->cur_state->frame[0]; + const bool is_subprog = frame->subprogno;
/* LSM and struct_ops func-ptr's return type could be "void" */ if (!is_subprog && @@@ -9112,6 -9352,22 +9356,22 @@@ }
reg = cur_regs(env) + BPF_REG_0; + + if (frame->in_async_callback_fn) { + /* enforce return zero from async callbacks like timer */ + if (reg->type != SCALAR_VALUE) { + verbose(env, "In async callback the register R0 is not a known value (%s)\n", + reg_type_str[reg->type]); + return -EINVAL; + } + + if (!tnum_in(tnum_const(0), reg->var_off)) { + verbose_invalid_scalar(env, reg, &range, "async callback", "R0"); + return -EINVAL; + } + return 0; + } + if (is_subprog) { if (reg->type != SCALAR_VALUE) { verbose(env, "At subprogram exit the register R0 is not a scalar value (%s)\n", @@@ -9326,8 -9582,12 +9586,12 @@@ static int visit_func_call_insn(int t, init_explored_state(env, t + 1); if (visit_callee) { init_explored_state(env, t); - ret = push_insn(t, t + insns[t].imm + 1, BRANCH, - env, false); + ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env, + /* It's ok to allow recursion from CFG point of + * view. __check_func_call() will do the actual + * check. + */ + bpf_pseudo_func(insns + t)); } return ret; } @@@ -9355,6 -9615,13 +9619,13 @@@ static int visit_insn(int t, int insn_c return DONE_EXPLORING;
case BPF_CALL: + if (insns[t].imm == BPF_FUNC_timer_set_callback) + /* Mark this call insn to trigger is_state_visited() check + * before call itself is processed by __check_func_call(). + * Otherwise new async state will be pushed for further + * exploration. + */ + init_explored_state(env, t); return visit_func_call_insn(t, insn_cnt, insns, env, insns[t].src_reg == BPF_PSEUDO_CALL);
@@@ -10363,9 -10630,25 +10634,25 @@@ static int is_state_visited(struct bpf_ states_cnt++; if (sl->state.insn_idx != insn_idx) goto next; + if (sl->state.branches) { - if (states_maybe_looping(&sl->state, cur) && - states_equal(env, &sl->state, cur)) { + struct bpf_func_state *frame = sl->state.frame[sl->state.curframe]; + + if (frame->in_async_callback_fn && + frame->async_entry_cnt != cur->frame[cur->curframe]->async_entry_cnt) { + /* Different async_entry_cnt means that the verifier is + * processing another entry into async callback. + * Seeing the same state is not an indication of infinite + * loop or infinite recursion. + * But finding the same state doesn't mean that it's safe + * to stop processing the current state. The previous state + * hasn't yet reached bpf_exit, since state.branches > 0. + * Checking in_async_callback_fn alone is not enough either. + * Since the verifier still needs to catch infinite loops + * inside async callbacks. + */ + } else if (states_maybe_looping(&sl->state, cur) && + states_equal(env, &sl->state, cur)) { verbose_linfo(env, insn_idx, "; "); verbose(env, "infinite loop detected at insn %d\n", insn_idx); return -EINVAL; @@@ -11414,10 -11697,11 +11701,11 @@@ static void convert_pseudo_ld_imm64(str * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying * [0, off) and [off, end) to new locations, so the patched range stays zero */ - static int adjust_insn_aux_data(struct bpf_verifier_env *env, - struct bpf_prog *new_prog, u32 off, u32 cnt) + static void adjust_insn_aux_data(struct bpf_verifier_env *env, + struct bpf_insn_aux_data *new_data, + struct bpf_prog *new_prog, u32 off, u32 cnt) { - struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; + struct bpf_insn_aux_data *old_data = env->insn_aux_data; struct bpf_insn *insn = new_prog->insnsi; u32 old_seen = old_data[off].seen; u32 prog_len; @@@ -11430,12 -11714,9 +11718,9 @@@ old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
if (cnt == 1) - return 0; + return; prog_len = new_prog->len; - new_data = vzalloc(array_size(prog_len, - sizeof(struct bpf_insn_aux_data))); - if (!new_data) - return -ENOMEM; + memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); memcpy(new_data + off + cnt - 1, old_data + off, sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); @@@ -11446,7 -11727,6 +11731,6 @@@ } env->insn_aux_data = new_data; vfree(old_data); - return 0; }
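Note: making adjust_insn_aux_data() take a preallocated new_data buffer turns it infallible; the vzalloc() now happens in bpf_patch_insn_data() before any patching, so an allocation failure surfaces before bpf_patch_insn_single() has rewritten the program instead of leaving a half-updated verifier state behind.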
static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) @@@ -11481,6 -11761,14 +11765,14 @@@ static struct bpf_prog *bpf_patch_insn_ const struct bpf_insn *patch, u32 len) { struct bpf_prog *new_prog; + struct bpf_insn_aux_data *new_data = NULL; + + if (len > 1) { + new_data = vzalloc(array_size(env->prog->len + len - 1, + sizeof(struct bpf_insn_aux_data))); + if (!new_data) + return NULL; + }
new_prog = bpf_patch_insn_single(env->prog, off, patch, len); if (IS_ERR(new_prog)) { @@@ -11488,10 -11776,10 +11780,10 @@@ verbose(env, "insn %d cannot be patched due to 16-bit range\n", env->insn_aux_data[off].orig_idx); + vfree(new_data); return NULL; } - if (adjust_insn_aux_data(env, new_prog, off, len)) - return NULL; + adjust_insn_aux_data(env, new_data, new_prog, off, len); adjust_subprog_starts(env, off, len); adjust_poke_descs(new_prog, off, len); return new_prog; @@@ -12342,6 -12630,7 +12634,7 @@@ static int do_misc_fixups(struct bpf_ve { struct bpf_prog *prog = env->prog; bool expect_blinding = bpf_jit_blinding_enabled(prog); + enum bpf_prog_type prog_type = resolve_prog_type(prog); struct bpf_insn *insn = prog->insnsi; const struct bpf_func_proto *fn; const int insn_cnt = prog->len; @@@ -12559,6 -12848,39 +12852,39 @@@ continue; }
+ if (insn->imm == BPF_FUNC_timer_set_callback) { + /* The verifier will process callback_fn as many times as necessary + * with different maps and the register states prepared by + * set_timer_callback_state will be accurate. + * + * The following use case is valid: + * map1 is shared by prog1, prog2, prog3. + * prog1 calls bpf_timer_init for some map1 elements + * prog2 calls bpf_timer_set_callback for some map1 elements. + * Those that were not bpf_timer_init-ed will return -EINVAL. + * prog3 calls bpf_timer_start for some map1 elements. + * Those that were not both bpf_timer_init-ed and + * bpf_timer_set_callback-ed will return -EINVAL. + */ + struct bpf_insn ld_addrs[2] = { + BPF_LD_IMM64(BPF_REG_3, (long)prog->aux), + }; + + insn_buf[0] = ld_addrs[0]; + insn_buf[1] = ld_addrs[1]; + insn_buf[2] = *insn; + cnt = 3; + + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); + if (!new_prog) + return -ENOMEM; + + delta += cnt - 1; + env->prog = prog = new_prog; + insn = new_prog->insnsi + i + delta; + goto patch_call_imm; + } + /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup * and other inlining handlers are currently limited to 64 bit * only. @@@ -12675,6 -12997,21 +13001,21 @@@ patch_map_ops_generic continue; }
+ /* Implement bpf_get_func_ip inline. */ + if (prog_type == BPF_PROG_TYPE_TRACING && + insn->imm == BPF_FUNC_get_func_ip) { + /* Load IP address from ctx - 8 */ + insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); + + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1); + if (!new_prog) + return -ENOMEM; + + env->prog = prog = new_prog; + insn = new_prog->insnsi + i + delta; + continue; + } + patch_call_imm: fn = env->ops->get_func_proto(insn->imm, env->prog); /* all functions that have prototype and verifier allowed diff --combined kernel/fork.c index 44f4c2d83763,e8b41e212110..c97e85245dfc --- a/kernel/fork.c +++ b/kernel/fork.c @@@ -828,10 -828,10 +828,10 @@@ void __init fork_init(void for (i = 0; i < MAX_PER_NAMESPACE_UCOUNTS; i++) init_user_ns.ucount_max[i] = max_threads/2;
- set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, task_rlimit(&init_task, RLIMIT_NPROC)); - set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, task_rlimit(&init_task, RLIMIT_MSGQUEUE)); - set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, task_rlimit(&init_task, RLIMIT_SIGPENDING)); - set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, task_rlimit(&init_task, RLIMIT_MEMLOCK)); + set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, RLIM_INFINITY); + set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, RLIM_INFINITY); + set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY); + set_rlimit_ucount_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, RLIM_INFINITY);
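Note: seeding the init user namespace's rlimit-style ucount ceilings with RLIM_INFINITY instead of init_task's rlimits means the root namespace itself no longer imposes a cap; the ordinary per-process rlimits still apply, they are simply not duplicated as namespace-wide maxima.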
#ifdef CONFIG_VMAP_STACK cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache", @@@ -2083,6 -2083,7 +2083,7 @@@ static __latent_entropy struct task_str #endif #ifdef CONFIG_BPF_SYSCALL RCU_INIT_POINTER(p->bpf_storage, NULL); + p->bpf_ctx = NULL; #endif
/* Perform scheduler related setup. Assign this task to a CPU. */ diff --combined net/core/rtnetlink.c index 662eb1c37f47,2dcf1c084b20..972c8cb303a5 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@@ -710,15 -710,8 +710,8 @@@ out int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo) { struct sock *rtnl = net->rtnl; - int err = 0; - - NETLINK_CB(skb).dst_group = group; - if (echo) - refcount_inc(&skb->users); - netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL); - if (echo) - err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT); - return err; + + return nlmsg_notify(rtnl, skb, pid, group, echo, GFP_KERNEL); }
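Note: rtnetlink_send() collapses into a call to nlmsg_notify(), which already implements broadcast plus the optional NLM_F_ECHO unicast, so the open-coded refcount_inc()/netlink_unicast() sequence goes away. rtnl_notify() below gets the same simplification, which presumes nlmsg_report() copes with a NULL nlmsghdr.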
int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid) @@@ -733,12 -726,8 +726,8 @@@ void rtnl_notify(struct sk_buff *skb, s struct nlmsghdr *nlh, gfp_t flags) { struct sock *rtnl = net->rtnl; - int report = 0; - - if (nlh) - report = nlmsg_report(nlh);
- nlmsg_notify(rtnl, skb, pid, group, report, flags); + nlmsg_notify(rtnl, skb, pid, group, nlmsg_report(nlh), flags); } EXPORT_SYMBOL(rtnl_notify);
@@@ -1970,6 -1959,13 +1959,13 @@@ static bool link_master_filtered(struc return false;
master = netdev_master_upper_dev_get(dev); + + /* 0 is already used to denote IFLA_MASTER wasn't passed, therefore need + * another invalid value for ifindex to denote "no master". + */ + if (master_idx == -1) + return !!master; + if (!master || master->ifindex != master_idx) return true;
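Note: since master_idx == 0 already encodes "IFLA_MASTER not supplied", the sentinel -1 (0xffffffff once read as a u32) is repurposed to mean "only devices without a master". A userspace dump request could set it like this (hypothetical libmnl-style fragment):

    mnl_attr_put_u32(nlh, IFLA_MASTER, (uint32_t)-1); /* masterless only */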
@@@ -2268,7 -2264,8 +2264,8 @@@ invalid_attr return -EINVAL; }
- static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) + static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[], + struct netlink_ext_ack *extack) { if (dev) { if (tb[IFLA_ADDRESS] && @@@ -2295,7 -2292,7 +2292,7 @@@ return -EOPNOTSUPP;
if (af_ops->validate_link_af) { - err = af_ops->validate_link_af(dev, af); + err = af_ops->validate_link_af(dev, af, extack); if (err < 0) return err; } @@@ -2603,12 -2600,11 +2600,12 @@@ static int do_setlink(const struct sk_b const struct net_device_ops *ops = dev->netdev_ops; int err;
- err = validate_linkmsg(dev, tb); + err = validate_linkmsg(dev, tb, extack); if (err < 0) return err;
if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD] || tb[IFLA_TARGET_NETNSID]) { + const char *pat = ifname && ifname[0] ? ifname : NULL; struct net *net; int new_ifindex;
@@@ -2624,7 -2620,7 +2621,7 @@@ else new_ifindex = 0;
- err = __dev_change_net_namespace(dev, net, ifname, new_ifindex); + err = __dev_change_net_namespace(dev, net, pat, new_ifindex); put_net(net); if (err) goto errout; @@@ -3302,7 -3298,7 +3299,7 @@@ replay m_ops = master_dev->rtnl_link_ops; }
- err = validate_linkmsg(dev, tb); + err = validate_linkmsg(dev, tb, extack); if (err < 0) return err;
diff --combined net/ipv4/ip_gre.c index 95419b7adf5c,6ebf05859acb..177d26d8fb9c --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@@ -473,8 -473,6 +473,8 @@@ static void __gre_xmit(struct sk_buff *
static int gre_handle_offloads(struct sk_buff *skb, bool csum) { + if (csum && skb_checksum_start(skb) < skb->data) + return -EINVAL; return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE); }
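Note: gre_handle_offloads() now rejects checksum offload whenever skb_checksum_start() points in front of skb->data, i.e. when headers covering the checksum start have already been pulled; computing the GRE checksum from such an offset would be invalid, so the packet is dropped with -EINVAL instead. The IPv6 twin of this hunk appears below in net/ipv6/ip6_gre.c.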
@@@ -925,7 -923,7 +925,7 @@@ static const struct net_device_ops ipgr .ndo_stop = ipgre_close, #endif .ndo_start_xmit = ipgre_xmit, - .ndo_do_ioctl = ip_tunnel_ioctl, + .ndo_siocdevprivate = ip_tunnel_siocdevprivate, .ndo_change_mtu = ip_tunnel_change_mtu, .ndo_get_stats64 = dev_get_tstats64, .ndo_get_iflink = ip_tunnel_get_iflink, diff --combined net/ipv4/route.c index a6f20ee35335,b181773d7ad3..1e3b18797070 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@@ -276,12 -276,13 +276,13 @@@ static int rt_cpu_seq_show(struct seq_f struct rt_cache_stat *st = v;
if (v == SEQ_START_TOKEN) { - seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n"); + seq_puts(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n"); return 0; }
- seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x " - " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n", + seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x " + "%08x %08x %08x %08x %08x %08x " + "%08x %08x %08x %08x\n", dst_entries_get_slow(&ipv4_dst_ops), 0, /* st->in_hit */ st->in_slow_tot, @@@ -600,14 -601,14 +601,14 @@@ static struct fib_nh_exception *fnhe_ol return oldest; }
-static inline u32 fnhe_hashfun(__be32 daddr) +static u32 fnhe_hashfun(__be32 daddr) { - static u32 fnhe_hashrnd __read_mostly; - u32 hval; + static siphash_key_t fnhe_hash_key __read_mostly; + u64 hval;
- net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd)); - hval = jhash_1word((__force u32)daddr, fnhe_hashrnd); - return hash_32(hval, FNHE_HASH_SHIFT); + net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key)); + hval = siphash_1u32((__force u32)daddr, &fnhe_hash_key); + return hash_64(hval, FNHE_HASH_SHIFT); }
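Note: the switch from jhash with a lazily initialised 32-bit seed to keyed siphash hardens FNHE bucket selection: jhash is not a PRF, so an attacker able to observe collisions may recover the seed and flood chosen buckets with exceptions, whereas siphash keyed by fnhe_hash_key keeps bucket placement unpredictable. The IPv6 route-exception cache further down receives the same conversion.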
static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe) @@@ -1299,26 -1300,7 +1300,7 @@@ static unsigned int ipv4_default_advmss
INDIRECT_CALLABLE_SCOPE unsigned int ipv4_mtu(const struct dst_entry *dst) { - const struct rtable *rt = (const struct rtable *)dst; - unsigned int mtu = rt->rt_pmtu; - - if (!mtu || time_after_eq(jiffies, rt->dst.expires)) - mtu = dst_metric_raw(dst, RTAX_MTU); - - if (mtu) - goto out; - - mtu = READ_ONCE(dst->dev->mtu); - - if (unlikely(ip_mtu_locked(dst))) { - if (rt->rt_uses_gateway && mtu > 576) - mtu = 576; - } - - out: - mtu = min_t(unsigned int, mtu, IP_MAX_MTU); - - return mtu - lwtunnel_headroom(dst->lwtstate, mtu); + return ip_dst_mtu_maybe_forward(dst, false); } EXPORT_INDIRECT_CALLABLE(ipv4_mtu);
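Note: ipv4_mtu() is reduced to the shared ip_dst_mtu_maybe_forward() helper (ip6_mtu() below does the same with ip6_dst_mtu_maybe_forward()), so the PMTU-expiry and mtu-lock handling formerly open-coded here now lives in one place used by both the output and forwarding paths.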
@@@ -2831,8 -2813,7 +2813,7 @@@ struct dst_entry *ipv4_blackhole_route( new->output = dst_discard_out;
new->dev = net->loopback_dev; - if (new->dev) - dev_hold(new->dev); + dev_hold(new->dev);
rt->rt_is_input = ort->rt_is_input; rt->rt_iif = ort->rt_iif; diff --combined net/ipv6/ip6_fib.c index ef75c9b05f17,a8f118e469b7..1bec5b22f80d --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@@ -1341,7 -1341,7 +1341,7 @@@ static void __fib6_update_sernum_upto_r struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node, lockdep_is_held(&rt->fib6_table->tb6_lock));
- /* paired with smp_rmb() in rt6_get_cookie_safe() */ + /* paired with smp_rmb() in fib6_get_cookie_safe() */ smp_wmb(); while (fn) { fn->fn_sernum = sernum; @@@ -2449,8 -2449,8 +2449,8 @@@ int __init fib6_init(void int ret = -ENOMEM;
fib6_node_kmem = kmem_cache_create("fib6_nodes", - sizeof(struct fib6_node), - 0, SLAB_HWCACHE_ALIGN, + sizeof(struct fib6_node), 0, + SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL); if (!fib6_node_kmem) goto out; diff --combined net/ipv6/ip6_gre.c index 7a5e90e09363,3ad201d372d8..7baf41d160f5 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@@ -629,8 -629,6 +629,8 @@@ drop
static int gre_handle_offloads(struct sk_buff *skb, bool csum) { + if (csum && skb_checksum_start(skb) < skb->data) + return -EINVAL; return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE); } @@@ -1246,8 -1244,9 +1246,9 @@@ static void ip6gre_tnl_parm_to_user(str memcpy(u->name, p->name, sizeof(u->name)); }
- static int ip6gre_tunnel_ioctl(struct net_device *dev, - struct ifreq *ifr, int cmd) + static int ip6gre_tunnel_siocdevprivate(struct net_device *dev, + struct ifreq *ifr, void __user *data, + int cmd) { int err = 0; struct ip6_tnl_parm2 p; @@@ -1261,7 -1260,7 +1262,7 @@@ switch (cmd) { case SIOCGETTUNNEL: if (dev == ign->fb_tunnel_dev) { - if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) { + if (copy_from_user(&p, data, sizeof(p))) { err = -EFAULT; break; } @@@ -1272,7 -1271,7 +1273,7 @@@ } memset(&p, 0, sizeof(p)); ip6gre_tnl_parm_to_user(&p, &t->parms); - if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) + if (copy_to_user(data, &p, sizeof(p))) err = -EFAULT; break;
@@@ -1283,7 -1282,7 +1284,7 @@@ goto done;
err = -EFAULT; - if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) + if (copy_from_user(&p, data, sizeof(p))) goto done;
err = -EINVAL; @@@ -1320,7 -1319,7 +1321,7 @@@
memset(&p, 0, sizeof(p)); ip6gre_tnl_parm_to_user(&p, &t->parms); - if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) + if (copy_to_user(data, &p, sizeof(p))) err = -EFAULT; } else err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT); @@@ -1333,7 -1332,7 +1334,7 @@@
if (dev == ign->fb_tunnel_dev) { err = -EFAULT; - if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) + if (copy_from_user(&p, data, sizeof(p))) goto done; err = -ENOENT; ip6gre_tnl_parm_from_user(&p1, &p); @@@ -1400,7 -1399,7 +1401,7 @@@ static const struct net_device_ops ip6g .ndo_init = ip6gre_tunnel_init, .ndo_uninit = ip6gre_tunnel_uninit, .ndo_start_xmit = ip6gre_tunnel_xmit, - .ndo_do_ioctl = ip6gre_tunnel_ioctl, + .ndo_siocdevprivate = ip6gre_tunnel_siocdevprivate, .ndo_change_mtu = ip6_tnl_change_mtu, .ndo_get_stats64 = dev_get_tstats64, .ndo_get_iflink = ip6_tnl_get_iflink, diff --combined net/ipv6/route.c index c5e8ecb96426,6cf4bb89ca69..f34137d5bf85 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@@ -41,7 -41,6 +41,7 @@@ #include <linux/nsproxy.h> #include <linux/slab.h> #include <linux/jhash.h> +#include <linux/siphash.h> #include <net/net_namespace.h> #include <net/snmp.h> #include <net/ipv6.h> @@@ -1485,24 -1484,17 +1485,24 @@@ static void rt6_exception_remove_oldest static u32 rt6_exception_hash(const struct in6_addr *dst, const struct in6_addr *src) { - static u32 seed __read_mostly; - u32 val; + static siphash_key_t rt6_exception_key __read_mostly; + struct { + struct in6_addr dst; + struct in6_addr src; + } __aligned(SIPHASH_ALIGNMENT) combined = { + .dst = *dst, + }; + u64 val;
- net_get_random_once(&seed, sizeof(seed)); - val = jhash2((const u32 *)dst, sizeof(*dst)/sizeof(u32), seed); + net_get_random_once(&rt6_exception_key, sizeof(rt6_exception_key));
#ifdef CONFIG_IPV6_SUBTREES if (src) - val = jhash2((const u32 *)src, sizeof(*src)/sizeof(u32), val); + combined.src = *src; #endif - return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT); + val = siphash(&combined, sizeof(combined), &rt6_exception_key); + + return hash_64(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT); }
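Note: the on-stack struct aligned to SIPHASH_ALIGNMENT is the stock pattern for feeding a composite key to siphash(). In generic form (field names and bucket shift are illustrative, not the kernel's structs):

    /* sketch: keyed hash of a two-field composite key */
    #include <linux/hash.h>
    #include <linux/net.h>
    #include <linux/siphash.h>

    static siphash_key_t sketch_key __read_mostly;

    static u32 sketch_hash(__be32 saddr, __be32 daddr)
    {
            struct {
                    __be32 saddr;
                    __be32 daddr;
            } __aligned(SIPHASH_ALIGNMENT) combined = {
                    .saddr = saddr,
                    .daddr = daddr,
            };

            net_get_random_once(&sketch_key, sizeof(sketch_key));
            return hash_64(siphash(&combined, sizeof(combined), &sketch_key),
                           8 /* bucket shift, illustrative */);
    }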
/* Helper function to find the cached rt in the hash table @@@ -3209,25 -3201,7 +3209,7 @@@ static unsigned int ip6_default_advmss(
INDIRECT_CALLABLE_SCOPE unsigned int ip6_mtu(const struct dst_entry *dst) { - struct inet6_dev *idev; - unsigned int mtu; - - mtu = dst_metric_raw(dst, RTAX_MTU); - if (mtu) - goto out; - - mtu = IPV6_MIN_MTU; - - rcu_read_lock(); - idev = __in6_dev_get(dst->dev); - if (idev) - mtu = idev->cnf.mtu6; - rcu_read_unlock(); - - out: - mtu = min_t(unsigned int, mtu, IP6_MAX_MTU); - - return mtu - lwtunnel_headroom(dst->lwtstate, mtu); + return ip6_dst_mtu_maybe_forward(dst, false); } EXPORT_INDIRECT_CALLABLE(ip6_mtu);
@@@ -3652,8 -3626,7 +3634,7 @@@ out if (err) { lwtstate_put(fib6_nh->fib_nh_lws); fib6_nh->fib_nh_lws = NULL; - if (dev) - dev_put(dev); + dev_put(dev); }
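Note: as in the ipv4_blackhole_route() hunk earlier in this merge, the NULL test before dev_put() is dropped because dev_hold() and dev_put() now accept a NULL net_device, so callers can balance references unconditionally.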
return err; @@@ -6646,7 -6619,7 +6627,7 @@@ int __init ip6_route_init(void ret = -ENOMEM; ip6_dst_ops_template.kmem_cachep = kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0, - SLAB_HWCACHE_ALIGN, NULL); + SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL); if (!ip6_dst_ops_template.kmem_cachep) goto out;
diff --combined net/qrtr/qrtr.c index 0c30908628ba,6c61b7b1838f..b8508e35d20e --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@@ -493,7 -493,7 +493,7 @@@ int qrtr_endpoint_post(struct qrtr_endp goto err; }
- if (len != ALIGN(size, 4) + hdrlen) + if (!size || len != ALIGN(size, 4) + hdrlen) goto err;
if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA && @@@ -1157,14 -1157,14 +1157,14 @@@ static int qrtr_ioctl(struct socket *so rc = put_user(len, (int __user *)argp); break; case SIOCGIFADDR: - if (copy_from_user(&ifr, argp, sizeof(ifr))) { + if (get_user_ifreq(&ifr, NULL, argp)) { rc = -EFAULT; break; }
sq = (struct sockaddr_qrtr *)&ifr.ifr_addr; *sq = ipc->us; - if (copy_to_user(argp, &ifr, sizeof(ifr))) { + if (put_user_ifreq(&ifr, argp)) { rc = -EFAULT; break; } diff --combined net/sched/sch_ets.c index c76701ac35ab,925924fab1ab..1f857ffd1ac2 --- a/net/sched/sch_ets.c +++ b/net/sched/sch_ets.c @@@ -390,7 -390,7 +390,7 @@@ static struct ets_class *ets_classify(s *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; if (TC_H_MAJ(skb->priority) != sch->handle) { fl = rcu_dereference_bh(q->filter_list); - err = tcf_classify(skb, fl, &res, false); + err = tcf_classify(skb, NULL, fl, &res, false); #ifdef CONFIG_NET_CLS_ACT switch (err) { case TC_ACT_STOLEN: @@@ -660,13 -660,6 +660,13 @@@ static int ets_qdisc_change(struct Qdis sch_tree_lock(sch);
q->nbands = nbands; + for (i = nstrict; i < q->nstrict; i++) { + INIT_LIST_HEAD(&q->classes[i].alist); + if (q->classes[i].qdisc->q.qlen) { + list_add_tail(&q->classes[i].alist, &q->active); + q->classes[i].deficit = quanta[i]; + } + } q->nstrict = nstrict; memcpy(q->prio2band, priomap, sizeof(priomap));
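Note: the added loop covers bands demoted from strict to DRR by the reconfiguration (indices from the new nstrict up to the old q->nstrict). Strict bands never sat on the active list, so each demoted class gets its alist head initialised and, if its qdisc already holds packets, is appended to the active list with a full quantum as its deficit, preventing those queues from stalling after the change.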
diff --combined tools/testing/selftests/Makefile index dd0388eab94d,da9e8b699e42..c852eb40c4f7 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@@ -35,10 -35,10 +35,11 @@@ TARGETS += memory-hotplu TARGETS += mincore TARGETS += mount TARGETS += mount_setattr +TARGETS += move_mount_set_group TARGETS += mqueue TARGETS += nci TARGETS += net + TARGETS += net/af_unix TARGETS += net/forwarding TARGETS += net/mptcp TARGETS += netfilter