The following commit has been merged in the master branch:

commit 28d1e23190bf1dcd6d42d8a4065f997b821a979e
Merge: c4431a27035cc4587cd35b45888881af10a5c4d1 d95a93a9b71677a43f967a1b7986decab84b7765
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Sun Jul 24 12:18:39 2016 +1000

    Merge remote-tracking branch 'net-next/master'
diff --combined MAINTAINERS index 5727f96,06e8411..27f745c --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -288,7 -288,6 +288,7 @@@ F: include/linux/acpi. F: include/acpi/ F: Documentation/acpi/ F: Documentation/ABI/testing/sysfs-bus-acpi +F: Documentation/ABI/testing/configfs-acpi F: drivers/pci/*acpi* F: drivers/pci/*/*acpi* F: drivers/pci/*/*/*acpi* @@@ -1527,7 -1526,6 +1527,7 @@@ M: David Brown <david.brown@linaro.org L: linux-arm-msm@vger.kernel.org L: linux-soc@vger.kernel.org S: Maintained +F: Documentation/devicetree/bindings/soc/qcom/ F: arch/arm/boot/dts/qcom-*.dts F: arch/arm/boot/dts/qcom-*.dtsi F: arch/arm/mach-qcom/ @@@ -1605,10 -1603,8 +1605,10 @@@ F: arch/arm/mach-s3c24* F: arch/arm/mach-s3c64xx/ F: arch/arm/mach-s5p*/ F: arch/arm/mach-exynos*/ -F: drivers/*/*s3c2410* -F: drivers/*/*/*s3c2410* +F: drivers/*/*s3c24* +F: drivers/*/*/*s3c24* +F: drivers/*/*s3c64xx* +F: drivers/*/*s5pv210* F: drivers/memory/samsung/* F: drivers/soc/samsung/* F: drivers/spi/spi-s3c* @@@ -1651,13 -1647,6 +1651,13 @@@ L: linux-media@vger.kernel.or S: Maintained F: drivers/media/platform/s5p-tv/
+ARM/SAMSUNG S5P SERIES HDMI CEC SUBSYSTEM SUPPORT +M: Kyungmin Park kyungmin.park@samsung.com +L: linux-arm-kernel@lists.infradead.org +L: linux-media@vger.kernel.org +S: Maintained +F: drivers/staging/media/platform/s5p-cec/ + ARM/SAMSUNG S5P SERIES JPEG CODEC SUPPORT M: Andrzej Pietrasiewicz andrzej.p@samsung.com M: Jacek Anaszewski j.anaszewski@samsung.com @@@ -1680,6 -1669,7 +1680,6 @@@ F: arch/arm/boot/dts/sh F: arch/arm/configs/shmobile_defconfig F: arch/arm/include/debug/renesas-scif.S F: arch/arm/mach-shmobile/ -F: drivers/sh/ F: drivers/soc/renesas/ F: include/linux/soc/renesas/
@@@ -1704,6 -1694,8 +1704,6 @@@ S: Maintaine F: drivers/edac/altera_edac.
ARM/STI ARCHITECTURE -M: Srinivas Kandagatla srinivas.kandagatla@gmail.com -M: Maxime Coquelin maxime.coquelin@st.com M: Patrice Chotard patrice.chotard@st.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: kernel@stlinux.com @@@ -1736,7 -1728,6 +1736,7 @@@ F: drivers/ata/ahci_st.
ARM/STM32 ARCHITECTURE M: Maxime Coquelin mcoquelin.stm32@gmail.com +M: Alexandre Torgue alexandre.torgue@st.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/mcoquelin/stm32.git @@@ -1747,7 -1738,8 +1747,7 @@@ ARM/TANGO ARCHITECTUR M: Marc Gonzalez marc_gonzalez@sigmadesigns.com L: linux-arm-kernel@lists.infradead.org S: Maintained -F: arch/arm/mach-tango/ -F: arch/arm/boot/dts/tango* +N: tango
ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT M: Lennert Buytenhek kernel@wantstofly.org @@@ -1834,6 -1826,7 +1834,6 @@@ L: linux-arm-kernel@lists.infradead.or T: git git://git.linaro.org/people/ulfh/clk.git S: Maintained F: drivers/clk/ux500/ -F: include/linux/platform_data/clk-ux500.h
ARM/VERSATILE EXPRESS PLATFORM M: Liviu Dudau liviu.dudau@arm.com @@@ -2304,6 -2297,7 +2304,7 @@@ S: Maintaine F: Documentation/ABI/testing/sysfs-class-net-batman-adv F: Documentation/ABI/testing/sysfs-class-net-mesh F: Documentation/networking/batman-adv.txt + F: include/uapi/linux/batman_adv.h F: net/batman-adv/
BAYCOM/HDLCDRV DRIVERS FOR AX.25 @@@ -2467,6 -2461,14 +2468,14 @@@ L: netdev@vger.kernel.or S: Supported F: drivers/net/ethernet/broadcom/b44.*
+ BROADCOM B53 ETHERNET SWITCH DRIVER + M: Florian Fainelli f.fainelli@gmail.com + L: netdev@vger.kernel.org + L: openwrt-devel@lists.openwrt.org (subscribers-only) + S: Supported + F: drivers/net/dsa/b53/* + F: include/linux/platform_data/b53.h + BROADCOM GENET ETHERNET DRIVER M: Florian Fainelli f.fainelli@gmail.com L: netdev@vger.kernel.org @@@ -2491,14 -2493,17 +2500,14 @@@ BROADCOM BCM281XX/BCM11XXX/BCM216XX AR M: Florian Fainelli f.fainelli@gmail.com M: Ray Jui rjui@broadcom.com M: Scott Branden sbranden@broadcom.com -L: bcm-kernel-feedback-list@broadcom.com +M: bcm-kernel-feedback-list@broadcom.com T: git git://github.com/broadcom/mach-bcm S: Maintained +N: bcm281* +N: bcm113* +N: bcm216* +N: kona F: arch/arm/mach-bcm/ -F: arch/arm/boot/dts/bcm113* -F: arch/arm/boot/dts/bcm216* -F: arch/arm/boot/dts/bcm281* -F: arch/arm64/boot/dts/broadcom/ -F: arch/arm/configs/bcm_defconfig -F: drivers/mmc/host/sdhci-bcm-kona.c -F: drivers/clocksource/bcm_kona_timer.c
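A note on the two MAINTAINERS entry types being exchanged in the Broadcom hunks: per the key at the top of MAINTAINERS, F: lines are literal file/directory patterns, while N: lines are regex patterns matched anywhere in a path, so one N: line can cover files scattered across the tree. That is why the N: kona addition above lets the explicit F: lines below it be deleted; an illustration using two of the removed paths:

    N:	kona
        matches drivers/mmc/host/sdhci-bcm-kona.c
        matches drivers/clocksource/bcm_kona_timer.c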
BROADCOM BCM2835 ARM ARCHITECTURE M: Stephen Warren swarren@wwwdotorg.org @@@ -2521,21 -2526,20 +2530,21 @@@ F: arch/mips/include/asm/mach-bcm47xx/
BROADCOM BCM5301X ARM ARCHITECTURE M: Hauke Mehrtens hauke@hauke-m.de +M: Rafał Miłecki zajec5@gmail.com +M: bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org S: Maintained F: arch/arm/mach-bcm/bcm_5301x.c -F: arch/arm/boot/dts/bcm5301x.dtsi +F: arch/arm/boot/dts/bcm5301x*.dtsi F: arch/arm/boot/dts/bcm470*
BROADCOM BCM63XX ARM ARCHITECTURE M: Florian Fainelli f.fainelli@gmail.com +M: bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -L: bcm-kernel-feedback-list@broadcom.com T: git git://github.com/broadcom/stblinux.git S: Maintained -F: arch/arm/mach-bcm/bcm63xx.c -F: arch/arm/include/debug/bcm63xx.S +N: bcm63xx
BROADCOM BCM63XX/BCM33XX UDC DRIVER M: Kevin Cernekee cernekee@gmail.com @@@ -2547,8 -2551,8 +2556,8 @@@ BROADCOM BCM7XXX ARM ARCHITECTUR M: Brian Norris computersforpeace@gmail.com M: Gregory Fong gregory.0xf0@gmail.com M: Florian Fainelli f.fainelli@gmail.com +M: bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -L: bcm-kernel-feedback-list@broadcom.com T: git git://github.com/broadcom/stblinux.git S: Maintained F: arch/arm/mach-bcm/*brcmstb* @@@ -2581,12 -2585,11 +2590,11 @@@ S: Supporte F: drivers/net/ethernet/broadcom/tg3.*
BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER - M: Brett Rudley brudley@broadcom.com - M: Arend van Spriel arend@broadcom.com - M: Franky (Zhenhui) Lin frankyl@broadcom.com - M: Hante Meuleman meuleman@broadcom.com + M: Arend van Spriel arend.vanspriel@broadcom.com + M: Franky Lin franky.lin@broadcom.com + M: Hante Meuleman hante.meuleman@broadcom.com L: linux-wireless@vger.kernel.org - L: brcm80211-dev-list@broadcom.com + L: brcm80211-dev-list.pdl@broadcom.com S: Supported F: drivers/net/wireless/broadcom/brcm80211/
@@@ -2606,13 -2609,13 +2614,13 @@@ BROADCOM IPROC ARM ARCHITECTUR M: Ray Jui rjui@broadcom.com M: Scott Branden sbranden@broadcom.com M: Jon Mason jonmason@broadcom.com +M: bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -L: bcm-kernel-feedback-list@broadcom.com T: git git://github.com/broadcom/cygnus-linux.git S: Maintained N: iproc N: cygnus -N: nsp +N: bcm[-_]nsp N: bcm9113* N: bcm9583* N: bcm9585* @@@ -2623,9 -2626,6 +2631,9 @@@ N: bcm583 N: bcm585* N: bcm586* N: bcm88312 +F: arch/arm64/boot/dts/broadcom/ns2* +F: drivers/clk/bcm/clk-ns* +F: drivers/pinctrl/bcm/pinctrl-ns*
BROADCOM BRCMSTB GPIO DRIVER M: Gregory Fong gregory.0xf0@gmail.com @@@ -2670,8 -2670,8 +2678,8 @@@ F: drivers/net/ethernet/broadcom/bcmsys
BROADCOM VULCAN ARM64 SOC M: Jayachandran C. jchandra@broadcom.com +M: bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -L: bcm-kernel-feedback-list@broadcom.com S: Maintained F: arch/arm64/boot/dts/broadcom/vulcan*
@@@ -2821,6 -2821,7 +2829,7 @@@ W: https://github.com/linux-ca T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git S: Maintained + F: Documentation/devicetree/bindings/net/can/ F: drivers/net/can/ F: include/linux/can/dev.h F: include/linux/can/platform/ @@@ -2860,22 -2861,6 +2869,22 @@@ F: drivers/net/ieee802154/cc2520. F: include/linux/spi/cc2520.h F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
+CEC DRIVER +M: Hans Verkuil hans.verkuil@cisco.com +L: linux-media@vger.kernel.org +T: git git://linuxtv.org/media_tree.git +W: http://linuxtv.org +S: Supported +F: Documentation/cec.txt +F: Documentation/DocBook/media/v4l/cec* +F: drivers/staging/media/cec/ +F: drivers/media/cec-edid.c +F: drivers/media/rc/keymaps/rc-cec.c +F: include/media/cec.h +F: include/media/cec-edid.h +F: include/linux/cec.h +F: include/linux/cec-funcs.h + CELL BROADBAND ENGINE ARCHITECTURE M: Arnd Bergmann arnd@arndb.de L: linuxppc-dev@lists.ozlabs.org @@@ -4501,7 -4486,7 +4510,7 @@@ S: Orpha F: fs/efs/
EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER -M: Thadeu Lima de Souza Cascardo cascardo@linux.vnet.ibm.com +M: Douglas Miller dougmill@linux.vnet.ibm.com L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/ibm/ehea/ @@@ -4911,6 -4896,13 +4920,13 @@@ F: drivers/net/ethernet/freescale/gianf X: drivers/net/ethernet/freescale/gianfar_ptp.c F: Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
+ FREESCALE QUICC ENGINE UCC HDLC DRIVER + M: Zhao Qiang qiang.zhao@nxp.com + L: netdev@vger.kernel.org + L: linuxppc-dev@lists.ozlabs.org + S: Maintained + F: drivers/net/wan/fsl_ucc_hdlc* + FREESCALE QUICC ENGINE UCC UART DRIVER M: Timur Tabi timur@tabi.org L: linuxppc-dev@lists.ozlabs.org @@@ -4966,13 -4958,6 +4982,13 @@@ F: Documentation/filesystems/caching F: fs/fscache/ F: include/linux/fscache*.h
+FS-CRYPTO: FILE SYSTEM LEVEL ENCRYPTION SUPPORT +M: Theodore Y. Ts'o tytso@mit.edu +M: Jaegeuk Kim jaegeuk@kernel.org +S: Supported +F: fs/crypto/ +F: include/linux/fscrypto.h + F2FS FILE SYSTEM M: Jaegeuk Kim jaegeuk@kernel.org M: Changman Lee cm224.lee@samsung.com @@@ -5201,10 -5186,10 +5217,10 @@@ S: Maintaine F: drivers/media/usb/gspca/m5602/
GSPCA PAC207 SONIXB SUBDRIVER -M: Hans de Goede hdegoede@redhat.com +M: Hans Verkuil hverkuil@xs4all.nl L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git -S: Maintained +S: Odd Fixes F: drivers/media/usb/gspca/pac207.c
GSPCA SN9C20X SUBDRIVER @@@ -5222,10 -5207,10 +5238,10 @@@ S: Maintaine F: drivers/media/usb/gspca/t613.c
GSPCA USB WEBCAM DRIVER -M: Hans de Goede hdegoede@redhat.com +M: Hans Verkuil hverkuil@xs4all.nl L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git -S: Maintained +S: Odd Fixes F: drivers/media/usb/gspca/
GUID PARTITION TABLE (GPT) @@@ -5453,6 -5438,15 +5469,15 @@@ F: include/uapi/linux/if_hippi. F: net/802/hippi.c F: drivers/net/hippi/
+ HISILICON NETWORK SUBSYSTEM DRIVER + M: Yisen Zhuang yisen.zhuang@huawei.com + M: Salil Mehta salil.mehta@huawei.com + L: netdev@vger.kernel.org + W: http://www.hisilicon.com + S: Maintained + F: drivers/net/ethernet/hisilicon/ + F: Documentation/devicetree/bindings/net/hisilicon*.txt + HISILICON SAS Controller M: John Garry john.garry@huawei.com W: http://www.hisilicon.com @@@ -6905,7 -6899,6 +6930,7 @@@ F: drivers/crypto/nx F: drivers/crypto/vmx/ F: drivers/net/ethernet/ibm/ibmveth.* F: drivers/net/ethernet/ibm/ibmvnic.* +F: drivers/pci/hotplug/pnv_php.c F: drivers/pci/hotplug/rpa* F: drivers/scsi/ibmvscsi/ N: opal @@@ -7205,6 -7198,12 +7230,12 @@@ W: http://www.kernel.org/doc/man-page L: linux-man@vger.kernel.org S: Maintained
+ MARVELL 88E6XXX ETHERNET SWITCH FABRIC DRIVER + M: Andrew Lunn andrew@lunn.ch + M: Vivien Didelot vivien.didelot@savoirfairelinux.com + S: Maintained + F: drivers/net/dsa/mv88e6xxx/ + MARVELL ARMADA DRM SUPPORT M: Russell King rmk+kernel@armlinux.org.uk S: Maintained @@@ -7212,11 -7211,6 +7243,6 @@@ F: drivers/gpu/drm/armada F: include/uapi/drm/armada_drm.h F: Documentation/devicetree/bindings/display/armada/
- MARVELL 88E6352 DSA support - M: Guenter Roeck linux@roeck-us.net - S: Maintained - F: drivers/net/dsa/mv88e6352.c - MARVELL CRYPTO DRIVER M: Boris Brezillon boris.brezillon@free-electrons.com M: Arnaud Ebalard arno@natisbad.org @@@ -7352,16 -7346,6 +7378,16 @@@ L: linux-iio@vger.kernel.or S: Maintained F: drivers/iio/potentiometer/mcp4531.c
+MEDIA DRIVERS FOR RENESAS - FCP +M: Laurent Pinchart laurent.pinchart@ideasonboard.com +L: linux-media@vger.kernel.org +L: linux-renesas-soc@vger.kernel.org +T: git git://linuxtv.org/media_tree.git +S: Supported +F: Documentation/devicetree/bindings/media/renesas,fcp.txt +F: drivers/media/platform/rcar-fcp.c +F: include/media/rcar-fcp.h + MEDIA DRIVERS FOR RENESAS - VSP1 M: Laurent Pinchart laurent.pinchart@ideasonboard.com L: linux-media@vger.kernel.org @@@ -7371,18 -7355,8 +7397,18 @@@ S: Supporte F: Documentation/devicetree/bindings/media/renesas,vsp1.txt F: drivers/media/platform/vsp1/
+MEDIA DRIVERS FOR HELENE +M: Abylay Ospan aospan@netup.ru +L: linux-media@vger.kernel.org +W: https://linuxtv.org +W: http://netup.tv/ +T: git git://linuxtv.org/media_tree.git +S: Supported +F: drivers/media/dvb-frontends/helene* + MEDIA DRIVERS FOR ASCOT2E M: Sergey Kozlov serjk@netup.ru +M: Abylay Ospan aospan@netup.ru L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://netup.tv/ @@@ -7392,7 -7366,6 +7418,7 @@@ F: drivers/media/dvb-frontends/ascot2e
MEDIA DRIVERS FOR CXD2841ER M: Sergey Kozlov serjk@netup.ru +M: Abylay Ospan aospan@netup.ru L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://netup.tv/ @@@ -7402,7 -7375,6 +7428,7 @@@ F: drivers/media/dvb-frontends/cxd2841e
MEDIA DRIVERS FOR HORUS3A M: Sergey Kozlov serjk@netup.ru +M: Abylay Ospan aospan@netup.ru L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://netup.tv/ @@@ -7412,7 -7384,6 +7438,7 @@@ F: drivers/media/dvb-frontends/horus3a
MEDIA DRIVERS FOR LNBH25 M: Sergey Kozlov serjk@netup.ru +M: Abylay Ospan aospan@netup.ru L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://netup.tv/ @@@ -7422,7 -7393,6 +7448,7 @@@ F: drivers/media/dvb-frontends/lnbh25
MEDIA DRIVERS FOR NETUP PCI UNIVERSAL DVB devices M: Sergey Kozlov serjk@netup.ru +M: Abylay Ospan aospan@netup.ru L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://netup.tv/ @@@ -7532,7 -7502,6 +7558,7 @@@ Q: http://patchwork.ozlabs.org/project/ T: git git://git.infradead.org/linux-mtd.git T: git git://git.infradead.org/l2-mtd.git S: Maintained +F: Documentation/devicetree/bindings/mtd/ F: drivers/mtd/ F: include/linux/mtd/ F: include/uapi/mtd/ @@@ -7672,8 -7641,10 +7698,8 @@@ L: linux-media@vger.kernel.or W: https://linuxtv.org W: http://palosaari.fi/linux/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ -T: git git://linuxtv.org/anttip/media_tree.git S: Maintained -F: drivers/staging/media/mn88472/ -F: drivers/media/dvb-frontends/mn88472.h +F: drivers/media/dvb-frontends/mn88472*
MN88473 MEDIA DRIVER M: Antti Palosaari crope@iki.fi @@@ -8750,7 -8721,6 +8776,7 @@@ L: linux-pci@vger.kernel.or Q: http://patchwork.ozlabs.org/project/linux-pci/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git S: Supported +F: Documentation/devicetree/bindings/pci/ F: Documentation/PCI/ F: drivers/pci/ F: include/linux/pci* @@@ -8896,15 -8866,6 +8922,15 @@@ S: Maintaine F: Documentation/devicetree/bindings/pci/xgene-pci-msi.txt F: drivers/pci/host/pci-xgene-msi.c
+PCIE DRIVER FOR AXIS ARTPEC +M: Niklas Cassel niklas.cassel@axis.com +M: Jesper Nilsson jesper.nilsson@axis.com +L: linux-arm-kernel@axis.com +L: linux-pci@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/pci/axis,artpec* +F: drivers/pci/host/*artpec* + PCIE DRIVER FOR HISILICON M: Zhou Wang wangzhou1@hisilicon.com M: Gabriele Paoloni gabriele.paoloni@huawei.com @@@ -9164,12 -9125,6 +9190,12 @@@ F: drivers/firmware/psci. F: include/linux/psci.h F: include/uapi/linux/psci.h
+POWERNV OPERATOR PANEL LCD DISPLAY DRIVER +M: Suraj Jitindar Singh sjitindarsingh@gmail.com +L: linuxppc-dev@lists.ozlabs.org +S: Maintained +F: drivers/char/powernv-op-panel.c + PNP SUPPORT M: "Rafael J. Wysocki" rafael.j.wysocki@intel.com S: Maintained @@@ -9294,13 -9249,6 +9320,13 @@@ F: include/linux/tracehook. F: include/uapi/linux/ptrace.h F: kernel/ptrace.c
+PULSE8-CEC DRIVER +M: Hans Verkuil hverkuil@xs4all.nl +L: linux-media@vger.kernel.org +T: git git://linuxtv.org/media_tree.git +S: Maintained +F: drivers/staging/media/pulse8-cec + PVRUSB2 VIDEO4LINUX DRIVER M: Mike Isely isely@pobox.com L: pvrusb2@isely.net (subscribers-only) @@@ -9312,10 -9260,10 +9338,10 @@@ F: Documentation/video4linux/README.pvr F: drivers/media/usb/pvrusb2/
PWC WEBCAM DRIVER -M: Hans de Goede hdegoede@redhat.com +M: Hans Verkuil hverkuil@xs4all.nl L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git -S: Maintained +S: Odd Fixes F: drivers/media/usb/pwc/*
PWM FAN DRIVER @@@ -9530,14 -9478,14 +9556,14 @@@ F: drivers/video/fbdev/aty/radeon F: include/uapi/linux/radeonfb.h
RADIOSHARK RADIO DRIVER -M: Hans de Goede hdegoede@redhat.com +M: Hans Verkuil hverkuil@xs4all.nl L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/radio/radio-shark.c
RADIOSHARK2 RADIO DRIVER -M: Hans de Goede hdegoede@redhat.com +M: Hans Verkuil hverkuil@xs4all.nl L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained @@@ -9611,7 -9559,7 +9637,7 @@@ M: Florian Fainelli <florian@openwrt.or S: Maintained
RDC R6040 FAST ETHERNET DRIVER - M: Florian Fainelli florian@openwrt.org + M: Florian Fainelli f.fainelli@gmail.com L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/rdc/r6040.c @@@ -9762,7 -9710,6 +9788,6 @@@ F: Documentation/ABI/*/sysfs-driver-hid
ROCKER DRIVER M: Jiri Pirko jiri@resnulli.us - M: Scott Feldman sfeldma@gmail.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/rocker/ @@@ -10359,10 -10306,9 +10384,9 @@@ W: http://www.avagotech.co S: Supported F: drivers/scsi/be2iscsi/
- Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER + Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER (be2net) M: Sathya Perla sathya.perla@broadcom.com M: Ajit Khaparde ajit.khaparde@broadcom.com - M: Padmanabh Ratnakar padmanabh.ratnakar@broadcom.com M: Sriharsha Basavapatna sriharsha.basavapatna@broadcom.com M: Somnath Kotur somnath.kotur@broadcom.com L: netdev@vger.kernel.org @@@ -11393,6 -11339,11 +11417,6 @@@ F: Documentation/thermal/cpu-cooling-ap F: drivers/thermal/cpu_cooling.c F: include/linux/cpu_cooling.h
-THINGM BLINK(1) USB RGB LED DRIVER -M: Vivien Didelot vivien.didelot@savoirfairelinux.com -S: Maintained -F: drivers/hid/hid-thingm.c - THINKPAD ACPI EXTRAS DRIVER M: Henrique de Moraes Holschuh ibm-acpi@hmh.eng.br L: ibm-acpi-devel@lists.sourceforge.net diff --combined arch/arm/boot/dts/am33xx.dtsi index af18c47,7fa2951..98748c6 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@@ -45,9 -45,19 +45,9 @@@ device_type = "cpu"; reg = <0>;
- /* - * To consider voltage drop between PMIC and SoC, - * tolerance value is reduced to 2% from 4% and - * voltage value is increased as a precaution. - */ - operating-points = < - /* kHz uV */ - 720000 1285000 - 600000 1225000 - 500000 1125000 - 275000 1125000 - >; - voltage-tolerance = <2>; /* 2 percentage */ + operating-points-v2 = <&cpu0_opp_table>; + ti,syscon-efuse = <&scm_conf 0x7fc 0x1fff 0>; + ti,syscon-rev = <&scm_conf 0x600>;
clocks = <&dpll_mpu_ck>; clock-names = "cpu"; @@@ -56,78 -66,6 +56,78 @@@ }; };
+ cpu0_opp_table: opp_table0 { + compatible = "operating-points-v2"; + + /* + * The three following nodes are marked with opp-suspend + * because they cannot be enabled simultaneously on a + * single SoC. + */ + opp50@300000000 { + opp-hz = /bits/ 64 <300000000>; + opp-microvolt = <950000 931000 969000>; + opp-supported-hw = <0x06 0x0010>; + opp-suspend; + }; + + opp100@275000000 { + opp-hz = /bits/ 64 <275000000>; + opp-microvolt = <1100000 1078000 1122000>; + opp-supported-hw = <0x01 0x00FF>; + opp-suspend; + }; + + opp100@300000000 { + opp-hz = /bits/ 64 <300000000>; + opp-microvolt = <1100000 1078000 1122000>; + opp-supported-hw = <0x06 0x0020>; + opp-suspend; + }; + + opp100@500000000 { + opp-hz = /bits/ 64 <500000000>; + opp-microvolt = <1100000 1078000 1122000>; + opp-supported-hw = <0x01 0xFFFF>; + }; + + opp100@600000000 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <1100000 1078000 1122000>; + opp-supported-hw = <0x06 0x0040>; + }; + + opp120@600000000 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <1200000 1176000 1224000>; + opp-supported-hw = <0x01 0xFFFF>; + }; + + opp120@720000000 { + opp-hz = /bits/ 64 <720000000>; + opp-microvolt = <1200000 1176000 1224000>; + opp-supported-hw = <0x06 0x0080>; + }; + + oppturbo@720000000 { + opp-hz = /bits/ 64 <720000000>; + opp-microvolt = <1260000 1234800 1285200>; + opp-supported-hw = <0x01 0xFFFF>; + }; + + oppturbo@800000000 { + opp-hz = /bits/ 64 <800000000>; + opp-microvolt = <1260000 1234800 1285200>; + opp-supported-hw = <0x06 0x0100>; + }; + + oppnitro@1000000000 { + opp-hz = /bits/ 64 <1000000000>; + opp-microvolt = <1325000 1298500 1351500>; + opp-supported-hw = <0x04 0x0200>; + }; + }; + pmu { compatible = "arm,cortex-a8-pmu"; interrupts = <3>; @@@ -249,7 -187,7 +249,7 @@@ reg = <0x49000000 0x10000>; reg-names = "edma3_cc"; interrupts = <12 13 14>; - interrupt-names = "edma3_ccint", "emda3_mperr", + interrupt-names = "edma3_ccint", "edma3_mperr", "edma3_ccerrint"; dma-requests = <64>; #dma-cells = <2>; @@@ -741,24 -679,20 +741,24 @@@ 0x48300200 0x48300200 0x80>; /* EHRPWM */
ecap0: ecap@48300100 { - compatible = "ti,am33xx-ecap"; + compatible = "ti,am3352-ecap", + "ti,am33xx-ecap"; #pwm-cells = <3>; reg = <0x48300100 0x80>; + clocks = <&l4ls_gclk>; + clock-names = "fck"; interrupts = <31>; interrupt-names = "ecap0"; - ti,hwmods = "ecap0"; status = "disabled"; };
ehrpwm0: pwm@48300200 { - compatible = "ti,am33xx-ehrpwm"; + compatible = "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48300200 0x80>; - ti,hwmods = "ehrpwm0"; + clocks = <&ehrpwm0_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -775,24 -709,20 +775,24 @@@ 0x48302200 0x48302200 0x80>; /* EHRPWM */
ecap1: ecap@48302100 { - compatible = "ti,am33xx-ecap"; + compatible = "ti,am3352-ecap", + "ti,am33xx-ecap"; #pwm-cells = <3>; reg = <0x48302100 0x80>; + clocks = <&l4ls_gclk>; + clock-names = "fck"; interrupts = <47>; interrupt-names = "ecap1"; - ti,hwmods = "ecap1"; status = "disabled"; };
ehrpwm1: pwm@48302200 { - compatible = "ti,am33xx-ehrpwm"; + compatible = "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48302200 0x80>; - ti,hwmods = "ehrpwm1"; + clocks = <&ehrpwm1_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -809,24 -739,20 +809,24 @@@ 0x48304200 0x48304200 0x80>; /* EHRPWM */
ecap2: ecap@48304100 { - compatible = "ti,am33xx-ecap"; + compatible = "ti,am3352-ecap", + "ti,am33xx-ecap"; #pwm-cells = <3>; reg = <0x48304100 0x80>; + clocks = <&l4ls_gclk>; + clock-names = "fck"; interrupts = <61>; interrupt-names = "ecap2"; - ti,hwmods = "ecap2"; status = "disabled"; };
ehrpwm2: pwm@48304200 { - compatible = "ti,am33xx-ehrpwm"; + compatible = "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48304200 0x80>; - ti,hwmods = "ehrpwm2"; + clocks = <&ehrpwm2_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -840,7 -766,6 +840,6 @@@ ale_entries = <1024>; bd_ram_size = <0x2000>; no_bd_ram = <0>; - rx_descs = <64>; mac_control = <0x20>; slaves = <2>; active_slave = <0>; @@@ -863,7 -788,7 +862,7 @@@ status = "disabled";
davinci_mdio: mdio@4a101000 { - compatible = "ti,davinci_mdio"; + compatible = "ti,cpsw-mdio","ti,davinci_mdio"; #address-cells = <1>; #size-cells = <0>; ti,hwmods = "davinci_mdio"; diff --combined arch/arm/boot/dts/am4372.dtsi index e7b53ef,cd81ecf..0fadae5 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@@ -44,49 -44,10 +44,49 @@@ clocks = <&dpll_mpu_ck>; clock-names = "cpu";
+ operating-points-v2 = <&cpu0_opp_table>; + ti,syscon-efuse = <&scm_conf 0x610 0x3f 0>; + ti,syscon-rev = <&scm_conf 0x600>; + clock-latency = <300000>; /* From omap-cpufreq driver */ }; };
+ cpu0_opp_table: opp_table0 { + compatible = "operating-points-v2"; + + opp50@300000000 { + opp-hz = /bits/ 64 <300000000>; + opp-microvolt = <950000 931000 969000>; + opp-supported-hw = <0xFF 0x01>; + opp-suspend; + }; + + opp100@600000000 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <1100000 1078000 1122000>; + opp-supported-hw = <0xFF 0x04>; + }; + + opp120@720000000 { + opp-hz = /bits/ 64 <720000000>; + opp-microvolt = <1200000 1176000 1224000>; + opp-supported-hw = <0xFF 0x08>; + }; + + oppturbo@800000000 { + opp-hz = /bits/ 64 <800000000>; + opp-microvolt = <1260000 1234800 1285200>; + opp-supported-hw = <0xFF 0x10>; + }; + + oppnitro@1000000000 { + opp-hz = /bits/ 64 <1000000000>; + opp-microvolt = <1325000 1298500 1351500>; + opp-supported-hw = <0xFF 0x20>; + }; + }; + gic: interrupt-controller@48241000 { compatible = "arm,cortex-a9-gic"; interrupt-controller; @@@ -238,7 -199,7 +238,7 @@@ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "edma3_ccint", "emda3_mperr", + interrupt-names = "edma3_ccint", "edma3_mperr", "edma3_ccerrint"; dma-requests = <64>; #dma-cells = <2>; @@@ -665,7 -626,6 +665,6 @@@ ale_entries = <1024>; bd_ram_size = <0x2000>; no_bd_ram = <0>; - rx_descs = <64>; mac_control = <0x20>; slaves = <2>; active_slave = <0>; @@@ -675,7 -635,7 +674,7 @@@ syscon = <&scm_conf>;
davinci_mdio: mdio@4a101000 { - compatible = "ti,am4372-mdio","ti,davinci_mdio"; + compatible = "ti,am4372-mdio","ti,cpsw-mdio","ti,davinci_mdio"; reg = <0x4a101000 0x100>; #address-cells = <1>; #size-cells = <0>; @@@ -711,24 -671,18 +710,24 @@@ status = "disabled";
ecap0: ecap@48300100 { - compatible = "ti,am4372-ecap","ti,am33xx-ecap"; + compatible = "ti,am4372-ecap", + "ti,am3352-ecap", + "ti,am33xx-ecap"; #pwm-cells = <3>; reg = <0x48300100 0x80>; - ti,hwmods = "ecap0"; + clocks = <&l4ls_gclk>; + clock-names = "fck"; status = "disabled"; };
ehrpwm0: pwm@48300200 { - compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm"; + compatible = "ti,am4372-ehrpwm", + "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48300200 0x80>; - ti,hwmods = "ehrpwm0"; + clocks = <&ehrpwm0_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -743,24 -697,18 +742,24 @@@ status = "disabled";
ecap1: ecap@48302100 { - compatible = "ti,am4372-ecap","ti,am33xx-ecap"; + compatible = "ti,am4372-ecap", + "ti,am3352-ecap", + "ti,am33xx-ecap"; #pwm-cells = <3>; reg = <0x48302100 0x80>; - ti,hwmods = "ecap1"; + clocks = <&l4ls_gclk>; + clock-names = "fck"; status = "disabled"; };
ehrpwm1: pwm@48302200 { - compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm"; + compatible = "ti,am4372-ehrpwm", + "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48302200 0x80>; - ti,hwmods = "ehrpwm1"; + clocks = <&ehrpwm1_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -775,24 -723,18 +774,24 @@@ status = "disabled";
ecap2: ecap@48304100 { - compatible = "ti,am4372-ecap","ti,am33xx-ecap"; + compatible = "ti,am4372-ecap", + "ti,am3352-ecap", + "ti,am33xx-ecap"; #pwm-cells = <3>; reg = <0x48304100 0x80>; - ti,hwmods = "ecap2"; + clocks = <&l4ls_gclk>; + clock-names = "fck"; status = "disabled"; };
ehrpwm2: pwm@48304200 { - compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm"; + compatible = "ti,am4372-ehrpwm", + "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48304200 0x80>; - ti,hwmods = "ehrpwm2"; + clocks = <&ehrpwm2_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -807,13 -749,10 +806,13 @@@ status = "disabled";
ehrpwm3: pwm@48306200 { - compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm"; + compatible = "ti,am4372-ehrpwm", + "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48306200 0x80>; - ti,hwmods = "ehrpwm3"; + clocks = <&ehrpwm3_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -828,13 -767,10 +827,13 @@@ status = "disabled";
ehrpwm4: pwm@48308200 { - compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm"; + compatible = "ti,am4372-ehrpwm", + "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48308200 0x80>; - ti,hwmods = "ehrpwm4"; + clocks = <&ehrpwm4_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -849,13 -785,10 +848,13 @@@ status = "disabled";
ehrpwm5: pwm@4830a200 { - compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm"; + compatible = "ti,am4372-ehrpwm", + "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x4830a200 0x80>; - ti,hwmods = "ehrpwm5"; + clocks = <&ehrpwm5_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -909,13 -842,6 +908,13 @@@ dma-names = "tx", "rx"; };
+ rng: rng@48310000 { + compatible = "ti,omap4-rng"; + ti,hwmods = "rng"; + reg = <0x48310000 0x2000>; + interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>; + }; + mcasp0: mcasp@48038000 { compatible = "ti,am33xx-mcasp-audio"; ti,hwmods = "mcasp0"; diff --combined arch/arm/boot/dts/dm814x.dtsi index a6e0aebc,f23cae0c..68e412c --- a/arch/arm/boot/dts/dm814x.dtsi +++ b/arch/arm/boot/dts/dm814x.dtsi @@@ -448,7 -448,7 +448,7 @@@ reg = <0x49000000 0x10000>; reg-names = "edma3_cc"; interrupts = <12 13 14>; - interrupt-names = "edma3_ccint", "emda3_mperr", + interrupt-names = "edma3_ccint", "edma3_mperr", "edma3_ccerrint"; dma-requests = <64>; #dma-cells = <2>; @@@ -509,7 -509,6 +509,6 @@@ ale_entries = <1024>; bd_ram_size = <0x2000>; no_bd_ram = <0>; - rx_descs = <64>; mac_control = <0x20>; slaves = <2>; active_slave = <0>; diff --combined arch/arm/boot/dts/dra7.dtsi index 40b69a5,de559f6..d9bfb94 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@@ -73,49 -73,6 +73,49 @@@ interrupt-parent = <&gic>; };
+ cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a15"; + reg = <0>; + + operating-points-v2 = <&cpu0_opp_table>; + ti,syscon-efuse = <&scm_wkup 0x20c 0xf80000 19>; + ti,syscon-rev = <&scm_wkup 0x204>; + + clocks = <&dpll_mpu_ck>; + clock-names = "cpu"; + + clock-latency = <300000>; /* From omap-cpufreq driver */ + + /* cooling options */ + cooling-min-level = <0>; + cooling-max-level = <2>; + #cooling-cells = <2>; /* min followed by max */ + }; + }; + + cpu0_opp_table: opp_table0 { + compatible = "operating-points-v2"; + opp-shared; + + opp_nom@1000000000 { + opp-hz = /bits/ 64 <1000000000>; + opp-microvolt = <1060000 850000 1150000>; + opp-supported-hw = <0xFF 0x01>; + opp-suspend; + }; + + opp_od@1176000000 { + opp-hz = /bits/ 64 <1176000000>; + opp-microvolt = <1160000 885000 1160000>; + opp-supported-hw = <0xFF 0x02>; + }; + }; + /* * The soc node represents the soc top level view. It is used for IPs * that are not memory mapped in the MPU view or for the MPU itself. @@@ -276,11 -233,6 +276,11 @@@ prm_clockdomains: clockdomains { }; }; + + scm_wkup: scm_conf@c000 { + compatible = "syscon"; + reg = <0xc000 0x1000>; + }; };
axi@0 { @@@ -324,7 -276,7 +324,7 @@@ ranges = <0x51800000 0x51800000 0x3000 0x0 0x30000000 0x10000000>; status = "disabled"; - pcie@51000000 { + pcie@51800000 { compatible = "ti,dra7-pcie"; reg = <0x51800000 0x2000>, <0x51802000 0x14c>, <0x1000 0x2000>; reg-names = "rc_dbics", "ti_conf", "config"; @@@ -352,53 -304,6 +352,53 @@@ }; };
+ ocmcram1: ocmcram@40300000 { + compatible = "mmio-sram"; + reg = <0x40300000 0x80000>; + ranges = <0x0 0x40300000 0x80000>; + #address-cells = <1>; + #size-cells = <1>; + /* + * This is a placeholder for an optional reserved + * region for use by secure software. The size + * of this region is not known until runtime so it + * is set as zero to either be updated to reserve + * space or left unchanged to leave all SRAM for use. + * On HS parts that require the reserved region + * either the bootloader can update the size to + * the required amount or the node can be overridden + * from the board dts file for the secure platform. + */ + sram-hs@0 { + compatible = "ti,secure-ram"; + reg = <0x0 0x0>; + }; + }; + + /* + * NOTE: ocmcram2 and ocmcram3 are not available on all + * DRA7xx and AM57xx variants. Confirm availability in + * the data manual for the exact part number in use + * before enabling these nodes in the board dts file. + */ + ocmcram2: ocmcram@40400000 { + status = "disabled"; + compatible = "mmio-sram"; + reg = <0x40400000 0x100000>; + ranges = <0x0 0x40400000 0x100000>; + #address-cells = <1>; + #size-cells = <1>; + }; + + ocmcram3: ocmcram@40500000 { + status = "disabled"; + compatible = "mmio-sram"; + reg = <0x40500000 0x100000>; + ranges = <0x0 0x40500000 0x100000>; + #address-cells = <1>; + #size-cells = <1>; + }; + bandgap: bandgap@4a0021e0 { reg = <0x4a0021e0 0xc 0x4a00232c 0xc @@@ -436,7 -341,7 +436,7 @@@ interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "edma3_ccint", "emda3_mperr", + interrupt-names = "edma3_ccint", "edma3_mperr", "edma3_ccerrint"; dma-requests = <64>; #dma-cells = <2>; @@@ -1723,7 -1628,6 +1723,6 @@@ ale_entries = <1024>; bd_ram_size = <0x2000>; no_bd_ram = <0>; - rx_descs = <64>; mac_control = <0x20>; slaves = <2>; active_slave = <0>; @@@ -1758,7 -1662,7 +1757,7 @@@ status = "disabled";
davinci_mdio: mdio@48485000 { - compatible = "ti,davinci_mdio"; + compatible = "ti,cpsw-mdio","ti,davinci_mdio"; #address-cells = <1>; #size-cells = <0>; ti,hwmods = "davinci_mdio"; @@@ -1840,149 -1744,6 +1839,149 @@@ clock-names = "fck", "sys_clk"; }; }; + + epwmss0: epwmss@4843e000 { + compatible = "ti,dra746-pwmss", "ti,am33xx-pwmss"; + reg = <0x4843e000 0x30>; + ti,hwmods = "epwmss0"; + #address-cells = <1>; + #size-cells = <1>; + status = "disabled"; + ranges; + + ehrpwm0: pwm@4843e200 { + compatible = "ti,dra746-ehrpwm", + "ti,am3352-ehrpwm"; + #pwm-cells = <3>; + reg = <0x4843e200 0x80>; + clocks = <&ehrpwm0_tbclk>, <&l4_root_clk_div>; + clock-names = "tbclk", "fck"; + status = "disabled"; + }; + + ecap0: ecap@4843e100 { + compatible = "ti,dra746-ecap", + "ti,am3352-ecap"; + #pwm-cells = <3>; + reg = <0x4843e100 0x80>; + clocks = <&l4_root_clk_div>; + clock-names = "fck"; + status = "disabled"; + }; + }; + + epwmss1: epwmss@48440000 { + compatible = "ti,dra746-pwmss", "ti,am33xx-pwmss"; + reg = <0x48440000 0x30>; + ti,hwmods = "epwmss1"; + #address-cells = <1>; + #size-cells = <1>; + status = "disabled"; + ranges; + + ehrpwm1: pwm@48440200 { + compatible = "ti,dra746-ehrpwm", + "ti,am3352-ehrpwm"; + #pwm-cells = <3>; + reg = <0x48440200 0x80>; + clocks = <&ehrpwm1_tbclk>, <&l4_root_clk_div>; + clock-names = "tbclk", "fck"; + status = "disabled"; + }; + + ecap1: ecap@48440100 { + compatible = "ti,dra746-ecap", + "ti,am3352-ecap"; + #pwm-cells = <3>; + reg = <0x48440100 0x80>; + clocks = <&l4_root_clk_div>; + clock-names = "fck"; + status = "disabled"; + }; + }; + + epwmss2: epwmss@48442000 { + compatible = "ti,dra746-pwmss", "ti,am33xx-pwmss"; + reg = <0x48442000 0x30>; + ti,hwmods = "epwmss2"; + #address-cells = <1>; + #size-cells = <1>; + status = "disabled"; + ranges; + + ehrpwm2: pwm@48442200 { + compatible = "ti,dra746-ehrpwm", + "ti,am3352-ehrpwm"; + #pwm-cells = <3>; + reg = <0x48442200 0x80>; + clocks = <&ehrpwm2_tbclk>, <&l4_root_clk_div>; + clock-names = "tbclk", "fck"; + status = "disabled"; + }; + + ecap2: ecap@48442100 { + compatible = "ti,dra746-ecap", + "ti,am3352-ecap"; + #pwm-cells = <3>; + reg = <0x48442100 0x80>; + clocks = <&l4_root_clk_div>; + clock-names = "fck"; + status = "disabled"; + }; + }; + + aes1: aes@4b500000 { + compatible = "ti,omap4-aes"; + ti,hwmods = "aes1"; + reg = <0x4b500000 0xa0>; + interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>; + dmas = <&edma_xbar 111 0>, <&edma_xbar 110 0>; + dma-names = "tx", "rx"; + clocks = <&l3_iclk_div>; + clock-names = "fck"; + }; + + aes2: aes@4b700000 { + compatible = "ti,omap4-aes"; + ti,hwmods = "aes2"; + reg = <0x4b700000 0xa0>; + interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>; + dmas = <&edma_xbar 114 0>, <&edma_xbar 113 0>; + dma-names = "tx", "rx"; + clocks = <&l3_iclk_div>; + clock-names = "fck"; + }; + + des: des@480a5000 { + compatible = "ti,omap4-des"; + ti,hwmods = "des"; + reg = <0x480a5000 0xa0>; + interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>; + dmas = <&sdma_xbar 117>, <&sdma_xbar 116>; + dma-names = "tx", "rx"; + clocks = <&l3_iclk_div>; + clock-names = "fck"; + }; + + sham: sham@53100000 { + compatible = "ti,omap5-sham"; + ti,hwmods = "sham"; + reg = <0x4b101000 0x300>; + interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>; + dmas = <&edma_xbar 119 0>; + dma-names = "rx"; + clocks = <&l3_iclk_div>; + clock-names = "fck"; + }; + + rng: rng@48090000 { + compatible = "ti,omap4-rng"; + ti,hwmods = "rng"; + reg = <0x48090000 0x2000>; + interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&l3_iclk_div>; + 
clock-names = "fck"; + }; };
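For readers following the new operating-points-v2 tables in am33xx, am4372, and dra7 above: each opp-supported-hw entry is a list of bitmask cells that the OPP core compares against hardware version values supplied by platform code, on these SoCs read from the registers named by ti,syscon-efuse and ti,syscon-rev. An OPP is considered usable only if every cell ANDs non-zero with its corresponding value. A minimal sketch reusing the dra7 nominal OPP (the comment describing the two cells' meaning is an interpretation, not text from the patch):

    opp_nom@1000000000 {
            opp-hz = /bits/ 64 <1000000000>;
            opp-microvolt = <1060000 850000 1150000>;
            /* <revision-mask efuse-mask>: enabled only when both the
             * silicon-revision bits intersect 0xFF and the efuse
             * bits intersect 0x01 */
            opp-supported-hw = <0xFF 0x01>;
            opp-suspend;
    };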
thermal_zones: thermal-zones { diff --combined arch/arm/boot/dts/rk3288.dtsi index 7fa932f,3ebee53..cd33f01 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi @@@ -539,8 -539,9 +539,9 @@@ gmac: ethernet@ff290000 { compatible = "rockchip,rk3288-gmac"; reg = <0xff290000 0x10000>; - interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "macirq"; + interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "macirq", "eth_wake_irq"; rockchip,grf = <&grf>; clocks = <&cru SCLK_MAC>, <&cru SCLK_MAC_RX>, <&cru SCLK_MAC_TX>, @@@ -826,11 -827,6 +827,11 @@@ #phy-cells = <0>; status = "disabled"; }; + + io_domains: io-domains { + compatible = "rockchip,rk3288-io-voltage-domain"; + status = "disabled"; + }; };
wdt: watchdog@ff800000 { diff --combined arch/arm64/boot/dts/broadcom/ns2-svk.dts index b062a44,ea5603f..0862b3e --- a/arch/arm64/boot/dts/broadcom/ns2-svk.dts +++ b/arch/arm64/boot/dts/broadcom/ns2-svk.dts @@@ -40,14 -40,10 +40,14 @@@
aliases { serial0 = &uart3; + serial1 = &uart0; + serial2 = &uart1; + serial3 = &uart2; };
chosen { stdout-path = "serial0:115200n8"; + bootargs = "earlycon=uart8250,mmio32,0x66130000"; };
memory { @@@ -56,6 -52,14 +56,14 @@@ }; };
+ &pci_phy0 { + status = "ok"; + }; + + &pci_phy1 { + status = "ok"; + }; + &pcie0 { status = "ok"; }; @@@ -72,18 -76,6 +80,18 @@@ status = "ok"; };
+&uart0 { + status = "ok"; +}; + +&uart1 { + status = "ok"; +}; + +&uart2 { + status = "ok"; +}; + &uart3 { status = "ok"; }; @@@ -133,18 -125,6 +141,18 @@@ }; };
+&sata_phy0 { + status = "ok"; +}; + +&sata_phy1 { + status = "ok"; +}; + +&sata { + status = "ok"; +}; + &sdio0 { status = "ok"; }; @@@ -161,11 -141,10 +169,19 @@@ }; };
+&pinctrl { + pinctrl-names = "default"; + pinctrl-0 = <&nand_sel>; + nand_sel: nand_sel { + function = "nand"; + groups = "nand_grp"; + }; +}; ++ + &mdio_mux_iproc { + mdio@10 { + gphy0: eth-phy@10 { + reg = <0x10>; + }; + }; + }; diff --combined arch/arm64/boot/dts/broadcom/ns2.dtsi index d1dc812,46b78fa..f53b095 --- a/arch/arm64/boot/dts/broadcom/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi @@@ -251,22 -251,6 +251,22 @@@ mmu-masters; };
+ pinctrl: pinctrl@6501d130 { + compatible = "brcm,ns2-pinmux"; + reg = <0x6501d130 0x08>, + <0x660a0028 0x04>, + <0x660009b0 0x40>; + }; + + gpio_aon: gpio@65024800 { + compatible = "brcm,iproc-gpio"; + reg = <0x65024800 0x50>, + <0x65024008 0x18>; + ngpios = <6>; + #gpio-cells = <2>; + gpio-controller; + }; + gic: interrupt-controller@65210000 { compatible = "arm,gic-400"; #interrupt-cells = <3>; @@@ -279,26 -263,45 +279,65 @@@ IRQ_TYPE_LEVEL_HIGH)>; };
+ cci@65590000 { + compatible = "arm,cci-400"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x65590000 0x1000>; + ranges = <0 0x65590000 0x10000>; + + pmu@9000 { + compatible = "arm,cci-400-pmu,r1", + "arm,cci-400-pmu"; + reg = <0x9000 0x4000>; + interrupts = <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH>; + }; + }; + + mdio_mux_iproc: mdio-mux@6602023c { + compatible = "brcm,mdio-mux-iproc"; + reg = <0x6602023c 0x14>; + #address-cells = <1>; + #size-cells = <0>; + + mdio@0 { + reg = <0x0>; + #address-cells = <1>; + #size-cells = <0>; + + pci_phy0: pci-phy@0 { + compatible = "brcm,ns2-pcie-phy"; + reg = <0x0>; + #phy-cells = <0>; + status = "disabled"; + }; + }; + + mdio@7 { + reg = <0x7>; + #address-cells = <1>; + #size-cells = <0>; + + pci_phy1: pci-phy@0 { + compatible = "brcm,ns2-pcie-phy"; + reg = <0x0>; + #phy-cells = <0>; + status = "disabled"; + }; + }; + + mdio@10 { + reg = <0x10>; + #address-cells = <1>; + #size-cells = <0>; + }; + }; + timer0: timer@66030000 { compatible = "arm,sp804", "arm,primecell"; reg = <0x66030000 0x1000>; @@@ -357,16 -360,6 +396,16 @@@ clock-names = "wdogclk", "apb_pclk"; };
+ gpio_g: gpio@660a0000 { + compatible = "brcm,iproc-gpio"; + reg = <0x660a0000 0x50>; + ngpios = <32>; + #gpio-cells = <2>; + gpio-controller; + interrupt-controller; + interrupts = <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>; + }; + i2c1: i2c@660b0000 { compatible = "brcm,iproc-i2c"; reg = <0x660b0000 0x100>; @@@ -377,36 -370,6 +416,36 @@@ status = "disabled"; };
+ uart0: serial@66100000 { + compatible = "snps,dw-apb-uart"; + reg = <0x66100000 0x100>; + interrupts = <GIC_SPI 390 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&iprocslow>; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + uart1: serial@66110000 { + compatible = "snps,dw-apb-uart"; + reg = <0x66110000 0x100>; + interrupts = <GIC_SPI 391 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&iprocslow>; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + uart2: serial@66120000 { + compatible = "snps,dw-apb-uart"; + reg = <0x66120000 0x100>; + interrupts = <GIC_SPI 392 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&iprocslow>; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + uart3: serial@66130000 { compatible = "snps,dw-apb-uart"; reg = <0x66130000 0x100>; @@@ -444,49 -407,6 +483,49 @@@ reg = <0x66220000 0x28>; };
+ sata_phy: sata_phy@663f0100 { + compatible = "brcm,iproc-ns2-sata-phy"; + reg = <0x663f0100 0x1f00>, + <0x663f004c 0x10>; + reg-names = "phy", "phy-ctrl"; + #address-cells = <1>; + #size-cells = <0>; + + sata_phy0: sata-phy@0 { + reg = <0>; + #phy-cells = <0>; + status = "disabled"; + }; + + sata_phy1: sata-phy@1 { + reg = <1>; + #phy-cells = <0>; + status = "disabled"; + }; + }; + + sata: ahci@663f2000 { + compatible = "brcm,iproc-ahci", "generic-ahci"; + reg = <0x663f2000 0x1000>; + reg-names = "ahci"; + interrupts = <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + sata0: sata-port@0 { + reg = <0>; + phys = <&sata_phy0>; + phy-names = "sata-phy"; + }; + + sata1: sata-port@1 { + reg = <1>; + phys = <&sata_phy1>; + phy-names = "sata-phy"; + }; + }; + sdio0: sdhci@66420000 { compatible = "brcm,sdhci-iproc-cygnus"; reg = <0x66420000 0x100>; diff --combined arch/s390/kernel/perf_cpum_sf.c index 9ea26df,92619cc..53acf2d --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@@ -601,12 -601,17 +601,12 @@@ static void release_pmc_hardware(void
irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); on_each_cpu(setup_pmc_cpu, &flags, 1); - perf_release_sampling(); }
static int reserve_pmc_hardware(void) { int flags = PMC_INIT; - int err;
- err = perf_reserve_sampling(); - if (err) - return err; on_each_cpu(setup_pmc_cpu, &flags, 1); if (flags & PMC_FAILURE) { release_pmc_hardware(); @@@ -974,12 -979,15 +974,15 @@@ static int perf_push_sample(struct perf struct pt_regs regs; struct perf_sf_sde_regs *sde_regs; struct perf_sample_data data; - struct perf_raw_record raw; + struct perf_raw_record raw = { + .frag = { + .size = sfr->size, + .data = sfr, + }, + };
/* Setup perf sample */ perf_sample_data_init(&data, 0, event->hw.last_period); - raw.size = sfr->size; - raw.data = sfr; data.raw = &raw;
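The raw-sample change above tracks a cross-tree conversion: perf_raw_record now carries its payload inside a .frag member, so the size/data assignments move into a designated initializer at declaration time. A minimal sketch of the new pattern, assuming a caller with a payload buffer buf of len bytes (buf and len are placeholder names):

    struct perf_raw_record raw = {
            .frag = {
                    .size = len,   /* length of the raw payload */
                    .data = buf,   /* pointer to the raw payload */
            },
    };

    perf_sample_data_init(&data, 0, event->hw.last_period);
    data.raw = &raw;   /* attach the raw record to the sample */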
/* Setup pt_regs to look like a CPU-measurement external interrupt diff --combined drivers/net/ethernet/agere/et131x.c index 821d86c,cd7e2e5..c83ebae --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@@ -440,7 -440,6 +440,6 @@@ struct et131x_adapter struct net_device *netdev; struct pci_dev *pdev; struct mii_bus *mii_bus; - struct phy_device *phydev; struct napi_struct napi;
/* Flags that indicate current state of the adapter */ @@@ -864,7 -863,7 +863,7 @@@ static void et1310_config_mac_regs2(str { int32_t delay = 0; struct mac_regs __iomem *mac = &adapter->regs->mac; - struct phy_device *phydev = adapter->phydev; + struct phy_device *phydev = adapter->netdev->phydev; u32 cfg1; u32 cfg2; u32 ifctrl; @@@ -1035,7 -1034,7 +1034,7 @@@ static void et1310_setup_device_for_uni static void et1310_config_rxmac_regs(struct et131x_adapter *adapter) { struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; - struct phy_device *phydev = adapter->phydev; + struct phy_device *phydev = adapter->netdev->phydev; u32 sa_lo; u32 sa_hi = 0; u32 pf_ctrl = 0; @@@ -1230,7 -1229,7 +1229,7 @@@ out
static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value) { - struct phy_device *phydev = adapter->phydev; + struct phy_device *phydev = adapter->netdev->phydev;
if (!phydev) return -EIO; @@@ -1311,7 -1310,7 +1310,7 @@@ static void et1310_phy_read_mii_bit(str
static void et1310_config_flow_control(struct et131x_adapter *adapter) { - struct phy_device *phydev = adapter->phydev; + struct phy_device *phydev = adapter->netdev->phydev;
if (phydev->duplex == DUPLEX_HALF) { adapter->flow = FLOW_NONE; @@@ -1456,7 -1455,7 +1455,7 @@@ static int et131x_mdio_write(struct mii static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down) { u16 data; - struct phy_device *phydev = adapter->phydev; + struct phy_device *phydev = adapter->netdev->phydev;
et131x_mii_read(adapter, MII_BMCR, &data); data &= ~BMCR_PDOWN; @@@ -1469,7 -1468,7 +1468,7 @@@ static void et131x_xcvr_init(struct et131x_adapter *adapter) { u16 lcr2; - struct phy_device *phydev = adapter->phydev; + struct phy_device *phydev = adapter->netdev->phydev;
/* Set the LED behavior such that LED 1 indicates speed (off = * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates @@@ -2111,7 -2110,7 +2110,7 @@@ static int et131x_init_recv(struct et13 /* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate */ static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter) { - struct phy_device *phydev = adapter->phydev; + struct phy_device *phydev = adapter->netdev->phydev;
/* For version B silicon, we do not use the RxDMA timer for 10 and 100 * Mbits/s line rates. We do not enable any RxDMA interrupt coalescing. @@@ -2426,7 -2425,7 +2425,7 @@@ static int nic_send_packet(struct et131 struct sk_buff *skb = tcb->skb; u32 nr_frags = skb_shinfo(skb)->nr_frags + 1; struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0]; - struct phy_device *phydev = adapter->phydev; + struct phy_device *phydev = adapter->netdev->phydev; dma_addr_t dma_addr; struct tx_ring *tx_ring = &adapter->tx_ring;
@@@ -2791,22 -2790,6 +2790,6 @@@ static void et131x_handle_send_pkts(str spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); }
- static int et131x_get_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) - { - struct et131x_adapter *adapter = netdev_priv(netdev); - - return phy_ethtool_gset(adapter->phydev, cmd); - } - - static int et131x_set_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) - { - struct et131x_adapter *adapter = netdev_priv(netdev); - - return phy_ethtool_sset(adapter->phydev, cmd); - } - static int et131x_get_regs_len(struct net_device *netdev) { #define ET131X_REGS_LEN 256 @@@ -2979,12 -2962,12 +2962,12 @@@ static void et131x_get_drvinfo(struct n }
static struct ethtool_ops et131x_ethtool_ops = { - .get_settings = et131x_get_settings, - .set_settings = et131x_set_settings, .get_drvinfo = et131x_get_drvinfo, .get_regs_len = et131x_get_regs_len, .get_regs = et131x_get_regs, .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, };
/* et131x_hwaddr_init - set up the MAC Address */ @@@ -3098,7 -3081,7 +3081,7 @@@ err_out static void et131x_error_timer_handler(unsigned long data) { struct et131x_adapter *adapter = (struct et131x_adapter *)data; - struct phy_device *phydev = adapter->phydev; + struct phy_device *phydev = adapter->netdev->phydev;
if (et1310_in_phy_coma(adapter)) { /* Bring the device immediately out of coma, to @@@ -3168,7 -3151,7 +3151,7 @@@ static int et131x_adapter_memory_alloc( static void et131x_adjust_link(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); - struct phy_device *phydev = adapter->phydev; + struct phy_device *phydev = netdev->phydev;
if (!phydev) return; @@@ -3287,7 -3270,6 +3270,6 @@@ static int et131x_mii_probe(struct net_
phydev->advertising = phydev->supported; phydev->autoneg = AUTONEG_ENABLE; - adapter->phydev = phydev;
phy_attached_info(phydev);
@@@ -3323,7 -3305,7 +3305,7 @@@ static void et131x_pci_remove(struct pc
unregister_netdev(netdev); netif_napi_del(&adapter->napi); - phy_disconnect(adapter->phydev); + phy_disconnect(netdev->phydev); mdiobus_unregister(adapter->mii_bus); mdiobus_free(adapter->mii_bus);
@@@ -3338,20 -3320,16 +3320,16 @@@
static void et131x_up(struct net_device *netdev) { - struct et131x_adapter *adapter = netdev_priv(netdev); - et131x_enable_txrx(netdev); - phy_start(adapter->phydev); + phy_start(netdev->phydev); }
static void et131x_down(struct net_device *netdev) { - struct et131x_adapter *adapter = netdev_priv(netdev); - /* Save the timestamp for the TX watchdog, prevent a timeout */ netif_trans_update(netdev);
- phy_stop(adapter->phydev); + phy_stop(netdev->phydev); et131x_disable_txrx(netdev); }
@@@ -3684,12 -3662,10 +3662,10 @@@ static int et131x_close(struct net_devi static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, int cmd) { - struct et131x_adapter *adapter = netdev_priv(netdev); - - if (!adapter->phydev) + if (!netdev->phydev) return -EINVAL;
- return phy_mii_ioctl(adapter->phydev, reqbuf, cmd); + return phy_mii_ioctl(netdev->phydev, reqbuf, cmd); }
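This hunk shows the pattern used throughout the et131x conversion (and in nb8800 further down): phylib already stores the attached PHY in net_device->phydev when phy_connect()/of_phy_connect() succeeds, so the private adapter->phydev copy is redundant and callers can read the core pointer directly. A condensed sketch of the idiom, with foo_ prefixes standing in for a hypothetical driver:

    static int foo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
    {
            /* ndev->phydev was populated by phy_connect() in foo_open() */
            if (!ndev->phydev)
                    return -EINVAL;

            return phy_mii_ioctl(ndev->phydev, ifr, cmd);
    }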
/* et131x_set_packet_filter - Configures the Rx Packet filtering */ @@@ -3851,7 -3827,7 +3827,7 @@@ static void et131x_tx_timeout(struct ne unsigned long flags;
/* If the device is closed, ignore the timeout */ - if (~(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE)) + if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE)) return;
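The one-character change above is a genuine bug fix, not a cleanup: ~ is bitwise NOT, so ~(flags & BIT) is non-zero for any single-bit mask regardless of whether the bit is set, and the "device is closed" early return fired unconditionally, silently ignoring every TX timeout; ! performs the logical test that was intended. A standalone demonstration (the 0x4 value is made up for illustration):

    #include <stdio.h>

    #define IN_USE 0x4u   /* stand-in for FMP_ADAPTER_INTERRUPT_IN_USE */

    int main(void)
    {
            unsigned int flags = IN_USE;   /* device is open */

            /* ~(0x4 & 0x4) == 0xfffffffb: non-zero, wrongly "closed" */
            printf("bitwise: %d\n", ~(flags & IN_USE) ? 1 : 0);
            /* !(0x4 & 0x4) == 0: correctly reports "open" */
            printf("logical: %d\n", !(flags & IN_USE));
            return 0;
    }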
/* Any nonrecoverable hardware error? @@@ -4073,7 -4049,7 +4049,7 @@@ out return rc;
err_phy_disconnect: - phy_disconnect(adapter->phydev); + phy_disconnect(netdev->phydev); err_mdio_unregister: mdiobus_unregister(adapter->mii_bus); err_mdio_free: diff --combined drivers/net/ethernet/aurora/nb8800.c index 1a3555d,0d3aa1d..0d4ea92 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@@ -259,7 -259,6 +259,7 @@@ static void nb8800_receive(struct net_d if (err) { netdev_err(dev, "rx buffer allocation failed\n"); dev->stats.rx_dropped++; + dev_kfree_skb(skb); return; }
@@@ -632,7 -631,7 +632,7 @@@ static void nb8800_mac_config(struct ne static void nb8800_pause_config(struct net_device *dev) { struct nb8800_priv *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; u32 rxcr;
if (priv->pause_aneg) { @@@ -665,7 -664,7 +665,7 @@@ static void nb8800_link_reconfigure(struct net_device *dev) { struct nb8800_priv *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; int change = 0;
if (phydev->link) { @@@ -691,7 -690,7 +691,7 @@@ }
if (change) - phy_print_status(priv->phydev); + phy_print_status(phydev); }
static void nb8800_update_mac_addr(struct net_device *dev) @@@ -936,9 -935,10 +936,10 @@@ static int nb8800_dma_stop(struct net_d static void nb8800_pause_adv(struct net_device *dev) { struct nb8800_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; u32 adv = 0;
- if (!priv->phydev) + if (!phydev) return;
if (priv->pause_rx) @@@ -946,13 -946,14 +947,14 @@@ if (priv->pause_tx) adv ^= ADVERTISED_Asym_Pause;
- priv->phydev->supported |= adv; - priv->phydev->advertising |= adv; + phydev->supported |= adv; + phydev->advertising |= adv; }
static int nb8800_open(struct net_device *dev) { struct nb8800_priv *priv = netdev_priv(dev); + struct phy_device *phydev; int err;
/* clear any pending interrupts */ @@@ -970,10 -971,10 +972,10 @@@ nb8800_mac_rx(dev, true); nb8800_mac_tx(dev, true);
- priv->phydev = of_phy_connect(dev, priv->phy_node, - nb8800_link_reconfigure, 0, - priv->phy_mode); - if (!priv->phydev) + phydev = of_phy_connect(dev, priv->phy_node, + nb8800_link_reconfigure, 0, + priv->phy_mode); + if (!phydev) goto err_free_irq;
nb8800_pause_adv(dev); @@@ -983,7 -984,7 +985,7 @@@ netif_start_queue(dev);
nb8800_start_rx(dev); - phy_start(priv->phydev); + phy_start(phydev);
return 0;
@@@ -998,8 -999,9 +1000,9 @@@ err_free_dma static int nb8800_stop(struct net_device *dev) { struct nb8800_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev;
- phy_stop(priv->phydev); + phy_stop(phydev);
netif_stop_queue(dev); napi_disable(&priv->napi); @@@ -1008,8 -1010,7 +1011,7 @@@ nb8800_mac_rx(dev, false); nb8800_mac_tx(dev, false);
- phy_disconnect(priv->phydev); - priv->phydev = NULL; + phy_disconnect(phydev);
free_irq(dev->irq, dev);
@@@ -1020,9 -1021,7 +1022,7 @@@
static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct nb8800_priv *priv = netdev_priv(dev); - - return phy_mii_ioctl(priv->phydev, rq, cmd); + return phy_mii_ioctl(dev->phydev, rq, cmd); }
static const struct net_device_ops nb8800_netdev_ops = { @@@ -1036,34 -1035,14 +1036,14 @@@ .ndo_validate_addr = eth_validate_addr, };
- static int nb8800_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) - { - struct nb8800_priv *priv = netdev_priv(dev); - - if (!priv->phydev) - return -ENODEV; - - return phy_ethtool_gset(priv->phydev, cmd); - } - - static int nb8800_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) - { - struct nb8800_priv *priv = netdev_priv(dev); - - if (!priv->phydev) - return -ENODEV; - - return phy_ethtool_sset(priv->phydev, cmd); - } - static int nb8800_nway_reset(struct net_device *dev) { - struct nb8800_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev;
- if (!priv->phydev) + if (!phydev) return -ENODEV;
- return genphy_restart_aneg(priv->phydev); + return genphy_restart_aneg(phydev); }
static void nb8800_get_pauseparam(struct net_device *dev, @@@ -1080,6 -1059,7 +1060,7 @@@ static int nb8800_set_pauseparam(struc struct ethtool_pauseparam *pp) { struct nb8800_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev;
priv->pause_aneg = pp->autoneg; priv->pause_rx = pp->rx_pause; @@@ -1089,8 -1069,8 +1070,8 @@@
if (!priv->pause_aneg) nb8800_pause_config(dev); - else if (priv->phydev) - phy_start_aneg(priv->phydev); + else if (phydev) + phy_start_aneg(phydev);
return 0; } @@@ -1183,8 -1163,6 +1164,6 @@@ static void nb8800_get_ethtool_stats(st }
static const struct ethtool_ops nb8800_ethtool_ops = { - .get_settings = nb8800_get_settings, - .set_settings = nb8800_set_settings, .nway_reset = nb8800_nway_reset, .get_link = ethtool_op_get_link, .get_pauseparam = nb8800_get_pauseparam, @@@ -1192,6 -1170,8 +1171,8 @@@ .get_sset_count = nb8800_get_sset_count, .get_strings = nb8800_get_strings, .get_ethtool_stats = nb8800_get_ethtool_stats, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, };
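Both nb8800 here and et131x earlier retire their driver-specific get_settings/set_settings in favour of the generic phylib link_ksettings helpers; because those helpers take the PHY from dev->phydev, they need no driver glue once the private phydev pointers are gone. A minimal sketch of the resulting ops table for a hypothetical foo_ driver:

    #include <linux/ethtool.h>
    #include <linux/phy.h>

    static const struct ethtool_ops foo_ethtool_ops = {
            .get_link           = ethtool_op_get_link,
            /* both helpers operate on dev->phydev directly */
            .get_link_ksettings = phy_ethtool_get_link_ksettings,
            .set_link_ksettings = phy_ethtool_set_link_ksettings,
    };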
static int nb8800_hw_init(struct net_device *dev) @@@ -1438,7 -1418,7 +1419,7 @@@ static int nb8800_probe(struct platform if (ops && ops->reset) { ret = ops->reset(dev); if (ret) - goto err_free_dev; + goto err_disable_clk; }
bus = devm_mdiobus_alloc(&pdev->dev); diff --combined drivers/net/ethernet/broadcom/bgmac.c index 25bbae5,13b0725..c4751ec --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@@ -6,51 -6,27 +6,27 @@@ * Licensed under the GNU/GPL. See COPYING for details. */
- #include "bgmac.h"
- #include <linux/kernel.h> - #include <linux/module.h> - #include <linux/delay.h> + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + + #include <linux/bcma/bcma.h> #include <linux/etherdevice.h> - #include <linux/mii.h> - #include <linux/phy.h> - #include <linux/phy_fixed.h> - #include <linux/interrupt.h> - #include <linux/dma-mapping.h> #include <linux/bcm47xx_nvram.h> + #include "bgmac.h"
- static const struct bcma_device_id bgmac_bcma_tbl[] = { - BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS), - BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS), - {}, - }; - MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl); - - static inline bool bgmac_is_bcm4707_family(struct bgmac *bgmac) - { - switch (bgmac->core->bus->chipinfo.id) { - case BCMA_CHIP_ID_BCM4707: - case BCMA_CHIP_ID_BCM47094: - case BCMA_CHIP_ID_BCM53018: - return true; - default: - return false; - } - } - - static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask, + static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask, u32 value, int timeout) { u32 val; int i;
for (i = 0; i < timeout / 10; i++) { - val = bcma_read32(core, reg); + val = bgmac_read(bgmac, reg); if ((val & mask) == value) return true; udelay(10); } - pr_err("Timeout waiting for reg 0x%X\n", reg); + dev_err(bgmac->dev, "Timeout waiting for reg 0x%X\n", reg); return false; }
@@@ -84,22 -60,22 +60,22 @@@ static void bgmac_dma_tx_reset(struct b udelay(10); } if (i) - bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n", - ring->mmio_base, val); + dev_err(bgmac->dev, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n", + ring->mmio_base, val);
/* Remove SUSPEND bit */ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0); - if (!bgmac_wait_value(bgmac->core, + if (!bgmac_wait_value(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS, BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED, 10000)) { - bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n", - ring->mmio_base); + dev_warn(bgmac->dev, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n", + ring->mmio_base); udelay(300); val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS); if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED) - bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n", - ring->mmio_base); + dev_err(bgmac->dev, "Reset of DMA TX ring 0x%X failed\n", + ring->mmio_base); } }
@@@ -109,7 -85,7 +85,7 @@@ static void bgmac_dma_tx_enable(struct u32 ctl;
ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL); - if (bgmac->core->id.rev >= 4) { + if (bgmac->feature_flags & BGMAC_FEAT_TX_MASK_SETUP) { ctl &= ~BGMAC_DMA_TX_BL_MASK; ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;
@@@ -152,7 -128,7 +128,7 @@@ static netdev_tx_t bgmac_dma_tx_add(str struct bgmac_dma_ring *ring, struct sk_buff *skb) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; struct net_device *net_dev = bgmac->net_dev; int index = ring->end % BGMAC_TX_RING_SLOTS; struct bgmac_slot_info *slot = &ring->slots[index]; @@@ -161,7 -137,7 +137,7 @@@ int i;
if (skb->len > BGMAC_DESC_CTL1_LEN) { - bgmac_err(bgmac, "Too long skb (%d)\n", skb->len); + netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len); goto err_drop; }
@@@ -174,7 -150,7 +150,7 @@@ * even when ring->end overflows */ if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) { - bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n"); + netdev_err(bgmac->net_dev, "TX ring is full, queue should be stopped!\n"); netif_stop_queue(net_dev); return NETDEV_TX_BUSY; } @@@ -231,7 -207,7 +207,7 @@@ err_dma dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb), DMA_TO_DEVICE);
- while (i > 0) { + while (i-- > 0) { int index = (ring->end + i) % BGMAC_TX_RING_SLOTS; struct bgmac_slot_info *slot = &ring->slots[index]; u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1); @@@ -241,18 -217,20 +217,20 @@@ }
err_dma_head: - bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n", - ring->mmio_base); + netdev_err(bgmac->net_dev, "Mapping error of skb on ring 0x%X\n", + ring->mmio_base);
err_drop: dev_kfree_skb(skb); + net_dev->stats.tx_dropped++; + net_dev->stats.tx_errors++; return NETDEV_TX_OK; }
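The while (i-- > 0) change above is a genuine fix, not churn: nothing in the visible loop body decrements i, so the old while (i > 0) form made no progress and a failed fragment mapping left it spinning on the same slot. The post-decrement form walks the already-mapped fragments from i-1 down to 0, reversing the mapping order. A minimal sketch of the unwind pattern (unmap_one() is a hypothetical stand-in for the dma_unmap_page() call in the real body):

    /* fragments 0..i-1 were mapped before fragment i failed */
    while (i-- > 0)
            unmap_one(ring, (ring->end + i) % BGMAC_TX_RING_SLOTS);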
/* Free transmitted packets */ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; int empty_slot; bool freed = false; unsigned bytes_compl = 0, pkts_compl = 0; @@@ -285,6 -263,8 +263,8 @@@ DMA_TO_DEVICE);
if (slot->skb) { + bgmac->net_dev->stats.tx_bytes += slot->skb->len; + bgmac->net_dev->stats.tx_packets++; bytes_compl += slot->skb->len; pkts_compl++;
@@@ -313,12 -293,12 +293,12 @@@ static void bgmac_dma_rx_reset(struct b return;
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0); - if (!bgmac_wait_value(bgmac->core, + if (!bgmac_wait_value(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS, BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED, 10000)) - bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n", - ring->mmio_base); + dev_err(bgmac->dev, "Reset of ring 0x%X RX failed\n", + ring->mmio_base); }
static void bgmac_dma_rx_enable(struct bgmac *bgmac, @@@ -327,7 -307,7 +307,7 @@@ u32 ctl;
ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL); - if (bgmac->core->id.rev >= 4) { + if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) { ctl &= ~BGMAC_DMA_RX_BL_MASK; ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;
@@@ -348,7 -328,7 +328,7 @@@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac, struct bgmac_slot_info *slot) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; dma_addr_t dma_addr; struct bgmac_rx_header *rx; void *buf; @@@ -367,7 -347,7 +347,7 @@@ dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET, BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(dma_dev, dma_addr)) { - bgmac_err(bgmac, "DMA mapping error\n"); + netdev_err(bgmac->net_dev, "DMA mapping error\n"); put_page(virt_to_head_page(buf)); return -ENOMEM; } @@@ -437,7 -417,7 +417,7 @@@ static int bgmac_dma_rx_read(struct bgm end_slot /= sizeof(struct bgmac_dma_desc);
while (ring->start != end_slot) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; struct bgmac_slot_info *slot = &ring->slots[ring->start]; struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET; struct sk_buff *skb; @@@ -462,16 -442,19 +442,19 @@@
/* Check for poison and drop or pass the packet */ if (len == 0xdead && flags == 0xbeef) { - bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n", - ring->start); + netdev_err(bgmac->net_dev, "Found poisoned packet at slot %d, DMA issue!\n", + ring->start); put_page(virt_to_head_page(buf)); + bgmac->net_dev->stats.rx_errors++; break; }
if (len > BGMAC_RX_ALLOC_SIZE) { - bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n", - ring->start); + netdev_err(bgmac->net_dev, "Found oversized packet at slot %d, DMA issue!\n", + ring->start); put_page(virt_to_head_page(buf)); + bgmac->net_dev->stats.rx_length_errors++; + bgmac->net_dev->stats.rx_errors++; break; }
@@@ -480,8 -463,9 +463,9 @@@
skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE); if (unlikely(!skb)) { - bgmac_err(bgmac, "build_skb failed\n"); + netdev_err(bgmac->net_dev, "build_skb failed\n"); put_page(virt_to_head_page(buf)); + bgmac->net_dev->stats.rx_errors++; break; } skb_put(skb, BGMAC_RX_FRAME_OFFSET + @@@ -491,6 -475,8 +475,8 @@@
skb_checksum_none_assert(skb); skb->protocol = eth_type_trans(skb, bgmac->net_dev); + bgmac->net_dev->stats.rx_bytes += len; + bgmac->net_dev->stats.rx_packets++; napi_gro_receive(&bgmac->napi, skb); handled++; } while (0); @@@ -534,7 -520,7 +520,7 @@@ static bool bgmac_dma_unaligned(struct static void bgmac_dma_tx_ring_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; struct bgmac_dma_desc *dma_desc = ring->cpu_base; struct bgmac_slot_info *slot; int i; @@@ -560,7 -546,7 +546,7 @@@ static void bgmac_dma_rx_ring_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; struct bgmac_slot_info *slot; int i;
@@@ -581,7 -567,7 +567,7 @@@ static void bgmac_dma_ring_desc_free(st struct bgmac_dma_ring *ring, int num_slots) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; int size;
if (!ring->cpu_base) @@@ -619,7 -605,7 +605,7 @@@ static void bgmac_dma_free(struct bgma
static int bgmac_dma_alloc(struct bgmac *bgmac) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; struct bgmac_dma_ring *ring; static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1, BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, }; @@@ -630,8 -616,8 +616,8 @@@ BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base)); BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));
- if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) { - bgmac_err(bgmac, "Core does not report 64-bit DMA\n"); + if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) { + dev_err(bgmac->dev, "Core does not report 64-bit DMA\n"); return -ENOTSUPP; }
@@@ -645,8 -631,8 +631,8 @@@ &ring->dma_base, GFP_KERNEL); if (!ring->cpu_base) { - bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n", - ring->mmio_base); + dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n", + ring->mmio_base); goto err_dma_free; }
@@@ -670,8 -656,8 +656,8 @@@ &ring->dma_base, GFP_KERNEL); if (!ring->cpu_base) { - bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n", - ring->mmio_base); + dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n", + ring->mmio_base); err = -ENOMEM; goto err_dma_free; } @@@ -746,150 -732,6 +732,6 @@@ error return err; }
- /************************************************** - * PHY ops - **************************************************/ - - static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg) - { - struct bcma_device *core; - u16 phy_access_addr; - u16 phy_ctl_addr; - u32 tmp; - - BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK); - BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK); - BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT); - BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK); - BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT); - BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE); - BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START); - BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK); - BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK); - BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT); - BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE); - - if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) { - core = bgmac->core->bus->drv_gmac_cmn.core; - phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS; - phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL; - } else { - core = bgmac->core; - phy_access_addr = BGMAC_PHY_ACCESS; - phy_ctl_addr = BGMAC_PHY_CNTL; - } - - tmp = bcma_read32(core, phy_ctl_addr); - tmp &= ~BGMAC_PC_EPA_MASK; - tmp |= phyaddr; - bcma_write32(core, phy_ctl_addr, tmp); - - tmp = BGMAC_PA_START; - tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT; - tmp |= reg << BGMAC_PA_REG_SHIFT; - bcma_write32(core, phy_access_addr, tmp); - - if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) { - bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n", - phyaddr, reg); - return 0xffff; - } - - return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK; - } - - /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */ - static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value) - { - struct bcma_device *core; - u16 phy_access_addr; - u16 phy_ctl_addr; - u32 tmp; - - if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) { - core = bgmac->core->bus->drv_gmac_cmn.core; - phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS; - phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL; - } else { - core = bgmac->core; - phy_access_addr = BGMAC_PHY_ACCESS; - phy_ctl_addr = BGMAC_PHY_CNTL; - } - - tmp = bcma_read32(core, phy_ctl_addr); - tmp &= ~BGMAC_PC_EPA_MASK; - tmp |= phyaddr; - bcma_write32(core, phy_ctl_addr, tmp); - - bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO); - if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO) - bgmac_warn(bgmac, "Error setting MDIO int\n"); - - tmp = BGMAC_PA_START; - tmp |= BGMAC_PA_WRITE; - tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT; - tmp |= reg << BGMAC_PA_REG_SHIFT; - tmp |= value; - bcma_write32(core, phy_access_addr, tmp); - - if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) { - bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n", - phyaddr, reg); - return -ETIMEDOUT; - } - - return 0; - } - - /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */ - static void bgmac_phy_init(struct bgmac *bgmac) - { - struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo; - struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc; - u8 i; - - if (ci->id == BCMA_CHIP_ID_BCM5356) { - for (i = 0; i < 5; i++) { - bgmac_phy_write(bgmac, i, 0x1f, 0x008b); - bgmac_phy_write(bgmac, i, 0x15, 0x0100); - bgmac_phy_write(bgmac, i, 0x1f, 0x000f); - bgmac_phy_write(bgmac, i, 0x12, 0x2aaa); - bgmac_phy_write(bgmac, i, 0x1f, 0x000b); - } - }
- if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) || - (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) || - (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) { - bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0); - bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0); - for (i = 0; i < 5; i++) { - bgmac_phy_write(bgmac, i, 0x1f, 0x000f); - bgmac_phy_write(bgmac, i, 0x16, 0x5284); - bgmac_phy_write(bgmac, i, 0x1f, 0x000b); - bgmac_phy_write(bgmac, i, 0x17, 0x0010); - bgmac_phy_write(bgmac, i, 0x1f, 0x000f); - bgmac_phy_write(bgmac, i, 0x16, 0x5296); - bgmac_phy_write(bgmac, i, 0x17, 0x1073); - bgmac_phy_write(bgmac, i, 0x17, 0x9073); - bgmac_phy_write(bgmac, i, 0x16, 0x52b6); - bgmac_phy_write(bgmac, i, 0x17, 0x9273); - bgmac_phy_write(bgmac, i, 0x1f, 0x000b); - } - } - } - - /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */ - static void bgmac_phy_reset(struct bgmac *bgmac) - { - if (bgmac->phyaddr == BGMAC_PHY_NOREGS) - return; - - bgmac_phy_write(bgmac, bgmac->phyaddr, MII_BMCR, BMCR_RESET); - udelay(100); - if (bgmac_phy_read(bgmac, bgmac->phyaddr, MII_BMCR) & BMCR_RESET) - bgmac_err(bgmac, "PHY reset failed\n"); - bgmac_phy_init(bgmac); - }
/************************************************** * Chip ops @@@ -903,14 -745,20 +745,20 @@@ static void bgmac_cmdcfg_maskset(struc { u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG); u32 new_val = (cmdcfg & mask) | set; + u32 cmdcfg_sr; + + if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4) + cmdcfg_sr = BGMAC_CMDCFG_SR_REV4; + else + cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
- bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR(bgmac->core->id.rev)); + bgmac_set(bgmac, BGMAC_CMDCFG, cmdcfg_sr); udelay(2);
if (new_val != cmdcfg || force) bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
- bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR(bgmac->core->id.rev)); + bgmac_mask(bgmac, BGMAC_CMDCFG, ~cmdcfg_sr); udelay(2); }
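The REV4-vs-REV0 selection above recurs in bgmac_chip_reset() and bgmac_enable() below. Factoring it into a helper would be a natural follow-up; a sketch, not part of this patch (feature_flags is the struct bgmac field this conversion introduces):

    static inline u32 bgmac_cmdcfg_sr(struct bgmac *bgmac)
    {
            return (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4) ?
                   BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0;
    }

Either way, the point of the conversion is that the CMDCFG self-reset bit position no longer depends on reading the core revision through bcma's core->id.rev, which is what lets this file drop its bcma dependency.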
@@@ -939,7 -787,7 +787,7 @@@ static void bgmac_chip_stats_update(str { int i;
- if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) { + if (!(bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)) { for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++) bgmac->mib_tx_regs[i] = bgmac_read(bgmac, @@@ -958,7 -806,7 +806,7 @@@ static void bgmac_clear_mib(struct bgma { int i;
- if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) + if (bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB) return;
bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR); @@@ -988,7 -836,8 +836,8 @@@ static void bgmac_mac_speed(struct bgma set |= BGMAC_CMDCFG_ES_2500; break; default: - bgmac_err(bgmac, "Unsupported speed: %d\n", bgmac->mac_speed); + dev_err(bgmac->dev, "Unsupported speed: %d\n", + bgmac->mac_speed); }
if (bgmac->mac_duplex == DUPLEX_HALF) @@@ -999,17 -848,16 +848,16 @@@
static void bgmac_miiconfig(struct bgmac *bgmac) { - struct bcma_device *core = bgmac->core; - u8 imode; - - if (bgmac_is_bcm4707_family(bgmac)) { - bcma_awrite32(core, BCMA_IOCTL, - bcma_aread32(core, BCMA_IOCTL) | 0x40 | - BGMAC_BCMA_IOCTL_SW_CLKEN); + if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) { + bgmac_idm_write(bgmac, BCMA_IOCTL, + bgmac_idm_read(bgmac, BCMA_IOCTL) | 0x40 | + BGMAC_BCMA_IOCTL_SW_CLKEN); bgmac->mac_speed = SPEED_2500; bgmac->mac_duplex = DUPLEX_FULL; bgmac_mac_speed(bgmac); } else { + u8 imode; + imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT; if (imode == 0 || imode == 1) { @@@ -1023,14 -871,11 +871,11 @@@ /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */ static void bgmac_chip_reset(struct bgmac *bgmac) { - struct bcma_device *core = bgmac->core; - struct bcma_bus *bus = core->bus; - struct bcma_chipinfo *ci = &bus->chipinfo; - u32 flags; + u32 cmdcfg_sr; u32 iost; int i;
- if (bcma_core_is_enabled(core)) { + if (bgmac_clk_enabled(bgmac)) { if (!bgmac->stats_grabbed) { /* bgmac_chip_stats_update(bgmac); */ bgmac->stats_grabbed = true; @@@ -1048,38 -893,32 +893,32 @@@ /* TODO: Clear software multicast filter list */ }
- iost = bcma_aread32(core, BCMA_IOST); - if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) || - (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) || - (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) + iost = bgmac_idm_read(bgmac, BCMA_IOST); + if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED) iost &= ~BGMAC_BCMA_IOST_ATTACHED;
/* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */ - if (ci->id != BCMA_CHIP_ID_BCM4707 && - ci->id != BCMA_CHIP_ID_BCM47094) { - flags = 0; + if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) { + u32 flags = 0; if (iost & BGMAC_BCMA_IOST_ATTACHED) { flags = BGMAC_BCMA_IOCTL_SW_CLKEN; if (!bgmac->has_robosw) flags |= BGMAC_BCMA_IOCTL_SW_RESET; } - bcma_core_enable(core, flags); + bgmac_clk_enable(bgmac, flags); }
/* Request Misc PLL for corerev > 2 */ - if (core->id.rev > 2 && !bgmac_is_bcm4707_family(bgmac)) { + if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) { bgmac_set(bgmac, BCMA_CLKCTLST, BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ); - bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, + bgmac_wait_value(bgmac, BCMA_CLKCTLST, BGMAC_BCMA_CLKCTLST_MISC_PLL_ST, BGMAC_BCMA_CLKCTLST_MISC_PLL_ST, 1000); }
- if (ci->id == BCMA_CHIP_ID_BCM5357 || - ci->id == BCMA_CHIP_ID_BCM4749 || - ci->id == BCMA_CHIP_ID_BCM53572) { - struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc; + if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_PHY) { u8 et_swtype = 0; u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY | BGMAC_CHIPCTL_1_IF_TYPE_MII; @@@ -1087,35 -926,37 +926,37 @@@
if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) { if (kstrtou8(buf, 0, &et_swtype)) - bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n", - buf); + dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n", + buf); et_swtype &= 0x0f; et_swtype <<= 4; sw_type = et_swtype; - } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358) { + } else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_EPHYRMII) { sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII; - } else if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) || - (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) || - (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) { + } else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_RGMII) { sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII | BGMAC_CHIPCTL_1_SW_TYPE_RGMII; } - bcma_chipco_chipctl_maskset(cc, 1, - ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK | - BGMAC_CHIPCTL_1_SW_TYPE_MASK), - sw_type); + bgmac_cco_ctl_maskset(bgmac, 1, ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK | + BGMAC_CHIPCTL_1_SW_TYPE_MASK), + sw_type); }
if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw) - bcma_awrite32(core, BCMA_IOCTL, - bcma_aread32(core, BCMA_IOCTL) & - ~BGMAC_BCMA_IOCTL_SW_RESET); + bgmac_idm_write(bgmac, BCMA_IOCTL, + bgmac_idm_read(bgmac, BCMA_IOCTL) & + ~BGMAC_BCMA_IOCTL_SW_RESET);
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset * Specs don't mention using BGMAC_CMDCFG_SR, but in this routine * BGMAC_CMDCFG is read _after_ putting the chip in a reset. So it has to * be kept until taking the MAC out of the reset. */ + if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4) + cmdcfg_sr = BGMAC_CMDCFG_SR_REV4; + else + cmdcfg_sr = BGMAC_CMDCFG_SR_REV0; + bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE | @@@ -1133,19 -974,20 +974,20 @@@ BGMAC_CMDCFG_PROM | BGMAC_CMDCFG_NLC | BGMAC_CMDCFG_CFE | - BGMAC_CMDCFG_SR(core->id.rev), + cmdcfg_sr, false); bgmac->mac_speed = SPEED_UNKNOWN; bgmac->mac_duplex = DUPLEX_UNKNOWN;
bgmac_clear_mib(bgmac); - if (core->id.id == BCMA_CORE_4706_MAC_GBIT) - bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0, - BCMA_GMAC_CMN_PC_MTE); + if (bgmac->feature_flags & BGMAC_FEAT_CMN_PHY_CTL) + bgmac_cmn_maskset32(bgmac, BCMA_GMAC_CMN_PHY_CTL, ~0, + BCMA_GMAC_CMN_PC_MTE); else bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE); bgmac_miiconfig(bgmac); - bgmac_phy_init(bgmac); + if (bgmac->mii_bus) + bgmac->mii_bus->reset(bgmac->mii_bus);
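Note that bgmac_chip_reset() now runs PHY initialization through the MDIO bus reset callback instead of calling the deleted bgmac_phy_init() directly. The companion bus front-end (not shown in this hunk) is expected to wire the chip-specific quirks into mii_bus->reset; the callback name below is illustrative, an assumption about that front-end rather than code from this diff:

    static int bcma_mdio_mii_bus_reset(struct mii_bus *bus)
    {
            /* BCM5356/BCM5357-class PHY register quirks go here */
            return 0;
    }

    ...
    mii_bus->reset = bcma_mdio_mii_bus_reset;

so the bgmac->mii_bus->reset(bgmac->mii_bus) call above reaches the same fixups the old in-driver PHY init used to apply.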
netdev_reset_queue(bgmac->net_dev); } @@@ -1164,50 -1006,51 +1006,51 @@@ static void bgmac_chip_intrs_off(struc /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */ static void bgmac_enable(struct bgmac *bgmac) { - struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo; + u32 cmdcfg_sr; u32 cmdcfg; u32 mode; - u32 rxq_ctl; - u32 fl_ctl; - u16 bp_clk; - u8 mdp; + + if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4) + cmdcfg_sr = BGMAC_CMDCFG_SR_REV4; + else + cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG); bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE), - BGMAC_CMDCFG_SR(bgmac->core->id.rev), true); + cmdcfg_sr, true); udelay(2); cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE; bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT; - if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0) + if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0) bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT); - if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2) - bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0, - BGMAC_CHIPCTL_1_RXC_DLL_BYPASS); - - switch (ci->id) { - case BCMA_CHIP_ID_BCM5357: - case BCMA_CHIP_ID_BCM4749: - case BCMA_CHIP_ID_BCM53572: - case BCMA_CHIP_ID_BCM4716: - case BCMA_CHIP_ID_BCM47162: - fl_ctl = 0x03cb04cb; - if (ci->id == BCMA_CHIP_ID_BCM5357 || - ci->id == BCMA_CHIP_ID_BCM4749 || - ci->id == BCMA_CHIP_ID_BCM53572) + if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST && mode == 2) + bgmac_cco_ctl_maskset(bgmac, 1, ~0, + BGMAC_CHIPCTL_1_RXC_DLL_BYPASS); + + if (bgmac->feature_flags & (BGMAC_FEAT_FLW_CTRL1 | + BGMAC_FEAT_FLW_CTRL2)) { + u32 fl_ctl; + + if (bgmac->feature_flags & BGMAC_FEAT_FLW_CTRL1) fl_ctl = 0x2300e1; + else + fl_ctl = 0x03cb04cb; + bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl); bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff); - break; }
- if (!bgmac_is_bcm4707_family(bgmac)) { + if (bgmac->feature_flags & BGMAC_FEAT_SET_RXQ_CLK) { + u32 rxq_ctl; + u16 bp_clk; + u8 mdp; + rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL); rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK; - bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / - 1000000; + bp_clk = bgmac_get_bus_clock(bgmac) / 1000000; mdp = (bp_clk * 128 / 1000) - 3; rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT); bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl); @@@ -1251,7 -1094,7 +1094,7 @@@ static irqreturn_t bgmac_interrupt(int
int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX); if (int_status) - bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", int_status); + dev_err(bgmac->dev, "Unknown IRQs: 0x%08X\n", int_status);
/* Disable new interrupts until handling existing ones */ bgmac_chip_intrs_off(bgmac); @@@ -1302,16 -1145,16 +1145,16 @@@ static int bgmac_open(struct net_devic /* Specs say about reclaiming rings here, but we do that in DMA init */ bgmac_chip_init(bgmac);
- err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED, + err = request_irq(bgmac->irq, bgmac_interrupt, IRQF_SHARED, KBUILD_MODNAME, net_dev); if (err < 0) { - bgmac_err(bgmac, "IRQ request error: %d!\n", err); + dev_err(bgmac->dev, "IRQ request error: %d!\n", err); bgmac_dma_cleanup(bgmac); return err; } napi_enable(&bgmac->napi);
- phy_start(bgmac->phy_dev); + phy_start(net_dev->phydev);
netif_start_queue(net_dev);
@@@ -1324,11 -1167,11 +1167,11 @@@ static int bgmac_stop(struct net_devic
netif_carrier_off(net_dev);
- phy_stop(bgmac->phy_dev); + phy_stop(net_dev->phydev);
napi_disable(&bgmac->napi); bgmac_chip_intrs_off(bgmac); - free_irq(bgmac->core->irq, net_dev); + free_irq(bgmac->irq, net_dev);
bgmac_chip_reset(bgmac); bgmac_dma_cleanup(bgmac); @@@ -1362,12 -1205,10 +1205,10 @@@ static int bgmac_set_mac_address(struc
static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) { - struct bgmac *bgmac = netdev_priv(net_dev); - if (!netif_running(net_dev)) return -EINVAL;
- return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd); + return phy_mii_ioctl(net_dev->phydev, ifr, cmd); }
static const struct net_device_ops bgmac_netdev_ops = { @@@ -1384,54 -1225,151 +1225,151 @@@ * ethtool_ops **************************************************/
- static int bgmac_get_settings(struct net_device *net_dev, - struct ethtool_cmd *cmd) + struct bgmac_stat { + u8 size; + u32 offset; + const char *name; + }; + + static struct bgmac_stat bgmac_get_strings_stats[] = { + { 8, BGMAC_TX_GOOD_OCTETS, "tx_good_octets" }, + { 4, BGMAC_TX_GOOD_PKTS, "tx_good" }, + { 8, BGMAC_TX_OCTETS, "tx_octets" }, + { 4, BGMAC_TX_PKTS, "tx_pkts" }, + { 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" }, + { 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" }, + { 4, BGMAC_TX_LEN_64, "tx_64" }, + { 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" }, + { 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" }, + { 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" }, + { 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" }, + { 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" }, + { 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" }, + { 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" }, + { 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" }, + { 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" }, + { 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" }, + { 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" }, + { 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" }, + { 4, BGMAC_TX_UNDERRUNS, "tx_underruns" }, + { 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" }, + { 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" }, + { 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" }, + { 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" }, + { 4, BGMAC_TX_LATE_COLS, "tx_late_cols" }, + { 4, BGMAC_TX_DEFERED, "tx_defered" }, + { 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" }, + { 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" }, + { 4, BGMAC_TX_UNI_PKTS, "tx_unicast" }, + { 4, BGMAC_TX_Q0_PKTS, "tx_q0" }, + { 8, BGMAC_TX_Q0_OCTETS, "tx_q0_octets" }, + { 4, BGMAC_TX_Q1_PKTS, "tx_q1" }, + { 8, BGMAC_TX_Q1_OCTETS, "tx_q1_octets" }, + { 4, BGMAC_TX_Q2_PKTS, "tx_q2" }, + { 8, BGMAC_TX_Q2_OCTETS, "tx_q2_octets" }, + { 4, BGMAC_TX_Q3_PKTS, "tx_q3" }, + { 8, BGMAC_TX_Q3_OCTETS, "tx_q3_octets" }, + { 8, BGMAC_RX_GOOD_OCTETS, "rx_good_octets" }, + { 4, BGMAC_RX_GOOD_PKTS, "rx_good" }, + { 8, BGMAC_RX_OCTETS, "rx_octets" }, + { 4, BGMAC_RX_PKTS, "rx_pkts" }, + { 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" }, + { 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" }, + { 4, BGMAC_RX_LEN_64, "rx_64" }, + { 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" }, + { 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" }, + { 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" }, + { 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" }, + { 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" }, + { 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" }, + { 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" }, + { 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" }, + { 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" }, + { 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" }, + { 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" }, + { 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" }, + { 4, BGMAC_RX_MISSED_PKTS, "rx_missed" }, + { 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" }, + { 4, BGMAC_RX_UNDERSIZE, "rx_undersize" }, + { 4, BGMAC_RX_CRC_ERRS, "rx_crc" }, + { 4, BGMAC_RX_ALIGN_ERRS, "rx_align" }, + { 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" }, + { 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" }, + { 4, BGMAC_RX_NONPAUSE_PKTS, "rx_nonpause" }, + { 4, BGMAC_RX_SACHANGES, "rx_sa_changes" }, + { 4, BGMAC_RX_UNI_PKTS, "rx_unicast" }, + }; + + #define BGMAC_STATS_LEN ARRAY_SIZE(bgmac_get_strings_stats) + + static int bgmac_get_sset_count(struct net_device *dev, int string_set) { - struct bgmac *bgmac = netdev_priv(net_dev); + switch (string_set) { + case ETH_SS_STATS: + return BGMAC_STATS_LEN; + }
- return phy_ethtool_gset(bgmac->phy_dev, cmd); + return -EOPNOTSUPP; }
- static int bgmac_set_settings(struct net_device *net_dev, - struct ethtool_cmd *cmd) + static void bgmac_get_strings(struct net_device *dev, u32 stringset, + u8 *data) { - struct bgmac *bgmac = netdev_priv(net_dev); + int i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < BGMAC_STATS_LEN; i++) + strlcpy(data + i * ETH_GSTRING_LEN, + bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN); + } + + static void bgmac_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *ss, uint64_t *data) + { + struct bgmac *bgmac = netdev_priv(dev); + const struct bgmac_stat *s; + unsigned int i; + u64 val; + + if (!netif_running(dev)) + return;
- return phy_ethtool_sset(bgmac->phy_dev, cmd); + for (i = 0; i < BGMAC_STATS_LEN; i++) { + s = &bgmac_get_strings_stats[i]; + val = 0; + if (s->size == 8) + val = (u64)bgmac_read(bgmac, s->offset + 4) << 32; + val |= bgmac_read(bgmac, s->offset); + data[i] = val; + } }
static void bgmac_get_drvinfo(struct net_device *net_dev, struct ethtool_drvinfo *info) { strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); - strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info)); + strlcpy(info->bus_info, "AXI", sizeof(info->bus_info)); }
static const struct ethtool_ops bgmac_ethtool_ops = { - .get_settings = bgmac_get_settings, - .set_settings = bgmac_set_settings, + .get_strings = bgmac_get_strings, + .get_sset_count = bgmac_get_sset_count, + .get_ethtool_stats = bgmac_get_ethtool_stats, .get_drvinfo = bgmac_get_drvinfo, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, };
/************************************************** * MII **************************************************/
- static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum) - { - return bgmac_phy_read(bus->priv, mii_id, regnum); - } - - static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum, - u16 value) - { - return bgmac_phy_write(bus->priv, mii_id, regnum, value); - } - static void bgmac_adjust_link(struct net_device *net_dev) { struct bgmac *bgmac = netdev_priv(net_dev); - struct phy_device *phy_dev = bgmac->phy_dev; + struct phy_device *phy_dev = net_dev->phydev; bool update = false;
if (phy_dev->link) { @@@ -1452,7 -1390,7 +1390,7 @@@ } }
- static int bgmac_fixed_phy_register(struct bgmac *bgmac) + static int bgmac_phy_connect_direct(struct bgmac *bgmac) { struct fixed_phy_status fphy_status = { .link = 1, @@@ -1464,196 -1402,76 +1402,76 @@@
phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL); if (!phy_dev || IS_ERR(phy_dev)) { - bgmac_err(bgmac, "Failed to register fixed PHY device\n"); + dev_err(bgmac->dev, "Failed to register fixed PHY device\n"); return -ENODEV; }
err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link, PHY_INTERFACE_MODE_MII); if (err) { - bgmac_err(bgmac, "Connecting PHY failed\n"); + dev_err(bgmac->dev, "Connecting PHY failed\n"); return err; }
- bgmac->phy_dev = phy_dev; - return err; }
- static int bgmac_mii_register(struct bgmac *bgmac) + static int bgmac_phy_connect(struct bgmac *bgmac) { - struct mii_bus *mii_bus; struct phy_device *phy_dev; char bus_id[MII_BUS_ID_SIZE + 3]; - int err = 0; - - if (bgmac_is_bcm4707_family(bgmac)) - return bgmac_fixed_phy_register(bgmac); - - mii_bus = mdiobus_alloc(); - if (!mii_bus) - return -ENOMEM; - - mii_bus->name = "bgmac mii bus"; - sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num, - bgmac->core->core_unit); - mii_bus->priv = bgmac; - mii_bus->read = bgmac_mii_read; - mii_bus->write = bgmac_mii_write; - mii_bus->parent = &bgmac->core->dev; - mii_bus->phy_mask = ~(1 << bgmac->phyaddr); - - err = mdiobus_register(mii_bus); - if (err) { - bgmac_err(bgmac, "Registration of mii bus failed\n"); - goto err_free_bus; - } - - bgmac->mii_bus = mii_bus;
/* Connect to the PHY */ - snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id, + snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id, bgmac->phyaddr); phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(phy_dev)) { - bgmac_err(bgmac, "PHY connection failed\n"); - err = PTR_ERR(phy_dev); - goto err_unregister_bus; + dev_err(bgmac->dev, "PHY connection failed\n"); + return PTR_ERR(phy_dev); } - bgmac->phy_dev = phy_dev; - - return err;
- err_unregister_bus: - mdiobus_unregister(mii_bus); - err_free_bus: - mdiobus_free(mii_bus); - return err; - } - - static void bgmac_mii_unregister(struct bgmac *bgmac) - { - struct mii_bus *mii_bus = bgmac->mii_bus; - - mdiobus_unregister(mii_bus); - mdiobus_free(mii_bus); + return 0; }
- /************************************************** - * BCMA bus ops - **************************************************/ - - /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */ - static int bgmac_probe(struct bcma_device *core) + int bgmac_enet_probe(struct bgmac *info) { struct net_device *net_dev; struct bgmac *bgmac; - struct ssb_sprom *sprom = &core->bus->sprom; - u8 *mac; int err;
- switch (core->core_unit) { - case 0: - mac = sprom->et0mac; - break; - case 1: - mac = sprom->et1mac; - break; - case 2: - mac = sprom->et2mac; - break; - default: - pr_err("Unsupported core_unit %d\n", core->core_unit); - return -ENOTSUPP; - } - - if (!is_valid_ether_addr(mac)) { - dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac); - eth_random_addr(mac); - dev_warn(&core->dev, "Using random MAC: %pM\n", mac); - } - - /* This (reset &) enable is not preset in specs or reference driver but - * Broadcom does it in arch PCI code when enabling fake PCI device. - */ - bcma_core_enable(core, 0); - /* Allocation and references */ net_dev = alloc_etherdev(sizeof(*bgmac)); if (!net_dev) return -ENOMEM; + net_dev->netdev_ops = &bgmac_netdev_ops; - net_dev->irq = core->irq; net_dev->ethtool_ops = &bgmac_ethtool_ops; bgmac = netdev_priv(net_dev); + memcpy(bgmac, info, sizeof(*bgmac)); bgmac->net_dev = net_dev; - bgmac->core = core; - bcma_set_drvdata(core, bgmac); - - /* Defaults */ - memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN); - - /* On BCM4706 we need common core to access PHY */ - if (core->id.id == BCMA_CORE_4706_MAC_GBIT && - !core->bus->drv_gmac_cmn.core) { - bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n"); - err = -ENODEV; - goto err_netdev_free; - } - bgmac->cmn = core->bus->drv_gmac_cmn.core; - - switch (core->core_unit) { - case 0: - bgmac->phyaddr = sprom->et0phyaddr; - break; - case 1: - bgmac->phyaddr = sprom->et1phyaddr; - break; - case 2: - bgmac->phyaddr = sprom->et2phyaddr; - break; - } - bgmac->phyaddr &= BGMAC_PHY_MASK; - if (bgmac->phyaddr == BGMAC_PHY_MASK) { - bgmac_err(bgmac, "No PHY found\n"); - err = -ENODEV; - goto err_netdev_free; + net_dev->irq = bgmac->irq; + SET_NETDEV_DEV(net_dev, bgmac->dev); + + if (!is_valid_ether_addr(bgmac->mac_addr)) { + dev_err(bgmac->dev, "Invalid MAC addr: %pM\n", + bgmac->mac_addr); + eth_random_addr(bgmac->mac_addr); + dev_warn(bgmac->dev, "Using random MAC: %pM\n", + bgmac->mac_addr); } - bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr, - bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : ""); + ether_addr_copy(net_dev->dev_addr, bgmac->mac_addr);
- if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) { - bgmac_err(bgmac, "PCI setup not implemented\n"); - err = -ENOTSUPP; - goto err_netdev_free; - } + /* This (reset &) enable is not present in specs or the reference driver, + * but Broadcom does it in arch PCI code when enabling the fake PCI device. + */ + bgmac_clk_enable(bgmac, 0);
bgmac_chip_reset(bgmac);
- /* For Northstar, we have to take all GMAC core out of reset */ - if (bgmac_is_bcm4707_family(bgmac)) { - struct bcma_device *ns_core; - int ns_gmac; - - /* Northstar has 4 GMAC cores */ - for (ns_gmac = 0; ns_gmac < 4; ns_gmac++) { - /* As Northstar requirement, we have to reset all GMACs - * before accessing one. bgmac_chip_reset() call - * bcma_core_enable() for this core. Then the other - * three GMACs didn't reset. We do it here. - */ - ns_core = bcma_find_core_unit(core->bus, - BCMA_CORE_MAC_GBIT, - ns_gmac); - if (ns_core && !bcma_core_is_enabled(ns_core)) - bcma_core_enable(ns_core, 0); - } - } - err = bgmac_dma_alloc(bgmac); if (err) { - bgmac_err(bgmac, "Unable to alloc memory for DMA\n"); + dev_err(bgmac->dev, "Unable to alloc memory for DMA\n"); goto err_netdev_free; }
@@@ -1661,22 -1479,14 +1479,14 @@@ if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0) bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
- /* TODO: reset the external phy. Specs are needed */ - bgmac_phy_reset(bgmac); - - bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo & - BGMAC_BFL_ENETROBO); - if (bgmac->has_robosw) - bgmac_warn(bgmac, "Support for Roboswitch not implemented\n"); - - if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM) - bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n"); - netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
- err = bgmac_mii_register(bgmac); + if (!bgmac->mii_bus) + err = bgmac_phy_connect_direct(bgmac); + else + err = bgmac_phy_connect(bgmac); if (err) { - bgmac_err(bgmac, "Cannot register MDIO\n"); + dev_err(bgmac->dev, "Cannot connect to phy\n"); goto err_dma_free; }
@@@ -1686,64 -1496,34 +1496,34 @@@
err = register_netdev(bgmac->net_dev); if (err) { - bgmac_err(bgmac, "Cannot register net device\n"); - goto err_mii_unregister; + dev_err(bgmac->dev, "Cannot register net device\n"); + goto err_phy_disconnect; }
netif_carrier_off(net_dev);
return 0;
- err_mii_unregister: - bgmac_mii_unregister(bgmac); + err_phy_disconnect: + phy_disconnect(net_dev->phydev); err_dma_free: bgmac_dma_free(bgmac); - err_netdev_free: - bcma_set_drvdata(core, NULL); free_netdev(net_dev);
return err; } + EXPORT_SYMBOL_GPL(bgmac_enet_probe);
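The probe/remove pair is now exported instead of being registered as a bcma driver: bgmac_enet_probe() memcpy()s the caller-provided template into netdev_priv(), so a bus front-end fills in a struct bgmac and hands it over. A hedged sketch of the calling convention (the example_* names are hypothetical; only fields visible in this diff are touched):

    static int example_bus_probe(struct example_device *bdev)
    {
            struct bgmac info = {};

            info.dev = &bdev->dev;
            info.dma_dev = bdev->dma_dev;
            info.irq = bdev->irq;
            /* mac_addr, feature_flags, register accessors, mii_bus, ... */

            return bgmac_enet_probe(&info);
    }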
- static void bgmac_remove(struct bcma_device *core) + void bgmac_enet_remove(struct bgmac *bgmac) { - struct bgmac *bgmac = bcma_get_drvdata(core); - unregister_netdev(bgmac->net_dev); - bgmac_mii_unregister(bgmac); + phy_disconnect(bgmac->net_dev->phydev); netif_napi_del(&bgmac->napi); bgmac_dma_free(bgmac); - bcma_set_drvdata(core, NULL); free_netdev(bgmac->net_dev); } - - static struct bcma_driver bgmac_bcma_driver = { - .name = KBUILD_MODNAME, - .id_table = bgmac_bcma_tbl, - .probe = bgmac_probe, - .remove = bgmac_remove, - }; - - static int __init bgmac_init(void) - { - int err; - - err = bcma_driver_register(&bgmac_bcma_driver); - if (err) - return err; - pr_info("Broadcom 47xx GBit MAC driver loaded\n"); - - return 0; - } - - static void __exit bgmac_exit(void) - { - bcma_driver_unregister(&bgmac_bcma_driver); - } - - module_init(bgmac_init) - module_exit(bgmac_exit) + EXPORT_SYMBOL_GPL(bgmac_enet_remove);
MODULE_AUTHOR("Rafał Miłecki"); MODULE_LICENSE("GPL"); diff --combined drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 1b0ae4a,492c06b..b83e174 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@@ -56,6 -56,8 +56,8 @@@ static int bnxt_get_coalesce(struct net coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq; coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq;
+ coal->stats_block_coalesce_usecs = bp->stats_coal_ticks; + return 0; }
@@@ -63,6 -65,7 +65,7 @@@ static int bnxt_set_coalesce(struct net struct ethtool_coalesce *coal) { struct bnxt *bp = netdev_priv(dev); + bool update_stats = false; int rc = 0;
bp->rx_coal_ticks = coal->rx_coalesce_usecs; @@@ -76,8 -79,26 +79,26 @@@ bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq; bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq;
- if (netif_running(dev)) - rc = bnxt_hwrm_set_coal(bp); + if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) { + u32 stats_ticks = coal->stats_block_coalesce_usecs; + + stats_ticks = clamp_t(u32, stats_ticks, + BNXT_MIN_STATS_COAL_TICKS, + BNXT_MAX_STATS_COAL_TICKS); + stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS); + bp->stats_coal_ticks = stats_ticks; + update_stats = true; + } + + if (netif_running(dev)) { + if (update_stats) { + rc = bnxt_close_nic(bp, true, false); + if (!rc) + rc = bnxt_open_nic(bp, true, false); + } else { + rc = bnxt_hwrm_set_coal(bp); + } + }
return rc; } @@@ -341,9 -362,13 +362,13 @@@ static void bnxt_get_channels(struct ne channel->max_other = 0; if (bp->flags & BNXT_FLAG_SHARED_RINGS) { channel->combined_count = bp->rx_nr_rings; + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + channel->combined_count--; } else { - channel->rx_count = bp->rx_nr_rings; - channel->tx_count = bp->tx_nr_rings_per_tc; + if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) { + channel->rx_count = bp->rx_nr_rings; + channel->tx_count = bp->tx_nr_rings_per_tc; + } } }
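In bnxt_set_coalesce() above, stats_block_coalesce_usecs is quantized before use: clamp_t() bounds it to [BNXT_MIN_STATS_COAL_TICKS, BNXT_MAX_STATS_COAL_TICKS] and rounddown() snaps it to a multiple of the minimum, and a changed value forces a close/open cycle, presumably because the stats tick is programmed into the firmware when the rings are (re)created. A standalone illustration of the arithmetic (the 250 ms and 1 s limits are assumed values for the two macros, not taken from this diff):

    #include <stdio.h>

    #define MIN_TICKS  250000u      /* assumed BNXT_MIN_STATS_COAL_TICKS */
    #define MAX_TICKS 1000000u      /* assumed BNXT_MAX_STATS_COAL_TICKS */

    static unsigned int quantize(unsigned int us)
    {
            if (us < MIN_TICKS)
                    us = MIN_TICKS;
            if (us > MAX_TICKS)
                    us = MAX_TICKS;
            return us - (us % MIN_TICKS);   /* rounddown() */
    }

    int main(void)
    {
            /* prints: 250000 750000 1000000 */
            printf("%u %u %u\n", quantize(1), quantize(999999),
                   quantize(2000000));
            return 0;
    }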
@@@ -366,6 -391,10 +391,10 @@@ static int bnxt_set_channels(struct net (channel->rx_count || channel->tx_count)) return -EINVAL;
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count || + channel->tx_count)) + return -EINVAL; + if (channel->combined_count) sh = true;
@@@ -628,7 -657,66 +657,66 @@@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw return speed_mask; }
- static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info) + #define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\ + { \ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 100baseT_Full); \ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 1000baseT_Full); \ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 10000baseT_Full); \ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 25000baseCR_Full); \ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 40000baseCR4_Full);\ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 50000baseCR2_Full);\ + if ((fw_pause) & BNXT_LINK_PAUSE_RX) { \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + Pause); \ + if (!((fw_pause) & BNXT_LINK_PAUSE_TX)) \ + ethtool_link_ksettings_add_link_mode( \ + lk_ksettings, name, Asym_Pause);\ + } else if ((fw_pause) & BNXT_LINK_PAUSE_TX) { \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + Asym_Pause); \ + } \ + } + + #define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name) \ + { \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 100baseT_Full) || \ + ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 100baseT_Half)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB; \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 1000baseT_Full) || \ + ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 1000baseT_Half)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB; \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 10000baseT_Full)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB; \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 25000baseCR_Full)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB; \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 40000baseCR4_Full)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB; \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 50000baseCR2_Full)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB; \ + } + + static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info, + struct ethtool_link_ksettings *lk_ksettings) { u16 fw_speeds = link_info->auto_link_speeds; u8 fw_pause = 0; @@@ -636,10 -724,11 +724,11 @@@ if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) fw_pause = link_info->auto_pause_setting;
- return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause); + BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising); }
- static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info) + static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info, + struct ethtool_link_ksettings *lk_ksettings) { u16 fw_speeds = link_info->lp_auto_link_speeds; u8 fw_pause = 0; @@@ -647,16 -736,24 +736,24 @@@ if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) fw_pause = link_info->lp_pause;
- return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause); + BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, + lp_advertising); }
- static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info) + static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info, + struct ethtool_link_ksettings *lk_ksettings) { u16 fw_speeds = link_info->support_speeds; - u32 supported;
- supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); - return supported | SUPPORTED_Pause | SUPPORTED_Asym_Pause; + BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported); + + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause); + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, + Asym_Pause); + + if (link_info->support_auto_speeds) + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, + Autoneg); }
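The BNXT_FW_TO_ETHTOOL_SPDS()/BNXT_ETHTOOL_TO_FW_SPDS() macros above translate between firmware speed masks and the ksettings link-mode bitmaps, which replace the old u32 SUPPORTED_*/ADVERTISED_* fields. In isolation, the bitmap helpers they lean on work like this (ks is a struct ethtool_link_ksettings *; the snippet is a usage sketch, not driver code):

    ethtool_link_ksettings_zero_link_mode(ks, supported);
    ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseCR_Full);

    if (ethtool_link_ksettings_test_link_mode(ks, advertising, Pause))
            /* pause frames were requested */;

Unlike the u32 masks, the bitmaps have room for modes such as 25000baseCR_Full and 50000baseCR2_Full, which is what makes this conversion more than cosmetic for a 25/50G-capable device.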
u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) @@@ -683,65 -780,62 +780,62 @@@ } }
- static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) + static int bnxt_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *lk_ksettings) { struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info = &bp->link_info; - u16 ethtool_speed; + struct ethtool_link_settings *base = &lk_ksettings->base; + u32 ethtool_speed;
- cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info); - - if (link_info->auto_link_speeds) - cmd->supported |= SUPPORTED_Autoneg; + ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported); + bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
+ ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); if (link_info->autoneg) { - cmd->advertising = - bnxt_fw_to_ethtool_advertised_spds(link_info); - cmd->advertising |= ADVERTISED_Autoneg; - cmd->autoneg = AUTONEG_ENABLE; + bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings); + ethtool_link_ksettings_add_link_mode(lk_ksettings, + advertising, Autoneg); + base->autoneg = AUTONEG_ENABLE; if (link_info->phy_link_status == BNXT_LINK_LINK) - cmd->lp_advertising = - bnxt_fw_to_ethtool_lp_adv(link_info); + bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings); ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); if (!netif_carrier_ok(dev)) - cmd->duplex = DUPLEX_UNKNOWN; + base->duplex = DUPLEX_UNKNOWN; else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) - cmd->duplex = DUPLEX_FULL; + base->duplex = DUPLEX_FULL; else - cmd->duplex = DUPLEX_HALF; + base->duplex = DUPLEX_HALF; } else { - cmd->autoneg = AUTONEG_DISABLE; - cmd->advertising = 0; + base->autoneg = AUTONEG_DISABLE; ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed); - cmd->duplex = DUPLEX_HALF; + base->duplex = DUPLEX_HALF; if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL) - cmd->duplex = DUPLEX_FULL; + base->duplex = DUPLEX_FULL; } - ethtool_cmd_speed_set(cmd, ethtool_speed); + base->speed = ethtool_speed;
- cmd->port = PORT_NONE; + base->port = PORT_NONE; if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { - cmd->port = PORT_TP; - cmd->supported |= SUPPORTED_TP; - cmd->advertising |= ADVERTISED_TP; + base->port = PORT_TP; + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, + TP); + ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising, + TP); } else { - cmd->supported |= SUPPORTED_FIBRE; - cmd->advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, + FIBRE); + ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising, + FIBRE);
if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC) - cmd->port = PORT_DA; + base->port = PORT_DA; else if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE) - cmd->port = PORT_FIBRE; + base->port = PORT_FIBRE; } - - if (link_info->transceiver == - PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL) - cmd->transceiver = XCVR_INTERNAL; - else - cmd->transceiver = XCVR_EXTERNAL; - cmd->phy_address = link_info->phy_addr; + base->phy_address = link_info->phy_addr;
return 0; } @@@ -815,37 -909,25 +909,25 @@@ u16 bnxt_get_fw_auto_link_speeds(u32 ad return fw_speed_mask; }
- static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) + static int bnxt_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *lk_ksettings) { - int rc = 0; struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info = &bp->link_info; + const struct ethtool_link_settings *base = &lk_ksettings->base; u32 speed, fw_advertising = 0; bool set_pause = false; + int rc = 0;
- if (BNXT_VF(bp)) - return rc; - - if (cmd->autoneg == AUTONEG_ENABLE) { - u32 supported_spds = bnxt_fw_to_ethtool_support_spds(link_info); + if (!BNXT_SINGLE_PF(bp)) + return -EOPNOTSUPP;
- if (cmd->advertising & ~(supported_spds | ADVERTISED_Autoneg | - ADVERTISED_TP | ADVERTISED_FIBRE)) { - netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n", - cmd->advertising); - rc = -EINVAL; - goto set_setting_exit; - } - fw_advertising = bnxt_get_fw_auto_link_speeds(cmd->advertising); - if (fw_advertising & ~link_info->support_speeds) { - netdev_err(dev, "Advertising parameters are not supported! (adv: 0x%x)\n", - cmd->advertising); - rc = -EINVAL; - goto set_setting_exit; - } + if (base->autoneg == AUTONEG_ENABLE) { + BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings, + advertising); link_info->autoneg |= BNXT_AUTONEG_SPEED; if (!fw_advertising) - link_info->advertising = link_info->support_speeds; + link_info->advertising = link_info->support_auto_speeds; else link_info->advertising = fw_advertising; /* any change to autoneg will cause link change, therefore the @@@ -863,16 -945,12 +945,12 @@@ rc = -EINVAL; goto set_setting_exit; } - /* TODO: currently don't support half duplex */ - if (cmd->duplex == DUPLEX_HALF) { + if (base->duplex == DUPLEX_HALF) { netdev_err(dev, "HALF DUPLEX is not supported!\n"); rc = -EINVAL; goto set_setting_exit; } - /* If received a request for an unknown duplex, assume full*/ - if (cmd->duplex == DUPLEX_UNKNOWN) - cmd->duplex = DUPLEX_FULL; - speed = ethtool_cmd_speed(cmd); + speed = base->speed; fw_speed = bnxt_get_fw_speed(dev, speed); if (!fw_speed) { rc = -EINVAL; @@@ -911,8 -989,8 +989,8 @@@ static int bnxt_set_pauseparam(struct n struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info = &bp->link_info;
- if (BNXT_VF(bp)) - return rc; + if (!BNXT_SINGLE_PF(bp)) + return -EOPNOTSUPP;
if (epause->autoneg) { if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) @@@ -1010,6 -1088,8 +1088,8 @@@ static int bnxt_firmware_reset(struct n case BNX_DIR_TYPE_APE_FW: case BNX_DIR_TYPE_APE_PATCH: req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT; + /* Self-reset APE upon next PCIe reset: */ + req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; break; case BNX_DIR_TYPE_KONG_FW: case BNX_DIR_TYPE_KONG_PATCH: @@@ -1043,9 -1123,27 +1123,27 @@@ static int bnxt_flash_firmware(struct n case BNX_DIR_TYPE_BOOTCODE_2: code_type = CODE_BOOT; break; + case BNX_DIR_TYPE_CHIMP_PATCH: + code_type = CODE_CHIMP_PATCH; + break; case BNX_DIR_TYPE_APE_FW: code_type = CODE_MCTP_PASSTHRU; break; + case BNX_DIR_TYPE_APE_PATCH: + code_type = CODE_APE_PATCH; + break; + case BNX_DIR_TYPE_KONG_FW: + code_type = CODE_KONG_FW; + break; + case BNX_DIR_TYPE_KONG_PATCH: + code_type = CODE_KONG_PATCH; + break; + case BNX_DIR_TYPE_BONO_FW: + code_type = CODE_BONO_FW; + break; + case BNX_DIR_TYPE_BONO_PATCH: + code_type = CODE_BONO_PATCH; + break; default: netdev_err(dev, "Unsupported directory entry type: %u\n", dir_type); @@@ -1100,6 -1198,8 +1198,8 @@@ static bool bnxt_dir_type_is_ape_bin_fo case BNX_DIR_TYPE_APE_PATCH: case BNX_DIR_TYPE_KONG_FW: case BNX_DIR_TYPE_KONG_PATCH: + case BNX_DIR_TYPE_BONO_FW: + case BNX_DIR_TYPE_BONO_PATCH: return true; }
@@@ -1137,7 -1237,8 +1237,8 @@@ static int bnxt_flash_firmware_from_fil const struct firmware *fw; int rc;
- if (bnxt_dir_type_is_executable(dir_type) == false) + if (dir_type != BNX_DIR_TYPE_UPDATE && + bnxt_dir_type_is_executable(dir_type) == false) return -EINVAL;
rc = request_firmware(&fw, filename, &dev->dev); @@@ -1433,8 -1534,8 +1534,8 @@@ static int bnxt_set_eee(struct net_devi _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); int rc = 0;
- if (BNXT_VF(bp)) - return 0; + if (!BNXT_SINGLE_PF(bp)) + return -EOPNOTSUPP;
if (!(bp->flags & BNXT_FLAG_EEE_CAP)) return -EOPNOTSUPP; @@@ -1591,7 -1692,7 +1692,7 @@@ static int bnxt_get_module_eeprom(struc { struct bnxt *bp = netdev_priv(dev); u16 start = eeprom->offset, length = eeprom->len; - int rc; + int rc = 0;
memset(data, 0, eeprom->len);
@@@ -1618,8 -1719,8 +1719,8 @@@ }
const struct ethtool_ops bnxt_ethtool_ops = { - .get_settings = bnxt_get_settings, - .set_settings = bnxt_set_settings, + .get_link_ksettings = bnxt_get_link_ksettings, + .set_link_ksettings = bnxt_set_link_ksettings, .get_pauseparam = bnxt_get_pauseparam, .set_pauseparam = bnxt_set_pauseparam, .get_drvinfo = bnxt_get_drvinfo, diff --combined drivers/net/ethernet/ethoc.c index 4466a11,37a39b4..c044667 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@@ -192,7 -192,6 +192,6 @@@ MODULE_PARM_DESC(buffer_size, "DMA buff * @napi: NAPI structure * @msg_enable: device state flags * @lock: device lock - * @phy: attached PHY * @mdio: MDIO bus for PHY access * @phy_id: address of attached PHY */ @@@ -219,7 -218,6 +218,6 @@@ struct ethoc
spinlock_t lock;
- struct phy_device *phy; struct mii_bus *mdio; struct clk *clk; s8 phy_id; @@@ -694,7 -692,6 +692,6 @@@ static int ethoc_mdio_probe(struct net_ return err; }
- priv->phy = phy; phy->advertising &= ~(ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half); phy->supported &= ~(SUPPORTED_1000baseT_Full | @@@ -724,7 -721,7 +721,7 @@@ static int ethoc_open(struct net_devic netif_start_queue(dev); }
- phy_start(priv->phy); + phy_start(dev->phydev); napi_enable(&priv->napi);
if (netif_msg_ifup(priv)) { @@@ -741,8 -738,8 +738,8 @@@ static int ethoc_stop(struct net_devic
napi_disable(&priv->napi);
- if (priv->phy) - phy_stop(priv->phy); + if (dev->phydev) + phy_stop(dev->phydev);
ethoc_disable_rx_and_tx(priv); free_irq(dev->irq, dev); @@@ -770,7 -767,7 +767,7 @@@ static int ethoc_ioctl(struct net_devic if (!phy) return -ENODEV; } else { - phy = priv->phy; + phy = dev->phydev; }
return phy_mii_ioctl(phy, ifr, cmd); @@@ -860,11 -857,6 +857,11 @@@ static netdev_tx_t ethoc_start_xmit(str unsigned int entry; void *dest;
+ if (skb_put_padto(skb, ETHOC_ZLEN)) { + dev->stats.tx_errors++; + goto out_no_free; + } + if (unlikely(skb->len > ETHOC_BUFSIZ)) { dev->stats.tx_errors++; goto out; @@@ -899,32 -891,9 +896,10 @@@ skb_tx_timestamp(skb); out: dev_kfree_skb(skb); +out_no_free: return NETDEV_TX_OK; }
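The new skb_put_padto() call makes ethoc pad sub-minimum frames before queuing them, and the separate out_no_free label is load-bearing: skb_put_padto() frees the skb itself on failure, so jumping to the existing out label would double-free it. The general shape of the pattern, with an illustrative function name and the generic ETH_ZLEN in place of the driver's ETHOC_ZLEN:

    static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            if (skb_put_padto(skb, ETH_ZLEN)) {
                    /* skb was already consumed by skb_put_padto() */
                    dev->stats.tx_errors++;
                    return NETDEV_TX_OK;
            }

            /* frame is now at least ETH_ZLEN bytes; map and queue it */
            return NETDEV_TX_OK;
    }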
- static int ethoc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) - { - struct ethoc *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phy; - - if (!phydev) - return -EOPNOTSUPP; - - return phy_ethtool_gset(phydev, cmd); - } - - static int ethoc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) - { - struct ethoc *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phy; - - if (!phydev) - return -EOPNOTSUPP; - - return phy_ethtool_sset(phydev, cmd); - } - static int ethoc_get_regs_len(struct net_device *netdev) { return ETH_END; @@@ -989,14 -958,14 +964,14 @@@ static int ethoc_set_ringparam(struct n }
const struct ethtool_ops ethoc_ethtool_ops = { - .get_settings = ethoc_get_settings, - .set_settings = ethoc_set_settings, .get_regs_len = ethoc_get_regs_len, .get_regs = ethoc_get_regs, .get_link = ethtool_op_get_link, .get_ringparam = ethoc_get_ringparam, .set_ringparam = ethoc_set_ringparam, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, };
static const struct net_device_ops ethoc_netdev_ops = { @@@ -1092,7 -1061,7 +1067,7 @@@ static int ethoc_probe(struct platform_ if (!priv->iobase) { dev_err(&pdev->dev, "cannot remap I/O memory space\n"); ret = -ENXIO; - goto error; + goto free; }
if (netdev->mem_end) { @@@ -1101,7 -1070,7 +1076,7 @@@ if (!priv->membase) { dev_err(&pdev->dev, "cannot remap memory space\n"); ret = -ENXIO; - goto error; + goto free; } } else { /* Allocate buffer memory */ @@@ -1112,7 -1081,7 +1087,7 @@@ dev_err(&pdev->dev, "cannot allocate %dB buffer\n", buffer_size); ret = -ENOMEM; - goto error; + goto free; } netdev->mem_end = netdev->mem_start + buffer_size; priv->dma_alloc = buffer_size; @@@ -1126,7 -1095,7 +1101,7 @@@ 128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ); if (num_bd < 4) { ret = -ENODEV; - goto error; + goto free; } priv->num_bd = num_bd; /* num_tx must be a power of two */ @@@ -1139,7 -1108,7 +1114,7 @@@ priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void *), GFP_KERNEL); if (!priv->vma) { ret = -ENOMEM; - goto error; + goto free; }
/* Allow the platform setup code to pass in a MAC address. */ @@@ -1267,8 -1236,7 +1242,7 @@@ static int ethoc_remove(struct platform
if (netdev) { netif_napi_del(&priv->napi); - phy_disconnect(priv->phy); - priv->phy = NULL; + phy_disconnect(netdev->phydev);
if (priv->mdio) { mdiobus_unregister(priv->mdio); diff --combined drivers/net/ethernet/ezchip/nps_enet.c index 9b7a3f5,25faa3d..f928e6f --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@@ -24,6 -24,14 +24,14 @@@
#define DRV_NAME "nps_mgt_enet"
+ static inline bool nps_enet_is_tx_pending(struct nps_enet_priv *priv) + { + u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); + u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; + + return (!tx_ctrl_ct && priv->tx_skb); + } + static void nps_enet_clean_rx_fifo(struct net_device *ndev, u32 frame_len) { struct nps_enet_priv *priv = netdev_priv(ndev); @@@ -46,16 -54,17 +54,17 @@@ static void nps_enet_read_rx_fifo(struc if (dst_is_aligned) { ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, reg, len); reg += len; - } - else { /* !dst_is_aligned */ + } else { /* !dst_is_aligned */ for (i = 0; i < len; i++, reg++) { u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); + put_unaligned_be32(buf, reg); } } /* copy last bytes (if any) */ if (last) { u32 buf; + ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, &buf, 1); memcpy((u8 *)reg, &buf, last); } @@@ -140,12 -149,11 +149,11 @@@ static void nps_enet_tx_handler(struct { struct nps_enet_priv *priv = netdev_priv(ndev); u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); - u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; u32 tx_ctrl_et = (tx_ctrl_value & TX_CTL_ET_MASK) >> TX_CTL_ET_SHIFT; u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT;
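nps_enet_is_tx_pending() centralizes a check that was previously open-coded in the TX-done handler, the NAPI poll routine and the interrupt handler (all converted below). Because the helper re-reads TX_CTL at each call site instead of relying on a value sampled earlier in the function, the re-schedule decision in the poll loop is made on fresh hardware state. The predicate in isolation (the register constants are the driver's own):

    #include <linux/skbuff.h>

    /* TX is still pending while the controller has not cleared the CT
     * (completion) field and the driver still holds an unacked skb.
     */
    static inline bool example_tx_pending(u32 tx_ctrl, const struct sk_buff *tx_skb)
    {
        u32 ct = (tx_ctrl & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;

        return !ct && tx_skb;
    }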
/* Check if we got TX */ - if (!priv->tx_skb || tx_ctrl_ct) + if (!nps_enet_is_tx_pending(priv)) return;
/* Ack Tx ctrl register */ @@@ -183,9 -191,6 +191,6 @@@ static int nps_enet_poll(struct napi_st work_done = nps_enet_rx_handler(ndev); if (work_done < budget) { u32 buf_int_enable_value = 0; - u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); - u32 tx_ctrl_ct = - (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
napi_complete(napi);
@@@ -204,8 -209,7 +209,7 @@@ * the two code lines below will solve this situation by * re-adding ourselves to the poll list. */ - - if (priv->tx_skb && !tx_ctrl_ct) { + if (nps_enet_is_tx_pending(priv)) { nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0); napi_reschedule(napi); } @@@ -230,11 -234,9 +234,9 @@@ static irqreturn_t nps_enet_irq_handler struct net_device *ndev = dev_instance; struct nps_enet_priv *priv = netdev_priv(ndev); u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL); - u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); - u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;
- if ((!tx_ctrl_ct && priv->tx_skb) || rx_ctrl_cr) + if (nps_enet_is_tx_pending(priv) || rx_ctrl_cr) if (likely(napi_schedule_prep(&priv->napi))) { nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0); __napi_schedule(&priv->napi); @@@ -285,7 -287,6 +287,7 @@@ static void nps_enet_hw_reset(struct ne ge_rst_value |= NPS_ENET_ENABLE << RST_GMAC_0_SHIFT; nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value); usleep_range(10, 20); + ge_rst_value = 0; nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
/* Tx fifo reset sequence */ @@@ -460,7 -461,6 +462,6 @@@ static void nps_enet_set_rx_mode(struc | NPS_ENET_ENABLE << CFG_2_DISK_DA_SHIFT; ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK) | NPS_ENET_ENABLE << CFG_2_DISK_MC_SHIFT; - }
nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, ge_mac_cfg_2_value); diff --combined drivers/net/ethernet/freescale/fec.h index dc71a88,92fd5c0..4e98b24 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@@ -442,8 -442,8 +442,10 @@@ struct bufdesc_ex #define FEC_QUIRK_SINGLE_MDIO (1 << 11) /* Controller supports RACC register */ #define FEC_QUIRK_HAS_RACC (1 << 12) +/* Interrupt doesn't wake CPU from deep idle */ +#define FEC_QUIRK_ERR006687 (1 << 13) + /* Controller supports interrupt coalescing */ -#define FEC_QUIRK_HAS_COALESCE (1 << 13) ++#define FEC_QUIRK_HAS_COALESCE (1 << 14)
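The ++ line above is the interesting merge resolution in this file: evidently both parent branches allocated bit 13, one for FEC_QUIRK_ERR006687 and net-next for FEC_QUIRK_HAS_COALESCE, so the merge renumbers HAS_COALESCE to bit 14 to keep the two quirks independently testable. A sketch of how the hunks below consume them (fep is the driver's fec_enet_private, as elsewhere in this diff):

    /* Each quirk gates an unrelated feature, so they must not share a bit: */
    static void example_apply_quirks(struct fec_enet_private *fep,
                                     struct net_device *ndev)
    {
        if (fep->quirks & FEC_QUIRK_HAS_COALESCE)
            fec_enet_itr_coal_set(ndev);       /* interrupt coalescing */
        if (fep->quirks & FEC_QUIRK_ERR006687)
            imx6q_cpuidle_fec_irqs_used();     /* pin the cpuidle state */
    }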
struct bufdesc_prop { int qid; diff --combined drivers/net/ethernet/freescale/fec_main.c index d9ecc30,4040003..01f7e81 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@@ -60,7 -60,6 +60,7 @@@ #include <linux/if_vlan.h> #include <linux/pinctrl/consumer.h> #include <linux/prefetch.h> +#include <soc/imx/cpuidle.h>
#include <asm/cacheflush.h>
@@@ -112,7 -111,13 +112,13 @@@ static struct platform_device_id fec_de FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | - FEC_QUIRK_HAS_RACC, + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE, + }, { + .name = "imx6ul-fec", + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_BUG_CAPTURE | + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE, }, { /* sentinel */ } @@@ -126,6 -131,7 +132,7 @@@ enum imx_fec_type IMX6Q_FEC, MVF600_FEC, IMX6SX_FEC, + IMX6UL_FEC, };
static const struct of_device_id fec_dt_ids[] = { @@@ -135,6 -141,7 +142,7 @@@ { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], }, + { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fec_dt_ids); @@@ -2359,9 -2366,6 +2367,6 @@@ static void fec_enet_itr_coal_set(struc struct fec_enet_private *fep = netdev_priv(ndev); int rx_itr, tx_itr;
- if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) - return; - /* Must be greater than zero to avoid unpredictable behavior */ if (!fep->rx_time_itr || !fep->rx_pkts_itr || !fep->tx_time_itr || !fep->tx_pkts_itr) @@@ -2384,10 -2388,12 +2389,12 @@@
writel(tx_itr, fep->hwp + FEC_TXIC0); writel(rx_itr, fep->hwp + FEC_RXIC0); - writel(tx_itr, fep->hwp + FEC_TXIC1); - writel(rx_itr, fep->hwp + FEC_RXIC1); - writel(tx_itr, fep->hwp + FEC_TXIC2); - writel(rx_itr, fep->hwp + FEC_RXIC2); + if (fep->quirks & FEC_QUIRK_HAS_AVB) { + writel(tx_itr, fep->hwp + FEC_TXIC1); + writel(rx_itr, fep->hwp + FEC_RXIC1); + writel(tx_itr, fep->hwp + FEC_TXIC2); + writel(rx_itr, fep->hwp + FEC_RXIC2); + } }
static int @@@ -2395,7 -2401,7 +2402,7 @@@ fec_enet_get_coalesce(struct net_devic { struct fec_enet_private *fep = netdev_priv(ndev);
- if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) + if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) return -EOPNOTSUPP;
ec->rx_coalesce_usecs = fep->rx_time_itr; @@@ -2413,7 -2419,7 +2420,7 @@@ fec_enet_set_coalesce(struct net_devic struct fec_enet_private *fep = netdev_priv(ndev); unsigned int cycle;
- if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) + if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) return -EOPNOTSUPP;
if (ec->rx_max_coalesced_frames > 255) { @@@ -2819,9 -2825,6 +2826,9 @@@ fec_enet_open(struct net_device *ndev if (ret) goto err_enet_mii_probe;
+ if (fep->quirks & FEC_QUIRK_ERR006687) + imx6q_cpuidle_fec_irqs_used(); + napi_enable(&fep->napi); phy_start(ndev->phydev); netif_tx_start_all_queues(ndev); @@@ -2857,9 -2860,6 +2864,9 @@@ fec_enet_close(struct net_device *ndev
phy_disconnect(ndev->phydev);
+ if (fep->quirks & FEC_QUIRK_ERR006687) + imx6q_cpuidle_fec_irqs_unused(); + fec_enet_clk_enable(ndev, false); pinctrl_pm_select_sleep_state(&fep->pdev->dev); pm_runtime_mark_last_busy(&fep->pdev->dev); @@@ -3198,7 -3198,12 +3205,12 @@@ static void fec_reset_phy(struct platfo dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); return; } - msleep(msec); + + if (msec > 20) + msleep(msec); + else + usleep_range(msec * 1000, msec * 1000 + 1000); + gpio_set_value_cansleep(phy_reset, !active_high); } #else /* CONFIG_OF */ @@@ -3299,11 -3304,6 +3311,11 @@@ fec_probe(struct platform_device *pdev
platform_set_drvdata(pdev, ndev);
+ if ((of_machine_is_compatible("fsl,imx6q") || + of_machine_is_compatible("fsl,imx6dl")) && + !of_property_read_bool(np, "fsl,err006687-workaround-present")) + fep->quirks |= FEC_QUIRK_ERR006687; + if (of_get_property(np, "fsl,magic-packet", NULL)) fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
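The fec_reset_phy() hunk above replaces an unconditional msleep() with a length-dependent choice. This follows the kernel's timer guidance: msleep() is jiffy-based and can oversleep badly for waits under roughly 20 ms, while usleep_range() uses hrtimers and gives the scheduler a coalescing window. The pattern in isolation (example_delay_ms is illustrative):

    #include <linux/delay.h>

    static void example_delay_ms(unsigned int msec)
    {
        if (msec > 20)
            msleep(msec);               /* long waits: jiffy resolution is fine */
        else
            usleep_range(msec * 1000,   /* short waits: hrtimer-based */
                         msec * 1000 + 1000);
    }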
diff --combined drivers/net/ethernet/intel/e1000e/netdev.c index 0382de0,41f32c0..02f4439 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@@ -4352,7 -4352,8 +4352,8 @@@ static cycle_t e1000e_cyclecounter_read
time_delta = systim_next - systim; temp = time_delta; - rem = do_div(temp, incvalue); + /* VMWare users have seen incvalue of zero, don't div / 0 */ + rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
systim = systim_next;
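do_div() performs a 64-by-32-bit division and must never see a zero divisor; emulated e1000e devices under VMware have been observed to report an increment value of zero, which previously crashed this path. The guard falls back to treating any nonzero delta as a pending remainder, so the surrounding retry loop keeps working. In isolation (name illustrative):

    #include <asm/div64.h>

    static u32 example_div_rem(u64 time_delta, u32 incvalue)
    {
        u64 temp = time_delta;

        /* do_div() divides temp in place and returns the remainder;
         * with a zero divisor, report "remainder present" for any
         * nonzero delta instead of dividing.
         */
        return incvalue ? do_div(temp, incvalue) : (time_delta != 0);
    }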
@@@ -7329,7 -7330,8 +7330,7 @@@ err_flashmap err_ioremap: free_netdev(netdev); err_alloc_etherdev: - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); @@@ -7396,7 -7398,8 +7397,7 @@@ static void e1000_remove(struct pci_de if ((adapter->hw.flash_address) && (adapter->hw.mac.type < e1000_pch_spt)) iounmap(adapter->hw.flash_address); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev);
free_netdev(netdev);
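This is the first of several conversions in this merge (e1000e, fm10k, igb, i40e, ixgbe) to the new pci_request_mem_regions()/pci_release_mem_regions() helpers, which wrap the select-all-memory-BARs idiom the drivers used to spell out by hand; note that some of the probe error messages below still name the old function. The request-side wrapper is roughly:

    #include <linux/pci.h>

    static inline int example_request_mem_regions(struct pci_dev *pdev,
                                                  const char *name)
    {
        return pci_request_selected_regions(pdev,
                    pci_select_bars(pdev, IORESOURCE_MEM), name);
    }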
diff --combined drivers/net/ethernet/intel/fm10k/fm10k_pci.c index a5bccc80,b8245c7..774a565 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@@ -123,11 -123,24 +123,24 @@@ static void fm10k_service_timer(unsigne static void fm10k_detach_subtask(struct fm10k_intfc *interface) { struct net_device *netdev = interface->netdev; + u32 __iomem *hw_addr; + u32 value;
/* do nothing if device is still present or hw_addr is set */ if (netif_device_present(netdev) || interface->hw.hw_addr) return;
+ /* check the real address space to see if we've recovered */ + hw_addr = READ_ONCE(interface->uc_addr); + value = readl(hw_addr); + if ((~value)) { + interface->hw.hw_addr = interface->uc_addr; + netif_device_attach(netdev); + interface->flags |= FM10K_FLAG_RESET_REQUESTED; + netdev_warn(netdev, "PCIe link restored, device now attached\n"); + return; + } + rtnl_lock();
if (netif_running(netdev)) @@@ -136,11 -149,9 +149,9 @@@ rtnl_unlock(); }
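The recovery test above relies on a PCIe property: reads from a device that has dropped off the bus complete with all-ones, so a readl() returning anything with a zero bit (hence the (~value) test) means the BAR is responding again and the netdev can be re-attached behind a requested reset. As a standalone check (name illustrative):

    #include <linux/io.h>

    static bool example_pcie_responding(u32 __iomem *bar)
    {
        /* A surprise-removed endpoint reads back as 0xffffffff. */
        return readl(bar) != 0xffffffff;
    }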
- static void fm10k_reinit(struct fm10k_intfc *interface) + static void fm10k_prepare_for_reset(struct fm10k_intfc *interface) { struct net_device *netdev = interface->netdev; - struct fm10k_hw *hw = &interface->hw; - int err;
WARN_ON(in_interrupt());
@@@ -165,6 -176,19 +176,19 @@@ /* delay any future reset requests */ interface->last_reset = jiffies + (10 * HZ);
+ rtnl_unlock(); + } + + static int fm10k_handle_reset(struct fm10k_intfc *interface) + { + struct net_device *netdev = interface->netdev; + struct fm10k_hw *hw = &interface->hw; + int err; + + rtnl_lock(); + + pci_set_master(interface->pdev); + /* reset and initialize the hardware so it is in a known state */ err = hw->mac.ops.reset_hw(hw); if (err) { @@@ -185,7 -209,7 +209,7 @@@ goto reinit_err; }
- /* reassociate interrupts */ + /* re-associate interrupts */ err = fm10k_mbx_request_irq(interface); if (err) goto err_mbx_irq; @@@ -219,7 -243,7 +243,7 @@@
clear_bit(__FM10K_RESETTING, &interface->state);
- return; + return err; err_open: fm10k_mbx_free_irq(interface); err_mbx_irq: @@@ -230,6 -254,20 +254,20 @@@ reinit_err rtnl_unlock();
clear_bit(__FM10K_RESETTING, &interface->state); + + return err; + } + + static void fm10k_reinit(struct fm10k_intfc *interface) + { + int err; + + fm10k_prepare_for_reset(interface); + + err = fm10k_handle_reset(interface); + if (err) + dev_err(&interface->pdev->dev, + "fm10k_handle_reset failed: %d\n", err); }
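Splitting fm10k_reinit() into fm10k_prepare_for_reset() and fm10k_handle_reset() is the enabler for the rest of this patch: the same quiesce half and bring-up half are reused below by fm10k_prepare_suspend()/fm10k_handle_resume() and by the new FLR reset_notify hook. Note the bring-up half now calls pci_set_master(), since the resume-style paths can reach it with bus mastering disabled. The composition, sketched:

    static int example_function_reset(struct fm10k_intfc *interface)
    {
        fm10k_prepare_for_reset(interface);   /* quiesce, under rtnl */
        return fm10k_handle_reset(interface); /* re-init hw, reopen netdev */
    }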
static void fm10k_reset_subtask(struct fm10k_intfc *interface) @@@ -372,12 -410,19 +410,19 @@@ void fm10k_update_stats(struct fm10k_in u64 bytes, pkts; int i;
+ /* ensure only one thread updates stats at a time */ + if (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state)) + return; + /* do not allow stats update via service task for next second */ interface->next_stats_update = jiffies + HZ;
/* gather some stats to the interface struct that are per queue */ for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) { - struct fm10k_ring *tx_ring = interface->tx_ring[i]; + struct fm10k_ring *tx_ring = READ_ONCE(interface->tx_ring[i]); + + if (!tx_ring) + continue;
restart_queue += tx_ring->tx_stats.restart_queue; tx_busy += tx_ring->tx_stats.tx_busy; @@@ -396,7 -441,10 +441,10 @@@
/* gather some stats to the interface struct that are per queue */ for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) { - struct fm10k_ring *rx_ring = interface->rx_ring[i]; + struct fm10k_ring *rx_ring = READ_ONCE(interface->rx_ring[i]); + + if (!rx_ring) + continue;
bytes += rx_ring->stats.bytes; pkts += rx_ring->stats.packets; @@@ -443,6 -491,8 +491,8 @@@ /* Fill out the OS statistics structure */ net_stats->rx_errors = rx_errors; net_stats->rx_dropped = interface->stats.nodesc_drop.count; + + clear_bit(__FM10K_UPDATING_STATS, &interface->state); }
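Two protections are layered here: the new __FM10K_UPDATING_STATS bit, taken with test_and_set_bit(), keeps the service task and the down/reset paths from walking the rings concurrently (the down path below waits for the bit, while the service task simply skips the update), and READ_ONCE() on each ring pointer plus the NULL check tolerates rings being torn down by a reset mid-walk. The combined pattern, sketched:

    static void example_update_stats(struct fm10k_intfc *interface)
    {
        struct fm10k_ring *ring;
        int i;

        if (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state))
            return;    /* someone else is mid-update */

        for (i = 0; i < interface->num_tx_queues; i++) {
            ring = READ_ONCE(interface->tx_ring[i]);
            if (!ring)
                continue;    /* being freed by a reset */
            /* ... accumulate ring->tx_stats ... */
        }

        clear_bit(__FM10K_UPDATING_STATS, &interface->state);
    }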
/** @@@ -1566,6 -1616,9 +1616,9 @@@ void fm10k_up(struct fm10k_intfc *inter /* configure interrupts */ hw->mac.ops.update_int_moderator(hw);
+ /* enable statistics capture again */ + clear_bit(__FM10K_UPDATING_STATS, &interface->state); + /* clear down bit to indicate we are ready to go */ clear_bit(__FM10K_DOWN, &interface->state);
@@@ -1598,10 -1651,11 +1651,11 @@@ void fm10k_down(struct fm10k_intfc *int { struct net_device *netdev = interface->netdev; struct fm10k_hw *hw = &interface->hw; - int err; + int err, i = 0, count = 0;
/* signal that we are down to the interrupt handler and service task */ - set_bit(__FM10K_DOWN, &interface->state); + if (test_and_set_bit(__FM10K_DOWN, &interface->state)) + return;
/* call carrier off first to avoid false dev_watchdog timeouts */ netif_carrier_off(netdev); @@@ -1613,18 -1667,57 +1667,57 @@@ /* reset Rx filters */ fm10k_reset_rx_state(interface);
- /* allow 10ms for device to quiesce */ - usleep_range(10000, 20000); - /* disable polling routines */ fm10k_napi_disable_all(interface);
/* capture stats one last time before stopping interface */ fm10k_update_stats(interface);
+ /* prevent updating statistics while we're down */ + while (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state)) + usleep_range(1000, 2000); + + /* skip waiting for TX DMA if we lost PCIe link */ + if (FM10K_REMOVED(hw->hw_addr)) + goto skip_tx_dma_drain; + + /* In some rare circumstances it can take a while for Tx queues to + * quiesce and be fully disabled. Attempt to .stop_hw() first, and + * then if we get ERR_REQUESTS_PENDING, go ahead and wait in a loop + * until the Tx queues have emptied, or until a number of retries. If + * we fail to clear within the retry loop, we will issue a warning + * indicating that Tx DMA is probably hung. Note this means we call + * .stop_hw() twice but this shouldn't cause any problems. + */ + err = hw->mac.ops.stop_hw(hw); + if (err != FM10K_ERR_REQUESTS_PENDING) + goto skip_tx_dma_drain; + + #define TX_DMA_DRAIN_RETRIES 25 + for (count = 0; count < TX_DMA_DRAIN_RETRIES; count++) { + usleep_range(10000, 20000); + + /* start checking at the last ring to have pending Tx */ + for (; i < interface->num_tx_queues; i++) + if (fm10k_get_tx_pending(interface->tx_ring[i])) + break; + + /* if all the queues are drained, we can break now */ + if (i == interface->num_tx_queues) + break; + } + + if (count >= TX_DMA_DRAIN_RETRIES) + dev_err(&interface->pdev->dev, + "Tx queues failed to drain after %d tries. Tx DMA is probably hung.\n", + count); + skip_tx_dma_drain: /* Disable DMA engine for Tx/Rx */ err = hw->mac.ops.stop_hw(hw); - if (err) + if (err == FM10K_ERR_REQUESTS_PENDING) + dev_err(&interface->pdev->dev, + "due to pending requests hw was not shut down gracefully\n"); + else if (err) dev_err(&interface->pdev->dev, "stop_hw failed: %d\n", err);
/* free any buffers still on the rings */ @@@ -1750,6 -1843,7 +1843,7 @@@ static int fm10k_sw_init(struct fm10k_i
/* Start off interface as being down */ set_bit(__FM10K_DOWN, &interface->state); + set_bit(__FM10K_UPDATING_STATS, &interface->state);
return 0; } @@@ -1869,7 -1963,10 +1963,7 @@@ static int fm10k_probe(struct pci_dev * goto err_dma; }
- err = pci_request_selected_regions(pdev, - pci_select_bars(pdev, - IORESOURCE_MEM), - fm10k_driver_name); + err = pci_request_mem_regions(pdev, fm10k_driver_name); if (err) { dev_err(&pdev->dev, "pci_request_selected_regions failed: %d\n", err); @@@ -1973,7 -2070,8 +2067,7 @@@ err_sw_init err_ioremap: free_netdev(netdev); err_alloc_netdev: - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); @@@ -2021,13 -2119,56 +2115,55 @@@ static void fm10k_remove(struct pci_de
free_netdev(netdev);
- pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev); }
+ static void fm10k_prepare_suspend(struct fm10k_intfc *interface) + { + /* the watchdog task reads from registers, which might appear like + * a surprise remove if the PCIe device is disabled while we're + * stopped. We stop the watchdog task until after we resume software + * activity. + */ + set_bit(__FM10K_SERVICE_DISABLE, &interface->state); + cancel_work_sync(&interface->service_task); + + fm10k_prepare_for_reset(interface); + } + + static int fm10k_handle_resume(struct fm10k_intfc *interface) + { + struct fm10k_hw *hw = &interface->hw; + int err; + + /* reset statistics starting values */ + hw->mac.ops.rebind_hw_stats(hw, &interface->stats); + + err = fm10k_handle_reset(interface); + if (err) + return err; + + /* assume host is not ready, to prevent race with watchdog in case we + * actually don't have connection to the switch + */ + interface->host_ready = false; + fm10k_watchdog_host_not_ready(interface); + + /* force link to stay down for a second to prevent link flutter */ + interface->link_down_event = jiffies + (HZ); + set_bit(__FM10K_LINK_DOWN, &interface->state); + + /* clear the service task disable bit to allow service task to start */ + clear_bit(__FM10K_SERVICE_DISABLE, &interface->state); + fm10k_service_event_schedule(interface); + + return err; + } + #ifdef CONFIG_PM /** * fm10k_resume - Restore device to pre-sleep state @@@ -2064,60 -2205,13 +2200,13 @@@ static int fm10k_resume(struct pci_dev /* refresh hw_addr in case it was dropped */ hw->hw_addr = interface->uc_addr;
- /* reset hardware to known state */ - err = hw->mac.ops.init_hw(&interface->hw); - if (err) { - dev_err(&pdev->dev, "init_hw failed: %d\n", err); - return err; - } - - /* reset statistics starting values */ - hw->mac.ops.rebind_hw_stats(hw, &interface->stats); - - rtnl_lock(); - - err = fm10k_init_queueing_scheme(interface); - if (err) - goto err_queueing_scheme; - - err = fm10k_mbx_request_irq(interface); - if (err) - goto err_mbx_irq; - - err = fm10k_hw_ready(interface); + err = fm10k_handle_resume(interface); if (err) - goto err_open; - - err = netif_running(netdev) ? fm10k_open(netdev) : 0; - if (err) - goto err_open; - - rtnl_unlock(); - - /* assume host is not ready, to prevent race with watchdog in case we - * actually don't have connection to the switch - */ - interface->host_ready = false; - fm10k_watchdog_host_not_ready(interface); - - /* clear the service task disable bit to allow service task to start */ - clear_bit(__FM10K_SERVICE_DISABLE, &interface->state); - fm10k_service_event_schedule(interface); - - /* restore SR-IOV interface */ - fm10k_iov_resume(pdev); + return err;
netif_device_attach(netdev);
return 0; - err_open: - fm10k_mbx_free_irq(interface); - err_mbx_irq: - fm10k_clear_queueing_scheme(interface); - err_queueing_scheme: - rtnl_unlock(); - - return err; }
/** @@@ -2137,27 -2231,7 +2226,7 @@@ static int fm10k_suspend(struct pci_de
netif_device_detach(netdev);
- fm10k_iov_suspend(pdev); - - /* the watchdog tasks may read registers, which will appear like a - * surprise-remove event once the PCI device is disabled. This will - * cause us to close the netdevice, so we don't retain the open/closed - * state post-resume. Prevent this by disabling the service task while - * suspended, until we actually resume. - */ - set_bit(__FM10K_SERVICE_DISABLE, &interface->state); - cancel_work_sync(&interface->service_task); - - rtnl_lock(); - - if (netif_running(netdev)) - fm10k_close(netdev); - - fm10k_mbx_free_irq(interface); - - fm10k_clear_queueing_scheme(interface); - - rtnl_unlock(); + fm10k_prepare_suspend(interface);
err = pci_save_state(pdev); if (err) @@@ -2190,17 -2264,7 +2259,7 @@@ static pci_ers_result_t fm10k_io_error_ if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT;
- rtnl_lock(); - - if (netif_running(netdev)) - fm10k_close(netdev); - - fm10k_mbx_free_irq(interface); - - /* free interrupts */ - fm10k_clear_queueing_scheme(interface); - - rtnl_unlock(); + fm10k_prepare_suspend(interface);
/* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; @@@ -2214,7 -2278,6 +2273,6 @@@ */ static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev) { - struct fm10k_intfc *interface = pci_get_drvdata(pdev); pci_ers_result_t result;
if (pci_enable_device_mem(pdev)) { @@@ -2232,12 -2295,6 +2290,6 @@@
pci_wake_from_d3(pdev, false);
- /* refresh hw_addr in case it was dropped */ - interface->hw.hw_addr = interface->uc_addr; - - interface->flags |= FM10K_FLAG_RESET_REQUESTED; - fm10k_service_event_schedule(interface); - result = PCI_ERS_RESULT_RECOVERED; }
@@@ -2257,50 -2314,54 +2309,54 @@@ static void fm10k_io_resume(struct pci_ { struct fm10k_intfc *interface = pci_get_drvdata(pdev); struct net_device *netdev = interface->netdev; - struct fm10k_hw *hw = &interface->hw; - int err = 0; - - /* reset hardware to known state */ - err = hw->mac.ops.init_hw(&interface->hw); - if (err) { - dev_err(&pdev->dev, "init_hw failed: %d\n", err); - return; - } - - /* reset statistics starting values */ - hw->mac.ops.rebind_hw_stats(hw, &interface->stats); - - rtnl_lock(); + int err;
- err = fm10k_init_queueing_scheme(interface); - if (err) { - dev_err(&interface->pdev->dev, - "init_queueing_scheme failed: %d\n", err); - goto unlock; - } + err = fm10k_handle_resume(interface);
- /* reassociate interrupts */ - fm10k_mbx_request_irq(interface); + if (err) + dev_warn(&pdev->dev, + "fm10k_io_resume failed: %d\n", err); + else + netif_device_attach(netdev); + }
- rtnl_lock(); - if (netif_running(netdev)) - err = fm10k_open(netdev); - rtnl_unlock(); + /** + * fm10k_io_reset_notify - called when PCI function is reset + * @pdev: Pointer to PCI device + * + * This callback is called when the PCI function is reset such as from + * /sys/class/net/<enpX>/device/reset or similar. When prepare is true, it + * means we should prepare for a function reset. If prepare is false, it means + * the function reset just occurred. + */ + static void fm10k_io_reset_notify(struct pci_dev *pdev, bool prepare) + { + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + int err = 0;
- /* final check of hardware state before registering the interface */ - err = err ? : fm10k_hw_ready(interface); + if (prepare) { + /* warn in case we have any active VF devices */ + if (pci_num_vf(pdev)) + dev_warn(&pdev->dev, + "PCIe FLR may cause issues for any active VF devices\n");
- if (!err) - netif_device_attach(netdev); + fm10k_prepare_suspend(interface); + } else { + err = fm10k_handle_resume(interface); + }
- unlock: - rtnl_unlock(); + if (err) { + dev_warn(&pdev->dev, + "fm10k_io_reset_notify failed: %d\n", err); + netif_device_detach(interface->netdev); + } }
static const struct pci_error_handlers fm10k_err_handler = { .error_detected = fm10k_io_error_detected, .slot_reset = fm10k_io_slot_reset, .resume = fm10k_io_resume, + .reset_notify = fm10k_io_reset_notify, };
static struct pci_driver fm10k_driver = { diff --combined drivers/net/ethernet/intel/i40e/i40e_main.c index eb09f60,2b11405..aceb2bb --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@@ -31,12 -31,7 +31,7 @@@ /* Local includes */ #include "i40e.h" #include "i40e_diag.h" - #if IS_ENABLED(CONFIG_VXLAN) - #include <net/vxlan.h> - #endif - #if IS_ENABLED(CONFIG_GENEVE) - #include <net/geneve.h> - #endif + #include <net/udp_tunnel.h>
const char i40e_driver_name[] = "i40e"; static const char i40e_driver_string[] = @@@ -45,8 -40,8 +40,8 @@@ #define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 1 - #define DRV_VERSION_MINOR 5 - #define DRV_VERSION_BUILD 16 + #define DRV_VERSION_MINOR 6 + #define DRV_VERSION_BUILD 4 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@@ -1344,13 -1339,6 +1339,13 @@@ struct i40e_mac_filter *i40e_add_filter if (!vsi || !macaddr) return NULL;
+ /* Do not allow broadcast filter to be added since broadcast filter + * is added as part of add VSI for any newly created VSI except + * FDIR VSI + */ + if (is_broadcast_ether_addr(macaddr)) + return NULL; + f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev); if (!f) { f = kzalloc(sizeof(*f), GFP_ATOMIC); @@@ -1591,14 -1579,8 +1586,8 @@@ static void i40e_vsi_setup_queue_map(st vsi->tc_config.numtc = numtc; vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; /* Number of queues per enabled TC */ - /* In MFP case we can have a much lower count of MSIx - * vectors available and so we need to lower the used - * q count. - */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) - qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix); - else - qcount = vsi->alloc_queue_pairs; + qcount = vsi->alloc_queue_pairs; + num_tc_qps = qcount / numtc; num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
@@@ -1852,8 -1834,10 +1841,10 @@@ int i40e_sync_vsi_filters(struct i40e_v { struct list_head tmp_del_list, tmp_add_list; struct i40e_mac_filter *f, *ftmp, *fclone; + struct i40e_hw *hw = &vsi->back->hw; bool promisc_forced_on = false; bool add_happened = false; + char vsi_name[16] = "PF"; int filter_list_len = 0; u32 changed_flags = 0; i40e_status aq_ret = 0; @@@ -1881,6 -1865,11 +1872,11 @@@ INIT_LIST_HEAD(&tmp_del_list); INIT_LIST_HEAD(&tmp_add_list);
+ if (vsi->type == I40E_VSI_SRIOV) + snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id); + else if (vsi->type != I40E_VSI_MAIN) + snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid); + if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
@@@ -1932,7 -1921,7 +1928,7 @@@ if (!list_empty(&tmp_del_list)) { int del_list_size;
- filter_list_len = pf->hw.aq.asq_buf_size / + filter_list_len = hw->aq.asq_buf_size / sizeof(struct i40e_aqc_remove_macvlan_element_data); del_list_size = filter_list_len * sizeof(struct i40e_aqc_remove_macvlan_element_data); @@@ -1964,21 -1953,21 +1960,21 @@@
/* flush a full buffer */ if (num_del == filter_list_len) { - aq_ret = i40e_aq_remove_macvlan(&pf->hw, - vsi->seid, - del_list, - num_del, - NULL); - aq_err = pf->hw.aq.asq_last_status; + aq_ret = + i40e_aq_remove_macvlan(hw, vsi->seid, + del_list, + num_del, NULL); + aq_err = hw->aq.asq_last_status; num_del = 0; memset(del_list, 0, del_list_size);
if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) { retval = -EIO; dev_err(&pf->pdev->dev, - "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n", - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); + "ignoring delete macvlan error on %s, err %s, aq_err %s while flushing a full buffer\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, aq_err)); } } /* Release memory for MAC filter entries which were @@@ -1989,17 -1978,17 +1985,17 @@@ }
if (num_del) { - aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, - del_list, num_del, - NULL); - aq_err = pf->hw.aq.asq_last_status; + aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list, + num_del, NULL); + aq_err = hw->aq.asq_last_status; num_del = 0;
if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) dev_info(&pf->pdev->dev, - "ignoring delete macvlan error, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); + "ignoring delete macvlan error on %s, err %s aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, aq_err)); }
kfree(del_list); @@@ -2010,7 -1999,7 +2006,7 @@@ int add_list_size;
/* do all the adds now */ - filter_list_len = pf->hw.aq.asq_buf_size / + filter_list_len = hw->aq.asq_buf_size / sizeof(struct i40e_aqc_add_macvlan_element_data), add_list_size = filter_list_len * sizeof(struct i40e_aqc_add_macvlan_element_data); @@@ -2045,10 -2034,10 +2041,10 @@@
/* flush a full buffer */ if (num_add == filter_list_len) { - aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + aq_ret = i40e_aq_add_macvlan(hw, vsi->seid, add_list, num_add, NULL); - aq_err = pf->hw.aq.asq_last_status; + aq_err = hw->aq.asq_last_status; num_add = 0;
if (aq_ret) @@@ -2063,9 -2052,9 +2059,9 @@@ }
if (num_add) { - aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + aq_ret = i40e_aq_add_macvlan(hw, vsi->seid, add_list, num_add, NULL); - aq_err = pf->hw.aq.asq_last_status; + aq_err = hw->aq.asq_last_status; num_add = 0; } kfree(add_list); @@@ -2074,16 -2063,18 +2070,18 @@@ if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) { retval = i40e_aq_rc_to_posix(aq_ret, aq_err); dev_info(&pf->pdev->dev, - "add filter failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); - if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && + "add filter failed on %s, err %s aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, aq_err)); + if ((hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) && !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)) { promisc_forced_on = true; set_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); - dev_info(&pf->pdev->dev, "promiscuous mode forced on\n"); + dev_info(&pf->pdev->dev, "promiscuous mode forced on %s\n", + vsi_name); } } } @@@ -2105,12 -2096,12 +2103,12 @@@ NULL); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, - pf->hw.aq.asq_last_status); + hw->aq.asq_last_status); dev_info(&pf->pdev->dev, - "set multi promisc failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + "set multi promisc failed on %s, err %s aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); } } if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { @@@ -2129,35 -2120,72 +2127,60 @@@ */ if (pf->cur_promisc != cur_promisc) { pf->cur_promisc = cur_promisc; - set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + if (cur_promisc) + aq_ret = + i40e_aq_set_default_vsi(hw, + vsi->seid, + NULL); + else + aq_ret = + i40e_aq_clear_default_vsi(hw, + vsi->seid, + NULL); + if (aq_ret) { + retval = i40e_aq_rc_to_posix(aq_ret, + hw->aq.asq_last_status); + dev_info(&pf->pdev->dev, + "Set default VSI failed on %s, err %s, aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, + hw->aq.asq_last_status)); + } } } else { aq_ret = i40e_aq_set_vsi_unicast_promiscuous( - &vsi->back->hw, + hw, vsi->seid, cur_promisc, NULL, true); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, - pf->hw.aq.asq_last_status); + hw->aq.asq_last_status); dev_info(&pf->pdev->dev, - "set unicast promisc failed, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "set unicast promisc failed on %s, err %s, aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, + hw->aq.asq_last_status)); } aq_ret = i40e_aq_set_vsi_multicast_promiscuous( - &vsi->back->hw, + hw, vsi->seid, cur_promisc, NULL); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, - pf->hw.aq.asq_last_status); + hw->aq.asq_last_status); dev_info(&pf->pdev->dev, - "set multicast promisc failed, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "set multicast promisc failed on %s, err %s, aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, + hw->aq.asq_last_status)); } } - aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, - vsi->seid, - cur_promisc, NULL); - if (aq_ret) { - retval = i40e_aq_rc_to_posix(aq_ret, - pf->hw.aq.asq_last_status); - dev_info(&pf->pdev->dev, - "set brdcast promisc failed, err %s, aq_err %s\n", - i40e_stat_str(hw, aq_ret), - i40e_aq_str(hw, - hw->aq.asq_last_status)); - } } out: /* if something went wrong then set the changed flag so we try again */ @@@ -3947,6 -3975,7 +3970,7 @@@ static void 
i40e_vsi_free_irq(struct i4 /* clear the affinity_mask in the IRQ descriptor */ irq_set_affinity_hint(pf->msix_entries[vector].vector, NULL); + synchronize_irq(pf->msix_entries[vector].vector); free_irq(pf->msix_entries[vector].vector, vsi->q_vectors[i]);
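The added synchronize_irq() waits for any handler instance still running on another CPU to finish before the vector's resources are torn down, closing the window between clearing the affinity hint and freeing the IRQ. The teardown ordering in isolation (names illustrative):

    #include <linux/interrupt.h>

    static void example_free_vector(unsigned int irq, void *data)
    {
        irq_set_affinity_hint(irq, NULL);  /* drop the CPU hint */
        synchronize_irq(irq);              /* let in-flight handlers finish */
        free_irq(irq, data);
    }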
@@@ -4953,7 -4982,6 +4977,6 @@@ static void i40e_dcb_reconfigure(struc if (pf->vsi[v]->netdev) i40e_dcbnl_set_all(pf->vsi[v]); } - i40e_notify_client_of_l2_param_changes(pf->vsi[v]); } }
@@@ -5178,12 -5206,6 +5201,6 @@@ static void i40e_vsi_reinit_locked(stru usleep_range(1000, 2000); i40e_down(vsi);
- /* Give a VF some time to respond to the reset. The - * two second wait is based upon the watchdog cycle in - * the VF driver. - */ - if (vsi->type == I40E_VSI_SRIOV) - msleep(2000); i40e_up(vsi); clear_bit(__I40E_CONFIG_BUSY, &pf->state); } @@@ -5226,6 -5248,9 +5243,9 @@@ void i40e_down(struct i40e_vsi *vsi i40e_clean_tx_ring(vsi->tx_rings[i]); i40e_clean_rx_ring(vsi->rx_rings[i]); } + + i40e_notify_client_of_netdev_close(vsi, false); + }
/** @@@ -5337,14 -5362,7 +5357,7 @@@ int i40e_open(struct net_device *netdev TCP_FLAG_CWR) >> 16); wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
- #ifdef CONFIG_I40E_VXLAN - vxlan_get_rx_port(netdev); - #endif - #ifdef CONFIG_I40E_GENEVE - if (pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE) - geneve_get_rx_port(netdev); - #endif - + udp_tunnel_get_rx_info(netdev); i40e_notify_client_of_netdev_open(vsi);
return 0; @@@ -5711,6 -5729,8 +5724,8 @@@ static int i40e_handle_lldp_event(struc i40e_service_event_schedule(pf); } else { i40e_pf_unquiesce_all_vsi(pf); + /* Notify the client for the DCB changes */ + i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]); }
exit: @@@ -5935,7 -5955,6 +5950,6 @@@ static void i40e_fdir_flush_and_replay( if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); } - }
/** @@@ -7052,7 -7071,6 +7066,6 @@@ static void i40e_handle_mdd_event(struc **/ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) { - #if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) struct i40e_hw *hw = &pf->hw; i40e_status ret; __be16 port; @@@ -7087,7 -7105,6 +7100,6 @@@ } } } - #endif }
/** @@@ -7169,7 -7186,7 +7181,7 @@@ static int i40e_set_num_rings_in_vsi(st vsi->alloc_queue_pairs = 1; vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT, I40E_REQ_DESCRIPTOR_MULTIPLE); - vsi->num_q_vectors = 1; + vsi->num_q_vectors = pf->num_fdsb_msix; break;
case I40E_VSI_VMDQ2: @@@ -7553,9 -7570,11 +7565,11 @@@ static int i40e_init_msix(struct i40e_p /* reserve one vector for sideband flow director */ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { if (vectors_left) { + pf->num_fdsb_msix = 1; v_budget++; vectors_left--; } else { + pf->num_fdsb_msix = 0; pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; } } @@@ -7721,11 -7740,10 +7735,11 @@@ * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector * @vsi: the VSI being configured * @v_idx: index of the vector in the vsi struct + * @cpu: cpu to be used on affinity_mask * * We allocate one q_vector. If allocation fails we return -ENOMEM. **/ -static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) +static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu) { struct i40e_q_vector *q_vector;
@@@ -7736,8 -7754,7 +7750,8 @@@
q_vector->vsi = vsi; q_vector->v_idx = v_idx; - cpumask_set_cpu(v_idx, &q_vector->affinity_mask); + cpumask_set_cpu(cpu, &q_vector->affinity_mask); + if (vsi->netdev) netif_napi_add(vsi->netdev, &q_vector->napi, i40e_napi_poll, NAPI_POLL_WEIGHT); @@@ -7761,7 -7778,8 +7775,7 @@@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; - int v_idx, num_q_vectors; - int err; + int err, v_idx, num_q_vectors, current_cpu;
/* if not MSIX, give the one vector only to the LAN VSI */ if (pf->flags & I40E_FLAG_MSIX_ENABLED) @@@ -7771,15 -7789,10 +7785,15 @@@ else return -EINVAL;
+ current_cpu = cpumask_first(cpu_online_mask); + for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { - err = i40e_vsi_alloc_q_vector(vsi, v_idx); + err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu); if (err) goto err_out; + current_cpu = cpumask_next(current_cpu, cpu_online_mask); + if (unlikely(current_cpu >= nr_cpu_ids)) + current_cpu = cpumask_first(cpu_online_mask); }
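Instead of hinting every q_vector at CPU v_idx, which breaks down on machines with sparse or few online CPUs, the allocation loop now walks cpu_online_mask and wraps around when it runs out. The iteration step in isolation:

    #include <linux/cpumask.h>

    static int example_next_online_cpu(int cpu)
    {
        cpu = cpumask_next(cpu, cpu_online_mask);
        return cpu < nr_cpu_ids ? cpu : cpumask_first(cpu_online_mask);
    }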
return 0; @@@ -8580,7 -8593,9 +8594,9 @@@ bool i40e_set_ntuple(struct i40e_pf *pf /* Enable filters and mark for reset */ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) need_reset = true; - pf->flags |= I40E_FLAG_FD_SB_ENABLED; + /* enable FD_SB only if there is MSI-X vector */ + if (pf->num_fdsb_msix > 0) + pf->flags |= I40E_FLAG_FD_SB_ENABLED; } else { /* turn off filters, mark for reset and clear SW filter list */ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { @@@ -8629,7 -8644,6 +8645,6 @@@ static int i40e_set_features(struct net return 0; }
- #if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) /** * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port * @pf: board private structure @@@ -8649,21 -8663,18 +8664,18 @@@ static u8 i40e_get_udp_port_idx(struct return i; }
- #endif - - #if IS_ENABLED(CONFIG_VXLAN) /** - * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up + * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up * @netdev: This physical port's netdev - * @sa_family: Socket Family that VXLAN is notifying us about - * @port: New UDP port number that VXLAN started listening to + * @ti: Tunnel endpoint information **/ - static void i40e_add_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + static void i40e_udp_tunnel_add(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + __be16 port = ti->port; u8 next_idx; u8 idx;
@@@ -8671,7 -8682,7 +8683,7 @@@
/* Check if port already exists */ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "vxlan port %d already offloaded\n", + netdev_info(netdev, "port %d already offloaded\n", ntohs(port)); return; } @@@ -8680,131 -8691,75 +8692,75 @@@ next_idx = i40e_get_udp_port_idx(pf, 0);
if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n", - ntohs(port)); - return; - } - - /* New port: add it and mark its index in the bitmap */ - pf->udp_ports[next_idx].index = port; - pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; - pf->pending_udp_bitmap |= BIT_ULL(next_idx); - pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; - } - - /** - * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away - * @netdev: This physical port's netdev - * @sa_family: Socket Family that VXLAN is notifying us about - * @port: UDP port number that VXLAN stopped listening to - **/ - static void i40e_del_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) - { - struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; - u8 idx; - - idx = i40e_get_udp_port_idx(pf, port); - - /* Check if port already exists */ - if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - /* if port exists, set it to 0 (mark for deletion) - * and make it pending - */ - pf->udp_ports[idx].index = 0; - pf->pending_udp_bitmap |= BIT_ULL(idx); - pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; - } else { - netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", - ntohs(port)); - } - } - #endif - - #if IS_ENABLED(CONFIG_GENEVE) - /** - * i40e_add_geneve_port - Get notifications about GENEVE ports that come up - * @netdev: This physical port's netdev - * @sa_family: Socket Family that GENEVE is notifying us about - * @port: New UDP port number that GENEVE started listening to - **/ - static void i40e_add_geneve_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) - { - struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; - u8 next_idx; - u8 idx; - - if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) - return; - - idx = i40e_get_udp_port_idx(pf, port); - - /* Check if port already exists */ - if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "udp port %d already offloaded\n", + netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n", ntohs(port)); return; }
- /* Now check if there is space to add the new port */ - next_idx = i40e_get_udp_port_idx(pf, 0); - - if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "maximum number of UDP ports reached, not adding port %d\n", - ntohs(port)); + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) + return; + pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE; + break; + default: return; }
/* New port: add it and mark its index in the bitmap */ pf->udp_ports[next_idx].index = port; - pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE; pf->pending_udp_bitmap |= BIT_ULL(next_idx); pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; - - dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port)); }
/** - * i40e_del_geneve_port - Get notifications about GENEVE ports that go away + * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away * @netdev: This physical port's netdev - * @sa_family: Socket Family that GENEVE is notifying us about - * @port: UDP port number that GENEVE stopped listening to + * @ti: Tunnel endpoint information **/ - static void i40e_del_geneve_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + static void i40e_udp_tunnel_del(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + __be16 port = ti->port; u8 idx;
- if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) - return; - idx = i40e_get_udp_port_idx(pf, port);
/* Check if port already exists */ - if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - /* if port exists, set it to 0 (mark for deletion) - * and make it pending - */ - pf->udp_ports[idx].index = 0; - pf->pending_udp_bitmap |= BIT_ULL(idx); - pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; + if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS) + goto not_found;
- dev_info(&pf->pdev->dev, "deleting geneve port %d\n", - ntohs(port)); - } else { - netdev_warn(netdev, "geneve port %d was not found, not deleting\n", - ntohs(port)); + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN) + goto not_found; + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE) + goto not_found; + break; + default: + goto not_found; } + + /* if port exists, set it to 0 (mark for deletion) + * and make it pending + */ + pf->udp_ports[idx].index = 0; + pf->pending_udp_bitmap |= BIT_ULL(idx); + pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; + + return; + not_found: + netdev_warn(netdev, "UDP port %d was not found, not deleting\n", + ntohs(port)); } - #endif
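The per-protocol ndo_add/del_vxlan_port and ndo_add/del_geneve_port callbacks (and their CONFIG ifdefs) collapse into the generic UDP tunnel offload API: the protocol now arrives in struct udp_tunnel_info, a single callback pair dispatches on ti->type, and udp_tunnel_get_rx_info() at open time replays every known port. The callback shape, reduced to its skeleton:

    #include <net/udp_tunnel.h>

    static void example_udp_tunnel_add(struct net_device *dev,
                                       struct udp_tunnel_info *ti)
    {
        switch (ti->type) {
        case UDP_TUNNEL_TYPE_VXLAN:
            /* program ti->port as a VXLAN port */
            break;
        case UDP_TUNNEL_TYPE_GENEVE:
            /* program ti->port as a GENEVE port */
            break;
        default:
            return;
        }
    }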
static int i40e_get_phys_port_id(struct net_device *netdev, struct netdev_phys_item_id *ppid) @@@ -9225,7 -9174,6 +9175,7 @@@ int i40e_is_vsi_uplink_mode_veb(struct static int i40e_add_vsi(struct i40e_vsi *vsi) { int ret = -ENODEV; + i40e_status aq_ret = 0; u8 laa_macaddr[ETH_ALEN]; bool found_laa_mac_filter = false; struct i40e_pf *pf = vsi->back; @@@ -9415,18 -9363,6 +9365,18 @@@ vsi->seid = ctxt.seid; vsi->id = ctxt.vsi_number; } + /* Except FDIR VSI, for all other VSIs set the broadcast filter */ + if (vsi->type != I40E_VSI_FDIR) { + aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL); + if (aq_ret) { + ret = i40e_aq_rc_to_posix(aq_ret, + hw->aq.asq_last_status); + dev_info(&pf->pdev->dev, + "set brdcast promisc failed, err %s, aq_err %s\n", + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); + } + }
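This hunk is the counterpart of the i40e_add_filter() change earlier in the file: broadcast is no longer represented as a MAC filter at all, but switched on per VSI through the admin queue at creation time, which is why the filter path can simply refuse ff:ff:ff:ff:ff:ff. The division of labor, sketched:

    #include <linux/etherdevice.h>

    /* Filter-list path: broadcast entries are rejected outright... */
    static bool example_macaddr_filterable(const u8 *macaddr)
    {
        return !is_broadcast_ether_addr(macaddr);
    }
    /* ...while i40e_aq_set_vsi_broadcast(hw, seid, true, NULL) enables
     * broadcast as a VSI attribute for every VSI type except FDIR.
     */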
spin_lock_bh(&vsi->mac_filter_list_lock); /* If macvlan filters already exist, force them to get loaded */ @@@ -10147,14 -10083,14 +10097,14 @@@ void i40e_veb_release(struct i40e_veb * static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) { struct i40e_pf *pf = veb->pf; - bool is_default = veb->pf->cur_promisc; bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); int ret;
- /* get a VEB from the hardware */ ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, - veb->enabled_tc, is_default, + veb->enabled_tc, false, &veb->seid, enable_stats, NULL); + + /* get a VEB from the hardware */ if (ret) { dev_info(&pf->pdev->dev, "couldn't add VEB, err %s aq_err %s\n", @@@ -10703,12 -10639,8 +10653,8 @@@ static void i40e_print_features(struct } if (pf->flags & I40E_FLAG_DCB_CAPABLE) i += snprintf(&buf[i], REMAIN(i), " DCB"); - #if IS_ENABLED(CONFIG_VXLAN) i += snprintf(&buf[i], REMAIN(i), " VxLAN"); - #endif - #if IS_ENABLED(CONFIG_GENEVE) i += snprintf(&buf[i], REMAIN(i), " Geneve"); - #endif if (pf->flags & I40E_FLAG_PTP) i += snprintf(&buf[i], REMAIN(i), " PTP"); #ifdef I40E_FCOE @@@ -10783,7 -10715,8 +10729,7 @@@ static int i40e_probe(struct pci_dev *p }
/* set up pci connections */ - err = pci_request_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM), i40e_driver_name); + err = pci_request_mem_regions(pdev, i40e_driver_name); if (err) { dev_info(&pdev->dev, "pci_request_selected_regions failed %d\n", err); @@@ -11280,7 -11213,8 +11226,7 @@@ err_ioremap kfree(pf); err_pf_alloc: pci_disable_pcie_error_reporting(pdev); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); @@@ -11391,7 -11325,8 +11337,7 @@@ static void i40e_remove(struct pci_dev
iounmap(hw->hw_addr); kfree(pf); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev);
pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); @@@ -11536,6 -11471,7 +11482,7 @@@ static int i40e_suspend(struct pci_dev { struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_hw *hw = &pf->hw; + int retval = 0;
set_bit(__I40E_SUSPENDED, &pf->state); set_bit(__I40E_DOWN, &pf->state); @@@ -11547,10 -11483,16 +11494,16 @@@ wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+ i40e_stop_misc_vector(pf); + + retval = pci_save_state(pdev); + if (retval) + return retval; + pci_wake_from_d3(pdev, pf->wol_en); pci_set_power_state(pdev, PCI_D3hot);
- return 0; + return retval; }
/** diff --combined drivers/net/ethernet/intel/igb/igb_main.c index 1c96fe8,9bcba42..942a89f --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@@ -2027,7 -2027,8 +2027,8 @@@ void igb_reset(struct igb_adapter *adap wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
/* Re-enable PTP, where applicable. */ - igb_ptp_reset(adapter); + if (adapter->ptp_flags & IGB_PTP_ENABLED) + igb_ptp_reset(adapter);
igb_get_phy_info(hw); } @@@ -2323,7 -2324,9 +2324,7 @@@ static int igb_probe(struct pci_dev *pd } }
- err = pci_request_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM), - igb_driver_name); + err = pci_request_mem_regions(pdev, igb_driver_name); if (err) goto err_pci_reg;
@@@ -2747,7 -2750,8 +2748,7 @@@ err_sw_init err_ioremap: free_netdev(netdev); err_alloc_etherdev: - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); @@@ -2912,7 -2916,8 +2913,7 @@@ static void igb_remove(struct pci_dev * pci_iounmap(pdev, adapter->io_addr); if (hw->flash_address) iounmap(hw->flash_address); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev);
kfree(adapter->shadow_vfta); free_netdev(netdev); @@@ -6851,12 -6856,12 +6852,12 @@@ static bool igb_can_reuse_rx_page(struc **/ static bool igb_add_rx_frag(struct igb_ring *rx_ring, struct igb_rx_buffer *rx_buffer, + unsigned int size, union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) { struct page *page = rx_buffer->page; unsigned char *va = page_address(page) + rx_buffer->page_offset; - unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) unsigned int truesize = IGB_RX_BUFSZ; #else @@@ -6908,6 -6913,7 +6909,7 @@@ static struct sk_buff *igb_fetch_rx_buf union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) { + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); struct igb_rx_buffer *rx_buffer; struct page *page;
@@@ -6943,11 -6949,11 +6945,11 @@@ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, - IGB_RX_BUFSZ, + size, DMA_FROM_DEVICE);
/* pull page into skb */ - if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) { /* hand second half of page back to the ring */ igb_reuse_rx_page(rx_ring, rx_buffer); } else { @@@ -7523,6 -7529,8 +7525,8 @@@ static int __igb_shutdown(struct pci_de if (netif_running(netdev)) __igb_close(netdev, true);
+ igb_ptp_suspend(adapter); + igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PM diff --combined drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index baf4e46,918b94b..faddd8f --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@@ -50,7 -50,7 +50,7 @@@ #include <linux/if_bridge.h> #include <linux/prefetch.h> #include <scsi/fc/fc_fcoe.h> - #include <net/vxlan.h> + #include <net/udp_tunnel.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_gact.h> #include <net/tc_act/tc_mirred.h> @@@ -2887,7 -2887,7 +2887,7 @@@ int ixgbe_poll(struct napi_struct *napi if (!test_bit(__IXGBE_DOWN, &adapter->state)) ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
- return 0; + return min(work_done, budget - 1); }
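This one-liner fixes a NAPI contract violation: a poll routine that has called napi_complete() must return strictly less than its budget, but it should still report how much work it actually did rather than a flat 0, which skews the NAPI accounting. min(work_done, budget - 1) satisfies both. The contract, sketched:

    #include <linux/kernel.h>
    #include <linux/netdevice.h>

    static int example_poll(struct napi_struct *napi, int budget)
    {
        int work_done;

        work_done = 0;  /* ... clean RX/TX rings, counting up to budget ... */

        if (work_done >= budget)
            return budget;  /* more work pending: stay on the poll list */

        napi_complete(napi);
        /* ... re-enable interrupts ... */
        return min(work_done, budget - 1);  /* must be < budget here */
    }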
/** @@@ -5722,9 -5722,7 +5722,7 @@@ static int ixgbe_sw_init(struct ixgbe_a #ifdef CONFIG_IXGBE_DCA adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; #endif - #ifdef CONFIG_IXGBE_VXLAN adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE; - #endif break; default: break; @@@ -6158,9 -6156,7 +6156,7 @@@ int ixgbe_open(struct net_device *netde ixgbe_up_complete(adapter);
ixgbe_clear_vxlan_port(adapter); - #ifdef CONFIG_IXGBE_VXLAN - vxlan_get_rx_port(netdev); - #endif + udp_tunnel_get_rx_info(netdev);
return 0;
@@@ -7262,14 -7258,12 +7258,12 @@@ static void ixgbe_service_task(struct w ixgbe_service_event_complete(adapter); return; } - #ifdef CONFIG_IXGBE_VXLAN - rtnl_lock(); if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) { + rtnl_lock(); adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED; - vxlan_get_rx_port(adapter->netdev); + udp_tunnel_get_rx_info(adapter->netdev); + rtnl_unlock(); } - rtnl_unlock(); - #endif /* CONFIG_IXGBE_VXLAN */ ixgbe_reset_subtask(adapter); ixgbe_phy_interrupt_subtask(adapter); ixgbe_sfp_detection_subtask(adapter); @@@ -7697,7 -7691,6 +7691,6 @@@ static void ixgbe_atr(struct ixgbe_rin /* snag network header to get L4 type and address */ skb = first->skb; hdr.network = skb_network_header(skb); - #ifdef CONFIG_IXGBE_VXLAN if (skb->encapsulation && first->protocol == htons(ETH_P_IP) && hdr.ipv4->protocol != IPPROTO_UDP) { @@@ -7708,7 -7701,6 +7701,6 @@@ udp_hdr(skb)->dest == adapter->vxlan_port) hdr.network = skb_inner_network_header(skb); } - #endif /* CONFIG_IXGBE_VXLAN */
/* Currently only IPv4/IPv6 with TCP is supported */ switch (hdr.ipv4->version) { @@@ -8308,14 -8300,53 +8300,53 @@@ int ixgbe_setup_tc(struct net_device *d static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, struct tc_cls_u32_offload *cls) { + u32 hdl = cls->knode.handle; u32 uhtid = TC_U32_USERHTID(cls->knode.handle); - u32 loc; - int err; + u32 loc = cls->knode.handle & 0xfffff; + int err = 0, i, j; + struct ixgbe_jump_table *jump = NULL; + + if (loc > IXGBE_MAX_HW_ENTRIES) + return -EINVAL;
if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE)) return -EINVAL;
- loc = cls->knode.handle & 0xfffff; + /* Clear this filter in the link data it is associated with */ + if (uhtid != 0x800) { + jump = adapter->jump_tables[uhtid]; + if (!jump) + return -EINVAL; + if (!test_bit(loc - 1, jump->child_loc_map)) + return -EINVAL; + clear_bit(loc - 1, jump->child_loc_map); + } + + /* Check if the filter being deleted is a link */ + for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { + jump = adapter->jump_tables[i]; + if (jump && jump->link_hdl == hdl) { + /* Delete filters in the hardware in the child hash + * table associated with this link + */ + for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) { + if (!test_bit(j, jump->child_loc_map)) + continue; + spin_lock(&adapter->fdir_perfect_lock); + err = ixgbe_update_ethtool_fdir_entry(adapter, + NULL, + j + 1); + spin_unlock(&adapter->fdir_perfect_lock); + clear_bit(j, jump->child_loc_map); + } + /* Remove resources for this link */ + kfree(jump->input); + kfree(jump->mask); + kfree(jump); + adapter->jump_tables[i] = NULL; + return err; + } + }
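The deletion path now understands u32 "link" filters: each jump table records which child hash-table slots hold hardware filters in a child_loc_map bitmap, so deleting the link can first purge every recorded child from hardware before freeing the table, and deleting an individual child just clears its bit. The bookkeeping, reduced to a sketch (the real code takes fdir_perfect_lock around each hardware update):

    #include <linux/bitops.h>

    /* On child add:    set_bit(loc - 1, jump->child_loc_map);
     * On child delete: clear_bit(loc - 1, jump->child_loc_map);
     * On link delete, purge everything still set:
     */
    static void example_purge_children(struct ixgbe_adapter *adapter,
                                       struct ixgbe_jump_table *jump)
    {
        int j;

        for_each_set_bit(j, jump->child_loc_map, IXGBE_MAX_HW_ENTRIES) {
            ixgbe_update_ethtool_fdir_entry(adapter, NULL, j + 1);
            clear_bit(j, jump->child_loc_map);
        }
    }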
spin_lock(&adapter->fdir_perfect_lock); err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc); @@@ -8549,6 -8580,18 +8580,18 @@@ static int ixgbe_configure_clsu32(struc if (!test_bit(link_uhtid - 1, &adapter->tables)) return err;
+ /* Multiple filters as links to the same hash table are not + * supported. To add a new filter with the same next header + * but different match/jump conditions, create a new hash table + * and link to it. + */ + if (adapter->jump_tables[link_uhtid] && + (adapter->jump_tables[link_uhtid])->link_hdl) { + e_err(drv, "Link filter exists for link: %x\n", + link_uhtid); + return err; + } + for (i = 0; nexthdr[i].jump; i++) { if (nexthdr[i].o != cls->knode.sel->offoff || nexthdr[i].s != cls->knode.sel->offshift || @@@ -8570,6 -8613,8 +8613,8 @@@ } jump->input = input; jump->mask = mask; + jump->link_hdl = cls->knode.handle; + err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, &nexthdr[i]); if (!err) { @@@ -8597,6 -8642,20 +8642,20 @@@ if ((adapter->jump_tables[uhtid])->mask) memcpy(mask, (adapter->jump_tables[uhtid])->mask, sizeof(*mask)); + + /* Lookup in all child hash tables if this location is already + * filled with a filter + */ + for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { + struct ixgbe_jump_table *link = adapter->jump_tables[i]; + + if (link && (test_bit(loc - 1, link->child_loc_map))) { + e_err(drv, "Filter exists in location: %x\n", + loc); + err = -EINVAL; + goto err_out; + } + } } err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL); if (err) @@@ -8628,6 -8687,9 +8687,9 @@@ ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); spin_unlock(&adapter->fdir_perfect_lock);
+ if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) + set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map); + kfree(mask); return err; err_out_w_lock: @@@ -8770,14 -8832,12 +8832,12 @@@ static int ixgbe_set_features(struct ne
netdev->features = features;
- #ifdef CONFIG_IXGBE_VXLAN if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { if (features & NETIF_F_RXCSUM) adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; else ixgbe_clear_vxlan_port(adapter); } - #endif /* CONFIG_IXGBE_VXLAN */
if (need_reset) ixgbe_do_reset(netdev); @@@ -8788,23 -8848,25 +8848,25 @@@ return 0; }
- #ifdef CONFIG_IXGBE_VXLAN /** * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up * @dev: The port's netdev - * @sa_family: Socket Family that VXLAN is notifying us about - * @port: New UDP port number that VXLAN started listening to + * @ti: Tunnel endpoint information **/ - static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, - __be16 port) + static void ixgbe_add_vxlan_port(struct net_device *dev, + struct udp_tunnel_info *ti) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; + __be16 port = ti->port;
- if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return;
- if (sa_family == AF_INET6) + if (ti->sa_family != AF_INET) + return; + + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) return;
if (adapter->vxlan_port == port) @@@ -8824,30 -8886,31 +8886,31 @@@ /** * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away * @dev: The port's netdev - * @sa_family: Socket Family that VXLAN is notifying us about - * @port: UDP port number that VXLAN stopped listening to + * @ti: Tunnel endpoint information **/ - static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, - __be16 port) + static void ixgbe_del_vxlan_port(struct net_device *dev, + struct udp_tunnel_info *ti) { struct ixgbe_adapter *adapter = netdev_priv(dev);
- if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return;
- if (sa_family == AF_INET6) + if (ti->sa_family != AF_INET) return;
- if (adapter->vxlan_port != port) { + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + + if (adapter->vxlan_port != ti->port) { netdev_info(dev, "Port %d was not found, not deleting\n", - ntohs(port)); + ntohs(ti->port)); return; }
ixgbe_clear_vxlan_port(adapter); adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; } - #endif /* CONFIG_IXGBE_VXLAN */
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, @@@ -9160,10 -9223,8 +9223,8 @@@ static const struct net_device_ops ixgb .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, .ndo_dfwd_add_station = ixgbe_fwd_add, .ndo_dfwd_del_station = ixgbe_fwd_del, - #ifdef CONFIG_IXGBE_VXLAN - .ndo_add_vxlan_port = ixgbe_add_vxlan_port, - .ndo_del_vxlan_port = ixgbe_del_vxlan_port, - #endif /* CONFIG_IXGBE_VXLAN */ + .ndo_udp_tunnel_add = ixgbe_add_vxlan_port, + .ndo_udp_tunnel_del = ixgbe_del_vxlan_port, .ndo_features_check = ixgbe_features_check, };
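The ixgbe conversion above is one instance of the tree-wide move from .ndo_add_vxlan_port/.ndo_del_vxlan_port to the protocol-agnostic .ndo_udp_tunnel_add/.ndo_udp_tunnel_del. Every converted driver ends up with the same shape; a hypothetical callback showing the pattern (example_add_tunnel_port is a placeholder name, not ixgbe code):

#include <linux/socket.h>
#include <linux/netdevice.h>
#include <net/udp_tunnel.h>

/* Hypothetical driver callback illustrating the new signature */
static void example_add_tunnel_port(struct net_device *dev,
				    struct udp_tunnel_info *ti)
{
	/* The core now reports every tunnel type; each driver filters
	 * for what its hardware can actually offload.
	 */
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;
	if (ti->sa_family != AF_INET)	/* IPv4-only offload, as in ixgbe */
		return;

	/* program ntohs(ti->port) into the device parser here */
}

The payoff is visible in the diff: the CONFIG_IXGBE_VXLAN ifdefs disappear because the callbacks no longer need to be compiled out when VXLAN support is absent.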
@@@ -9331,7 -9392,8 +9392,7 @@@ static int ixgbe_probe(struct pci_dev * pci_using_dac = 0; }
- err = pci_request_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM), ixgbe_driver_name); + err = pci_request_mem_regions(pdev, ixgbe_driver_name); if (err) { dev_err(&pdev->dev, "pci_request_selected_regions failed 0x%x\n", err); @@@ -9717,7 -9779,8 +9778,7 @@@ err_ioremap disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); err_alloc_etherdev: - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: if (!adapter || disable_dev) @@@ -9784,7 -9847,8 +9845,7 @@@ static void ixgbe_remove(struct pci_de
#endif iounmap(adapter->io_addr); - pci_release_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM)); + pci_release_mem_regions(pdev);
e_dev_info("complete\n");
diff --combined drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 44cf16d,f32e272..bdda17d --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@@ -1042,8 -1042,6 +1042,8 @@@ static int mlx4_en_set_ringparam(struc { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_port_profile new_prof; + struct mlx4_en_priv *tmp; u32 rx_size, tx_size; int port_up = 0; int err = 0; @@@ -1063,25 -1061,22 +1063,25 @@@ tx_size == priv->tx_ring[0]->size) return 0;
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + mutex_lock(&mdev->state_lock); + memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); + new_prof.tx_ring_size = tx_size; + new_prof.rx_ring_size = rx_size; + err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); + if (err) + goto out; + if (priv->port_up) { port_up = 1; mlx4_en_stop_port(dev, 1); }
- mlx4_en_free_resources(priv); - - priv->prof->tx_ring_size = tx_size; - priv->prof->rx_ring_size = rx_size; + mlx4_en_safe_replace_resources(priv, tmp);
- err = mlx4_en_alloc_resources(priv); - if (err) { - en_err(priv, "Failed reallocating port resources\n"); - goto out; - } if (port_up) { err = mlx4_en_start_port(dev); if (err) @@@ -1089,8 -1084,8 +1089,8 @@@ }
err = mlx4_en_moderation_update(priv); - out: + kfree(tmp); mutex_unlock(&mdev->state_lock); return err; } @@@ -1112,7 -1107,7 +1112,7 @@@ static u32 mlx4_en_get_rxfh_indir_size( { struct mlx4_en_priv *priv = netdev_priv(dev);
- return priv->rx_ring_num; + return rounddown_pow_of_two(priv->rx_ring_num); }
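The indirection-table size reported to ethtool now matches what the hardware can actually use: mlx4 spreads RSS only across a power-of-two number of rings, and set_rxfh (below) enforces both is_power_of_2(rss_rings) and ring_index[i] == i % rss_rings. A self-contained sketch of the rounding, with a stand-in for the kernel's rounddown_pow_of_two() from linux/log2.h and a hypothetical 12-ring device:

#include <stdio.h>

/* Userspace stand-in for the kernel's rounddown_pow_of_two() */
static unsigned int rounddown_pow2(unsigned int n)
{
	while (n & (n - 1))
		n &= n - 1;	/* clear low set bits until one remains */
	return n;
}

int main(void)
{
	unsigned int rx_ring_num = 12;			/* hypothetical */
	unsigned int n = rounddown_pow2(rx_ring_num);	/* -> 8 entries */
	unsigned int i;

	for (i = 0; i < n; i++)
		printf("indir[%u] -> rx ring %u\n", i, i % n);
	return 0;
}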
static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev) @@@ -1146,19 -1141,17 +1146,17 @@@ static int mlx4_en_get_rxfh(struct net_ u8 *hfunc) { struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_rss_map *rss_map = &priv->rss_map; - int rss_rings; - size_t n = priv->rx_ring_num; + u32 n = mlx4_en_get_rxfh_indir_size(dev); + u32 i, rss_rings; int err = 0;
- rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num; - rss_rings = 1 << ilog2(rss_rings); + rss_rings = priv->prof->rss_rings ?: n; + rss_rings = rounddown_pow_of_two(rss_rings);
- while (n--) { + for (i = 0; i < n; i++) { if (!ring_index) break; - ring_index[n] = rss_map->qps[n % rss_rings].qpn - - rss_map->base_qpn; + ring_index[i] = i % rss_rings; } if (key) memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE); @@@ -1171,6 -1164,7 +1169,7 @@@ static int mlx4_en_set_rxfh(struct net_ const u8 *key, const u8 hfunc) { struct mlx4_en_priv *priv = netdev_priv(dev); + u32 n = mlx4_en_get_rxfh_indir_size(dev); struct mlx4_en_dev *mdev = priv->mdev; int port_up = 0; int err = 0; @@@ -1180,18 -1174,18 +1179,18 @@@ /* Calculate RSS table size and make sure flows are spread evenly * between rings */ - for (i = 0; i < priv->rx_ring_num; i++) { + for (i = 0; i < n; i++) { if (!ring_index) - continue; + break; if (i > 0 && !ring_index[i] && !rss_rings) rss_rings = i;
- if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num))) + if (ring_index[i] != (i % (rss_rings ?: n))) return -EINVAL; }
if (!rss_rings) - rss_rings = priv->rx_ring_num; + rss_rings = n;
/* RSS table size must be an order of 2 */ if (!is_power_of_2(rss_rings)) @@@ -1719,8 -1713,6 +1718,8 @@@ static int mlx4_en_set_channels(struct { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_port_profile new_prof; + struct mlx4_en_priv *tmp; int port_up = 0; int err = 0;
@@@ -1730,28 -1722,32 +1729,35 @@@ !channel->tx_count || !channel->rx_count) return -EINVAL;
+ if (channel->tx_count * MLX4_EN_NUM_UP <= priv->xdp_ring_num) { + en_err(priv, "Minimum %d tx channels required with XDP on\n", + priv->xdp_ring_num / MLX4_EN_NUM_UP + 1); + return -EINVAL; + } + + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + mutex_lock(&mdev->state_lock); + memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); + new_prof.num_tx_rings_p_up = channel->tx_count; + new_prof.tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP; + new_prof.rx_ring_num = channel->rx_count; + + err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); + if (err) + goto out; + if (priv->port_up) { port_up = 1; mlx4_en_stop_port(dev, 1); }
- mlx4_en_free_resources(priv); - - priv->num_tx_rings_p_up = channel->tx_count; - priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP; - priv->rx_ring_num = channel->rx_count; - - err = mlx4_en_alloc_resources(priv); - if (err) { - en_err(priv, "Failed reallocating port resources\n"); - goto out; - } + mlx4_en_safe_replace_resources(priv, tmp);
- netif_set_real_num_tx_queues(dev, priv->tx_ring_num); + netif_set_real_num_tx_queues(dev, priv->tx_ring_num - + priv->xdp_ring_num); netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
if (dev->num_tc) @@@ -1767,8 -1763,8 +1773,8 @@@ }
err = mlx4_en_moderation_update(priv); - out: + kfree(tmp); mutex_unlock(&mdev->state_lock); return err; } diff --combined drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 8359e9e,9abbba6..4198e9b --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@@ -31,6 -31,7 +31,7 @@@ * */
+ #include <linux/bpf.h> #include <linux/etherdevice.h> #include <linux/tcp.h> #include <linux/if_vlan.h> @@@ -67,6 -68,17 +68,17 @@@ int mlx4_en_setup_tc(struct net_device offset += priv->num_tx_rings_p_up; }
+ #ifdef CONFIG_MLX4_EN_DCB + if (!mlx4_is_slave(priv->mdev->dev)) { + if (up) { + priv->flags |= MLX4_EN_FLAG_DCB_ENABLED; + } else { + priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED; + priv->cee_params.dcb_cfg.pfc_state = false; + } + } + #endif /* CONFIG_MLX4_EN_DCB */ + return 0; }
@@@ -1201,8 -1213,8 +1213,8 @@@ static void mlx4_en_netpoll(struct net_ struct mlx4_en_cq *cq; int i;
- for (i = 0; i < priv->rx_ring_num; i++) { - cq = priv->rx_cq[i]; + for (i = 0; i < priv->tx_ring_num; i++) { + cq = priv->tx_cq[i]; napi_schedule(&cq->napi); } } @@@ -1510,6 -1522,24 +1522,24 @@@ static void mlx4_en_free_affinity_hint( free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask); }
+ static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv, + int tx_ring_idx) + { + struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[tx_ring_idx]; + int rr_index; + + rr_index = (priv->xdp_ring_num - priv->tx_ring_num) + tx_ring_idx; + if (rr_index >= 0) { + tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc; + tx_ring->recycle_ring = priv->rx_ring[rr_index]; + en_dbg(DRV, priv, + "Set tx_ring[%d]->recycle_ring = rx_ring[%d]\n", + tx_ring_idx, rr_index); + } else { + tx_ring->recycle_ring = NULL; + } + } + int mlx4_en_start_port(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); @@@ -1632,6 -1662,8 +1662,8 @@@ } tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
+ mlx4_en_init_recycle_ring(priv, i); + /* Arm CQ for TX completions */ mlx4_en_arm_cq(priv, cq);
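The rr_index arithmetic in mlx4_en_init_recycle_ring() is easiest to see with numbers: XDP TX rings sit at the tail of the TX ring array, one per RX ring, so (xdp_ring_num - tx_ring_num) + tx_ring_idx is negative for ordinary rings and counts 0..xdp_ring_num-1 across the tail. A standalone sketch assuming a hypothetical 8 regular + 8 XDP layout:

#include <stdio.h>

int main(void)
{
	int tx_ring_num = 16;	/* hypothetical: 8 regular + 8 XDP TX rings */
	int xdp_ring_num = 8;	/* one XDP TX ring per RX ring */
	int i;

	for (i = 0; i < tx_ring_num; i++) {
		int rr_index = (xdp_ring_num - tx_ring_num) + i;  /* i - 8 */

		if (rr_index >= 0)
			printf("tx_ring[%2d] recycles into rx_ring[%d]\n",
			       i, rr_index);
		else
			printf("tx_ring[%2d] is a regular ring\n", i);
	}
	return 0;
}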
@@@ -1696,10 -1728,9 +1728,9 @@@ /* Schedule multicast task to populate multicast list */ queue_work(mdev->workqueue, &priv->rx_mode_task);
- #ifdef CONFIG_MLX4_EN_VXLAN if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) - vxlan_get_rx_port(dev); - #endif + udp_tunnel_get_rx_info(dev); + priv->port_up = true; netif_tx_start_all_queues(dev); netif_device_attach(dev); @@@ -1954,7 -1985,7 +1985,7 @@@ static int mlx4_en_close(struct net_dev return 0; }
-void mlx4_en_free_resources(struct mlx4_en_priv *priv) +static void mlx4_en_free_resources(struct mlx4_en_priv *priv) { int i;
@@@ -1979,7 -2010,7 +2010,7 @@@
}
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) +static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) { struct mlx4_en_port_profile *prof = priv->prof; int i; @@@ -2044,77 -2075,6 +2075,77 @@@ static void mlx4_en_shutdown(struct net rtnl_unlock(); }
+static int mlx4_en_copy_priv(struct mlx4_en_priv *dst, + struct mlx4_en_priv *src, + struct mlx4_en_port_profile *prof) +{ + memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config, + sizeof(dst->hwtstamp_config)); + dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up; + dst->tx_ring_num = prof->tx_ring_num; + dst->rx_ring_num = prof->rx_ring_num; + dst->flags = prof->flags; + dst->mdev = src->mdev; + dst->port = src->port; + dst->dev = src->dev; + dst->prof = prof; + dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + + DS_SIZE * MLX4_EN_MAX_RX_FRAGS); + + dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS, + GFP_KERNEL); + if (!dst->tx_ring) + return -ENOMEM; + + dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS, + GFP_KERNEL); + if (!dst->tx_cq) { + kfree(dst->tx_ring); + return -ENOMEM; + } + return 0; +} + +static void mlx4_en_update_priv(struct mlx4_en_priv *dst, + struct mlx4_en_priv *src) +{ + memcpy(dst->rx_ring, src->rx_ring, + sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num); + memcpy(dst->rx_cq, src->rx_cq, + sizeof(struct mlx4_en_cq *) * src->rx_ring_num); + memcpy(&dst->hwtstamp_config, &src->hwtstamp_config, + sizeof(dst->hwtstamp_config)); + dst->tx_ring_num = src->tx_ring_num; + dst->rx_ring_num = src->rx_ring_num; + dst->tx_ring = src->tx_ring; + dst->tx_cq = src->tx_cq; + memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile)); +} + +int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, + struct mlx4_en_priv *tmp, + struct mlx4_en_port_profile *prof) +{ + mlx4_en_copy_priv(tmp, priv, prof); + + if (mlx4_en_alloc_resources(tmp)) { + en_warn(priv, + "%s: Resource allocation failed, using previous configuration\n", + __func__); + kfree(tmp->tx_ring); + kfree(tmp->tx_cq); + return -ENOMEM; + } + return 0; +} + +void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv, + struct mlx4_en_priv *tmp) +{ + mlx4_en_free_resources(priv); + mlx4_en_update_priv(priv, tmp); +} + void mlx4_en_destroy_netdev(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); @@@ -2151,10 -2111,6 +2182,10 @@@ mdev->upper[priv->port] = NULL; mutex_unlock(&mdev->state_lock);
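These two helpers are what let the converted ethtool paths in this merge (ring size, channel count, timestamp config) stop tearing resources down before knowing the replacement can be allocated. Their intended use, condensed into one hypothetical wrapper (mlx4_en_reconfigure is not a real function; port stop/start and locking are elided):

#include <linux/slab.h>

static int mlx4_en_reconfigure(struct mlx4_en_priv *priv,
			       struct mlx4_en_port_profile *new_prof)
{
	struct mlx4_en_priv *tmp;
	int err;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* Allocate the new rings against a throwaway priv first */
	err = mlx4_en_try_alloc_resources(priv, tmp, new_prof);
	if (err)
		goto out;	/* failure leaves the running rings untouched */

	/* Then adopt the new rings; the old ones are freed inside */
	mlx4_en_safe_replace_resources(priv, tmp);
out:
	kfree(tmp);
	return err;
}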
+#ifdef CONFIG_RFS_ACCEL + mlx4_en_cleanup_filters(priv); +#endif + mlx4_en_free_resources(priv);
kfree(priv->tx_ring); @@@ -2177,6 -2133,11 +2208,11 @@@ static int mlx4_en_change_mtu(struct ne en_err(priv, "Bad MTU size:%d.\n", new_mtu); return -EPERM; } + if (priv->xdp_ring_num && MLX4_EN_EFF_MTU(new_mtu) > FRAG_SZ0) { + en_err(priv, "MTU size:%d requires frags but XDP running\n", + new_mtu); + return -EOPNOTSUPP; + } dev->mtu = new_mtu;
if (netif_running(dev)) { @@@ -2434,7 -2395,6 +2470,6 @@@ static int mlx4_en_get_phys_port_id(str return 0; }
- #ifdef CONFIG_MLX4_EN_VXLAN static void mlx4_en_add_vxlan_offloads(struct work_struct *work) { int ret; @@@ -2484,15 -2444,19 +2519,19 @@@ static void mlx4_en_del_vxlan_offloads( }
static void mlx4_en_add_vxlan_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct mlx4_en_priv *priv = netdev_priv(dev); + __be16 port = ti->port; __be16 current_port;
- if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return;
- if (sa_family == AF_INET6) + if (ti->sa_family != AF_INET) + return; + + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) return;
current_port = priv->vxlan_port; @@@ -2507,15 -2471,19 +2546,19 @@@ }
static void mlx4_en_del_vxlan_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct mlx4_en_priv *priv = netdev_priv(dev); + __be16 port = ti->port; __be16 current_port;
- if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return;
- if (sa_family == AF_INET6) + if (ti->sa_family != AF_INET) + return; + + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) return;
current_port = priv->vxlan_port; @@@ -2550,7 -2518,6 +2593,6 @@@ static netdev_features_t mlx4_en_featur
return features; } - #endif
static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate) { @@@ -2579,6 -2546,103 +2621,103 @@@ return err; }
+ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) + { + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + struct bpf_prog *old_prog; + int xdp_ring_num; + int port_up = 0; + int err; + int i; + + xdp_ring_num = prog ? ALIGN(priv->rx_ring_num, MLX4_EN_NUM_UP) : 0; + + /* No need to reconfigure buffers when simply swapping the + * program for a new one. + */ + if (priv->xdp_ring_num == xdp_ring_num) { + if (prog) { + prog = bpf_prog_add(prog, priv->rx_ring_num - 1); + if (IS_ERR(prog)) + return PTR_ERR(prog); + } + for (i = 0; i < priv->rx_ring_num; i++) { + /* This xchg is paired with READ_ONCE in the fastpath */ + old_prog = xchg(&priv->rx_ring[i]->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + } + return 0; + } + + if (priv->num_frags > 1) { + en_err(priv, "Cannot set XDP if MTU requires multiple frags\n"); + return -EOPNOTSUPP; + } + + if (priv->tx_ring_num < xdp_ring_num + MLX4_EN_NUM_UP) { + en_err(priv, + "Minimum %d tx channels required to run XDP\n", + (xdp_ring_num + MLX4_EN_NUM_UP) / MLX4_EN_NUM_UP); + return -EINVAL; + } + + if (prog) { + prog = bpf_prog_add(prog, priv->rx_ring_num - 1); + if (IS_ERR(prog)) + return PTR_ERR(prog); + } + + mutex_lock(&mdev->state_lock); + if (priv->port_up) { + port_up = 1; + mlx4_en_stop_port(dev, 1); + } + + priv->xdp_ring_num = xdp_ring_num; + netif_set_real_num_tx_queues(dev, priv->tx_ring_num - + priv->xdp_ring_num); + + for (i = 0; i < priv->rx_ring_num; i++) { + old_prog = xchg(&priv->rx_ring[i]->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + } + + if (port_up) { + err = mlx4_en_start_port(dev); + if (err) { + en_err(priv, "Failed starting port %d for XDP change\n", + priv->port); + queue_work(mdev->workqueue, &priv->watchdog_task); + } + } + + mutex_unlock(&mdev->state_lock); + return 0; + } + + static bool mlx4_xdp_attached(struct net_device *dev) + { + struct mlx4_en_priv *priv = netdev_priv(dev); + + return !!priv->xdp_ring_num; + } + + static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp) + { + switch (xdp->command) { + case XDP_SETUP_PROG: + return mlx4_xdp_set(dev, xdp->prog); + case XDP_QUERY_PROG: + xdp->prog_attached = mlx4_xdp_attached(dev); + return 0; + default: + return -EINVAL; + } + } + static const struct net_device_ops mlx4_netdev_ops = { .ndo_open = mlx4_en_open, .ndo_stop = mlx4_en_close, @@@ -2603,12 -2667,11 +2742,11 @@@ .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, - #ifdef CONFIG_MLX4_EN_VXLAN - .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, - .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, + .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port, + .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, .ndo_features_check = mlx4_en_features_check, - #endif .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, + .ndo_xdp = mlx4_xdp, };
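One subtlety in mlx4_xdp_set() worth spelling out, as this code reads: every one of the rx_ring_num rings stores its own prog pointer, while the reference that arrived with the attach request accounts for only one of them. Annotated excerpt:

	/* rx_ring_num holders, minus the reference the caller passed in */
	prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

Each bpf_prog_put() on ring teardown (see mlx4_en_destroy_rx_ring below) then drops exactly one of those references.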
static const struct net_device_ops mlx4_netdev_ops_master = { @@@ -2641,12 -2704,11 +2779,11 @@@ .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, - #ifdef CONFIG_MLX4_EN_VXLAN - .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, - .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, + .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port, + .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, .ndo_features_check = mlx4_en_features_check, - #endif .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, + .ndo_xdp = mlx4_xdp, };
struct mlx4_en_bond { @@@ -2911,6 -2973,9 +3048,9 @@@ int mlx4_en_init_netdev(struct mlx4_en_ struct mlx4_en_priv *priv; int i; int err; + #ifdef CONFIG_MLX4_EN_DCB + struct tc_configuration *tc; + #endif
dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), MAX_TX_RINGS, MAX_RX_RINGS); @@@ -2936,10 -3001,8 +3076,8 @@@ INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); - #ifdef CONFIG_MLX4_EN_VXLAN INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); - #endif #ifdef CONFIG_RFS_ACCEL INIT_LIST_HEAD(&priv->filters); spin_lock_init(&priv->filters_lock); @@@ -2979,6 -3042,17 +3117,17 @@@ priv->msg_enable = MLX4_EN_MSG_LEVEL; #ifdef CONFIG_MLX4_EN_DCB if (!mlx4_is_slave(priv->mdev->dev)) { + priv->cee_params.dcbx_cap = DCB_CAP_DCBX_VER_CEE | + DCB_CAP_DCBX_HOST | + DCB_CAP_DCBX_VER_IEEE; + priv->flags |= MLX4_EN_DCB_ENABLED; + priv->cee_params.dcb_cfg.pfc_state = false; + + for (i = 0; i < MLX4_EN_NUM_UP; i++) { + tc = &priv->cee_params.dcb_cfg.tc_config[i]; + tc->dcb_pfc = pfc_disabled; + } + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { dev->dcbnl_ops = &mlx4_en_dcbnl_ops; } else { @@@ -3199,8 -3273,6 +3348,8 @@@ int mlx4_en_reset_config(struct net_dev { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_port_profile new_prof; + struct mlx4_en_priv *tmp; int port_up = 0; int err = 0;
@@@ -3217,29 -3289,19 +3366,29 @@@ return -EINVAL; }
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return -ENOMEM; + mutex_lock(&mdev->state_lock); + + memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile)); + memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config)); + + err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof); + if (err) + goto out; + if (priv->port_up) { port_up = 1; mlx4_en_stop_port(dev, 1); }
- mlx4_en_free_resources(priv); - en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n", - ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX)); + ts_config.rx_filter, + !!(features & NETIF_F_HW_VLAN_CTAG_RX));
- priv->hwtstamp_config.tx_type = ts_config.tx_type; - priv->hwtstamp_config.rx_filter = ts_config.rx_filter; + mlx4_en_safe_replace_resources(priv, tmp);
if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) { if (features & NETIF_F_HW_VLAN_CTAG_RX) @@@ -3273,6 -3335,11 +3422,6 @@@ dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; }
- err = mlx4_en_alloc_resources(priv); - if (err) { - en_err(priv, "Failed reallocating port resources\n"); - goto out; - } if (port_up) { err = mlx4_en_start_port(dev); if (err) @@@ -3281,8 -3348,6 +3430,8 @@@
out: mutex_unlock(&mdev->state_lock); - netdev_features_change(dev); + kfree(tmp); + if (!err) + netdev_features_change(dev); return err; } diff --combined drivers/net/ethernet/mellanox/mlx4/en_rx.c index 99b5407,a02dec6..2040dad --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@@ -32,6 -32,7 +32,7 @@@ */
#include <net/busy_poll.h> + #include <linux/bpf.h> #include <linux/mlx4/cq.h> #include <linux/slab.h> #include <linux/mlx4/qp.h> @@@ -57,7 -58,7 +58,7 @@@ static int mlx4_alloc_pages(struct mlx4 struct page *page; dma_addr_t dma;
- for (order = MLX4_EN_ALLOC_PREFER_ORDER; ;) { + for (order = frag_info->order; ;) { gfp_t gfp = _gfp;
if (order) @@@ -70,7 -71,7 +71,7 @@@ return -ENOMEM; } dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order, - PCI_DMA_FROMDEVICE); + frag_info->dma_dir); if (dma_mapping_error(priv->ddev, dma)) { put_page(page); return -ENOMEM; @@@ -124,7 -125,8 +125,8 @@@ out while (i--) { if (page_alloc[i].page != ring_alloc[i].page) { dma_unmap_page(priv->ddev, page_alloc[i].dma, - page_alloc[i].page_size, PCI_DMA_FROMDEVICE); + page_alloc[i].page_size, + priv->frag_info[i].dma_dir); page = page_alloc[i].page; /* Revert changes done by mlx4_alloc_pages */ page_ref_sub(page, page_alloc[i].page_size / @@@ -145,7 -147,7 +147,7 @@@ static void mlx4_en_free_frag(struct ml
if (next_frag_end > frags[i].page_size) dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size, - PCI_DMA_FROMDEVICE); + frag_info->dma_dir);
if (frags[i].page) put_page(frags[i].page); @@@ -176,7 -178,8 +178,8 @@@ out
page_alloc = &ring->page_alloc[i]; dma_unmap_page(priv->ddev, page_alloc->dma, - page_alloc->page_size, PCI_DMA_FROMDEVICE); + page_alloc->page_size, + priv->frag_info[i].dma_dir); page = page_alloc->page; /* Revert changes done by mlx4_alloc_pages */ page_ref_sub(page, page_alloc->page_size / @@@ -201,7 -204,7 +204,7 @@@ static void mlx4_en_destroy_allocator(s i, page_count(page_alloc->page));
dma_unmap_page(priv->ddev, page_alloc->dma, - page_alloc->page_size, PCI_DMA_FROMDEVICE); + page_alloc->page_size, frag_info->dma_dir); while (page_alloc->page_offset + frag_info->frag_stride < page_alloc->page_size) { put_page(page_alloc->page); @@@ -244,6 -247,12 +247,12 @@@ static int mlx4_en_prepare_rx_desc(stru struct mlx4_en_rx_alloc *frags = ring->rx_info + (index << priv->log_rx_info);
+ if (ring->page_cache.index > 0) { + frags[0] = ring->page_cache.buf[--ring->page_cache.index]; + rx_desc->data[0].addr = cpu_to_be64(frags[0].dma); + return 0; + } + return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp); }
@@@ -502,23 -511,58 +511,55 @@@ void mlx4_en_recover_from_oom(struct ml } }
+ /* When the rx ring is running in page-per-packet mode, a released frame can go + * directly into a small cache, to avoid unmapping or touching the page + * allocator. In bpf prog performance scenarios, buffers are either forwarded + * or dropped, never converted to skbs, so every page can come directly from + * this cache when it is sized to be a multiple of the napi budget. + */ + bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring, + struct mlx4_en_rx_alloc *frame) + { + struct mlx4_en_page_cache *cache = &ring->page_cache; + + if (cache->index >= MLX4_EN_CACHE_SIZE) + return false; + + cache->buf[cache->index++] = *frame; + return true; + } + void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring **pring, u32 size, u16 stride) { struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_rx_ring *ring = *pring; + struct bpf_prog *old_prog;
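On the sizing the comment alludes to: MLX4_EN_CACHE_SIZE is defined later in this diff (mlx4_en.h) as 2 * NAPI_POLL_WEIGHT, i.e. 128 entries with the kernel's default weight of 64, so a full NAPI budget of XDP_DROP frames fits in the cache while a second budget's worth is still in flight. A userspace model of the push/pop discipline, with types reduced to the bare minimum:

#include <stdbool.h>
#include <stdio.h>

#define NAPI_POLL_WEIGHT 64			/* kernel default */
#define MLX4_EN_CACHE_SIZE (2 * NAPI_POLL_WEIGHT)

struct frame { void *page; };

struct page_cache {
	unsigned int index;
	struct frame buf[MLX4_EN_CACHE_SIZE];
};

/* Push: mirrors mlx4_en_rx_recycle() above */
static bool cache_push(struct page_cache *c, struct frame f)
{
	if (c->index >= MLX4_EN_CACHE_SIZE)
		return false;		/* caller unmaps and frees the page */
	c->buf[c->index++] = f;
	return true;
}

/* Pop: mirrors the fast path added to mlx4_en_prepare_rx_desc() */
static bool cache_pop(struct page_cache *c, struct frame *f)
{
	if (!c->index)
		return false;		/* fall back to the page allocator */
	*f = c->buf[--c->index];
	return true;
}

int main(void)
{
	struct page_cache c = { 0 };
	struct frame f = { 0 };
	bool pushed = cache_push(&c, f);
	bool popped = cache_pop(&c, &f);

	printf("capacity %d, push %d, pop %d\n",
	       MLX4_EN_CACHE_SIZE, pushed, popped);
	return 0;
}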
+ old_prog = READ_ONCE(ring->xdp_prog); + if (old_prog) + bpf_prog_put(old_prog); mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE); vfree(ring->rx_info); ring->rx_info = NULL; kfree(ring); *pring = NULL; -#ifdef CONFIG_RFS_ACCEL - mlx4_en_cleanup_filters(priv); -#endif }
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring) { + int i; + + for (i = 0; i < ring->page_cache.index; i++) { + struct mlx4_en_rx_alloc *frame = &ring->page_cache.buf[i]; + + dma_unmap_page(priv->ddev, frame->dma, frame->page_size, + priv->frag_info[0].dma_dir); + put_page(frame->page); + } + ring->page_cache.index = 0; mlx4_en_free_rx_buf(priv, ring); if (ring->stride <= TXBB_SIZE) ring->buf -= TXBB_SIZE; @@@ -740,7 -784,10 +781,10 @@@ int mlx4_en_process_rx_cq(struct net_de struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring]; struct mlx4_en_rx_alloc *frags; struct mlx4_en_rx_desc *rx_desc; + struct bpf_prog *xdp_prog; + int doorbell_pending; struct sk_buff *skb; + int tx_index; int index; int nr; unsigned int length; @@@ -756,6 -803,10 +800,10 @@@ if (budget <= 0) return polled;
+ xdp_prog = READ_ONCE(ring->xdp_prog); + doorbell_pending = 0; + tx_index = (priv->tx_ring_num - priv->xdp_ring_num) + cq->ring; + /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx * descriptor offset can be deduced from the CQE index instead of * reading 'cqe->index' */ @@@ -832,6 -883,43 +880,43 @@@ l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) && (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
+ /* A bpf program gets first chance to drop the packet. It may + * read bytes but not past the end of the frag. + */ + if (xdp_prog) { + struct xdp_buff xdp; + dma_addr_t dma; + u32 act; + + dma = be64_to_cpu(rx_desc->data[0].addr); + dma_sync_single_for_cpu(priv->ddev, dma, + priv->frag_info[0].frag_size, + DMA_FROM_DEVICE); + + xdp.data = page_address(frags[0].page) + + frags[0].page_offset; + xdp.data_end = xdp.data + length; + + act = bpf_prog_run_xdp(xdp_prog, &xdp); + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + if (!mlx4_en_xmit_frame(frags, dev, + length, tx_index, + &doorbell_pending)) + goto consumed; + break; + default: + bpf_warn_invalid_xdp_action(act); + case XDP_ABORTED: + case XDP_DROP: + if (mlx4_en_rx_recycle(ring, frags)) + goto consumed; + goto next; + } + } + if (likely(dev->features & NETIF_F_RXCSUM)) { if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | MLX4_CQE_STATUS_UDP)) { @@@ -983,6 -1071,7 +1068,7 @@@ next for (nr = 0; nr < priv->num_frags; nr++) mlx4_en_free_frag(priv, frags, nr);
+ consumed: ++cq->mcq.cons_index; index = (cq->mcq.cons_index) & ring->size_mask; cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; @@@ -991,6 -1080,9 +1077,9 @@@ }
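For context, the program run at this hook is an ordinary XDP object. The smallest useful one, built with the usual clang -O2 -target bpf workflow (the "prog" section name is loader convention of this era, not a kernel requirement):

#include <linux/bpf.h>

#define __section(NAME) __attribute__((section(NAME), used))

/* Drop everything: with the changes above, each XDP_DROP frame goes
 * straight back into the ring's page cache instead of the allocator.
 */
__section("prog")
int xdp_drop_all(struct xdp_md *ctx)
{
	return XDP_DROP;
}

char __license[] __section("license") = "GPL";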
out: + if (doorbell_pending) + mlx4_en_xmit_doorbell(priv->tx_ring[tx_index]); + AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled); mlx4_cq_set_ci(&cq->mcq); wmb(); /* ensure HW sees CQ consumer before we post new buffers */ @@@ -1058,22 -1150,35 +1147,35 @@@ static const int frag_sizes[] =
void mlx4_en_calc_rx_buf(struct net_device *dev) { + enum dma_data_direction dma_dir = PCI_DMA_FROMDEVICE; struct mlx4_en_priv *priv = netdev_priv(dev); - /* VLAN_HLEN is added twice,to support skb vlan tagged with multiple - * headers. (For example: ETH_P_8021Q and ETH_P_8021AD). - */ - int eff_mtu = dev->mtu + ETH_HLEN + (2 * VLAN_HLEN); + int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu); + int order = MLX4_EN_ALLOC_PREFER_ORDER; + u32 align = SMP_CACHE_BYTES; int buf_size = 0; int i = 0;
+ /* bpf requires buffers to be set up as 1 packet per page. + * This only works when num_frags == 1. + */ + if (priv->xdp_ring_num) { + dma_dir = PCI_DMA_BIDIRECTIONAL; + /* This will gain efficient xdp frame recycling at the expense + * of more costly truesize accounting + */ + align = PAGE_SIZE; + order = 0; + } + while (buf_size < eff_mtu) { + priv->frag_info[i].order = order; priv->frag_info[i].frag_size = (eff_mtu > buf_size + frag_sizes[i]) ? frag_sizes[i] : eff_mtu - buf_size; priv->frag_info[i].frag_prefix_size = buf_size; priv->frag_info[i].frag_stride = - ALIGN(priv->frag_info[i].frag_size, - SMP_CACHE_BYTES); + ALIGN(priv->frag_info[i].frag_size, align); + priv->frag_info[i].dma_dir = dma_dir; buf_size += priv->frag_info[i].frag_size; i++; } diff --combined drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 13d297e,29c81d2..2c2913d --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@@ -132,6 -132,7 +132,7 @@@ enum MLX4_EN_NUM_UP)
#define MLX4_EN_DEFAULT_TX_WORK 256 + #define MLX4_EN_DOORBELL_BUDGET 8
/* Target number of packets to coalesce with interrupt moderation */ #define MLX4_EN_RX_COAL_TARGET 44 @@@ -164,6 -165,10 +165,10 @@@ #define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
#define MLX4_EN_MIN_MTU 46 + /* VLAN_HLEN is added twice, to support skb vlan tagged with multiple + * headers. (For example: ETH_P_8021Q and ETH_P_8021AD). + */ + #define MLX4_EN_EFF_MTU(mtu) ((mtu) + ETH_HLEN + (2 * VLAN_HLEN)) #define ETH_BCAST 0xffffffffffffULL
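Worked through with the standard header lengths (ETH_HLEN = 14, VLAN_HLEN = 4):

	MLX4_EN_EFF_MTU(1500) = 1500 + 14 + 2 * 4 = 1522

which still fits the driver's 1536-byte first fragment (FRAG_SZ0), so num_frags == 1 holds and XDP stays usable at the default MTU, while a 9000-byte jumbo MTU gives 9022, needs multiple fragments, and is rejected by the mlx4_en_change_mtu() check earlier in this diff.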
#define MLX4_EN_LOOPBACK_RETRIES 5 @@@ -215,7 -220,10 +220,10 @@@ enum cq_type
struct mlx4_en_tx_info { - struct sk_buff *skb; + union { + struct sk_buff *skb; + struct page *page; + }; dma_addr_t map0_dma; u32 map0_byte_count; u32 nr_txbb; @@@ -255,6 -263,14 +263,14 @@@ struct mlx4_en_rx_alloc u32 page_size; };
+ #define MLX4_EN_CACHE_SIZE (2 * NAPI_POLL_WEIGHT) + struct mlx4_en_page_cache { + u32 index; + struct mlx4_en_rx_alloc buf[MLX4_EN_CACHE_SIZE]; + }; + + struct mlx4_en_priv; + struct mlx4_en_tx_ring { /* cache line used and dirtied in tx completion * (mlx4_en_free_tx_buf()) @@@ -288,6 -304,11 +304,11 @@@ __be32 mr_key; void *buf; struct mlx4_en_tx_info *tx_info; + struct mlx4_en_rx_ring *recycle_ring; + u32 (*free_tx_desc)(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u8 owner, + u64 timestamp, int napi_mode); u8 *bounce_buf; struct mlx4_qp_context context; int qpn; @@@ -319,6 -340,8 +340,8 @@@ struct mlx4_en_rx_ring u8 fcs_del; void *buf; void *rx_info; + struct bpf_prog *xdp_prog; + struct mlx4_en_page_cache page_cache; unsigned long bytes; unsigned long packets; unsigned long csum_ok; @@@ -353,14 -376,12 +376,14 @@@ struct mlx4_en_port_profile u32 rx_ring_num; u32 tx_ring_size; u32 rx_ring_size; + u8 num_tx_rings_p_up; u8 rx_pause; u8 rx_ppp; u8 tx_pause; u8 tx_ppp; int rss_rings; int inline_thold; + struct hwtstamp_config hwtstamp_config; };
struct mlx4_en_profile { @@@ -440,7 -461,9 +463,9 @@@ struct mlx4_en_mc_list struct mlx4_en_frag_info { u16 frag_size; u16 frag_prefix_size; - u16 frag_stride; + u32 frag_stride; + enum dma_data_direction dma_dir; + int order; };
#ifdef CONFIG_MLX4_EN_DCB @@@ -450,6 -473,27 +475,27 @@@
#define MLX4_EN_TC_ETS 7
+ enum dcb_pfc_type { + pfc_disabled = 0, + pfc_enabled_full, + pfc_enabled_tx, + pfc_enabled_rx + }; + + struct tc_configuration { + enum dcb_pfc_type dcb_pfc; + }; + + struct mlx4_en_cee_config { + bool pfc_state; + struct tc_configuration tc_config[MLX4_EN_NUM_UP]; + }; + + struct mlx4_en_cee_params { + u8 dcbx_cap; + struct mlx4_en_cee_config dcb_cfg; + }; + #endif
struct ethtool_flow_id { @@@ -469,6 -513,9 +515,9 @@@ enum MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3), MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4), MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP = (1 << 5), + #ifdef CONFIG_MLX4_EN_DCB + MLX4_EN_FLAG_DCB_ENABLED = (1 << 6), + #endif };
#define PORT_BEACON_MAX_LIMIT (65535) @@@ -536,6 -583,7 +585,7 @@@ struct mlx4_en_priv struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS]; u16 num_frags; u16 log_rx_info; + int xdp_ring_num;
struct mlx4_en_tx_ring **tx_ring; struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS]; @@@ -547,10 -595,8 +597,8 @@@ struct work_struct linkstate_task; struct delayed_work stats_task; struct delayed_work service_task; - #ifdef CONFIG_MLX4_EN_VXLAN struct work_struct vxlan_add_task; struct work_struct vxlan_del_task; - #endif struct mlx4_en_perf_stats pstats; struct mlx4_en_pkt_stats pkstats; struct mlx4_en_counter_stats pf_stats; @@@ -572,9 -618,11 +620,11 @@@ u32 counter_index;
#ifdef CONFIG_MLX4_EN_DCB + #define MLX4_EN_DCB_ENABLED 0x3 struct ieee_ets ets; u16 maxrate[IEEE_8021QAZ_MAX_TCS]; enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS]; + struct mlx4_en_cee_params cee_params; #endif #ifdef CONFIG_RFS_ACCEL spinlock_t filters_lock; @@@ -625,11 -673,8 +675,11 @@@ void mlx4_en_set_stats_bitmap(struct ml u8 rx_ppp, u8 rx_pause, u8 tx_ppp, u8 tx_pause);
-void mlx4_en_free_resources(struct mlx4_en_priv *priv); -int mlx4_en_alloc_resources(struct mlx4_en_priv *priv); +int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv, + struct mlx4_en_priv *tmp, + struct mlx4_en_port_profile *prof); +void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv, + struct mlx4_en_priv *tmp);
int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq, int entries, int ring, enum cq_type mode, int node); @@@ -644,6 -689,12 +694,12 @@@ void mlx4_en_tx_irq(struct mlx4_cq *mcq u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback); netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); + netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame, + struct net_device *dev, unsigned int length, + int tx_ind, int *doorbell_pending); + void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring); + bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring, + struct mlx4_en_rx_alloc *frame);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring **pring, @@@ -672,6 -723,14 +728,14 @@@ int mlx4_en_process_rx_cq(struct net_de int budget); int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget); int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget); + u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u8 owner, u64 timestamp, + int napi_mode); + u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u8 owner, u64 timestamp, + int napi_mode); void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, int is_tx, int rss, int qpn, int cqn, int user_prio, struct mlx4_qp_context *context); diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5a4d88c,611ab55..bdcb699 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@@ -47,8 -47,9 +47,9 @@@ enum };
struct mlx5e_rq_param { - u32 rqc[MLX5_ST_SZ_DW(rqc)]; - struct mlx5_wq_param wq; + u32 rqc[MLX5_ST_SZ_DW(rqc)]; + struct mlx5_wq_param wq; + bool am_enabled; };
struct mlx5e_sq_param { @@@ -62,6 -63,7 +63,7 @@@ struct mlx5e_cq_param u32 cqc[MLX5_ST_SZ_DW(cqc)]; struct mlx5_wq_param wq; u16 eq_ix; + u8 cq_period_mode; };
struct mlx5e_channel_param { @@@ -254,14 -256,14 +256,14 @@@ void mlx5e_update_stats(struct mlx5e_pr mlx5e_update_sw_counters(priv); }
- static void mlx5e_update_stats_work(struct work_struct *work) + void mlx5e_update_stats_work(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv, update_stats_work); mutex_lock(&priv->state_lock); if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { - mlx5e_update_stats(priv); + priv->profile->update_stats(priv); queue_delayed_work(priv->wq, dwork, msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL)); } @@@ -367,6 -369,9 +369,9 @@@ static int mlx5e_create_rq(struct mlx5e wqe->data.byte_count = cpu_to_be32(byte_count); }
+ INIT_WORK(&rq->am.work, mlx5e_rx_am_work); + rq->am.mode = priv->params.rx_cq_period_mode; + rq->wq_type = priv->params.rq_wq_type; rq->pdev = c->pdev; rq->netdev = c->netdev; @@@ -539,6 -544,9 +544,9 @@@ static int mlx5e_open_rq(struct mlx5e_c if (err) goto err_disable_rq;
+ if (param->am_enabled) + set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); + set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP; @@@ -574,6 -582,8 +582,8 @@@ static void mlx5e_close_rq(struct mlx5e /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */ napi_synchronize(&rq->channel->napi);
+ cancel_work_sync(&rq->am.work); + mlx5e_disable_rq(rq); mlx5e_free_rx_descs(rq); mlx5e_destroy_rq(rq); @@@ -741,7 -751,8 +751,8 @@@ static int mlx5e_enable_sq(struct mlx5e return err; }
- static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state) + static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, + int next_state, bool update_rl, int rl_index) { struct mlx5e_channel *c = sq->channel; struct mlx5e_priv *priv = c->priv; @@@ -761,6 -772,10 +772,10 @@@
MLX5_SET(modify_sq_in, in, sq_state, curr_state); MLX5_SET(sqc, sqc, state, next_state); + if (update_rl && next_state == MLX5_SQC_STATE_RDY) { + MLX5_SET64(modify_sq_in, in, modify_bitmask, 1); + MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index); + }
err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
@@@ -776,6 -791,8 +791,8 @@@ static void mlx5e_disable_sq(struct mlx struct mlx5_core_dev *mdev = priv->mdev;
mlx5_core_destroy_sq(mdev, sq->sqn); + if (sq->rate_limit) + mlx5_rl_remove_rate(mdev, sq->rate_limit); }
static int mlx5e_open_sq(struct mlx5e_channel *c, @@@ -793,7 -810,8 +810,8 @@@ if (err) goto err_destroy_sq;
- err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY); + err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY, + false, 0); if (err) goto err_disable_sq;
@@@ -836,7 -854,7 +854,7 @@@ static void mlx5e_close_sq(struct mlx5e mlx5e_send_nop(sq, true);
err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, - MLX5_SQC_STATE_ERR); + MLX5_SQC_STATE_ERR, false, 0); if (err) set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state); } @@@ -891,7 -909,7 +909,7 @@@ static int mlx5e_create_cq(struct mlx5e mcq->comp = mlx5e_completion_event; mcq->event = mlx5e_cq_error_event; mcq->irqn = irqn; - mcq->uar = &priv->cq_uar; + mcq->uar = &mdev->mlx5e_res.cq_uar;
for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); @@@ -938,6 -956,7 +956,7 @@@ static int mlx5e_enable_cq(struct mlx5e
mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+ MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - @@@ -967,8 -986,7 +986,7 @@@ static void mlx5e_disable_cq(struct mlx static int mlx5e_open_cq(struct mlx5e_channel *c, struct mlx5e_cq_param *param, struct mlx5e_cq *cq, - u16 moderation_usecs, - u16 moderation_frames) + struct mlx5e_cq_moder moderation) { int err; struct mlx5e_priv *priv = c->priv; @@@ -984,8 -1002,8 +1002,8 @@@
if (MLX5_CAP_GEN(mdev, cq_moderation)) mlx5_core_modify_cq_moderation(mdev, &cq->mcq, - moderation_usecs, - moderation_frames); + moderation.usec, + moderation.pkts); return 0;
err_destroy_cq: @@@ -1014,8 -1032,7 +1032,7 @@@ static int mlx5e_open_tx_cqs(struct mlx
for (tc = 0; tc < c->num_tc; tc++) { err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq, - priv->params.tx_cq_moderation_usec, - priv->params.tx_cq_moderation_pkts); + priv->params.tx_cq_moderation); if (err) goto err_close_tx_cqs; } @@@ -1070,19 -1087,96 +1087,96 @@@ static void mlx5e_build_channeltc_to_tx { int i;
- for (i = 0; i < MLX5E_MAX_NUM_TC; i++) + for (i = 0; i < priv->profile->max_tc; i++) priv->channeltc_to_txq_map[ix][i] = ix + i * priv->params.num_channels; }
+ static int mlx5e_set_sq_maxrate(struct net_device *dev, + struct mlx5e_sq *sq, u32 rate) + { + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + u16 rl_index = 0; + int err; + + if (rate == sq->rate_limit) + /* nothing to do */ + return 0; + + if (sq->rate_limit) + /* remove current rl index to free space to next ones */ + mlx5_rl_remove_rate(mdev, sq->rate_limit); + + sq->rate_limit = 0; + + if (rate) { + err = mlx5_rl_add_rate(mdev, rate, &rl_index); + if (err) { + netdev_err(dev, "Failed configuring rate %u: %d\n", + rate, err); + return err; + } + } + + err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, + MLX5_SQC_STATE_RDY, true, rl_index); + if (err) { + netdev_err(dev, "Failed configuring rate %u: %d\n", + rate, err); + /* remove the rate from the table */ + if (rate) + mlx5_rl_remove_rate(mdev, rate); + return err; + } + + sq->rate_limit = rate; + return 0; + } + + static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate) + { + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_sq *sq = priv->txq_to_sq_map[index]; + int err = 0; + + if (!mlx5_rl_is_supported(mdev)) { + netdev_err(dev, "Rate limiting is not supported on this device\n"); + return -EINVAL; + } + + /* rate is given in Mb/sec, HW config is in Kb/sec */ + rate = rate << 10; + + /* Check whether rate in valid range, 0 is always valid */ + if (rate && !mlx5_rl_is_in_range(mdev, rate)) { + netdev_err(dev, "TX rate %u, is not in range\n", rate); + return -ERANGE; + } + + mutex_lock(&priv->state_lock); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + err = mlx5e_set_sq_maxrate(dev, sq, rate); + if (!err) + priv->tx_rates[index] = rate; + mutex_unlock(&priv->state_lock); + + return err; + } + static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct mlx5e_channel_param *cparam, struct mlx5e_channel **cp) { + struct mlx5e_cq_moder icosq_cq_moder = {0, 0}; struct net_device *netdev = priv->netdev; + struct mlx5e_cq_moder rx_cq_profile; int cpu = mlx5e_get_cpu(priv, ix); struct mlx5e_channel *c; + struct mlx5e_sq *sq; int err; + int i;
c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); if (!c) @@@ -1093,14 -1187,19 +1187,19 @@@ c->cpu = cpu; c->pdev = &priv->mdev->pdev->dev; c->netdev = priv->netdev; - c->mkey_be = cpu_to_be32(priv->mkey.key); + c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); c->num_tc = priv->params.num_tc;
+ if (priv->params.rx_am_enabled) + rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode); + else + rx_cq_profile = priv->params.rx_cq_moderation; + mlx5e_build_channeltc_to_txq_map(priv, ix);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
- err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, 0, 0); + err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder); if (err) goto err_napi_del;
@@@ -1109,8 -1208,7 +1208,7 @@@ goto err_close_icosq_cq;
err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq, - priv->params.rx_cq_moderation_usec, - priv->params.rx_cq_moderation_pkts); + rx_cq_profile); if (err) goto err_close_tx_cqs;
@@@ -1124,6 -1222,16 +1222,16 @@@ if (err) goto err_close_icosq;
+ for (i = 0; i < priv->params.num_tc; i++) { + u32 txq_ix = priv->channeltc_to_txq_map[ix][i]; + + if (priv->tx_rates[txq_ix]) { + sq = priv->txq_to_sq_map[txq_ix]; + mlx5e_set_sq_maxrate(priv->netdev, sq, + priv->tx_rates[txq_ix]); + } + } + err = mlx5e_open_rq(c, &cparam->rq, &c->rq); if (err) goto err_close_sqs; @@@ -1195,11 -1303,13 +1303,13 @@@ static void mlx5e_build_rq_param(struc MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size); - MLX5_SET(wq, wq, pd, priv->pdn); + MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn); MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); param->wq.linear = 1; + + param->am_enabled = priv->params.rx_am_enabled; }
static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param) @@@ -1218,7 -1328,7 +1328,7 @@@ static void mlx5e_build_sq_param_common void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); - MLX5_SET(wq, wq, pd, priv->pdn); + MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); } @@@ -1240,7 -1350,7 +1350,7 @@@ static void mlx5e_build_common_cq_param { void *cqc = param->cqc;
- MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index); + MLX5_SET(cqc, cqc, uar_page, priv->mdev->mlx5e_res.cq_uar.index); }
static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, @@@ -1265,6 -1375,8 +1375,8 @@@ }
mlx5e_build_common_cq_param(priv, param); + + param->cq_period_mode = priv->params.rx_cq_period_mode; }
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, @@@ -1275,6 -1387,8 +1387,8 @@@ MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
mlx5e_build_common_cq_param(priv, param); + + param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; }
static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv, @@@ -1286,6 -1400,8 +1400,8 @@@ MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
mlx5e_build_common_cq_param(priv, param); + + param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; }
static void mlx5e_build_icosq_param(struct mlx5e_priv *priv, @@@ -1348,11 -1464,6 +1464,11 @@@ static int mlx5e_open_channels(struct m goto err_close_channels; }
+ /* FIXME: This is a W/A for tx timeout watch dog false alarm when + * polling for inactive tx queues. + */ + netif_tx_start_all_queues(priv->netdev); + kfree(cparam); return 0;
@@@ -1372,12 -1483,6 +1488,12 @@@ static void mlx5e_close_channels(struc { int i;
+ /* FIXME: This is a W/A only for tx timeout watch dog false alarm when + * polling for inactive tx queues. + */ + netif_tx_stop_all_queues(priv->netdev); + netif_tx_disable(priv->netdev); + for (i = 0; i < priv->params.num_channels; i++) mlx5e_close_channel(priv->channel[i]);
@@@ -1432,7 -1537,8 +1548,8 @@@ static void mlx5e_fill_direct_rqt_rqn(s MLX5_SET(rqtc, rqtc, rq_num[0], rqn); }
- static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, int ix, u32 *rqtn) + static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, + int ix, struct mlx5e_rqt *rqt) { struct mlx5_core_dev *mdev = priv->mdev; void *rqtc; @@@ -1455,34 -1561,36 +1572,36 @@@ else mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
- err = mlx5_core_create_rqt(mdev, in, inlen, rqtn); + err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn); + if (!err) + rqt->enabled = true;
kvfree(in); return err; }
- static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, u32 rqtn) + void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt) { - mlx5_core_destroy_rqt(priv->mdev, rqtn); + rqt->enabled = false; + mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn); }
- static int mlx5e_create_rqts(struct mlx5e_priv *priv) + static int mlx5e_create_indirect_rqts(struct mlx5e_priv *priv) { - int nch = mlx5e_get_max_num_channels(priv->mdev); - u32 *rqtn; + struct mlx5e_rqt *rqt = &priv->indir_rqt; + + return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqt); + } + + int mlx5e_create_direct_rqts(struct mlx5e_priv *priv) + { + struct mlx5e_rqt *rqt; int err; int ix;
- /* Indirect RQT */ - rqtn = &priv->indir_rqtn; - err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqtn); - if (err) - return err; - - /* Direct RQTs */ - for (ix = 0; ix < nch; ix++) { - rqtn = &priv->direct_tir[ix].rqtn; - err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqtn); + for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) { + rqt = &priv->direct_tir[ix].rqt; + err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqt); if (err) goto err_destroy_rqts; } @@@ -1491,24 -1599,11 +1610,11 @@@
err_destroy_rqts: for (ix--; ix >= 0; ix--) - mlx5e_destroy_rqt(priv, priv->direct_tir[ix].rqtn); - - mlx5e_destroy_rqt(priv, priv->indir_rqtn); + mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);
return err; }
- static void mlx5e_destroy_rqts(struct mlx5e_priv *priv) - { - int nch = mlx5e_get_max_num_channels(priv->mdev); - int i; - - for (i = 0; i < nch; i++) - mlx5e_destroy_rqt(priv, priv->direct_tir[i].rqtn); - - mlx5e_destroy_rqt(priv, priv->indir_rqtn); - } - int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix) { struct mlx5_core_dev *mdev = priv->mdev; @@@ -1544,10 -1639,15 +1650,15 @@@ static void mlx5e_redirect_rqts(struct u32 rqtn; int ix;
- rqtn = priv->indir_rqtn; - mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0); + if (priv->indir_rqt.enabled) { + rqtn = priv->indir_rqt.rqtn; + mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0); + } + for (ix = 0; ix < priv->params.num_channels; ix++) { - rqtn = priv->direct_tir[ix].rqtn; + if (!priv->direct_tir[ix].rqt.enabled) + continue; + rqtn = priv->direct_tir[ix].rqt.rqtn; mlx5e_redirect_rqt(priv, rqtn, 1, ix); } } @@@ -1607,13 -1707,13 +1718,13 @@@ static int mlx5e_modify_tirs_lro(struc mlx5e_build_tir_ctx_lro(tirc, priv);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { - err = mlx5_core_modify_tir(mdev, priv->indir_tirn[tt], in, + err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen); if (err) goto free_in; }
- for (ix = 0; ix < mlx5e_get_max_num_channels(mdev); ix++) { + for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) { err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, in, inlen); if (err) @@@ -1626,40 -1726,6 +1737,6 @@@ free_in return err; }
- static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv) - { - void *in; - int inlen; - int err; - int i; - - inlen = MLX5_ST_SZ_BYTES(modify_tir_in); - in = mlx5_vzalloc(inlen); - if (!in) - return -ENOMEM; - - MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); - - for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) { - err = mlx5_core_modify_tir(priv->mdev, priv->indir_tirn[i], in, - inlen); - if (err) - return err; - } - - for (i = 0; i < priv->params.num_channels; i++) { - err = mlx5_core_modify_tir(priv->mdev, - priv->direct_tir[i].tirn, in, - inlen); - if (err) - return err; - } - - kvfree(in); - - return 0; - } - static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu) { struct mlx5_core_dev *mdev = priv->mdev; @@@ -1731,6 -1797,7 +1808,7 @@@ static void mlx5e_netdev_set_tcs(struc int mlx5e_open_locked(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; int num_txqs; int err;
@@@ -1753,7 -1820,7 +1831,7 @@@ goto err_clear_state_opened_flag; }
- err = mlx5e_refresh_tirs_self_loopback_enable(priv); + err = mlx5e_refresh_tirs_self_loopback_enable(priv->mdev); if (err) { netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n", __func__, err); @@@ -1766,9 -1833,14 +1844,14 @@@ #ifdef CONFIG_RFS_ACCEL priv->netdev->rx_cpu_rmap = priv->mdev->rmap; #endif + if (priv->profile->update_stats) + queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
- queue_delayed_work(priv->wq, &priv->update_stats_work, 0); - + if (MLX5_CAP_GEN(mdev, vport_group_manager)) { + err = mlx5e_add_sqs_fwd_rules(priv); + if (err) + goto err_close_channels; + } return 0;
err_close_channels: @@@ -1778,7 -1850,7 +1861,7 @@@ err_clear_state_opened_flag return err; }
- static int mlx5e_open(struct net_device *netdev) + int mlx5e_open(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); int err; @@@ -1793,6 -1865,7 +1876,7 @@@ int mlx5e_close_locked(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev;
/* May already be CLOSED in case a previous configuration operation * (e.g RX/TX queue size change) that involves close&open failed. @@@ -1802,6 -1875,9 +1886,9 @@@
clear_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (MLX5_CAP_GEN(mdev, vport_group_manager)) + mlx5e_remove_sqs_fwd_rules(priv); + mlx5e_timestamp_cleanup(priv); netif_carrier_off(priv->netdev); mlx5e_redirect_rqts(priv); @@@ -1810,7 -1886,7 +1897,7 @@@ return 0; }
- static int mlx5e_close(struct net_device *netdev) + int mlx5e_close(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); int err; @@@ -1869,7 -1945,7 +1956,7 @@@ static int mlx5e_create_drop_cq(struct mcq->comp = mlx5e_completion_event; mcq->event = mlx5e_cq_error_event; mcq->irqn = irqn; - mcq->uar = &priv->cq_uar; + mcq->uar = &mdev->mlx5e_res.cq_uar;
cq->priv = priv;
@@@ -1935,7 -2011,7 +2022,7 @@@ static int mlx5e_create_tis(struct mlx5 memset(in, 0, sizeof(in));
MLX5_SET(tisc, tisc, prio, tc << 1); - MLX5_SET(tisc, tisc, transport_domain, priv->tdn); + MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]); } @@@ -1945,12 -2021,12 +2032,12 @@@ static void mlx5e_destroy_tis(struct ml mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]); }
- static int mlx5e_create_tises(struct mlx5e_priv *priv) + int mlx5e_create_tises(struct mlx5e_priv *priv) { int err; int tc;
- for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++) { + for (tc = 0; tc < priv->profile->max_tc; tc++) { err = mlx5e_create_tis(priv, tc); if (err) goto err_close_tises; @@@ -1965,11 -2041,11 +2052,11 @@@ err_close_tises return err; }
- static void mlx5e_destroy_tises(struct mlx5e_priv *priv) + void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) { int tc;
- for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++) + for (tc = 0; tc < priv->profile->max_tc; tc++) mlx5e_destroy_tis(priv, tc); }
@@@ -1978,7 -2054,7 +2065,7 @@@ static void mlx5e_build_indir_tir_ctx(s { void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
- MLX5_SET(tirc, tirc, transport_domain, priv->tdn); + MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ MLX5_HASH_FIELD_SEL_DST_IP) @@@ -1995,7 -2071,7 +2082,7 @@@ mlx5e_build_tir_ctx_lro(tirc, priv);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); - MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqtn); + MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn); mlx5e_build_tir_ctx_hash(tirc, priv);
switch (tt) { @@@ -2085,7 -2161,7 +2172,7 @@@ static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, u32 rqtn) { - MLX5_SET(tirc, tirc, transport_domain, priv->tdn); + MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
mlx5e_build_tir_ctx_lro(tirc, priv);
@@@ -2094,15 -2170,13 +2181,13 @@@ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); }
- static int mlx5e_create_tirs(struct mlx5e_priv *priv) + static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv) { - int nch = mlx5e_get_max_num_channels(priv->mdev); + struct mlx5e_tir *tir; void *tirc; int inlen; - u32 *tirn; int err; u32 *in; - int ix; int tt;
inlen = MLX5_ST_SZ_BYTES(create_tir_in); @@@ -2110,25 -2184,51 +2195,51 @@@ if (!in) return -ENOMEM;
- /* indirect tirs */ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { memset(in, 0, inlen); - tirn = &priv->indir_tirn[tt]; + tir = &priv->indir_tir[tt]; tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); mlx5e_build_indir_tir_ctx(priv, tirc, tt); - err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn); + err = mlx5e_create_tir(priv->mdev, tir, in, inlen); if (err) goto err_destroy_tirs; }
- /* direct tirs */ + kvfree(in); + + return 0; + + err_destroy_tirs: + for (tt--; tt >= 0; tt--) + mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]); + + kvfree(in); + + return err; + } + + int mlx5e_create_direct_tirs(struct mlx5e_priv *priv) + { + int nch = priv->profile->max_nch(priv->mdev); + struct mlx5e_tir *tir; + void *tirc; + int inlen; + int err; + u32 *in; + int ix; + + inlen = MLX5_ST_SZ_BYTES(create_tir_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + for (ix = 0; ix < nch; ix++) { memset(in, 0, inlen); - tirn = &priv->direct_tir[ix].tirn; + tir = &priv->direct_tir[ix]; tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); mlx5e_build_direct_tir_ctx(priv, tirc, - priv->direct_tir[ix].rqtn); - err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn); + priv->direct_tir[ix].rqt.rqtn); + err = mlx5e_create_tir(priv->mdev, tir, in, inlen); if (err) goto err_destroy_ch_tirs; } @@@ -2139,27 -2239,28 +2250,28 @@@
err_destroy_ch_tirs: for (ix--; ix >= 0; ix--) - mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[ix].tirn); - - err_destroy_tirs: - for (tt--; tt >= 0; tt--) - mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[tt]); + mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);
kvfree(in);
return err; }
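The err_destroy_tirs/err_destroy_ch_tirs paths above use the driver's standard backward unwind: on a failure at index ix, only the ix objects already created are destroyed, newest first. A generic sketch of the idiom, with create_one() and destroy_one() as hypothetical placeholders:

/* Hedged sketch of the backward-unwind idiom used by
 * mlx5e_create_indirect_tirs() and mlx5e_create_direct_tirs() above:
 * on failure, tear down only the objects already created, in reverse
 * order. create_one() and destroy_one() are placeholders.
 */
int create_one(int i);		/* placeholder: returns 0 on success */
void destroy_one(int i);	/* placeholder */

static int create_all(int n)
{
	int err;
	int i;

	for (i = 0; i < n; i++) {
		err = create_one(i);
		if (err)
			goto err_unwind;
	}

	return 0;

err_unwind:
	for (i--; i >= 0; i--)
		destroy_one(i);

	return err;
}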
- static void mlx5e_destroy_tirs(struct mlx5e_priv *priv) + static void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv) { - int nch = mlx5e_get_max_num_channels(priv->mdev); int i;
- for (i = 0; i < nch; i++) - mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[i].tirn); - for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) - mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[i]); + mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]); + } + + void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv) + { + int nch = priv->profile->max_nch(priv->mdev); + int i; + + for (i = 0; i < nch; i++) + mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]); }
int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd) @@@ -2233,7 -2334,7 +2345,7 @@@ mqprio return mlx5e_setup_tc(dev, tc->tc); }
- static struct rtnl_link_stats64 * + struct rtnl_link_stats64 * mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); @@@ -2585,25 -2686,31 +2697,31 @@@ static int mlx5e_get_vf_stats(struct ne }
static void mlx5e_add_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct mlx5e_priv *priv = netdev_priv(netdev);
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + if (!mlx5e_vxlan_allowed(priv->mdev)) return;
- mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1); + mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1); }
static void mlx5e_del_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct mlx5e_priv *priv = netdev_priv(netdev);
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + if (!mlx5e_vxlan_allowed(priv->mdev)) return;
- mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0); + mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0); }
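The pair of handlers just above show the new contract that replaced ndo_add_vxlan_port/ndo_del_vxlan_port: a single ndo_udp_tunnel_add/ndo_udp_tunnel_del pair through which the core reports every UDP tunnel type, leaving the driver to filter on ti->type. A minimal sketch of the add side for a VXLAN-only offload, with program_vxlan_port() as a hypothetical stand-in for the driver-specific work:

#include <net/udp_tunnel.h>

void program_vxlan_port(struct net_device *netdev, u16 port); /* placeholder */

/* Hedged sketch: the generic shape of an ndo_udp_tunnel_add handler
 * for hardware that only offloads VXLAN, mirroring
 * mlx5e_add_vxlan_port() above.
 */
static void example_udp_tunnel_add(struct net_device *netdev,
				   struct udp_tunnel_info *ti)
{
	/* the core also reports GENEVE and other tunnel types here */
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	program_vxlan_port(netdev, be16_to_cpu(ti->port));
}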
static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv, @@@ -2667,7 -2774,7 +2785,7 @@@ static void mlx5e_tx_timeout(struct net for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) { struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
- if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i))) + if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i))) continue; sched_work = true; set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state); @@@ -2693,6 -2800,7 +2811,7 @@@ static const struct net_device_ops mlx5 .ndo_set_features = mlx5e_set_features, .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, + .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx5e_rx_flow_steer, #endif @@@ -2713,8 -2821,9 +2832,9 @@@ static const struct net_device_ops mlx5 .ndo_set_features = mlx5e_set_features, .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, - .ndo_add_vxlan_port = mlx5e_add_vxlan_port, - .ndo_del_vxlan_port = mlx5e_del_vxlan_port, + .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, + .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, + .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, .ndo_features_check = mlx5e_features_check, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx5e_rx_flow_steer, @@@ -2844,13 -2953,31 +2964,31 @@@ static bool cqe_compress_heuristic(u32 (pci_bw < 40000) && (pci_bw < link_speed)); }
- static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, - struct net_device *netdev, - int num_channels) + void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) + { + params->rx_cq_period_mode = cq_period_mode; + + params->rx_cq_moderation.pkts = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; + params->rx_cq_moderation.usec = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; + + if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) + params->rx_cq_moderation.usec = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; + } + + static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, + void *ppriv) { struct mlx5e_priv *priv = netdev_priv(netdev); u32 link_speed = 0; u32 pci_bw = 0; + u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? + MLX5_CQ_PERIOD_MODE_START_FROM_CQE : + MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; @@@ -2896,13 -3023,13 +3034,13 @@@
priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type, BIT(priv->params.log_rq_size)); - priv->params.rx_cq_moderation_usec = - MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; - priv->params.rx_cq_moderation_pkts = - MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; - priv->params.tx_cq_moderation_usec = + + priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation); + mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode); + + priv->params.tx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; - priv->params.tx_cq_moderation_pkts = + priv->params.tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); priv->params.num_tc = 1; @@@ -2912,14 -3039,20 +3050,20 @@@ sizeof(priv->params.toeplitz_hash_key));
mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt, - MLX5E_INDIR_RQT_SIZE, num_channels); + MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ /* Initialize pflags */ + MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER, + priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE); + priv->mdev = mdev; priv->netdev = netdev; - priv->params.num_channels = num_channels; + priv->params.num_channels = profile->max_nch(mdev); + priv->profile = profile; + priv->ppriv = ppriv;
#ifdef CONFIG_MLX5_CORE_EN_DCB mlx5e_ets_init(priv); @@@ -2945,7 -3078,11 +3089,11 @@@ static void mlx5e_set_netdev_dev_addr(s } }
- static void mlx5e_build_netdev(struct net_device *netdev) + static const struct switchdev_ops mlx5e_switchdev_ops = { + .switchdev_port_attr_get = mlx5e_attr_get, + }; + + static void mlx5e_build_nic_netdev(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; @@@ -3026,31 -3163,11 +3174,11 @@@ netdev->priv_flags |= IFF_UNICAST_FLT;
mlx5e_set_netdev_dev_addr(netdev); - } - - static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn, - struct mlx5_core_mkey *mkey) - { - struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5_create_mkey_mbox_in *in; - int err; - - in = mlx5_vzalloc(sizeof(*in)); - if (!in) - return -ENOMEM; - - in->seg.flags = MLX5_PERM_LOCAL_WRITE | - MLX5_PERM_LOCAL_READ | - MLX5_ACCESS_MODE_PA; - in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); - in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); - - err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL, - NULL);
- kvfree(in); - - return err; + #ifdef CONFIG_NET_SWITCHDEV + if (MLX5_CAP_GEN(mdev, vport_group_manager)) + netdev->switchdev_ops = &mlx5e_switchdev_ops; + #endif }
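The new switchdev_ops hook is attached only when the device is an eswitch (vport group) manager, so the PF netdev and its representors can report a common physical switch ID. A hedged sketch of what a switchdev_port_attr_get implementation of this kind typically returns; the ID bytes below are illustrative, the real mlx5e_attr_get derives them from the eswitch:

#include <linux/etherdevice.h>
#include <net/switchdev.h>

/* Hedged sketch: report a parent switch ID so userspace can group all
 * netdevs that sit on one offloaded switch. Using dev_addr as the ID
 * is purely illustrative.
 */
static int example_attr_get(struct net_device *dev,
			    struct switchdev_attr *attr)
{
	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = ETH_ALEN;
		memcpy(&attr->u.ppid.id, dev->dev_addr, ETH_ALEN);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}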
static void mlx5e_create_q_counter(struct mlx5e_priv *priv) @@@ -3080,7 -3197,7 +3208,7 @@@ static int mlx5e_create_umr_mkey(struc struct mlx5_mkey_seg *mkc; int inlen = sizeof(*in); u64 npages = - mlx5e_get_max_num_channels(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS; + priv->profile->max_nch(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS; int err;
in = mlx5_vzalloc(inlen); @@@ -3095,7 -3212,7 +3223,7 @@@ MLX5_ACCESS_MODE_MTT;
mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); - mkc->flags_pd = cpu_to_be32(priv->pdn); + mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn); mkc->len = cpu_to_be64(npages << PAGE_SHIFT); mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages)); mkc->log2_page_size = PAGE_SHIFT; @@@ -3108,160 -3225,233 +3236,233 @@@ return err; }
- static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) + static void mlx5e_nic_init(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, + void *ppriv) { - struct net_device *netdev; - struct mlx5e_priv *priv; - int nch = mlx5e_get_max_num_channels(mdev); - int err; - - if (mlx5e_check_required_hca_cap(mdev)) - return NULL; + struct mlx5e_priv *priv = netdev_priv(netdev);
- netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), - nch * MLX5E_MAX_NUM_TC, - nch); - if (!netdev) { - mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); - return NULL; - } + mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv); + mlx5e_build_nic_netdev(netdev); + mlx5e_vxlan_init(priv); + }
- mlx5e_build_netdev_priv(mdev, netdev, nch); - mlx5e_build_netdev(netdev); + static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) + { + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_eswitch *esw = mdev->priv.eswitch;
- netif_carrier_off(netdev); + mlx5e_vxlan_cleanup(priv);
- priv = netdev_priv(netdev); + if (MLX5_CAP_GEN(mdev, vport_group_manager)) + mlx5_eswitch_unregister_vport_rep(esw, 0); + }
- priv->wq = create_singlethread_workqueue("mlx5e"); - if (!priv->wq) - goto err_free_netdev; + static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) + { + struct mlx5_core_dev *mdev = priv->mdev; + int err; + int i;
- err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false); + err = mlx5e_create_indirect_rqts(priv); if (err) { - mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err); - goto err_destroy_wq; + mlx5_core_warn(mdev, "create indirect rqts failed, %d\n", err); + return err; }
- err = mlx5_core_alloc_pd(mdev, &priv->pdn); + err = mlx5e_create_direct_rqts(priv); if (err) { - mlx5_core_err(mdev, "alloc pd failed, %d\n", err); - goto err_unmap_free_uar; + mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err); + goto err_destroy_indirect_rqts; }
- err = mlx5_core_alloc_transport_domain(mdev, &priv->tdn); + err = mlx5e_create_indirect_tirs(priv); if (err) { - mlx5_core_err(mdev, "alloc td failed, %d\n", err); - goto err_dealloc_pd; + mlx5_core_warn(mdev, "create indirect tirs failed, %d\n", err); + goto err_destroy_direct_rqts; }
- err = mlx5e_create_mkey(priv, priv->pdn, &priv->mkey); + err = mlx5e_create_direct_tirs(priv); if (err) { - mlx5_core_err(mdev, "create mkey failed, %d\n", err); - goto err_dealloc_transport_domain; + mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err); + goto err_destroy_indirect_tirs; }
- err = mlx5e_create_umr_mkey(priv); + err = mlx5e_create_flow_steering(priv); if (err) { - mlx5_core_err(mdev, "create umr mkey failed, %d\n", err); - goto err_destroy_mkey; + mlx5_core_warn(mdev, "create flow steering failed, %d\n", err); + goto err_destroy_direct_tirs; }
+ err = mlx5e_tc_init(priv); + if (err) + goto err_destroy_flow_steering; + + return 0; + + err_destroy_flow_steering: + mlx5e_destroy_flow_steering(priv); + err_destroy_direct_tirs: + mlx5e_destroy_direct_tirs(priv); + err_destroy_indirect_tirs: + mlx5e_destroy_indirect_tirs(priv); + err_destroy_direct_rqts: + for (i = 0; i < priv->profile->max_nch(mdev); i++) + mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt); + err_destroy_indirect_rqts: + mlx5e_destroy_rqt(priv, &priv->indir_rqt); + return err; + } + + static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) + { + int i; + + mlx5e_tc_cleanup(priv); + mlx5e_destroy_flow_steering(priv); + mlx5e_destroy_direct_tirs(priv); + mlx5e_destroy_indirect_tirs(priv); + for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) + mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt); + mlx5e_destroy_rqt(priv, &priv->indir_rqt); + } + + static int mlx5e_init_nic_tx(struct mlx5e_priv *priv) + { + int err; + err = mlx5e_create_tises(priv); if (err) { - mlx5_core_warn(mdev, "create tises failed, %d\n", err); - goto err_destroy_umr_mkey; + mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err); + return err; }
- err = mlx5e_open_drop_rq(priv); - if (err) { - mlx5_core_err(mdev, "open drop rq failed, %d\n", err); - goto err_destroy_tises; + #ifdef CONFIG_MLX5_CORE_EN_DCB + mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets); + #endif + return 0; + } + + static void mlx5e_nic_enable(struct mlx5e_priv *priv) + { + struct net_device *netdev = priv->netdev; + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_eswitch *esw = mdev->priv.eswitch; + struct mlx5_eswitch_rep rep; + + if (mlx5e_vxlan_allowed(mdev)) { + rtnl_lock(); + udp_tunnel_get_rx_info(netdev); + rtnl_unlock(); }
- err = mlx5e_create_rqts(priv); - if (err) { - mlx5_core_warn(mdev, "create rqts failed, %d\n", err); - goto err_close_drop_rq; + mlx5e_enable_async_events(priv); + queue_work(priv->wq, &priv->set_rx_mode_work); + + if (MLX5_CAP_GEN(mdev, vport_group_manager)) { + rep.load = mlx5e_nic_rep_load; + rep.unload = mlx5e_nic_rep_unload; + rep.vport = 0; + rep.priv_data = priv; + mlx5_eswitch_register_vport_rep(esw, &rep); } + }
- err = mlx5e_create_tirs(priv); - if (err) { - mlx5_core_warn(mdev, "create tirs failed, %d\n", err); - goto err_destroy_rqts; + static void mlx5e_nic_disable(struct mlx5e_priv *priv) + { + queue_work(priv->wq, &priv->set_rx_mode_work); + mlx5e_disable_async_events(priv); + } + + static const struct mlx5e_profile mlx5e_nic_profile = { + .init = mlx5e_nic_init, + .cleanup = mlx5e_nic_cleanup, + .init_rx = mlx5e_init_nic_rx, + .cleanup_rx = mlx5e_cleanup_nic_rx, + .init_tx = mlx5e_init_nic_tx, + .cleanup_tx = mlx5e_cleanup_nic_tx, + .enable = mlx5e_nic_enable, + .disable = mlx5e_nic_disable, + .update_stats = mlx5e_update_stats, + .max_nch = mlx5e_get_max_num_channels, + .max_tc = MLX5E_MAX_NUM_TC, + }; + + void *mlx5e_create_netdev(struct mlx5_core_dev *mdev, + const struct mlx5e_profile *profile, void *ppriv) + { + struct net_device *netdev; + struct mlx5e_priv *priv; + int nch = profile->max_nch(mdev); + int err; + + netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), + nch * profile->max_tc, + nch); + if (!netdev) { + mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); + return NULL; }
- err = mlx5e_create_flow_steering(priv); + profile->init(mdev, netdev, profile, ppriv); + + netif_carrier_off(netdev); + + priv = netdev_priv(netdev); + + priv->wq = create_singlethread_workqueue("mlx5e"); + if (!priv->wq) + goto err_free_netdev; + + err = mlx5e_create_umr_mkey(priv); if (err) { - mlx5_core_warn(mdev, "create flow steering failed, %d\n", err); - goto err_destroy_tirs; + mlx5_core_err(mdev, "create umr mkey failed, %d\n", err); + goto err_destroy_wq; }
- mlx5e_create_q_counter(priv); - - mlx5e_init_l2_addr(priv); + err = profile->init_tx(priv); + if (err) + goto err_destroy_umr_mkey;
- mlx5e_vxlan_init(priv); + err = mlx5e_open_drop_rq(priv); + if (err) { + mlx5_core_err(mdev, "open drop rq failed, %d\n", err); + goto err_cleanup_tx; + }
- err = mlx5e_tc_init(priv); + err = profile->init_rx(priv); if (err) - goto err_dealloc_q_counters; + goto err_close_drop_rq;
- #ifdef CONFIG_MLX5_CORE_EN_DCB - mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets); - #endif + mlx5e_create_q_counter(priv); + + mlx5e_init_l2_addr(priv);
err = register_netdev(netdev); if (err) { mlx5_core_err(mdev, "register_netdev failed, %d\n", err); - goto err_tc_cleanup; - } - - if (mlx5e_vxlan_allowed(mdev)) { - rtnl_lock(); - vxlan_get_rx_port(netdev); - rtnl_unlock(); + goto err_dealloc_q_counters; }
- mlx5e_enable_async_events(priv); - queue_work(priv->wq, &priv->set_rx_mode_work); + if (profile->enable) + profile->enable(priv);
return priv;
- err_tc_cleanup: - mlx5e_tc_cleanup(priv); - err_dealloc_q_counters: mlx5e_destroy_q_counter(priv); - mlx5e_destroy_flow_steering(priv); - - err_destroy_tirs: - mlx5e_destroy_tirs(priv); - - err_destroy_rqts: - mlx5e_destroy_rqts(priv); + profile->cleanup_rx(priv);
err_close_drop_rq: mlx5e_close_drop_rq(priv);
- err_destroy_tises: - mlx5e_destroy_tises(priv); + err_cleanup_tx: + profile->cleanup_tx(priv);
err_destroy_umr_mkey: mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);
- err_destroy_mkey: - mlx5_core_destroy_mkey(mdev, &priv->mkey); - - err_dealloc_transport_domain: - mlx5_core_dealloc_transport_domain(mdev, priv->tdn); - - err_dealloc_pd: - mlx5_core_dealloc_pd(mdev, priv->pdn); - - err_unmap_free_uar: - mlx5_unmap_free_uar(mdev, &priv->cq_uar); - err_destroy_wq: destroy_workqueue(priv->wq);
@@@ -3271,15 -3461,59 +3472,59 @@@ err_free_netdev return NULL; }
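mlx5e_create_netdev() is now fully table-driven: channel and TC counts, RX/TX bring-up, and the optional enable/disable hooks all come from the const mlx5e_profile argument, which is what lets the eswitch representor netdevs added elsewhere in this series share this exact path. A hedged sketch of a caller, with ppriv as whatever profile-private pointer the profile expects (NULL here):

/* Hedged sketch: attaching a profile-backed netdev. For the NIC case,
 * mlx5e_add() below does effectively this with &mlx5e_nic_profile; a
 * representor would pass its own profile plus a ppriv pointing at its
 * mlx5_eswitch_rep.
 */
static void *example_attach(struct mlx5_core_dev *mdev,
			    const struct mlx5e_profile *profile)
{
	return mlx5e_create_netdev(mdev, profile, NULL /* ppriv */);
}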
- static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) + static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev) { - struct mlx5e_priv *priv = vpriv; + struct mlx5_eswitch *esw = mdev->priv.eswitch; + int total_vfs = MLX5_TOTAL_VPORTS(mdev); + int vport; + + if (!MLX5_CAP_GEN(mdev, vport_group_manager)) + return; + + for (vport = 1; vport < total_vfs; vport++) { + struct mlx5_eswitch_rep rep; + + rep.load = mlx5e_vport_rep_load; + rep.unload = mlx5e_vport_rep_unload; + rep.vport = vport; + mlx5_eswitch_register_vport_rep(esw, &rep); + } + } + + static void *mlx5e_add(struct mlx5_core_dev *mdev) + { + struct mlx5_eswitch *esw = mdev->priv.eswitch; + void *ppriv = NULL; + void *ret; + + if (mlx5e_check_required_hca_cap(mdev)) + return NULL; + + if (mlx5e_create_mdev_resources(mdev)) + return NULL; + + mlx5e_register_vport_rep(mdev); + + if (MLX5_CAP_GEN(mdev, vport_group_manager)) + ppriv = &esw->offloads.vport_reps[0]; + + ret = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv); + if (!ret) { + mlx5e_destroy_mdev_resources(mdev); + return NULL; + } + return ret; + } + + void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv) + { + const struct mlx5e_profile *profile = priv->profile; struct net_device *netdev = priv->netdev;
set_bit(MLX5E_STATE_DESTROYING, &priv->state); + if (profile->disable) + profile->disable(priv);
- queue_work(priv->wq, &priv->set_rx_mode_work); - mlx5e_disable_async_events(priv); flush_workqueue(priv->wq); if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) { netif_device_detach(netdev); @@@ -3288,26 -3522,35 +3533,35 @@@ unregister_netdev(netdev); }
- mlx5e_tc_cleanup(priv); - mlx5e_vxlan_cleanup(priv); mlx5e_destroy_q_counter(priv); - mlx5e_destroy_flow_steering(priv); - mlx5e_destroy_tirs(priv); - mlx5e_destroy_rqts(priv); + profile->cleanup_rx(priv); mlx5e_close_drop_rq(priv); - mlx5e_destroy_tises(priv); + profile->cleanup_tx(priv); mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey); - mlx5_core_destroy_mkey(priv->mdev, &priv->mkey); - mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn); - mlx5_core_dealloc_pd(priv->mdev, priv->pdn); - mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); cancel_delayed_work_sync(&priv->update_stats_work); destroy_workqueue(priv->wq); + if (profile->cleanup) + profile->cleanup(priv);
if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) free_netdev(netdev); }
+ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv) + { + struct mlx5_eswitch *esw = mdev->priv.eswitch; + int total_vfs = MLX5_TOTAL_VPORTS(mdev); + struct mlx5e_priv *priv = vpriv; + int vport; + + mlx5e_destroy_netdev(mdev, priv); + + for (vport = 1; vport < total_vfs; vport++) + mlx5_eswitch_unregister_vport_rep(esw, vport); + + mlx5e_destroy_mdev_resources(mdev); + } + static void *mlx5e_get_netdev(void *vpriv) { struct mlx5e_priv *priv = vpriv; @@@ -3316,8 -3559,8 +3570,8 @@@ }
static struct mlx5_interface mlx5e_interface = { - .add = mlx5e_create_netdev, - .remove = mlx5e_destroy_netdev, + .add = mlx5e_add, + .remove = mlx5e_remove, .event = mlx5e_async_event, .protocol = MLX5_INTERFACE_PROTOCOL_ETH, .get_dev = mlx5e_get_netdev, @@@ -3325,6 -3568,7 +3579,7 @@@
void mlx5e_init(void) { + mlx5e_build_ptys2ethtool_map(); mlx5_register_interface(&mlx5e_interface); }
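A thread running through the whole en_main.c hunk above: the per-netdev priv->pdn, priv->tdn, priv->mkey and priv->cq_uar allocations are gone, and the surviving users read mdev->mlx5e_res instead (see the tisc/tirc transport_domain writes and the UMR mkc->flags_pd). Their allocation moves into mlx5e_create_mdev_resources()/mlx5e_destroy_mdev_resources(), called once per device from mlx5e_add()/mlx5e_remove(), so several profile-backed netdevs can share them. The container itself is defined outside this hunk; a hedged reconstruction of its shape, inferred only from the accesses visible here:

/* Hedged reconstruction, not taken from this diff: the shared
 * per-device resource block implied by mdev->mlx5e_res.pdn and
 * mdev->mlx5e_res.td.tdn above. The deleted UAR and mkey allocations
 * presumably moved here as well; the exact definition lives in the
 * mlx5 headers.
 */
struct example_mlx5e_resources {
	u32	pdn;		/* protection domain, see mkc->flags_pd */
	struct {
		u32 tdn;	/* transport domain, see tisc/tirc sets */
	} td;
};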
diff --combined drivers/net/ethernet/mellanox/mlxsw/reg.h index 57d48da,b669b04..5b2a0b9 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@@ -1,9 -1,10 +1,10 @@@ /* * drivers/net/ethernet/mellanox/mlxsw/reg.h * Copyright (c) 2015 Mellanox Technologies. All rights reserved. - * Copyright (c) 2015 Ido Schimmel idosch@mellanox.com + * Copyright (c) 2015-2016 Ido Schimmel idosch@mellanox.com * Copyright (c) 2015 Elad Raz eladr@mellanox.com - * Copyright (c) 2015 Jiri Pirko jiri@mellanox.com + * Copyright (c) 2015-2016 Jiri Pirko jiri@mellanox.com + * Copyright (c) 2016 Yotam Gigi yotamg@mellanox.com * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@@ -386,7 -387,9 +387,9 @@@ enum mlxsw_reg_sfd_rec_action /* forward and trap, trap_id is FDB_TRAP */ MLXSW_REG_SFD_REC_ACTION_MIRROR_TO_CPU = 1, /* trap and do not forward, trap_id is FDB_TRAP */ - MLXSW_REG_SFD_REC_ACTION_TRAP = 3, + MLXSW_REG_SFD_REC_ACTION_TRAP = 2, + /* forward to IP router */ + MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER = 3, MLXSW_REG_SFD_REC_ACTION_DISCARD_ERROR = 15, };
@@@ -2500,6 -2503,7 +2503,7 @@@ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14 enum mlxsw_reg_ppcnt_grp { MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0, MLXSW_REG_PPCNT_PRIO_CNT = 0x10, + MLXSW_REG_PPCNT_TC_CNT = 0x11, };
/* reg_ppcnt_grp @@@ -2700,6 -2704,23 +2704,23 @@@ MLXSW_ITEM64(reg, ppcnt, tx_pause_durat */ MLXSW_ITEM64(reg, ppcnt, tx_pause_transition, 0x08 + 0x70, 0, 64);
+ /* Ethernet Per Traffic Group Counters */ + + /* reg_ppcnt_tc_transmit_queue + * Contains the transmit queue depth in cells of traffic class + * selected by prio_tc and the port selected by local_port. + * The field cannot be cleared. + * Access: RO + */ + MLXSW_ITEM64(reg, ppcnt, tc_transmit_queue, 0x08 + 0x00, 0, 64); + + /* reg_ppcnt_tc_no_buffer_discard_uc + * The number of unicast packets dropped due to lack of shared + * buffer resources. + * Access: RO + */ + MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc, 0x08 + 0x08, 0, 64); + static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port, enum mlxsw_reg_ppcnt_grp grp, u8 prio_tc) @@@ -2718,7 -2739,7 +2739,7 @@@ * Configures the switch priority to buffer table. */ #define MLXSW_REG_PPTB_ID 0x500B -#define MLXSW_REG_PPTB_LEN 0x0C +#define MLXSW_REG_PPTB_LEN 0x10
static const struct mlxsw_reg_info mlxsw_reg_pptb = { .id = MLXSW_REG_PPTB_ID, @@@ -2784,13 -2805,6 +2805,13 @@@ MLXSW_ITEM32(reg, pptb, pm_msb, 0x08, 2 */ MLXSW_ITEM32(reg, pptb, untagged_buff, 0x08, 0, 4);
+/* reg_pptb_prio_to_buff_msb + * Mapping of switch priority <i+8> to one of the allocated receive port + * buffers. + * Access: RW + */ +MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff_msb, 0x0C, 0x04, 4); + #define MLXSW_REG_PPTB_ALL_PRIO 0xFF
static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port) @@@ -2799,14 -2813,6 +2820,14 @@@ mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM); mlxsw_reg_pptb_local_port_set(payload, local_port); mlxsw_reg_pptb_pm_set(payload, MLXSW_REG_PPTB_ALL_PRIO); + mlxsw_reg_pptb_pm_msb_set(payload, MLXSW_REG_PPTB_ALL_PRIO); +} + +static inline void mlxsw_reg_pptb_prio_to_buff_pack(char *payload, u8 prio, + u8 buff) +{ + mlxsw_reg_pptb_prio_to_buff_set(payload, prio, buff); + mlxsw_reg_pptb_prio_to_buff_msb_set(payload, prio, buff); }
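With PPTB grown from 0x0C to 0x10 bytes, switch priorities 8..15 get their own prio_to_buff_msb array, and the new mlxsw_reg_pptb_prio_to_buff_pack() helper writes both halves so a caller cannot update one and leave the other stale. A short usage sketch, assuming the usual 8 IEEE priorities and an identity priority-to-buffer mapping (both illustrative):

#include <linux/dcbnl.h>	/* IEEE_8021QAZ_MAX_TCS */

/* Hedged usage sketch: pack an identity prio->buffer mapping for one
 * port. Each helper call below updates prio_to_buff and
 * prio_to_buff_msb together.
 */
static void example_pptb_identity(char *pptb_pl, u8 local_port)
{
	int prio;

	mlxsw_reg_pptb_pack(pptb_pl, local_port);
	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, prio, prio);
}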
/* PBMC - Port Buffer Management Control Register @@@ -3201,6 -3207,1183 +3222,1183 @@@ static inline void mlxsw_reg_hpkt_pack( mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT); }
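Everything from here to the end of the reg.h hunk adds the Spectrum router register family (RGCR, RITR, RATR, RALTA, RALST, RALTB, RALUE, RAUHT, RALEU, RAUHTD). All of them follow the file's existing pattern: a MLXSW_REG_<reg>_LEN payload, MLXSW_ITEM32()/MLXSW_ITEM_BUF() field accessors, and pack helpers that zero the payload and fill the index fields. As a taste of how the definitions below are driven, a hedged sketch that enables IPv4 routing through RGCR (mlxsw_reg_write() is the existing register-write entry point):

/* Hedged sketch: enable the IPv4 router via the RGCR register defined
 * below, using the standard mlxsw pack-then-write flow.
 */
static int example_router_enable(struct mlxsw_core *mlxsw_core)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, true);	/* ipv4_en */
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(rgcr), rgcr_pl);
}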
+ /* RGCR - Router General Configuration Register + * -------------------------------------------- + * The register is used for setting up the router configuration. + */ + #define MLXSW_REG_RGCR_ID 0x8001 + #define MLXSW_REG_RGCR_LEN 0x28 + + static const struct mlxsw_reg_info mlxsw_reg_rgcr = { + .id = MLXSW_REG_RGCR_ID, + .len = MLXSW_REG_RGCR_LEN, + }; + + /* reg_rgcr_ipv4_en + * IPv4 router enable. + * Access: RW + */ + MLXSW_ITEM32(reg, rgcr, ipv4_en, 0x00, 31, 1); + + /* reg_rgcr_ipv6_en + * IPv6 router enable. + * Access: RW + */ + MLXSW_ITEM32(reg, rgcr, ipv6_en, 0x00, 30, 1); + + /* reg_rgcr_max_router_interfaces + * Defines the maximum number of active router interfaces for all virtual + * routers. + * Access: RW + */ + MLXSW_ITEM32(reg, rgcr, max_router_interfaces, 0x10, 0, 16); + + /* reg_rgcr_usp + * Update switch priority and packet color. + * 0 - Preserve the value of Switch Priority and packet color. + * 1 - Recalculate the value of Switch Priority and packet color. + * Access: RW + * + * Note: Not supported by SwitchX and SwitchX-2. + */ + MLXSW_ITEM32(reg, rgcr, usp, 0x18, 20, 1); + + /* reg_rgcr_pcp_rw + * Indicates how to handle the pcp_rewrite_en value: + * 0 - Preserve the value of pcp_rewrite_en. + * 2 - Disable PCP rewrite. + * 3 - Enable PCP rewrite. + * Access: RW + * + * Note: Not supported by SwitchX and SwitchX-2. + */ + MLXSW_ITEM32(reg, rgcr, pcp_rw, 0x18, 16, 2); + + /* reg_rgcr_activity_dis + * Activity disable: + * 0 - Activity will be set when an entry is hit (default). + * 1 - Activity will not be set when an entry is hit. + * + * Bit 0 - Disable activity bit in Router Algorithmic LPM Unicast Entry + * (RALUE). + * Bit 1 - Disable activity bit in Router Algorithmic LPM Unicast Host + * Entry (RAUHT). + * Bits 2:7 are reserved. + * Access: RW + * + * Note: Not supported by SwitchX, SwitchX-2 and Switch-IB. + */ + MLXSW_ITEM32(reg, rgcr, activity_dis, 0x20, 0, 8); + + static inline void mlxsw_reg_rgcr_pack(char *payload, bool ipv4_en) + { + MLXSW_REG_ZERO(rgcr, payload); + mlxsw_reg_rgcr_ipv4_en_set(payload, ipv4_en); + } + + /* RITR - Router Interface Table Register + * -------------------------------------- + * The register is used to configure the router interface table. + */ + #define MLXSW_REG_RITR_ID 0x8002 + #define MLXSW_REG_RITR_LEN 0x40 + + static const struct mlxsw_reg_info mlxsw_reg_ritr = { + .id = MLXSW_REG_RITR_ID, + .len = MLXSW_REG_RITR_LEN, + }; + + /* reg_ritr_enable + * Enables routing on the router interface. + * Access: RW + */ + MLXSW_ITEM32(reg, ritr, enable, 0x00, 31, 1); + + /* reg_ritr_ipv4 + * IPv4 routing enable. Enables routing of IPv4 traffic on the router + * interface. + * Access: RW + */ + MLXSW_ITEM32(reg, ritr, ipv4, 0x00, 29, 1); + + /* reg_ritr_ipv6 + * IPv6 routing enable. Enables routing of IPv6 traffic on the router + * interface. + * Access: RW + */ + MLXSW_ITEM32(reg, ritr, ipv6, 0x00, 28, 1); + + enum mlxsw_reg_ritr_if_type { + MLXSW_REG_RITR_VLAN_IF, + MLXSW_REG_RITR_FID_IF, + MLXSW_REG_RITR_SP_IF, + }; + + /* reg_ritr_type + * Router interface type. + * 0 - VLAN interface. + * 1 - FID interface. + * 2 - Sub-port interface. + * Access: RW + */ + MLXSW_ITEM32(reg, ritr, type, 0x00, 23, 3); + + enum { + MLXSW_REG_RITR_RIF_CREATE, + MLXSW_REG_RITR_RIF_DEL, + }; + + /* reg_ritr_op + * Opcode: + * 0 - Create or edit RIF. + * 1 - Delete RIF. + * Reserved for SwitchX-2. For Spectrum, editing of interface properties + * is not supported. An interface must be deleted and re-created in order + * to update properties. 
+ * Access: WO + */ + MLXSW_ITEM32(reg, ritr, op, 0x00, 20, 2); + + /* reg_ritr_rif + * Router interface index. A pointer to the Router Interface Table. + * Access: Index + */ + MLXSW_ITEM32(reg, ritr, rif, 0x00, 0, 16); + + /* reg_ritr_ipv4_fe + * IPv4 Forwarding Enable. + * Enables routing of IPv4 traffic on the router interface. When disabled, + * forwarding is blocked but local traffic (traps and IP2ME) will be enabled. + * Not supported in SwitchX-2. + * Access: RW + */ + MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1); + + /* reg_ritr_ipv6_fe + * IPv6 Forwarding Enable. + * Enables routing of IPv6 traffic on the router interface. When disabled, + * forwarding is blocked but local traffic (traps and IP2ME) will be enabled. + * Not supported in SwitchX-2. + * Access: RW + */ + MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1); + + /* reg_ritr_virtual_router + * Virtual router ID associated with the router interface. + * Access: RW + */ + MLXSW_ITEM32(reg, ritr, virtual_router, 0x04, 0, 16); + + /* reg_ritr_mtu + * Router interface MTU. + * Access: RW + */ + MLXSW_ITEM32(reg, ritr, mtu, 0x34, 0, 16); + + /* reg_ritr_if_swid + * Switch partition ID. + * Access: RW + */ + MLXSW_ITEM32(reg, ritr, if_swid, 0x08, 24, 8); + + /* reg_ritr_if_mac + * Router interface MAC address. + * In Spectrum, all MAC addresses must have the same 38 MSBits. + * Access: RW + */ + MLXSW_ITEM_BUF(reg, ritr, if_mac, 0x12, 6); + + /* VLAN Interface */ + + /* reg_ritr_vlan_if_vid + * VLAN ID. + * Access: RW + */ + MLXSW_ITEM32(reg, ritr, vlan_if_vid, 0x08, 0, 12); + + /* FID Interface */ + + /* reg_ritr_fid_if_fid + * Filtering ID. Used to connect a bridge to the router. Only FIDs from + * the vFID range are supported. + * Access: RW + */ + MLXSW_ITEM32(reg, ritr, fid_if_fid, 0x08, 0, 16); + + static inline void mlxsw_reg_ritr_fid_set(char *payload, + enum mlxsw_reg_ritr_if_type rif_type, + u16 fid) + { + if (rif_type == MLXSW_REG_RITR_FID_IF) + mlxsw_reg_ritr_fid_if_fid_set(payload, fid); + else + mlxsw_reg_ritr_vlan_if_vid_set(payload, fid); + } + + /* Sub-port Interface */ + + /* reg_ritr_sp_if_lag + * LAG indication. When this bit is set the system_port field holds the + * LAG identifier. + * Access: RW + */ + MLXSW_ITEM32(reg, ritr, sp_if_lag, 0x08, 24, 1); + + /* reg_ritr_sp_system_port + * Port unique identifier. When the lag bit is set, this field holds the + * lag_id in bits 0:9. + * Access: RW + */ + MLXSW_ITEM32(reg, ritr, sp_if_system_port, 0x08, 0, 16); + + /* reg_ritr_sp_if_vid + * VLAN ID. + * Access: RW + */ + MLXSW_ITEM32(reg, ritr, sp_if_vid, 0x18, 0, 12); + + static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif) + { + MLXSW_REG_ZERO(ritr, payload); + mlxsw_reg_ritr_rif_set(payload, rif); + } + + static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag, + u16 system_port, u16 vid) + { + mlxsw_reg_ritr_sp_if_lag_set(payload, lag); + mlxsw_reg_ritr_sp_if_system_port_set(payload, system_port); + mlxsw_reg_ritr_sp_if_vid_set(payload, vid); + } + + static inline void mlxsw_reg_ritr_pack(char *payload, bool enable, + enum mlxsw_reg_ritr_if_type type, + u16 rif, u16 mtu, const char *mac) + { + bool op = enable ?
MLXSW_REG_RITR_RIF_CREATE : MLXSW_REG_RITR_RIF_DEL; + + MLXSW_REG_ZERO(ritr, payload); + mlxsw_reg_ritr_enable_set(payload, enable); + mlxsw_reg_ritr_ipv4_set(payload, 1); + mlxsw_reg_ritr_type_set(payload, type); + mlxsw_reg_ritr_op_set(payload, op); + mlxsw_reg_ritr_rif_set(payload, rif); + mlxsw_reg_ritr_ipv4_fe_set(payload, 1); + mlxsw_reg_ritr_mtu_set(payload, mtu); + mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac); + } + + /* RATR - Router Adjacency Table Register + * -------------------------------------- + * The RATR register is used to configure the Router Adjacency (next-hop) + * Table. + */ + #define MLXSW_REG_RATR_ID 0x8008 + #define MLXSW_REG_RATR_LEN 0x2C + + static const struct mlxsw_reg_info mlxsw_reg_ratr = { + .id = MLXSW_REG_RATR_ID, + .len = MLXSW_REG_RATR_LEN, + }; + + enum mlxsw_reg_ratr_op { + /* Read */ + MLXSW_REG_RATR_OP_QUERY_READ = 0, + /* Read and clear activity */ + MLXSW_REG_RATR_OP_QUERY_READ_CLEAR = 2, + /* Write Adjacency entry */ + MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY = 1, + /* Write Adjacency entry only if the activity is cleared. + * The write may not succeed if the activity is set. There is no + * direct feedback on whether the write has succeeded, however + * the get will reveal the actual entry (SW can compare the get + * response to the set command). + */ + MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY = 3, + }; + + /* reg_ratr_op + * Note that Write operation may also be used for updating + * counter_set_type and counter_index. In this case all other + * fields must not be updated. + * Access: OP + */ + MLXSW_ITEM32(reg, ratr, op, 0x00, 28, 4); + + /* reg_ratr_v + * Valid bit. Indicates if the adjacency entry is valid. + * Note: the device may need some time before reusing an invalidated + * entry. During this time the entry cannot be reused. It is + * recommended to use another entry before reusing an invalidated + * entry (e.g. software can put it at the end of the list for + * reusing). Trying to access an invalidated entry not yet cleared + * by the device results in a failure indicating "Try Again" status. + * When valid is '0' then egress_router_interface, trap_action, + * adjacency_parameters and counters are reserved. + * Access: RW + */ + MLXSW_ITEM32(reg, ratr, v, 0x00, 24, 1); + + /* reg_ratr_a + * Activity. Set for new entries. Set if a packet lookup has hit on + * the specific entry. To clear the a bit, use "clear activity". + * Access: RO + */ + MLXSW_ITEM32(reg, ratr, a, 0x00, 16, 1); + + /* reg_ratr_adjacency_index_low + * Bits 15:0 of index into the adjacency table. + * For SwitchX and SwitchX-2, the adjacency table is linear and + * used for adjacency entries only. + * For Spectrum, the index is to the KVD linear. + * Access: Index + */ + MLXSW_ITEM32(reg, ratr, adjacency_index_low, 0x04, 0, 16); + + /* reg_ratr_egress_router_interface + * Range is 0 .. cap_max_router_interfaces - 1 + * Access: RW + */ + MLXSW_ITEM32(reg, ratr, egress_router_interface, 0x08, 0, 16); + + enum mlxsw_reg_ratr_trap_action { + MLXSW_REG_RATR_TRAP_ACTION_NOP, + MLXSW_REG_RATR_TRAP_ACTION_TRAP, + MLXSW_REG_RATR_TRAP_ACTION_MIRROR_TO_CPU, + MLXSW_REG_RATR_TRAP_ACTION_MIRROR, + MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS, + }; + + /* reg_ratr_trap_action + * see mlxsw_reg_ratr_trap_action + * Access: RW + */ + MLXSW_ITEM32(reg, ratr, trap_action, 0x0C, 28, 4); + + enum mlxsw_reg_ratr_trap_id { + MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS0 = 0, + MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS1 = 1, + }; + + /* reg_ratr_adjacency_index_high + * Bits 23:16 of the adjacency_index.
+ * Access: Index + */ + MLXSW_ITEM32(reg, ratr, adjacency_index_high, 0x0C, 16, 8); + + /* reg_ratr_trap_id + * Trap ID to be reported to CPU. + * Trap-ID is RTR_EGRESS0 or RTR_EGRESS1. + * For trap_action of NOP, MIRROR and DISCARD_ERROR, trap_id is reserved. + * Access: RW + */ + MLXSW_ITEM32(reg, ratr, trap_id, 0x0C, 0, 8); + + /* reg_ratr_eth_destination_mac + * MAC address of the destination next-hop. + * Access: RW + */ + MLXSW_ITEM_BUF(reg, ratr, eth_destination_mac, 0x12, 6); + + static inline void + mlxsw_reg_ratr_pack(char *payload, + enum mlxsw_reg_ratr_op op, bool valid, + u32 adjacency_index, u16 egress_rif) + { + MLXSW_REG_ZERO(ratr, payload); + mlxsw_reg_ratr_op_set(payload, op); + mlxsw_reg_ratr_v_set(payload, valid); + mlxsw_reg_ratr_adjacency_index_low_set(payload, adjacency_index); + mlxsw_reg_ratr_adjacency_index_high_set(payload, adjacency_index >> 16); + mlxsw_reg_ratr_egress_router_interface_set(payload, egress_rif); + } + + static inline void mlxsw_reg_ratr_eth_entry_pack(char *payload, + const char *dest_mac) + { + mlxsw_reg_ratr_eth_destination_mac_memcpy_to(payload, dest_mac); + } + + /* RALTA - Router Algorithmic LPM Tree Allocation Register + * ------------------------------------------------------- + * RALTA is used to allocate the LPM trees of the SHSPM method. + */ + #define MLXSW_REG_RALTA_ID 0x8010 + #define MLXSW_REG_RALTA_LEN 0x04 + + static const struct mlxsw_reg_info mlxsw_reg_ralta = { + .id = MLXSW_REG_RALTA_ID, + .len = MLXSW_REG_RALTA_LEN, + }; + + /* reg_ralta_op + * opcode (valid for Write, must be 0 on Read) + * 0 - allocate a tree + * 1 - deallocate a tree + * Access: OP + */ + MLXSW_ITEM32(reg, ralta, op, 0x00, 28, 2); + + enum mlxsw_reg_ralxx_protocol { + MLXSW_REG_RALXX_PROTOCOL_IPV4, + MLXSW_REG_RALXX_PROTOCOL_IPV6, + }; + + /* reg_ralta_protocol + * Protocol. + * Deallocation opcode: Reserved. + * Access: RW + */ + MLXSW_ITEM32(reg, ralta, protocol, 0x00, 24, 4); + + /* reg_ralta_tree_id + * An identifier (numbered from 1..cap_shspm_max_trees-1) representing + * the tree (managed by software). + * Note that tree_id 0 is allocated for a default-route tree. + * Access: Index + */ + MLXSW_ITEM32(reg, ralta, tree_id, 0x00, 0, 8); + + static inline void mlxsw_reg_ralta_pack(char *payload, bool alloc, + enum mlxsw_reg_ralxx_protocol protocol, + u8 tree_id) + { + MLXSW_REG_ZERO(ralta, payload); + mlxsw_reg_ralta_op_set(payload, !alloc); + mlxsw_reg_ralta_protocol_set(payload, protocol); + mlxsw_reg_ralta_tree_id_set(payload, tree_id); + } + + /* RALST - Router Algorithmic LPM Structure Tree Register + * ------------------------------------------------------ + * RALST is used to set and query the structure of an LPM tree. + * The tree must be structured as a sorted binary tree, where + * each node is a bin tagged with the length of the prefixes the lookup + * will refer to. Therefore, bin X refers to a set of entries with prefixes + * of X bits to match with the destination address. The bin 0 indicates + * the default action, when there is no match of any prefix. + */ + #define MLXSW_REG_RALST_ID 0x8011 + #define MLXSW_REG_RALST_LEN 0x104 + + static const struct mlxsw_reg_info mlxsw_reg_ralst = { + .id = MLXSW_REG_RALST_ID, + .len = MLXSW_REG_RALST_LEN, + }; + + /* reg_ralst_root_bin + * The bin number of the root bin.
+ * 0 < root_bin <= (length of IP address) + * For a default-route tree, configure 0xff. + * Access: RW + */ + MLXSW_ITEM32(reg, ralst, root_bin, 0x00, 16, 8); + + /* reg_ralst_tree_id + * Tree identifier numbered from 1..(cap_shspm_max_trees-1). + * Access: Index + */ + MLXSW_ITEM32(reg, ralst, tree_id, 0x00, 0, 8); + + #define MLXSW_REG_RALST_BIN_NO_CHILD 0xff + #define MLXSW_REG_RALST_BIN_OFFSET 0x04 + #define MLXSW_REG_RALST_BIN_COUNT 128 + + /* reg_ralst_left_child_bin + * Holding the children of the bin according to the stored tree's structure. + * For trees composed of fewer than 4 blocks, the bins in excess are reserved. + * Note that tree_id 0 is allocated for a default-route tree, bins are 0xff + * Access: RW + */ + MLXSW_ITEM16_INDEXED(reg, ralst, left_child_bin, 0x04, 8, 8, 0x02, 0x00, false); + + /* reg_ralst_right_child_bin + * Holding the children of the bin according to the stored tree's structure. + * For trees composed of fewer than 4 blocks, the bins in excess are reserved. + * Note that tree_id 0 is allocated for a default-route tree, bins are 0xff + * Access: RW + */ + MLXSW_ITEM16_INDEXED(reg, ralst, right_child_bin, 0x04, 0, 8, 0x02, 0x00, + false); + + static inline void mlxsw_reg_ralst_pack(char *payload, u8 root_bin, u8 tree_id) + { + MLXSW_REG_ZERO(ralst, payload); + + /* Initialize all bins to have no left or right child */ + memset(payload + MLXSW_REG_RALST_BIN_OFFSET, + MLXSW_REG_RALST_BIN_NO_CHILD, MLXSW_REG_RALST_BIN_COUNT * 2); + + mlxsw_reg_ralst_root_bin_set(payload, root_bin); + mlxsw_reg_ralst_tree_id_set(payload, tree_id); + } + + static inline void mlxsw_reg_ralst_bin_pack(char *payload, u8 bin_number, + u8 left_child_bin, + u8 right_child_bin) + { + int bin_index = bin_number - 1; + + mlxsw_reg_ralst_left_child_bin_set(payload, bin_index, left_child_bin); + mlxsw_reg_ralst_right_child_bin_set(payload, bin_index, + right_child_bin); + } + + /* RALTB - Router Algorithmic LPM Tree Binding Register + * ---------------------------------------------------- + * RALTB is used to bind virtual router and protocol to an allocated LPM tree. + */ + #define MLXSW_REG_RALTB_ID 0x8012 + #define MLXSW_REG_RALTB_LEN 0x04 + + static const struct mlxsw_reg_info mlxsw_reg_raltb = { + .id = MLXSW_REG_RALTB_ID, + .len = MLXSW_REG_RALTB_LEN, + }; + + /* reg_raltb_virtual_router + * Virtual Router ID + * Range is 0..cap_max_virtual_routers-1 + * Access: Index + */ + MLXSW_ITEM32(reg, raltb, virtual_router, 0x00, 16, 16); + + /* reg_raltb_protocol + * Protocol. + * Access: Index + */ + MLXSW_ITEM32(reg, raltb, protocol, 0x00, 12, 4); + + /* reg_raltb_tree_id + * Tree to be used for the {virtual_router, protocol} tuple. + * Tree identifier numbered from 1..(cap_shspm_max_trees-1). + * By default, all Unicast IPv4 and IPv6 are bound to tree_id 0. + * Access: RW + */ + MLXSW_ITEM32(reg, raltb, tree_id, 0x00, 0, 8); + + static inline void mlxsw_reg_raltb_pack(char *payload, u16 virtual_router, + enum mlxsw_reg_ralxx_protocol protocol, + u8 tree_id) + { + MLXSW_REG_ZERO(raltb, payload); + mlxsw_reg_raltb_virtual_router_set(payload, virtual_router); + mlxsw_reg_raltb_protocol_set(payload, protocol); + mlxsw_reg_raltb_tree_id_set(payload, tree_id); + } + + /* RALUE - Router Algorithmic LPM Unicast Entry Register + * ----------------------------------------------------- + * RALUE is used to configure and query LPM entries that serve + * the Unicast protocols.
*/ + #define MLXSW_REG_RALUE_ID 0x8013 + #define MLXSW_REG_RALUE_LEN 0x38 + + static const struct mlxsw_reg_info mlxsw_reg_ralue = { + .id = MLXSW_REG_RALUE_ID, + .len = MLXSW_REG_RALUE_LEN, + }; + + /* reg_ralue_protocol + * Protocol. + * Access: Index + */ + MLXSW_ITEM32(reg, ralue, protocol, 0x00, 24, 4); + + enum mlxsw_reg_ralue_op { + /* Read operation. If entry doesn't exist, the operation fails. */ + MLXSW_REG_RALUE_OP_QUERY_READ = 0, + /* Clear on read operation. Used to read an entry and + * clear the Activity bit. + */ + MLXSW_REG_RALUE_OP_QUERY_CLEAR = 1, + /* Write operation. Used to write a new entry to the table. All RW + * fields are written for a new entry. Activity bit is set + * for new entries. + */ + MLXSW_REG_RALUE_OP_WRITE_WRITE = 0, + /* Update operation. Used to update an existing route entry and + * only update the RW fields that are detailed in the field + * op_u_mask. If entry doesn't exist, the operation fails. + */ + MLXSW_REG_RALUE_OP_WRITE_UPDATE = 1, + /* Clear activity. The Activity bit (the field a) is cleared + * for the entry. + */ + MLXSW_REG_RALUE_OP_WRITE_CLEAR = 2, + /* Delete operation. Used to delete an existing entry. If entry + * doesn't exist, the operation fails. + */ + MLXSW_REG_RALUE_OP_WRITE_DELETE = 3, + }; + + /* reg_ralue_op + * Operation. + * Access: OP + */ + MLXSW_ITEM32(reg, ralue, op, 0x00, 20, 3); + + /* reg_ralue_a + * Activity. Set for new entries. Set if a packet lookup has hit on the + * specific entry, only if the entry is a route. To clear the a bit, use + * "clear activity" op. + * Enabled by activity_dis in RGCR + * Access: RO + */ + MLXSW_ITEM32(reg, ralue, a, 0x00, 16, 1); + + /* reg_ralue_virtual_router + * Virtual Router ID + * Range is 0..cap_max_virtual_routers-1 + * Access: Index + */ + MLXSW_ITEM32(reg, ralue, virtual_router, 0x04, 16, 16); + + #define MLXSW_REG_RALUE_OP_U_MASK_ENTRY_TYPE BIT(0) + #define MLXSW_REG_RALUE_OP_U_MASK_BMP_LEN BIT(1) + #define MLXSW_REG_RALUE_OP_U_MASK_ACTION BIT(2) + + /* reg_ralue_op_u_mask + * opcode update mask. + * On read operation, this field is reserved. + * This field is valid for the update opcode, otherwise it is reserved. + * This field is a bitmask of the fields that should be updated. + * Access: WO + */ + MLXSW_ITEM32(reg, ralue, op_u_mask, 0x04, 8, 3); + + /* reg_ralue_prefix_len + * Number of bits in the prefix of the LPM route. + * Note that for IPv6 prefixes, if prefix_len>64 the entry consumes + * two entries in the physical HW table. + * Access: Index + */ + MLXSW_ITEM32(reg, ralue, prefix_len, 0x08, 0, 8); + + /* reg_ralue_dip* + * The prefix of the route or of the marker that the object of the LPM + * is compared with. The most significant bits of the dip are the prefix. + * The least significant bits must be '0' if the prefix_len is smaller + * than 128 for IPv6 or smaller than 32 for IPv4. + * IPv4 address uses bits dip[31:0] and bits dip[127:32] are reserved. + * Access: Index + */ + MLXSW_ITEM32(reg, ralue, dip4, 0x18, 0, 32); + + enum mlxsw_reg_ralue_entry_type { + MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_ENTRY = 1, + MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY = 2, + MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_AND_ROUTE_ENTRY = 3, + }; + + /* reg_ralue_entry_type + * Entry type. + * Note - for Marker entries, the action_type and action fields are reserved. + * Access: RW + */ + MLXSW_ITEM32(reg, ralue, entry_type, 0x1C, 30, 2); + + /* reg_ralue_bmp_len + * The best match prefix length in the case that there is no match for + * longer prefixes.
+ * If (entry_type != MARKER_ENTRY), bmp_len must be equal to prefix_len + * Note for any update operation with entry_type modification this + * field must be set. + * Access: RW + */ + MLXSW_ITEM32(reg, ralue, bmp_len, 0x1C, 16, 8); + + enum mlxsw_reg_ralue_action_type { + MLXSW_REG_RALUE_ACTION_TYPE_REMOTE, + MLXSW_REG_RALUE_ACTION_TYPE_LOCAL, + MLXSW_REG_RALUE_ACTION_TYPE_IP2ME, + }; + + /* reg_ralue_action_type + * Action Type + * Indicates how the IP address is connected. + * It can be connected to a local subnet through local_erif or can be + * on a remote subnet connected through a next-hop router, + * or transmitted to the CPU. + * Reserved when entry_type = MARKER_ENTRY + * Access: RW + */ + MLXSW_ITEM32(reg, ralue, action_type, 0x1C, 0, 2); + + enum mlxsw_reg_ralue_trap_action { + MLXSW_REG_RALUE_TRAP_ACTION_NOP, + MLXSW_REG_RALUE_TRAP_ACTION_TRAP, + MLXSW_REG_RALUE_TRAP_ACTION_MIRROR_TO_CPU, + MLXSW_REG_RALUE_TRAP_ACTION_MIRROR, + MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR, + }; + + /* reg_ralue_trap_action + * Trap action. + * For IP2ME action, only NOP and MIRROR are possible. + * Access: RW + */ + MLXSW_ITEM32(reg, ralue, trap_action, 0x20, 28, 4); + + /* reg_ralue_trap_id + * Trap ID to be reported to CPU. + * Trap ID is RTR_INGRESS0 or RTR_INGRESS1. + * For trap_action of NOP, MIRROR and DISCARD_ERROR, trap_id is reserved. + * Access: RW + */ + MLXSW_ITEM32(reg, ralue, trap_id, 0x20, 0, 9); + + /* reg_ralue_adjacency_index + * Points to the first entry of the group-based ECMP. + * Only relevant in case of REMOTE action. + * Access: RW + */ + MLXSW_ITEM32(reg, ralue, adjacency_index, 0x24, 0, 24); + + /* reg_ralue_ecmp_size + * Amount of sequential entries starting + * from the adjacency_index (the number of ECMPs). + * The valid range is 1-64, 512, 1024, 2048 and 4096. + * Reserved when trap_action is TRAP or DISCARD_ERROR. + * Only relevant in case of REMOTE action. + * Access: RW + */ + MLXSW_ITEM32(reg, ralue, ecmp_size, 0x28, 0, 13); + + /* reg_ralue_local_erif + * Egress Router Interface. + * Only relevant in case of LOCAL action. + * Access: RW + */ + MLXSW_ITEM32(reg, ralue, local_erif, 0x24, 0, 16); + + /* reg_ralue_v + * Valid bit for the tunnel_ptr field. + * If valid = 0 then trap to CPU as IP2ME trap ID. + * If valid = 1 and the packet format allows NVE or IPinIP tunnel + * decapsulation then tunnel decapsulation is done. + * If valid = 1 and packet format does not allow NVE or IPinIP tunnel + * decapsulation then trap as IP2ME trap ID. + * Only relevant in case of IP2ME action. + * Access: RW + */ + MLXSW_ITEM32(reg, ralue, v, 0x24, 31, 1); + + /* reg_ralue_tunnel_ptr + * Tunnel Pointer for NVE or IPinIP tunnel decapsulation. + * For Spectrum, pointer to KVD Linear. + * Only relevant in case of IP2ME action. 
+ * Access: RW + */ + MLXSW_ITEM32(reg, ralue, tunnel_ptr, 0x24, 0, 24); + + static inline void mlxsw_reg_ralue_pack(char *payload, + enum mlxsw_reg_ralxx_protocol protocol, + enum mlxsw_reg_ralue_op op, + u16 virtual_router, u8 prefix_len) + { + MLXSW_REG_ZERO(ralue, payload); + mlxsw_reg_ralue_protocol_set(payload, protocol); + mlxsw_reg_ralue_virtual_router_set(payload, virtual_router); + mlxsw_reg_ralue_prefix_len_set(payload, prefix_len); + mlxsw_reg_ralue_entry_type_set(payload, + MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY); + mlxsw_reg_ralue_bmp_len_set(payload, prefix_len); + } + + static inline void mlxsw_reg_ralue_pack4(char *payload, + enum mlxsw_reg_ralxx_protocol protocol, + enum mlxsw_reg_ralue_op op, + u16 virtual_router, u8 prefix_len, + u32 dip) + { + mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len); + mlxsw_reg_ralue_dip4_set(payload, dip); + } + + static inline void + mlxsw_reg_ralue_act_remote_pack(char *payload, + enum mlxsw_reg_ralue_trap_action trap_action, + u16 trap_id, u32 adjacency_index, u16 ecmp_size) + { + mlxsw_reg_ralue_action_type_set(payload, + MLXSW_REG_RALUE_ACTION_TYPE_REMOTE); + mlxsw_reg_ralue_trap_action_set(payload, trap_action); + mlxsw_reg_ralue_trap_id_set(payload, trap_id); + mlxsw_reg_ralue_adjacency_index_set(payload, adjacency_index); + mlxsw_reg_ralue_ecmp_size_set(payload, ecmp_size); + } + + static inline void + mlxsw_reg_ralue_act_local_pack(char *payload, + enum mlxsw_reg_ralue_trap_action trap_action, + u16 trap_id, u16 local_erif) + { + mlxsw_reg_ralue_action_type_set(payload, + MLXSW_REG_RALUE_ACTION_TYPE_LOCAL); + mlxsw_reg_ralue_trap_action_set(payload, trap_action); + mlxsw_reg_ralue_trap_id_set(payload, trap_id); + mlxsw_reg_ralue_local_erif_set(payload, local_erif); + } + + static inline void + mlxsw_reg_ralue_act_ip2me_pack(char *payload) + { + mlxsw_reg_ralue_action_type_set(payload, + MLXSW_REG_RALUE_ACTION_TYPE_IP2ME); + } + + /* RAUHT - Router Algorithmic LPM Unicast Host Table Register + * ---------------------------------------------------------- + * The RAUHT register is used to configure and query the Unicast Host table in + * devices that implement the Algorithmic LPM. + */ + #define MLXSW_REG_RAUHT_ID 0x8014 + #define MLXSW_REG_RAUHT_LEN 0x74 + + static const struct mlxsw_reg_info mlxsw_reg_rauht = { + .id = MLXSW_REG_RAUHT_ID, + .len = MLXSW_REG_RAUHT_LEN, + }; + + enum mlxsw_reg_rauht_type { + MLXSW_REG_RAUHT_TYPE_IPV4, + MLXSW_REG_RAUHT_TYPE_IPV6, + }; + + /* reg_rauht_type + * Access: Index + */ + MLXSW_ITEM32(reg, rauht, type, 0x00, 24, 2); + + enum mlxsw_reg_rauht_op { + MLXSW_REG_RAUHT_OP_QUERY_READ = 0, + /* Read operation */ + MLXSW_REG_RAUHT_OP_QUERY_CLEAR_ON_READ = 1, + /* Clear on read operation. Used to read entry and clear + * activity bit. + */ + MLXSW_REG_RAUHT_OP_WRITE_ADD = 0, + /* Add. Used to write a new entry to the table. All R/W fields are + * relevant for new entry. Activity bit is set for new entries. + */ + MLXSW_REG_RAUHT_OP_WRITE_UPDATE = 1, + /* Update action. Used to update an existing route entry and + * only update the following fields: + * trap_action, trap_id, mac, counter_set_type, counter_index + */ + MLXSW_REG_RAUHT_OP_WRITE_CLEAR_ACTIVITY = 2, + /* Clear activity. A bit is cleared for the entry. */ + MLXSW_REG_RAUHT_OP_WRITE_DELETE = 3, + /* Delete entry */ + MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL = 4, + /* Delete all host entries on a RIF. In this command, dip + * field is reserved. 
+ */ + }; + + /* reg_rauht_op + * Access: OP + */ + MLXSW_ITEM32(reg, rauht, op, 0x00, 20, 3); + + /* reg_rauht_a + * Activity. Set for new entries. Set if a packet lookup has hit on + * the specific entry. + * To clear the a bit, use "clear activity" op. + * Enabled by activity_dis in RGCR + * Access: RO + */ + MLXSW_ITEM32(reg, rauht, a, 0x00, 16, 1); + + /* reg_rauht_rif + * Router Interface + * Access: Index + */ + MLXSW_ITEM32(reg, rauht, rif, 0x00, 0, 16); + + /* reg_rauht_dip* + * Destination address. + * Access: Index + */ + MLXSW_ITEM32(reg, rauht, dip4, 0x1C, 0x0, 32); + + enum mlxsw_reg_rauht_trap_action { + MLXSW_REG_RAUHT_TRAP_ACTION_NOP, + MLXSW_REG_RAUHT_TRAP_ACTION_TRAP, + MLXSW_REG_RAUHT_TRAP_ACTION_MIRROR_TO_CPU, + MLXSW_REG_RAUHT_TRAP_ACTION_MIRROR, + MLXSW_REG_RAUHT_TRAP_ACTION_DISCARD_ERRORS, + }; + + /* reg_rauht_trap_action + * Access: RW + */ + MLXSW_ITEM32(reg, rauht, trap_action, 0x60, 28, 4); + + enum mlxsw_reg_rauht_trap_id { + MLXSW_REG_RAUHT_TRAP_ID_RTR_EGRESS0, + MLXSW_REG_RAUHT_TRAP_ID_RTR_EGRESS1, + }; + + /* reg_rauht_trap_id + * Trap ID to be reported to CPU. + * Trap-ID is RTR_EGRESS0 or RTR_EGRESS1. + * For trap_action of NOP, MIRROR and DISCARD_ERROR, + * trap_id is reserved. + * Access: RW + */ + MLXSW_ITEM32(reg, rauht, trap_id, 0x60, 0, 9); + + /* reg_rauht_counter_set_type + * Counter set type for flow counters + * Access: RW + */ + MLXSW_ITEM32(reg, rauht, counter_set_type, 0x68, 24, 8); + + /* reg_rauht_counter_index + * Counter index for flow counters + * Access: RW + */ + MLXSW_ITEM32(reg, rauht, counter_index, 0x68, 0, 24); + + /* reg_rauht_mac + * MAC address. + * Access: RW + */ + MLXSW_ITEM_BUF(reg, rauht, mac, 0x6E, 6); + + static inline void mlxsw_reg_rauht_pack(char *payload, + enum mlxsw_reg_rauht_op op, u16 rif, + const char *mac) + { + MLXSW_REG_ZERO(rauht, payload); + mlxsw_reg_rauht_op_set(payload, op); + mlxsw_reg_rauht_rif_set(payload, rif); + mlxsw_reg_rauht_mac_memcpy_to(payload, mac); + } + + static inline void mlxsw_reg_rauht_pack4(char *payload, + enum mlxsw_reg_rauht_op op, u16 rif, + const char *mac, u32 dip) + { + mlxsw_reg_rauht_pack(payload, op, rif, mac); + mlxsw_reg_rauht_dip4_set(payload, dip); + } + + /* RALEU - Router Algorithmic LPM ECMP Update Register + * --------------------------------------------------- + * The register enables updating the ECMP section in the action for multiple + * LPM Unicast entries in a single operation. The update is executed to + * all entries of a {virtual router, protocol} tuple using the same ECMP group. + */ + #define MLXSW_REG_RALEU_ID 0x8015 + #define MLXSW_REG_RALEU_LEN 0x28 + + static const struct mlxsw_reg_info mlxsw_reg_raleu = { + .id = MLXSW_REG_RALEU_ID, + .len = MLXSW_REG_RALEU_LEN, + }; + + /* reg_raleu_protocol + * Protocol. + * Access: Index + */ + MLXSW_ITEM32(reg, raleu, protocol, 0x00, 24, 4); + + /* reg_raleu_virtual_router + * Virtual Router ID + * Range is 0..cap_max_virtual_routers-1 + * Access: Index + */ + MLXSW_ITEM32(reg, raleu, virtual_router, 0x00, 0, 16); + + /* reg_raleu_adjacency_index + * Adjacency Index used for matching on the existing entries. + * Access: Index + */ + MLXSW_ITEM32(reg, raleu, adjacency_index, 0x10, 0, 24); + + /* reg_raleu_ecmp_size + * ECMP Size used for matching on the existing entries. + * Access: Index + */ + MLXSW_ITEM32(reg, raleu, ecmp_size, 0x14, 0, 13); + + /* reg_raleu_new_adjacency_index + * New Adjacency Index. 
+ * Access: WO + */ + MLXSW_ITEM32(reg, raleu, new_adjacency_index, 0x20, 0, 24); + + /* reg_raleu_new_ecmp_size + * New ECMP Size. + * Access: WO + */ + MLXSW_ITEM32(reg, raleu, new_ecmp_size, 0x24, 0, 13); + + static inline void mlxsw_reg_raleu_pack(char *payload, + enum mlxsw_reg_ralxx_protocol protocol, + u16 virtual_router, + u32 adjacency_index, u16 ecmp_size, + u32 new_adjacency_index, + u16 new_ecmp_size) + { + MLXSW_REG_ZERO(raleu, payload); + mlxsw_reg_raleu_protocol_set(payload, protocol); + mlxsw_reg_raleu_virtual_router_set(payload, virtual_router); + mlxsw_reg_raleu_adjacency_index_set(payload, adjacency_index); + mlxsw_reg_raleu_ecmp_size_set(payload, ecmp_size); + mlxsw_reg_raleu_new_adjacency_index_set(payload, new_adjacency_index); + mlxsw_reg_raleu_new_ecmp_size_set(payload, new_ecmp_size); + } + + /* RAUHTD - Router Algorithmic LPM Unicast Host Table Dump Register + * ---------------------------------------------------------------- + * The RAUHTD register allows dumping entries from the Router Unicast Host + * Table. For a given session an entry is dumped no more than once. The + * first RAUHTD access after reset is a new session. A session ends when the + * num_rec response is smaller than the num_rec request, or for IPv4 when the + * num_entries is smaller than 4. The clear activity affects the current session + * or the last session if a new session has not started. + */ + #define MLXSW_REG_RAUHTD_ID 0x8018 + #define MLXSW_REG_RAUHTD_BASE_LEN 0x20 + #define MLXSW_REG_RAUHTD_REC_LEN 0x20 + #define MLXSW_REG_RAUHTD_REC_MAX_NUM 32 + #define MLXSW_REG_RAUHTD_LEN (MLXSW_REG_RAUHTD_BASE_LEN + \ + MLXSW_REG_RAUHTD_REC_MAX_NUM * MLXSW_REG_RAUHTD_REC_LEN) + #define MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC 4 + + static const struct mlxsw_reg_info mlxsw_reg_rauhtd = { + .id = MLXSW_REG_RAUHTD_ID, + .len = MLXSW_REG_RAUHTD_LEN, + }; + + #define MLXSW_REG_RAUHTD_FILTER_A BIT(0) + #define MLXSW_REG_RAUHTD_FILTER_RIF BIT(3) + + /* reg_rauhtd_filter_fields + * If a bit is '0' then the relevant field is ignored and the dump is done + * regardless of the field value + * Bit0 - filter by activity: entry_a + * Bit3 - filter by entry rif: entry_rif + * Access: Index + */ + MLXSW_ITEM32(reg, rauhtd, filter_fields, 0x00, 0, 8); + + enum mlxsw_reg_rauhtd_op { + MLXSW_REG_RAUHTD_OP_DUMP, + MLXSW_REG_RAUHTD_OP_DUMP_AND_CLEAR, + }; + + /* reg_rauhtd_op + * Access: OP + */ + MLXSW_ITEM32(reg, rauhtd, op, 0x04, 24, 2); + + /* reg_rauhtd_num_rec + * At request: number of records requested + * At response: number of records dumped + * For IPv4, each record has 4 entries at request and up to 4 entries + * at response + * Range is 0..MLXSW_REG_RAUHTD_REC_MAX_NUM + * Access: Index + */ + MLXSW_ITEM32(reg, rauhtd, num_rec, 0x04, 0, 8); + + /* reg_rauhtd_entry_a + * Dump only if activity has value of entry_a + * Reserved if filter_fields bit0 is '0' + * Access: Index + */ + MLXSW_ITEM32(reg, rauhtd, entry_a, 0x08, 16, 1); + + enum mlxsw_reg_rauhtd_type { + MLXSW_REG_RAUHTD_TYPE_IPV4, + MLXSW_REG_RAUHTD_TYPE_IPV6, + }; + + /* reg_rauhtd_type + * Dump only if record type is: + * 0 - IPv4 + * 1 - IPv6 + * Access: Index + */ + MLXSW_ITEM32(reg, rauhtd, type, 0x08, 0, 4); + + /* reg_rauhtd_entry_rif + * Dump only if RIF has value of entry_rif + * Reserved if filter_fields bit3 is '0' + * Access: Index + */ + MLXSW_ITEM32(reg, rauhtd, entry_rif, 0x0C, 0, 16); + + static inline void mlxsw_reg_rauhtd_pack(char *payload, + enum mlxsw_reg_rauhtd_type type) + { + MLXSW_REG_ZERO(rauhtd, payload); +
mlxsw_reg_rauhtd_filter_fields_set(payload, MLXSW_REG_RAUHTD_FILTER_A); + mlxsw_reg_rauhtd_op_set(payload, MLXSW_REG_RAUHTD_OP_DUMP_AND_CLEAR); + mlxsw_reg_rauhtd_num_rec_set(payload, MLXSW_REG_RAUHTD_REC_MAX_NUM); + mlxsw_reg_rauhtd_entry_a_set(payload, 1); + mlxsw_reg_rauhtd_type_set(payload, type); + } + + /* reg_rauhtd_ipv4_rec_num_entries + * Number of valid entries in this record: + * 0 - 1 valid entry + * 1 - 2 valid entries + * 2 - 3 valid entries + * 3 - 4 valid entries + * Access: RO + */ + MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_rec_num_entries, + MLXSW_REG_RAUHTD_BASE_LEN, 28, 2, + MLXSW_REG_RAUHTD_REC_LEN, 0x00, false); + + /* reg_rauhtd_rec_type + * Record type. + * 0 - IPv4 + * 1 - IPv6 + * Access: RO + */ + MLXSW_ITEM32_INDEXED(reg, rauhtd, rec_type, MLXSW_REG_RAUHTD_BASE_LEN, 24, 2, + MLXSW_REG_RAUHTD_REC_LEN, 0x00, false); + + #define MLXSW_REG_RAUHTD_IPV4_ENT_LEN 0x8 + + /* reg_rauhtd_ipv4_ent_a + * Activity. Set for new entries. Set if a packet lookup has hit on the + * specific entry. + * Access: RO + */ + MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_a, MLXSW_REG_RAUHTD_BASE_LEN, 16, 1, + MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x00, false); + + /* reg_rauhtd_ipv4_ent_rif + * Router interface. + * Access: RO + */ + MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_rif, MLXSW_REG_RAUHTD_BASE_LEN, 0, + 16, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x00, false); + + /* reg_rauhtd_ipv4_ent_dip + * Destination IPv4 address. + * Access: RO + */ + MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_dip, MLXSW_REG_RAUHTD_BASE_LEN, 0, + 32, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x04, false); + + static inline void mlxsw_reg_rauhtd_ent_ipv4_unpack(char *payload, + int ent_index, u16 *p_rif, + u32 *p_dip) + { + *p_rif = mlxsw_reg_rauhtd_ipv4_ent_rif_get(payload, ent_index); + *p_dip = mlxsw_reg_rauhtd_ipv4_ent_dip_get(payload, ent_index); + } + /* MFCR - Management Fan Control Register * -------------------------------------- * This register controls the settings of the Fan Speed PWM mechanism. @@@ -3939,6 -5122,26 +5137,26 @@@ static inline const char *mlxsw_reg_id_ return "HTGT"; case MLXSW_REG_HPKT_ID: return "HPKT"; + case MLXSW_REG_RGCR_ID: + return "RGCR"; + case MLXSW_REG_RITR_ID: + return "RITR"; + case MLXSW_REG_RATR_ID: + return "RATR"; + case MLXSW_REG_RALTA_ID: + return "RALTA"; + case MLXSW_REG_RALST_ID: + return "RALST"; + case MLXSW_REG_RALTB_ID: + return "RALTB"; + case MLXSW_REG_RALUE_ID: + return "RALUE"; + case MLXSW_REG_RAUHT_ID: + return "RAUHT"; + case MLXSW_REG_RALEU_ID: + return "RALEU"; + case MLXSW_REG_RAUHTD_ID: + return "RAUHTD"; case MLXSW_REG_MFCR_ID: return "MFCR"; case MLXSW_REG_MFSC_ID: diff --combined drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 3740800,3e7920b..2ba8cc4 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@@ -49,7 -49,9 +49,9 @@@ #include <linux/jiffies.h> #include <linux/bitops.h> #include <linux/list.h> + #include <linux/notifier.h> #include <linux/dcbnl.h> + #include <linux/inetdevice.h> #include <net/switchdev.h> #include <generated/utsrelease.h>
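/* A minimal sketch of how the RAUHTD dump session described in reg.h
 * above could be driven; this is not part of the patch. The function
 * name is hypothetical, GFP_KERNEL context is an assumption, and a full
 * implementation would also consult
 * mlxsw_reg_rauhtd_ipv4_rec_num_entries_get() for partially filled
 * records instead of assuming four entries per record.
 */
static int mlxsw_sp_rauhtd_dump_sketch(struct mlxsw_sp *mlxsw_sp)
{
	char *rauhtd_pl;
	u8 num_rec = 0;
	int i, err;

	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
	if (!rauhtd_pl)
		return -ENOMEM;

	do {
		/* Ask for a full batch; the dump-and-clear op also resets
		 * the activity bits read in this pass.
		 */
		mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
				      rauhtd_pl);
		if (err)
			break;
		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
		for (i = 0; i < num_rec * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC;
		     i++) {
			u16 rif;
			u32 dip;

			mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, i,
							 &rif, &dip);
			/* Feed the active neighbour {rif, dip} to the
			 * neighbour aging logic here.
			 */
		}
		/* Per the register description, the session ends once
		 * fewer records than requested come back.
		 */
	} while (num_rec == MLXSW_REG_RAUHTD_REC_MAX_NUM);

	kfree(rauhtd_pl);
	return err;
}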
@@@ -171,6 -173,23 +173,6 @@@ static int mlxsw_sp_port_admin_status_s return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); }
-static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port, - bool *p_is_up) -{ - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char paos_pl[MLXSW_REG_PAOS_LEN]; - u8 oper_status; - int err; - - mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0); - err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl); - if (err) - return err; - oper_status = mlxsw_reg_paos_oper_status_get(paos_pl); - *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false; - return 0; -} - static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port, unsigned char *addr) { @@@ -192,23 -211,6 +194,6 @@@ static int mlxsw_sp_port_dev_addr_init( return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); }
- static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, - u16 vid, enum mlxsw_reg_spms_state state) - { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char *spms_pl; - int err; - - spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); - if (!spms_pl) - return -ENOMEM; - mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); - mlxsw_reg_spms_vid_pack(spms_pl, vid, state); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); - kfree(spms_pl); - return err; - } - static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; @@@ -619,94 -621,8 +604,8 @@@ static int mlxsw_sp_port_vlan_mode_tran return 0; }
- static struct mlxsw_sp_vfid * - mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid) - { - struct mlxsw_sp_vfid *vfid; - - list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) { - if (vfid->vid == vid) - return vfid; - } - - return NULL; - } - - static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp) - { - return find_first_zero_bit(mlxsw_sp->port_vfids.mapped, - MLXSW_SP_VFID_PORT_MAX); - } - - static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid) - { - u16 fid = mlxsw_sp_vfid_to_fid(vfid); - char sfmr_pl[MLXSW_REG_SFMR_LEN]; - - mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); - } - - static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid) - { - u16 fid = mlxsw_sp_vfid_to_fid(vfid); - char sfmr_pl[MLXSW_REG_SFMR_LEN]; - - mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); - } - - static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, - u16 vid) - { - struct device *dev = mlxsw_sp->bus_info->dev; - struct mlxsw_sp_vfid *vfid; - u16 n_vfid; - int err; - - n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp); - if (n_vfid == MLXSW_SP_VFID_PORT_MAX) { - dev_err(dev, "No available vFIDs\n"); - return ERR_PTR(-ERANGE); - } - - err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid); - if (err) { - dev_err(dev, "Failed to create vFID=%d\n", n_vfid); - return ERR_PTR(err); - } - - vfid = kzalloc(sizeof(*vfid), GFP_KERNEL); - if (!vfid) - goto err_allocate_vfid; - - vfid->vfid = n_vfid; - vfid->vid = vid; - - list_add(&vfid->list, &mlxsw_sp->port_vfids.list); - set_bit(n_vfid, mlxsw_sp->port_vfids.mapped); - - return vfid; - - err_allocate_vfid: - __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid); - return ERR_PTR(-ENOMEM); - } - - static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_vfid *vfid) - { - clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped); - list_del(&vfid->list); - - __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid); - - kfree(vfid); - } - static struct mlxsw_sp_port * - mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_vfid *vfid) + mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_vport;
@@@ -724,8 -640,7 +623,7 @@@ mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING; mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged; mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id; - mlxsw_sp_vport->vport.vfid = vfid; - mlxsw_sp_vport->vport.vid = vfid->vid; + mlxsw_sp_vport->vport.vid = vid;
list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
@@@ -742,9 -657,8 +640,8 @@@ int mlxsw_sp_port_add_vid(struct net_de u16 vid) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_port *mlxsw_sp_vport; - struct mlxsw_sp_vfid *vfid; + bool untagged = vid == 1; int err;
/* VLAN 0 is added to HW filter when device goes up, but it is @@@ -758,31 -672,10 +655,10 @@@ return 0; }
- vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid); - if (!vfid) { - vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid); - if (IS_ERR(vfid)) { - netdev_err(dev, "Failed to create vFID for VID=%d\n", - vid); - return PTR_ERR(vfid); - } - } - - mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid); + mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid); if (!mlxsw_sp_vport) { netdev_err(dev, "Failed to create vPort for VID=%d\n", vid); - err = -ENOMEM; - goto err_port_vport_create; - } - - if (!vfid->nr_vports) { - err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, - true, false); - if (err) { - netdev_err(dev, "Failed to setup flooding for vFID=%d\n", - vfid->vfid); - goto err_vport_flood_set; - } + return -ENOMEM; }
/* When adding the first VLAN interface on a bridged port we need to @@@ -797,70 -690,37 +673,37 @@@ } }
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - true, - mlxsw_sp_vfid_to_fid(vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n", - vid, vfid->vfid); - goto err_port_vid_to_fid_set; - } - err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); if (err) { netdev_err(dev, "Failed to disable learning for VID=%d\n", vid); goto err_port_vid_learning_set; }
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false); + err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged); if (err) { netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", vid); goto err_port_add_vid; }
- err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, - MLXSW_REG_SPMS_STATE_FORWARDING); - if (err) { - netdev_err(dev, "Failed to set STP state for VID=%d\n", vid); - goto err_port_stp_state_set; - } - - vfid->nr_vports++; - return 0;
- err_port_stp_state_set: - mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); err_port_add_vid: mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); err_port_vid_learning_set: - mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, - mlxsw_sp_vfid_to_fid(vfid->vfid), vid); - err_port_vid_to_fid_set: if (list_is_singular(&mlxsw_sp_port->vports_list)) mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); err_port_vp_mode_trans: - if (!vfid->nr_vports) - mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, - false); - err_vport_flood_set: mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); - err_port_vport_create: - if (!vfid->nr_vports) - mlxsw_sp_vfid_destroy(mlxsw_sp, vfid); return err; }
- int mlxsw_sp_port_kill_vid(struct net_device *dev, - __be16 __always_unused proto, u16 vid) + static int mlxsw_sp_port_kill_vid(struct net_device *dev, + __be16 __always_unused proto, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp_port *mlxsw_sp_vport; - struct mlxsw_sp_vfid *vfid; + struct mlxsw_sp_fid *f; int err;
/* VLAN 0 is removed from HW filter when device goes down, but @@@ -875,15 -735,6 +718,6 @@@ return 0; }
- vfid = mlxsw_sp_vport->vport.vfid; - - err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, - MLXSW_REG_SPMS_STATE_DISCARDING); - if (err) { - netdev_err(dev, "Failed to set STP state for VID=%d\n", vid); - return err; - } - err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); if (err) { netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", @@@ -897,16 -748,12 +731,12 @@@ return err; }
-	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
-					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
-					   false,
-					   mlxsw_sp_vfid_to_fid(vfid->vfid),
-					   vid);
-	if (err) {
-		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
-			   vid, vfid->vfid);
-		return err;
-	}
+	/* Drop the FID reference. If this was the last reference, the
+	 * resources will be freed.
+	 */
+	f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+	if (f && !WARN_ON(!f->leave))
+		f->leave(mlxsw_sp_vport);
/* When removing the last VLAN interface on a bridged port we need to * transition all active 802.1Q bridge VLANs to use VID to FID @@@ -920,13 -767,8 +750,8 @@@ } }
- vfid->nr_vports--; mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
- /* Destroy the vFID if no vPorts are assigned to it anymore. */ - if (!vfid->nr_vports) - mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid); - return 0; }
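/* The f->leave() callback invoked in mlxsw_sp_port_kill_vid() above is
 * installed when the FID is created: mlxsw_sp_vport_vfid_leave() for
 * bridged vFIDs and mlxsw_sp_vport_rif_sp_leave() for router-facing
 * FIDs, both added later in this patch. The vPort can therefore drop
 * whatever FID flavour it holds without knowing its type.
 */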
@@@ -961,6 -803,8 +786,8 @@@ static const struct net_device_ops mlxs .ndo_get_stats64 = mlxsw_sp_port_get_stats64, .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, + .ndo_neigh_construct = mlxsw_sp_router_neigh_construct, + .ndo_neigh_destroy = mlxsw_sp_router_neigh_destroy, .ndo_fdb_add = switchdev_port_fdb_add, .ndo_fdb_del = switchdev_port_fdb_del, .ndo_fdb_dump = switchdev_port_fdb_dump, @@@ -1055,7 -899,7 +882,7 @@@ struct mlxsw_sp_port_hw_stats u64 (*getter)(char *payload); };
- static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { + static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { { .str = "a_frames_transmitted_ok", .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, @@@ -1136,6 -980,90 +963,90 @@@
#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
+ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { + { + .str = "rx_octets_prio", + .getter = mlxsw_reg_ppcnt_rx_octets_get, + }, + { + .str = "rx_frames_prio", + .getter = mlxsw_reg_ppcnt_rx_frames_get, + }, + { + .str = "tx_octets_prio", + .getter = mlxsw_reg_ppcnt_tx_octets_get, + }, + { + .str = "tx_frames_prio", + .getter = mlxsw_reg_ppcnt_tx_frames_get, + }, + { + .str = "rx_pause_prio", + .getter = mlxsw_reg_ppcnt_rx_pause_get, + }, + { + .str = "rx_pause_duration_prio", + .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, + }, + { + .str = "tx_pause_prio", + .getter = mlxsw_reg_ppcnt_tx_pause_get, + }, + { + .str = "tx_pause_duration_prio", + .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, + }, + }; + + #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) + + static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(char *ppcnt_pl) + { + u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); + + return MLXSW_SP_CELLS_TO_BYTES(transmit_queue); + } + + static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { + { + .str = "tc_transmit_queue_tc", + .getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get, + }, + { + .str = "tc_no_buffer_discard_uc_tc", + .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, + }, + }; + + #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) + + #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ + (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \ + MLXSW_SP_PORT_HW_TC_STATS_LEN) * \ + IEEE_8021QAZ_MAX_TCS) + + static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) + { + int i; + + for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { + snprintf(*p, ETH_GSTRING_LEN, "%s_%d", + mlxsw_sp_port_hw_prio_stats[i].str, prio); + *p += ETH_GSTRING_LEN; + } + } + + static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) + { + int i; + + for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { + snprintf(*p, ETH_GSTRING_LEN, "%s_%d", + mlxsw_sp_port_hw_tc_stats[i].str, tc); + *p += ETH_GSTRING_LEN; + } + } + static void mlxsw_sp_port_get_strings(struct net_device *dev, u32 stringset, u8 *data) { @@@ -1149,6 -1077,13 +1060,13 @@@ ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + mlxsw_sp_port_get_prio_strings(&p, i); + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + mlxsw_sp_port_get_tc_strings(&p, i); + break; } } @@@ -1176,27 -1111,80 +1094,80 @@@ static int mlxsw_sp_port_set_phys_id(st return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); }
- static void mlxsw_sp_port_get_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data) + static int + mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, + int *p_len, enum mlxsw_reg_ppcnt_grp grp) + { + switch (grp) { + case MLXSW_REG_PPCNT_IEEE_8023_CNT: + *p_hw_stats = mlxsw_sp_port_hw_stats; + *p_len = MLXSW_SP_PORT_HW_STATS_LEN; + break; + case MLXSW_REG_PPCNT_PRIO_CNT: + *p_hw_stats = mlxsw_sp_port_hw_prio_stats; + *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; + break; + case MLXSW_REG_PPCNT_TC_CNT: + *p_hw_stats = mlxsw_sp_port_hw_tc_stats; + *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; + break; + default: + WARN_ON(1); + return -ENOTSUPP; + } + return 0; + } + + static void __mlxsw_sp_port_get_stats(struct net_device *dev, + enum mlxsw_reg_ppcnt_grp grp, int prio, + u64 *data, int data_index) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_port_hw_stats *hw_stats; char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; - int i; + int i, len; int err;
- mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, - MLXSW_REG_PPCNT_IEEE_8023_CNT, 0); + err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); + if (err) + return; + mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); - for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) - data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0; + for (i = 0; i < len; i++) + data[data_index + i] = !err ? hw_stats[i].getter(ppcnt_pl) : 0; + } + + static void mlxsw_sp_port_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) + { + int i, data_index = 0; + + /* IEEE 802.3 Counters */ + __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, + data, data_index); + data_index = MLXSW_SP_PORT_HW_STATS_LEN; + + /* Per-Priority Counters */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, + data, data_index); + data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; + } + + /* Per-TC Counters */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, + data, data_index); + data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; + } }
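/* Note that the buffer filled above must line up with the string table
 * emitted by mlxsw_sp_port_get_strings(): MLXSW_SP_PORT_HW_STATS_LEN
 * IEEE 802.3 counters first, then MLXSW_SP_PORT_HW_PRIO_STATS_LEN
 * counters for each of the IEEE_8021QAZ_MAX_TCS priorities, then
 * MLXSW_SP_PORT_HW_TC_STATS_LEN counters per traffic class, for
 * MLXSW_SP_PORT_ETHTOOL_STATS_LEN entries in total.
 */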
static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: - return MLXSW_SP_PORT_HW_STATS_LEN; + return MLXSW_SP_PORT_ETHTOOL_STATS_LEN; default: return -EOPNOTSUPP; } @@@ -1417,8 -1405,7 +1388,8 @@@ static int mlxsw_sp_port_get_settings(s
cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) | mlxsw_sp_from_ptys_supported_link(eth_proto_cap) | - SUPPORTED_Pause | SUPPORTED_Asym_Pause; + SUPPORTED_Pause | SUPPORTED_Asym_Pause | + SUPPORTED_Autoneg; cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin); mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper, cmd); @@@ -1477,6 -1464,7 +1448,6 @@@ static int mlxsw_sp_port_set_settings(s u32 eth_proto_new; u32 eth_proto_cap; u32 eth_proto_admin; - bool is_up; int err;
speed = ethtool_cmd_speed(cmd); @@@ -1508,7 -1496,12 +1479,7 @@@ return err; }
- err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up); - if (err) { - netdev_err(dev, "Failed to get oper status"); - return err; - } - if (!is_up) + if (!netif_running(dev)) return 0;
err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); @@@ -1816,23 -1809,6 +1787,6 @@@ err_port_active_vlans_alloc return err; }
- static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) - { - struct net_device *dev = mlxsw_sp_port->dev; - struct mlxsw_sp_port *mlxsw_sp_vport, *tmp; - - list_for_each_entry_safe(mlxsw_sp_vport, tmp, - &mlxsw_sp_port->vports_list, vport.list) { - u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); - - /* vPorts created for VLAN devices should already be gone - * by now, since we unregistered the port netdev. - */ - WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev)); - mlxsw_sp_port_kill_vid(dev, 0, vid); - } - } - static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; @@@ -1843,13 -1819,14 +1797,14 @@@ mlxsw_core_port_fini(&mlxsw_sp_port->core_port); unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ mlxsw_sp_port_dcb_fini(mlxsw_sp_port); - mlxsw_sp_port_vports_fini(mlxsw_sp_port); + mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1); mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); free_percpu(mlxsw_sp_port->pcpu_stats); kfree(mlxsw_sp_port->untagged_vlans); kfree(mlxsw_sp_port->active_vlans); + WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list)); free_netdev(mlxsw_sp_port->dev); }
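/* By the time mlxsw_sp_port_remove() runs, unregister_netdev() has
 * already torn down any VLAN upper devices and their vPorts, so killing
 * the implicit VID 1 vPort above is sufficient; the WARN_ON_ONCE()
 * asserts that vports_list is indeed empty afterwards.
 */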
@@@ -2086,11 -2063,8 +2041,8 @@@ static void mlxsw_sp_pude_event_func(co
local_port = mlxsw_reg_pude_local_port_get(pude_pl); mlxsw_sp_port = mlxsw_sp->ports[local_port]; - if (!mlxsw_sp_port) { - dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n", - local_port); + if (!mlxsw_sp_port) return; - }
status = mlxsw_reg_pude_oper_status_get(pude_pl); if (status == MLXSW_PORT_OPER_STATUS_UP) { @@@ -2245,6 -2219,31 +2197,31 @@@ static const struct mlxsw_rx_listener m .local_port = MLXSW_PORT_DONT_CARE, .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT, }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_ARPBC, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_ARPUC, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IP2ME, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_RTR_INGRESS0, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4, + }, };
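/* The five traps added above (ARPBC, ARPUC, IP2ME, RTR_INGRESS0 and
 * HOST_MISS_IPV4) appear to punt to the CPU the traffic the new router
 * support depends on: ARP packets, packets addressed to the router
 * itself, and host-table misses, so that the kernel's neighbour code
 * can resolve them.
 */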
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) @@@ -2285,7 -2284,7 +2262,7 @@@ err_rx_trap_set mlxsw_sp); err_rx_listener_register: for (i--; i >= 0; i--) { - mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD, mlxsw_sp_rx_listener[i].trap_id); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
@@@ -2302,7 -2301,7 +2279,7 @@@ static void mlxsw_sp_traps_fini(struct int i;
for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) { - mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD, mlxsw_sp_rx_listener[i].trap_id); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
@@@ -2381,8 -2380,8 +2358,8 @@@ static int mlxsw_sp_init(struct mlxsw_c
mlxsw_sp->core = mlxsw_core; mlxsw_sp->bus_info = mlxsw_bus_info; - INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list); - INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list); + INIT_LIST_HEAD(&mlxsw_sp->fids); + INIT_LIST_HEAD(&mlxsw_sp->vfids.list); INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
err = mlxsw_sp_base_mac_get(mlxsw_sp); @@@ -2391,16 -2390,10 +2368,10 @@@ return err; }
- err = mlxsw_sp_ports_create(mlxsw_sp); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); - return err; - } - err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n"); - goto err_event_register; + return err; }
err = mlxsw_sp_traps_init(mlxsw_sp); @@@ -2433,8 -2426,24 +2404,24 @@@ goto err_switchdev_init; }
+ err = mlxsw_sp_router_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); + goto err_router_init; + } + + err = mlxsw_sp_ports_create(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); + goto err_ports_create; + } + return 0;
+ err_ports_create: + mlxsw_sp_router_fini(mlxsw_sp); + err_router_init: + mlxsw_sp_switchdev_fini(mlxsw_sp); err_switchdev_init: err_lag_init: mlxsw_sp_buffers_fini(mlxsw_sp); @@@ -2443,20 -2452,24 +2430,24 @@@ err_flood_init mlxsw_sp_traps_fini(mlxsw_sp); err_rx_listener_register: mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); - err_event_register: - mlxsw_sp_ports_remove(mlxsw_sp); return err; }
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + int i;
+ mlxsw_sp_ports_remove(mlxsw_sp); + mlxsw_sp_router_fini(mlxsw_sp); mlxsw_sp_switchdev_fini(mlxsw_sp); mlxsw_sp_buffers_fini(mlxsw_sp); mlxsw_sp_traps_fini(mlxsw_sp); mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); - mlxsw_sp_ports_remove(mlxsw_sp); + WARN_ON(!list_empty(&mlxsw_sp->vfids.list)); + WARN_ON(!list_empty(&mlxsw_sp->fids)); + for (i = 0; i < MLXSW_SP_RIF_MAX; i++) + WARN_ON_ONCE(mlxsw_sp->rifs[i]); }
static struct mlxsw_config_profile mlxsw_sp_config_profile = { @@@ -2487,6 -2500,10 +2478,10 @@@ .max_ib_mc = 0, .used_max_pkey = 1, .max_pkey = 0, + .used_kvd_sizes = 1, + .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, + .kvd_hash_single_size = MLXSW_SP_KVD_HASH_SINGLE_SIZE, + .kvd_hash_double_size = MLXSW_SP_KVD_HASH_DOUBLE_SIZE, .swid_config = { { .used_type = 1, @@@ -2518,121 -2535,679 +2513,679 @@@ static struct mlxsw_driver mlxsw_sp_dri .profile = &mlxsw_sp_config_profile, };
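/* used_kvd_sizes asks the core to partition the device's KVD memory
 * explicitly; presumably the linear part backs the router's adjacency
 * (nexthop) entries, while the hash parts hold the single- and
 * double-sized lookup entries.
 */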
- static int - mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port) + static bool mlxsw_sp_port_dev_check(const struct net_device *dev) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char sfdf_pl[MLXSW_REG_SFDF_LEN]; - - mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT); - mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port); - - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); + return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; }
- static int - mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port, - u16 fid) + static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char sfdf_pl[MLXSW_REG_SFDF_LEN]; + struct net_device *lower_dev; + struct list_head *iter;
- mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID); - mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); - mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, - mlxsw_sp_port->local_port); + if (mlxsw_sp_port_dev_check(dev)) + return netdev_priv(dev);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); + netdev_for_each_all_lower_dev(dev, lower_dev, iter) { + if (mlxsw_sp_port_dev_check(lower_dev)) + return netdev_priv(lower_dev); + } + return NULL; }
- static int - mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port) + static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char sfdf_pl[MLXSW_REG_SFDF_LEN]; - - mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG); - mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); + struct mlxsw_sp_port *mlxsw_sp_port;
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); + mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); + return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL; }
- static int - mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port, - u16 fid) + static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char sfdf_pl[MLXSW_REG_SFDF_LEN]; + struct net_device *lower_dev; + struct list_head *iter;
- mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID); - mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); - mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); + if (mlxsw_sp_port_dev_check(dev)) + return netdev_priv(dev);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); + netdev_for_each_all_lower_dev_rcu(dev, lower_dev, iter) { + if (mlxsw_sp_port_dev_check(lower_dev)) + return netdev_priv(lower_dev); + } + return NULL; }
- static int - __mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port) + struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) { - int err, last_err = 0; - u16 vid; + struct mlxsw_sp_port *mlxsw_sp_port;
- for (vid = 1; vid < VLAN_N_VID - 1; vid++) { - err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid); - if (err) - last_err = err; + rcu_read_lock(); + mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); + if (mlxsw_sp_port) + dev_hold(mlxsw_sp_port->dev); + rcu_read_unlock(); + return mlxsw_sp_port; + } + + void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) + { + dev_put(mlxsw_sp_port->dev); + } + + static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r, + unsigned long event) + { + switch (event) { + case NETDEV_UP: + if (!r) + return true; + r->ref_count++; + return false; + case NETDEV_DOWN: + if (r && --r->ref_count == 0) + return true; + /* It is possible we already removed the RIF ourselves + * if it was assigned to a netdev that is now a bridge + * or LAG slave. + */ + return false; }
- return last_err; + return false; }
- static int - __mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port) + static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp) { - int err, last_err = 0; - u16 vid; + int i;
- for (vid = 1; vid < VLAN_N_VID - 1; vid++) { - err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid); - if (err) - last_err = err; + for (i = 0; i < MLXSW_SP_RIF_MAX; i++) + if (!mlxsw_sp->rifs[i]) + return i; + + return MLXSW_SP_RIF_MAX; + } + + static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport, + bool *p_lagged, u16 *p_system_port) + { + u8 local_port = mlxsw_sp_vport->local_port; + + *p_lagged = mlxsw_sp_vport->lagged; + *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port; + } + + static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *l3_dev, u16 rif, + bool create) + { + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + bool lagged = mlxsw_sp_vport->lagged; + char ritr_pl[MLXSW_REG_RITR_LEN]; + u16 system_port; + + mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif, + l3_dev->mtu, l3_dev->dev_addr); + + mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port); + mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port, + mlxsw_sp_vport_vid_get(mlxsw_sp_vport)); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); + } + + static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport); + + static struct mlxsw_sp_fid * + mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev) + { + struct mlxsw_sp_fid *f; + + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (!f) + return NULL; + + f->leave = mlxsw_sp_vport_rif_sp_leave; + f->ref_count = 0; + f->dev = l3_dev; + f->fid = fid; + + return f; + } + + static struct mlxsw_sp_rif * + mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f) + { + struct mlxsw_sp_rif *r; + + r = kzalloc(sizeof(*r), GFP_KERNEL); + if (!r) + return NULL; + + ether_addr_copy(r->addr, l3_dev->dev_addr); + r->mtu = l3_dev->mtu; + r->ref_count = 1; + r->dev = l3_dev; + r->rif = rif; + r->f = f; + + return r; + } + + static struct mlxsw_sp_rif * + mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *l3_dev) + { + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + struct mlxsw_sp_fid *f; + struct mlxsw_sp_rif *r; + u16 fid, rif; + int err; + + rif = mlxsw_sp_avail_rif_get(mlxsw_sp); + if (rif == MLXSW_SP_RIF_MAX) + return ERR_PTR(-ERANGE); + + err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true); + if (err) + return ERR_PTR(err); + + fid = mlxsw_sp_rif_sp_to_fid(rif); + err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true); + if (err) + goto err_rif_fdb_op; + + f = mlxsw_sp_rfid_alloc(fid, l3_dev); + if (!f) { + err = -ENOMEM; + goto err_rfid_alloc; + } + + r = mlxsw_sp_rif_alloc(rif, l3_dev, f); + if (!r) { + err = -ENOMEM; + goto err_rif_alloc; }
- return last_err; + f->r = r; + mlxsw_sp->rifs[rif] = r; + + return r; + + err_rif_alloc: + kfree(f); + err_rfid_alloc: + mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false); + err_rif_fdb_op: + mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false); + return ERR_PTR(err); }
- static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port) + static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport, + struct mlxsw_sp_rif *r) { - if (!list_empty(&mlxsw_sp_port->vports_list)) - if (mlxsw_sp_port->lagged) - return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port); - else - return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + struct net_device *l3_dev = r->dev; + struct mlxsw_sp_fid *f = r->f; + u16 fid = f->fid; + u16 rif = r->rif; + + mlxsw_sp->rifs[rif] = NULL; + f->r = NULL; + + kfree(r); + + kfree(f); + + mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false); + + mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false); + } + + static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *l3_dev) + { + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + struct mlxsw_sp_rif *r; + + r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); + if (!r) { + r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev); + if (IS_ERR(r)) + return PTR_ERR(r); + } + + mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f); + r->f->ref_count++; + + netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid); + + return 0; + } + + static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport) + { + struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + + netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid); + + mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL); + if (--f->ref_count == 0) + mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r); + } + + static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev, + struct net_device *port_dev, + unsigned long event, u16 vid) + { + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev); + struct mlxsw_sp_port *mlxsw_sp_vport; + + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); + if (WARN_ON(!mlxsw_sp_vport)) + return -EINVAL; + + switch (event) { + case NETDEV_UP: + return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev); + case NETDEV_DOWN: + mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport); + break; + } + + return 0; + } + + static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev, + unsigned long event) + { + if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev)) + return 0; + + return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1); + } + + static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev, + struct net_device *lag_dev, + unsigned long event, u16 vid) + { + struct net_device *port_dev; + struct list_head *iter; + int err; + + netdev_for_each_lower_dev(lag_dev, port_dev, iter) { + if (mlxsw_sp_port_dev_check(port_dev)) { + err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev, + event, vid); + if (err) + return err; + } + } + + return 0; + } + + static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev, + unsigned long event) + { + if (netif_is_bridge_port(lag_dev)) + return 0; + + return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1); + } + + static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp, + struct net_device *l3_dev) + { + u16 fid; + + if (is_vlan_dev(l3_dev)) + fid = vlan_dev_vlan_id(l3_dev); + else if (mlxsw_sp->master_bridge.dev == l3_dev) + fid = 1; else - if (mlxsw_sp_port->lagged) - return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port); - else - return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port); + return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev); + + return 
mlxsw_sp_fid_find(mlxsw_sp, fid); + } + + static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid) + { + if (mlxsw_sp_fid_is_vfid(fid)) + return MLXSW_REG_RITR_FID_IF; + else + return MLXSW_REG_RITR_VLAN_IF; + } + + static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, + struct net_device *l3_dev, + u16 fid, u16 rif, + bool create) + { + enum mlxsw_reg_ritr_if_type rif_type; + char ritr_pl[MLXSW_REG_RITR_LEN]; + + rif_type = mlxsw_sp_rif_type_get(fid); + mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu, + l3_dev->dev_addr); + mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); + } + + static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp, + struct net_device *l3_dev, + struct mlxsw_sp_fid *f) + { + struct mlxsw_sp_rif *r; + u16 rif; + int err; + + rif = mlxsw_sp_avail_rif_get(mlxsw_sp); + if (rif == MLXSW_SP_RIF_MAX) + return -ERANGE; + + err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true); + if (err) + return err; + + err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true); + if (err) + goto err_rif_fdb_op; + + r = mlxsw_sp_rif_alloc(rif, l3_dev, f); + if (!r) { + err = -ENOMEM; + goto err_rif_alloc; + } + + f->r = r; + mlxsw_sp->rifs[rif] = r; + + netdev_dbg(l3_dev, "RIF=%d created\n", rif); + + return 0; + + err_rif_alloc: + mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false); + err_rif_fdb_op: + mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false); + return err; + } + + void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *r) + { + struct net_device *l3_dev = r->dev; + struct mlxsw_sp_fid *f = r->f; + u16 rif = r->rif; + + mlxsw_sp->rifs[rif] = NULL; + f->r = NULL; + + kfree(r); + + mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false); + + mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false); + + netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif); + } + + static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev, + struct net_device *br_dev, + unsigned long event) + { + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev); + struct mlxsw_sp_fid *f; + + /* FID can either be an actual FID if the L3 device is the + * VLAN-aware bridge or a VLAN device on top. Otherwise, the + * L3 device is a VLAN-unaware bridge and we get a vFID. 
+ */ + f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev); + if (WARN_ON(!f)) + return -EINVAL; + + switch (event) { + case NETDEV_UP: + return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f); + case NETDEV_DOWN: + mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); + break; + } + + return 0; + } + + static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev, + unsigned long event) + { + struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); + u16 vid = vlan_dev_vlan_id(vlan_dev); + + if (mlxsw_sp_port_dev_check(real_dev)) + return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event, + vid); + else if (netif_is_lag_master(real_dev)) + return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event, + vid); + else if (netif_is_bridge_master(real_dev) && + mlxsw_sp->master_bridge.dev == real_dev) + return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev, + event); + + return 0; + } + + static int mlxsw_sp_inetaddr_event(struct notifier_block *unused, + unsigned long event, void *ptr) + { + struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; + struct net_device *dev = ifa->ifa_dev->dev; + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_rif *r; + int err = 0; + + mlxsw_sp = mlxsw_sp_lower_get(dev); + if (!mlxsw_sp) + goto out; + + r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!mlxsw_sp_rif_should_config(r, event)) + goto out; + + if (mlxsw_sp_port_dev_check(dev)) + err = mlxsw_sp_inetaddr_port_event(dev, event); + else if (netif_is_lag_master(dev)) + err = mlxsw_sp_inetaddr_lag_event(dev, event); + else if (netif_is_bridge_master(dev)) + err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event); + else if (is_vlan_dev(dev)) + err = mlxsw_sp_inetaddr_vlan_event(dev, event); + + out: + return notifier_from_errno(err); + } + + static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif, + const char *mac, int mtu) + { + char ritr_pl[MLXSW_REG_RITR_LEN]; + int err; + + mlxsw_reg_ritr_rif_pack(ritr_pl, rif); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); + if (err) + return err; + + mlxsw_reg_ritr_mtu_set(ritr_pl, mtu); + mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac); + mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); + } + + static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev) + { + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_rif *r; + int err; + + mlxsw_sp = mlxsw_sp_lower_get(dev); + if (!mlxsw_sp) + return 0; + + r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!r) + return 0; + + err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false); + if (err) + return err; + + err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu); + if (err) + goto err_rif_edit; + + err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true); + if (err) + goto err_rif_fdb_op; + + ether_addr_copy(r->addr, dev->dev_addr); + r->mtu = dev->mtu; + + netdev_dbg(dev, "Updated RIF=%d\n", r->rif); + + return 0; + + err_rif_fdb_op: + mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu); + err_rif_edit: + mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true); + return err; + } + + static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port, + u16 fid) + { + if (mlxsw_sp_fid_is_vfid(fid)) + return mlxsw_sp_port_vport_find_by_fid(lag_port, fid); + else + return test_bit(fid, lag_port->active_vlans); + } + + static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid) + { + struct 
mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 local_port = mlxsw_sp_port->local_port; + u16 lag_id = mlxsw_sp_port->lag_id; + int i, count = 0; + + if (!mlxsw_sp_port->lagged) + return true; + + for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) { + struct mlxsw_sp_port *lag_port; + + lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i); + if (!lag_port || lag_port->local_port == local_port) + continue; + if (mlxsw_sp_lag_port_fid_member(lag_port, fid)) + count++; + } + + return !count; + } + + static int + mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid) + { + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char sfdf_pl[MLXSW_REG_SFDF_LEN]; + + mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID); + mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); + mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, + mlxsw_sp_port->local_port); + + netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n", + mlxsw_sp_port->local_port, fid); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); + } + + static int + mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid) + { + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char sfdf_pl[MLXSW_REG_SFDF_LEN]; + + mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID); + mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); + mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); + + netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n", + mlxsw_sp_port->lag_id, fid); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); }
- static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport) + int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) { - u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport); - u16 fid = mlxsw_sp_vfid_to_fid(vfid); + if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid)) + return 0;
- if (mlxsw_sp_vport->lagged) - return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport, + if (mlxsw_sp_port->lagged) + return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, fid); else - return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid); + return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid); }
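/* mlxsw_sp_port_fdb_flush() is no longer static, presumably so the
 * switchdev code can flush a {port, FID} pair when a port leaves a FID;
 * the should_flush check above prevents a LAG member from wiping
 * entries that other members of the same LAG still rely on.
 */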
- static bool mlxsw_sp_port_dev_check(const struct net_device *dev) + static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp) { - return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; + struct mlxsw_sp_fid *f, *tmp; + + list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list) + if (--f->ref_count == 0) + mlxsw_sp_fid_destroy(mlxsw_sp, f); + else + WARN_ON_ONCE(1); + } + + static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) + { + return !mlxsw_sp->master_bridge.dev || + mlxsw_sp->master_bridge.dev == br_dev; + } + + static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) + { + mlxsw_sp->master_bridge.dev = br_dev; + mlxsw_sp->master_bridge.ref_count++; + } + + static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp) + { + if (--mlxsw_sp->master_bridge.ref_count == 0) { + mlxsw_sp->master_bridge.dev = NULL; + /* It's possible upper VLAN devices are still holding + * references to underlying FIDs. Drop the reference + * and release the resources if it was the last one. + * If it wasn't, then something bad happened. + */ + mlxsw_sp_master_bridge_gone_sync(mlxsw_sp); + } }
- static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port) + static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *br_dev) { struct net_device *dev = mlxsw_sp_port->dev; int err; @@@ -2646,6 -3221,8 +3199,8 @@@ if (err) return err;
+ mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev); + mlxsw_sp_port->learning = 1; mlxsw_sp_port->learning_sync = 1; mlxsw_sp_port->uc_flood = 1; @@@ -2654,16 -3231,14 +3209,14 @@@ return 0; }
- static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, - bool flush_fdb) + static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port) { struct net_device *dev = mlxsw_sp_port->dev;
- if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port)) - netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); - mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
+ mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp); + mlxsw_sp_port->learning = 0; mlxsw_sp_port->learning_sync = 0; mlxsw_sp_port->uc_flood = 0; @@@ -2672,28 -3247,7 +3225,7 @@@ /* Add implicit VLAN interface in the device, so that untagged * packets will be classified to the default vFID. */ - return mlxsw_sp_port_add_vid(dev, 0, 1); - } - - static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp, - struct net_device *br_dev) - { - return !mlxsw_sp->master_bridge.dev || - mlxsw_sp->master_bridge.dev == br_dev; - } - - static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp, - struct net_device *br_dev) - { - mlxsw_sp->master_bridge.dev = br_dev; - mlxsw_sp->master_bridge.ref_count++; - } - - static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp, - struct net_device *br_dev) - { - if (--mlxsw_sp->master_bridge.ref_count == 0) - mlxsw_sp->master_bridge.dev = NULL; + mlxsw_sp_port_add_vid(dev, 0, 1); }
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) @@@ -2809,6 -3363,45 +3341,45 @@@ static int mlxsw_sp_port_lag_index_get( return -EBUSY; }
+ static void + mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, + u16 lag_id) + { + struct mlxsw_sp_port *mlxsw_sp_vport; + struct mlxsw_sp_fid *f; + + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1); + if (WARN_ON(!mlxsw_sp_vport)) + return; + + /* If vPort is assigned a RIF, then leave it since it's no + * longer valid. + */ + f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + if (f) + f->leave(mlxsw_sp_vport); + + mlxsw_sp_vport->lag_id = lag_id; + mlxsw_sp_vport->lagged = 1; + } + + static void + mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port) + { + struct mlxsw_sp_port *mlxsw_sp_vport; + struct mlxsw_sp_fid *f; + + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1); + if (WARN_ON(!mlxsw_sp_vport)) + return; + + f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + if (f) + f->leave(mlxsw_sp_vport); + + mlxsw_sp_vport->lagged = 0; + } + static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *lag_dev) { @@@ -2844,6 -3437,9 +3415,9 @@@ mlxsw_sp_port->lag_id = lag_id; mlxsw_sp_port->lagged = 1; lag->ref_count++; + + mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id); + return 0;
err_col_port_enable: @@@ -2854,65 -3450,35 +3428,35 @@@ err_col_port_add return err; }
- static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, - struct net_device *br_dev, - bool flush_fdb); - - static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, - struct net_device *lag_dev) + static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *lag_dev) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - struct mlxsw_sp_port *mlxsw_sp_vport; - struct mlxsw_sp_upper *lag; u16 lag_id = mlxsw_sp_port->lag_id; - int err; + struct mlxsw_sp_upper *lag;
if (!mlxsw_sp_port->lagged) - return 0; + return; lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); WARN_ON(lag->ref_count == 0);
- err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id); - if (err) - return err; - err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); - if (err) - return err; - - /* In case we leave a LAG device that has bridges built on top, - * then their teardown sequence is never issued and we need to - * invoke the necessary cleanup routines ourselves. - */ - list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, - vport.list) { - struct net_device *br_dev; - - if (!mlxsw_sp_vport->bridged) - continue; - - br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); - mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false); - } + mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id); + mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
if (mlxsw_sp_port->bridged) { mlxsw_sp_port_active_vlans_del(mlxsw_sp_port); - mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false); - mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL); + mlxsw_sp_port_bridge_leave(mlxsw_sp_port); }
- if (lag->ref_count == 1) { - if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port)) - netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); - err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); - if (err) - return err; - } + if (lag->ref_count == 1) + mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, mlxsw_sp_port->local_port); mlxsw_sp_port->lagged = 0; lag->ref_count--; - return 0; + + mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port); }
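/* mlxsw_sp_port_lag_leave() is now void: the underlying unwind calls
 * can still fail, but since the upper device is going away regardless,
 * propagating the errors would serve no caller.
 */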
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, @@@ -2961,42 -3527,25 +3505,25 @@@ static int mlxsw_sp_port_vlan_link(stru u16 vid = vlan_dev_vlan_id(vlan_dev);
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); - if (!mlxsw_sp_vport) { - WARN_ON(!mlxsw_sp_vport); + if (WARN_ON(!mlxsw_sp_vport)) return -EINVAL; - }
mlxsw_sp_vport->dev = vlan_dev;
return 0; }
- static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port, - struct net_device *vlan_dev) + static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *vlan_dev) { struct mlxsw_sp_port *mlxsw_sp_vport; u16 vid = vlan_dev_vlan_id(vlan_dev);
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); - if (!mlxsw_sp_vport) { - WARN_ON(!mlxsw_sp_vport); - return -EINVAL; - } - - /* When removing a VLAN device while still bridged we should first - * remove it from the bridge, as we receive the bridge's notification - * when the vPort is already gone. - */ - if (mlxsw_sp_vport->bridged) { - struct net_device *br_dev; - - br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); - mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true); - } + if (WARN_ON(!mlxsw_sp_vport)) + return;
mlxsw_sp_vport->dev = mlxsw_sp_port->dev; - - return 0; }
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, @@@ -3006,7 -3555,7 +3533,7 @@@ struct mlxsw_sp_port *mlxsw_sp_port; struct net_device *upper_dev; struct mlxsw_sp *mlxsw_sp; - int err; + int err = 0;
mlxsw_sp_port = netdev_priv(dev); mlxsw_sp = mlxsw_sp_port->mlxsw_sp; @@@ -3015,73 -3564,56 +3542,56 @@@ switch (event) { case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; - if (!info->master || !info->linking) + if (!is_vlan_dev(upper_dev) && + !netif_is_lag_master(upper_dev) && + !netif_is_bridge_master(upper_dev)) + return -EINVAL; + if (!info->linking) break; /* HW limitation forbids to put ports to multiple bridges. */ if (netif_is_bridge_master(upper_dev) && !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev)) - return NOTIFY_BAD; + return -EINVAL; if (netif_is_lag_master(upper_dev) && !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, info->upper_info)) - return NOTIFY_BAD; + return -EINVAL; + if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) + return -EINVAL; + if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && + !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) + return -EINVAL; break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; if (is_vlan_dev(upper_dev)) { - if (info->linking) { + if (info->linking) err = mlxsw_sp_port_vlan_link(mlxsw_sp_port, upper_dev); - if (err) { - netdev_err(dev, "Failed to link VLAN device\n"); - return NOTIFY_BAD; - } - } else { - err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port, - upper_dev); - if (err) { - netdev_err(dev, "Failed to unlink VLAN device\n"); - return NOTIFY_BAD; - } - } + else + mlxsw_sp_port_vlan_unlink(mlxsw_sp_port, + upper_dev); } else if (netif_is_bridge_master(upper_dev)) { - if (info->linking) { - err = mlxsw_sp_port_bridge_join(mlxsw_sp_port); - if (err) { - netdev_err(dev, "Failed to join bridge\n"); - return NOTIFY_BAD; - } - mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev); - } else { - err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port, - true); - mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev); - if (err) { - netdev_err(dev, "Failed to leave bridge\n"); - return NOTIFY_BAD; - } - } + if (info->linking) + err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, + upper_dev); + else + mlxsw_sp_port_bridge_leave(mlxsw_sp_port); } else if (netif_is_lag_master(upper_dev)) { - if (info->linking) { + if (info->linking) err = mlxsw_sp_port_lag_join(mlxsw_sp_port, upper_dev); - if (err) { - netdev_err(dev, "Failed to join link aggregation\n"); - return NOTIFY_BAD; - } - } else { - err = mlxsw_sp_port_lag_leave(mlxsw_sp_port, - upper_dev); - if (err) { - netdev_err(dev, "Failed to leave link aggregation\n"); - return NOTIFY_BAD; - } - } + else + mlxsw_sp_port_lag_leave(mlxsw_sp_port, + upper_dev); + } else { + err = -EINVAL; + WARN_ON(1); } break; }
- return NOTIFY_DONE; + return err; }
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, @@@ -3105,7 -3637,7 +3615,7 @@@ break; }
- return NOTIFY_DONE; + return 0; }
static int mlxsw_sp_netdevice_port_event(struct net_device *dev, @@@ -3119,7 -3651,7 +3629,7 @@@ return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr); }
- return NOTIFY_DONE; + return 0; }
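/* These netdevice handlers now return plain errno values rather than
 * NOTIFY_* codes; the top-level notifier is presumably expected to
 * convert them with notifier_from_errno(), as mlxsw_sp_inetaddr_event()
 * does above.
 */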
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, @@@ -3132,218 -3664,230 +3642,230 @@@ netdev_for_each_lower_dev(lag_dev, dev, iter) { if (mlxsw_sp_port_dev_check(dev)) { ret = mlxsw_sp_netdevice_port_event(dev, event, ptr); - if (ret == NOTIFY_BAD) + if (ret) return ret; } }
- return NOTIFY_DONE; + return 0; }
- static struct mlxsw_sp_vfid * - mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp, - const struct net_device *br_dev) + static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp, + struct net_device *vlan_dev) { - struct mlxsw_sp_vfid *vfid; + u16 fid = vlan_dev_vlan_id(vlan_dev); + struct mlxsw_sp_fid *f;
- list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) { - if (vfid->br_dev == br_dev) - return vfid; + f = mlxsw_sp_fid_find(mlxsw_sp, fid); + if (!f) { + f = mlxsw_sp_fid_create(mlxsw_sp, fid); + if (IS_ERR(f)) + return PTR_ERR(f); }
- return NULL; + f->ref_count++; + + return 0; + } + + static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp, + struct net_device *vlan_dev) + { + u16 fid = vlan_dev_vlan_id(vlan_dev); + struct mlxsw_sp_fid *f; + + f = mlxsw_sp_fid_find(mlxsw_sp, fid); + if (f && f->r) + mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); + if (f && --f->ref_count == 0) + mlxsw_sp_fid_destroy(mlxsw_sp, f); }
- static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid) + static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, + unsigned long event, void *ptr) { - return vfid - MLXSW_SP_VFID_PORT_MAX; + struct netdev_notifier_changeupper_info *info; + struct net_device *upper_dev; + struct mlxsw_sp *mlxsw_sp; + int err; + + mlxsw_sp = mlxsw_sp_lower_get(br_dev); + if (!mlxsw_sp) + return 0; + if (br_dev != mlxsw_sp->master_bridge.dev) + return 0; + + info = ptr; + + switch (event) { + case NETDEV_CHANGEUPPER: + upper_dev = info->upper_dev; + if (!is_vlan_dev(upper_dev)) + break; + if (info->linking) { + err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp, + upper_dev); + if (err) + return err; + } else { + mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev); + } + break; + } + + return 0; }
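/* mlxsw_sp_master_bridge_vlan_link()/_unlink() above reference-count
 * the FID backing a VLAN upper of the master bridge, so it survives for
 * as long as any user remains; the unlink path also destroys a RIF that
 * may still be attached to the FID before dropping the last reference.
 */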
- static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid) + static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp) { - return MLXSW_SP_VFID_PORT_MAX + br_vfid; + return find_first_zero_bit(mlxsw_sp->vfids.mapped, + MLXSW_SP_VFID_MAX); }
- static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp) + static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create) { - return find_first_zero_bit(mlxsw_sp->br_vfids.mapped, - MLXSW_SP_VFID_BR_MAX); + char sfmr_pl[MLXSW_REG_SFMR_LEN]; + + mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); }
- static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp, - struct net_device *br_dev) + static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport); + + static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) { struct device *dev = mlxsw_sp->bus_info->dev; - struct mlxsw_sp_vfid *vfid; - u16 n_vfid; + struct mlxsw_sp_fid *f; + u16 vfid, fid; int err;
- n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp)); - if (n_vfid == MLXSW_SP_VFID_MAX) { + vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp); + if (vfid == MLXSW_SP_VFID_MAX) { dev_err(dev, "No available vFIDs\n"); return ERR_PTR(-ERANGE); }
- err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid); + fid = mlxsw_sp_vfid_to_fid(vfid); + err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true); if (err) { - dev_err(dev, "Failed to create vFID=%d\n", n_vfid); + dev_err(dev, "Failed to create FID=%d\n", fid); return ERR_PTR(err); }
- vfid = kzalloc(sizeof(*vfid), GFP_KERNEL); - if (!vfid) + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (!f) goto err_allocate_vfid;
- vfid->vfid = n_vfid; - vfid->br_dev = br_dev; + f->leave = mlxsw_sp_vport_vfid_leave; + f->fid = fid; + f->dev = br_dev;
- list_add(&vfid->list, &mlxsw_sp->br_vfids.list); - set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped); + list_add(&f->list, &mlxsw_sp->vfids.list); + set_bit(vfid, mlxsw_sp->vfids.mapped);
- return vfid; + return f;
err_allocate_vfid: - __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid); + mlxsw_sp_vfid_op(mlxsw_sp, fid, false); return ERR_PTR(-ENOMEM); }
- static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_vfid *vfid) + static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fid *f) { - u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid); + u16 vfid = mlxsw_sp_fid_to_vfid(f->fid); + u16 fid = f->fid;
- clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped); - list_del(&vfid->list); + clear_bit(vfid, mlxsw_sp->vfids.mapped); + list_del(&f->list);
- __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid); + if (f->r) + mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
- kfree(vfid); + kfree(f); + + mlxsw_sp_vfid_op(mlxsw_sp, fid, false); }
- static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, - struct net_device *br_dev, - bool flush_fdb) + static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, + bool valid) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); - struct net_device *dev = mlxsw_sp_vport->dev; - struct mlxsw_sp_vfid *vfid, *new_vfid; - int err; - - vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev); - if (!vfid) { - WARN_ON(!vfid); - return -EINVAL; - } - - /* We need a vFID to go back to after leaving the bridge's vFID. */ - new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid); - if (!new_vfid) { - new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid); - if (IS_ERR(new_vfid)) { - netdev_err(dev, "Failed to create vFID for VID=%d\n", - vid); - return PTR_ERR(new_vfid); - } - }
- /* Invalidate existing {Port, VID} to vFID mapping and create a new - * one for the new vFID. - */ - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - false, - mlxsw_sp_vfid_to_fid(vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n", - vfid->vfid); - goto err_port_vid_to_fid_invalidate; - } + return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid, + vid); + }
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - true, - mlxsw_sp_vfid_to_fid(new_vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n", - new_vfid->vfid); - goto err_port_vid_to_fid_validate; - } + static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *br_dev) + { + struct mlxsw_sp_fid *f; + int err;
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); - if (err) { - netdev_err(dev, "Failed to disable learning\n"); - goto err_port_vid_learning_set; + f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev); + if (!f) { + f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev); + if (IS_ERR(f)) + return PTR_ERR(f); }
- err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, - false); - if (err) { - netdev_err(dev, "Failed clear to clear flooding\n"); + err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true); + if (err) goto err_vport_flood_set; - } - - err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, - MLXSW_REG_SPMS_STATE_FORWARDING); - if (err) { - netdev_err(dev, "Failed to set STP state\n"); - goto err_port_stp_state_set; - }
- if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport)) - netdev_err(dev, "Failed to flush FDB\n"); + err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true); + if (err) + goto err_vport_fid_map;
- /* Switch between the vFIDs and destroy the old one if needed. */ - new_vfid->nr_vports++; - mlxsw_sp_vport->vport.vfid = new_vfid; - vfid->nr_vports--; - if (!vfid->nr_vports) - mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid); + mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f); + f->ref_count++;
- mlxsw_sp_vport->learning = 0; - mlxsw_sp_vport->learning_sync = 0; - mlxsw_sp_vport->uc_flood = 0; - mlxsw_sp_vport->bridged = 0; + netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);
return 0;
- err_port_stp_state_set: + err_vport_fid_map: + mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false); err_vport_flood_set: - err_port_vid_learning_set: - err_port_vid_to_fid_validate: - err_port_vid_to_fid_invalidate: - /* Rollback vFID only if new. */ - if (!new_vfid->nr_vports) - mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid); + if (!f->ref_count) + mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f); return err; }
+ static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport) + { + struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + + netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid); + + mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false); + + mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false); + + mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid); + + mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL); + if (--f->ref_count == 0) + mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f); + } + static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport, struct net_device *br_dev) { - struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid; - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); struct net_device *dev = mlxsw_sp_vport->dev; - struct mlxsw_sp_vfid *vfid; int err;
- vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev); - if (!vfid) { - vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev); - if (IS_ERR(vfid)) { - netdev_err(dev, "Failed to create bridge vFID\n"); - return PTR_ERR(vfid); - } - } + if (f && !WARN_ON(!f->leave)) + f->leave(mlxsw_sp_vport);
- err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false); + err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev); if (err) { - netdev_err(dev, "Failed to setup flooding for vFID=%d\n", - vfid->vfid); - goto err_port_flood_set; + netdev_err(dev, "Failed to join vFID\n"); + return err; }
err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); @@@ -3352,38 -3896,6 +3874,6 @@@ goto err_port_vid_learning_set; }
- /* We need to invalidate existing {Port, VID} to vFID mapping and - * create a new one for the bridge's vFID. - */ - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - false, - mlxsw_sp_vfid_to_fid(old_vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n", - old_vfid->vfid); - goto err_port_vid_to_fid_invalidate; - } - - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - true, - mlxsw_sp_vfid_to_fid(vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n", - vfid->vfid); - goto err_port_vid_to_fid_validate; - } - - /* Switch between the vFIDs and destroy the old one if needed. */ - vfid->nr_vports++; - mlxsw_sp_vport->vport.vfid = vfid; - old_vfid->nr_vports--; - if (!old_vfid->nr_vports) - mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid); - mlxsw_sp_vport->learning = 1; mlxsw_sp_vport->learning_sync = 1; mlxsw_sp_vport->uc_flood = 1; @@@ -3391,20 -3903,25 +3881,25 @@@
return 0;
- err_port_vid_to_fid_validate: - mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, - mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid); - err_port_vid_to_fid_invalidate: - mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); err_port_vid_learning_set: - mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false); - err_port_flood_set: - if (!vfid->nr_vports) - mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid); + mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport); return err; }
+ static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport) + { + u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + + mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); + + mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport); + + mlxsw_sp_vport->learning = 0; + mlxsw_sp_vport->learning_sync = 0; + mlxsw_sp_vport->uc_flood = 0; + mlxsw_sp_vport->bridged = 0; + } + static bool mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port, const struct net_device *br_dev) @@@ -3413,7 -3930,9 +3908,9 @@@
list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, vport.list) { - if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev) + struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport); + + if (dev && dev == br_dev) return false; }
@@@ -3428,56 -3947,39 +3925,39 @@@ static int mlxsw_sp_netdevice_vport_eve struct netdev_notifier_changeupper_info *info = ptr; struct mlxsw_sp_port *mlxsw_sp_vport; struct net_device *upper_dev; - int err; + int err = 0;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
switch (event) { case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; - if (!info->master || !info->linking) - break; if (!netif_is_bridge_master(upper_dev)) - return NOTIFY_BAD; + return -EINVAL; + if (!info->linking) + break; /* We can't have multiple VLAN interfaces configured on * the same port and being members in the same bridge. */ if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port, upper_dev)) - return NOTIFY_BAD; + return -EINVAL; break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; - if (!info->master) - break; if (info->linking) { - if (!mlxsw_sp_vport) { - WARN_ON(!mlxsw_sp_vport); - return NOTIFY_BAD; - } + if (WARN_ON(!mlxsw_sp_vport)) + return -EINVAL; err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport, upper_dev); - if (err) { - netdev_err(dev, "Failed to join bridge\n"); - return NOTIFY_BAD; - } } else { - /* We ignore bridge's unlinking notifications if vPort - * is gone, since we already left the bridge when the - * VLAN device was unlinked from the real device. - */ if (!mlxsw_sp_vport) - return NOTIFY_DONE; - err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, - upper_dev, true); - if (err) { - netdev_err(dev, "Failed to leave bridge\n"); - return NOTIFY_BAD; - } + return 0; + mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport); } }
- return NOTIFY_DONE; + return err; }
static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev, @@@ -3492,12 -3994,12 +3972,12 @@@ if (mlxsw_sp_port_dev_check(dev)) { ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr, vid); - if (ret == NOTIFY_BAD) + if (ret) return ret; } }
- return NOTIFY_DONE; + return 0; }
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, @@@ -3513,35 -4015,44 +3993,44 @@@ return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr, vid);
- return NOTIFY_DONE; + return 0; }
static int mlxsw_sp_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); + int err = 0;
- if (mlxsw_sp_port_dev_check(dev)) - return mlxsw_sp_netdevice_port_event(dev, event, ptr); - - if (netif_is_lag_master(dev)) - return mlxsw_sp_netdevice_lag_event(dev, event, ptr); - - if (is_vlan_dev(dev)) - return mlxsw_sp_netdevice_vlan_event(dev, event, ptr); + if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU) + err = mlxsw_sp_netdevice_router_port_event(dev); + else if (mlxsw_sp_port_dev_check(dev)) + err = mlxsw_sp_netdevice_port_event(dev, event, ptr); + else if (netif_is_lag_master(dev)) + err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); + else if (netif_is_bridge_master(dev)) + err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr); + else if (is_vlan_dev(dev)) + err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
- return NOTIFY_DONE; + return notifier_from_errno(err); }
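The refactor above funnels every handler through plain errno returns (0 or -EINVAL) and converts to notifier semantics exactly once, at the chain boundary. For reference, this is the encoding notifier_from_errno() performs in include/linux/notifier.h, so the chain stops on error and the errno can be recovered later with notifier_to_errno():

static inline int notifier_from_errno(int err)
{
	if (err)
		return NOTIFY_STOP_MASK | (NOTIFY_OK - err);

	return NOTIFY_OK;
}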
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = { .notifier_call = mlxsw_sp_netdevice_event, };
+ static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = { + .notifier_call = mlxsw_sp_inetaddr_event, + .priority = 10, /* Must be called before FIB notifier block */ + }; + static int __init mlxsw_sp_module_init(void) { int err;
register_netdevice_notifier(&mlxsw_sp_netdevice_nb); + register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); err = mlxsw_core_driver_register(&mlxsw_sp_driver); if (err) goto err_core_driver_register; @@@ -3555,6 -4066,7 +4044,7 @@@ err_core_driver_register static void __exit mlxsw_sp_module_exit(void) { mlxsw_core_driver_unregister(&mlxsw_sp_driver); + unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); }
diff --combined drivers/net/ppp/ppp_generic.c index a30ee42,17953ab..f226db4 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@@ -1312,10 -1312,9 +1312,9 @@@ ppp_get_stats64(struct net_device *dev return stats64; }
- static struct lock_class_key ppp_tx_busylock; static int ppp_dev_init(struct net_device *dev) { - dev->qdisc_tx_busylock = &ppp_tx_busylock; + netdev_lockdep_set_classes(dev); return 0; }
@@@ -2601,6 -2600,8 +2600,6 @@@ ppp_unregister_channel(struct ppp_chann spin_lock_bh(&pn->all_channels_lock); list_del(&pch->list); spin_unlock_bh(&pn->all_channels_lock); - put_net(pch->chan_net); - pch->chan_net = NULL;
pch->file.dead = 1; wake_up_interruptible(&pch->file.rwait); @@@ -3134,9 -3135,6 +3133,9 @@@ ppp_disconnect_channel(struct channel * */ static void ppp_destroy_channel(struct channel *pch) { + put_net(pch->chan_net); + pch->chan_net = NULL; + atomic_dec(&channel_count);
if (!pch->file.dead) { diff --combined drivers/net/usb/r8152.c index e9654a6,168a8e2..f41a8ad --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@@ -26,7 -26,6 +26,7 @@@ #include <linux/mdio.h> #include <linux/usb/cdc.h> #include <linux/suspend.h> +#include <linux/acpi.h>
/* Information for net-next */ #define NETNEXT_VERSION "08" @@@ -461,11 -460,6 +461,11 @@@ /* SRAM_IMPEDANCE */ #define RX_DRIVING_MASK 0x6000
+/* MAC PASSTHRU */ +#define AD_MASK 0xfee0 +#define EFUSE 0xcfdb +#define PASS_THRU_MASK 0x1 + enum rtl_register_content { _1000bps = 0x10, _100bps = 0x08, @@@ -613,7 -607,7 +613,7 @@@ struct r8152 struct list_head rx_done, tx_free; struct sk_buff_head tx_queue, rx_queue; spinlock_t rx_lock, tx_lock; - struct delayed_work schedule; + struct delayed_work schedule, hw_phy_work; struct mii_if_info mii; struct mutex control; /* use for hw setting */ #ifdef CONFIG_PM_SLEEP @@@ -630,6 -624,7 +630,7 @@@ int (*eee_get)(struct r8152 *, struct ethtool_eee *); int (*eee_set)(struct r8152 *, struct ethtool_eee *); bool (*in_nway)(struct r8152 *); + void (*hw_phy_cfg)(struct r8152 *); void (*autosuspend_en)(struct r8152 *tp, bool enable); } rtl_ops;
@@@ -639,8 -634,11 +640,11 @@@ u32 tx_qlen; u32 coalesce; u16 ocp_base; + u16 speed; u8 *intr_buff; u8 version; + u8 duplex; + u8 autoneg; };
enum rtl_version { @@@ -1042,65 -1040,6 +1046,65 @@@ out1 return ret; }
+/* Devices containing RTL8153-AD can support a persistent + * host system provided MAC address. + * Examples of this are Dell TB15 and Dell WD15 docks + */ +static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa) +{ + acpi_status status; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj; + int ret = -EINVAL; + u32 ocp_data; + unsigned char buf[6]; + + /* test for -AD variant of RTL8153 */ + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); + if ((ocp_data & AD_MASK) != 0x1000) + return -ENODEV; + + /* test for MAC address pass-through bit */ + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE); + if ((ocp_data & PASS_THRU_MASK) != 1) + return -ENODEV; + + /* returns _AUXMAC_#AABBCCDDEEFF# */ + status = acpi_evaluate_object(NULL, "\_SB.AMAC", NULL, &buffer); + obj = (union acpi_object *)buffer.pointer; + if (!ACPI_SUCCESS(status)) + return -ENODEV; + if (obj->type != ACPI_TYPE_BUFFER || obj->string.length != 0x17) { + netif_warn(tp, probe, tp->netdev, + "Invalid buffer when reading pass-thru MAC addr: " + "(%d, %d)\n", + obj->type, obj->string.length); + goto amacout; + } + if (strncmp(obj->string.pointer, "_AUXMAC_#", 9) != 0 || + strncmp(obj->string.pointer + 0x15, "#", 1) != 0) { + netif_warn(tp, probe, tp->netdev, + "Invalid header when reading pass-thru MAC addr\n"); + goto amacout; + } + ret = hex2bin(buf, obj->string.pointer + 9, 6); + if (!(ret == 0 && is_valid_ether_addr(buf))) { + netif_warn(tp, probe, tp->netdev, + "Invalid MAC when reading pass-thru MAC addr: " + "%d, %pM\n", ret, buf); + ret = -EINVAL; + goto amacout; + } + memcpy(sa->sa_data, buf, 6); + ether_addr_copy(tp->netdev->dev_addr, sa->sa_data); + netif_info(tp, probe, tp->netdev, + "Using pass-thru MAC addr %pM\n", sa->sa_data); + +amacout: + kfree(obj); + return ret; +} + static int set_ethernet_addr(struct r8152 *tp) { struct net_device *dev = tp->netdev; @@@ -1109,15 -1048,8 +1113,15 @@@
if (tp->version == RTL_VER_01) ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data); - else - ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data); + else { + /* if this is not an RTL8153-AD, the eFuse MAC pass-through bit + * is not set, or the system doesn't provide a valid _SB.AMAC, + * this is expected to return non-zero + */ + ret = vendor_mac_passthru_addr_read(tp, &sa); + if (ret < 0) + ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data); + }
if (ret < 0) { netif_err(tp, probe, dev, "Get ether addr fail\n"); @@@ -1820,7 -1752,7 +1824,7 @@@ static int rx_bottom(struct r8152 *tp, pkt_len -= CRC_SIZE; rx_data += sizeof(struct rx_desc);
- skb = netdev_alloc_skb_ip_align(netdev, pkt_len); + skb = napi_alloc_skb(&tp->napi, pkt_len); if (!skb) { stats->rx_dropped++; goto find_next_rx; @@@ -2368,6 -2300,10 +2372,6 @@@ static u32 __rtl_get_wol(struct r8152 * u32 ocp_data; u32 wolopts = 0;
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5); - if (!(ocp_data & LAN_WAKE_EN)) - return 0; - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); if (ocp_data & LINK_ON_WAKE_EN) wolopts |= WAKE_PHY; @@@ -2400,13 -2336,15 +2404,13 @@@ static void __rtl_set_wol(struct r8152 ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5); - ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN | LAN_WAKE_EN); + ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN); if (wolopts & WAKE_UCAST) ocp_data |= UWF_EN; if (wolopts & WAKE_BCAST) ocp_data |= BWF_EN; if (wolopts & WAKE_MCAST) ocp_data |= MWF_EN; - if (wolopts & WAKE_ANY) - ocp_data |= LAN_WAKE_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); @@@ -2512,27 -2450,6 +2516,6 @@@ static void rtl8153_runtime_enable(stru } }
- static void rtl_phy_reset(struct r8152 *tp) - { - u16 data; - int i; - - data = r8152_mdio_read(tp, MII_BMCR); - - /* don't reset again before the previous one complete */ - if (data & BMCR_RESET) - return; - - data |= BMCR_RESET; - r8152_mdio_write(tp, MII_BMCR, data); - - for (i = 0; i < 50; i++) { - msleep(20); - if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0) - break; - } - } - static void r8153_teredo_off(struct r8152 *tp) { u32 ocp_data; @@@ -2600,8 -2517,6 +2583,6 @@@ static void r8152b_exit_oob(struct r815
rxdy_gated_en(tp, true); r8153_teredo_off(tp); - r8152b_hw_phy_cfg(tp); - ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00);
@@@ -2779,8 -2694,6 +2760,6 @@@ static void r8153_first_init(struct r81 ocp_data &= ~RCR_ACPT_ALL; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
- r8153_hw_phy_cfg(tp); - rtl8152_nic_reset(tp); rtl_reset_bmu(tp);
@@@ -2916,7 -2829,6 +2895,6 @@@ static int rtl8152_set_speed(struct r81 u16 bmcr, anar, gbcr; int ret = 0;
- cancel_delayed_work_sync(&tp->schedule); anar = r8152_mdio_read(tp, MII_ADVERTISE); anar &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL | ADVERTISE_100HALF | ADVERTISE_100FULL); @@@ -2976,7 -2888,7 +2954,7 @@@ bmcr = BMCR_ANENABLE | BMCR_ANRESTART; }
- if (test_bit(PHY_RESET, &tp->flags)) + if (test_and_clear_bit(PHY_RESET, &tp->flags)) bmcr |= BMCR_RESET;
if (tp->mii.supports_gmii) @@@ -2985,7 -2897,7 +2963,7 @@@ r8152_mdio_write(tp, MII_ADVERTISE, anar); r8152_mdio_write(tp, MII_BMCR, bmcr);
- if (test_and_clear_bit(PHY_RESET, &tp->flags)) { + if (bmcr & BMCR_RESET) { int i;
for (i = 0; i < 50; i++) { @@@ -3135,15 -3047,33 +3113,33 @@@ static void rtl_work_func_t(struct work netif_carrier_ok(tp->netdev)) napi_schedule(&tp->napi);
- if (test_and_clear_bit(PHY_RESET, &tp->flags)) - rtl_phy_reset(tp); - mutex_unlock(&tp->control);
out1: usb_autopm_put_interface(tp->intf); }
+ static void rtl_hw_phy_work_func_t(struct work_struct *work) + { + struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work); + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + if (usb_autopm_get_interface(tp->intf) < 0) + return; + + mutex_lock(&tp->control); + + tp->rtl_ops.hw_phy_cfg(tp); + + rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex); + + mutex_unlock(&tp->control); + + usb_autopm_put_interface(tp->intf); + } + #ifdef CONFIG_PM_SLEEP static int rtl_notifier(struct notifier_block *nb, unsigned long action, void *data) @@@ -3180,8 -3110,6 +3176,6 @@@ static int rtl8152_open(struct net_devi if (res) goto out;
- netif_carrier_off(netdev); - res = usb_autopm_get_interface(tp->intf); if (res < 0) { free_all_mem(tp); @@@ -3192,9 -3120,6 +3186,6 @@@
tp->rtl_ops.up(tp);
- rtl8152_set_speed(tp, AUTONEG_ENABLE, - tp->mii.supports_gmii ? SPEED_1000 : SPEED_100, - DUPLEX_FULL); netif_carrier_off(netdev); netif_start_queue(netdev); set_bit(WORK_ENABLE, &tp->flags); @@@ -3618,6 -3543,7 +3609,7 @@@ static int rtl8152_resume(struct usb_in
if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) { tp->rtl_ops.init(tp); + queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); netif_device_attach(tp->netdev); }
@@@ -3632,10 -3558,6 +3624,6 @@@ napi_enable(&tp->napi); } else { tp->rtl_ops.up(tp); - rtl8152_set_speed(tp, AUTONEG_ENABLE, - tp->mii.supports_gmii ? - SPEED_1000 : SPEED_100, - DUPLEX_FULL); netif_carrier_off(tp->netdev); set_bit(WORK_ENABLE, &tp->flags); } @@@ -3765,6 -3687,11 +3753,11 @@@ static int rtl8152_set_settings(struct mutex_lock(&tp->control);
ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex); + if (!ret) { + tp->autoneg = cmd->autoneg; + tp->speed = cmd->speed; + tp->duplex = cmd->duplex; + }
mutex_unlock(&tp->control);
@@@ -4222,6 -4149,7 +4215,7 @@@ static int rtl_ops_init(struct r8152 *t ops->eee_get = r8152_get_eee; ops->eee_set = r8152_set_eee; ops->in_nway = rtl8152_in_nway; + ops->hw_phy_cfg = r8152b_hw_phy_cfg; ops->autosuspend_en = rtl_runtime_suspend_enable; break;
@@@ -4238,6 -4166,7 +4232,7 @@@ ops->eee_get = r8153_get_eee; ops->eee_set = r8153_set_eee; ops->in_nway = rtl8153_in_nway; + ops->hw_phy_cfg = r8153_hw_phy_cfg; ops->autosuspend_en = rtl8153_runtime_enable; break;
@@@ -4285,6 -4214,7 +4280,7 @@@ static int rtl8152_probe(struct usb_int
mutex_init(&tp->control); INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t); + INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t);
netdev->netdev_ops = &rtl8152_netdev_ops; netdev->watchdog_timeo = RTL8152_TX_TIMEOUT; @@@ -4324,9 -4254,14 +4320,14 @@@ break; }
+ tp->autoneg = AUTONEG_ENABLE; + tp->speed = tp->mii.supports_gmii ? SPEED_1000 : SPEED_100; + tp->duplex = DUPLEX_FULL; + intf->needs_remote_wakeup = 1;
tp->rtl_ops.init(tp); + queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); set_ethernet_addr(tp);
usb_set_intfdata(intf, tp); @@@ -4372,6 -4307,7 +4373,7 @@@ static void rtl8152_disconnect(struct u
netif_napi_del(&tp->napi); unregister_netdev(tp->netdev); + cancel_delayed_work_sync(&tp->hw_phy_work); tp->rtl_ops.unload(tp); free_netdev(tp->netdev); } @@@ -4425,4 -4361,3 +4427,4 @@@ module_usb_driver(rtl8152_driver) MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); +MODULE_VERSION(DRIVER_VERSION); diff --combined include/linux/acpi.h index cc63aef,4d4bb49..db7c8bd --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@@ -208,6 -208,7 +208,6 @@@ void acpi_boot_table_init (void) int acpi_mps_check (void); int acpi_numa_init (void);
-void early_acpi_table_init(void *data, size_t size); int acpi_table_init (void); int acpi_table_parse(char *id, acpi_tbl_table_handler handler); int __init acpi_parse_entries(char *id, unsigned long table_size, @@@ -231,26 -232,12 +231,26 @@@ int acpi_table_parse_madt(enum acpi_mad int acpi_parse_mcfg (struct acpi_table_header *header); void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
-/* the following four functions are architecture-dependent */ +/* the following numa functions are architecture-dependent */ void acpi_numa_slit_init (struct acpi_table_slit *slit); + +#if defined(CONFIG_X86) || defined(CONFIG_IA64) void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); +#else +static inline void +acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { } +#endif + void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); + +#ifdef CONFIG_ARM64 +void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa); +#else +static inline void +acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { } +#endif + int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); -void acpi_numa_arch_fixup(void);
#ifndef PHYS_CPUID_INVALID typedef u32 phys_cpuid_t; @@@ -457,12 -444,8 +457,12 @@@ acpi_status acpi_run_osc(acpi_handle ha #define OSC_SB_HOTPLUG_OST_SUPPORT 0x00000008 #define OSC_SB_APEI_SUPPORT 0x00000010 #define OSC_SB_CPC_SUPPORT 0x00000020 +#define OSC_SB_CPCV2_SUPPORT 0x00000040 +#define OSC_SB_PCLPI_SUPPORT 0x00000080 +#define OSC_SB_OSLPI_SUPPORT 0x00000100
extern bool osc_sb_apei_support_acked; +extern bool osc_pc_lpi_support_confirmed;
/* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */ #define OSC_PCI_EXT_CONFIG_SUPPORT 0x00000001 @@@ -549,24 -532,6 +549,24 @@@ void acpi_walk_dep_device_list(acpi_han struct platform_device *acpi_create_platform_device(struct acpi_device *); #define ACPI_PTR(_ptr) (_ptr)
+static inline void acpi_device_set_enumerated(struct acpi_device *adev) +{ + adev->flags.visited = true; +} + +static inline void acpi_device_clear_enumerated(struct acpi_device *adev) +{ + adev->flags.visited = false; +} + +enum acpi_reconfig_event { + ACPI_RECONFIG_DEVICE_ADD = 0, + ACPI_RECONFIG_DEVICE_REMOVE, +}; + +int acpi_reconfig_notifier_register(struct notifier_block *nb); +int acpi_reconfig_notifier_unregister(struct notifier_block *nb); + #else /* !CONFIG_ACPI */
#define acpi_disabled 1 @@@ -578,6 -543,11 +578,11 @@@
struct fwnode_handle;
+ static inline bool acpi_dev_found(const char *hid) + { + return false; + } + static inline bool is_acpi_node(struct fwnode_handle *fwnode) { return false; @@@ -623,6 -593,7 +628,6 @@@ static inline const char *acpi_dev_name return NULL; }
-static inline void early_acpi_table_init(void *data, size_t size) { } static inline void acpi_early_init(void) { } static inline void acpi_subsystem_init(void) { }
@@@ -688,6 -659,14 +693,14 @@@ static inline bool acpi_driver_match_de return false; }
+ static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle, + const u8 *uuid, + int rev, int func, + union acpi_object *argv4) + { + return NULL; + } + static inline int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env) { @@@ -712,24 -691,6 +725,24 @@@ static inline enum dev_dma_attr acpi_ge
#define ACPI_PTR(_ptr) (NULL)
+static inline void acpi_device_set_enumerated(struct acpi_device *adev) +{ +} + +static inline void acpi_device_clear_enumerated(struct acpi_device *adev) +{ +} + +static inline int acpi_reconfig_notifier_register(struct notifier_block *nb) +{ + return -EINVAL; +} + +static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb) +{ + return -EINVAL; +} + #endif /* !CONFIG_ACPI */
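A minimal sketch of a consumer of the new reconfiguration chain; the callback and notifier names are hypothetical, and it assumes the chain passes the affected struct acpi_device as the notifier data:

static int mydrv_acpi_reconfig(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	struct acpi_device *adev = data;	/* assumed payload */

	switch (event) {
	case ACPI_RECONFIG_DEVICE_ADD:
		/* react to a device appearing after a dynamic table load */
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		/* undo whatever DEVICE_ADD did for adev */
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block mydrv_acpi_reconfig_nb = {
	.notifier_call = mydrv_acpi_reconfig,
};

/* registered via acpi_reconfig_notifier_register(&mydrv_acpi_reconfig_nb); */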
#ifdef CONFIG_ACPI @@@ -1049,10 -1010,4 +1062,10 @@@ static inline struct fwnode_handle *acp #define acpi_probe_device_table(t) ({ int __r = 0; __r;}) #endif
+#ifdef CONFIG_ACPI_TABLE_UPGRADE +void acpi_table_upgrade(void); +#else +static inline void acpi_table_upgrade(void) { } +#endif + #endif /*_LINUX_ACPI_H*/ diff --combined include/linux/filter.h index 8f74f3d,15d816a..a16439b --- a/include/linux/filter.h +++ b/include/linux/filter.h @@@ -368,6 -368,11 +368,11 @@@ struct bpf_skb_data_end void *data_end; };
+ struct xdp_buff { + void *data; + void *data_end; + }; + /* compute the linear packet data range [data, data_end) which * will be accessed by cls_bpf and act_bpf programs */ @@@ -429,6 -434,18 +434,18 @@@ static inline u32 bpf_prog_run_clear_cb return BPF_PROG_RUN(prog, skb); }
+ static inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, + struct xdp_buff *xdp) + { + u32 ret; + + rcu_read_lock(); + ret = BPF_PROG_RUN(prog, (void *)xdp); + rcu_read_unlock(); + + return ret; + } + static inline unsigned int bpf_prog_size(unsigned int proglen) { return max(sizeof(struct bpf_prog), @@@ -467,11 -484,7 +484,11 @@@ static inline void bpf_prog_unlock_ro(s } #endif /* CONFIG_DEBUG_SET_MODULE_RONX */
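A hedged sketch of how a driver rx path might use the helper above; the mydrv_* naming is illustrative, and the verdicts are the XDP_* actions defined for BPF_PROG_TYPE_XDP programs (anything unrecognized is reported via bpf_warn_invalid_xdp_action() and dropped):

/* Returns true if the frame should continue to normal skb processing. */
static bool mydrv_run_xdp(struct bpf_prog *prog, void *data, unsigned int len)
{
	struct xdp_buff xdp;
	u32 act;

	if (!prog)
		return true;	/* no program attached */

	xdp.data = data;
	xdp.data_end = data + len;

	act = bpf_prog_run_xdp(prog, &xdp);
	switch (act) {
	case XDP_PASS:
		return true;
	case XDP_DROP:
		return false;	/* recycle the buffer, count the drop */
	default:
		bpf_warn_invalid_xdp_action(act);
		return false;
	}
}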
-int sk_filter(struct sock *sk, struct sk_buff *skb); +int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); +static inline int sk_filter(struct sock *sk, struct sk_buff *skb) +{ + return sk_filter_trim_cap(sk, skb, 1); +}
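The trim-capped variant exists so a protocol can run socket filters without letting them trim the skb below its own header; the sk_filter() wrapper above keeps the old one-byte floor. A sketch of the intended call pattern, where hdr_len is an assumed, caller-known minimum:

/* Drop the packet if the filter rejects it; a successful filter run
 * may shrink the skb, but never below hdr_len bytes.
 */
if (sk_filter_trim_cap(sk, skb, hdr_len))
	goto discard;	/* caller frees the skb */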
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); void bpf_prog_free(struct bpf_prog *fp); @@@ -513,6 -526,7 +530,7 @@@ bool bpf_helper_changes_skb_data(void *
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); + void bpf_warn_invalid_xdp_action(u32 act);
#ifdef CONFIG_BPF_JIT extern int bpf_jit_enable; diff --combined include/linux/netdevice.h index da4b33b,fab9a1c..43c749b --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@@ -61,6 -61,9 +61,9 @@@ struct wireless_dev /* 802.15.4 specific */ struct wpan_dev; struct mpls_dev; + /* UDP Tunnel offloads */ + struct udp_tunnel_info; + struct bpf_prog;
void netdev_set_default_ethtool_ops(struct net_device *dev, const struct ethtool_ops *ops); @@@ -90,7 -93,6 +93,6 @@@ #define NET_XMIT_SUCCESS 0x00 #define NET_XMIT_DROP 0x01 /* skb dropped */ #define NET_XMIT_CN 0x02 /* congestion notification */ - #define NET_XMIT_POLICED 0x03 /* skb is shot by police */ #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It @@@ -798,6 -800,33 +800,33 @@@ struct tc_to_netdev }; };
+ /* These structures hold the attributes of xdp state that are being passed + * to the netdevice through the xdp op. + */ + enum xdp_netdev_command { + /* Set or clear a bpf program used in the earliest stages of packet + * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee + * is responsible for calling bpf_prog_put on any old progs that are + * stored. In case of error, the callee need not release the new prog + * reference, but on success it takes ownership and must bpf_prog_put + * when it is no longer used. + */ + XDP_SETUP_PROG, + /* Check if a bpf program is set on the device. The callee should + * return true if a program is currently attached and running. + */ + XDP_QUERY_PROG, + }; + + struct netdev_xdp { + enum xdp_netdev_command command; + union { + /* XDP_SETUP_PROG */ + struct bpf_prog *prog; + /* XDP_QUERY_PROG */ + bool prog_attached; + }; + };
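A sketch of the shape an ndo_xdp implementation might take, following the ownership rules documented in the enum above. mydrv_priv and its xdp_prog field are hypothetical, and a real driver would publish the new program with proper synchronization (e.g. xchg()/RCU) against its rx path:

static int mydrv_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	struct mydrv_priv *priv = netdev_priv(dev);
	struct bpf_prog *old_prog;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		old_prog = priv->xdp_prog;
		priv->xdp_prog = xdp->prog;	/* take ownership of new prog */
		if (old_prog)
			bpf_prog_put(old_prog);	/* callee releases old prog */
		return 0;
	case XDP_QUERY_PROG:
		xdp->prog_attached = !!priv->xdp_prog;
		return 0;
	default:
		return -EINVAL;
	}
}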
/* * This structure defines the management hooks for network devices. @@@ -1025,31 -1054,18 +1054,18 @@@ * not implement this, it is assumed that the hw is not able to have * multiple net devices on single physical port. * - * void (*ndo_add_vxlan_port)(struct net_device *dev, - * sa_family_t sa_family, __be16 port); - * Called by vxlan to notify a driver about the UDP port and socket - * address family that vxlan is listening to. It is called only when - * a new port starts listening. The operation is protected by the - * vxlan_net->sock_lock. - * - * void (*ndo_add_geneve_port)(struct net_device *dev, - * sa_family_t sa_family, __be16 port); - * Called by geneve to notify a driver about the UDP port and socket - * address family that geneve is listnening to. It is called only when - * a new port starts listening. The operation is protected by the - * geneve_net->sock_lock. - * - * void (*ndo_del_geneve_port)(struct net_device *dev, - * sa_family_t sa_family, __be16 port); - * Called by geneve to notify the driver about a UDP port and socket - * address family that geneve is not listening to anymore. The operation - * is protected by the geneve_net->sock_lock. - * - * void (*ndo_del_vxlan_port)(struct net_device *dev, - * sa_family_t sa_family, __be16 port); - * Called by vxlan to notify the driver about a UDP port and socket - * address family that vxlan is not listening to anymore. The operation - * is protected by the vxlan_net->sock_lock. + * void (*ndo_udp_tunnel_add)(struct net_device *dev, + * struct udp_tunnel_info *ti); + * Called by UDP tunnel to notify a driver about the UDP port and socket + * address family that a UDP tunnel is listening to. It is called only + * when a new port starts listening. The operation is protected by the + * RTNL. + * + * void (*ndo_udp_tunnel_del)(struct net_device *dev, + * struct udp_tunnel_info *ti); + * Called by UDP tunnel to notify the driver about a UDP port and socket + * address family that the UDP tunnel is not listening to anymore. The + * operation is protected by the RTNL. * * void* (*ndo_dfwd_add_station)(struct net_device *pdev, * struct net_device *dev) @@@ -1099,6 -1115,9 +1115,9 @@@ * appropriate rx headroom value allows avoiding skb head copy on * forward. Setting a negative value resets the rx headroom to the * default value. + * int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp); + * This function is used to set or query state related to XDP on the + * netdevice. See definition of enum xdp_netdev_command for details. * */ struct net_device_ops { @@@ -1221,8 -1240,10 +1240,10 @@@ netdev_features_t features); int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); - int (*ndo_neigh_construct)(struct neighbour *n); - void (*ndo_neigh_destroy)(struct neighbour *n); + int (*ndo_neigh_construct)(struct net_device *dev, + struct neighbour *n); + void (*ndo_neigh_destroy)(struct net_device *dev, + struct neighbour *n);
int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], @@@ -1258,18 -1279,10 +1279,10 @@@ struct netdev_phys_item_id *ppid); int (*ndo_get_phys_port_name)(struct net_device *dev, char *name, size_t len); - void (*ndo_add_vxlan_port)(struct net_device *dev, - sa_family_t sa_family, - __be16 port); - void (*ndo_del_vxlan_port)(struct net_device *dev, - sa_family_t sa_family, - __be16 port); - void (*ndo_add_geneve_port)(struct net_device *dev, - sa_family_t sa_family, - __be16 port); - void (*ndo_del_geneve_port)(struct net_device *dev, - sa_family_t sa_family, - __be16 port); + void (*ndo_udp_tunnel_add)(struct net_device *dev, + struct udp_tunnel_info *ti); + void (*ndo_udp_tunnel_del)(struct net_device *dev, + struct udp_tunnel_info *ti); void* (*ndo_dfwd_add_station)(struct net_device *pdev, struct net_device *dev); void (*ndo_dfwd_del_station)(struct net_device *pdev, @@@ -1289,6 -1302,8 +1302,8 @@@ struct sk_buff *skb); void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom); + int (*ndo_xdp)(struct net_device *dev, + struct netdev_xdp *xdp); };
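With the per-protocol vxlan/geneve callbacks folded into a single pair, a driver multiplexes on the tunnel type instead. A hedged sketch (the mydrv_* helpers are assumptions; struct udp_tunnel_info and the UDP_TUNNEL_TYPE_* constants come from include/net/udp_tunnel.h):

static void mydrv_udp_tunnel_add(struct net_device *dev,
				 struct udp_tunnel_info *ti)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	if (ti->sa_family != AF_INET)	/* e.g. hw only offloads v4 */
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		mydrv_add_vxlan_port(priv, ti->port);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		mydrv_add_geneve_port(priv, ti->port);
		break;
	default:
		break;
	}
}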
/** @@@ -1457,6 -1472,8 +1472,8 @@@ enum netdev_priv_flags * @netdev_ops: Includes several pointers to callbacks, * if one wants to override the ndo_*() functions * @ethtool_ops: Management operations + * @ndisc_ops: Includes callbacks for different IPv6 neighbour + * discovery handling. Necessary for e.g. 6LoWPAN. * @header_ops: Includes callbacks for creating,parsing,caching,etc * of Layer 2 headers. * @@@ -1484,8 -1501,7 +1501,7 @@@ * @perm_addr: Permanent hw address * @addr_assign_type: Hw address assignment type * @addr_len: Hardware address length - * @neigh_priv_len; Used in neigh_alloc(), - * initialized only in atm/clip.c + * @neigh_priv_len: Used in neigh_alloc() * @dev_id: Used to differentiate devices that share * the same link layer address * @dev_port: Used to differentiate devices that share @@@ -1594,7 -1610,8 +1610,8 @@@ * @phydev: Physical device may attach itself * for hardware timestamping * - * @qdisc_tx_busylock: XXX: need comments on this one + * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock + * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount * * @proto_down: protocol port state information can be sent to the * switch driver and used to set the phys state of the @@@ -1673,6 -1690,9 +1690,9 @@@ struct net_device #ifdef CONFIG_NET_L3_MASTER_DEV const struct l3mdev_ops *l3mdev_ops; #endif + #if IS_ENABLED(CONFIG_IPV6) + const struct ndisc_ops *ndisc_ops; + #endif
const struct header_ops *header_ops;
@@@ -1862,6 -1882,7 +1882,7 @@@ #endif struct phy_device *phydev; struct lock_class_key *qdisc_tx_busylock; + struct lock_class_key *qdisc_running_key; bool proto_down; }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@@ -1944,6 -1965,23 +1965,23 @@@ static inline void netdev_for_each_tx_q f(dev, &dev->_tx[i], arg); }
+ #define netdev_lockdep_set_classes(dev) \ + { \ + static struct lock_class_key qdisc_tx_busylock_key; \ + static struct lock_class_key qdisc_running_key; \ + static struct lock_class_key qdisc_xmit_lock_key; \ + static struct lock_class_key dev_addr_list_lock_key; \ + unsigned int i; \ + \ + (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ + (dev)->qdisc_running_key = &qdisc_running_key; \ + lockdep_set_class(&(dev)->addr_list_lock, \ + &dev_addr_list_lock_key); \ + for (i = 0; i < (dev)->num_tx_queues; i++) \ + lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ + &qdisc_xmit_lock_key); \ + } + struct netdev_queue *netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, void *accel_priv); @@@ -2233,8 -2271,8 +2271,8 @@@ struct netdev_lag_lower_state_info #define NETDEV_BONDING_INFO 0x0019 #define NETDEV_PRECHANGEUPPER 0x001A #define NETDEV_CHANGELOWERSTATE 0x001B - #define NETDEV_OFFLOAD_PUSH_VXLAN 0x001C - #define NETDEV_OFFLOAD_PUSH_GENEVE 0x001D + #define NETDEV_UDP_TUNNEL_PUSH_INFO 0x001C + #define NETDEV_CHANGE_TX_QUEUE_LEN 0x001E
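The macro gives every device instance its own set of static lockdep classes in one step; the ppp_generic hunk earlier in this diff is the in-tree caller. A stacked device that previously open-coded qdisc_tx_busylock reduces to:

static int mydrv_dev_init(struct net_device *dev)	/* name illustrative */
{
	netdev_lockdep_set_classes(dev);
	return 0;
}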
int register_netdevice_notifier(struct notifier_block *nb); int unregister_netdevice_notifier(struct notifier_block *nb); @@@ -2370,6 -2408,8 +2408,8 @@@ void synchronize_net(void) int init_dummy_netdev(struct net_device *dev);
DECLARE_PER_CPU(int, xmit_recursion); + #define XMIT_RECURSION_LIMIT 10 + static inline int dev_recursion_level(void) { return this_cpu_read(xmit_recursion); @@@ -3250,6 -3290,7 +3290,7 @@@ int dev_get_phys_port_id(struct net_dev int dev_get_phys_port_name(struct net_device *dev, char *name, size_t len); int dev_change_proto_down(struct net_device *dev, bool proto_down); + int dev_change_xdp_fd(struct net_device *dev, int fd); struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); @@@ -3799,12 -3840,30 +3840,30 @@@ void *netdev_lower_get_next_private_rcu
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter); + #define netdev_for_each_lower_dev(dev, ldev, iter) \ for (iter = (dev)->adj_list.lower.next, \ ldev = netdev_lower_get_next(dev, &(iter)); \ ldev; \ ldev = netdev_lower_get_next(dev, &(iter)))
+ struct net_device *netdev_all_lower_get_next(struct net_device *dev, + struct list_head **iter); + struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev, + struct list_head **iter); + + #define netdev_for_each_all_lower_dev(dev, ldev, iter) \ + for (iter = (dev)->all_adj_list.lower.next, \ + ldev = netdev_all_lower_get_next(dev, &(iter)); \ + ldev; \ + ldev = netdev_all_lower_get_next(dev, &(iter))) + + #define netdev_for_each_all_lower_dev_rcu(dev, ldev, iter) \ + for (iter = (dev)->all_adj_list.lower.next, \ + ldev = netdev_all_lower_get_next_rcu(dev, &(iter)); \ + ldev; \ + ldev = netdev_all_lower_get_next_rcu(dev, &(iter))) + void *netdev_adjacent_get_private(struct list_head *adj_list); void *netdev_lower_get_first_private_rcu(struct net_device *dev); struct net_device *netdev_master_upper_dev_get(struct net_device *dev); @@@ -3820,6 -3879,10 +3879,10 @@@ void *netdev_lower_dev_get_private(stru struct net_device *lower_dev); void netdev_lower_state_changed(struct net_device *lower_dev, void *lower_state_info); + int netdev_default_l2upper_neigh_construct(struct net_device *dev, + struct neighbour *n); + void netdev_default_l2upper_neigh_destroy(struct net_device *dev, + struct neighbour *n);
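The all_adj_list walkers differ from netdev_for_each_lower_dev() in that they traverse the entire lower-device tree, not just direct lowers. A sketch of the kind of check this enables (the port predicate is borrowed from the mlxsw hunks above purely for illustration; the non-RCU variant assumes RTNL is held):

static bool mydrv_upper_spans_our_ports(struct net_device *upper_dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_all_lower_dev(upper_dev, lower_dev, iter) {
		if (mlxsw_sp_port_dev_check(lower_dev))
			return true;
	}

	return false;
}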
/* RSS keys are 40 or 52 bytes long */ #define NETDEV_RSS_KEY_LEN 52 @@@ -4012,6 -4075,7 +4075,7 @@@ static inline bool net_gso_ok(netdev_fe BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
return (features & feature) == feature; } @@@ -4145,13 -4209,6 +4209,13 @@@ static inline void netif_keep_dst(struc dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); }
+/* return true if dev can't cope with mtu frames that need vlan tag insertion */ +static inline bool netif_reduces_vlan_mtu(struct net_device *dev) +{ + /* TODO: reserve and use an additional IFF bit, if we get more users */ + return dev->priv_flags & IFF_MACSEC; +} + extern struct pernet_operations __net_initdata loopback_net_ops;
/* Logging, debugging and troubleshooting/diagnostic helpers. */ diff --combined include/net/netfilter/nf_conntrack.h index b6083c3,5d3397f..092ca19 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@@ -85,6 -85,9 +85,9 @@@ struct nf_conn spinlock_t lock; u16 cpu;
+ #ifdef CONFIG_NF_CONNTRACK_ZONES + struct nf_conntrack_zone zone; + #endif /* XXX should I move this to the tail ? - Y.K */ /* These are my tuples; original and reply */ struct nf_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX]; @@@ -284,17 -287,10 +287,18 @@@ static inline bool nf_is_loopback_packe return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK; }
+/* jiffies until ct expires, 0 if already expired */ +static inline unsigned long nf_ct_expires(const struct nf_conn *ct) +{ + long timeout = (long)ct->timeout.expires - (long)jiffies; + + return timeout > 0 ? timeout : 0; +} + struct kernel_param;
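Because the helper clamps at zero, callers can report a remaining lifetime without underflowing on already-expired entries; e.g. a dump path (sketch) would do:

/* seconds of lifetime left; 0 if the entry already expired */
u32 timeout = nf_ct_expires(ct) / HZ;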
int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp); + int nf_conntrack_hash_resize(unsigned int hashsize); extern unsigned int nf_conntrack_htable_size; extern unsigned int nf_conntrack_max;
diff --combined include/net/switchdev.h index 1d8e158,9023e3e..62f6a96 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@@ -60,7 -60,7 +60,7 @@@ struct switchdev_attr struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */ u8 stp_state; /* PORT_STP_STATE */ unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */ - u32 ageing_time; /* BRIDGE_AGEING_TIME */ + clock_t ageing_time; /* BRIDGE_AGEING_TIME */ bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */ } u; }; @@@ -227,6 -227,8 +227,8 @@@ void switchdev_port_fwd_mark_set(struc struct net_device *group_dev, bool joining);
+ bool switchdev_port_same_parent_id(struct net_device *a, + struct net_device *b); #else
static inline void switchdev_deferred_process(void) @@@ -351,6 -353,12 +353,12 @@@ static inline void switchdev_port_fwd_m { }
+ static inline bool switchdev_port_same_parent_id(struct net_device *a, + struct net_device *b) + { + return false; + } + #endif
#endif /* _LINUX_SWITCHDEV_H_ */ diff --combined kernel/events/core.c index 43d43a2d,b1891b6..195e765 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@@ -1678,33 -1678,12 +1678,33 @@@ static bool is_orphaned_event(struct pe return event->state == PERF_EVENT_STATE_DEAD; }
-static inline int pmu_filter_match(struct perf_event *event) +static inline int __pmu_filter_match(struct perf_event *event) { struct pmu *pmu = event->pmu; return pmu->filter_match ? pmu->filter_match(event) : 1; }
+/* + * Check whether we should attempt to schedule an event group based on + * PMU-specific filtering. An event group can consist of HW and SW events, + * potentially with a SW leader, so we must check all the filters, to + * determine whether a group is schedulable: + */ +static inline int pmu_filter_match(struct perf_event *event) +{ + struct perf_event *child; + + if (!__pmu_filter_match(event)) + return 0; + + list_for_each_entry(child, &event->sibling_list, group_entry) { + if (!__pmu_filter_match(child)) + return 0; + } + + return 1; +} + static inline int event_filter_match(struct perf_event *event) { @@@ -5574,16 -5553,26 +5574,26 @@@ void perf_output_sample(struct perf_out }
if (sample_type & PERF_SAMPLE_RAW) { - if (data->raw) { - u32 raw_size = data->raw->size; - u32 real_size = round_up(raw_size + sizeof(u32), - sizeof(u64)) - sizeof(u32); - u64 zero = 0; - - perf_output_put(handle, real_size); - __output_copy(handle, data->raw->data, raw_size); - if (real_size - raw_size) - __output_copy(handle, &zero, real_size - raw_size); + struct perf_raw_record *raw = data->raw; + + if (raw) { + struct perf_raw_frag *frag = &raw->frag; + + perf_output_put(handle, raw->size); + do { + if (frag->copy) { + __output_custom(handle, frag->copy, + frag->data, frag->size); + } else { + __output_copy(handle, frag->data, + frag->size); + } + if (perf_raw_frag_last(frag)) + break; + frag = frag->next; + } while (1); + if (frag->pad) + __output_skip(handle, NULL, frag->pad); } else { struct { u32 size; @@@ -5708,14 -5697,28 +5718,28 @@@ void perf_prepare_sample(struct perf_ev }
if (sample_type & PERF_SAMPLE_RAW) { - int size = sizeof(u32); - - if (data->raw) - size += data->raw->size; - else - size += sizeof(u32); + struct perf_raw_record *raw = data->raw; + int size; + + if (raw) { + struct perf_raw_frag *frag = &raw->frag; + u32 sum = 0; + + do { + sum += frag->size; + if (perf_raw_frag_last(frag)) + break; + frag = frag->next; + } while (1); + + size = round_up(sum + sizeof(u32), sizeof(u64)); + raw->size = size - sizeof(u32); + frag->pad = raw->size - sum; + } else { + size = sizeof(u64); + }
- header->size += round_up(size, sizeof(u64)); + header->size += size; }
if (sample_type & PERF_SAMPLE_BRANCH_STACK) { @@@ -7352,7 -7355,7 +7376,7 @@@ static struct pmu perf_swevent = static int perf_tp_filter_match(struct perf_event *event, struct perf_sample_data *data) { - void *record = data->raw->data; + void *record = data->raw->frag.data;
/* only top level events have filters set */ if (event->parent) @@@ -7408,8 -7411,10 +7432,10 @@@ void perf_tp_event(u16 event_type, u64 struct perf_event *event;
struct perf_raw_record raw = { - .size = entry_size, - .data = record, + .frag = { + .size = entry_size, + .data = record, + }, };
perf_sample_data_init(&data, 0, 0); @@@ -7550,7 -7555,7 +7576,7 @@@ static void perf_event_free_bpf_prog(st prog = event->tp_event->prog; if (prog) { event->tp_event->prog = NULL; - bpf_prog_put_rcu(prog); + bpf_prog_put(prog); } }
diff --combined net/8021q/vlan_dev.c index 516b0e7,c8f422c..fbfacd5 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@@ -146,12 -146,10 +146,12 @@@ static netdev_tx_t vlan_dev_hard_start_
static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) { - /* TODO: gotta make sure the underlying layer can handle it, - * maybe an IFF_VLAN_CAPABLE flag for devices? - */ - if (vlan_dev_priv(dev)->real_dev->mtu < new_mtu) + struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; + unsigned int max_mtu = real_dev->mtu; + + if (netif_reduces_vlan_mtu(real_dev)) + max_mtu -= VLAN_HLEN; + if (max_mtu < new_mtu) return -ERANGE;
dev->mtu = new_mtu; @@@ -792,6 -790,8 +792,8 @@@ static const struct net_device_ops vlan .ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup, #endif .ndo_fix_features = vlan_dev_fix_features, + .ndo_neigh_construct = netdev_default_l2upper_neigh_construct, + .ndo_neigh_destroy = netdev_default_l2upper_neigh_destroy, .ndo_fdb_add = switchdev_port_fdb_add, .ndo_fdb_del = switchdev_port_fdb_del, .ndo_fdb_dump = switchdev_port_fdb_dump, diff --combined net/batman-adv/bridge_loop_avoidance.c index 825a5cd,e4f7494..ad2ffe1 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@@ -48,6 -48,7 +48,7 @@@
#include "hard-interface.h" #include "hash.h" + #include "log.h" #include "originator.h" #include "packet.h" #include "sysfs.h" @@@ -177,21 -178,10 +178,21 @@@ static void batadv_backbone_gw_put(stru static void batadv_claim_release(struct kref *ref) { struct batadv_bla_claim *claim; + struct batadv_bla_backbone_gw *old_backbone_gw;
claim = container_of(ref, struct batadv_bla_claim, refcount);
- batadv_backbone_gw_put(claim->backbone_gw); + spin_lock_bh(&claim->backbone_lock); + old_backbone_gw = claim->backbone_gw; + claim->backbone_gw = NULL; + spin_unlock_bh(&claim->backbone_lock); + + spin_lock_bh(&old_backbone_gw->crc_lock); + old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); + spin_unlock_bh(&old_backbone_gw->crc_lock); + + batadv_backbone_gw_put(old_backbone_gw); + kfree_rcu(claim, rcu); }
@@@ -429,12 -419,9 +430,12 @@@ static void batadv_bla_send_claim(struc break; }
- if (vid & BATADV_VLAN_HAS_TAG) + if (vid & BATADV_VLAN_HAS_TAG) { skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), vid & VLAN_VID_MASK); + if (!skb) + goto out; + }
skb_reset_mac_header(skb); skb->protocol = eth_type_trans(skb, soft_iface); @@@ -688,10 -675,8 +689,10 @@@ static void batadv_bla_add_claim(struc const u8 *mac, const unsigned short vid, struct batadv_bla_backbone_gw *backbone_gw) { + struct batadv_bla_backbone_gw *old_backbone_gw; struct batadv_bla_claim *claim; struct batadv_bla_claim search_claim; + bool remove_crc = false; int hash_added;
ether_addr_copy(search_claim.addr, mac); @@@ -705,10 -690,8 +706,10 @@@ return;
ether_addr_copy(claim->addr, mac); + spin_lock_init(&claim->backbone_lock); claim->vid = vid; claim->lasttime = jiffies; + kref_get(&backbone_gw->refcount); claim->backbone_gw = backbone_gw;
kref_init(&claim->refcount); @@@ -736,26 -719,15 +737,26 @@@ "bla_add_claim(): changing ownership for %pM, vid %d\n", mac, BATADV_PRINT_VID(vid));
- spin_lock_bh(&claim->backbone_gw->crc_lock); - claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); - spin_unlock_bh(&claim->backbone_gw->crc_lock); - batadv_backbone_gw_put(claim->backbone_gw); + remove_crc = true; } - /* set (new) backbone gw */ + + /* replace backbone_gw atomically and adjust reference counters */ + spin_lock_bh(&claim->backbone_lock); + old_backbone_gw = claim->backbone_gw; kref_get(&backbone_gw->refcount); claim->backbone_gw = backbone_gw; + spin_unlock_bh(&claim->backbone_lock); + + if (remove_crc) { + /* remove claim address from old backbone_gw */ + spin_lock_bh(&old_backbone_gw->crc_lock); + old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); + spin_unlock_bh(&old_backbone_gw->crc_lock); + }
+ batadv_backbone_gw_put(old_backbone_gw); + + /* add claim address to new backbone_gw */ spin_lock_bh(&backbone_gw->crc_lock); backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); spin_unlock_bh(&backbone_gw->crc_lock); @@@ -766,26 -738,6 +767,26 @@@ claim_free_ref }
/** + * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of + * claim + * @claim: claim whose backbone_gw should be returned + * + * Return: valid reference to claim::backbone_gw + */ +static struct batadv_bla_backbone_gw * +batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim) +{ + struct batadv_bla_backbone_gw *backbone_gw; + + spin_lock_bh(&claim->backbone_lock); + backbone_gw = claim->backbone_gw; + kref_get(&backbone_gw->refcount); + spin_unlock_bh(&claim->backbone_lock); + + return backbone_gw; +} + +/** * batadv_bla_del_claim - delete a claim from the claim hash * @bat_priv: the bat priv with all the soft interface information * @mac: mac address of the claim to be removed @@@ -809,6 -761,10 +810,6 @@@ static void batadv_bla_del_claim(struc batadv_choose_claim, claim); batadv_claim_put(claim); /* reference from the hash is gone */
- spin_lock_bh(&claim->backbone_gw->crc_lock); - claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); - spin_unlock_bh(&claim->backbone_gw->crc_lock); - /* don't need the reference from hash_find() anymore */ batadv_claim_put(claim); } @@@ -1261,7 -1217,6 +1262,7 @@@ static void batadv_bla_purge_claims(str struct batadv_hard_iface *primary_if, int now) { + struct batadv_bla_backbone_gw *backbone_gw; struct batadv_bla_claim *claim; struct hlist_head *head; struct batadv_hashtable *hash; @@@ -1276,17 -1231,14 +1277,17 @@@
rcu_read_lock(); hlist_for_each_entry_rcu(claim, head, hash_entry) { + backbone_gw = batadv_bla_claim_get_backbone_gw(claim); if (now) goto purge_now; - if (!batadv_compare_eth(claim->backbone_gw->orig, + + if (!batadv_compare_eth(backbone_gw->orig, primary_if->net_dev->dev_addr)) - continue; + goto skip; + if (!batadv_has_timed_out(claim->lasttime, BATADV_BLA_CLAIM_TIMEOUT)) - continue; + goto skip;
batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_purge_claims(): %pM, vid %d, time out\n", @@@ -1294,10 -1246,8 +1295,10 @@@
purge_now: batadv_handle_unclaim(bat_priv, primary_if, - claim->backbone_gw->orig, + backbone_gw->orig, claim->addr, claim->vid); +skip: + batadv_backbone_gw_put(backbone_gw); } rcu_read_unlock(); } @@@ -1808,11 -1758,9 +1809,11 @@@ batadv_bla_loopdetect_check(struct bata bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid, bool is_bcast) { + struct batadv_bla_backbone_gw *backbone_gw; struct ethhdr *ethhdr; struct batadv_bla_claim search_claim, *claim = NULL; struct batadv_hard_iface *primary_if; + bool own_claim; bool ret;
ethhdr = eth_hdr(skb); @@@ -1847,12 -1795,8 +1848,12 @@@ }
/* if it is our own claim ... */ - if (batadv_compare_eth(claim->backbone_gw->orig, - primary_if->net_dev->dev_addr)) { + backbone_gw = batadv_bla_claim_get_backbone_gw(claim); + own_claim = batadv_compare_eth(backbone_gw->orig, + primary_if->net_dev->dev_addr); + batadv_backbone_gw_put(backbone_gw); + + if (own_claim) { /* ... allow it in any case */ claim->lasttime = jiffies; goto allow; @@@ -1916,9 -1860,7 +1917,9 @@@ bool batadv_bla_tx(struct batadv_priv * { struct ethhdr *ethhdr; struct batadv_bla_claim search_claim, *claim = NULL; + struct batadv_bla_backbone_gw *backbone_gw; struct batadv_hard_iface *primary_if; + bool client_roamed; bool ret = false;
primary_if = batadv_primary_if_get_selected(bat_priv); @@@ -1948,12 -1890,8 +1949,12 @@@ goto allow;
/* check if we are responsible. */ - if (batadv_compare_eth(claim->backbone_gw->orig, - primary_if->net_dev->dev_addr)) { + backbone_gw = batadv_bla_claim_get_backbone_gw(claim); + client_roamed = batadv_compare_eth(backbone_gw->orig, + primary_if->net_dev->dev_addr); + batadv_backbone_gw_put(backbone_gw); + + if (client_roamed) { /* if yes, the client has roamed and we have * to unclaim it. */ @@@ -2001,7 -1939,6 +2002,7 @@@ int batadv_bla_claim_table_seq_print_te struct net_device *net_dev = (struct net_device *)seq->private; struct batadv_priv *bat_priv = netdev_priv(net_dev); struct batadv_hashtable *hash = bat_priv->bla.claim_hash; + struct batadv_bla_backbone_gw *backbone_gw; struct batadv_bla_claim *claim; struct batadv_hard_iface *primary_if; struct hlist_head *head; @@@ -2026,21 -1963,17 +2027,21 @@@
rcu_read_lock(); hlist_for_each_entry_rcu(claim, head, hash_entry) { - is_own = batadv_compare_eth(claim->backbone_gw->orig, + backbone_gw = batadv_bla_claim_get_backbone_gw(claim); + + is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);
- spin_lock_bh(&claim->backbone_gw->crc_lock); - backbone_crc = claim->backbone_gw->crc; - spin_unlock_bh(&claim->backbone_gw->crc_lock); + spin_lock_bh(&backbone_gw->crc_lock); + backbone_crc = backbone_gw->crc; + spin_unlock_bh(&backbone_gw->crc_lock); seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n", claim->addr, BATADV_PRINT_VID(claim->vid), - claim->backbone_gw->orig, + backbone_gw->orig, (is_own ? 'x' : ' '), backbone_crc); + + batadv_backbone_gw_put(backbone_gw); } rcu_read_unlock(); } diff --combined net/batman-adv/distributed-arp-table.c index aee3b39,fa76465..b1cc8bf --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@@ -45,9 -45,11 +45,11 @@@
#include "hard-interface.h" #include "hash.h" + #include "log.h" #include "originator.h" #include "send.h" #include "translation-table.h" + #include "tvlv.h"
static void batadv_dat_purge(struct work_struct *work);
@@@ -1009,12 -1011,9 +1011,12 @@@ bool batadv_dat_snoop_outgoing_arp_requ if (!skb_new) goto out;
- if (vid & BATADV_VLAN_HAS_TAG) + if (vid & BATADV_VLAN_HAS_TAG) { skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q), vid & VLAN_VID_MASK); + if (!skb_new) + goto out; + }
skb_reset_mac_header(skb_new); skb_new->protocol = eth_type_trans(skb_new, @@@ -1092,12 -1091,9 +1094,12 @@@ bool batadv_dat_snoop_incoming_arp_requ */ skb_reset_mac_header(skb_new);
- if (vid & BATADV_VLAN_HAS_TAG) + if (vid & BATADV_VLAN_HAS_TAG) { skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q), vid & VLAN_VID_MASK); + if (!skb_new) + goto out; + }
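Both DAT hunks fix the same hazard: vlan_insert_tag() consumes the skb on failure (it frees it and returns NULL), so continuing with the old pointer would be a use-after-free, and the added braces carry the mandatory NULL re-check. A self-contained sketch of the "helper consumes its argument on error" contract, with illustrative names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Returns a (possibly reallocated) buffer with room for a 4-byte tag,
 * or NULL on failure -- in which case the input has already been
 * freed, mirroring vlan_insert_tag()'s contract.
 */
static char *insert_tag(char *buf, size_t len)
{
        char *n = realloc(buf, len + 4);

        if (!n) {
                free(buf);              /* consume the input on error */
                return NULL;
        }
        memmove(n + 4, n, len);         /* make room at the front */
        memset(n, 0, 4);                /* the tag goes here */
        return n;
}

int main(void)
{
        char *pkt = strdup("payload");

        pkt = insert_tag(pkt, strlen("payload") + 1);
        if (!pkt)               /* must re-check: old pointer is gone */
                return 1;
        puts(pkt + 4);
        free(pkt);
        return 0;
}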
/* To preserve backwards compatibility, the node has to choose the outgoing * format based on the incoming request packet type. The assumption is diff --combined net/batman-adv/originator.c index ab8c4f9,7d1e542..3940b5d --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@@ -34,11 -34,13 +34,13 @@@ #include <linux/spinlock.h> #include <linux/workqueue.h>

+ #include "bat_algo.h" #include "distributed-arp-table.h" #include "fragmentation.h" #include "gateway_client.h" #include "hard-interface.h" #include "hash.h" + #include "log.h" #include "multicast.h" #include "network-coding.h" #include "routing.h" @@@ -251,10 -253,8 +253,8 @@@ static void batadv_neigh_node_release(s struct hlist_node *node_tmp; struct batadv_neigh_node *neigh_node; struct batadv_neigh_ifinfo *neigh_ifinfo; - struct batadv_algo_ops *bao;
neigh_node = container_of(ref, struct batadv_neigh_node, refcount); - bao = neigh_node->orig_node->bat_priv->bat_algo_ops;
hlist_for_each_entry_safe(neigh_ifinfo, node_tmp, &neigh_node->ifinfo_list, list) { @@@ -263,9 -263,6 +263,6 @@@
batadv_hardif_neigh_put(neigh_node->hardif_neigh);
- if (bao->bat_neigh_free) - bao->bat_neigh_free(neigh_node); - batadv_hardif_put(neigh_node->if_incoming);
kfree_rcu(neigh_node, rcu); @@@ -537,8 -534,8 +534,8 @@@ batadv_hardif_neigh_create(struct batad
kref_init(&hardif_neigh->refcount);
- if (bat_priv->bat_algo_ops->bat_hardif_neigh_init) - bat_priv->bat_algo_ops->bat_hardif_neigh_init(hardif_neigh); + if (bat_priv->algo_ops->neigh.hardif_init) + bat_priv->algo_ops->neigh.hardif_init(hardif_neigh);
hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);
@@@ -602,19 -599,19 +599,19 @@@ batadv_hardif_neigh_get(const struct ba }
/** - * batadv_neigh_node_new - create and init a new neigh_node object + * batadv_neigh_node_create - create a neigh node object * @orig_node: originator object representing the neighbour * @hard_iface: the interface where the neighbour is connected to * @neigh_addr: the mac address of the neighbour interface * * Allocates a new neigh_node object and initialises all the generic fields. * - * Return: neighbor when found. Othwerwise NULL + * Return: the neighbour node if found or created or NULL otherwise. */ - struct batadv_neigh_node * - batadv_neigh_node_new(struct batadv_orig_node *orig_node, - struct batadv_hard_iface *hard_iface, - const u8 *neigh_addr) + static struct batadv_neigh_node * + batadv_neigh_node_create(struct batadv_orig_node *orig_node, + struct batadv_hard_iface *hard_iface, + const u8 *neigh_addr) { struct batadv_neigh_node *neigh_node; struct batadv_hardif_neigh_node *hardif_neigh = NULL; @@@ -667,6 -664,29 +664,29 @@@ out }
/** + * batadv_neigh_node_get_or_create - retrieve or create a neigh node object + * @orig_node: originator object representing the neighbour + * @hard_iface: the interface where the neighbour is connected to + * @neigh_addr: the mac address of the neighbour interface + * + * Return: the neighbour node if found or created or NULL otherwise. + */ + struct batadv_neigh_node * + batadv_neigh_node_get_or_create(struct batadv_orig_node *orig_node, + struct batadv_hard_iface *hard_iface, + const u8 *neigh_addr) + { + struct batadv_neigh_node *neigh_node = NULL; + + /* first check without locking to avoid the overhead */ + neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr); + if (neigh_node) + return neigh_node; + + return batadv_neigh_node_create(orig_node, hard_iface, neigh_addr); + } + + /** * batadv_hardif_neigh_seq_print_text - print the single hop neighbour list * @seq: neighbour table seq_file struct * @offset: not used @@@ -686,17 -706,17 +706,17 @@@ int batadv_hardif_neigh_seq_print_text( seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n", BATADV_SOURCE_VERSION, primary_if->net_dev->name, primary_if->net_dev->dev_addr, net_dev->name, - bat_priv->bat_algo_ops->name); + bat_priv->algo_ops->name);
batadv_hardif_put(primary_if);
- if (!bat_priv->bat_algo_ops->bat_neigh_print) { + if (!bat_priv->algo_ops->neigh.print) { seq_puts(seq, "No printing function for this routing protocol\n"); return 0; }
- bat_priv->bat_algo_ops->bat_neigh_print(bat_priv, seq); + bat_priv->algo_ops->neigh.print(bat_priv, seq); return 0; }
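batadv_neigh_node_get_or_create(), added a little further up, first tries a lockless lookup and only falls back to the creation path on a miss, so the common case pays no locking cost. The kernel side leans on RCU for the lockless pass; in this hedged userspace sketch plain reads stand in for RCU, and the re-check under the lock guards against two racing creators:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct node {
        char key[16];
        struct node *next;
};

static struct node *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct node *lookup(const char *key)
{
        struct node *n;

        for (n = head; n; n = n->next)
                if (!strcmp(n->key, key))
                        return n;
        return NULL;
}

static struct node *get_or_create(const char *key)
{
        struct node *n;

        /* first check without locking to avoid the overhead */
        n = lookup(key);
        if (n)
                return n;

        pthread_mutex_lock(&list_lock);
        n = lookup(key);                /* re-check: we may have raced */
        if (!n) {
                n = calloc(1, sizeof(*n));
                if (n) {
                        strncpy(n->key, key, sizeof(n->key) - 1);
                        n->next = head;
                        head = n;
                }
        }
        pthread_mutex_unlock(&list_lock);
        return n;
}

int main(void)
{
        struct node *a = get_or_create("neigh0");
        struct node *b = get_or_create("neigh0");

        return a == b ? 0 : 1;  /* the second call reuses the entry */
}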
@@@ -747,8 -767,8 +767,8 @@@ static void batadv_orig_node_free_rcu(s
batadv_frag_purge_orig(orig_node, NULL);
- if (orig_node->bat_priv->bat_algo_ops->bat_orig_free) - orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node); + if (orig_node->bat_priv->algo_ops->orig.free) + orig_node->bat_priv->algo_ops->orig.free(orig_node);
kfree(orig_node->tt_buff); kfree(orig_node); @@@ -765,8 -785,6 +785,8 @@@ static void batadv_orig_node_release(st struct batadv_neigh_node *neigh_node; struct batadv_orig_node *orig_node; struct batadv_orig_ifinfo *orig_ifinfo; + struct batadv_orig_node_vlan *vlan; + struct batadv_orig_ifinfo *last_candidate;
orig_node = container_of(ref, struct batadv_orig_node, refcount);
@@@ -784,21 -802,8 +804,21 @@@ hlist_del_rcu(&orig_ifinfo->list); batadv_orig_ifinfo_put(orig_ifinfo); } + + last_candidate = orig_node->last_bonding_candidate; + orig_node->last_bonding_candidate = NULL; spin_unlock_bh(&orig_node->neigh_list_lock);
+ if (last_candidate) + batadv_orig_ifinfo_put(last_candidate); + + spin_lock_bh(&orig_node->vlan_list_lock); + hlist_for_each_entry_safe(vlan, node_tmp, &orig_node->vlan_list, list) { + hlist_del_rcu(&vlan->list); + batadv_orig_node_vlan_put(vlan); + } + spin_unlock_bh(&orig_node->vlan_list_lock); + /* Free nc_nodes */ batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
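The release hunk above detaches last_bonding_candidate while holding neigh_list_lock but calls batadv_orig_ifinfo_put() only after unlocking, so a final put (which may free the object and do arbitrary work) never runs under the spinlock. The detach-then-release idiom in a small userspace model:

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct ifinfo {
        atomic_int refcount;
};

struct orig {
        pthread_mutex_t lock;
        struct ifinfo *last_candidate;  /* protected by lock */
};

static void ifinfo_put(struct ifinfo *i)
{
        if (atomic_fetch_sub(&i->refcount, 1) == 1)
                free(i);        /* may be arbitrarily expensive */
}

static void orig_release(struct orig *o)
{
        struct ifinfo *old;

        pthread_mutex_lock(&o->lock);
        old = o->last_candidate;        /* detach under the lock */
        o->last_candidate = NULL;
        pthread_mutex_unlock(&o->lock);

        if (old)
                ifinfo_put(old);        /* release outside the lock */
}

int main(void)
{
        struct orig o = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct ifinfo *i = calloc(1, sizeof(*i));

        atomic_init(&i->refcount, 1);
        o.last_candidate = i;
        orig_release(&o);
        return 0;
}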
@@@ -1092,12 -1097,12 +1112,12 @@@ batadv_find_best_neighbor(struct batadv struct batadv_hard_iface *if_outgoing) { struct batadv_neigh_node *best = NULL, *neigh; - struct batadv_algo_ops *bao = bat_priv->bat_algo_ops; + struct batadv_algo_ops *bao = bat_priv->algo_ops;
rcu_read_lock(); hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) { - if (best && (bao->bat_neigh_cmp(neigh, if_outgoing, - best, if_outgoing) <= 0)) + if (best && (bao->neigh.cmp(neigh, if_outgoing, best, + if_outgoing) <= 0)) continue;
if (!kref_get_unless_zero(&neigh->refcount)) @@@ -1249,18 -1254,17 +1269,17 @@@ int batadv_orig_seq_print_text(struct s seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n", BATADV_SOURCE_VERSION, primary_if->net_dev->name, primary_if->net_dev->dev_addr, net_dev->name, - bat_priv->bat_algo_ops->name); + bat_priv->algo_ops->name);
batadv_hardif_put(primary_if);
- if (!bat_priv->bat_algo_ops->bat_orig_print) { + if (!bat_priv->algo_ops->orig.print) { seq_puts(seq, "No printing function for this routing protocol\n"); return 0; }
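Every bat_algo_ops site in this file is converted the same way: the flat bat_* callback pointers become iface/neigh/orig sub-structs (see the batadv_algo_*_ops definitions in types.h further down), so call sites read algo_ops->orig.print instead of bat_algo_ops->bat_orig_print. A toy illustration of the regrouping, with hypothetical names:

#include <stdio.h>

/* callbacks grouped by the object they operate on */
struct demo_orig_ops {
        void (*print)(void);
};

struct demo_algo_ops {
        const char *name;
        struct demo_orig_ops orig;
};

static void demo_orig_print(void)
{
        printf("originator table\n");
}

static const struct demo_algo_ops demo_ops = {
        .name = "DEMO",
        .orig = { .print = demo_orig_print },
};

int main(void)
{
        if (demo_ops.orig.print)        /* optional callback, as in the diff */
                demo_ops.orig.print();
        return 0;
}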
- bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, - BATADV_IF_DEFAULT); + bat_priv->algo_ops->orig.print(bat_priv, seq, BATADV_IF_DEFAULT);
return 0; } @@@ -1287,7 -1291,7 +1306,7 @@@ int batadv_orig_hardif_seq_print_text(s }
bat_priv = netdev_priv(hard_iface->soft_iface); - if (!bat_priv->bat_algo_ops->bat_orig_print) { + if (!bat_priv->algo_ops->orig.print) { seq_puts(seq, "No printing function for this routing protocol\n"); goto out; @@@ -1301,9 -1305,9 +1320,9 @@@ seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n", BATADV_SOURCE_VERSION, hard_iface->net_dev->name, hard_iface->net_dev->dev_addr, - hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name); + hard_iface->soft_iface->name, bat_priv->algo_ops->name);
- bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface); + bat_priv->algo_ops->orig.print(bat_priv, seq, hard_iface);
out: if (hard_iface) @@@ -1315,7 -1319,7 +1334,7 @@@ int batadv_orig_hash_add_if(struct bata int max_if_num) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); - struct batadv_algo_ops *bao = bat_priv->bat_algo_ops; + struct batadv_algo_ops *bao = bat_priv->algo_ops; struct batadv_hashtable *hash = bat_priv->orig_hash; struct hlist_head *head; struct batadv_orig_node *orig_node; @@@ -1331,9 -1335,8 +1350,8 @@@ rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, head, hash_entry) { ret = 0; - if (bao->bat_orig_add_if) - ret = bao->bat_orig_add_if(orig_node, - max_if_num); + if (bao->orig.add_if) + ret = bao->orig.add_if(orig_node, max_if_num); if (ret == -ENOMEM) goto err; } @@@ -1355,7 -1358,7 +1373,7 @@@ int batadv_orig_hash_del_if(struct bata struct hlist_head *head; struct batadv_hard_iface *hard_iface_tmp; struct batadv_orig_node *orig_node; - struct batadv_algo_ops *bao = bat_priv->bat_algo_ops; + struct batadv_algo_ops *bao = bat_priv->algo_ops; u32 i; int ret;
@@@ -1368,10 -1371,9 +1386,9 @@@ rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, head, hash_entry) { ret = 0; - if (bao->bat_orig_del_if) - ret = bao->bat_orig_del_if(orig_node, - max_if_num, - hard_iface->if_num); + if (bao->orig.del_if) + ret = bao->orig.del_if(orig_node, max_if_num, + hard_iface->if_num); if (ret == -ENOMEM) goto err; } diff --combined net/batman-adv/routing.c index bfac086,af8e119..7602c00 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@@ -40,12 -40,15 +40,15 @@@ #include "fragmentation.h" #include "hard-interface.h" #include "icmp_socket.h" + #include "log.h" #include "network-coding.h" #include "originator.h" #include "packet.h" #include "send.h" #include "soft-interface.h" + #include "tp_meter.h" #include "translation-table.h" + #include "tvlv.h"
static int batadv_route_unicast_packet(struct sk_buff *skb, struct batadv_hard_iface *recv_if); @@@ -268,10 -271,19 +271,19 @@@ static int batadv_recv_my_icmp_packet(s icmph->ttl = BATADV_TTL;
res = batadv_send_skb_to_orig(skb, orig_node, NULL); - if (res != NET_XMIT_DROP) - ret = NET_RX_SUCCESS; + if (res == -1) + goto out; + + ret = NET_RX_SUCCESS;
break; + case BATADV_TP: + if (!pskb_may_pull(skb, sizeof(struct batadv_icmp_tp_packet))) + goto out; + + batadv_tp_meter_recv(bat_priv, skb); + ret = NET_RX_SUCCESS; + goto out; default: /* drop unknown type */ goto out; @@@ -290,7 -302,7 +302,7 @@@ static int batadv_recv_icmp_ttl_exceede struct batadv_hard_iface *primary_if = NULL; struct batadv_orig_node *orig_node = NULL; struct batadv_icmp_packet *icmp_packet; - int ret = NET_RX_DROP; + int res, ret = NET_RX_DROP;
icmp_packet = (struct batadv_icmp_packet *)skb->data;
@@@ -321,7 -333,8 +333,8 @@@ icmp_packet->msg_type = BATADV_TTL_EXCEEDED; icmp_packet->ttl = BATADV_TTL;
- if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP) + res = batadv_send_skb_to_orig(skb, orig_node, NULL); + if (res != -1) ret = NET_RX_SUCCESS;
out: @@@ -341,7 -354,7 +354,7 @@@ int batadv_recv_icmp_packet(struct sk_b struct ethhdr *ethhdr; struct batadv_orig_node *orig_node = NULL; int hdr_size = sizeof(struct batadv_icmp_header); - int ret = NET_RX_DROP; + int res, ret = NET_RX_DROP;
/* drop packet if it has not necessary minimum size */ if (unlikely(!pskb_may_pull(skb, hdr_size))) @@@ -408,7 -421,8 +421,8 @@@ icmph->ttl--;
/* route it */ - if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP) + res = batadv_send_skb_to_orig(skb, orig_node, recv_if); + if (res != -1) ret = NET_RX_SUCCESS;
out: @@@ -456,29 -470,6 +470,29 @@@ static int batadv_check_unicast_packet( }
/** + * batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node + * @orig_node: originator node whose bonding candidates should be replaced + * @new_candidate: new bonding candidate or NULL + */ +static void +batadv_last_bonding_replace(struct batadv_orig_node *orig_node, + struct batadv_orig_ifinfo *new_candidate) +{ + struct batadv_orig_ifinfo *old_candidate; + + spin_lock_bh(&orig_node->neigh_list_lock); + old_candidate = orig_node->last_bonding_candidate; + + if (new_candidate) + kref_get(&new_candidate->refcount); + orig_node->last_bonding_candidate = new_candidate; + spin_unlock_bh(&orig_node->neigh_list_lock); + + if (old_candidate) + batadv_orig_ifinfo_put(old_candidate); +} + +/** * batadv_find_router - find a suitable router for this originator * @bat_priv: the bat priv with all the soft interface information * @orig_node: the destination node @@@ -492,7 -483,7 +506,7 @@@ batadv_find_router(struct batadv_priv * struct batadv_orig_node *orig_node, struct batadv_hard_iface *recv_if) { - struct batadv_algo_ops *bao = bat_priv->bat_algo_ops; + struct batadv_algo_ops *bao = bat_priv->algo_ops; struct batadv_neigh_node *first_candidate_router = NULL; struct batadv_neigh_node *next_candidate_router = NULL; struct batadv_neigh_node *router, *cand_router = NULL; @@@ -546,9 -537,9 +560,9 @@@ /* alternative candidate should be good enough to be * considered */ - if (!bao->bat_neigh_is_similar_or_better(cand_router, - cand->if_outgoing, - router, recv_if)) + if (!bao->neigh.is_similar_or_better(cand_router, + cand->if_outgoing, router, + recv_if)) goto next;
/* don't use the same router twice */ @@@ -585,6 -576,10 +599,6 @@@ next } rcu_read_unlock();
- /* last_bonding_candidate is reset below, remove the old reference. */ - if (orig_node->last_bonding_candidate) - batadv_orig_ifinfo_put(orig_node->last_bonding_candidate); - /* After finding candidates, handle the three cases: * 1) there is a next candidate, use that * 2) there is no next candidate, use the first of the list @@@ -593,28 -588,21 +607,28 @@@ if (next_candidate) { batadv_neigh_node_put(router);
- /* remove references to first candidate, we don't need it. */ - if (first_candidate) { - batadv_neigh_node_put(first_candidate_router); - batadv_orig_ifinfo_put(first_candidate); - } + kref_get(&next_candidate_router->refcount); router = next_candidate_router; - orig_node->last_bonding_candidate = next_candidate; + batadv_last_bonding_replace(orig_node, next_candidate); } else if (first_candidate) { batadv_neigh_node_put(router);
- /* refcounting has already been done in the loop above. */ + kref_get(&first_candidate_router->refcount); router = first_candidate_router; - orig_node->last_bonding_candidate = first_candidate; + batadv_last_bonding_replace(orig_node, first_candidate); } else { - orig_node->last_bonding_candidate = NULL; + batadv_last_bonding_replace(orig_node, NULL); + } + + /* cleanup of candidates */ + if (first_candidate) { + batadv_neigh_node_put(first_candidate_router); + batadv_orig_ifinfo_put(first_candidate); + } + + if (next_candidate) { + batadv_neigh_node_put(next_candidate_router); + batadv_orig_ifinfo_put(next_candidate); }
return router; @@@ -671,6 -659,8 +685,8 @@@ static int batadv_route_unicast_packet(
len = skb->len; res = batadv_send_skb_to_orig(skb, orig_node, recv_if); + if (res == -1) + goto out;
/* translate transmit result into receive result */ if (res == NET_XMIT_SUCCESS) { @@@ -678,13 -668,10 +694,10 @@@ batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD); batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES, len + ETH_HLEN); - - ret = NET_RX_SUCCESS; - } else if (res == NET_XMIT_POLICED) { - /* skb was buffered and consumed */ - ret = NET_RX_SUCCESS; }
+ ret = NET_RX_SUCCESS; + out: if (orig_node) batadv_orig_node_put(orig_node); @@@ -1033,6 -1020,8 +1046,8 @@@ int batadv_recv_frag_packet(struct sk_b if (!orig_node_src) goto out;
+ skb->priority = frag_packet->priority + 256; + /* Route the fragment if it is not for us and too big to be merged. */ if (!batadv_is_my_mac(bat_priv, frag_packet->dest) && batadv_frag_skb_fwd(skb, recv_if, orig_node_src)) { diff --combined net/batman-adv/send.c index 0103976,3a10d87..6191159 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@@ -20,10 -20,11 +20,11 @@@
#include <linux/atomic.h> #include <linux/byteorder/generic.h> + #include <linux/errno.h> #include <linux/etherdevice.h> #include <linux/fs.h> - #include <linux/if_ether.h> #include <linux/if.h> + #include <linux/if_ether.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/kref.h> @@@ -42,6 -43,7 +43,7 @@@ #include "fragmentation.h" #include "gateway_client.h" #include "hard-interface.h" + #include "log.h" #include "network-coding.h" #include "originator.h" #include "routing.h" @@@ -71,6 -73,7 +73,7 @@@ int batadv_send_skb_packet(struct sk_bu { struct batadv_priv *bat_priv; struct ethhdr *ethhdr; + int ret;
bat_priv = netdev_priv(hard_iface->soft_iface);
@@@ -108,8 -111,15 +111,15 @@@ /* dev_queue_xmit() returns a negative result on error. However on * congestion and traffic shaping, it drops and returns NET_XMIT_DROP * (which is > 0). This will not be treated as an error. + * + * a negative value cannot be returned because it could be interpreted + * as an unconsumed skb by callers of batadv_send_skb_to_orig. */ - return dev_queue_xmit(skb); + ret = dev_queue_xmit(skb); + if (ret < 0) + ret = NET_XMIT_DROP; + + return ret; send_skb_err: kfree_skb(skb); return NET_XMIT_DROP; @@@ -155,8 -165,11 +165,11 @@@ int batadv_send_unicast_skb(struct sk_b * host, NULL can be passed as recv_if and no interface alternating is * attempted. * - * Return: NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or - * NET_XMIT_POLICED if the skb is buffered for later transmit. + * Return: -1 on failure (and the skb is not consumed), -EINPROGRESS if the + * skb is buffered for later transmit or the NET_XMIT status returned by the + * lower routine if the packet has been passed down. + * + * If the return value is not -1, the skb has been consumed. */ int batadv_send_skb_to_orig(struct sk_buff *skb, struct batadv_orig_node *orig_node, @@@ -164,7 -177,7 +177,7 @@@ { struct batadv_priv *bat_priv = orig_node->bat_priv; struct batadv_neigh_node *neigh_node; - int ret = NET_XMIT_DROP; + int ret = -1;
/* batadv_find_router() increases neigh_nodes refcount if found. */ neigh_node = batadv_find_router(bat_priv, orig_node, recv_if); @@@ -177,8 -190,7 +190,7 @@@ if (atomic_read(&bat_priv->fragmentation) && skb->len > neigh_node->if_incoming->net_dev->mtu) { /* Fragment and send packet. */ - if (batadv_frag_send_packet(skb, orig_node, neigh_node)) - ret = NET_XMIT_SUCCESS; + ret = batadv_frag_send_packet(skb, orig_node, neigh_node);
goto out; } @@@ -187,12 -199,10 +199,10 @@@ * (i.e. being forwarded). If the packet originates from this node or if * network coding fails, then send the packet as usual. */ - if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) { - ret = NET_XMIT_POLICED; - } else { - batadv_send_unicast_skb(skb, neigh_node); - ret = NET_XMIT_SUCCESS; - } + if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) + ret = -EINPROGRESS; + else + ret = batadv_send_unicast_skb(skb, neigh_node);
out: if (neigh_node) @@@ -318,7 -328,7 +328,7 @@@ int batadv_send_skb_unicast(struct bata { struct batadv_unicast_packet *unicast_packet; struct ethhdr *ethhdr; - int ret = NET_XMIT_DROP; + int res, ret = NET_XMIT_DROP;
if (!orig_node) goto out; @@@ -355,7 -365,8 +365,8 @@@ if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) unicast_packet->ttvn = unicast_packet->ttvn - 1;
- if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP) + res = batadv_send_skb_to_orig(skb, orig_node, NULL); + if (res != -1) ret = NET_XMIT_SUCCESS;
out: @@@ -424,31 -435,11 +435,11 @@@ int batadv_send_skb_via_gw(struct batad struct batadv_orig_node *orig_node;
orig_node = batadv_gw_get_selected_orig(bat_priv); - return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0, - orig_node, vid); + return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR, + BATADV_P_DATA, orig_node, vid); }
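All of the send-path callers converted in this file now branch on res == -1 first, per the batadv_send_skb_to_orig() contract documented above: -1 is the only return value on which the caller still owns the skb, so skipping that check either leaks the buffer or double-frees it. A userspace model of the ownership rule (names are illustrative):

#include <stdlib.h>

#define NET_XMIT_SUCCESS 0      /* stand-in for the kernel constant */

/* Model of the contract: on -1 the buffer is NOT consumed and the
 * caller must free it; any other return means it was taken over.
 */
static int send_to_orig(unsigned char *buf)
{
        if (!buf[0])            /* "no route": fail without consuming */
                return -1;
        free(buf);              /* consumed: queued or transmitted */
        return NET_XMIT_SUCCESS;
}

int main(void)
{
        unsigned char *pkt = calloc(1, 64);
        int res;

        if (!pkt)
                return 1;
        res = send_to_orig(pkt);
        if (res == -1)
                free(pkt);      /* still ours; freeing avoids the leak */
        return 0;
}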
- void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface) - { - struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); - - if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) || - (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED)) - return; - - /* the interface gets activated here to avoid race conditions between - * the moment of activating the interface in - * hardif_activate_interface() where the originator mac is set and - * outdated packets (especially uninitialized mac addresses) in the - * packet queue - */ - if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED) - hard_iface->if_status = BATADV_IF_ACTIVE; - - bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface); - } - - static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet) + void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet) { kfree_skb(forw_packet->skb); if (forw_packet->if_incoming) @@@ -604,45 -595,6 +595,6 @@@ out atomic_inc(&bat_priv->bcast_queue_left); }
- void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work) - { - struct delayed_work *delayed_work; - struct batadv_forw_packet *forw_packet; - struct batadv_priv *bat_priv; - - delayed_work = to_delayed_work(work); - forw_packet = container_of(delayed_work, struct batadv_forw_packet, - delayed_work); - bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface); - spin_lock_bh(&bat_priv->forw_bat_list_lock); - hlist_del(&forw_packet->list); - spin_unlock_bh(&bat_priv->forw_bat_list_lock); - - if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) - goto out; - - bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet); - - /* we have to have at least one packet in the queue to determine the - * queues wake up time unless we are shutting down. - * - * only re-schedule if this is the "original" copy, e.g. the OGM of the - * primary interface should only be rescheduled once per period, but - * this function will be called for the forw_packet instances of the - * other secondary interfaces as well. - */ - if (forw_packet->own && - forw_packet->if_incoming == forw_packet->if_outgoing) - batadv_schedule_bat_ogm(forw_packet->if_incoming); - - out: - /* don't count own packet */ - if (!forw_packet->own) - atomic_inc(&bat_priv->batman_queue_left); - - batadv_forw_packet_free(forw_packet); - } - void batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, const struct batadv_hard_iface *hard_iface) diff --combined net/batman-adv/types.h index 74d865a,43db7b6..a64522c --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@@ -33,6 -33,7 +33,7 @@@ #include <linux/types.h> #include <linux/wait.h> #include <linux/workqueue.h> + #include <uapi/linux/batman_adv.h>
#include "packet.h"
@@@ -330,9 -331,7 +331,9 @@@ struct batadv_orig_node DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE); u32 last_bcast_seqno; struct hlist_head neigh_list; - /* neigh_list_lock protects: neigh_list and router */ + /* neigh_list_lock protects: neigh_list, ifinfo_list, + * last_bonding_candidate and router + */ spinlock_t neigh_list_lock; struct hlist_node hash_entry; struct batadv_priv *bat_priv; @@@ -709,6 -708,8 +710,8 @@@ struct batadv_priv_debug_log * @list: list of available gateway nodes * @list_lock: lock protecting gw_list & curr_gw * @curr_gw: pointer to currently selected gateway node + * @mode: gateway operation: off, client or server (see batadv_gw_modes) + * @sel_class: gateway selection class (applies if gw_mode client) * @bandwidth_down: advertised uplink download bandwidth (if gw_mode server) * @bandwidth_up: advertised uplink upload bandwidth (if gw_mode server) * @reselect: bool indicating a gateway re-selection is in progress @@@ -717,6 -718,8 +720,8 @@@ struct batadv_priv_gw struct hlist_head list; spinlock_t list_lock; /* protects gw_list & curr_gw */ struct batadv_gw_node __rcu *curr_gw; /* rcu protected pointer */ + atomic_t mode; + atomic_t sel_class; atomic_t bandwidth_down; atomic_t bandwidth_up; atomic_t reselect; @@@ -753,14 -756,28 +758,28 @@@ struct batadv_priv_dat
#ifdef CONFIG_BATMAN_ADV_MCAST /** + * struct batadv_mcast_querier_state - IGMP/MLD querier state when bridged + * @exists: whether a querier exists in the mesh + * @shadowing: if a querier exists, whether it is potentially shadowing + * multicast listeners (i.e. querier is behind our own bridge segment) + */ + struct batadv_mcast_querier_state { + bool exists; + bool shadowing; + }; + + /** * struct batadv_priv_mcast - per mesh interface mcast data * @mla_list: list of multicast addresses we are currently announcing via TT * @want_all_unsnoopables_list: a list of orig_nodes wanting all unsnoopable * multicast traffic * @want_all_ipv4_list: a list of orig_nodes wanting all IPv4 multicast traffic * @want_all_ipv6_list: a list of orig_nodes wanting all IPv6 multicast traffic + * @querier_ipv4: the current state of an IGMP querier in the mesh + * @querier_ipv6: the current state of an MLD querier in the mesh * @flags: the flags we have last sent in our mcast tvlv * @enabled: whether the multicast tvlv is currently enabled + * @bridged: whether the soft interface has a bridge on top * @num_disabled: number of nodes that have no mcast tvlv * @num_want_all_unsnoopables: number of nodes wanting unsnoopable IP traffic * @num_want_all_ipv4: counter for items in want_all_ipv4_list @@@ -773,8 -790,11 +792,11 @@@ struct batadv_priv_mcast struct hlist_head want_all_unsnoopables_list; struct hlist_head want_all_ipv4_list; struct hlist_head want_all_ipv6_list; + struct batadv_mcast_querier_state querier_ipv4; + struct batadv_mcast_querier_state querier_ipv6; u8 flags; bool enabled; + bool bridged; atomic_t num_disabled; atomic_t num_want_all_unsnoopables; atomic_t num_want_all_ipv4; @@@ -814,6 -834,111 +836,111 @@@ struct batadv_priv_nc };
/** + * struct batadv_tp_unacked - unacked packet meta-information + * @seqno: seqno of the unacked packet + * @len: length of the packet + * @list: list node for batadv_tp_vars::unacked_list + * + * This struct is supposed to represent a buffer unacked packet. However, since + * the purpose of the TP meter is to count the traffic only, there is no need to + * store the entire sk_buff, the starting offset and the length are enough + */ + struct batadv_tp_unacked { + u32 seqno; + u16 len; + struct list_head list; + }; + + /** + * enum batadv_tp_meter_role - Modus in tp meter session + * @BATADV_TP_RECEIVER: Initialized as receiver + * @BATADV_TP_SENDER: Initialized as sender + */ + enum batadv_tp_meter_role { + BATADV_TP_RECEIVER, + BATADV_TP_SENDER + }; + + /** + * struct batadv_tp_vars - tp meter private variables per session + * @list: list node for bat_priv::tp_list + * @timer: timer for ack (receiver) and retry (sender) + * @bat_priv: pointer to the mesh object + * @start_time: start time in jiffies + * @other_end: mac address of remote + * @role: receiver/sender modi + * @sending: sending binary semaphore: 1 if sending, 0 is not + * @reason: reason for a stopped session + * @finish_work: work item for the finishing procedure + * @test_length: test length in milliseconds + * @session: TP session identifier + * @icmp_uid: local ICMP "socket" index + * @dec_cwnd: decimal part of the cwnd used during linear growth + * @cwnd: current size of the congestion window + * @cwnd_lock: lock do protect @cwnd & @dec_cwnd + * @ss_threshold: Slow Start threshold. Once cwnd exceeds this value the + * connection switches to the Congestion Avoidance state + * @last_acked: last acked byte + * @last_sent: last sent byte, not yet acked + * @tot_sent: amount of data sent/ACKed so far + * @dup_acks: duplicate ACKs counter + * @fast_recovery: true if in Fast Recovery mode + * @recover: last sent seqno when entering Fast Recovery + * @rto: sender timeout + * @srtt: smoothed RTT scaled by 2^3 + * @rttvar: RTT variation scaled by 2^2 + * @more_bytes: waiting queue anchor when waiting for more ack/retry timeout + * @prerandom_offset: offset inside the prerandom buffer + * @prerandom_lock: spinlock protecting access to prerandom_offset + * @last_recv: last in-order received packet + * @unacked_list: list of unacked packets (meta-info only) + * @unacked_lock: protect unacked_list + * @last_recv_time: time time (jiffies) a msg was received + * @refcount: number of context where the object is used + * @rcu: struct used for freeing in an RCU-safe manner + */ + struct batadv_tp_vars { + struct hlist_node list; + struct timer_list timer; + struct batadv_priv *bat_priv; + unsigned long start_time; + u8 other_end[ETH_ALEN]; + enum batadv_tp_meter_role role; + atomic_t sending; + enum batadv_tp_meter_reason reason; + struct delayed_work finish_work; + u32 test_length; + u8 session[2]; + u8 icmp_uid; + + /* sender variables */ + u16 dec_cwnd; + u32 cwnd; + spinlock_t cwnd_lock; /* Protects cwnd & dec_cwnd */ + u32 ss_threshold; + atomic_t last_acked; + u32 last_sent; + atomic64_t tot_sent; + atomic_t dup_acks; + bool fast_recovery; + u32 recover; + u32 rto; + u32 srtt; + u32 rttvar; + wait_queue_head_t more_bytes; + u32 prerandom_offset; + spinlock_t prerandom_lock; /* Protects prerandom_offset */ + + /* receiver variables */ + u32 last_recv; + struct list_head unacked_list; + spinlock_t unacked_lock; /* Protects unacked_list */ + unsigned long last_recv_time; + struct kref refcount; + struct rcu_head rcu; + }; + + /** * 
struct batadv_softif_vlan - per VLAN attributes set * @bat_priv: pointer to the mesh object * @vid: VLAN identifier @@@ -867,8 -992,6 +994,6 @@@ struct batadv_priv_bat_v * enabled * @multicast_mode: Enable or disable multicast optimizations on this node's * sender/originating side - * @gw_mode: gateway operation: off, client or server (see batadv_gw_modes) - * @gw_sel_class: gateway selection class (applies if gw_mode client) * @orig_interval: OGM broadcast interval in milliseconds * @hop_penalty: penalty which will be applied to an OGM's tq-field on every hop * @log_level: configured log level (see batadv_dbg_level) @@@ -883,14 -1006,17 +1008,17 @@@ * @debug_dir: dentry for debugfs batman-adv subdirectory * @forw_bat_list: list of aggregated OGMs that will be forwarded * @forw_bcast_list: list of broadcast packets that will be rebroadcasted + * @tp_list: list of tp sessions + * @tp_num: number of currently active tp sessions * @orig_hash: hash table containing mesh participants (orig nodes) * @forw_bat_list_lock: lock protecting forw_bat_list * @forw_bcast_list_lock: lock protecting forw_bcast_list + * @tp_list_lock: spinlock protecting @tp_list * @orig_work: work queue callback item for orig node purging * @cleanup_work: work queue callback item for soft-interface deinit * @primary_if: one of the hard-interfaces assigned to this mesh interface * becomes the primary interface - * @bat_algo_ops: routing algorithm used by this mesh interface + * @algo_ops: routing algorithm used by this mesh interface * @softif_vlan_list: a list of softif_vlan structs, one per VLAN created on top * of the mesh interface represented by this object * @softif_vlan_list_lock: lock protecting softif_vlan_list @@@ -924,8 -1050,6 +1052,6 @@@ struct batadv_priv #ifdef CONFIG_BATMAN_ADV_MCAST atomic_t multicast_mode; #endif - atomic_t gw_mode; - atomic_t gw_sel_class; atomic_t orig_interval; atomic_t hop_penalty; #ifdef CONFIG_BATMAN_ADV_DEBUG @@@ -941,13 -1065,16 +1067,16 @@@ struct dentry *debug_dir; struct hlist_head forw_bat_list; struct hlist_head forw_bcast_list; + struct hlist_head tp_list; struct batadv_hashtable *orig_hash; spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */ + spinlock_t tp_list_lock; /* protects tp_list */ + atomic_t tp_num; struct delayed_work orig_work; struct work_struct cleanup_work; struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */ - struct batadv_algo_ops *bat_algo_ops; + struct batadv_algo_ops *algo_ops; struct hlist_head softif_vlan_list; spinlock_t softif_vlan_list_lock; /* protects softif_vlan_list */ #ifdef CONFIG_BATMAN_ADV_BLA @@@ -1044,7 -1171,6 +1173,7 @@@ struct batadv_bla_backbone_gw * @addr: mac address of claimed non-mesh client * @vid: vlan id this client was detected on * @backbone_gw: pointer to backbone gw claiming this client + * @backbone_lock: lock protecting backbone_gw pointer * @lasttime: last time we heard of claim (locals only) * @hash_entry: hlist node for batadv_priv_bla::claim_hash * @refcount: number of contexts the object is used @@@ -1054,7 -1180,6 +1183,7 @@@ struct batadv_bla_claim u8 addr[ETH_ALEN]; unsigned short vid; struct batadv_bla_backbone_gw *backbone_gw; + spinlock_t backbone_lock; /* protects backbone_gw */ unsigned long lasttime; struct hlist_node hash_entry; struct rcu_head rcu; @@@ -1265,66 -1390,77 +1394,77 @@@ struct batadv_forw_packet };
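The srtt/rttvar kernel-doc in batadv_tp_vars above ("scaled by 2^3" and "scaled by 2^2") points at the usual RFC 6298/Jacobson estimator, where keeping the state pre-scaled turns the smoothing factors 1/8 and 1/4 into shifts. A hedged sketch of one update step under those scalings (not the tp meter code itself):

#include <stdio.h>

static long srtt;       /* smoothed RTT, scaled by 8 (2^3) */
static long rttvar;     /* RTT variation, scaled by 4 (2^2) */

static void rtt_update(long m)  /* m: new sample, e.g. in usec */
{
        if (!srtt) {            /* first sample seeds both */
                srtt = m << 3;
                rttvar = m << 1;
                return;
        }
        m -= srtt >> 3;         /* error against the smoothed value */
        srtt += m;              /* srtt = 7/8 srtt + 1/8 sample */
        if (m < 0)
                m = -m;
        m -= rttvar >> 2;
        rttvar += m;            /* rttvar = 3/4 rttvar + 1/4 |error| */
}

static long rto(void)
{
        return (srtt >> 3) + rttvar;    /* srtt + 4 * rttvar, unscaled */
}

int main(void)
{
        rtt_update(100);
        rtt_update(120);
        printf("rto=%ld\n", rto());
        return 0;
}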
/** + * struct batadv_algo_iface_ops - mesh algorithm callbacks (interface specific) + * @activate: start routing mechanisms when hard-interface is brought up + * @enable: init routing info when hard-interface is enabled + * @disable: de-init routing info when hard-interface is disabled + * @update_mac: (re-)init mac addresses of the protocol information + * belonging to this hard-interface + * @primary_set: called when primary interface is selected / changed + */ + struct batadv_algo_iface_ops { + void (*activate)(struct batadv_hard_iface *hard_iface); + int (*enable)(struct batadv_hard_iface *hard_iface); + void (*disable)(struct batadv_hard_iface *hard_iface); + void (*update_mac)(struct batadv_hard_iface *hard_iface); + void (*primary_set)(struct batadv_hard_iface *hard_iface); + }; + + /** + * struct batadv_algo_neigh_ops - mesh algorithm callbacks (neighbour specific) + * @hardif_init: called on creation of single hop entry + * @cmp: compare the metrics of two neighbors for their respective outgoing + * interfaces + * @is_similar_or_better: check if neigh1 is equally similar or better than + * neigh2 for their respective outgoing interface from the metric prospective + * @print: print the single hop neighbor list (optional) + */ + struct batadv_algo_neigh_ops { + void (*hardif_init)(struct batadv_hardif_neigh_node *neigh); + int (*cmp)(struct batadv_neigh_node *neigh1, + struct batadv_hard_iface *if_outgoing1, + struct batadv_neigh_node *neigh2, + struct batadv_hard_iface *if_outgoing2); + bool (*is_similar_or_better)(struct batadv_neigh_node *neigh1, + struct batadv_hard_iface *if_outgoing1, + struct batadv_neigh_node *neigh2, + struct batadv_hard_iface *if_outgoing2); + void (*print)(struct batadv_priv *priv, struct seq_file *seq); + }; + + /** + * struct batadv_algo_orig_ops - mesh algorithm callbacks (originator specific) + * @free: free the resources allocated by the routing algorithm for an orig_node + * object + * @add_if: ask the routing algorithm to apply the needed changes to the + * orig_node due to a new hard-interface being added into the mesh + * @del_if: ask the routing algorithm to apply the needed changes to the + * orig_node due to an hard-interface being removed from the mesh + * @print: print the originator table (optional) + */ + struct batadv_algo_orig_ops { + void (*free)(struct batadv_orig_node *orig_node); + int (*add_if)(struct batadv_orig_node *orig_node, int max_if_num); + int (*del_if)(struct batadv_orig_node *orig_node, int max_if_num, + int del_if_num); + void (*print)(struct batadv_priv *priv, struct seq_file *seq, + struct batadv_hard_iface *hard_iface); + }; + + /** * struct batadv_algo_ops - mesh algorithm callbacks * @list: list node for the batadv_algo_list * @name: name of the algorithm - * @bat_iface_activate: start routing mechanisms when hard-interface is brought - * up - * @bat_iface_enable: init routing info when hard-interface is enabled - * @bat_iface_disable: de-init routing info when hard-interface is disabled - * @bat_iface_update_mac: (re-)init mac addresses of the protocol information - * belonging to this hard-interface - * @bat_primary_iface_set: called when primary interface is selected / changed - * @bat_ogm_schedule: prepare a new outgoing OGM for the send queue - * @bat_ogm_emit: send scheduled OGM - * @bat_hardif_neigh_init: called on creation of single hop entry - * @bat_neigh_cmp: compare the metrics of two neighbors for their respective - * outgoing interfaces - * @bat_neigh_is_similar_or_better: check if neigh1 is equally 
similar or - * better than neigh2 for their respective outgoing interface from the metric - * prospective - * @bat_neigh_print: print the single hop neighbor list (optional) - * @bat_neigh_free: free the resources allocated by the routing algorithm for a - * neigh_node object - * @bat_orig_print: print the originator table (optional) - * @bat_orig_free: free the resources allocated by the routing algorithm for an - * orig_node object - * @bat_orig_add_if: ask the routing algorithm to apply the needed changes to - * the orig_node due to a new hard-interface being added into the mesh - * @bat_orig_del_if: ask the routing algorithm to apply the needed changes to - * the orig_node due to an hard-interface being removed from the mesh + * @iface: callbacks related to interface handling + * @neigh: callbacks related to neighbors handling + * @orig: callbacks related to originators handling */ struct batadv_algo_ops { struct hlist_node list; char *name; - void (*bat_iface_activate)(struct batadv_hard_iface *hard_iface); - int (*bat_iface_enable)(struct batadv_hard_iface *hard_iface); - void (*bat_iface_disable)(struct batadv_hard_iface *hard_iface); - void (*bat_iface_update_mac)(struct batadv_hard_iface *hard_iface); - void (*bat_primary_iface_set)(struct batadv_hard_iface *hard_iface); - void (*bat_ogm_schedule)(struct batadv_hard_iface *hard_iface); - void (*bat_ogm_emit)(struct batadv_forw_packet *forw_packet); - /* neigh_node handling API */ - void (*bat_hardif_neigh_init)(struct batadv_hardif_neigh_node *neigh); - int (*bat_neigh_cmp)(struct batadv_neigh_node *neigh1, - struct batadv_hard_iface *if_outgoing1, - struct batadv_neigh_node *neigh2, - struct batadv_hard_iface *if_outgoing2); - bool (*bat_neigh_is_similar_or_better) - (struct batadv_neigh_node *neigh1, - struct batadv_hard_iface *if_outgoing1, - struct batadv_neigh_node *neigh2, - struct batadv_hard_iface *if_outgoing2); - void (*bat_neigh_print)(struct batadv_priv *priv, struct seq_file *seq); - void (*bat_neigh_free)(struct batadv_neigh_node *neigh); - /* orig_node handling API */ - void (*bat_orig_print)(struct batadv_priv *priv, struct seq_file *seq, - struct batadv_hard_iface *hard_iface); - void (*bat_orig_free)(struct batadv_orig_node *orig_node); - int (*bat_orig_add_if)(struct batadv_orig_node *orig_node, - int max_if_num); - int (*bat_orig_del_if)(struct batadv_orig_node *orig_node, - int max_if_num, int del_if_num); + struct batadv_algo_iface_ops iface; + struct batadv_algo_neigh_ops neigh; + struct batadv_algo_orig_ops orig; };
/** diff --combined net/core/filter.c index e759d90,6c627bc..0b521353 --- a/net/core/filter.c +++ b/net/core/filter.c @@@ -53,10 -53,9 +53,10 @@@ #include <net/sock_reuseport.h>
/** - * sk_filter - run a packet through a socket filter + * sk_filter_trim_cap - run a packet through a socket filter * @sk: sock associated with &sk_buff * @skb: buffer to filter + * @cap: limit on how short the eBPF program may trim the packet * * Run the eBPF program and then cut skb->data to correct size returned by * the program. If pkt_len is 0 we toss packet. If skb->len is smaller @@@ -65,7 -64,7 +65,7 @@@ * be accepted or -EPERM if the packet should be tossed. * */ -int sk_filter(struct sock *sk, struct sk_buff *skb) +int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap) { int err; struct sk_filter *filter; @@@ -86,13 -85,14 +86,13 @@@ filter = rcu_dereference(sk->sk_filter); if (filter) { unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb); - - err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; + err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM; } rcu_read_unlock();
return err; } -EXPORT_SYMBOL(sk_filter); +EXPORT_SYMBOL(sk_filter_trim_cap);
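sk_filter_trim_cap() changes the trim step to pskb_trim(skb, max(cap, pkt_len)): a filter verdict of 0 still drops the packet, but a positive verdict can no longer cut the buffer below cap bytes (callers such as TCP need their headers kept intact). The decision on its own, modeled in userspace:

#include <stdio.h>

#define EPERM 1         /* stand-in for the kernel errno value */

/* Returns 0 and shrinks *len, or -EPERM when the filter said drop.
 * pkt_len is the filter's verdict, cap the minimum the caller keeps.
 */
static int filter_trim_cap(unsigned int pkt_len, unsigned int cap,
                           unsigned int *len)
{
        unsigned int keep;

        if (!pkt_len)
                return -EPERM;                  /* program said: toss it */
        keep = pkt_len > cap ? pkt_len : cap;   /* max(cap, pkt_len) */
        if (*len > keep)
                *len = keep;                    /* pskb_trim() only shrinks */
        return 0;
}

int main(void)
{
        unsigned int len = 100;

        /* filter asks for 20 bytes, caller insists on at least 40 */
        if (!filter_trim_cap(20, 40, &len))
                printf("kept %u bytes\n", len); /* prints: kept 40 bytes */
        return 0;
}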
static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) { @@@ -150,6 -150,12 +150,12 @@@ static u64 __get_raw_cpu_id(u64 ctx, u6 return raw_smp_processor_id(); }
+ static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = { + .func = __get_raw_cpu_id, + .gpl_only = false, + .ret_type = RET_INTEGER, + }; + static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg, struct bpf_insn *insn_buf) { @@@ -748,6 -754,17 +754,17 @@@ static bool chk_code_allowed(u16 code_t return codes[code_to_probe]; }
+ static bool bpf_check_basics_ok(const struct sock_filter *filter, + unsigned int flen) + { + if (filter == NULL) + return false; + if (flen == 0 || flen > BPF_MAXINSNS) + return false; + + return true; + } + /** * bpf_check_classic - verify socket filter code * @filter: filter to verify @@@ -768,9 -785,6 +785,6 @@@ static int bpf_check_classic(const stru bool anc_found; int pc;
- if (flen == 0 || flen > BPF_MAXINSNS) - return -EINVAL; - /* Check the filter code now */ for (pc = 0; pc < flen; pc++) { const struct sock_filter *ftest = &filter[pc]; @@@ -1065,7 -1079,7 +1079,7 @@@ int bpf_prog_create(struct bpf_prog **p struct bpf_prog *fp;
/* Make sure new filter is there and in the right amounts. */ - if (fprog->filter == NULL) + if (!bpf_check_basics_ok(fprog->filter, fprog->len)) return -EINVAL;
fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); @@@ -1112,7 -1126,7 +1126,7 @@@ int bpf_prog_create_from_user(struct bp int err;
/* Make sure new filter is there and in the right amounts. */ - if (fprog->filter == NULL) + if (!bpf_check_basics_ok(fprog->filter, fprog->len)) return -EINVAL;
fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); @@@ -1207,7 -1221,6 +1221,6 @@@ stati struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk) { unsigned int fsize = bpf_classic_proglen(fprog); - unsigned int bpf_fsize = bpf_prog_size(fprog->len); struct bpf_prog *prog; int err;
@@@ -1215,10 -1228,10 +1228,10 @@@ return ERR_PTR(-EPERM);
/* Make sure new filter is there and in the right amounts. */ - if (fprog->filter == NULL) + if (!bpf_check_basics_ok(fprog->filter, fprog->len)) return ERR_PTR(-EINVAL);
- prog = bpf_prog_alloc(bpf_fsize, 0); + prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); if (!prog) return ERR_PTR(-ENOMEM);
@@@ -1288,21 -1301,10 +1301,10 @@@ int sk_reuseport_attach_filter(struct s
static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk) { - struct bpf_prog *prog; - if (sock_flag(sk, SOCK_FILTER_LOCKED)) return ERR_PTR(-EPERM);
- prog = bpf_prog_get(ufd); - if (IS_ERR(prog)) - return prog; - - if (prog->type != BPF_PROG_TYPE_SOCKET_FILTER) { - bpf_prog_put(prog); - return ERR_PTR(-EINVAL); - } - - return prog; + return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER); }
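The hunk above folds the open-coded sequence (take a reference, check prog->type, drop the reference on mismatch) into the single bpf_prog_get_type() call, so every attach path enforces the type check identically. A toy version of such a get-with-type-check wrapper (hypothetical names, userspace atomics for the refcount):

#include <errno.h>
#include <stdatomic.h>
#include <stddef.h>

enum prog_type { PROG_SOCKET_FILTER, PROG_SCHED_CLS };

struct prog {
        enum prog_type type;
        atomic_int refcount;
};

static void prog_put(struct prog *p)
{
        atomic_fetch_sub(&p->refcount, 1);
}

static struct prog *prog_get(struct prog *p)
{
        atomic_fetch_add(&p->refcount, 1);
        return p;
}

/* Fold lookup + type check + error unwind into one helper. */
static struct prog *prog_get_type(struct prog *p, enum prog_type type,
                                  int *err)
{
        p = prog_get(p);
        if (p->type != type) {
                prog_put(p);    /* undo the reference on mismatch */
                *err = -EINVAL;
                return NULL;
        }
        return p;
}

int main(void)
{
        struct prog p = { .type = PROG_SCHED_CLS };
        int err = 0;

        atomic_init(&p.refcount, 1);
        return prog_get_type(&p, PROG_SOCKET_FILTER, &err) == NULL ? 0 : 1;
}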
int sk_attach_bpf(u32 ufd, struct sock *sk) @@@ -1603,9 -1605,36 +1605,36 @@@ static const struct bpf_func_proto bpf_ .arg5_type = ARG_ANYTHING, };
+ static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) + { + if (skb_at_tc_ingress(skb)) + skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len); + + return dev_forward_skb(dev, skb); + } + + static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) + { + int ret; + + if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) { + net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); + kfree_skb(skb); + return -ENETDOWN; + } + + skb->dev = dev; + + __this_cpu_inc(xmit_recursion); + ret = dev_queue_xmit(skb); + __this_cpu_dec(xmit_recursion); + + return ret; + } + static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5) { - struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2; + struct sk_buff *skb = (struct sk_buff *) (long) r1; struct net_device *dev;
if (unlikely(flags & ~(BPF_F_INGRESS))) @@@ -1615,19 -1644,12 +1644,12 @@@ if (unlikely(!dev)) return -EINVAL;
- skb2 = skb_clone(skb, GFP_ATOMIC); - if (unlikely(!skb2)) + skb = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!skb)) return -ENOMEM;
- if (flags & BPF_F_INGRESS) { - if (skb_at_tc_ingress(skb2)) - skb_postpush_rcsum(skb2, skb_mac_header(skb2), - skb2->mac_len); - return dev_forward_skb(dev, skb2); - } - - skb2->dev = dev; - return dev_queue_xmit(skb2); + return flags & BPF_F_INGRESS ? + __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); }
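__bpf_tx_skb(), introduced above, bounds nested transmissions: a per-CPU depth counter is incremented around dev_queue_xmit(), and once it exceeds XMIT_RECURSION_LIMIT the packet is dropped rather than recursing further, which is what stops a redirect loop in a buggy program from overflowing the stack. Thread-local storage stands in for the per-CPU counter in this sketch:

#include <stdio.h>

#define RECURSION_LIMIT 8

static _Thread_local int xmit_depth;

static int xmit(int hops_left)
{
        int ret;

        if (xmit_depth > RECURSION_LIMIT) {
                fprintf(stderr, "recursion limit reached, dropping\n");
                return -1;
        }

        xmit_depth++;
        /* a redirecting program re-enters the xmit path here */
        ret = hops_left ? xmit(hops_left - 1) : 0;
        xmit_depth--;

        return ret;
}

int main(void)
{
        return xmit(100) == -1 ? 0 : 1; /* the loop gets cut off */
}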
static const struct bpf_func_proto bpf_clone_redirect_proto = { @@@ -1671,15 -1693,8 +1693,8 @@@ int skb_do_redirect(struct sk_buff *skb return -EINVAL; }
- if (ri->flags & BPF_F_INGRESS) { - if (skb_at_tc_ingress(skb)) - skb_postpush_rcsum(skb, skb_mac_header(skb), - skb->mac_len); - return dev_forward_skb(dev, skb); - } - - skb->dev = dev; - return dev_queue_xmit(skb); + return ri->flags & BPF_F_INGRESS ? + __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); }
static const struct bpf_func_proto bpf_redirect_proto = { @@@ -1714,6 -1729,23 +1729,23 @@@ static const struct bpf_func_proto bpf_ .arg1_type = ARG_PTR_TO_CTX, };
+ static u64 bpf_get_hash_recalc(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) + { + /* If skb_clear_hash() was called due to mangling, we can + * trigger SW recalculation here. Later access to hash + * can then use the inline skb->hash via context directly + * instead of calling this helper again. + */ + return skb_get_hash((struct sk_buff *) (unsigned long) r1); + } + + static const struct bpf_func_proto bpf_get_hash_recalc_proto = { + .func = bpf_get_hash_recalc, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + }; + static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5) { struct sk_buff *skb = (struct sk_buff *) (long) r1; @@@ -1757,6 -1789,224 +1789,224 @@@ const struct bpf_func_proto bpf_skb_vla }; EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto);
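bpf_get_hash_recalc(), added at the top of this hunk, is a cache-refill helper: mangling helpers invalidate the stored flow hash via skb_clear_hash(), and the next reader recomputes it on demand instead of paying for a rehash after every rewrite. The invalidate-then-recompute-on-read shape, modeled with an FNV-1a hash in plain C:

#include <stdbool.h>
#include <stdio.h>

struct pkt {
        unsigned int hash;
        bool hash_valid;
        unsigned char data[64];
};

static void clear_hash(struct pkt *p)  /* called after mangling */
{
        p->hash_valid = false;
}

static unsigned int get_hash(struct pkt *p)
{
        if (!p->hash_valid) {           /* recompute lazily */
                unsigned int h = 2166136261u;

                for (unsigned i = 0; i < sizeof(p->data); i++)
                        h = (h ^ p->data[i]) * 16777619u;   /* FNV-1a */
                p->hash = h;
                p->hash_valid = true;
        }
        return p->hash;
}

int main(void)
{
        struct pkt p = { 0 };

        p.data[0] = 42;         /* mangle the packet ... */
        clear_hash(&p);         /* ... and invalidate the cache */
        return get_hash(&p) == get_hash(&p) ? 0 : 1; /* 2nd read is cached */
}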
+ static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len) + { + /* Caller already did skb_cow() with len as headroom, + * so no need to do it here. + */ + skb_push(skb, len); + memmove(skb->data, skb->data + len, off); + memset(skb->data + off, 0, len); + + /* No skb_postpush_rcsum(skb, skb->data + off, len) + * needed here as it does not change the skb->csum + * result for checksum complete when summing over + * zeroed blocks. + */ + return 0; + } + + static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len) + { + /* skb_ensure_writable() is not needed here, as we're + * already working on an uncloned skb. + */ + if (unlikely(!pskb_may_pull(skb, off + len))) + return -ENOMEM; + + skb_postpull_rcsum(skb, skb->data + off, len); + memmove(skb->data + len, skb->data, off); + __skb_pull(skb, len); + + return 0; + } + + static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len) + { + bool trans_same = skb->transport_header == skb->network_header; + int ret; + + /* There's no need for __skb_push()/__skb_pull() pair to + * get to the start of the mac header as we're guaranteed + * to always start from here under eBPF. + */ + ret = bpf_skb_generic_push(skb, off, len); + if (likely(!ret)) { + skb->mac_header -= len; + skb->network_header -= len; + if (trans_same) + skb->transport_header = skb->network_header; + } + + return ret; + } + + static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len) + { + bool trans_same = skb->transport_header == skb->network_header; + int ret; + + /* Same here, __skb_push()/__skb_pull() pair not needed. */ + ret = bpf_skb_generic_pop(skb, off, len); + if (likely(!ret)) { + skb->mac_header += len; + skb->network_header += len; + if (trans_same) + skb->transport_header = skb->network_header; + } + + return ret; + } + + static int bpf_skb_proto_4_to_6(struct sk_buff *skb) + { + const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr); + u32 off = skb->network_header - skb->mac_header; + int ret; + + ret = skb_cow(skb, len_diff); + if (unlikely(ret < 0)) + return ret; + + ret = bpf_skb_net_hdr_push(skb, off, len_diff); + if (unlikely(ret < 0)) + return ret; + + if (skb_is_gso(skb)) { + /* SKB_GSO_UDP stays as is. SKB_GSO_TCPV4 needs to + * be changed into SKB_GSO_TCPV6. + */ + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { + skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV4; + skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; + } + + /* Due to IPv6 header, MSS needs to be downgraded. */ + skb_shinfo(skb)->gso_size -= len_diff; + /* Header must be checked, and gso_segs recomputed. */ + skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; + skb_shinfo(skb)->gso_segs = 0; + } + + skb->protocol = htons(ETH_P_IPV6); + skb_clear_hash(skb); + + return 0; + } + + static int bpf_skb_proto_6_to_4(struct sk_buff *skb) + { + const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr); + u32 off = skb->network_header - skb->mac_header; + int ret; + + ret = skb_unclone(skb, GFP_ATOMIC); + if (unlikely(ret < 0)) + return ret; + + ret = bpf_skb_net_hdr_pop(skb, off, len_diff); + if (unlikely(ret < 0)) + return ret; + + if (skb_is_gso(skb)) { + /* SKB_GSO_UDP stays as is. SKB_GSO_TCPV6 needs to + * be changed into SKB_GSO_TCPV4. + */ + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { + skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV6; + skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; + } + + /* Due to IPv4 header, MSS can be upgraded. */ + skb_shinfo(skb)->gso_size += len_diff; + /* Header must be checked, and gso_segs recomputed. 
*/ + skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; + skb_shinfo(skb)->gso_segs = 0; + } + + skb->protocol = htons(ETH_P_IP); + skb_clear_hash(skb); + + return 0; + } + + static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto) + { + __be16 from_proto = skb->protocol; + + if (from_proto == htons(ETH_P_IP) && + to_proto == htons(ETH_P_IPV6)) + return bpf_skb_proto_4_to_6(skb); + + if (from_proto == htons(ETH_P_IPV6) && + to_proto == htons(ETH_P_IP)) + return bpf_skb_proto_6_to_4(skb); + + return -ENOTSUPP; + } + + static u64 bpf_skb_change_proto(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5) + { + struct sk_buff *skb = (struct sk_buff *) (long) r1; + __be16 proto = (__force __be16) r2; + int ret; + + if (unlikely(flags)) + return -EINVAL; + + /* General idea is that this helper does the basic groundwork + * needed for changing the protocol, and eBPF program fills the + * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace() + * and other helpers, rather than passing a raw buffer here. + * + * The rationale is to keep this minimal and without a need to + * deal with raw packet data. F.e. even if we would pass buffers + * here, the program still needs to call the bpf_lX_csum_replace() + * helpers anyway. Plus, this way we keep also separation of + * concerns, since f.e. bpf_skb_store_bytes() should only take + * care of stores. + * + * Currently, additional options and extension header space are + * not supported, but flags register is reserved so we can adapt + * that. For offloads, we mark packet as dodgy, so that headers + * need to be verified first. + */ + ret = bpf_skb_proto_xlat(skb, proto); + bpf_compute_data_end(skb); + return ret; + } + + static const struct bpf_func_proto bpf_skb_change_proto_proto = { + .func = bpf_skb_change_proto, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, + }; + + static u64 bpf_skb_change_type(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) + { + struct sk_buff *skb = (struct sk_buff *) (long) r1; + u32 pkt_type = r2; + + /* We only allow a restricted subset to be changed for now. */ + if (unlikely(skb->pkt_type > PACKET_OTHERHOST || + pkt_type > PACKET_OTHERHOST)) + return -EINVAL; + + skb->pkt_type = pkt_type; + return 0; + } + + static const struct bpf_func_proto bpf_skb_change_type_proto = { + .func = bpf_skb_change_type, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + }; + bool bpf_helper_changes_skb_data(void *func) { if (func == bpf_skb_vlan_push) @@@ -1765,6 -2015,8 +2015,8 @@@ return true; if (func == bpf_skb_store_bytes) return true; + if (func == bpf_skb_change_proto) + return true; if (func == bpf_l3_csum_replace) return true; if (func == bpf_l4_csum_replace) @@@ -1773,6 -2025,47 +2025,47 @@@ return false; }
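bpf_skb_generic_push() above opens a gap for a larger header in three steps: the caller reserves len bytes of headroom (the skb_cow() step), the bytes in front of the network header are slid up, and the gap is zeroed; the *_net_hdr_* wrappers then shift the cached mac/network/transport offsets by the same len. The buffer arithmetic of the push half on a flat array (illustrative, no skb machinery):

#include <stdio.h>
#include <string.h>

/* Insert len zero bytes at offset off, assuming the caller already
 * reserved len bytes of headroom at the front of buf.
 */
static void generic_push(unsigned char *buf, size_t off, size_t len)
{
        memmove(buf, buf + len, off);   /* slide the first off bytes up */
        memset(buf + off, 0, len);      /* zero the freshly opened gap */
}

int main(void)
{
        unsigned char buf[64];
        const size_t off = 14;  /* length of the MAC header */
        const size_t grow = 20; /* sizeof(ipv6hdr) - sizeof(iphdr) */

        memset(buf, 0xee, sizeof(buf));
        memcpy(buf + grow, "MACMACMACMACMA", off);  /* old packet start */
        generic_push(buf, off, grow);
        printf("%.14s\n", buf); /* MAC header is at the front again */
        return 0;
}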
+ static unsigned long bpf_skb_copy(void *dst_buff, const void *skb, + unsigned long len) + { + void *ptr = skb_header_pointer(skb, 0, len, dst_buff); + + if (unlikely(!ptr)) + return len; + if (ptr != dst_buff) + memcpy(dst_buff, ptr, len); + + return 0; + } + + static u64 bpf_skb_event_output(u64 r1, u64 r2, u64 flags, u64 r4, + u64 meta_size) + { + struct sk_buff *skb = (struct sk_buff *)(long) r1; + struct bpf_map *map = (struct bpf_map *)(long) r2; + u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32; + void *meta = (void *)(long) r4; + + if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) + return -EINVAL; + if (unlikely(skb_size > skb->len)) + return -EFAULT; + + return bpf_event_output(map, flags, meta, meta_size, skb, skb_size, + bpf_skb_copy); + } + + static const struct bpf_func_proto bpf_skb_event_output_proto = { + .func = bpf_skb_event_output, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, + .arg4_type = ARG_PTR_TO_STACK, + .arg5_type = ARG_CONST_STACK_SIZE, + }; + static unsigned short bpf_tunnel_key_af(u64 flags) { return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET; @@@ -2004,6 -2297,40 +2297,40 @@@ bpf_get_skb_set_tunnel_proto(enum bpf_f } }
+ #ifdef CONFIG_SOCK_CGROUP_DATA + static u64 bpf_skb_in_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) + { + struct sk_buff *skb = (struct sk_buff *)(long)r1; + struct bpf_map *map = (struct bpf_map *)(long)r2; + struct bpf_array *array = container_of(map, struct bpf_array, map); + struct cgroup *cgrp; + struct sock *sk; + u32 i = (u32)r3; + + sk = skb->sk; + if (!sk || !sk_fullsock(sk)) + return -ENOENT; + + if (unlikely(i >= array->map.max_entries)) + return -E2BIG; + + cgrp = READ_ONCE(array->ptrs[i]); + if (unlikely(!cgrp)) + return -EAGAIN; + + return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), cgrp); + } + + static const struct bpf_func_proto bpf_skb_in_cgroup_proto = { + .func = bpf_skb_in_cgroup, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, + }; + #endif + static const struct bpf_func_proto * sk_filter_func_proto(enum bpf_func_id func_id) { @@@ -2017,7 -2344,7 +2344,7 @@@ case BPF_FUNC_get_prandom_u32: return &bpf_get_prandom_u32_proto; case BPF_FUNC_get_smp_processor_id: - return &bpf_get_smp_processor_id_proto; + return &bpf_get_raw_smp_processor_id_proto; case BPF_FUNC_tail_call: return &bpf_tail_call_proto; case BPF_FUNC_ktime_get_ns: @@@ -2052,6 -2379,10 +2379,10 @@@ tc_cls_act_func_proto(enum bpf_func_id return &bpf_skb_vlan_push_proto; case BPF_FUNC_skb_vlan_pop: return &bpf_skb_vlan_pop_proto; + case BPF_FUNC_skb_change_proto: + return &bpf_skb_change_proto_proto; + case BPF_FUNC_skb_change_type: + return &bpf_skb_change_type_proto; case BPF_FUNC_skb_get_tunnel_key: return &bpf_skb_get_tunnel_key_proto; case BPF_FUNC_skb_set_tunnel_key: @@@ -2064,13 -2395,27 +2395,27 @@@ return &bpf_redirect_proto; case BPF_FUNC_get_route_realm: return &bpf_get_route_realm_proto; + case BPF_FUNC_get_hash_recalc: + return &bpf_get_hash_recalc_proto; case BPF_FUNC_perf_event_output: - return bpf_get_event_output_proto(); + return &bpf_skb_event_output_proto; + case BPF_FUNC_get_smp_processor_id: + return &bpf_get_smp_processor_id_proto; + #ifdef CONFIG_SOCK_CGROUP_DATA + case BPF_FUNC_skb_in_cgroup: + return &bpf_skb_in_cgroup_proto; + #endif default: return sk_filter_func_proto(func_id); } }
+ static const struct bpf_func_proto * + xdp_func_proto(enum bpf_func_id func_id) + { + return sk_filter_func_proto(func_id); + } + static bool __is_valid_access(int off, int size, enum bpf_access_type type) { if (off < 0 || off >= sizeof(struct __sk_buff)) @@@ -2138,6 -2483,44 +2483,44 @@@ static bool tc_cls_act_is_valid_access( return __is_valid_access(off, size, type); }
+ static bool __is_valid_xdp_access(int off, int size, + enum bpf_access_type type) + { + if (off < 0 || off >= sizeof(struct xdp_md)) + return false; + if (off % size != 0) + return false; + if (size != 4) + return false; + + return true; + } + + static bool xdp_is_valid_access(int off, int size, + enum bpf_access_type type, + enum bpf_reg_type *reg_type) + { + if (type == BPF_WRITE) + return false; + + switch (off) { + case offsetof(struct xdp_md, data): + *reg_type = PTR_TO_PACKET; + break; + case offsetof(struct xdp_md, data_end): + *reg_type = PTR_TO_PACKET_END; + break; + } + + return __is_valid_xdp_access(off, size, type); + } + + void bpf_warn_invalid_xdp_action(u32 act) + { + WARN_ONCE(1, "Illegal XDP return value %u, expect packet loss\n", act); + } + EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action); + static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg, int src_reg, int ctx_off, struct bpf_insn *insn_buf, @@@ -2289,6 -2672,29 +2672,29 @@@ return insn - insn_buf; }
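Put together, the rules above admit exactly the shape below: xdp_md fields are readable only as aligned 32-bit words, data/data_end come back as verified packet pointers, writes to the context are refused, and a return value outside the XDP_* set is flagged by bpf_warn_invalid_xdp_action() in the driver. A minimal sketch, again assuming the samples/bpf wrappers:

#include <linux/bpf.h>
#include <asm/byteorder.h>
#include <linux/if_ether.h>
#include "bpf_helpers.h"

SEC("xdp")
int xdp_drop_ipv6(struct xdp_md *ctx)
{
	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;

	/* the bounds check the verifier insists on before any access */
	if (data + sizeof(*eth) > data_end)
		return XDP_ABORTED;
	if (eth->h_proto == __constant_htons(ETH_P_IPV6))
		return XDP_DROP;
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";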
+ static u32 xdp_convert_ctx_access(enum bpf_access_type type, int dst_reg, + int src_reg, int ctx_off, + struct bpf_insn *insn_buf, + struct bpf_prog *prog) + { + struct bpf_insn *insn = insn_buf; + + switch (ctx_off) { + case offsetof(struct xdp_md, data): + *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct xdp_buff, data)), + dst_reg, src_reg, + offsetof(struct xdp_buff, data)); + break; + case offsetof(struct xdp_md, data_end): + *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct xdp_buff, data_end)), + dst_reg, src_reg, + offsetof(struct xdp_buff, data_end)); + break; + } + + return insn - insn_buf; + } + static const struct bpf_verifier_ops sk_filter_ops = { .get_func_proto = sk_filter_func_proto, .is_valid_access = sk_filter_is_valid_access, @@@ -2301,6 -2707,12 +2707,12 @@@ static const struct bpf_verifier_ops tc .convert_ctx_access = bpf_net_convert_ctx_access, };
+ static const struct bpf_verifier_ops xdp_ops = { + .get_func_proto = xdp_func_proto, + .is_valid_access = xdp_is_valid_access, + .convert_ctx_access = xdp_convert_ctx_access, + }; + static struct bpf_prog_type_list sk_filter_type __read_mostly = { .ops = &sk_filter_ops, .type = BPF_PROG_TYPE_SOCKET_FILTER, @@@ -2316,11 -2728,17 +2728,17 @@@ static struct bpf_prog_type_list sched_ .type = BPF_PROG_TYPE_SCHED_ACT, };
+ static struct bpf_prog_type_list xdp_type __read_mostly = { + .ops = &xdp_ops, + .type = BPF_PROG_TYPE_XDP, + }; + static int __init register_sk_filter_ops(void) { bpf_register_prog_type(&sk_filter_type); bpf_register_prog_type(&sched_cls_type); bpf_register_prog_type(&sched_act_type); + bpf_register_prog_type(&xdp_type);
return 0; } diff --combined net/ipv4/tcp_input.c index 42bf89a,94d4aff..f9f9e37 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@@ -87,7 -87,7 +87,7 @@@ int sysctl_tcp_adv_win_scale __read_mos EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
/* rfc5961 challenge ack rate limiting */ -int sysctl_tcp_challenge_ack_limit = 100; +int sysctl_tcp_challenge_ack_limit = 1000;
int sysctl_tcp_stdurg __read_mostly; int sysctl_tcp_rfc1337 __read_mostly; @@@ -3115,6 -3115,7 +3115,7 @@@ static int tcp_clean_rtx_queue(struct s long ca_rtt_us = -1L; struct sk_buff *skb; u32 pkts_acked = 0; + u32 last_in_flight = 0; bool rtt_update; int flag = 0;
@@@ -3154,6 -3155,7 +3155,7 @@@ if (!first_ackt.v64) first_ackt = last_ackt;
+ last_in_flight = TCP_SKB_CB(skb)->tx.in_flight; reord = min(pkts_acked, reord); if (!after(scb->end_seq, tp->high_seq)) flag |= FLAG_ORIG_SACK_ACKED; @@@ -3250,7 -3252,8 +3252,8 @@@
if (icsk->icsk_ca_ops->pkts_acked) { struct ack_sample sample = { .pkts_acked = pkts_acked, - .rtt_us = ca_rtt_us }; + .rtt_us = ca_rtt_us, + .in_flight = last_in_flight };
icsk->icsk_ca_ops->pkts_acked(sk, &sample); } @@@ -3421,23 -3424,6 +3424,23 @@@ static int tcp_ack_update_window(struc return flag; }
+static bool __tcp_oow_rate_limited(struct net *net, int mib_idx, + u32 *last_oow_ack_time) +{ + if (*last_oow_ack_time) { + s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time); + + if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) { + NET_INC_STATS(net, mib_idx); + return true; /* rate-limited: don't send yet! */ + } + } + + *last_oow_ack_time = tcp_time_stamp; + + return false; /* not rate-limited: go ahead, send dupack now! */ +} + /* Return true if we're currently rate-limiting out-of-window ACKs and * thus shouldn't send a dupack right now. We rate-limit dupacks in * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS @@@ -3451,9 -3437,21 +3454,9 @@@ bool tcp_oow_rate_limited(struct net *n /* Data packets without SYNs are not likely part of an ACK loop. */ if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) && !tcp_hdr(skb)->syn) - goto not_rate_limited; - - if (*last_oow_ack_time) { - s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time); - - if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) { - NET_INC_STATS(net, mib_idx); - return true; /* rate-limited: don't send yet! */ - } - } - - *last_oow_ack_time = tcp_time_stamp; + return false;
-not_rate_limited: - return false; /* not rate-limited: go ahead, send dupack now! */ + return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time); }
/* RFC 5961 7 [ACK Throttling] */ @@@ -3463,26 -3461,21 +3466,26 @@@ static void tcp_send_challenge_ack(stru static u32 challenge_timestamp; static unsigned int challenge_count; struct tcp_sock *tp = tcp_sk(sk); - u32 now; + u32 count, now;
/* First check our per-socket dupack rate limit. */ - if (tcp_oow_rate_limited(sock_net(sk), skb, - LINUX_MIB_TCPACKSKIPPEDCHALLENGE, - &tp->last_oow_ack_time)) + if (__tcp_oow_rate_limited(sock_net(sk), + LINUX_MIB_TCPACKSKIPPEDCHALLENGE, + &tp->last_oow_ack_time)) return;
- /* Then check the check host-wide RFC 5961 rate limit. */ + /* Then check host-wide RFC 5961 rate limit. */ now = jiffies / HZ; if (now != challenge_timestamp) { + u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1; + challenge_timestamp = now; - challenge_count = 0; + WRITE_ONCE(challenge_count, half + + prandom_u32_max(sysctl_tcp_challenge_ack_limit)); } - if (++challenge_count <= sysctl_tcp_challenge_ack_limit) { + count = READ_ONCE(challenge_count); + if (count > 0) { + WRITE_ONCE(challenge_count, count - 1); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); tcp_send_ack(sk); } @@@ -5169,6 -5162,7 +5172,7 @@@ static bool tcp_validate_incoming(struc const struct tcphdr *th, int syn_inerr) { struct tcp_sock *tp = tcp_sk(sk); + bool rst_seq_match = false;
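Worked through with the new default limit of 1000 set earlier in this file, the per-second budget above becomes:

	half  = (1000 + 1) >> 1              /* = 500 */
	count = half + prandom_u32_max(1000) /* uniform in [500, 1499] */

so the average stays at roughly the configured limit, but an off-path attacker can no longer recover the counter's exact phase by counting challenge ACKs in a one-second window, which is what made the old deterministic "++challenge_count <= limit" scheme usable as a side channel for blind in-window injection. The READ_ONCE()/WRITE_ONCE() pair only keeps the unlocked global update tear-free; the count is deliberately approximate.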
/* RFC1323: H1. Apply PAWS check first. */ if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && @@@ -5205,13 -5199,32 +5209,32 @@@
/* Step 2: check RST bit */ if (th->rst) { - /* RFC 5961 3.2 : - * If sequence number exactly matches RCV.NXT, then + /* RFC 5961 3.2 (extend to match against SACK too if available): + * If seq num matches RCV.NXT or the right-most SACK block, + * then * RESET the connection * else * Send a challenge ACK */ - if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) + if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { + rst_seq_match = true; + } else if (tcp_is_sack(tp) && tp->rx_opt.num_sacks > 0) { + struct tcp_sack_block *sp = &tp->selective_acks[0]; + int max_sack = sp[0].end_seq; + int this_sack; + + for (this_sack = 1; this_sack < tp->rx_opt.num_sacks; + ++this_sack) { + max_sack = after(sp[this_sack].end_seq, + max_sack) ? + sp[this_sack].end_seq : max_sack; + } + + if (TCP_SKB_CB(skb)->seq == max_sack) + rst_seq_match = true; + } + + if (rst_seq_match) tcp_reset(sk); else tcp_send_challenge_ack(sk, skb); diff --combined net/ipv6/udp.c index acc09705,0a71a312d..ad5292b --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@@ -620,8 -620,6 +620,8 @@@ int udpv6_queue_rcv_skb(struct sock *sk
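A concrete example of the extended RFC 5961 3.2 rule just above: with rcv_nxt = 1000 and SACK blocks [2000,3000) and [4000,5000) on the receive queue, an incoming RST is honoured only if its seq is 1000 (exactly rcv_nxt) or 5000 (the right-most SACK edge, i.e. where a legitimate peer's sequence would stand after its tail retransmissions were SACKed); any other in-window seq still draws a challenge ACK. This keeps the blind-reset hardening while sparing real peers a long timeout when they reset after losing our ACKs.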
if (sk_filter(sk, skb)) goto drop; + if (unlikely(skb->len < sizeof(struct udphdr))) + goto drop;
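The new length check guards the udp_csum_pull_header() that follows: sk_filter() runs the socket's attached BPF program, whose return value truncates the skb, so by this point skb->len may legally be smaller than the 8-byte UDP header and the pull would otherwise run past the end of the packet. Any accept-and-truncate filter triggers it, for example the one-instruction classic BPF program

	struct sock_filter code[] = { BPF_STMT(BPF_RET | BPF_K, 4) };

attached via SO_ATTACH_FILTER, which keeps only 4 bytes of every datagram.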
udp_csum_pull_header(skb); if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { @@@ -1209,6 -1207,11 +1209,11 @@@ do_udp_sendmsg
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ if (ipc6.tclass < 0) + ipc6.tclass = np->tclass; + + fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); + dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); @@@ -1219,9 -1222,6 +1224,6 @@@ if (ipc6.hlimit < 0) ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
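The move matters because ip6_make_flowinfo() folds the traffic class into the flow information before the route lookup, so the lookup can take it into account; with the old placement (whose removal follows below) the lookup always saw tclass bits of zero. The packing itself, per the helper in include/net/ipv6.h, is just

	fl6.flowlabel = htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel;	/* IPV6_TCLASS_SHIFT == 20 */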
- if (ipc6.tclass < 0) - ipc6.tclass = np->tclass; - if (msg->msg_flags&MSG_CONFIRM) goto do_confirm; back_from_confirm: diff --combined net/netfilter/nf_conntrack_core.c index 9f530ad,153e33f..0ad9368 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@@ -327,16 -327,10 +327,10 @@@ struct nf_conn *nf_ct_tmpl_alloc(struc
tmpl->status = IPS_TEMPLATE; write_pnet(&tmpl->ct_net, net); - - if (nf_ct_zone_add(tmpl, flags, zone) < 0) - goto out_free; - + nf_ct_zone_add(tmpl, zone); atomic_set(&tmpl->ct_general.use, 0);
return tmpl; - out_free: - kfree(tmpl); - return NULL; } EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
@@@ -646,7 -640,6 +640,7 @@@ static int nf_ct_resolve_clash(struct n
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); if (l4proto->allow_clash && + !nfct_nat(ct) && !nf_ct_is_dying(ct) && atomic_inc_not_zero(&ct->ct_general.use)) { nf_ct_acct_merge(ct, ctinfo, (struct nf_conn *)skb->nfct); @@@ -930,16 -923,13 +924,13 @@@ __nf_conntrack_alloc(struct net *net offsetof(struct nf_conn, proto) - offsetof(struct nf_conn, __nfct_init_offset[0]));
- if (zone && nf_ct_zone_add(ct, GFP_ATOMIC, zone) < 0) - goto out_free; + nf_ct_zone_add(ct, zone);
/* Because we use RCU lookups, we set ct_general.use to zero before * this is inserted in any list. */ atomic_set(&ct->ct_general.use, 0); return ct; - out_free: - kmem_cache_free(nf_conntrack_cachep, ct); out: atomic_dec(&net->ct.count); return ERR_PTR(-ENOMEM); @@@ -1343,14 -1333,6 +1334,6 @@@ bool __nf_ct_kill_acct(struct nf_conn * } EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
- #ifdef CONFIG_NF_CONNTRACK_ZONES - static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = { - .len = sizeof(struct nf_conntrack_zone), - .align = __alignof__(struct nf_conntrack_zone), - .id = NF_CT_EXT_ZONE, - }; - #endif - #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
#include <linux/netfilter/nfnetlink.h> @@@ -1533,9 -1515,6 +1516,6 @@@ void nf_conntrack_cleanup_end(void
nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
- #ifdef CONFIG_NF_CONNTRACK_ZONES - nf_ct_extend_unregister(&nf_ct_zone_extend); - #endif nf_conntrack_proto_fini(); nf_conntrack_seqadj_fini(); nf_conntrack_labels_fini(); @@@ -1602,15 -1581,8 +1582,15 @@@ void *nf_ct_alloc_hashtable(unsigned in unsigned int nr_slots, i; size_t sz;
+ if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head))) + return NULL; + BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head)); nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head)); + + if (nr_slots > (UINT_MAX / sizeof(struct hlist_nulls_head))) + return NULL; + sz = nr_slots * sizeof(struct hlist_nulls_head); hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, get_order(sz)); @@@ -1625,24 -1597,14 +1605,14 @@@ } EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
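Both new guards are needed, one on each side of the roundup. Taking 8-byte hlist_nulls_head buckets and 4 KiB pages: the first check refuses raw requests above UINT_MAX / 8 = 536870911 buckets; a request of exactly 536870911 slips past it, gets rounded up to the next multiple of 4096 / 8 = 512 slots, i.e. 536871424, and is then refused by the second check, because 536871424 * 8 no longer fits in 32 bits and with a 32-bit size_t the sz computation would silently wrap to a much smaller allocation than the hash masks assume.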
- int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) + int nf_conntrack_hash_resize(unsigned int hashsize) { - int i, bucket, rc; - unsigned int hashsize, old_size; + int i, bucket; + unsigned int old_size; struct hlist_nulls_head *hash, *old_hash; struct nf_conntrack_tuple_hash *h; struct nf_conn *ct;
- if (current->nsproxy->net_ns != &init_net) - return -EOPNOTSUPP; - - /* On boot, we can set this without any fancy locking. */ - if (!nf_conntrack_htable_size) - return param_set_uint(val, kp); - - rc = kstrtouint(val, 0, &hashsize); - if (rc) - return rc; if (!hashsize) return -EINVAL;
@@@ -1650,6 -1612,12 +1620,12 @@@ if (!hash) return -ENOMEM;
+ old_size = nf_conntrack_htable_size; + if (old_size == hashsize) { + nf_ct_free_hashtable(hash, hashsize); + return 0; + } + local_bh_disable(); nf_conntrack_all_lock(); write_seqcount_begin(&nf_conntrack_generation); @@@ -1685,6 -1653,25 +1661,25 @@@ nf_ct_free_hashtable(old_hash, old_size); return 0; } + + int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) + { + unsigned int hashsize; + int rc; + + if (current->nsproxy->net_ns != &init_net) + return -EOPNOTSUPP; + + /* On boot, we can set this without any fancy locking. */ + if (!nf_conntrack_htable_size) + return param_set_uint(val, kp); + + rc = kstrtouint(val, 0, &hashsize); + if (rc) + return rc; + + return nf_conntrack_hash_resize(hashsize); + } EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
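The split leaves the policy checks (init_net only, lockless set before the table exists at boot) in the module-parameter handler while exposing the mechanism, nf_conntrack_hash_resize(), to the rest of the kernel; the new old_size == hashsize early-out also makes redundant resizes cheap. A hypothetical in-kernel user of the new entry point (the policy and function name here are invented; only nf_conntrack_htable_size and the resize call are real):

static void conntrack_maybe_grow(unsigned int ct_count)
{
	/* invented policy: tolerate at most ~4 entries per bucket */
	if (ct_count > nf_conntrack_htable_size * 4)
		nf_conntrack_hash_resize(nf_conntrack_htable_size * 2);
}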
module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint, @@@ -1741,7 -1728,7 +1736,7 @@@ int nf_conntrack_init_start(void
nf_conntrack_cachep = kmem_cache_create("nf_conntrack", sizeof(struct nf_conn), 0, - SLAB_DESTROY_BY_RCU, NULL); + SLAB_DESTROY_BY_RCU | SLAB_HWCACHE_ALIGN, NULL); if (!nf_conntrack_cachep) goto err_cachep;
@@@ -1781,11 -1768,6 +1776,6 @@@ if (ret < 0) goto err_seqadj;
- #ifdef CONFIG_NF_CONNTRACK_ZONES - ret = nf_ct_extend_register(&nf_ct_zone_extend); - if (ret < 0) - goto err_extend; - #endif ret = nf_conntrack_proto_init(); if (ret < 0) goto err_proto; @@@ -1801,10 -1783,6 +1791,6 @@@ return 0;
err_proto: - #ifdef CONFIG_NF_CONNTRACK_ZONES - nf_ct_extend_unregister(&nf_ct_zone_extend); - err_extend: - #endif nf_conntrack_seqadj_fini(); err_seqadj: nf_conntrack_labels_fini(); diff --combined net/netfilter/nf_tables_api.c index cf7c745,18b7f85..f24bed0 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@@ -131,29 -131,8 +131,8 @@@ static void nft_trans_destroy(struct nf kfree(trans); }
- static int nft_register_basechain(struct nft_base_chain *basechain, - unsigned int hook_nops) - { - struct net *net = read_pnet(&basechain->pnet); - - if (basechain->flags & NFT_BASECHAIN_DISABLED) - return 0; - - return nf_register_net_hooks(net, basechain->ops, hook_nops); - } - - static void nft_unregister_basechain(struct nft_base_chain *basechain, - unsigned int hook_nops) - { - struct net *net = read_pnet(&basechain->pnet); - - if (basechain->flags & NFT_BASECHAIN_DISABLED) - return; - - nf_unregister_net_hooks(net, basechain->ops, hook_nops); - } - - static int nf_tables_register_hooks(const struct nft_table *table, + static int nf_tables_register_hooks(struct net *net, + const struct nft_table *table, struct nft_chain *chain, unsigned int hook_nops) { @@@ -161,10 -140,12 +140,12 @@@ !(chain->flags & NFT_BASE_CHAIN)) return 0;
- return nft_register_basechain(nft_base_chain(chain), hook_nops); + return nf_register_net_hooks(net, nft_base_chain(chain)->ops, + hook_nops); }
- static void nf_tables_unregister_hooks(const struct nft_table *table, + static void nf_tables_unregister_hooks(struct net *net, + const struct nft_table *table, struct nft_chain *chain, unsigned int hook_nops) { @@@ -172,12 -153,9 +153,9 @@@ !(chain->flags & NFT_BASE_CHAIN)) return;
- nft_unregister_basechain(nft_base_chain(chain), hook_nops); + nf_unregister_net_hooks(net, nft_base_chain(chain)->ops, hook_nops); }
- /* Internal table flags */ - #define NFT_TABLE_INACTIVE (1 << 15) - static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type) { struct nft_trans *trans; @@@ -187,7 -165,7 +165,7 @@@ return -ENOMEM;
if (msg_type == NFT_MSG_NEWTABLE) - ctx->table->flags |= NFT_TABLE_INACTIVE; + nft_activate_next(ctx->net, ctx->table);
list_add_tail(&trans->list, &ctx->net->nft.commit_list); return 0; @@@ -201,7 -179,7 +179,7 @@@ static int nft_deltable(struct nft_ctx if (err < 0) return err;
- list_del_rcu(&ctx->table->list); + nft_deactivate_next(ctx->net, ctx->table); return err; }
@@@ -214,7 -192,7 +192,7 @@@ static int nft_trans_chain_add(struct n return -ENOMEM;
if (msg_type == NFT_MSG_NEWCHAIN) - ctx->chain->flags |= NFT_CHAIN_INACTIVE; + nft_activate_next(ctx->net, ctx->chain);
list_add_tail(&trans->list, &ctx->net->nft.commit_list); return 0; @@@ -229,47 -207,17 +207,17 @@@ static int nft_delchain(struct nft_ctx return err;
ctx->table->use--; - list_del_rcu(&ctx->chain->list); + nft_deactivate_next(ctx->net, ctx->chain);
return err; }
- static inline bool - nft_rule_is_active(struct net *net, const struct nft_rule *rule) - { - return (rule->genmask & nft_genmask_cur(net)) == 0; - } - - static inline int - nft_rule_is_active_next(struct net *net, const struct nft_rule *rule) - { - return (rule->genmask & nft_genmask_next(net)) == 0; - } - - static inline void - nft_rule_activate_next(struct net *net, struct nft_rule *rule) - { - /* Now inactive, will be active in the future */ - rule->genmask = nft_genmask_cur(net); - } - - static inline void - nft_rule_deactivate_next(struct net *net, struct nft_rule *rule) - { - rule->genmask = nft_genmask_next(net); - } - - static inline void nft_rule_clear(struct net *net, struct nft_rule *rule) - { - rule->genmask &= ~nft_genmask_next(net); - } - static int nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule) { /* You cannot delete the same rule twice */ - if (nft_rule_is_active_next(ctx->net, rule)) { - nft_rule_deactivate_next(ctx->net, rule); + if (nft_is_active_next(ctx->net, rule)) { + nft_deactivate_next(ctx->net, rule); ctx->chain->use--; return 0; } @@@ -322,9 -270,6 +270,6 @@@ static int nft_delrule_by_chain(struct return 0; }
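All of the rule-only helpers deleted above are superseded by generic generation-mask macros that tables, chains, rules and sets now share, which is what the genmask threading through the rest of this file implements. The model, simplified from include/net/netfilter/nf_tables.h (a set bit means "invisible in that generation", so commit only has to flip net->nft.gencursor):

	nft_genmask_cur(net)         1 << gencursor
	nft_genmask_next(net)        1 << !gencursor
	nft_is_active(net, o)        ((o)->genmask & nft_genmask_cur(net))  == 0
	nft_is_active_next(net, o)   ((o)->genmask & nft_genmask_next(net)) == 0
	nft_activate_next(net, o)    genmask = cur bit  (new object: hidden now, live after commit)
	nft_deactivate_next(net, o)  genmask = next bit (deleted object: live now, gone after commit)
	nft_clear(net, o)            genmask = 0        (visible in both generations)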
- /* Internal set flag */ - #define NFT_SET_INACTIVE (1 << 15) - static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type, struct nft_set *set) { @@@ -337,7 -282,7 +282,7 @@@ if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] != NULL) { nft_trans_set_id(trans) = ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID])); - set->flags |= NFT_SET_INACTIVE; + nft_activate_next(ctx->net, set); } nft_trans_set(trans) = set; list_add_tail(&trans->list, &ctx->net->nft.commit_list); @@@ -353,7 -298,7 +298,7 @@@ static int nft_delset(struct nft_ctx *c if (err < 0) return err;
- list_del_rcu(&set->list); + nft_deactivate_next(ctx->net, set); ctx->table->use--;
return err; @@@ -364,26 -309,29 +309,29 @@@ */
static struct nft_table *nft_table_lookup(const struct nft_af_info *afi, - const struct nlattr *nla) + const struct nlattr *nla, + u8 genmask) { struct nft_table *table;
list_for_each_entry(table, &afi->tables, list) { - if (!nla_strcmp(nla, table->name)) + if (!nla_strcmp(nla, table->name) && + nft_active_genmask(table, genmask)) return table; } return NULL; }
static struct nft_table *nf_tables_table_lookup(const struct nft_af_info *afi, - const struct nlattr *nla) + const struct nlattr *nla, + u8 genmask) { struct nft_table *table;
if (nla == NULL) return ERR_PTR(-EINVAL);
- table = nft_table_lookup(afi, nla); + table = nft_table_lookup(afi, nla, genmask); if (table != NULL) return table;
@@@ -524,6 -472,8 +472,8 @@@ static int nf_tables_dump_tables(struc if (idx > s_idx) memset(&cb->args[1], 0, sizeof(cb->args) - sizeof(cb->args[0])); + if (!nft_is_active(net, table)) + continue; if (nf_tables_fill_table_info(skb, net, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, @@@ -548,6 -498,7 +498,7 @@@ static int nf_tables_gettable(struct ne const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); + u8 genmask = nft_genmask_cur(net); const struct nft_af_info *afi; const struct nft_table *table; struct sk_buff *skb2; @@@ -565,11 -516,9 +516,9 @@@ if (IS_ERR(afi)) return PTR_ERR(afi);
- table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]); + table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME], genmask); if (IS_ERR(table)) return PTR_ERR(table); - if (table->flags & NFT_TABLE_INACTIVE) - return -ENOENT;
skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb2) @@@ -588,17 -537,21 +537,21 @@@ err return err; }
- static int nf_tables_table_enable(const struct nft_af_info *afi, + static int nf_tables_table_enable(struct net *net, + const struct nft_af_info *afi, struct nft_table *table) { struct nft_chain *chain; int err, i = 0;
list_for_each_entry(chain, &table->chains, list) { + if (!nft_is_active_next(net, chain)) + continue; if (!(chain->flags & NFT_BASE_CHAIN)) continue;
- err = nft_register_basechain(nft_base_chain(chain), afi->nops); + err = nf_register_net_hooks(net, nft_base_chain(chain)->ops, + afi->nops); if (err < 0) goto err;
@@@ -607,26 -560,34 +560,34 @@@ return 0; err: list_for_each_entry(chain, &table->chains, list) { + if (!nft_is_active_next(net, chain)) + continue; if (!(chain->flags & NFT_BASE_CHAIN)) continue;
if (i-- <= 0) break;
- nft_unregister_basechain(nft_base_chain(chain), afi->nops); + nf_unregister_net_hooks(net, nft_base_chain(chain)->ops, + afi->nops); } return err; }
- static void nf_tables_table_disable(const struct nft_af_info *afi, + static void nf_tables_table_disable(struct net *net, + const struct nft_af_info *afi, struct nft_table *table) { struct nft_chain *chain;
list_for_each_entry(chain, &table->chains, list) { - if (chain->flags & NFT_BASE_CHAIN) - nft_unregister_basechain(nft_base_chain(chain), - afi->nops); + if (!nft_is_active_next(net, chain)) + continue; + if (!(chain->flags & NFT_BASE_CHAIN)) + continue; + + nf_unregister_net_hooks(net, nft_base_chain(chain)->ops, + afi->nops); } }
@@@ -656,7 -617,7 +617,7 @@@ static int nf_tables_updtable(struct nf nft_trans_table_enable(trans) = false; } else if (!(flags & NFT_TABLE_F_DORMANT) && ctx->table->flags & NFT_TABLE_F_DORMANT) { - ret = nf_tables_table_enable(ctx->afi, ctx->table); + ret = nf_tables_table_enable(ctx->net, ctx->afi, ctx->table); if (ret >= 0) { ctx->table->flags &= ~NFT_TABLE_F_DORMANT; nft_trans_table_enable(trans) = true; @@@ -678,6 -639,7 +639,7 @@@ static int nf_tables_newtable(struct ne const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); + u8 genmask = nft_genmask_next(net); const struct nlattr *name; struct nft_af_info *afi; struct nft_table *table; @@@ -691,7 -653,7 +653,7 @@@ return PTR_ERR(afi);
name = nla[NFTA_TABLE_NAME]; - table = nf_tables_table_lookup(afi, name); + table = nf_tables_table_lookup(afi, name, genmask); if (IS_ERR(table)) { if (PTR_ERR(table) != -ENOENT) return PTR_ERR(table); @@@ -699,8 -661,6 +661,6 @@@ }
if (table != NULL) { - if (table->flags & NFT_TABLE_INACTIVE) - return -ENOENT; if (nlh->nlmsg_flags & NLM_F_EXCL) return -EEXIST; if (nlh->nlmsg_flags & NLM_F_REPLACE) @@@ -752,6 -712,9 +712,9 @@@ static int nft_flush_table(struct nft_c struct nft_set *set, *ns;
list_for_each_entry(chain, &ctx->table->chains, list) { + if (!nft_is_active_next(ctx->net, chain)) + continue; + ctx->chain = chain;
err = nft_delrule_by_chain(ctx); @@@ -760,6 -723,9 +723,9 @@@ }
list_for_each_entry_safe(set, ns, &ctx->table->sets, list) { + if (!nft_is_active_next(ctx->net, set)) + continue; + if (set->flags & NFT_SET_ANONYMOUS && !list_empty(&set->bindings)) continue; @@@ -770,6 -736,9 +736,9 @@@ }
list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) { + if (!nft_is_active_next(ctx->net, chain)) + continue; + ctx->chain = chain;
err = nft_delchain(ctx); @@@ -795,6 -764,9 +764,9 @@@ static int nft_flush(struct nft_ctx *ct
ctx->afi = afi; list_for_each_entry_safe(table, nt, &afi->tables, list) { + if (!nft_is_active_next(ctx->net, table)) + continue; + if (nla[NFTA_TABLE_NAME] && nla_strcmp(nla[NFTA_TABLE_NAME], table->name) != 0) continue; @@@ -815,6 -787,7 +787,7 @@@ static int nf_tables_deltable(struct ne const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); + u8 genmask = nft_genmask_next(net); struct nft_af_info *afi; struct nft_table *table; int family = nfmsg->nfgen_family; @@@ -828,7 -801,7 +801,7 @@@ if (IS_ERR(afi)) return PTR_ERR(afi);
- table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]); + table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME], genmask); if (IS_ERR(table)) return PTR_ERR(table);
@@@ -875,12 -848,14 +848,14 @@@ EXPORT_SYMBOL_GPL(nft_unregister_chain_ */
static struct nft_chain * - nf_tables_chain_lookup_byhandle(const struct nft_table *table, u64 handle) + nf_tables_chain_lookup_byhandle(const struct nft_table *table, u64 handle, + u8 genmask) { struct nft_chain *chain;
list_for_each_entry(chain, &table->chains, list) { - if (chain->handle == handle) + if (chain->handle == handle && + nft_active_genmask(chain, genmask)) return chain; }
@@@ -888,7 -863,8 +863,8 @@@ }
static struct nft_chain *nf_tables_chain_lookup(const struct nft_table *table, - const struct nlattr *nla) + const struct nlattr *nla, + u8 genmask) { struct nft_chain *chain;
@@@ -896,7 -872,8 +872,8 @@@ return ERR_PTR(-EINVAL);
list_for_each_entry(chain, &table->chains, list) { - if (!nla_strcmp(nla, chain->name)) + if (!nla_strcmp(nla, chain->name) && + nft_active_genmask(chain, genmask)) return chain; }
@@@ -1079,6 -1056,8 +1056,8 @@@ static int nf_tables_dump_chains(struc if (idx > s_idx) memset(&cb->args[1], 0, sizeof(cb->args) - sizeof(cb->args[0])); + if (!nft_is_active(net, chain)) + continue; if (nf_tables_fill_chain_info(skb, net, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, @@@ -1104,6 -1083,7 +1083,7 @@@ static int nf_tables_getchain(struct ne const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); + u8 genmask = nft_genmask_cur(net); const struct nft_af_info *afi; const struct nft_table *table; const struct nft_chain *chain; @@@ -1122,17 -1102,13 +1102,13 @@@ if (IS_ERR(afi)) return PTR_ERR(afi);
- table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]); + table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE], genmask); if (IS_ERR(table)) return PTR_ERR(table); - if (table->flags & NFT_TABLE_INACTIVE) - return -ENOENT;
- chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]); + chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME], genmask); if (IS_ERR(chain)) return PTR_ERR(chain); - if (chain->flags & NFT_CHAIN_INACTIVE) - return -ENOENT;
skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb2) @@@ -1231,6 -1207,7 +1207,7 @@@ static int nf_tables_newchain(struct ne struct nft_chain *chain; struct nft_base_chain *basechain = NULL; struct nlattr *ha[NFTA_HOOK_MAX + 1]; + u8 genmask = nft_genmask_next(net); int family = nfmsg->nfgen_family; struct net_device *dev = NULL; u8 policy = NF_ACCEPT; @@@ -1247,7 -1224,7 +1224,7 @@@ if (IS_ERR(afi)) return PTR_ERR(afi);
- table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]); + table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE], genmask); if (IS_ERR(table)) return PTR_ERR(table);
@@@ -1256,11 -1233,11 +1233,11 @@@
if (nla[NFTA_CHAIN_HANDLE]) { handle = be64_to_cpu(nla_get_be64(nla[NFTA_CHAIN_HANDLE])); - chain = nf_tables_chain_lookup_byhandle(table, handle); + chain = nf_tables_chain_lookup_byhandle(table, handle, genmask); if (IS_ERR(chain)) return PTR_ERR(chain); } else { - chain = nf_tables_chain_lookup(table, name); + chain = nf_tables_chain_lookup(table, name, genmask); if (IS_ERR(chain)) { if (PTR_ERR(chain) != -ENOENT) return PTR_ERR(chain); @@@ -1291,16 -1268,20 +1268,20 @@@ struct nft_stats *stats = NULL; struct nft_trans *trans;
- if (chain->flags & NFT_CHAIN_INACTIVE) - return -ENOENT; if (nlh->nlmsg_flags & NLM_F_EXCL) return -EEXIST; if (nlh->nlmsg_flags & NLM_F_REPLACE) return -EOPNOTSUPP;
- if (nla[NFTA_CHAIN_HANDLE] && name && - !IS_ERR(nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]))) - return -EEXIST; + if (nla[NFTA_CHAIN_HANDLE] && name) { + struct nft_chain *chain2; + + chain2 = nf_tables_chain_lookup(table, + nla[NFTA_CHAIN_NAME], + genmask); + if (IS_ERR(chain2)) + return PTR_ERR(chain2); + }
if (nla[NFTA_CHAIN_COUNTERS]) { if (!(chain->flags & NFT_BASE_CHAIN)) @@@ -1455,7 -1436,7 +1436,7 @@@ chain->table = table; nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN);
- err = nf_tables_register_hooks(table, chain, afi->nops); + err = nf_tables_register_hooks(net, table, chain, afi->nops); if (err < 0) goto err1;
@@@ -1468,7 -1449,7 +1449,7 @@@ list_add_tail_rcu(&chain->list, &table->chains); return 0; err2: - nf_tables_unregister_hooks(table, chain, afi->nops); + nf_tables_unregister_hooks(net, table, chain, afi->nops); err1: nf_tables_chain_destroy(chain); return err; @@@ -1479,6 -1460,7 +1460,7 @@@ static int nf_tables_delchain(struct ne const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); + u8 genmask = nft_genmask_next(net); struct nft_af_info *afi; struct nft_table *table; struct nft_chain *chain; @@@ -1489,11 -1471,11 +1471,11 @@@ if (IS_ERR(afi)) return PTR_ERR(afi);
- table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]); + table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE], genmask); if (IS_ERR(table)) return PTR_ERR(table);
- chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]); + chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME], genmask); if (IS_ERR(chain)) return PTR_ERR(chain); if (chain->use > 0) @@@ -1724,11 -1706,9 +1706,11 @@@ struct nft_expr *nft_expr_init(const st
err = nf_tables_newexpr(ctx, &info, expr); if (err < 0) - goto err2; + goto err3;
return expr; +err3: + kfree(expr); err2: module_put(info.ops->type->owner); err1: @@@ -1900,7 -1880,7 +1882,7 @@@ static int nf_tables_dump_rules(struct list_for_each_entry_rcu(table, &afi->tables, list) { list_for_each_entry_rcu(chain, &table->chains, list) { list_for_each_entry_rcu(rule, &chain->rules, list) { - if (!nft_rule_is_active(net, rule)) + if (!nft_is_active(net, rule)) goto cont; if (idx < s_idx) goto cont; @@@ -1933,6 -1913,7 +1915,7 @@@ static int nf_tables_getrule(struct ne const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); + u8 genmask = nft_genmask_cur(net); const struct nft_af_info *afi; const struct nft_table *table; const struct nft_chain *chain; @@@ -1952,17 -1933,13 +1935,13 @@@ if (IS_ERR(afi)) return PTR_ERR(afi);
- table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]); + table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE], genmask); if (IS_ERR(table)) return PTR_ERR(table); - if (table->flags & NFT_TABLE_INACTIVE) - return -ENOENT;
- chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]); + chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN], genmask); if (IS_ERR(chain)) return PTR_ERR(chain); - if (chain->flags & NFT_CHAIN_INACTIVE) - return -ENOENT;
rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]); if (IS_ERR(rule)) @@@ -2011,6 -1988,7 +1990,7 @@@ static int nf_tables_newrule(struct ne const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); + u8 genmask = nft_genmask_next(net); struct nft_af_info *afi; struct nft_table *table; struct nft_chain *chain; @@@ -2031,11 -2009,11 +2011,11 @@@ if (IS_ERR(afi)) return PTR_ERR(afi);
- table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]); + table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE], genmask); if (IS_ERR(table)) return PTR_ERR(table);
- chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]); + chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN], genmask); if (IS_ERR(chain)) return PTR_ERR(chain);
@@@ -2104,7 -2082,7 +2084,7 @@@ if (rule == NULL) goto err1;
- nft_rule_activate_next(net, rule); + nft_activate_next(net, rule);
rule->handle = handle; rule->dlen = size; @@@ -2126,14 -2104,14 +2106,14 @@@ }
if (nlh->nlmsg_flags & NLM_F_REPLACE) { - if (nft_rule_is_active_next(net, old_rule)) { + if (nft_is_active_next(net, old_rule)) { trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE, old_rule); if (trans == NULL) { err = -ENOMEM; goto err2; } - nft_rule_deactivate_next(net, old_rule); + nft_deactivate_next(net, old_rule); chain->use--; list_add_tail_rcu(&rule->list, &old_rule->list); } else { @@@ -2176,6 -2154,7 +2156,7 @@@ static int nf_tables_delrule(struct ne const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); + u8 genmask = nft_genmask_next(net); struct nft_af_info *afi; struct nft_table *table; struct nft_chain *chain = NULL; @@@ -2187,12 -2166,13 +2168,13 @@@ if (IS_ERR(afi)) return PTR_ERR(afi);
- table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]); + table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE], genmask); if (IS_ERR(table)) return PTR_ERR(table);
if (nla[NFTA_RULE_CHAIN]) { - chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]); + chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN], + genmask); if (IS_ERR(chain)) return PTR_ERR(chain); } @@@ -2212,6 -2192,9 +2194,9 @@@ } } else { list_for_each_entry(chain, &table->chains, list) { + if (!nft_is_active_next(net, chain)) + continue; + ctx.chain = chain; err = nft_delrule_by_chain(&ctx); if (err < 0) @@@ -2341,7 -2324,8 +2326,8 @@@ static const struct nla_policy nft_set_ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net, const struct sk_buff *skb, const struct nlmsghdr *nlh, - const struct nlattr * const nla[]) + const struct nlattr * const nla[], + u8 genmask) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); struct nft_af_info *afi = NULL; @@@ -2357,7 -2341,8 +2343,8 @@@ if (afi == NULL) return -EAFNOSUPPORT;
- table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]); + table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE], + genmask); if (IS_ERR(table)) return PTR_ERR(table); } @@@ -2367,7 -2352,7 +2354,7 @@@ }
struct nft_set *nf_tables_set_lookup(const struct nft_table *table, - const struct nlattr *nla) + const struct nlattr *nla, u8 genmask) { struct nft_set *set;
@@@ -2375,22 -2360,27 +2362,27 @@@ return ERR_PTR(-EINVAL);
list_for_each_entry(set, &table->sets, list) { - if (!nla_strcmp(nla, set->name)) + if (!nla_strcmp(nla, set->name) && + nft_active_genmask(set, genmask)) return set; } return ERR_PTR(-ENOENT); }
struct nft_set *nf_tables_set_lookup_byid(const struct net *net, - const struct nlattr *nla) + const struct nlattr *nla, + u8 genmask) { struct nft_trans *trans; u32 id = ntohl(nla_get_be32(nla));
list_for_each_entry(trans, &net->nft.commit_list, list) { + struct nft_set *set = nft_trans_set(trans); + if (trans->msg_type == NFT_MSG_NEWSET && - id == nft_trans_set_id(trans)) - return nft_trans_set(trans); + id == nft_trans_set_id(trans) && + nft_active_genmask(set, genmask)) + return set; } return ERR_PTR(-ENOENT); } @@@ -2415,6 -2405,8 +2407,8 @@@ cont list_for_each_entry(i, &ctx->table->sets, list) { int tmp;
+ if (!nft_is_active_next(ctx->net, set)) + continue; if (!sscanf(i->name, name, &tmp)) continue; if (tmp < min || tmp >= min + BITS_PER_BYTE * PAGE_SIZE) @@@ -2434,6 -2426,8 +2428,8 @@@
snprintf(set->name, sizeof(set->name), name, min + n); list_for_each_entry(i, &ctx->table->sets, list) { + if (!nft_is_active_next(ctx->net, i)) + continue; if (!strcmp(set->name, i->name)) return -ENFILE; } @@@ -2582,6 -2576,8 +2578,8 @@@ static int nf_tables_dump_sets(struct s list_for_each_entry_rcu(set, &table->sets, list) { if (idx < s_idx) goto cont; + if (!nft_is_active(net, set)) + goto cont;
ctx_set = *ctx; ctx_set.table = table; @@@ -2618,6 -2614,7 +2616,7 @@@ static int nf_tables_getset(struct net struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { + u8 genmask = nft_genmask_cur(net); const struct nft_set *set; struct nft_ctx ctx; struct sk_buff *skb2; @@@ -2625,7 -2622,7 +2624,7 @@@ int err;
/* Verify existence before starting dump */ - err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla); + err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla, genmask); if (err < 0) return err;
@@@ -2652,11 -2649,9 +2651,9 @@@ if (!nla[NFTA_SET_TABLE]) return -EINVAL;
- set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]); + set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME], genmask); if (IS_ERR(set)) return PTR_ERR(set); - if (set->flags & NFT_SET_INACTIVE) - return -ENOENT;
skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (skb2 == NULL) @@@ -2695,6 -2690,7 +2692,7 @@@ static int nf_tables_newset(struct net const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); + u8 genmask = nft_genmask_next(net); const struct nft_set_ops *ops; struct nft_af_info *afi; struct nft_table *table; @@@ -2792,13 -2788,13 +2790,13 @@@ if (IS_ERR(afi)) return PTR_ERR(afi);
- table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]); + table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE], genmask); if (IS_ERR(table)) return PTR_ERR(table);
nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
- set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME]); + set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME], genmask); if (IS_ERR(set)) { if (PTR_ERR(set) != -ENOENT) return PTR_ERR(set); @@@ -2897,6 -2893,7 +2895,7 @@@ static int nf_tables_delset(struct net const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); + u8 genmask = nft_genmask_next(net); struct nft_set *set; struct nft_ctx ctx; int err; @@@ -2906,11 -2903,11 +2905,11 @@@ if (nla[NFTA_SET_TABLE] == NULL) return -EINVAL;
- err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla); + err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla, genmask); if (err < 0) return err;
- set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]); + set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME], genmask); if (IS_ERR(set)) return PTR_ERR(set); if (!list_empty(&set->bindings)) @@@ -2975,7 -2972,7 +2974,7 @@@ void nf_tables_unbind_set(const struct list_del_rcu(&binding->list);
if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS && - !(set->flags & NFT_SET_INACTIVE)) + nft_is_active(ctx->net, set)) nf_tables_set_destroy(ctx, set); }
@@@ -3031,7 -3028,8 +3030,8 @@@ static const struct nla_policy nft_set_ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, struct net *net, const struct sk_buff *skb, const struct nlmsghdr *nlh, - const struct nlattr * const nla[]) + const struct nlattr * const nla[], + u8 genmask) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); struct nft_af_info *afi; @@@ -3041,7 -3039,8 +3041,8 @@@ if (IS_ERR(afi)) return PTR_ERR(afi);
- table = nf_tables_table_lookup(afi, nla[NFTA_SET_ELEM_LIST_TABLE]); + table = nf_tables_table_lookup(afi, nla[NFTA_SET_ELEM_LIST_TABLE], + genmask); if (IS_ERR(table)) return PTR_ERR(table);
@@@ -3138,6 -3137,7 +3139,7 @@@ static int nf_tables_dump_setelem(cons static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); + u8 genmask = nft_genmask_cur(net); const struct nft_set *set; struct nft_set_dump_args args; struct nft_ctx ctx; @@@ -3154,17 -3154,14 +3156,14 @@@ return err;
err = nft_ctx_init_from_elemattr(&ctx, net, cb->skb, cb->nlh, - (void *)nla); + (void *)nla, genmask); if (err < 0) return err; - if (ctx.table->flags & NFT_TABLE_INACTIVE) - return -ENOENT;
- set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]); + set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], + genmask); if (IS_ERR(set)) return PTR_ERR(set); - if (set->flags & NFT_SET_INACTIVE) - return -ENOENT;
event = NFT_MSG_NEWSETELEM; event |= NFNL_SUBSYS_NFTABLES << 8; @@@ -3218,21 -3215,19 +3217,19 @@@ static int nf_tables_getsetelem(struct struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { + u8 genmask = nft_genmask_cur(net); const struct nft_set *set; struct nft_ctx ctx; int err;
- err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla); + err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, genmask); if (err < 0) return err; - if (ctx.table->flags & NFT_TABLE_INACTIVE) - return -ENOENT;
- set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]); + set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], + genmask); if (IS_ERR(set)) return PTR_ERR(set); - if (set->flags & NFT_SET_INACTIVE) - return -ENOENT;
if (nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { @@@ -3550,6 -3545,7 +3547,7 @@@ static int nf_tables_newsetelem(struct struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { + u8 genmask = nft_genmask_next(net); const struct nlattr *attr; struct nft_set *set; struct nft_ctx ctx; @@@ -3558,15 -3554,17 +3556,17 @@@ if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) return -EINVAL;
- err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla); + err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, genmask); if (err < 0) return err;
- set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]); + set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], + genmask); if (IS_ERR(set)) { if (nla[NFTA_SET_ELEM_LIST_SET_ID]) { set = nf_tables_set_lookup_byid(net, - nla[NFTA_SET_ELEM_LIST_SET_ID]); + nla[NFTA_SET_ELEM_LIST_SET_ID], + genmask); } if (IS_ERR(set)) return PTR_ERR(set); @@@ -3672,6 -3670,7 +3672,7 @@@ static int nf_tables_delsetelem(struct struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { + u8 genmask = nft_genmask_next(net); const struct nlattr *attr; struct nft_set *set; struct nft_ctx ctx; @@@ -3680,11 -3679,12 +3681,12 @@@ if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) return -EINVAL;
- err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla); + err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, genmask); if (err < 0) return err;
- set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]); + set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], + genmask); if (IS_ERR(set)) return PTR_ERR(set); if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT) @@@ -3952,36 -3952,40 +3954,40 @@@ static int nf_tables_commit(struct net case NFT_MSG_NEWTABLE: if (nft_trans_table_update(trans)) { if (!nft_trans_table_enable(trans)) { - nf_tables_table_disable(trans->ctx.afi, + nf_tables_table_disable(net, + trans->ctx.afi, trans->ctx.table); trans->ctx.table->flags |= NFT_TABLE_F_DORMANT; } } else { - trans->ctx.table->flags &= ~NFT_TABLE_INACTIVE; + nft_clear(net, trans->ctx.table); } nf_tables_table_notify(&trans->ctx, NFT_MSG_NEWTABLE); nft_trans_destroy(trans); break; case NFT_MSG_DELTABLE: + list_del_rcu(&trans->ctx.table->list); nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE); break; case NFT_MSG_NEWCHAIN: if (nft_trans_chain_update(trans)) nft_chain_commit_update(trans); else - trans->ctx.chain->flags &= ~NFT_CHAIN_INACTIVE; + nft_clear(net, trans->ctx.chain);
nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); nft_trans_destroy(trans); break; case NFT_MSG_DELCHAIN: + list_del_rcu(&trans->ctx.chain->list); nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN); - nf_tables_unregister_hooks(trans->ctx.table, + nf_tables_unregister_hooks(trans->ctx.net, + trans->ctx.table, trans->ctx.chain, trans->ctx.afi->nops); break; case NFT_MSG_NEWRULE: - nft_rule_clear(trans->ctx.net, nft_trans_rule(trans)); + nft_clear(trans->ctx.net, nft_trans_rule(trans)); nf_tables_rule_notify(&trans->ctx, nft_trans_rule(trans), NFT_MSG_NEWRULE); @@@ -3994,7 -3998,7 +4000,7 @@@ NFT_MSG_DELRULE); break; case NFT_MSG_NEWSET: - nft_trans_set(trans)->flags &= ~NFT_SET_INACTIVE; + nft_clear(net, nft_trans_set(trans)); /* This avoids hitting -EBUSY when deleting the table * from the transaction. */ @@@ -4007,6 -4011,7 +4013,7 @@@ nft_trans_destroy(trans); break; case NFT_MSG_DELSET: + list_del_rcu(&nft_trans_set(trans)->list); nf_tables_set_notify(&trans->ctx, nft_trans_set(trans), NFT_MSG_DELSET, GFP_KERNEL); break; @@@ -4078,7 -4083,8 +4085,8 @@@ static int nf_tables_abort(struct net * case NFT_MSG_NEWTABLE: if (nft_trans_table_update(trans)) { if (nft_trans_table_enable(trans)) { - nf_tables_table_disable(trans->ctx.afi, + nf_tables_table_disable(net, + trans->ctx.afi, trans->ctx.table); trans->ctx.table->flags |= NFT_TABLE_F_DORMANT; } @@@ -4088,8 -4094,7 +4096,7 @@@ } break; case NFT_MSG_DELTABLE: - list_add_tail_rcu(&trans->ctx.table->list, - &trans->ctx.afi->tables); + nft_clear(trans->ctx.net, trans->ctx.table); nft_trans_destroy(trans); break; case NFT_MSG_NEWCHAIN: @@@ -4100,15 -4105,15 +4107,15 @@@ } else { trans->ctx.table->use--; list_del_rcu(&trans->ctx.chain->list); - nf_tables_unregister_hooks(trans->ctx.table, + nf_tables_unregister_hooks(trans->ctx.net, + trans->ctx.table, trans->ctx.chain, trans->ctx.afi->nops); } break; case NFT_MSG_DELCHAIN: trans->ctx.table->use++; - list_add_tail_rcu(&trans->ctx.chain->list, - &trans->ctx.table->chains); + nft_clear(trans->ctx.net, trans->ctx.chain); nft_trans_destroy(trans); break; case NFT_MSG_NEWRULE: @@@ -4117,7 -4122,7 +4124,7 @@@ break; case NFT_MSG_DELRULE: trans->ctx.chain->use++; - nft_rule_clear(trans->ctx.net, nft_trans_rule(trans)); + nft_clear(trans->ctx.net, nft_trans_rule(trans)); nft_trans_destroy(trans); break; case NFT_MSG_NEWSET: @@@ -4126,8 -4131,7 +4133,7 @@@ break; case NFT_MSG_DELSET: trans->ctx.table->use++; - list_add_tail_rcu(&nft_trans_set(trans)->list, - &trans->ctx.table->sets); + nft_clear(trans->ctx.net, nft_trans_set(trans)); nft_trans_destroy(trans); break; case NFT_MSG_NEWSETELEM: @@@ -4274,6 -4278,8 +4280,8 @@@ static int nf_tables_check_loops(const }
list_for_each_entry(set, &ctx->table->sets, list) { + if (!nft_is_active_next(ctx->net, set)) + continue; if (!(set->flags & NFT_SET_MAP) || set->dtype != NFT_DATA_VERDICT) continue; @@@ -4432,6 -4438,7 +4440,7 @@@ static const struct nla_policy nft_verd static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, struct nft_data_desc *desc, const struct nlattr *nla) { + u8 genmask = nft_genmask_next(ctx->net); struct nlattr *tb[NFTA_VERDICT_MAX + 1]; struct nft_chain *chain; int err; @@@ -4464,7 -4471,7 +4473,7 @@@ if (!tb[NFTA_VERDICT_CHAIN]) return -EINVAL; chain = nf_tables_chain_lookup(ctx->table, - tb[NFTA_VERDICT_CHAIN]); + tb[NFTA_VERDICT_CHAIN], genmask); if (IS_ERR(chain)) return PTR_ERR(chain); if (chain->flags & NFT_BASE_CHAIN) @@@ -4642,7 -4649,7 +4651,7 @@@ int __nft_release_basechain(struct nft_
BUG_ON(!(ctx->chain->flags & NFT_BASE_CHAIN));
- nf_tables_unregister_hooks(ctx->chain->table, ctx->chain, + nf_tables_unregister_hooks(ctx->net, ctx->chain->table, ctx->chain, ctx->afi->nops); list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) { list_del(&rule->list); @@@ -4671,7 -4678,8 +4680,8 @@@ static void __nft_release_afinfo(struc
list_for_each_entry_safe(table, nt, &afi->tables, list) { list_for_each_entry(chain, &table->chains, list) - nf_tables_unregister_hooks(table, chain, afi->nops); + nf_tables_unregister_hooks(net, table, chain, + afi->nops); /* No packets are walking on these chains anymore. */ ctx.table = table; list_for_each_entry(chain, &table->chains, list) { diff --combined net/netfilter/nft_meta.c index f4bad9d,03e5e33..2863f34 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@@ -199,13 -199,6 +199,6 @@@ err } EXPORT_SYMBOL_GPL(nft_meta_get_eval);
- /* don't change or set _LOOPBACK, _USER, etc. */ - static bool pkt_type_ok(u32 p) - { - return p == PACKET_HOST || p == PACKET_BROADCAST || - p == PACKET_MULTICAST || p == PACKET_OTHERHOST; - } - void nft_meta_set_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) @@@ -223,11 -216,11 +216,11 @@@ break; case NFT_META_PKTTYPE: if (skb->pkt_type != value && - pkt_type_ok(value) && pkt_type_ok(skb->pkt_type)) + skb_pkt_type_ok(value) && skb_pkt_type_ok(skb->pkt_type)) skb->pkt_type = value; break; case NFT_META_NFTRACE: - skb->nf_trace = 1; + skb->nf_trace = !!value; break; default: WARN_ON(1); diff --combined net/packet/af_packet.c index b43c401,9d92c4c..33a4697 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@@ -1588,13 -1588,9 +1588,9 @@@ static int fanout_set_data_ebpf(struct if (copy_from_user(&fd, data, len)) return -EFAULT;
- new = bpf_prog_get(fd); + new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); if (IS_ERR(new)) return PTR_ERR(new); - if (new->type != BPF_PROG_TYPE_SOCKET_FILTER) { - bpf_prog_put(new); - return -EINVAL; - }
__fanout_set_data_bpf(po->fanout, new); return 0; @@@ -1927,11 -1923,13 +1923,11 @@@ retry goto out_unlock; }
- sockc.tsflags = 0; + sockc.tsflags = sk->sk_tsflags; if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); - if (unlikely(err)) { - err = -EINVAL; + if (unlikely(err)) goto out_unlock; - } }
skb->protocol = proto; @@@ -1977,40 -1975,8 +1973,8 @@@ static int __packet_rcv_vnet(const stru { *vnet_hdr = (const struct virtio_net_hdr) { 0 };
- if (skb_is_gso(skb)) { - struct skb_shared_info *sinfo = skb_shinfo(skb); - - /* This is a hint as to how much should be linear. */ - vnet_hdr->hdr_len = - __cpu_to_virtio16(vio_le(), skb_headlen(skb)); - vnet_hdr->gso_size = - __cpu_to_virtio16(vio_le(), sinfo->gso_size); - - if (sinfo->gso_type & SKB_GSO_TCPV4) - vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; - else if (sinfo->gso_type & SKB_GSO_TCPV6) - vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; - else if (sinfo->gso_type & SKB_GSO_UDP) - vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP; - else if (sinfo->gso_type & SKB_GSO_FCOE) - return -EINVAL; - else - BUG(); - - if (sinfo->gso_type & SKB_GSO_TCP_ECN) - vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; - } else - vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE; - - if (skb->ip_summed == CHECKSUM_PARTIAL) { - vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; - vnet_hdr->csum_start = __cpu_to_virtio16(vio_le(), - skb_checksum_start_offset(skb)); - vnet_hdr->csum_offset = __cpu_to_virtio16(vio_le(), - skb->csum_offset); - } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { - vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID; - } /* else everything is zero */ + if (virtio_net_hdr_from_skb(skb, vnet_hdr, vio_le())) + BUG();
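The forty-odd deleted lines were an open-coded copy of the GSO/checksum translation that tun and macvtap carry as well; all of them can now share one helper. Its contract, as relied on here (a sketch of the signature in include/linux/virtio_net.h, not the body):

	/* 0 on success; negative (-EINVAL) for GSO types virtio cannot
	 * express, hence the BUG() above on packets the caller has
	 * already validated.
	 */
	int virtio_net_hdr_from_skb(const struct sk_buff *skb,
				    struct virtio_net_hdr *hdr,
				    bool little_endian);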
return 0; } @@@ -2676,7 -2642,7 +2640,7 @@@ static int tpacket_snd(struct packet_so dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); }
- sockc.tsflags = 0; + sockc.tsflags = po->sk.sk_tsflags; if (msg->msg_controllen) { err = sock_cmsg_send(&po->sk, msg, &sockc); if (unlikely(err)) @@@ -2879,7 -2845,7 +2843,7 @@@ static int packet_snd(struct socket *so if (unlikely(!(dev->flags & IFF_UP))) goto out_unlock;
- sockc.tsflags = 0; + sockc.tsflags = sk->sk_tsflags; sockc.mark = sk->sk_mark; if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); diff --combined net/sched/sch_htb.c index 052f84d,91982d9..53dbfa1 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@@ -117,7 -117,6 +117,6 @@@ struct htb_class * Written often fields */ struct gnet_stats_basic_packed bstats; - struct gnet_stats_queue qstats; struct tc_htb_xstats xstats; /* our special stats */
/* token bucket parameters */ @@@ -140,6 -139,8 +139,8 @@@ enum htb_cmode cmode; /* current mode of the class */ struct rb_node pq_node; /* node for event queue */ struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */ + + unsigned int drops ____cacheline_aligned_in_smp; };
struct htb_level { @@@ -569,7 -570,8 +570,8 @@@ static inline void htb_deactivate(struc list_del_init(&cl->un.leaf.drop_list); }
- static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) + static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { int uninitialized_var(ret); struct htb_sched *q = qdisc_priv(sch); @@@ -581,19 -583,20 +583,20 @@@ __skb_queue_tail(&q->direct_queue, skb); q->direct_pkts++; } else { - return qdisc_drop(skb, sch); + return qdisc_drop(skb, sch, to_free); } #ifdef CONFIG_NET_CLS_ACT } else if (!cl) { if (ret & __NET_XMIT_BYPASS) qdisc_qstats_drop(sch); - kfree_skb(skb); + __qdisc_drop(skb, to_free); return ret; #endif - } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) { + } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q, + to_free)) != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) { qdisc_qstats_drop(sch); - cl->qstats.drops++; + cl->drops++; } return ret; } else { @@@ -889,7 -892,6 +892,6 @@@ static struct sk_buff *htb_dequeue(stru if (skb != NULL) { ok: qdisc_bstats_update(sch, skb); - qdisc_unthrottled(sch); qdisc_qstats_backlog_dec(sch, skb); sch->q.qlen--; return skb; @@@ -929,38 -931,13 +931,13 @@@ } qdisc_qstats_overlimit(sch); if (likely(next_event > q->now)) - qdisc_watchdog_schedule_ns(&q->watchdog, next_event, true); + qdisc_watchdog_schedule_ns(&q->watchdog, next_event); else schedule_work(&q->work); fin: return skb; }
- /* try to drop from each class (by prio) until one succeed */ - static unsigned int htb_drop(struct Qdisc *sch) - { - struct htb_sched *q = qdisc_priv(sch); - int prio; - - for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) { - struct list_head *p; - list_for_each(p, q->drops + prio) { - struct htb_class *cl = list_entry(p, struct htb_class, - un.leaf.drop_list); - unsigned int len; - if (cl->un.leaf.q->ops->drop && - (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) { - sch->qstats.backlog -= len; - sch->q.qlen--; - if (!cl->un.leaf.q->q.qlen) - htb_deactivate(q, cl); - return len; - } - } - } - return 0; - } - /* reset all classes */ /* always called under BH & queue lock */ static void htb_reset(struct Qdisc *sch) @@@ -983,7 -960,7 +960,7 @@@ } } qdisc_watchdog_cancel(&q->watchdog); - __skb_queue_purge(&q->direct_queue); + __qdisc_reset_queue(&q->direct_queue); sch->q.qlen = 0; sch->qstats.backlog = 0; memset(q->hlevel, 0, sizeof(q->hlevel)); @@@ -1136,18 -1113,22 +1113,24 @@@ static in htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) { struct htb_class *cl = (struct htb_class *)arg; + struct gnet_stats_queue qs = { + .drops = cl->drops, + }; __u32 qlen = 0;
- if (!cl->level && cl->un.leaf.q) + if (!cl->level && cl->un.leaf.q) { qlen = cl->un.leaf.q->q.qlen; + qs.backlog = cl->un.leaf.q->qstats.backlog; + } - cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens); - cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens); + cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens), + INT_MIN, INT_MAX); + cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens), + INT_MIN, INT_MAX);
- if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 || + if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), + d, NULL, &cl->bstats) < 0 || gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 || - gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0) + gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0) return -1;
return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); @@@ -1260,7 -1241,7 +1243,7 @@@ static void htb_destroy(struct Qdisc *s htb_destroy_class(sch, cl); } qdisc_class_hash_destroy(&q->clhash); - __skb_queue_purge(&q->direct_queue); + __qdisc_reset_queue(&q->direct_queue); }
static int htb_delete(struct Qdisc *sch, unsigned long arg) @@@ -1399,7 -1380,8 +1382,8 @@@ static int htb_change_class(struct Qdis if (htb_rate_est || tca[TCA_RATE]) { err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est, - qdisc_root_sleeping_lock(sch), + NULL, + qdisc_root_sleeping_running(sch), tca[TCA_RATE] ? : &est.nla); if (err) { kfree(cl); @@@ -1461,11 -1443,10 +1445,10 @@@ parent->children++; } else { if (tca[TCA_RATE]) { - spinlock_t *lock = qdisc_root_sleeping_lock(sch); - err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est, - lock, + NULL, + qdisc_root_sleeping_running(sch), tca[TCA_RATE]); if (err) return err; @@@ -1603,7 -1584,6 +1586,6 @@@ static struct Qdisc_ops htb_qdisc_ops _ .enqueue = htb_enqueue, .dequeue = htb_dequeue, .peek = qdisc_peek_dequeued, - .drop = htb_drop, .init = htb_init, .reset = htb_reset, .destroy = htb_destroy, diff --combined net/tipc/bearer.c index a597708,8584cc4..4131d5a --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@@ -1,7 -1,7 +1,7 @@@ /* * net/tipc/bearer.c: TIPC bearer code * - * Copyright (c) 1996-2006, 2013-2014, Ericsson AB + * Copyright (c) 1996-2006, 2013-2016, Ericsson AB * Copyright (c) 2004-2006, 2010-2013, Wind River Systems * All rights reserved. * @@@ -39,6 -39,7 +39,7 @@@ #include "bearer.h" #include "link.h" #include "discover.h" + #include "monitor.h" #include "bcast.h" #include "netlink.h"
@@@ -313,6 -314,10 +314,10 @@@ restart rcu_assign_pointer(tn->bearer_list[bearer_id], b); if (skb) tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr); + + if (tipc_mon_create(net, bearer_id)) + return -ENOMEM; + pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n", name, tipc_addr_string_fill(addr_string, disc_domain), priority); @@@ -330,21 -335,6 +335,21 @@@ static int tipc_reset_bearer(struct ne return 0; }
+/* tipc_bearer_reset_all - reset all links on all bearers + */ +void tipc_bearer_reset_all(struct net *net) +{ + struct tipc_net *tn = tipc_net(net); + struct tipc_bearer *b; + int i; + + for (i = 0; i < MAX_BEARERS; i++) { + b = rcu_dereference_rtnl(tn->bearer_list[i]); + if (b) + tipc_reset_bearer(net, b); + } +} + /** * bearer_disable * @@@ -363,6 -353,7 +368,7 @@@ static void bearer_disable(struct net * tipc_disc_delete(b->link_req); RCU_INIT_POINTER(tn->bearer_list[bearer_id], NULL); kfree_rcu(b, rcu); + tipc_mon_delete(net, bearer_id); }
int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, diff --combined net/tipc/bearer.h index 60e49c3,0d337c7..f1e6db5 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h @@@ -1,7 -1,7 +1,7 @@@ /* * net/tipc/bearer.h: Include file for TIPC bearer code * - * Copyright (c) 1996-2006, 2013-2014, Ericsson AB + * Copyright (c) 1996-2006, 2013-2016, Ericsson AB * Copyright (c) 2005, 2010-2011, Wind River Systems * All rights reserved. * @@@ -198,7 -198,6 +198,7 @@@ void tipc_bearer_add_dest(struct net *n void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest); struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name); struct tipc_media *tipc_media_find(const char *name); +void tipc_bearer_reset_all(struct net *net); int tipc_bearer_setup(void); void tipc_bearer_cleanup(void); void tipc_bearer_stop(struct net *net); diff --combined net/tipc/link.c index 7d89f87,c1df33f..877d94f --- a/net/tipc/link.c +++ b/net/tipc/link.c @@@ -42,6 -42,7 +42,7 @@@ #include "name_distr.h" #include "discover.h" #include "netlink.h" + #include "monitor.h"
#include <linux/pkt_sched.h>
@@@ -87,7 -88,6 +88,6 @@@ struct tipc_stats * @peer_bearer_id: bearer id used by link's peer endpoint * @bearer_id: local bearer id used by link * @tolerance: minimum link continuity loss needed to reset link [in ms] - * @keepalive_intv: link keepalive timer interval * @abort_limit: # of unacknowledged continuity probes needed to reset link * @state: current state of link FSM * @peer_caps: bitmap describing capabilities of peer node @@@ -96,6 -96,7 +96,7 @@@ * @pmsg: convenience pointer to "proto_msg" field * @priority: current link priority * @net_plane: current link network plane ('A' through 'H') + * @mon_state: cookie with information needed by link monitor * @backlog_limit: backlog queue congestion thresholds (indexed by importance) * @exp_msg_count: # of tunnelled messages expected during link changeover * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset @@@ -131,7 -132,6 +132,6 @@@ struct tipc_link u32 peer_bearer_id; u32 bearer_id; u32 tolerance; - unsigned long keepalive_intv; u32 abort_limit; u32 state; u16 peer_caps; @@@ -140,6 -140,7 +140,7 @@@ char if_name[TIPC_MAX_IF_NAME]; u32 priority; char net_plane; + struct tipc_mon_state mon_state; u16 rst_cnt;
/* Failover/synch */ @@@ -349,8 -350,6 +350,8 @@@ void tipc_link_remove_bc_peer(struct ti u16 ack = snd_l->snd_nxt - 1;
snd_l->ackers--; + rcv_l->bc_peer_is_up = true; + rcv_l->state = LINK_ESTABLISHED; tipc_link_bc_ack_rcv(rcv_l, ack, xmitq); tipc_link_reset(rcv_l); rcv_l->state = LINK_RESET; @@@ -713,18 -712,25 +714,25 @@@ int tipc_link_timeout(struct tipc_link bool setup = false; u16 bc_snt = l->bc_sndlink->snd_nxt - 1; u16 bc_acked = l->bc_rcvlink->acked; - - link_profile_stats(l); + struct tipc_mon_state *mstate = &l->mon_state;
switch (l->state) { case LINK_ESTABLISHED: case LINK_SYNCHING: - if (l->silent_intv_cnt > l->abort_limit) - return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); mtyp = STATE_MSG; + link_profile_stats(l); + tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id); + if (mstate->reset || (l->silent_intv_cnt > l->abort_limit)) + return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); state = bc_acked != bc_snt; - probe = l->silent_intv_cnt; - l->silent_intv_cnt++; + state |= l->bc_rcvlink->rcv_unacked; + state |= l->rcv_unacked; + state |= !skb_queue_empty(&l->transmq); + state |= !skb_queue_empty(&l->deferdq); + probe = mstate->probing; + probe |= l->silent_intv_cnt; + if (probe || mstate->monitoring) + l->silent_intv_cnt++; break; case LINK_RESET: setup = l->rst_cnt++ <= 4; @@@ -835,6 -841,7 +843,7 @@@ void tipc_link_reset(struct tipc_link * l->stats.recv_info = 0; l->stale_count = 0; l->bc_peer_is_up = false; + memset(&l->mon_state, 0, sizeof(l->mon_state)); tipc_link_reset_stats(l); }
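The reworked ESTABLISHED/SYNCHING branch of tipc_link_timeout() above folds several "work outstanding" conditions into one state flag and lets the new link monitor drive probing. A condensed userspace model of that decision, under the simplifying assumption that each field is a plain boolean:

#include <stdbool.h>
#include <stdio.h>

struct link_sketch {
        bool mon_reset, mon_probing, mon_monitoring;
        bool rcv_unacked, bc_unacked, txq_pending, dfq_pending;
        unsigned int silent_intv_cnt, abort_limit;
};

/* Fail fast on monitor reset or too many silent intervals; set 'state'
 * whenever acks or queued work are outstanding; advance the silent
 * counter only while probing or monitoring. */
static bool timeout_step(struct link_sketch *l, bool *probe, bool *state)
{
        if (l->mon_reset || l->silent_intv_cnt > l->abort_limit)
                return true;                     /* -> LINK_FAILURE_EVT */
        *state = l->bc_unacked || l->rcv_unacked ||
                 l->txq_pending || l->dfq_pending;
        *probe = l->mon_probing || l->silent_intv_cnt;
        if (*probe || l->mon_monitoring)
                l->silent_intv_cnt++;
        return false;
}

int main(void)
{
        struct link_sketch l = { .mon_probing = true, .abort_limit = 4 };
        bool probe, state;
        while (!timeout_step(&l, &probe, &state))
                ;
        printf("failed after %u silent intervals\n", l.silent_intv_cnt);
        return 0;
}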
@@@ -1243,6 -1250,9 +1252,9 @@@ static void tipc_link_build_proto_msg(s struct tipc_msg *hdr; struct sk_buff_head *dfq = &l->deferdq; bool node_up = link_is_up(l->bc_rcvlink); + struct tipc_mon_state *mstate = &l->mon_state; + int dlen = 0; + void *data;
/* Don't send protocol message during reset or link failover */ if (tipc_link_is_blocked(l)) @@@ -1255,12 -1265,13 +1267,13 @@@ rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE, - TIPC_MAX_IF_NAME, l->addr, + tipc_max_domain_size, l->addr, tipc_own_addr(l->net), 0, 0, 0); if (!skb) return;
hdr = buf_msg(skb); + data = msg_data(hdr); msg_set_session(hdr, l->session); msg_set_bearer_id(hdr, l->bearer_id); msg_set_net_plane(hdr, l->net_plane); @@@ -1276,14 -1287,18 +1289,18 @@@
if (mtyp == STATE_MSG) { msg_set_seq_gap(hdr, rcvgap); - msg_set_size(hdr, INT_H_SIZE); msg_set_probe(hdr, probe); + tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id); + msg_set_size(hdr, INT_H_SIZE + dlen); + skb_trim(skb, INT_H_SIZE + dlen); l->stats.sent_states++; l->rcv_unacked = 0; } else { /* RESET_MSG or ACTIVATE_MSG */ msg_set_max_pkt(hdr, l->advertised_mtu); - strcpy(msg_data(hdr), l->if_name); + strcpy(data, l->if_name); + msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME); + skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME); } if (probe) l->stats.sent_probes++; @@@ -1376,7 -1391,9 +1393,9 @@@ static int tipc_link_proto_rcv(struct t u16 peers_tol = msg_link_tolerance(hdr); u16 peers_prio = msg_linkprio(hdr); u16 rcv_nxt = l->rcv_nxt; + u16 dlen = msg_data_sz(hdr); int mtyp = msg_type(hdr); + void *data; char *if_name; int rc = 0;
@@@ -1386,6 -1403,10 +1405,10 @@@ if (tipc_own_addr(l->net) > msg_prevnode(hdr)) l->net_plane = msg_net_plane(hdr);
+ skb_linearize(skb); + hdr = buf_msg(skb); + data = msg_data(hdr); + switch (mtyp) { case RESET_MSG:
@@@ -1396,8 -1417,6 +1419,6 @@@ /* fall thru' */
case ACTIVATE_MSG: - skb_linearize(skb); - hdr = buf_msg(skb);
/* Complete own link name with peer's interface name */ if_name = strrchr(l->name, ':') + 1; @@@ -1405,7 -1424,7 +1426,7 @@@ break; if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME) break; - strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME); + strncpy(if_name, data, TIPC_MAX_IF_NAME);
/* Update own tolerance if peer indicates a non-zero value */ if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) @@@ -1453,6 -1472,8 +1474,8 @@@ rc = TIPC_LINK_UP_EVT; break; } + tipc_mon_rcv(l->net, data, dlen, l->addr, + &l->mon_state, l->bearer_id);
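With STATE messages now carrying monitor data, the skb_linearize()/buf_msg() pair moved ahead of the switch so every message type reads a contiguous payload. The ACTIVATE-path name copy keeps its length check before the fixed-width strncpy(); a standalone sketch of that bounded copy:

#include <stdio.h>
#include <string.h>

#define TIPC_MAX_IF_NAME 16

/* Refuse payloads that cannot hold a full name, then copy at most the
 * fixed field width (trailing bytes of the payload are NUL padding). */
static int copy_if_name(char dst[TIPC_MAX_IF_NAME],
                        const char *data, size_t data_sz)
{
        if (data_sz < TIPC_MAX_IF_NAME)
                return -1;               /* short payload: ignore message */
        strncpy(dst, data, TIPC_MAX_IF_NAME);
        return 0;
}

int main(void)
{
        char name[TIPC_MAX_IF_NAME] = "";
        const char payload[TIPC_MAX_IF_NAME] = "eth0";
        if (!copy_if_name(name, payload, sizeof(payload)))
                printf("peer interface: %s\n", name);
        return 0;
}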
/* Send NACK if peer has sent pkts we haven't received yet */ if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l)) @@@ -1561,12 -1582,7 +1584,12 @@@ void tipc_link_bc_sync_rcv(struct tipc_ if (!msg_peer_node_is_up(hdr)) return;
- l->bc_peer_is_up = true; + /* Open when peer acknowledges our bcast init msg (pkt #1) */ + if (msg_ack(hdr)) + l->bc_peer_is_up = true; + + if (!l->bc_peer_is_up) + return;
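The hunk above delays opening the broadcast receive link until the peer has acked our bcast init message. A tiny sketch of the gate:

#include <stdbool.h>
#include <stdio.h>

struct bc_link { bool peer_is_up; };

/* The broadcast receive link stays closed until the peer acknowledges
 * packet #1; until then, callers drop the message and return early. */
static bool bc_sync_accept(struct bc_link *l, bool hdr_acks_pkt1)
{
        if (hdr_acks_pkt1)
                l->peer_is_up = true;
        return l->peer_is_up;
}

int main(void)
{
        struct bc_link l = { false };
        printf("before ack: %d\n", bc_sync_accept(&l, false));  /* 0 */
        printf("after ack:  %d\n", bc_sync_accept(&l, true));   /* 1 */
        return 0;
}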
/* Ignore if peers_snd_nxt goes beyond receive window */ if (more(peers_snd_nxt, l->rcv_nxt + l->window)) diff --combined net/tipc/node.c index 23d4761,a3fc0a3..95cc78b --- a/net/tipc/node.c +++ b/net/tipc/node.c @@@ -40,6 -40,7 +40,7 @@@ #include "name_distr.h" #include "socket.h" #include "bcast.h" + #include "monitor.h" #include "discover.h" #include "netlink.h"
@@@ -205,17 -206,6 +206,6 @@@ u16 tipc_node_get_capabilities(struct n return caps; }
- /* - * A trivial power-of-two bitmask technique is used for speed, since this - * operation is done for every incoming TIPC packet. The number of hash table - * entries has been chosen so that no hash chain exceeds 8 nodes and will - * usually be much smaller (typically only a single node). - */ - static unsigned int tipc_hashfn(u32 addr) - { - return addr & (NODE_HTABLE_SIZE - 1); - } - static void tipc_node_kref_release(struct kref *kref) { struct tipc_node *n = container_of(kref, struct tipc_node, kref); @@@ -279,6 -269,7 +269,7 @@@ static void tipc_node_write_unlock(stru u32 addr = 0; u32 flags = n->action_flags; u32 link_id = 0; + u32 bearer_id; struct list_head *publ_list;
if (likely(!flags)) { @@@ -288,6 -279,7 +279,7 @@@
addr = n->addr; link_id = n->link_id; + bearer_id = link_id & 0xffff; publ_list = &n->publ_list;
n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP | @@@ -301,13 -293,16 +293,16 @@@ if (flags & TIPC_NOTIFY_NODE_UP) tipc_named_node_up(net, addr);
- if (flags & TIPC_NOTIFY_LINK_UP) + if (flags & TIPC_NOTIFY_LINK_UP) { + tipc_mon_peer_up(net, addr, bearer_id); tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr, TIPC_NODE_SCOPE, link_id, addr); - - if (flags & TIPC_NOTIFY_LINK_DOWN) + } + if (flags & TIPC_NOTIFY_LINK_DOWN) { + tipc_mon_peer_down(net, addr, bearer_id); tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr, link_id, addr); + } }
struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities) @@@ -378,14 -373,13 +373,13 @@@ static void tipc_node_calculate_timer(s { unsigned long tol = tipc_link_tolerance(l); unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4; - unsigned long keepalive_intv = msecs_to_jiffies(intv);
/* Link with lowest tolerance determines timer interval */ - if (keepalive_intv < n->keepalive_intv) - n->keepalive_intv = keepalive_intv; + if (intv < n->keepalive_intv) + n->keepalive_intv = intv;
- /* Ensure link's abort limit corresponds to current interval */ - tipc_link_set_abort_limit(l, tol / jiffies_to_msecs(n->keepalive_intv)); + /* Ensure link's abort limit corresponds to current tolerance */ + tipc_link_set_abort_limit(l, tol / n->keepalive_intv); }
static void tipc_node_delete(struct tipc_node *node) @@@ -526,7 -520,7 +520,7 @@@ static void tipc_node_timeout(unsigned if (rc & TIPC_LINK_DOWN_EVT) tipc_node_link_down(n, bearer_id, false); } - mod_timer(&n->timer, jiffies + n->keepalive_intv); + mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv)); }
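keepalive_intv is now kept in milliseconds and converted with msecs_to_jiffies() only when the timer is armed, so the abort limit becomes a plain ms division with no jiffies round-trip. Worked numbers (the 1500 ms tolerance is an invented example):

#include <stdio.h>

/* Sketch of the tipc_node_calculate_timer() arithmetic, all in ms. */
int main(void)
{
        unsigned long tol = 1500;                   /* link tolerance, ms */
        unsigned long intv = (tol / 4 > 500) ? 500 : tol / 4;  /* 375 ms */
        unsigned long abort_limit = tol / intv;     /* 4 missed probes */
        printf("intv=%lums abort_limit=%lu\n", intv, abort_limit);
        return 0;
}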
/** @@@ -692,6 -686,7 +686,7 @@@ static void tipc_node_link_down(struct struct tipc_link *l = le->link; struct tipc_media_addr *maddr; struct sk_buff_head xmitq; + int old_bearer_id = bearer_id;
if (!l) return; @@@ -711,6 -706,8 +706,8 @@@ tipc_link_fsm_evt(l, LINK_RESET_EVT); } tipc_node_write_unlock(n); + if (delete) + tipc_mon_remove_peer(n->net, n->addr, old_bearer_id); tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr); tipc_sk_rcv(n->net, &le->inputq); } @@@ -735,6 -732,7 +732,7 @@@ void tipc_node_check_dest(struct net *n bool accept_addr = false; bool reset = true; char *if_name; + unsigned long intv;
*dupl_addr = false; *respond = false; @@@ -840,9 -838,11 +838,11 @@@ le->link = l; n->link_cnt++; tipc_node_calculate_timer(n, l); - if (n->link_cnt == 1) - if (!mod_timer(&n->timer, jiffies + n->keepalive_intv)) + if (n->link_cnt == 1) { + intv = jiffies + msecs_to_jiffies(n->keepalive_intv); + if (!mod_timer(&n->timer, intv)) tipc_node_get(n); + } } memcpy(&le->maddr, maddr, sizeof(*maddr)); exit: @@@ -950,7 -950,7 +950,7 @@@ static void tipc_node_fsm_evt(struct ti state = SELF_UP_PEER_UP; break; case SELF_LOST_CONTACT_EVT: - state = SELF_DOWN_PEER_LEAVING; + state = SELF_DOWN_PEER_DOWN; break; case SELF_ESTABL_CONTACT_EVT: case PEER_LOST_CONTACT_EVT: @@@ -969,7 -969,7 +969,7 @@@ state = SELF_UP_PEER_UP; break; case PEER_LOST_CONTACT_EVT: - state = SELF_LEAVING_PEER_DOWN; + state = SELF_DOWN_PEER_DOWN; break; case SELF_LOST_CONTACT_EVT: case PEER_ESTABL_CONTACT_EVT: @@@ -1297,6 -1297,10 +1297,6 @@@ static void tipc_node_bc_rcv(struct ne
rc = tipc_bcast_rcv(net, be->link, skb);
- /* Broadcast link reset may happen at reassembly failure */ - if (rc & TIPC_LINK_DOWN_EVT) - tipc_node_reset_links(n); - /* Broadcast ACKs are sent on a unicast link */ if (rc & TIPC_LINK_SND_BC_ACK) { tipc_node_read_lock(n); @@@ -1316,17 -1320,6 +1316,17 @@@ spin_unlock_bh(&be->inputq2.lock); tipc_sk_mcast_rcv(net, &be->arrvq, &be->inputq2); } + + if (rc & TIPC_LINK_DOWN_EVT) { + /* Reception reassembly failure => reset all links to peer */ + if (!tipc_link_is_up(be->link)) + tipc_node_reset_links(n); + + /* Retransmission failure => reset all links to all peers */ + if (!tipc_link_is_up(tipc_bc_sndlink(net))) + tipc_bearer_reset_all(net); + } + tipc_node_put(n); }
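The relocated TIPC_LINK_DOWN_EVT handling above distinguishes two failure scopes: a dead broadcast receive link resets only the links to that peer, while a dead broadcast send link (retransmission failure) resets every bearer. Sketch:

#include <stdbool.h>
#include <stdio.h>

/* One event, two blast radii, depending on which bcast link failed. */
static void handle_bc_link_down(bool rcv_link_up, bool snd_link_up)
{
        if (!rcv_link_up)       /* reception reassembly failure */
                puts("reset all links to this peer");
        if (!snd_link_up)       /* retransmission failure */
                puts("reset all links on all bearers");
}

int main(void)
{
        handle_bc_link_down(false, true);
        handle_bc_link_down(true, false);
        return 0;
}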
diff --combined net/wireless/nl80211.c index 7d72283,5782f71..46417f9 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@@ -167,6 -167,7 +167,7 @@@ __cfg80211_rdev_from_attrs(struct net *
if (attrs[NL80211_ATTR_IFINDEX]) { int ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]); + netdev = __dev_get_by_index(netns, ifindex); if (netdev) { if (netdev->ieee80211_ptr) @@@ -404,6 -405,10 +405,10 @@@ static const struct nla_policy nl80211_ [NL80211_ATTR_PBSS] = { .type = NLA_FLAG }, [NL80211_ATTR_BSS_SELECT] = { .type = NLA_NESTED }, [NL80211_ATTR_STA_SUPPORT_P2P_PS] = { .type = NLA_U8 }, + [NL80211_ATTR_MU_MIMO_GROUP_DATA] = { + .len = VHT_MUMIMO_GROUPS_DATA_LEN + }, + [NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR] = { .len = ETH_ALEN }, };
/* policy for the key attributes */ @@@ -731,6 -736,7 +736,7 @@@ static int nl80211_parse_key_new(struc
if (tb[NL80211_KEY_DEFAULT_TYPES]) { struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES]; + err = nla_parse_nested(kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1, tb[NL80211_KEY_DEFAULT_TYPES], nl80211_key_default_policy); @@@ -1264,7 -1270,7 +1270,7 @@@ nl80211_send_mgmt_stypes(struct sk_buf struct nl80211_dump_wiphy_state { s64 filter_wiphy; long start; - long split_start, band_start, chan_start; + long split_start, band_start, chan_start, capa_start; bool split; };
@@@ -1382,6 -1388,7 +1388,7 @@@ static int nl80211_send_wiphy(struct cf rdev->ops->get_antenna) { u32 tx_ant = 0, rx_ant = 0; int res; + res = rdev_get_antenna(rdev, &tx_ant, &rx_ant); if (!res) { if (nla_put_u32(msg, @@@ -1761,6 -1768,47 +1768,47 @@@ nla_nest_end(msg, nested); }
+ state->split_start++; + break; + case 13: + if (rdev->wiphy.num_iftype_ext_capab && + rdev->wiphy.iftype_ext_capab) { + struct nlattr *nested_ext_capab, *nested; + + nested = nla_nest_start(msg, + NL80211_ATTR_IFTYPE_EXT_CAPA); + if (!nested) + goto nla_put_failure; + + for (i = state->capa_start; + i < rdev->wiphy.num_iftype_ext_capab; i++) { + const struct wiphy_iftype_ext_capab *capab; + + capab = &rdev->wiphy.iftype_ext_capab[i]; + + nested_ext_capab = nla_nest_start(msg, i); + if (!nested_ext_capab || + nla_put_u32(msg, NL80211_ATTR_IFTYPE, + capab->iftype) || + nla_put(msg, NL80211_ATTR_EXT_CAPA, + capab->extended_capabilities_len, + capab->extended_capabilities) || + nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK, + capab->extended_capabilities_len, + capab->extended_capabilities_mask)) + goto nla_put_failure; + + nla_nest_end(msg, nested_ext_capab); + if (state->split) + break; + } + nla_nest_end(msg, nested); + if (i < rdev->wiphy.num_iftype_ext_capab) { + state->capa_start = i + 1; + break; + } + } + /* done */ state->split_start = 0; break; @@@ -2116,7 -2164,6 +2164,6 @@@ static int nl80211_set_wds_peer(struct return rdev_set_wds_peer(rdev, dev, bssid); }
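The new "case 13" above follows nl80211's resumable split-dump convention: remember in state->capa_start where the previous message stopped and continue from there on the next pass. A userspace sketch of the cursor, simplified to one entry per split pass:

#include <stdbool.h>
#include <stdio.h>

struct dump_state { int capa_start; };

static void emit_item(int i)
{
        printf("iftype capability entry %d\n", i);
}

/* Split dumps emit one entry per pass and record where to resume;
 * a cursor of 0 means the table is finished. */
static void dump_pass(struct dump_state *st, int n_items, bool split)
{
        int i;
        for (i = st->capa_start; i < n_items; i++) {
                emit_item(i);
                if (split)
                        break;
        }
        st->capa_start = (i < n_items) ? i + 1 : 0;
}

int main(void)
{
        struct dump_state st = { 0 };
        do
                dump_pass(&st, 3, true);
        while (st.capa_start);
        return 0;
}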
- static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; @@@ -2251,6 -2298,7 +2298,7 @@@ if (info->attrs[NL80211_ATTR_WIPHY_ANTENNA_TX] && info->attrs[NL80211_ATTR_WIPHY_ANTENNA_RX]) { u32 tx_ant, rx_ant; + if ((!rdev->wiphy.available_antennas_tx && !rdev->wiphy.available_antennas_rx) || !rdev->ops->set_antenna) @@@ -2651,6 -2699,38 +2699,38 @@@ static int nl80211_set_interface(struc change = true; }
+ if (info->attrs[NL80211_ATTR_MU_MIMO_GROUP_DATA]) { + const u8 *mumimo_groups; + u32 cap_flag = NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER; + + if (!wiphy_ext_feature_isset(&rdev->wiphy, cap_flag)) + return -EOPNOTSUPP; + + mumimo_groups = + nla_data(info->attrs[NL80211_ATTR_MU_MIMO_GROUP_DATA]); + + /* bits 0 and 63 are reserved and must be zero */ + if ((mumimo_groups[0] & BIT(7)) || + (mumimo_groups[VHT_MUMIMO_GROUPS_DATA_LEN - 1] & BIT(0))) + return -EINVAL; + + memcpy(params.vht_mumimo_groups, mumimo_groups, + VHT_MUMIMO_GROUPS_DATA_LEN); + change = true; + } + + if (info->attrs[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR]) { + u32 cap_flag = NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER; + + if (!wiphy_ext_feature_isset(&rdev->wiphy, cap_flag)) + return -EOPNOTSUPP; + + nla_memcpy(params.macaddr, + info->attrs[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR], + ETH_ALEN); + change = true; + } + if (flags && (*flags & MONITOR_FLAG_ACTIVE) && !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) return -EOPNOTSUPP; @@@ -2919,6 -2999,7 +2999,7 @@@ static int nl80211_get_key(struct sk_bu pairwise = !!mac_addr; if (info->attrs[NL80211_ATTR_KEY_TYPE]) { u32 kt = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]); + if (kt >= NUM_NL80211_KEYTYPES) return -EINVAL; if (kt != NL80211_KEYTYPE_GROUP && @@@ -3487,16 -3568,16 +3568,16 @@@ static int nl80211_start_ap(struct sk_b params.smps_mode = NL80211_SMPS_OFF; }
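The MU-MIMO membership validation above rejects attributes with either reserved group bit set; as inferred from the test itself, group 0 sits in the most significant bit of the first byte and the far reserved bit in the least significant bit of the last byte. A sketch (the macro value 24 is assumed from the uapi header):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BIT(n) (1u << (n))
#define VHT_MUMIMO_GROUPS_DATA_LEN 24   /* assumed value of the uapi macro */

/* Reserved group bits at both ends of the attribute must stay zero. */
static bool mumimo_groups_valid(const uint8_t *g)
{
        if (g[0] & BIT(7))
                return false;                          /* group 0 set */
        if (g[VHT_MUMIMO_GROUPS_DATA_LEN - 1] & BIT(0))
                return false;                          /* group 63 set */
        return true;
}

int main(void)
{
        uint8_t groups[VHT_MUMIMO_GROUPS_DATA_LEN];
        memset(groups, 0, sizeof(groups));
        printf("all-zero attribute valid: %d\n", mumimo_groups_valid(groups));
        groups[0] = BIT(7);
        printf("reserved bit set valid:   %d\n", mumimo_groups_valid(groups));
        return 0;
}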
+ params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]); + if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ]) + return -EOPNOTSUPP; + if (info->attrs[NL80211_ATTR_ACL_POLICY]) { params.acl = parse_acl_data(&rdev->wiphy, info); if (IS_ERR(params.acl)) return PTR_ERR(params.acl); }
- params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]); - if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ]) - return -EOPNOTSUPP; - wdev_lock(wdev); err = rdev_start_ap(rdev, dev, ¶ms); if (!err) { @@@ -3962,7 -4043,6 +4043,6 @@@ static int nl80211_dump_station(struct sta_idx++; }
- out: cb->args[2] = sta_idx; err = skb->len; @@@ -4366,6 -4446,12 +4446,12 @@@ static int nl80211_set_station(struct s nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]); if (params.plink_state >= NUM_NL80211_PLINK_STATES) return -EINVAL; + if (info->attrs[NL80211_ATTR_MESH_PEER_AID]) { + params.peer_aid = nla_get_u16( + info->attrs[NL80211_ATTR_MESH_PEER_AID]); + if (params.peer_aid > IEEE80211_MAX_AID) + return -EINVAL; + } params.sta_modify_mask |= STATION_PARAM_APPLY_PLINK_STATE; }
@@@ -4763,7 -4849,6 +4849,6 @@@ static int nl80211_dump_mpath(struct sk path_idx++; }
- out: cb->args[2] = path_idx; err = skb->len; @@@ -5053,7 -5138,6 +5138,6 @@@ static int nl80211_req_set_reg(struct s enum nl80211_user_reg_hint_type user_reg_hint_type; u32 owner_nlportid;
- /* * You should only get this when cfg80211 hasn't yet initialized * completely when built-in to the kernel right between the time @@@ -5245,6 -5329,51 +5329,51 @@@ static const struct nla_polic [NL80211_MESH_SETUP_USERSPACE_AMPE] = { .type = NLA_FLAG }, };
+ static int nl80211_check_bool(const struct nlattr *nla, u8 min, u8 max, bool *out) + { + u8 val = nla_get_u8(nla); + if (val < min || val > max) + return -EINVAL; + *out = val; + return 0; + } + + static int nl80211_check_u8(const struct nlattr *nla, u8 min, u8 max, u8 *out) + { + u8 val = nla_get_u8(nla); + if (val < min || val > max) + return -EINVAL; + *out = val; + return 0; + } + + static int nl80211_check_u16(const struct nlattr *nla, u16 min, u16 max, u16 *out) + { + u16 val = nla_get_u16(nla); + if (val < min || val > max) + return -EINVAL; + *out = val; + return 0; + } + + static int nl80211_check_u32(const struct nlattr *nla, u32 min, u32 max, u32 *out) + { + u32 val = nla_get_u32(nla); + if (val < min || val > max) + return -EINVAL; + *out = val; + return 0; + } + + static int nl80211_check_s32(const struct nlattr *nla, s32 min, s32 max, s32 *out) + { + s32 val = nla_get_s32(nla); + if (val < min || val > max) + return -EINVAL; + *out = val; + return 0; + } + static int nl80211_parse_mesh_config(struct genl_info *info, struct mesh_config *cfg, u32 *mask_out) @@@ -5255,14 -5384,12 +5384,12 @@@ #define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, min, max, mask, attr, fn) \ do { \ if (tb[attr]) { \ - if (fn(tb[attr]) < min || fn(tb[attr]) > max) \ + if (fn(tb[attr], min, max, &cfg->param)) \ return -EINVAL; \ - cfg->param = fn(tb[attr]); \ mask |= (1 << (attr - 1)); \ } \ } while (0)
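These helpers fold the old fetch-then-range-check sequence into a single validate-and-assign call, which is what lets FILL_IN_MESH_PARAM_IF_SET pass min/max straight through to the checker. A userspace rendering of the u16 variant and its use (the 1..255 range is taken from the RETRY_TIMEOUT entry below):

#include <stdint.h>
#include <stdio.h>

/* Validate and assign in one step, like nl80211_check_u16() above. */
static int check_u16(uint16_t val, uint16_t min, uint16_t max, uint16_t *out)
{
        if (val < min || val > max)
                return -1;              /* kernel: -EINVAL */
        *out = val;
        return 0;
}

int main(void)
{
        uint16_t retry_timeout;
        if (check_u16(300, 1, 255, &retry_timeout))
                puts("300 rejected: out of range");
        if (!check_u16(100, 1, 255, &retry_timeout))
                printf("accepted: %u\n", (unsigned)retry_timeout);
        return 0;
}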
- if (!info->attrs[NL80211_ATTR_MESH_CONFIG]) return -EINVAL; if (nla_parse_nested(tb, NL80211_MESHCONF_ATTR_MAX, @@@ -5277,99 -5404,99 +5404,99 @@@ /* Fill in the params struct */ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout, 1, 255, mask, NL80211_MESHCONF_RETRY_TIMEOUT, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout, 1, 255, mask, NL80211_MESHCONF_CONFIRM_TIMEOUT, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout, 1, 255, mask, NL80211_MESHCONF_HOLDING_TIMEOUT, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks, 0, 255, mask, NL80211_MESHCONF_MAX_PEER_LINKS, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries, 0, 16, mask, NL80211_MESHCONF_MAX_RETRIES, - nla_get_u8); + nl80211_check_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL, 1, 255, - mask, NL80211_MESHCONF_TTL, nla_get_u8); + mask, NL80211_MESHCONF_TTL, nl80211_check_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, element_ttl, 1, 255, mask, NL80211_MESHCONF_ELEMENT_TTL, - nla_get_u8); + nl80211_check_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks, 0, 1, mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS, - nla_get_u8); + nl80211_check_bool); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor, 1, 255, mask, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, - nla_get_u32); + nl80211_check_u32); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries, 0, 255, mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, - nla_get_u8); + nl80211_check_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time, 1, 65535, mask, NL80211_MESHCONF_PATH_REFRESH_TIME, - nla_get_u32); + nl80211_check_u32); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout, 1, 65535, mask, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout, 1, 65535, mask, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, - nla_get_u32); + nl80211_check_u32); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval, 1, 65535, mask, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval, 1, 65535, mask, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPnetDiameterTraversalTime, 1, 65535, mask, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRootMode, 0, 4, mask, NL80211_MESHCONF_HWMP_ROOTMODE, - nla_get_u8); + nl80211_check_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRannInterval, 1, 65535, mask, NL80211_MESHCONF_HWMP_RANN_INTERVAL, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshGateAnnouncementProtocol, 0, 1, mask, NL80211_MESHCONF_GATE_ANNOUNCEMENTS, - nla_get_u8); + nl80211_check_bool); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding, 0, 1, mask, NL80211_MESHCONF_FORWARDING, - nla_get_u8); + nl80211_check_bool); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, -255, 0, mask, NL80211_MESHCONF_RSSI_THRESHOLD, - nla_get_s32); + nl80211_check_s32); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode, 0, 16, mask, NL80211_MESHCONF_HT_OPMODE, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout, 1, 65535, mask, NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, - 
nla_get_u32); + nl80211_check_u32); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMProotInterval, 1, 65535, mask, NL80211_MESHCONF_HWMP_ROOT_INTERVAL, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPconfirmationInterval, 1, 65535, mask, NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, power_mode, NL80211_MESH_POWER_ACTIVE, NL80211_MESH_POWER_MAX, mask, NL80211_MESHCONF_POWER_MODE, - nla_get_u32); + nl80211_check_u32); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration, 0, 65535, mask, - NL80211_MESHCONF_AWAKE_WINDOW, nla_get_u16); + NL80211_MESHCONF_AWAKE_WINDOW, nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, 0, 0xffffffff, mask, NL80211_MESHCONF_PLINK_TIMEOUT, - nla_get_u32); + nl80211_check_u32); if (mask_out) *mask_out = mask;
@@@ -5409,7 -5536,6 +5536,6 @@@ static int nl80211_parse_mesh_setup(str IEEE80211_PATH_METRIC_VENDOR : IEEE80211_PATH_METRIC_AIRTIME;
- if (tb[NL80211_MESH_SETUP_IE]) { struct nlattr *ieattr = tb[NL80211_MESH_SETUP_IE]; @@@ -5796,10 -5922,8 +5922,8 @@@ static int nl80211_set_reg(struct sk_bu } }
- r = set_regdom(rd, REGD_SOURCE_CRDA); - /* set_regdom took ownership */ - rd = NULL; - + /* set_regdom takes ownership of rd */ + return set_regdom(rd, REGD_SOURCE_CRDA); bad_reg: kfree(rd); return r; @@@ -6033,6 -6157,7 +6157,7 @@@ static int nl80211_trigger_scan(struct /* all channels */ for (band = 0; band < NUM_NL80211_BANDS; band++) { int j; + if (!wiphy->bands[band]) continue; for (j = 0; j < wiphy->bands[band]->n_channels; j++) { @@@ -6104,6 -6229,19 +6229,19 @@@ } }
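Since set_regdom() consumes rd on every path, the caller can return its result directly instead of NULLing the pointer and falling through. An illustrative userspace stand-in for that ownership contract (not the real function):

#include <stdio.h>
#include <stdlib.h>

struct regdomain { int n_reg_rules; };

/* Consumes rd on success *and* failure — the callee owns it from here. */
static int set_regdom_sketch(struct regdomain *rd)
{
        int err = rd->n_reg_rules > 0 ? 0 : -1;
        free(rd);                   /* callee releases, success or not */
        return err;
}

int main(void)
{
        struct regdomain *rd = malloc(sizeof(*rd));
        if (!rd)
                return 1;
        rd->n_reg_rules = 2;
        /* No free()/NULLing in the caller: just forward the result. */
        return set_regdom_sketch(rd) ? 1 : 0;
}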
+ if (info->attrs[NL80211_ATTR_MEASUREMENT_DURATION]) { + if (!wiphy_ext_feature_isset(wiphy, + NL80211_EXT_FEATURE_SET_SCAN_DWELL)) { + err = -EOPNOTSUPP; + goto out_free; + } + + request->duration = + nla_get_u16(info->attrs[NL80211_ATTR_MEASUREMENT_DURATION]); + request->duration_mandatory = + nla_get_flag(info->attrs[NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY]); + } + if (info->attrs[NL80211_ATTR_SCAN_FLAGS]) { request->flags = nla_get_u32( info->attrs[NL80211_ATTR_SCAN_FLAGS]); @@@ -6442,6 -6580,7 +6580,7 @@@ nl80211_parse_sched_scan(struct wiphy * /* all channels */ for (band = 0; band < NUM_NL80211_BANDS; band++) { int j; + if (!wiphy->bands[band]) continue; for (j = 0; j < wiphy->bands[band]->n_channels; j++) { @@@ -6511,7 -6650,7 +6650,7 @@@ nla_data(ssid), nla_len(ssid)); request->match_sets[i].ssid.ssid_len = nla_len(ssid); - /* special attribute - old implemenation w/a */ + /* special attribute - old implementation w/a */ request->match_sets[i].rssi_thold = default_match_rssi; rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI]; @@@ -6936,6 -7075,13 +7075,13 @@@ static int nl80211_send_bss(struct sk_b jiffies_to_msecs(jiffies - intbss->ts))) goto nla_put_failure;
+ if (intbss->parent_tsf && + (nla_put_u64_64bit(msg, NL80211_BSS_PARENT_TSF, + intbss->parent_tsf, NL80211_BSS_PAD) || + nla_put(msg, NL80211_BSS_PARENT_BSSID, ETH_ALEN, + intbss->parent_bssid))) + goto nla_put_failure; + if (intbss->ts_boottime && nla_put_u64_64bit(msg, NL80211_BSS_LAST_SEEN_BOOTTIME, intbss->ts_boottime, NL80211_BSS_PAD)) @@@ -7204,6 -7350,7 +7350,7 @@@ static int nl80211_authenticate(struct if (key.idx >= 0) { int i; bool ok = false; + for (i = 0; i < rdev->wiphy.n_cipher_suites; i++) { if (key.p.cipher == rdev->wiphy.cipher_suites[i]) { ok = true; @@@ -7282,6 -7429,7 +7429,7 @@@ static int nl80211_crypto_settings(stru
if (info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]) { u16 proto; + proto = nla_get_u16( info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]); settings->control_port_ethertype = cpu_to_be16(proto); @@@ -8435,6 -8583,7 +8583,7 @@@ static u32 rateset_to_mask(struct ieee8 for (i = 0; i < rates_len; i++) { int rate = (rates[i] & 0x7f) * 5; int ridx; + for (ridx = 0; ridx < sband->n_bitrates; ridx++) { struct ieee80211_rate *srate = &sband->bitrates[ridx]; @@@ -8743,7 -8892,6 +8892,6 @@@ static int nl80211_tx_mgmt(struct sk_bu if (params.wait < NL80211_MIN_REMAIN_ON_CHANNEL_TIME || params.wait > rdev->wiphy.max_remain_on_channel_duration) return -EINVAL; - }
params.offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK]; @@@ -10590,7 -10738,6 +10738,6 @@@ int cfg80211_vendor_cmd_reply(struct sk } EXPORT_SYMBOL_GPL(cfg80211_vendor_cmd_reply);
- static int nl80211_set_qos_map(struct sk_buff *skb, struct genl_info *info) { @@@ -10945,7 -11092,7 +11092,7 @@@ static const struct genl_ops nl80211_op .cmd = NL80211_CMD_SET_WIPHY, .doit = nl80211_set_wiphy, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_RTNL, }, { @@@ -10961,7 -11108,7 +11108,7 @@@ .cmd = NL80211_CMD_SET_INTERFACE, .doit = nl80211_set_interface, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -10969,7 -11116,7 +11116,7 @@@ .cmd = NL80211_CMD_NEW_INTERFACE, .doit = nl80211_new_interface, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, @@@ -10977,7 -11124,7 +11124,7 @@@ .cmd = NL80211_CMD_DEL_INTERFACE, .doit = nl80211_del_interface, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -10985,7 -11132,7 +11132,7 @@@ .cmd = NL80211_CMD_GET_KEY, .doit = nl80211_get_key, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -10993,7 -11140,7 +11140,7 @@@ .cmd = NL80211_CMD_SET_KEY, .doit = nl80211_set_key, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL | NL80211_FLAG_CLEAR_SKB, @@@ -11002,7 -11149,7 +11149,7 @@@ .cmd = NL80211_CMD_NEW_KEY, .doit = nl80211_new_key, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL | NL80211_FLAG_CLEAR_SKB, @@@ -11011,14 -11158,14 +11158,14 @@@ .cmd = NL80211_CMD_DEL_KEY, .doit = nl80211_del_key, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_BEACON, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .doit = nl80211_set_beacon, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, @@@ -11026,7 -11173,7 +11173,7 @@@ { .cmd = NL80211_CMD_START_AP, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .doit = nl80211_start_ap, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, @@@ -11034,7 -11181,7 +11181,7 @@@ { .cmd = NL80211_CMD_STOP_AP, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .doit = nl80211_stop_ap, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, @@@ -11051,7 -11198,7 +11198,7 @@@ .cmd = NL80211_CMD_SET_STATION, .doit = nl80211_set_station, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11059,7 -11206,7 +11206,7 @@@ .cmd = NL80211_CMD_NEW_STATION, .doit = nl80211_new_station, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11067,7 -11214,7 +11214,7 @@@ .cmd = NL80211_CMD_DEL_STATION, .doit = nl80211_del_station, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = 
GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11076,7 -11223,7 +11223,7 @@@ .doit = nl80211_get_mpath, .dumpit = nl80211_dump_mpath, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11085,7 -11232,7 +11232,7 @@@ .doit = nl80211_get_mpp, .dumpit = nl80211_dump_mpp, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11093,7 -11240,7 +11240,7 @@@ .cmd = NL80211_CMD_SET_MPATH, .doit = nl80211_set_mpath, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11101,7 -11248,7 +11248,7 @@@ .cmd = NL80211_CMD_NEW_MPATH, .doit = nl80211_new_mpath, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11109,7 -11256,7 +11256,7 @@@ .cmd = NL80211_CMD_DEL_MPATH, .doit = nl80211_del_mpath, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11117,7 -11264,7 +11264,7 @@@ .cmd = NL80211_CMD_SET_BSS, .doit = nl80211_set_bss, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11156,7 -11303,7 +11303,7 @@@ .cmd = NL80211_CMD_SET_MESH_CONFIG, .doit = nl80211_update_mesh_config, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11164,7 -11311,7 +11311,7 @@@ .cmd = NL80211_CMD_TRIGGER_SCAN, .doit = nl80211_trigger_scan, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11172,7 -11319,7 +11319,7 @@@ .cmd = NL80211_CMD_ABORT_SCAN, .doit = nl80211_abort_scan, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11185,7 -11332,7 +11332,7 @@@ .cmd = NL80211_CMD_START_SCHED_SCAN, .doit = nl80211_start_sched_scan, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11193,7 -11340,7 +11340,7 @@@ .cmd = NL80211_CMD_STOP_SCHED_SCAN, .doit = nl80211_stop_sched_scan, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11201,7 -11348,7 +11348,7 @@@ .cmd = NL80211_CMD_AUTHENTICATE, .doit = nl80211_authenticate, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL | NL80211_FLAG_CLEAR_SKB, @@@ -11210,7 -11357,7 +11357,7 @@@ .cmd = NL80211_CMD_ASSOCIATE, .doit = nl80211_associate, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11218,7 -11365,7 +11365,7 @@@ .cmd = NL80211_CMD_DEAUTHENTICATE, .doit = nl80211_deauthenticate, 
.policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11226,7 -11373,7 +11373,7 @@@ .cmd = NL80211_CMD_DISASSOCIATE, .doit = nl80211_disassociate, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11234,7 -11381,7 +11381,7 @@@ .cmd = NL80211_CMD_JOIN_IBSS, .doit = nl80211_join_ibss, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11242,7 -11389,7 +11389,7 @@@ .cmd = NL80211_CMD_LEAVE_IBSS, .doit = nl80211_leave_ibss, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11252,7 -11399,7 +11399,7 @@@ .doit = nl80211_testmode_do, .dumpit = nl80211_testmode_dump, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, @@@ -11261,7 -11408,7 +11408,7 @@@ .cmd = NL80211_CMD_CONNECT, .doit = nl80211_connect, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11269,7 -11416,7 +11416,7 @@@ .cmd = NL80211_CMD_DISCONNECT, .doit = nl80211_disconnect, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11277,7 -11424,7 +11424,7 @@@ .cmd = NL80211_CMD_SET_WIPHY_NETNS, .doit = nl80211_wiphy_netns, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, @@@ -11290,7 -11437,7 +11437,7 @@@ .cmd = NL80211_CMD_SET_PMKSA, .doit = nl80211_setdel_pmksa, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11298,7 -11445,7 +11445,7 @@@ .cmd = NL80211_CMD_DEL_PMKSA, .doit = nl80211_setdel_pmksa, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11306,7 -11453,7 +11453,7 @@@ .cmd = NL80211_CMD_FLUSH_PMKSA, .doit = nl80211_flush_pmksa, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11314,7 -11461,7 +11461,7 @@@ .cmd = NL80211_CMD_REMAIN_ON_CHANNEL, .doit = nl80211_remain_on_channel, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11322,7 -11469,7 +11469,7 @@@ .cmd = NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL, .doit = nl80211_cancel_remain_on_channel, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11330,7 -11477,7 +11477,7 @@@ .cmd = NL80211_CMD_SET_TX_BITRATE_MASK, .doit = nl80211_set_tx_bitrate_mask, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -11338,7 -11485,7 
+11485,7 @@@ .cmd = NL80211_CMD_REGISTER_FRAME, .doit = nl80211_register_mgmt, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -11346,7 -11493,7 +11493,7 @@@ .cmd = NL80211_CMD_FRAME, .doit = nl80211_tx_mgmt, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11354,7 -11501,7 +11501,7 @@@ .cmd = NL80211_CMD_FRAME_WAIT_CANCEL, .doit = nl80211_tx_mgmt_cancel_wait, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11362,7 -11509,7 +11509,7 @@@ .cmd = NL80211_CMD_SET_POWER_SAVE, .doit = nl80211_set_power_save, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -11378,7 -11525,7 +11525,7 @@@ .cmd = NL80211_CMD_SET_CQM, .doit = nl80211_set_cqm, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -11386,7 -11533,7 +11533,7 @@@ .cmd = NL80211_CMD_SET_CHANNEL, .doit = nl80211_set_channel, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -11394,7 -11541,7 +11541,7 @@@ .cmd = NL80211_CMD_SET_WDS_PEER, .doit = nl80211_set_wds_peer, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -11402,7 -11549,7 +11549,7 @@@ .cmd = NL80211_CMD_JOIN_MESH, .doit = nl80211_join_mesh, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11410,7 -11557,7 +11557,7 @@@ .cmd = NL80211_CMD_LEAVE_MESH, .doit = nl80211_leave_mesh, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11418,7 -11565,7 +11565,7 @@@ .cmd = NL80211_CMD_JOIN_OCB, .doit = nl80211_join_ocb, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11426,7 -11573,7 +11573,7 @@@ .cmd = NL80211_CMD_LEAVE_OCB, .doit = nl80211_leave_ocb, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11443,7 -11590,7 +11590,7 @@@ .cmd = NL80211_CMD_SET_WOWLAN, .doit = nl80211_set_wowlan, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, @@@ -11452,7 -11599,7 +11599,7 @@@ .cmd = NL80211_CMD_SET_REKEY_OFFLOAD, .doit = nl80211_set_rekey_data, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL | NL80211_FLAG_CLEAR_SKB, @@@ -11461,7 -11608,7 +11608,7 @@@ .cmd = NL80211_CMD_TDLS_MGMT, .doit = nl80211_tdls_mgmt, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 
NL80211_FLAG_NEED_RTNL, }, @@@ -11469,7 -11616,7 +11616,7 @@@ .cmd = NL80211_CMD_TDLS_OPER, .doit = nl80211_tdls_oper, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11477,7 -11624,7 +11624,7 @@@ .cmd = NL80211_CMD_UNEXPECTED_FRAME, .doit = nl80211_register_unexpected_frame, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -11485,7 -11632,7 +11632,7 @@@ .cmd = NL80211_CMD_PROBE_CLIENT, .doit = nl80211_probe_client, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11493,7 -11640,7 +11640,7 @@@ .cmd = NL80211_CMD_REGISTER_BEACONS, .doit = nl80211_register_beacons, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, @@@ -11501,7 -11648,7 +11648,7 @@@ .cmd = NL80211_CMD_SET_NOACK_MAP, .doit = nl80211_set_noack_map, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -11509,7 -11656,7 +11656,7 @@@ .cmd = NL80211_CMD_START_P2P_DEVICE, .doit = nl80211_start_p2p_device, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -11517,7 -11664,7 +11664,7 @@@ .cmd = NL80211_CMD_STOP_P2P_DEVICE, .doit = nl80211_stop_p2p_device, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11525,7 -11672,7 +11672,7 @@@ .cmd = NL80211_CMD_SET_MCAST_RATE, .doit = nl80211_set_mcast_rate, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -11533,7 -11680,7 +11680,7 @@@ .cmd = NL80211_CMD_SET_MAC_ACL, .doit = nl80211_set_mac_acl, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -11541,7 -11688,7 +11688,7 @@@ .cmd = NL80211_CMD_RADAR_DETECT, .doit = nl80211_start_radar_detection, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11554,7 -11701,7 +11701,7 @@@ .cmd = NL80211_CMD_UPDATE_FT_IES, .doit = nl80211_update_ft_ies, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11562,7 -11709,7 +11709,7 @@@ .cmd = NL80211_CMD_CRIT_PROTOCOL_START, .doit = nl80211_crit_protocol_start, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11570,7 -11717,7 +11717,7 @@@ .cmd = NL80211_CMD_CRIT_PROTOCOL_STOP, .doit = nl80211_crit_protocol_stop, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11585,7 -11732,7 +11732,7 @@@ .cmd = NL80211_CMD_SET_COALESCE, .doit = nl80211_set_coalesce, .policy = 
nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, @@@ -11593,7 -11740,7 +11740,7 @@@ .cmd = NL80211_CMD_CHANNEL_SWITCH, .doit = nl80211_channel_switch, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11602,7 -11749,7 +11749,7 @@@ .doit = nl80211_vendor_cmd, .dumpit = nl80211_vendor_cmd_dump, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, @@@ -11610,7 -11757,7 +11757,7 @@@ .cmd = NL80211_CMD_SET_QOS_MAP, .doit = nl80211_set_qos_map, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11618,7 -11765,7 +11765,7 @@@ .cmd = NL80211_CMD_ADD_TX_TS, .doit = nl80211_add_tx_ts, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11626,7 -11773,7 +11773,7 @@@ .cmd = NL80211_CMD_DEL_TX_TS, .doit = nl80211_del_tx_ts, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11634,7 -11781,7 +11781,7 @@@ .cmd = NL80211_CMD_TDLS_CHANNEL_SWITCH, .doit = nl80211_tdls_channel_switch, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11642,7 -11789,7 +11789,7 @@@ .cmd = NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH, .doit = nl80211_tdls_cancel_channel_switch, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11708,6 -11855,13 +11855,13 @@@ static int nl80211_add_scan_req(struct nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->flags)) goto nla_put_failure;
+ if (req->info.scan_start_tsf && + (nla_put_u64_64bit(msg, NL80211_ATTR_SCAN_START_TIME_TSF, + req->info.scan_start_tsf, NL80211_BSS_PAD) || + nla_put(msg, NL80211_ATTR_SCAN_START_TIME_TSF_BSSID, ETH_ALEN, + req->info.tsf_bssid))) + goto nla_put_failure; + return 0; nla_put_failure: return -ENOBUFS; @@@ -12092,7 -12246,7 +12246,7 @@@ void nl80211_send_connect_result(struc struct net_device *netdev, const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, - u16 status, gfp_t gfp) + int status, gfp_t gfp) { struct sk_buff *msg; void *hdr; @@@ -12110,7 -12264,10 +12264,10 @@@ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || (bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) || - nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, status) || + nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, + status < 0 ? WLAN_STATUS_UNSPECIFIED_FAILURE : + status) || + (status < 0 && nla_put_flag(msg, NL80211_ATTR_TIMED_OUT)) || (req_ie && nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) || (resp_ie && @@@ -12126,7 -12283,6 +12283,6 @@@ nla_put_failure: genlmsg_cancel(msg, hdr); nlmsg_free(msg); - }
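Widening the status parameter from u16 to int lets callers pass a negative value meaning "the connect attempt timed out", which is reported as WLAN_STATUS_UNSPECIFIED_FAILURE plus a separate NL80211_ATTR_TIMED_OUT flag. Sketch of the encoding:

#include <stdio.h>

#define WLAN_STATUS_UNSPECIFIED_FAILURE 1   /* IEEE 802.11 status code */

/* A negative status is not an 802.11 status code: report an unspecified
 * failure and flag the timeout separately, as the hunk above does. */
static void fill_connect_status(int status)
{
        unsigned int code = status < 0 ?
                WLAN_STATUS_UNSPECIFIED_FAILURE : (unsigned int)status;
        printf("STATUS_CODE=%u%s\n", code, status < 0 ? " TIMED_OUT" : "");
}

int main(void)
{
        fill_connect_status(0);      /* success */
        fill_connect_status(-1);     /* timeout from the driver */
        return 0;
}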
void nl80211_send_roamed(struct cfg80211_registered_device *rdev, @@@ -12165,7 -12321,6 +12321,6 @@@ nla_put_failure: genlmsg_cancel(msg, hdr); nlmsg_free(msg); - }
void nl80211_send_disconnected(struct cfg80211_registered_device *rdev, @@@ -12203,7 -12358,6 +12358,6 @@@ nla_put_failure: genlmsg_cancel(msg, hdr); nlmsg_free(msg); - }
void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev, @@@ -13545,7 -13699,6 +13699,6 @@@ void cfg80211_crit_proto_stopped(struc if (hdr) genlmsg_cancel(msg, hdr); nlmsg_free(msg); - } EXPORT_SYMBOL(cfg80211_crit_proto_stopped);