The following commit has been merged in the master branch:

commit 20d7e44c82fb7deaa1edc54f36fba8dcf283d2aa
Merge: 012b800f5abec7369dcd5cd88b073757d42c1d0d ce3a380dddd0cb16cb3d8d947b69657d7646c121
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Thu Jul 14 11:24:05 2016 +1000

    Merge remote-tracking branch 'net-next/master'
diff --combined MAINTAINERS index ea26bf2,06e8411..0cc01e7 --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -288,7 -288,6 +288,7 @@@ F: include/linux/acpi. F: include/acpi/ F: Documentation/acpi/ F: Documentation/ABI/testing/sysfs-bus-acpi +F: Documentation/ABI/testing/configfs-acpi F: drivers/pci/*acpi* F: drivers/pci/*/*acpi* F: drivers/pci/*/*/*acpi* @@@ -1527,7 -1526,6 +1527,7 @@@ M: David Brown <david.brown@linaro.org L: linux-arm-msm@vger.kernel.org L: linux-soc@vger.kernel.org S: Maintained +F: Documentation/devicetree/bindings/soc/qcom/ F: arch/arm/boot/dts/qcom-*.dts F: arch/arm/boot/dts/qcom-*.dtsi F: arch/arm/mach-qcom/ @@@ -1605,10 -1603,8 +1605,10 @@@ F: arch/arm/mach-s3c24* F: arch/arm/mach-s3c64xx/ F: arch/arm/mach-s5p*/ F: arch/arm/mach-exynos*/ -F: drivers/*/*s3c2410* -F: drivers/*/*/*s3c2410* +F: drivers/*/*s3c24* +F: drivers/*/*/*s3c24* +F: drivers/*/*s3c64xx* +F: drivers/*/*s5pv210* F: drivers/memory/samsung/* F: drivers/soc/samsung/* F: drivers/spi/spi-s3c* @@@ -1651,13 -1647,6 +1651,13 @@@ L: linux-media@vger.kernel.or S: Maintained F: drivers/media/platform/s5p-tv/
+ARM/SAMSUNG S5P SERIES HDMI CEC SUBSYSTEM SUPPORT +M: Kyungmin Park kyungmin.park@samsung.com +L: linux-arm-kernel@lists.infradead.org +L: linux-media@vger.kernel.org +S: Maintained +F: drivers/staging/media/platform/s5p-cec/ + ARM/SAMSUNG S5P SERIES JPEG CODEC SUPPORT M: Andrzej Pietrasiewicz andrzej.p@samsung.com M: Jacek Anaszewski j.anaszewski@samsung.com @@@ -1680,6 -1669,7 +1680,6 @@@ F: arch/arm/boot/dts/sh F: arch/arm/configs/shmobile_defconfig F: arch/arm/include/debug/renesas-scif.S F: arch/arm/mach-shmobile/ -F: drivers/sh/ F: drivers/soc/renesas/ F: include/linux/soc/renesas/
@@@ -1704,6 -1694,8 +1704,6 @@@ S: Maintaine F: drivers/edac/altera_edac.
ARM/STI ARCHITECTURE -M: Srinivas Kandagatla srinivas.kandagatla@gmail.com -M: Maxime Coquelin maxime.coquelin@st.com M: Patrice Chotard patrice.chotard@st.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: kernel@stlinux.com @@@ -1736,7 -1728,6 +1736,7 @@@ F: drivers/ata/ahci_st.
ARM/STM32 ARCHITECTURE M: Maxime Coquelin mcoquelin.stm32@gmail.com +M: Alexandre Torgue alexandre.torgue@st.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/mcoquelin/stm32.git @@@ -1747,7 -1738,8 +1747,7 @@@ ARM/TANGO ARCHITECTUR M: Marc Gonzalez marc_gonzalez@sigmadesigns.com L: linux-arm-kernel@lists.infradead.org S: Maintained -F: arch/arm/mach-tango/ -F: arch/arm/boot/dts/tango* +N: tango
ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT M: Lennert Buytenhek kernel@wantstofly.org @@@ -2305,6 -2297,7 +2305,7 @@@ S: Maintaine F: Documentation/ABI/testing/sysfs-class-net-batman-adv F: Documentation/ABI/testing/sysfs-class-net-mesh F: Documentation/networking/batman-adv.txt + F: include/uapi/linux/batman_adv.h F: net/batman-adv/
BAYCOM/HDLCDRV DRIVERS FOR AX.25 @@@ -2468,6 -2461,14 +2469,14 @@@ L: netdev@vger.kernel.or S: Supported F: drivers/net/ethernet/broadcom/b44.*
+ BROADCOM B53 ETHERNET SWITCH DRIVER + M: Florian Fainelli f.fainelli@gmail.com + L: netdev@vger.kernel.org + L: openwrt-devel@lists.openwrt.org (subscribers-only) + S: Supported + F: drivers/net/dsa/b53/* + F: include/linux/platform_data/b53.h + BROADCOM GENET ETHERNET DRIVER M: Florian Fainelli f.fainelli@gmail.com L: netdev@vger.kernel.org @@@ -2492,14 -2493,17 +2501,14 @@@ BROADCOM BCM281XX/BCM11XXX/BCM216XX AR M: Florian Fainelli f.fainelli@gmail.com M: Ray Jui rjui@broadcom.com M: Scott Branden sbranden@broadcom.com -L: bcm-kernel-feedback-list@broadcom.com +M: bcm-kernel-feedback-list@broadcom.com T: git git://github.com/broadcom/mach-bcm S: Maintained +N: bcm281* +N: bcm113* +N: bcm216* +N: kona F: arch/arm/mach-bcm/ -F: arch/arm/boot/dts/bcm113* -F: arch/arm/boot/dts/bcm216* -F: arch/arm/boot/dts/bcm281* -F: arch/arm64/boot/dts/broadcom/ -F: arch/arm/configs/bcm_defconfig -F: drivers/mmc/host/sdhci-bcm-kona.c -F: drivers/clocksource/bcm_kona_timer.c
BROADCOM BCM2835 ARM ARCHITECTURE M: Stephen Warren swarren@wwwdotorg.org @@@ -2522,21 -2526,20 +2531,21 @@@ F: arch/mips/include/asm/mach-bcm47xx/
BROADCOM BCM5301X ARM ARCHITECTURE M: Hauke Mehrtens hauke@hauke-m.de +M: Rafał Miłecki zajec5@gmail.com +M: bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org S: Maintained F: arch/arm/mach-bcm/bcm_5301x.c -F: arch/arm/boot/dts/bcm5301x.dtsi +F: arch/arm/boot/dts/bcm5301x*.dtsi F: arch/arm/boot/dts/bcm470*
BROADCOM BCM63XX ARM ARCHITECTURE M: Florian Fainelli f.fainelli@gmail.com +M: bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -L: bcm-kernel-feedback-list@broadcom.com T: git git://github.com/broadcom/stblinux.git S: Maintained -F: arch/arm/mach-bcm/bcm63xx.c -F: arch/arm/include/debug/bcm63xx.S +N: bcm63xx
BROADCOM BCM63XX/BCM33XX UDC DRIVER M: Kevin Cernekee cernekee@gmail.com @@@ -2548,8 -2551,8 +2557,8 @@@ BROADCOM BCM7XXX ARM ARCHITECTUR M: Brian Norris computersforpeace@gmail.com M: Gregory Fong gregory.0xf0@gmail.com M: Florian Fainelli f.fainelli@gmail.com +M: bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -L: bcm-kernel-feedback-list@broadcom.com T: git git://github.com/broadcom/stblinux.git S: Maintained F: arch/arm/mach-bcm/*brcmstb* @@@ -2582,12 -2585,11 +2591,11 @@@ S: Supporte F: drivers/net/ethernet/broadcom/tg3.*
BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER - M: Brett Rudley brudley@broadcom.com - M: Arend van Spriel arend@broadcom.com - M: Franky (Zhenhui) Lin frankyl@broadcom.com - M: Hante Meuleman meuleman@broadcom.com + M: Arend van Spriel arend.vanspriel@broadcom.com + M: Franky Lin franky.lin@broadcom.com + M: Hante Meuleman hante.meuleman@broadcom.com L: linux-wireless@vger.kernel.org - L: brcm80211-dev-list@broadcom.com + L: brcm80211-dev-list.pdl@broadcom.com S: Supported F: drivers/net/wireless/broadcom/brcm80211/
@@@ -2607,13 -2609,13 +2615,13 @@@ BROADCOM IPROC ARM ARCHITECTUR M: Ray Jui rjui@broadcom.com M: Scott Branden sbranden@broadcom.com M: Jon Mason jonmason@broadcom.com +M: bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -L: bcm-kernel-feedback-list@broadcom.com T: git git://github.com/broadcom/cygnus-linux.git S: Maintained N: iproc N: cygnus -N: nsp +N: bcm[-_]nsp N: bcm9113* N: bcm9583* N: bcm9585* @@@ -2624,9 -2626,6 +2632,9 @@@ N: bcm583 N: bcm585* N: bcm586* N: bcm88312 +F: arch/arm64/boot/dts/broadcom/ns2* +F: drivers/clk/bcm/clk-ns* +F: drivers/pinctrl/bcm/pinctrl-ns*
BROADCOM BRCMSTB GPIO DRIVER M: Gregory Fong gregory.0xf0@gmail.com @@@ -2671,8 -2670,8 +2679,8 @@@ F: drivers/net/ethernet/broadcom/bcmsys
BROADCOM VULCAN ARM64 SOC M: Jayachandran C. jchandra@broadcom.com +M: bcm-kernel-feedback-list@broadcom.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -L: bcm-kernel-feedback-list@broadcom.com S: Maintained F: arch/arm64/boot/dts/broadcom/vulcan*
@@@ -2822,6 -2821,7 +2830,7 @@@ W: https://github.com/linux-ca T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git S: Maintained + F: Documentation/devicetree/bindings/net/can/ F: drivers/net/can/ F: include/linux/can/dev.h F: include/linux/can/platform/ @@@ -2861,22 -2861,6 +2870,22 @@@ F: drivers/net/ieee802154/cc2520. F: include/linux/spi/cc2520.h F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
+CEC DRIVER +M: Hans Verkuil hans.verkuil@cisco.com +L: linux-media@vger.kernel.org +T: git git://linuxtv.org/media_tree.git +W: http://linuxtv.org +S: Supported +F: Documentation/cec.txt +F: Documentation/DocBook/media/v4l/cec* +F: drivers/staging/media/cec/ +F: drivers/media/cec-edid.c +F: drivers/media/rc/keymaps/rc-cec.c +F: include/media/cec.h +F: include/media/cec-edid.h +F: include/linux/cec.h +F: include/linux/cec-funcs.h + CELL BROADBAND ENGINE ARCHITECTURE M: Arnd Bergmann arnd@arndb.de L: linuxppc-dev@lists.ozlabs.org @@@ -4912,6 -4896,13 +4921,13 @@@ F: drivers/net/ethernet/freescale/gianf X: drivers/net/ethernet/freescale/gianfar_ptp.c F: Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
+ FREESCALE QUICC ENGINE UCC HDLC DRIVER + M: Zhao Qiang qiang.zhao@nxp.com + L: netdev@vger.kernel.org + L: linuxppc-dev@lists.ozlabs.org + S: Maintained + F: drivers/net/wan/fsl_ucc_hdlc* + FREESCALE QUICC ENGINE UCC UART DRIVER M: Timur Tabi timur@tabi.org L: linuxppc-dev@lists.ozlabs.org @@@ -5195,10 -5186,10 +5211,10 @@@ S: Maintaine F: drivers/media/usb/gspca/m5602/
GSPCA PAC207 SONIXB SUBDRIVER -M: Hans de Goede hdegoede@redhat.com +M: Hans Verkuil hverkuil@xs4all.nl L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git -S: Maintained +S: Odd Fixes F: drivers/media/usb/gspca/pac207.c
GSPCA SN9C20X SUBDRIVER @@@ -5216,10 -5207,10 +5232,10 @@@ S: Maintaine F: drivers/media/usb/gspca/t613.c
GSPCA USB WEBCAM DRIVER -M: Hans de Goede hdegoede@redhat.com +M: Hans Verkuil hverkuil@xs4all.nl L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git -S: Maintained +S: Odd Fixes F: drivers/media/usb/gspca/
GUID PARTITION TABLE (GPT) @@@ -5447,6 -5438,15 +5463,15 @@@ F: include/uapi/linux/if_hippi. F: net/802/hippi.c F: drivers/net/hippi/
+ HISILICON NETWORK SUBSYSTEM DRIVER + M: Yisen Zhuang yisen.zhuang@huawei.com + M: Salil Mehta salil.mehta@huawei.com + L: netdev@vger.kernel.org + W: http://www.hisilicon.com + S: Maintained + F: drivers/net/ethernet/hisilicon/ + F: Documentation/devicetree/bindings/net/hisilicon*.txt + HISILICON SAS Controller M: John Garry john.garry@huawei.com W: http://www.hisilicon.com @@@ -6899,7 -6899,6 +6924,7 @@@ F: drivers/crypto/nx F: drivers/crypto/vmx/ F: drivers/net/ethernet/ibm/ibmveth.* F: drivers/net/ethernet/ibm/ibmvnic.* +F: drivers/pci/hotplug/pnv_php.c F: drivers/pci/hotplug/rpa* F: drivers/scsi/ibmvscsi/ N: opal @@@ -7199,6 -7198,12 +7224,12 @@@ W: http://www.kernel.org/doc/man-page L: linux-man@vger.kernel.org S: Maintained
+ MARVELL 88E6XXX ETHERNET SWITCH FABRIC DRIVER + M: Andrew Lunn andrew@lunn.ch + M: Vivien Didelot vivien.didelot@savoirfairelinux.com + S: Maintained + F: drivers/net/dsa/mv88e6xxx/ + MARVELL ARMADA DRM SUPPORT M: Russell King rmk+kernel@armlinux.org.uk S: Maintained @@@ -7206,11 -7211,6 +7237,6 @@@ F: drivers/gpu/drm/armada F: include/uapi/drm/armada_drm.h F: Documentation/devicetree/bindings/display/armada/
- MARVELL 88E6352 DSA support - M: Guenter Roeck linux@roeck-us.net - S: Maintained - F: drivers/net/dsa/mv88e6352.c - MARVELL CRYPTO DRIVER M: Boris Brezillon boris.brezillon@free-electrons.com M: Arnaud Ebalard arno@natisbad.org @@@ -7346,16 -7346,6 +7372,16 @@@ L: linux-iio@vger.kernel.or S: Maintained F: drivers/iio/potentiometer/mcp4531.c
+MEDIA DRIVERS FOR RENESAS - FCP +M: Laurent Pinchart laurent.pinchart@ideasonboard.com +L: linux-media@vger.kernel.org +L: linux-renesas-soc@vger.kernel.org +T: git git://linuxtv.org/media_tree.git +S: Supported +F: Documentation/devicetree/bindings/media/renesas,fcp.txt +F: drivers/media/platform/rcar-fcp.c +F: include/media/rcar-fcp.h + MEDIA DRIVERS FOR RENESAS - VSP1 M: Laurent Pinchart laurent.pinchart@ideasonboard.com L: linux-media@vger.kernel.org @@@ -7365,18 -7355,8 +7391,18 @@@ S: Supporte F: Documentation/devicetree/bindings/media/renesas,vsp1.txt F: drivers/media/platform/vsp1/
+MEDIA DRIVERS FOR HELENE +M: Abylay Ospan aospan@netup.ru +L: linux-media@vger.kernel.org +W: https://linuxtv.org +W: http://netup.tv/ +T: git git://linuxtv.org/media_tree.git +S: Supported +F: drivers/media/dvb-frontends/helene* + MEDIA DRIVERS FOR ASCOT2E M: Sergey Kozlov serjk@netup.ru +M: Abylay Ospan aospan@netup.ru L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://netup.tv/ @@@ -7386,7 -7366,6 +7412,7 @@@ F: drivers/media/dvb-frontends/ascot2e
MEDIA DRIVERS FOR CXD2841ER M: Sergey Kozlov serjk@netup.ru +M: Abylay Ospan aospan@netup.ru L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://netup.tv/ @@@ -7396,7 -7375,6 +7422,7 @@@ F: drivers/media/dvb-frontends/cxd2841e
MEDIA DRIVERS FOR HORUS3A M: Sergey Kozlov serjk@netup.ru +M: Abylay Ospan aospan@netup.ru L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://netup.tv/ @@@ -7406,7 -7384,6 +7432,7 @@@ F: drivers/media/dvb-frontends/horus3a
MEDIA DRIVERS FOR LNBH25 M: Sergey Kozlov serjk@netup.ru +M: Abylay Ospan aospan@netup.ru L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://netup.tv/ @@@ -7416,7 -7393,6 +7442,7 @@@ F: drivers/media/dvb-frontends/lnbh25
MEDIA DRIVERS FOR NETUP PCI UNIVERSAL DVB devices M: Sergey Kozlov serjk@netup.ru +M: Abylay Ospan aospan@netup.ru L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://netup.tv/ @@@ -7665,8 -7641,10 +7691,8 @@@ L: linux-media@vger.kernel.or W: https://linuxtv.org W: http://palosaari.fi/linux/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ -T: git git://linuxtv.org/anttip/media_tree.git S: Maintained -F: drivers/staging/media/mn88472/ -F: drivers/media/dvb-frontends/mn88472.h +F: drivers/media/dvb-frontends/mn88472*
MN88473 MEDIA DRIVER M: Antti Palosaari crope@iki.fi @@@ -8743,7 -8721,6 +8769,7 @@@ L: linux-pci@vger.kernel.or Q: http://patchwork.ozlabs.org/project/linux-pci/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git S: Supported +F: Documentation/devicetree/bindings/pci/ F: Documentation/PCI/ F: drivers/pci/ F: include/linux/pci* @@@ -8889,15 -8866,6 +8915,15 @@@ S: Maintaine F: Documentation/devicetree/bindings/pci/xgene-pci-msi.txt F: drivers/pci/host/pci-xgene-msi.c
+PCIE DRIVER FOR AXIS ARTPEC +M: Niklas Cassel niklas.cassel@axis.com +M: Jesper Nilsson jesper.nilsson@axis.com +L: linux-arm-kernel@axis.com +L: linux-pci@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/pci/axis,artpec* +F: drivers/pci/host/*artpec* + PCIE DRIVER FOR HISILICON M: Zhou Wang wangzhou1@hisilicon.com M: Gabriele Paoloni gabriele.paoloni@huawei.com @@@ -9157,12 -9125,6 +9183,12 @@@ F: drivers/firmware/psci. F: include/linux/psci.h F: include/uapi/linux/psci.h
+POWERNV OPERATOR PANEL LCD DISPLAY DRIVER +M: Suraj Jitindar Singh sjitindarsingh@gmail.com +L: linuxppc-dev@lists.ozlabs.org +S: Maintained +F: drivers/char/powernv-op-panel.c + PNP SUPPORT M: "Rafael J. Wysocki" rafael.j.wysocki@intel.com S: Maintained @@@ -9287,13 -9249,6 +9313,13 @@@ F: include/linux/tracehook. F: include/uapi/linux/ptrace.h F: kernel/ptrace.c
+PULSE8-CEC DRIVER +M: Hans Verkuil hverkuil@xs4all.nl +L: linux-media@vger.kernel.org +T: git git://linuxtv.org/media_tree.git +S: Maintained +F: drivers/staging/media/pulse8-cec + PVRUSB2 VIDEO4LINUX DRIVER M: Mike Isely isely@pobox.com L: pvrusb2@isely.net (subscribers-only) @@@ -9305,10 -9260,10 +9331,10 @@@ F: Documentation/video4linux/README.pvr F: drivers/media/usb/pvrusb2/
PWC WEBCAM DRIVER -M: Hans de Goede hdegoede@redhat.com +M: Hans Verkuil hverkuil@xs4all.nl L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git -S: Maintained +S: Odd Fixes F: drivers/media/usb/pwc/*
PWM FAN DRIVER @@@ -9523,14 -9478,14 +9549,14 @@@ F: drivers/video/fbdev/aty/radeon F: include/uapi/linux/radeonfb.h
RADIOSHARK RADIO DRIVER -M: Hans de Goede hdegoede@redhat.com +M: Hans Verkuil hverkuil@xs4all.nl L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained F: drivers/media/radio/radio-shark.c
RADIOSHARK2 RADIO DRIVER -M: Hans de Goede hdegoede@redhat.com +M: Hans Verkuil hverkuil@xs4all.nl L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git S: Maintained @@@ -9604,7 -9559,7 +9630,7 @@@ M: Florian Fainelli <florian@openwrt.or S: Maintained
RDC R6040 FAST ETHERNET DRIVER - M: Florian Fainelli florian@openwrt.org + M: Florian Fainelli f.fainelli@gmail.com L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/rdc/r6040.c @@@ -9755,7 -9710,6 +9781,6 @@@ F: Documentation/ABI/*/sysfs-driver-hid
ROCKER DRIVER M: Jiri Pirko jiri@resnulli.us - M: Scott Feldman sfeldma@gmail.com L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/rocker/ @@@ -10352,10 -10306,9 +10377,9 @@@ W: http://www.avagotech.co S: Supported F: drivers/scsi/be2iscsi/
- Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER + Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER (be2net) M: Sathya Perla sathya.perla@broadcom.com M: Ajit Khaparde ajit.khaparde@broadcom.com - M: Padmanabh Ratnakar padmanabh.ratnakar@broadcom.com M: Sriharsha Basavapatna sriharsha.basavapatna@broadcom.com M: Somnath Kotur somnath.kotur@broadcom.com L: netdev@vger.kernel.org @@@ -11386,6 -11339,11 +11410,6 @@@ F: Documentation/thermal/cpu-cooling-ap F: drivers/thermal/cpu_cooling.c F: include/linux/cpu_cooling.h
-THINGM BLINK(1) USB RGB LED DRIVER -M: Vivien Didelot vivien.didelot@savoirfairelinux.com -S: Maintained -F: drivers/hid/hid-thingm.c - THINKPAD ACPI EXTRAS DRIVER M: Henrique de Moraes Holschuh ibm-acpi@hmh.eng.br L: ibm-acpi-devel@lists.sourceforge.net diff --combined arch/arm/boot/dts/am33xx.dtsi index af18c47,7fa2951..98748c6 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@@ -45,9 -45,19 +45,9 @@@ device_type = "cpu"; reg = <0>;
- /* - * To consider voltage drop between PMIC and SoC, - * tolerance value is reduced to 2% from 4% and - * voltage value is increased as a precaution. - */ - operating-points = < - /* kHz uV */ - 720000 1285000 - 600000 1225000 - 500000 1125000 - 275000 1125000 - >; - voltage-tolerance = <2>; /* 2 percentage */ + operating-points-v2 = <&cpu0_opp_table>; + ti,syscon-efuse = <&scm_conf 0x7fc 0x1fff 0>; + ti,syscon-rev = <&scm_conf 0x600>;
clocks = <&dpll_mpu_ck>; clock-names = "cpu"; @@@ -56,78 -66,6 +56,78 @@@ }; };
+ cpu0_opp_table: opp_table0 { + compatible = "operating-points-v2"; + + /* + * The three following nodes are marked with opp-suspend + * because the can not be enabled simultaneously on a + * single SoC. + */ + opp50@300000000 { + opp-hz = /bits/ 64 <300000000>; + opp-microvolt = <950000 931000 969000>; + opp-supported-hw = <0x06 0x0010>; + opp-suspend; + }; + + opp100@275000000 { + opp-hz = /bits/ 64 <275000000>; + opp-microvolt = <1100000 1078000 1122000>; + opp-supported-hw = <0x01 0x00FF>; + opp-suspend; + }; + + opp100@300000000 { + opp-hz = /bits/ 64 <300000000>; + opp-microvolt = <1100000 1078000 1122000>; + opp-supported-hw = <0x06 0x0020>; + opp-suspend; + }; + + opp100@500000000 { + opp-hz = /bits/ 64 <500000000>; + opp-microvolt = <1100000 1078000 1122000>; + opp-supported-hw = <0x01 0xFFFF>; + }; + + opp100@600000000 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <1100000 1078000 1122000>; + opp-supported-hw = <0x06 0x0040>; + }; + + opp120@600000000 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <1200000 1176000 1224000>; + opp-supported-hw = <0x01 0xFFFF>; + }; + + opp120@720000000 { + opp-hz = /bits/ 64 <720000000>; + opp-microvolt = <1200000 1176000 1224000>; + opp-supported-hw = <0x06 0x0080>; + }; + + oppturbo@720000000 { + opp-hz = /bits/ 64 <720000000>; + opp-microvolt = <1260000 1234800 1285200>; + opp-supported-hw = <0x01 0xFFFF>; + }; + + oppturbo@800000000 { + opp-hz = /bits/ 64 <800000000>; + opp-microvolt = <1260000 1234800 1285200>; + opp-supported-hw = <0x06 0x0100>; + }; + + oppnitro@1000000000 { + opp-hz = /bits/ 64 <1000000000>; + opp-microvolt = <1325000 1298500 1351500>; + opp-supported-hw = <0x04 0x0200>; + }; + }; + pmu { compatible = "arm,cortex-a8-pmu"; interrupts = <3>; @@@ -249,7 -187,7 +249,7 @@@ reg = <0x49000000 0x10000>; reg-names = "edma3_cc"; interrupts = <12 13 14>; - interrupt-names = "edma3_ccint", "emda3_mperr", + interrupt-names = "edma3_ccint", "edma3_mperr", "edma3_ccerrint"; dma-requests = <64>; #dma-cells = <2>; @@@ -741,24 -679,20 +741,24 @@@ 0x48300200 0x48300200 0x80>; /* EHRPWM */
ecap0: ecap@48300100 { - compatible = "ti,am33xx-ecap"; + compatible = "ti,am3352-ecap", + "ti,am33xx-ecap"; #pwm-cells = <3>; reg = <0x48300100 0x80>; + clocks = <&l4ls_gclk>; + clock-names = "fck"; interrupts = <31>; interrupt-names = "ecap0"; - ti,hwmods = "ecap0"; status = "disabled"; };
ehrpwm0: pwm@48300200 { - compatible = "ti,am33xx-ehrpwm"; + compatible = "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48300200 0x80>; - ti,hwmods = "ehrpwm0"; + clocks = <&ehrpwm0_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -775,24 -709,20 +775,24 @@@ 0x48302200 0x48302200 0x80>; /* EHRPWM */
ecap1: ecap@48302100 { - compatible = "ti,am33xx-ecap"; + compatible = "ti,am3352-ecap", + "ti,am33xx-ecap"; #pwm-cells = <3>; reg = <0x48302100 0x80>; + clocks = <&l4ls_gclk>; + clock-names = "fck"; interrupts = <47>; interrupt-names = "ecap1"; - ti,hwmods = "ecap1"; status = "disabled"; };
ehrpwm1: pwm@48302200 { - compatible = "ti,am33xx-ehrpwm"; + compatible = "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48302200 0x80>; - ti,hwmods = "ehrpwm1"; + clocks = <&ehrpwm1_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -809,24 -739,20 +809,24 @@@ 0x48304200 0x48304200 0x80>; /* EHRPWM */
ecap2: ecap@48304100 { - compatible = "ti,am33xx-ecap"; + compatible = "ti,am3352-ecap", + "ti,am33xx-ecap"; #pwm-cells = <3>; reg = <0x48304100 0x80>; + clocks = <&l4ls_gclk>; + clock-names = "fck"; interrupts = <61>; interrupt-names = "ecap2"; - ti,hwmods = "ecap2"; status = "disabled"; };
ehrpwm2: pwm@48304200 { - compatible = "ti,am33xx-ehrpwm"; + compatible = "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48304200 0x80>; - ti,hwmods = "ehrpwm2"; + clocks = <&ehrpwm2_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -840,7 -766,6 +840,6 @@@ ale_entries = <1024>; bd_ram_size = <0x2000>; no_bd_ram = <0>; - rx_descs = <64>; mac_control = <0x20>; slaves = <2>; active_slave = <0>; @@@ -863,7 -788,7 +862,7 @@@ status = "disabled";
davinci_mdio: mdio@4a101000 { - compatible = "ti,davinci_mdio"; + compatible = "ti,cpsw-mdio","ti,davinci_mdio"; #address-cells = <1>; #size-cells = <0>; ti,hwmods = "davinci_mdio"; diff --combined arch/arm/boot/dts/am4372.dtsi index e7b53ef,cd81ecf..0fadae5 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@@ -44,49 -44,10 +44,49 @@@ clocks = <&dpll_mpu_ck>; clock-names = "cpu";
+ operating-points-v2 = <&cpu0_opp_table>; + ti,syscon-efuse = <&scm_conf 0x610 0x3f 0>; + ti,syscon-rev = <&scm_conf 0x600>; + clock-latency = <300000>; /* From omap-cpufreq driver */ }; };
+ cpu0_opp_table: opp_table0 { + compatible = "operating-points-v2"; + + opp50@300000000 { + opp-hz = /bits/ 64 <300000000>; + opp-microvolt = <950000 931000 969000>; + opp-supported-hw = <0xFF 0x01>; + opp-suspend; + }; + + opp100@600000000 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <1100000 1078000 1122000>; + opp-supported-hw = <0xFF 0x04>; + }; + + opp120@720000000 { + opp-hz = /bits/ 64 <720000000>; + opp-microvolt = <1200000 1176000 1224000>; + opp-supported-hw = <0xFF 0x08>; + }; + + oppturbo@800000000 { + opp-hz = /bits/ 64 <800000000>; + opp-microvolt = <1260000 1234800 1285200>; + opp-supported-hw = <0xFF 0x10>; + }; + + oppnitro@1000000000 { + opp-hz = /bits/ 64 <1000000000>; + opp-microvolt = <1325000 1298500 1351500>; + opp-supported-hw = <0xFF 0x20>; + }; + }; + gic: interrupt-controller@48241000 { compatible = "arm,cortex-a9-gic"; interrupt-controller; @@@ -238,7 -199,7 +238,7 @@@ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "edma3_ccint", "emda3_mperr", + interrupt-names = "edma3_ccint", "edma3_mperr", "edma3_ccerrint"; dma-requests = <64>; #dma-cells = <2>; @@@ -665,7 -626,6 +665,6 @@@ ale_entries = <1024>; bd_ram_size = <0x2000>; no_bd_ram = <0>; - rx_descs = <64>; mac_control = <0x20>; slaves = <2>; active_slave = <0>; @@@ -675,7 -635,7 +674,7 @@@ syscon = <&scm_conf>;
davinci_mdio: mdio@4a101000 { - compatible = "ti,am4372-mdio","ti,davinci_mdio"; + compatible = "ti,am4372-mdio","ti,cpsw-mdio","ti,davinci_mdio"; reg = <0x4a101000 0x100>; #address-cells = <1>; #size-cells = <0>; @@@ -711,24 -671,18 +710,24 @@@ status = "disabled";
ecap0: ecap@48300100 { - compatible = "ti,am4372-ecap","ti,am33xx-ecap"; + compatible = "ti,am4372-ecap", + "ti,am3352-ecap", + "ti,am33xx-ecap"; #pwm-cells = <3>; reg = <0x48300100 0x80>; - ti,hwmods = "ecap0"; + clocks = <&l4ls_gclk>; + clock-names = "fck"; status = "disabled"; };
ehrpwm0: pwm@48300200 { - compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm"; + compatible = "ti,am4372-ehrpwm", + "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48300200 0x80>; - ti,hwmods = "ehrpwm0"; + clocks = <&ehrpwm0_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -743,24 -697,18 +742,24 @@@ status = "disabled";
ecap1: ecap@48302100 { - compatible = "ti,am4372-ecap","ti,am33xx-ecap"; + compatible = "ti,am4372-ecap", + "ti,am3352-ecap", + "ti,am33xx-ecap"; #pwm-cells = <3>; reg = <0x48302100 0x80>; - ti,hwmods = "ecap1"; + clocks = <&l4ls_gclk>; + clock-names = "fck"; status = "disabled"; };
ehrpwm1: pwm@48302200 { - compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm"; + compatible = "ti,am4372-ehrpwm", + "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48302200 0x80>; - ti,hwmods = "ehrpwm1"; + clocks = <&ehrpwm1_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -775,24 -723,18 +774,24 @@@ status = "disabled";
ecap2: ecap@48304100 { - compatible = "ti,am4372-ecap","ti,am33xx-ecap"; + compatible = "ti,am4372-ecap", + "ti,am3352-ecap", + "ti,am33xx-ecap"; #pwm-cells = <3>; reg = <0x48304100 0x80>; - ti,hwmods = "ecap2"; + clocks = <&l4ls_gclk>; + clock-names = "fck"; status = "disabled"; };
ehrpwm2: pwm@48304200 { - compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm"; + compatible = "ti,am4372-ehrpwm", + "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48304200 0x80>; - ti,hwmods = "ehrpwm2"; + clocks = <&ehrpwm2_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -807,13 -749,10 +806,13 @@@ status = "disabled";
ehrpwm3: pwm@48306200 { - compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm"; + compatible = "ti,am4372-ehrpwm", + "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48306200 0x80>; - ti,hwmods = "ehrpwm3"; + clocks = <&ehrpwm3_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -828,13 -767,10 +827,13 @@@ status = "disabled";
ehrpwm4: pwm@48308200 { - compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm"; + compatible = "ti,am4372-ehrpwm", + "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48308200 0x80>; - ti,hwmods = "ehrpwm4"; + clocks = <&ehrpwm4_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -849,13 -785,10 +848,13 @@@ status = "disabled";
ehrpwm5: pwm@4830a200 { - compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm"; + compatible = "ti,am4372-ehrpwm", + "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x4830a200 0x80>; - ti,hwmods = "ehrpwm5"; + clocks = <&ehrpwm5_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -909,13 -842,6 +908,13 @@@ dma-names = "tx", "rx"; };
+ rng: rng@48310000 { + compatible = "ti,omap4-rng"; + ti,hwmods = "rng"; + reg = <0x48310000 0x2000>; + interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>; + }; + mcasp0: mcasp@48038000 { compatible = "ti,am33xx-mcasp-audio"; ti,hwmods = "mcasp0"; diff --combined arch/arm/boot/dts/dm814x.dtsi index a6e0aebc,f23cae0c..68e412c --- a/arch/arm/boot/dts/dm814x.dtsi +++ b/arch/arm/boot/dts/dm814x.dtsi @@@ -448,7 -448,7 +448,7 @@@ reg = <0x49000000 0x10000>; reg-names = "edma3_cc"; interrupts = <12 13 14>; - interrupt-names = "edma3_ccint", "emda3_mperr", + interrupt-names = "edma3_ccint", "edma3_mperr", "edma3_ccerrint"; dma-requests = <64>; #dma-cells = <2>; @@@ -509,7 -509,6 +509,6 @@@ ale_entries = <1024>; bd_ram_size = <0x2000>; no_bd_ram = <0>; - rx_descs = <64>; mac_control = <0x20>; slaves = <2>; active_slave = <0>; diff --combined arch/arm/boot/dts/dra7.dtsi index 40b69a5,de559f6..d9bfb94 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@@ -73,49 -73,6 +73,49 @@@ interrupt-parent = <&gic>; };
+ cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a15"; + reg = <0>; + + operating-points-v2 = <&cpu0_opp_table>; + ti,syscon-efuse = <&scm_wkup 0x20c 0xf80000 19>; + ti,syscon-rev = <&scm_wkup 0x204>; + + clocks = <&dpll_mpu_ck>; + clock-names = "cpu"; + + clock-latency = <300000>; /* From omap-cpufreq driver */ + + /* cooling options */ + cooling-min-level = <0>; + cooling-max-level = <2>; + #cooling-cells = <2>; /* min followed by max */ + }; + }; + + cpu0_opp_table: opp_table0 { + compatible = "operating-points-v2"; + opp-shared; + + opp_nom@1000000000 { + opp-hz = /bits/ 64 <1000000000>; + opp-microvolt = <1060000 850000 1150000>; + opp-supported-hw = <0xFF 0x01>; + opp-suspend; + }; + + opp_od@1176000000 { + opp-hz = /bits/ 64 <1176000000>; + opp-microvolt = <1160000 885000 1160000>; + opp-supported-hw = <0xFF 0x02>; + }; + }; + /* * The soc node represents the soc top level view. It is used for IPs * that are not memory mapped in the MPU view or for the MPU itself. @@@ -276,11 -233,6 +276,11 @@@ prm_clockdomains: clockdomains { }; }; + + scm_wkup: scm_conf@c000 { + compatible = "syscon"; + reg = <0xc000 0x1000>; + }; };
axi@0 { @@@ -324,7 -276,7 +324,7 @@@ ranges = <0x51800000 0x51800000 0x3000 0x0 0x30000000 0x10000000>; status = "disabled"; - pcie@51000000 { + pcie@51800000 { compatible = "ti,dra7-pcie"; reg = <0x51800000 0x2000>, <0x51802000 0x14c>, <0x1000 0x2000>; reg-names = "rc_dbics", "ti_conf", "config"; @@@ -352,53 -304,6 +352,53 @@@ }; };
+ ocmcram1: ocmcram@40300000 { + compatible = "mmio-sram"; + reg = <0x40300000 0x80000>; + ranges = <0x0 0x40300000 0x80000>; + #address-cells = <1>; + #size-cells = <1>; + /* + * This is a placeholder for an optional reserved + * region for use by secure software. The size + * of this region is not known until runtime so it + * is set as zero to either be updated to reserve + * space or left unchanged to leave all SRAM for use. + * On HS parts that that require the reserved region + * either the bootloader can update the size to + * the required amount or the node can be overridden + * from the board dts file for the secure platform. + */ + sram-hs@0 { + compatible = "ti,secure-ram"; + reg = <0x0 0x0>; + }; + }; + + /* + * NOTE: ocmcram2 and ocmcram3 are not available on all + * DRA7xx and AM57xx variants. Confirm availability in + * the data manual for the exact part number in use + * before enabling these nodes in the board dts file. + */ + ocmcram2: ocmcram@40400000 { + status = "disabled"; + compatible = "mmio-sram"; + reg = <0x40400000 0x100000>; + ranges = <0x0 0x40400000 0x100000>; + #address-cells = <1>; + #size-cells = <1>; + }; + + ocmcram3: ocmcram@40500000 { + status = "disabled"; + compatible = "mmio-sram"; + reg = <0x40500000 0x100000>; + ranges = <0x0 0x40500000 0x100000>; + #address-cells = <1>; + #size-cells = <1>; + }; + bandgap: bandgap@4a0021e0 { reg = <0x4a0021e0 0xc 0x4a00232c 0xc @@@ -436,7 -341,7 +436,7 @@@ interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "edma3_ccint", "emda3_mperr", + interrupt-names = "edma3_ccint", "edma3_mperr", "edma3_ccerrint"; dma-requests = <64>; #dma-cells = <2>; @@@ -1723,7 -1628,6 +1723,6 @@@ ale_entries = <1024>; bd_ram_size = <0x2000>; no_bd_ram = <0>; - rx_descs = <64>; mac_control = <0x20>; slaves = <2>; active_slave = <0>; @@@ -1758,7 -1662,7 +1757,7 @@@ status = "disabled";
davinci_mdio: mdio@48485000 { - compatible = "ti,davinci_mdio"; + compatible = "ti,cpsw-mdio","ti,davinci_mdio"; #address-cells = <1>; #size-cells = <0>; ti,hwmods = "davinci_mdio"; @@@ -1840,149 -1744,6 +1839,149 @@@ clock-names = "fck", "sys_clk"; }; }; + + epwmss0: epwmss@4843e000 { + compatible = "ti,dra746-pwmss", "ti,am33xx-pwmss"; + reg = <0x4843e000 0x30>; + ti,hwmods = "epwmss0"; + #address-cells = <1>; + #size-cells = <1>; + status = "disabled"; + ranges; + + ehrpwm0: pwm@4843e200 { + compatible = "ti,dra746-ehrpwm", + "ti,am3352-ehrpwm"; + #pwm-cells = <3>; + reg = <0x4843e200 0x80>; + clocks = <&ehrpwm0_tbclk>, <&l4_root_clk_div>; + clock-names = "tbclk", "fck"; + status = "disabled"; + }; + + ecap0: ecap@4843e100 { + compatible = "ti,dra746-ecap", + "ti,am3352-ecap"; + #pwm-cells = <3>; + reg = <0x4843e100 0x80>; + clocks = <&l4_root_clk_div>; + clock-names = "fck"; + status = "disabled"; + }; + }; + + epwmss1: epwmss@48440000 { + compatible = "ti,dra746-pwmss", "ti,am33xx-pwmss"; + reg = <0x48440000 0x30>; + ti,hwmods = "epwmss1"; + #address-cells = <1>; + #size-cells = <1>; + status = "disabled"; + ranges; + + ehrpwm1: pwm@48440200 { + compatible = "ti,dra746-ehrpwm", + "ti,am3352-ehrpwm"; + #pwm-cells = <3>; + reg = <0x48440200 0x80>; + clocks = <&ehrpwm1_tbclk>, <&l4_root_clk_div>; + clock-names = "tbclk", "fck"; + status = "disabled"; + }; + + ecap1: ecap@48440100 { + compatible = "ti,dra746-ecap", + "ti,am3352-ecap"; + #pwm-cells = <3>; + reg = <0x48440100 0x80>; + clocks = <&l4_root_clk_div>; + clock-names = "fck"; + status = "disabled"; + }; + }; + + epwmss2: epwmss@48442000 { + compatible = "ti,dra746-pwmss", "ti,am33xx-pwmss"; + reg = <0x48442000 0x30>; + ti,hwmods = "epwmss2"; + #address-cells = <1>; + #size-cells = <1>; + status = "disabled"; + ranges; + + ehrpwm2: pwm@48442200 { + compatible = "ti,dra746-ehrpwm", + "ti,am3352-ehrpwm"; + #pwm-cells = <3>; + reg = <0x48442200 0x80>; + clocks = <&ehrpwm2_tbclk>, <&l4_root_clk_div>; + clock-names = "tbclk", "fck"; + status = "disabled"; + }; + + ecap2: ecap@48442100 { + compatible = "ti,dra746-ecap", + "ti,am3352-ecap"; + #pwm-cells = <3>; + reg = <0x48442100 0x80>; + clocks = <&l4_root_clk_div>; + clock-names = "fck"; + status = "disabled"; + }; + }; + + aes1: aes@4b500000 { + compatible = "ti,omap4-aes"; + ti,hwmods = "aes1"; + reg = <0x4b500000 0xa0>; + interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>; + dmas = <&edma_xbar 111 0>, <&edma_xbar 110 0>; + dma-names = "tx", "rx"; + clocks = <&l3_iclk_div>; + clock-names = "fck"; + }; + + aes2: aes@4b700000 { + compatible = "ti,omap4-aes"; + ti,hwmods = "aes2"; + reg = <0x4b700000 0xa0>; + interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>; + dmas = <&edma_xbar 114 0>, <&edma_xbar 113 0>; + dma-names = "tx", "rx"; + clocks = <&l3_iclk_div>; + clock-names = "fck"; + }; + + des: des@480a5000 { + compatible = "ti,omap4-des"; + ti,hwmods = "des"; + reg = <0x480a5000 0xa0>; + interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>; + dmas = <&sdma_xbar 117>, <&sdma_xbar 116>; + dma-names = "tx", "rx"; + clocks = <&l3_iclk_div>; + clock-names = "fck"; + }; + + sham: sham@53100000 { + compatible = "ti,omap5-sham"; + ti,hwmods = "sham"; + reg = <0x4b101000 0x300>; + interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>; + dmas = <&edma_xbar 119 0>; + dma-names = "rx"; + clocks = <&l3_iclk_div>; + clock-names = "fck"; + }; + + rng: rng@48090000 { + compatible = "ti,omap4-rng"; + ti,hwmods = "rng"; + reg = <0x48090000 0x2000>; + interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&l3_iclk_div>; + 
clock-names = "fck"; + }; };
thermal_zones: thermal-zones { diff --combined arch/arm/boot/dts/rk3288.dtsi index 7fa932f,3ebee53..cd33f01 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi @@@ -539,8 -539,9 +539,9 @@@ gmac: ethernet@ff290000 { compatible = "rockchip,rk3288-gmac"; reg = <0xff290000 0x10000>; - interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "macirq"; + interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "macirq", "eth_wake_irq"; rockchip,grf = <&grf>; clocks = <&cru SCLK_MAC>, <&cru SCLK_MAC_RX>, <&cru SCLK_MAC_TX>, @@@ -826,11 -827,6 +827,11 @@@ #phy-cells = <0>; status = "disabled"; }; + + io_domains: io-domains { + compatible = "rockchip,rk3288-io-voltage-domain"; + status = "disabled"; + }; };
wdt: watchdog@ff800000 { diff --combined arch/arm64/boot/dts/broadcom/ns2-svk.dts index b062a44,ea5603f..0862b3e --- a/arch/arm64/boot/dts/broadcom/ns2-svk.dts +++ b/arch/arm64/boot/dts/broadcom/ns2-svk.dts @@@ -40,14 -40,10 +40,14 @@@
aliases { serial0 = &uart3; + serial1 = &uart0; + serial2 = &uart1; + serial3 = &uart2; };
chosen { stdout-path = "serial0:115200n8"; + bootargs = "earlycon=uart8250,mmio32,0x66130000"; };
memory { @@@ -56,6 -52,14 +56,14 @@@ }; };
+ &pci_phy0 { + status = "ok"; + }; + + &pci_phy1 { + status = "ok"; + }; + &pcie0 { status = "ok"; }; @@@ -72,18 -76,6 +80,18 @@@ status = "ok"; };
+&uart0 { + status = "ok"; +}; + +&uart1 { + status = "ok"; +}; + +&uart2 { + status = "ok"; +}; + &uart3 { status = "ok"; }; @@@ -133,18 -125,6 +141,18 @@@ }; };
+&sata_phy0 { + status = "ok"; +}; + +&sata_phy1 { + status = "ok"; +}; + +&sata { + status = "ok"; +}; + &sdio0 { status = "ok"; }; @@@ -161,11 -141,10 +169,19 @@@ }; };
+&pinctrl { + pinctrl-names = "default"; + pinctrl-0 = <&nand_sel>; + nand_sel: nand_sel { + function = "nand"; + groups = "nand_grp"; + }; +}; ++ + &mdio_mux_iproc { + mdio@10 { + gphy0: eth-phy@10 { + reg = <0x10>; + }; + }; + }; diff --combined arch/arm64/boot/dts/broadcom/ns2.dtsi index d1dc812,46b78fa..f53b095 --- a/arch/arm64/boot/dts/broadcom/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi @@@ -251,22 -251,6 +251,22 @@@ mmu-masters; };
+ pinctrl: pinctrl@6501d130 { + compatible = "brcm,ns2-pinmux"; + reg = <0x6501d130 0x08>, + <0x660a0028 0x04>, + <0x660009b0 0x40>; + }; + + gpio_aon: gpio@65024800 { + compatible = "brcm,iproc-gpio"; + reg = <0x65024800 0x50>, + <0x65024008 0x18>; + ngpios = <6>; + #gpio-cells = <2>; + gpio-controller; + }; + gic: interrupt-controller@65210000 { compatible = "arm,gic-400"; #interrupt-cells = <3>; @@@ -279,26 -263,45 +279,65 @@@ IRQ_TYPE_LEVEL_HIGH)>; };
+ cci@65590000 { + compatible = "arm,cci-400"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x65590000 0x1000>; + ranges = <0 0x65590000 0x10000>; + + pmu@9000 { + compatible = "arm,cci-400-pmu,r1", + "arm,cci-400-pmu"; + reg = <0x9000 0x4000>; + interrupts = <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH>; + }; + }; + + mdio_mux_iproc: mdio-mux@6602023c { + compatible = "brcm,mdio-mux-iproc"; + reg = <0x6602023c 0x14>; + #address-cells = <1>; + #size-cells = <0>; + + mdio@0 { + reg = <0x0>; + #address-cells = <1>; + #size-cells = <0>; + + pci_phy0: pci-phy@0 { + compatible = "brcm,ns2-pcie-phy"; + reg = <0x0>; + #phy-cells = <0>; + status = "disabled"; + }; + }; + + mdio@7 { + reg = <0x7>; + #address-cells = <1>; + #size-cells = <0>; + + pci_phy1: pci-phy@0 { + compatible = "brcm,ns2-pcie-phy"; + reg = <0x0>; + #phy-cells = <0>; + status = "disabled"; + }; + }; + + mdio@10 { + reg = <0x10>; + #address-cells = <1>; + #size-cells = <0>; + }; + }; + timer0: timer@66030000 { compatible = "arm,sp804", "arm,primecell"; reg = <0x66030000 0x1000>; @@@ -357,16 -360,6 +396,16 @@@ clock-names = "wdogclk", "apb_pclk"; };
+ gpio_g: gpio@660a0000 { + compatible = "brcm,iproc-gpio"; + reg = <0x660a0000 0x50>; + ngpios = <32>; + #gpio-cells = <2>; + gpio-controller; + interrupt-controller; + interrupts = <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>; + }; + i2c1: i2c@660b0000 { compatible = "brcm,iproc-i2c"; reg = <0x660b0000 0x100>; @@@ -377,36 -370,6 +416,36 @@@ status = "disabled"; };
+ uart0: serial@66100000 { + compatible = "snps,dw-apb-uart"; + reg = <0x66100000 0x100>; + interrupts = <GIC_SPI 390 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&iprocslow>; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + uart1: serial@66110000 { + compatible = "snps,dw-apb-uart"; + reg = <0x66110000 0x100>; + interrupts = <GIC_SPI 391 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&iprocslow>; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + uart2: serial@66120000 { + compatible = "snps,dw-apb-uart"; + reg = <0x66120000 0x100>; + interrupts = <GIC_SPI 392 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&iprocslow>; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + uart3: serial@66130000 { compatible = "snps,dw-apb-uart"; reg = <0x66130000 0x100>; @@@ -444,49 -407,6 +483,49 @@@ reg = <0x66220000 0x28>; };
+ sata_phy: sata_phy@663f0100 { + compatible = "brcm,iproc-ns2-sata-phy"; + reg = <0x663f0100 0x1f00>, + <0x663f004c 0x10>; + reg-names = "phy", "phy-ctrl"; + #address-cells = <1>; + #size-cells = <0>; + + sata_phy0: sata-phy@0 { + reg = <0>; + #phy-cells = <0>; + status = "disabled"; + }; + + sata_phy1: sata-phy@1 { + reg = <1>; + #phy-cells = <0>; + status = "disabled"; + }; + }; + + sata: ahci@663f2000 { + compatible = "brcm,iproc-ahci", "generic-ahci"; + reg = <0x663f2000 0x1000>; + reg-names = "ahci"; + interrupts = <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + sata0: sata-port@0 { + reg = <0>; + phys = <&sata_phy0>; + phy-names = "sata-phy"; + }; + + sata1: sata-port@1 { + reg = <1>; + phys = <&sata_phy1>; + phy-names = "sata-phy"; + }; + }; + sdio0: sdhci@66420000 { compatible = "brcm,sdhci-iproc-cygnus"; reg = <0x66420000 0x100>; diff --combined drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 1b0ae4a,0f7dd86..64466f5 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@@ -56,6 -56,8 +56,8 @@@ static int bnxt_get_coalesce(struct net coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq; coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq;
+ coal->stats_block_coalesce_usecs = bp->stats_coal_ticks; + return 0; }
@@@ -63,6 -65,7 +65,7 @@@ static int bnxt_set_coalesce(struct net struct ethtool_coalesce *coal) { struct bnxt *bp = netdev_priv(dev); + bool update_stats = false; int rc = 0;
bp->rx_coal_ticks = coal->rx_coalesce_usecs; @@@ -76,8 -79,26 +79,26 @@@ bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq; bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq;
- if (netif_running(dev)) - rc = bnxt_hwrm_set_coal(bp); + if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) { + u32 stats_ticks = coal->stats_block_coalesce_usecs; + + stats_ticks = clamp_t(u32, stats_ticks, + BNXT_MIN_STATS_COAL_TICKS, + BNXT_MAX_STATS_COAL_TICKS); + stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS); + bp->stats_coal_ticks = stats_ticks; + update_stats = true; + } + + if (netif_running(dev)) { + if (update_stats) { + rc = bnxt_close_nic(bp, true, false); + if (!rc) + rc = bnxt_open_nic(bp, true, false); + } else { + rc = bnxt_hwrm_set_coal(bp); + } + }
return rc; } @@@ -628,7 -649,66 +649,66 @@@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw return speed_mask; }
- static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info) + #define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\ + { \ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 100baseT_Full); \ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 1000baseT_Full); \ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 10000baseT_Full); \ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 25000baseCR_Full); \ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 40000baseCR4_Full);\ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 50000baseCR2_Full);\ + if ((fw_pause) & BNXT_LINK_PAUSE_RX) { \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + Pause); \ + if (!((fw_pause) & BNXT_LINK_PAUSE_TX)) \ + ethtool_link_ksettings_add_link_mode( \ + lk_ksettings, name, Asym_Pause);\ + } else if ((fw_pause) & BNXT_LINK_PAUSE_TX) { \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + Asym_Pause); \ + } \ + } + + #define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name) \ + { \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 100baseT_Full) || \ + ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 100baseT_Half)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB; \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 1000baseT_Full) || \ + ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 1000baseT_Half)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB; \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 10000baseT_Full)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB; \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 25000baseCR_Full)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB; \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 40000baseCR4_Full)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB; \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 50000baseCR2_Full)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB; \ + } + + static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info, + struct ethtool_link_ksettings *lk_ksettings) { u16 fw_speeds = link_info->auto_link_speeds; u8 fw_pause = 0; @@@ -636,10 -716,11 +716,11 @@@ if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) fw_pause = link_info->auto_pause_setting;
- return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause); + BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising); }
- static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info) + static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info, + struct ethtool_link_ksettings *lk_ksettings) { u16 fw_speeds = link_info->lp_auto_link_speeds; u8 fw_pause = 0; @@@ -647,16 -728,24 +728,24 @@@ if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) fw_pause = link_info->lp_pause;
- return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause); + BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, + lp_advertising); }
- static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info) + static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info, + struct ethtool_link_ksettings *lk_ksettings) { u16 fw_speeds = link_info->support_speeds; - u32 supported;
- supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); - return supported | SUPPORTED_Pause | SUPPORTED_Asym_Pause; + BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported); + + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause); + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, + Asym_Pause); + + if (link_info->support_auto_speeds) + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, + Autoneg); }
u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) @@@ -683,65 -772,62 +772,62 @@@ } }
- static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) + static int bnxt_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *lk_ksettings) { struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info = &bp->link_info; - u16 ethtool_speed; - - cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info); + struct ethtool_link_settings *base = &lk_ksettings->base; + u32 ethtool_speed;
- if (link_info->auto_link_speeds) - cmd->supported |= SUPPORTED_Autoneg; + ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported); + bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
+ ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); if (link_info->autoneg) { - cmd->advertising = - bnxt_fw_to_ethtool_advertised_spds(link_info); - cmd->advertising |= ADVERTISED_Autoneg; - cmd->autoneg = AUTONEG_ENABLE; + bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings); + ethtool_link_ksettings_add_link_mode(lk_ksettings, + advertising, Autoneg); + base->autoneg = AUTONEG_ENABLE; if (link_info->phy_link_status == BNXT_LINK_LINK) - cmd->lp_advertising = - bnxt_fw_to_ethtool_lp_adv(link_info); + bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings); ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); if (!netif_carrier_ok(dev)) - cmd->duplex = DUPLEX_UNKNOWN; + base->duplex = DUPLEX_UNKNOWN; else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) - cmd->duplex = DUPLEX_FULL; + base->duplex = DUPLEX_FULL; else - cmd->duplex = DUPLEX_HALF; + base->duplex = DUPLEX_HALF; } else { - cmd->autoneg = AUTONEG_DISABLE; - cmd->advertising = 0; + base->autoneg = AUTONEG_DISABLE; ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed); - cmd->duplex = DUPLEX_HALF; + base->duplex = DUPLEX_HALF; if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL) - cmd->duplex = DUPLEX_FULL; + base->duplex = DUPLEX_FULL; } - ethtool_cmd_speed_set(cmd, ethtool_speed); + base->speed = ethtool_speed;
- cmd->port = PORT_NONE; + base->port = PORT_NONE; if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { - cmd->port = PORT_TP; - cmd->supported |= SUPPORTED_TP; - cmd->advertising |= ADVERTISED_TP; + base->port = PORT_TP; + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, + TP); + ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising, + TP); } else { - cmd->supported |= SUPPORTED_FIBRE; - cmd->advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, + FIBRE); + ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising, + FIBRE);
if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC) - cmd->port = PORT_DA; + base->port = PORT_DA; else if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE) - cmd->port = PORT_FIBRE; + base->port = PORT_FIBRE; } - - if (link_info->transceiver == - PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL) - cmd->transceiver = XCVR_INTERNAL; - else - cmd->transceiver = XCVR_EXTERNAL; - cmd->phy_address = link_info->phy_addr; + base->phy_address = link_info->phy_addr;
return 0; } @@@ -815,37 -901,25 +901,25 @@@ u16 bnxt_get_fw_auto_link_speeds(u32 ad return fw_speed_mask; }
- static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) + static int bnxt_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *lk_ksettings) { - int rc = 0; struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info = &bp->link_info; + const struct ethtool_link_settings *base = &lk_ksettings->base; u32 speed, fw_advertising = 0; bool set_pause = false; + int rc = 0;
- if (BNXT_VF(bp)) - return rc; - - if (cmd->autoneg == AUTONEG_ENABLE) { - u32 supported_spds = bnxt_fw_to_ethtool_support_spds(link_info); + if (!BNXT_SINGLE_PF(bp)) + return -EOPNOTSUPP;
- if (cmd->advertising & ~(supported_spds | ADVERTISED_Autoneg | - ADVERTISED_TP | ADVERTISED_FIBRE)) { - netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n", - cmd->advertising); - rc = -EINVAL; - goto set_setting_exit; - } - fw_advertising = bnxt_get_fw_auto_link_speeds(cmd->advertising); - if (fw_advertising & ~link_info->support_speeds) { - netdev_err(dev, "Advertising parameters are not supported! (adv: 0x%x)\n", - cmd->advertising); - rc = -EINVAL; - goto set_setting_exit; - } + if (base->autoneg == AUTONEG_ENABLE) { + BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings, + advertising); link_info->autoneg |= BNXT_AUTONEG_SPEED; if (!fw_advertising) - link_info->advertising = link_info->support_speeds; + link_info->advertising = link_info->support_auto_speeds; else link_info->advertising = fw_advertising; /* any change to autoneg will cause link change, therefore the @@@ -863,16 -937,12 +937,12 @@@ rc = -EINVAL; goto set_setting_exit; } - /* TODO: currently don't support half duplex */ - if (cmd->duplex == DUPLEX_HALF) { + if (base->duplex == DUPLEX_HALF) { netdev_err(dev, "HALF DUPLEX is not supported!\n"); rc = -EINVAL; goto set_setting_exit; } - /* If received a request for an unknown duplex, assume full*/ - if (cmd->duplex == DUPLEX_UNKNOWN) - cmd->duplex = DUPLEX_FULL; - speed = ethtool_cmd_speed(cmd); + speed = base->speed; fw_speed = bnxt_get_fw_speed(dev, speed); if (!fw_speed) { rc = -EINVAL; @@@ -911,8 -981,8 +981,8 @@@ static int bnxt_set_pauseparam(struct n struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info = &bp->link_info;
- if (BNXT_VF(bp)) - return rc; + if (!BNXT_SINGLE_PF(bp)) + return -EOPNOTSUPP;
if (epause->autoneg) { if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) @@@ -1010,6 -1080,8 +1080,8 @@@ static int bnxt_firmware_reset(struct n case BNX_DIR_TYPE_APE_FW: case BNX_DIR_TYPE_APE_PATCH: req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT; + /* Self-reset APE upon next PCIe reset: */ + req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; break; case BNX_DIR_TYPE_KONG_FW: case BNX_DIR_TYPE_KONG_PATCH: @@@ -1043,9 -1115,27 +1115,27 @@@ static int bnxt_flash_firmware(struct n case BNX_DIR_TYPE_BOOTCODE_2: code_type = CODE_BOOT; break; + case BNX_DIR_TYPE_CHIMP_PATCH: + code_type = CODE_CHIMP_PATCH; + break; case BNX_DIR_TYPE_APE_FW: code_type = CODE_MCTP_PASSTHRU; break; + case BNX_DIR_TYPE_APE_PATCH: + code_type = CODE_APE_PATCH; + break; + case BNX_DIR_TYPE_KONG_FW: + code_type = CODE_KONG_FW; + break; + case BNX_DIR_TYPE_KONG_PATCH: + code_type = CODE_KONG_PATCH; + break; + case BNX_DIR_TYPE_BONO_FW: + code_type = CODE_BONO_FW; + break; + case BNX_DIR_TYPE_BONO_PATCH: + code_type = CODE_BONO_PATCH; + break; default: netdev_err(dev, "Unsupported directory entry type: %u\n", dir_type); @@@ -1100,6 -1190,8 +1190,8 @@@ static bool bnxt_dir_type_is_ape_bin_fo case BNX_DIR_TYPE_APE_PATCH: case BNX_DIR_TYPE_KONG_FW: case BNX_DIR_TYPE_KONG_PATCH: + case BNX_DIR_TYPE_BONO_FW: + case BNX_DIR_TYPE_BONO_PATCH: return true; }
@@@ -1137,7 -1229,8 +1229,8 @@@ static int bnxt_flash_firmware_from_fil const struct firmware *fw; int rc;
- if (bnxt_dir_type_is_executable(dir_type) == false) + if (dir_type != BNX_DIR_TYPE_UPDATE && + bnxt_dir_type_is_executable(dir_type) == false) return -EINVAL;
rc = request_firmware(&fw, filename, &dev->dev); @@@ -1433,8 -1526,8 +1526,8 @@@ static int bnxt_set_eee(struct net_devi _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); int rc = 0;
- if (BNXT_VF(bp)) - return 0; + if (!BNXT_SINGLE_PF(bp)) + return -EOPNOTSUPP;
if (!(bp->flags & BNXT_FLAG_EEE_CAP)) return -EOPNOTSUPP; @@@ -1591,7 -1684,7 +1684,7 @@@ static int bnxt_get_module_eeprom(struc { struct bnxt *bp = netdev_priv(dev); u16 start = eeprom->offset, length = eeprom->len; - int rc; + int rc = 0;
memset(data, 0, eeprom->len);
@@@ -1618,8 -1711,8 +1711,8 @@@ }
const struct ethtool_ops bnxt_ethtool_ops = { - .get_settings = bnxt_get_settings, - .set_settings = bnxt_set_settings, + .get_link_ksettings = bnxt_get_link_ksettings, + .set_link_ksettings = bnxt_set_link_ksettings, .get_pauseparam = bnxt_get_pauseparam, .set_pauseparam = bnxt_set_pauseparam, .get_drvinfo = bnxt_get_drvinfo, diff --combined drivers/net/ethernet/freescale/fec.h index dc71a88,92fd5c0..4e98b24 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@@ -442,8 -442,8 +442,10 @@@ struct bufdesc_ex #define FEC_QUIRK_SINGLE_MDIO (1 << 11) /* Controller supports RACC register */ #define FEC_QUIRK_HAS_RACC (1 << 12) +/* Interrupt doesn't wake CPU from deep idle */ +#define FEC_QUIRK_ERR006687 (1 << 13) + /* Controller supports interrupt coalesc */ -#define FEC_QUIRK_HAS_COALESCE (1 << 13) ++#define FEC_QUIRK_HAS_COALESCE (1 << 14)
struct bufdesc_prop { int qid; diff --combined drivers/net/ethernet/freescale/fec_main.c index d9ecc30,4040003..01f7e81 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@@ -60,7 -60,6 +60,7 @@@ #include <linux/if_vlan.h> #include <linux/pinctrl/consumer.h> #include <linux/prefetch.h> +#include <soc/imx/cpuidle.h>
#include <asm/cacheflush.h>
@@@ -112,7 -111,13 +112,13 @@@ static struct platform_device_id fec_de FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | - FEC_QUIRK_HAS_RACC, + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE, + }, { + .name = "imx6ul-fec", + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_BUG_CAPTURE | + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE, }, { /* sentinel */ } @@@ -126,6 -131,7 +132,7 @@@ enum imx_fec_type IMX6Q_FEC, MVF600_FEC, IMX6SX_FEC, + IMX6UL_FEC, };
static const struct of_device_id fec_dt_ids[] = { @@@ -135,6 -141,7 +142,7 @@@ { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], }, + { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fec_dt_ids); @@@ -2359,9 -2366,6 +2367,6 @@@ static void fec_enet_itr_coal_set(struc struct fec_enet_private *fep = netdev_priv(ndev); int rx_itr, tx_itr;
- if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) - return; - /* Must be greater than zero to avoid unpredictable behavior */ if (!fep->rx_time_itr || !fep->rx_pkts_itr || !fep->tx_time_itr || !fep->tx_pkts_itr) @@@ -2384,10 -2388,12 +2389,12 @@@
writel(tx_itr, fep->hwp + FEC_TXIC0); writel(rx_itr, fep->hwp + FEC_RXIC0); - writel(tx_itr, fep->hwp + FEC_TXIC1); - writel(rx_itr, fep->hwp + FEC_RXIC1); - writel(tx_itr, fep->hwp + FEC_TXIC2); - writel(rx_itr, fep->hwp + FEC_RXIC2); + if (fep->quirks & FEC_QUIRK_HAS_AVB) { + writel(tx_itr, fep->hwp + FEC_TXIC1); + writel(rx_itr, fep->hwp + FEC_RXIC1); + writel(tx_itr, fep->hwp + FEC_TXIC2); + writel(rx_itr, fep->hwp + FEC_RXIC2); + } }
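The coalescing hunk above only touches the extra ITR registers when the AVB quirk is present. A minimal sketch of that quirk-bit pattern, with hypothetical names in place of the FEC defines:

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_QUIRK_HAS_COALESCE	(1 << 0)
#define EXAMPLE_QUIRK_HAS_AVB		(1 << 1)

struct example_priv {
	void __iomem *hwp;
	u32 quirks;
};

static void example_write_itr(struct example_priv *priv, u32 itr)
{
	if (!(priv->quirks & EXAMPLE_QUIRK_HAS_COALESCE))
		return;				/* no coalescing block at all */

	writel(itr, priv->hwp + 0x00);		/* queue 0 exists everywhere */

	if (priv->quirks & EXAMPLE_QUIRK_HAS_AVB) {
		writel(itr, priv->hwp + 0x04);	/* extra queues on AVB parts */
		writel(itr, priv->hwp + 0x08);
	}
}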
static int @@@ -2395,7 -2401,7 +2402,7 @@@ fec_enet_get_coalesce(struct net_devic { struct fec_enet_private *fep = netdev_priv(ndev);
- if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) + if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) return -EOPNOTSUPP;
ec->rx_coalesce_usecs = fep->rx_time_itr; @@@ -2413,7 -2419,7 +2420,7 @@@ fec_enet_set_coalesce(struct net_devic struct fec_enet_private *fep = netdev_priv(ndev); unsigned int cycle;
- if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) + if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) return -EOPNOTSUPP;
if (ec->rx_max_coalesced_frames > 255) { @@@ -2819,9 -2825,6 +2826,9 @@@ fec_enet_open(struct net_device *ndev if (ret) goto err_enet_mii_probe;
+ if (fep->quirks & FEC_QUIRK_ERR006687) + imx6q_cpuidle_fec_irqs_used(); + napi_enable(&fep->napi); phy_start(ndev->phydev); netif_tx_start_all_queues(ndev); @@@ -2857,9 -2860,6 +2864,9 @@@ fec_enet_close(struct net_device *ndev
phy_disconnect(ndev->phydev);
+ if (fep->quirks & FEC_QUIRK_ERR006687) + imx6q_cpuidle_fec_irqs_unused(); + fec_enet_clk_enable(ndev, false); pinctrl_pm_select_sleep_state(&fep->pdev->dev); pm_runtime_mark_last_busy(&fep->pdev->dev); @@@ -3198,7 -3198,12 +3205,12 @@@ static void fec_reset_phy(struct platfo dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); return; } - msleep(msec); + + if (msec > 20) + msleep(msec); + else + usleep_range(msec * 1000, msec * 1000 + 1000); + gpio_set_value_cansleep(phy_reset, !active_high); } #else /* CONFIG_OF */ @@@ -3299,11 -3304,6 +3311,11 @@@ fec_probe(struct platform_device *pdev
platform_set_drvdata(pdev, ndev);
+ if ((of_machine_is_compatible("fsl,imx6q") || + of_machine_is_compatible("fsl,imx6dl")) && + !of_property_read_bool(np, "fsl,err006687-workaround-present")) + fep->quirks |= FEC_QUIRK_ERR006687; + if (of_get_property(np, "fsl,magic-packet", NULL)) fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
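The fec_reset_phy() hunk above splits the reset delay because msleep() can overshoot badly for short sleeps. A sketch of the same split with an illustrative helper name:

#include <linux/delay.h>

static void example_reset_delay(unsigned int msec)
{
	if (msec > 20)
		msleep(msec);
	else
		usleep_range(msec * 1000, msec * 1000 + 1000);
}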
diff --combined drivers/net/ethernet/intel/e1000e/netdev.c index 0382de0,41f32c0..02f4439 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@@ -4352,7 -4352,8 +4352,8 @@@ static cycle_t e1000e_cyclecounter_read
time_delta = systim_next - systim; temp = time_delta; - rem = do_div(temp, incvalue); + /* VMWare users have seen incvalue of zero, don't div / 0 */ + rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
systim = systim_next;
@@@ -7329,7 -7330,8 +7330,7 @@@ err_flashmap err_ioremap: free_netdev(netdev); err_alloc_etherdev: - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); @@@ -7396,7 -7398,8 +7397,7 @@@ static void e1000_remove(struct pci_de if ((adapter->hw.flash_address) && (adapter->hw.mac.type < e1000_pch_spt)) iounmap(adapter->hw.flash_address); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev);
free_netdev(netdev);
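The e1000e cyclecounter hunk above guards do_div() because it has no defined behaviour for a zero divisor (the incvalue == 0 case reported on VMware). A sketch of the guarded division, with a made-up function name:

#include <linux/types.h>
#include <asm/div64.h>

static u32 example_systim_remainder(u64 time_delta, u32 incvalue)
{
	u64 quot = time_delta;

	/* do_div() divides 'quot' in place and returns the remainder;
	 * a zero incvalue is short-circuited instead of dividing by it.
	 */
	return incvalue ? do_div(quot, incvalue) : (time_delta != 0);
}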
diff --combined drivers/net/ethernet/intel/i40e/i40e_main.c index 2e10d23,2b11405..545b15a --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@@ -31,12 -31,7 +31,7 @@@ /* Local includes */ #include "i40e.h" #include "i40e_diag.h" - #if IS_ENABLED(CONFIG_VXLAN) - #include <net/vxlan.h> - #endif - #if IS_ENABLED(CONFIG_GENEVE) - #include <net/geneve.h> - #endif + #include <net/udp_tunnel.h>
const char i40e_driver_name[] = "i40e"; static const char i40e_driver_string[] = @@@ -45,8 -40,8 +40,8 @@@ #define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 1 - #define DRV_VERSION_MINOR 5 - #define DRV_VERSION_BUILD 16 + #define DRV_VERSION_MINOR 6 + #define DRV_VERSION_BUILD 4 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@@ -1584,14 -1579,8 +1579,8 @@@ static void i40e_vsi_setup_queue_map(st vsi->tc_config.numtc = numtc; vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; /* Number of queues per enabled TC */ - /* In MFP case we can have a much lower count of MSIx - * vectors available and so we need to lower the used - * q count. - */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) - qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix); - else - qcount = vsi->alloc_queue_pairs; + qcount = vsi->alloc_queue_pairs; + num_tc_qps = qcount / numtc; num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
@@@ -1845,8 -1834,10 +1834,10 @@@ int i40e_sync_vsi_filters(struct i40e_v { struct list_head tmp_del_list, tmp_add_list; struct i40e_mac_filter *f, *ftmp, *fclone; + struct i40e_hw *hw = &vsi->back->hw; bool promisc_forced_on = false; bool add_happened = false; + char vsi_name[16] = "PF"; int filter_list_len = 0; u32 changed_flags = 0; i40e_status aq_ret = 0; @@@ -1874,6 -1865,11 +1865,11 @@@ INIT_LIST_HEAD(&tmp_del_list); INIT_LIST_HEAD(&tmp_add_list);
+ if (vsi->type == I40E_VSI_SRIOV) + snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id); + else if (vsi->type != I40E_VSI_MAIN) + snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid); + if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
@@@ -1925,7 -1921,7 +1921,7 @@@ if (!list_empty(&tmp_del_list)) { int del_list_size;
- filter_list_len = pf->hw.aq.asq_buf_size / + filter_list_len = hw->aq.asq_buf_size / sizeof(struct i40e_aqc_remove_macvlan_element_data); del_list_size = filter_list_len * sizeof(struct i40e_aqc_remove_macvlan_element_data); @@@ -1957,21 -1953,21 +1953,21 @@@
/* flush a full buffer */ if (num_del == filter_list_len) { - aq_ret = i40e_aq_remove_macvlan(&pf->hw, - vsi->seid, - del_list, - num_del, - NULL); - aq_err = pf->hw.aq.asq_last_status; + aq_ret = + i40e_aq_remove_macvlan(hw, vsi->seid, + del_list, + num_del, NULL); + aq_err = hw->aq.asq_last_status; num_del = 0; memset(del_list, 0, del_list_size);
if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) { retval = -EIO; dev_err(&pf->pdev->dev, - "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n", - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); + "ignoring delete macvlan error on %s, err %s, aq_err %s while flushing a full buffer\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, aq_err)); } } /* Release memory for MAC filter entries which were @@@ -1982,17 -1978,17 +1978,17 @@@ }
if (num_del) { - aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, - del_list, num_del, - NULL); - aq_err = pf->hw.aq.asq_last_status; + aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list, + num_del, NULL); + aq_err = hw->aq.asq_last_status; num_del = 0;
if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) dev_info(&pf->pdev->dev, - "ignoring delete macvlan error, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); + "ignoring delete macvlan error on %s, err %s aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, aq_err)); }
kfree(del_list); @@@ -2003,7 -1999,7 +1999,7 @@@ int add_list_size;
/* do all the adds now */ - filter_list_len = pf->hw.aq.asq_buf_size / + filter_list_len = hw->aq.asq_buf_size / sizeof(struct i40e_aqc_add_macvlan_element_data), add_list_size = filter_list_len * sizeof(struct i40e_aqc_add_macvlan_element_data); @@@ -2038,10 -2034,10 +2034,10 @@@
/* flush a full buffer */ if (num_add == filter_list_len) { - aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + aq_ret = i40e_aq_add_macvlan(hw, vsi->seid, add_list, num_add, NULL); - aq_err = pf->hw.aq.asq_last_status; + aq_err = hw->aq.asq_last_status; num_add = 0;
if (aq_ret) @@@ -2056,9 -2052,9 +2052,9 @@@ }
if (num_add) { - aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + aq_ret = i40e_aq_add_macvlan(hw, vsi->seid, add_list, num_add, NULL); - aq_err = pf->hw.aq.asq_last_status; + aq_err = hw->aq.asq_last_status; num_add = 0; } kfree(add_list); @@@ -2067,16 -2063,18 +2063,18 @@@ if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) { retval = i40e_aq_rc_to_posix(aq_ret, aq_err); dev_info(&pf->pdev->dev, - "add filter failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); - if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && + "add filter failed on %s, err %s aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, aq_err)); + if ((hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) && !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)) { promisc_forced_on = true; set_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); - dev_info(&pf->pdev->dev, "promiscuous mode forced on\n"); + dev_info(&pf->pdev->dev, "promiscuous mode forced on %s\n", + vsi_name); } } } @@@ -2098,12 -2096,12 +2096,12 @@@ NULL); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, - pf->hw.aq.asq_last_status); + hw->aq.asq_last_status); dev_info(&pf->pdev->dev, - "set multi promisc failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + "set multi promisc failed on %s, err %s aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); } } if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { @@@ -2122,33 -2120,58 +2120,58 @@@ */ if (pf->cur_promisc != cur_promisc) { pf->cur_promisc = cur_promisc; - set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + if (cur_promisc) + aq_ret = + i40e_aq_set_default_vsi(hw, + vsi->seid, + NULL); + else + aq_ret = + i40e_aq_clear_default_vsi(hw, + vsi->seid, + NULL); + if (aq_ret) { + retval = i40e_aq_rc_to_posix(aq_ret, + hw->aq.asq_last_status); + dev_info(&pf->pdev->dev, + "Set default VSI failed on %s, err %s, aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, + hw->aq.asq_last_status)); + } } } else { aq_ret = i40e_aq_set_vsi_unicast_promiscuous( - &vsi->back->hw, + hw, vsi->seid, cur_promisc, NULL, true); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, - pf->hw.aq.asq_last_status); + hw->aq.asq_last_status); dev_info(&pf->pdev->dev, - "set unicast promisc failed, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "set unicast promisc failed on %s, err %s, aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, + hw->aq.asq_last_status)); } aq_ret = i40e_aq_set_vsi_multicast_promiscuous( - &vsi->back->hw, + hw, vsi->seid, cur_promisc, NULL); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, - pf->hw.aq.asq_last_status); + hw->aq.asq_last_status); dev_info(&pf->pdev->dev, - "set multicast promisc failed, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "set multicast promisc failed on %s, err %s, aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, + hw->aq.asq_last_status)); } } aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, @@@ -2159,9 -2182,9 +2182,9 @@@ pf->hw.aq.asq_last_status); dev_info(&pf->pdev->dev, "set brdcast promisc failed, err %s, aq_err %s\n", - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, + hw->aq.asq_last_status)); } } out: @@@ -3952,6 -3975,7 +3975,7 @@@ static void i40e_vsi_free_irq(struct i4 /* clear the affinity_mask in the IRQ 
descriptor */ irq_set_affinity_hint(pf->msix_entries[vector].vector, NULL); + synchronize_irq(pf->msix_entries[vector].vector); free_irq(pf->msix_entries[vector].vector, vsi->q_vectors[i]);
@@@ -4958,7 -4982,6 +4982,6 @@@ static void i40e_dcb_reconfigure(struc if (pf->vsi[v]->netdev) i40e_dcbnl_set_all(pf->vsi[v]); } - i40e_notify_client_of_l2_param_changes(pf->vsi[v]); } }
@@@ -5183,12 -5206,6 +5206,6 @@@ static void i40e_vsi_reinit_locked(stru usleep_range(1000, 2000); i40e_down(vsi);
- /* Give a VF some time to respond to the reset. The - * two second wait is based upon the watchdog cycle in - * the VF driver. - */ - if (vsi->type == I40E_VSI_SRIOV) - msleep(2000); i40e_up(vsi); clear_bit(__I40E_CONFIG_BUSY, &pf->state); } @@@ -5231,6 -5248,9 +5248,9 @@@ void i40e_down(struct i40e_vsi *vsi i40e_clean_tx_ring(vsi->tx_rings[i]); i40e_clean_rx_ring(vsi->rx_rings[i]); } + + i40e_notify_client_of_netdev_close(vsi, false); + }
/** @@@ -5342,14 -5362,7 +5362,7 @@@ int i40e_open(struct net_device *netdev TCP_FLAG_CWR) >> 16); wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
- #ifdef CONFIG_I40E_VXLAN - vxlan_get_rx_port(netdev); - #endif - #ifdef CONFIG_I40E_GENEVE - if (pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE) - geneve_get_rx_port(netdev); - #endif - + udp_tunnel_get_rx_info(netdev); i40e_notify_client_of_netdev_open(vsi);
return 0; @@@ -5716,6 -5729,8 +5729,8 @@@ static int i40e_handle_lldp_event(struc i40e_service_event_schedule(pf); } else { i40e_pf_unquiesce_all_vsi(pf); + /* Notify the client for the DCB changes */ + i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]); }
exit: @@@ -5940,7 -5955,6 +5955,6 @@@ static void i40e_fdir_flush_and_replay( if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); } - }
/** @@@ -7057,7 -7071,6 +7071,6 @@@ static void i40e_handle_mdd_event(struc **/ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) { - #if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) struct i40e_hw *hw = &pf->hw; i40e_status ret; __be16 port; @@@ -7092,7 -7105,6 +7105,6 @@@ } } } - #endif }
/** @@@ -7174,7 -7186,7 +7186,7 @@@ static int i40e_set_num_rings_in_vsi(st vsi->alloc_queue_pairs = 1; vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT, I40E_REQ_DESCRIPTOR_MULTIPLE); - vsi->num_q_vectors = 1; + vsi->num_q_vectors = pf->num_fdsb_msix; break;
case I40E_VSI_VMDQ2: @@@ -7558,9 -7570,11 +7570,11 @@@ static int i40e_init_msix(struct i40e_p /* reserve one vector for sideband flow director */ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { if (vectors_left) { + pf->num_fdsb_msix = 1; v_budget++; vectors_left--; } else { + pf->num_fdsb_msix = 0; pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; } } @@@ -8579,7 -8593,9 +8593,9 @@@ bool i40e_set_ntuple(struct i40e_pf *pf /* Enable filters and mark for reset */ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) need_reset = true; - pf->flags |= I40E_FLAG_FD_SB_ENABLED; + /* enable FD_SB only if there is MSI-X vector */ + if (pf->num_fdsb_msix > 0) + pf->flags |= I40E_FLAG_FD_SB_ENABLED; } else { /* turn off filters, mark for reset and clear SW filter list */ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { @@@ -8628,7 -8644,6 +8644,6 @@@ static int i40e_set_features(struct net return 0; }
- #if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) /** * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port * @pf: board private structure @@@ -8648,21 -8663,18 +8663,18 @@@ static u8 i40e_get_udp_port_idx(struct return i; }
- #endif - - #if IS_ENABLED(CONFIG_VXLAN) /** - * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up + * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up * @netdev: This physical port's netdev - * @sa_family: Socket Family that VXLAN is notifying us about - * @port: New UDP port number that VXLAN started listening to + * @ti: Tunnel endpoint information **/ - static void i40e_add_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + static void i40e_udp_tunnel_add(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + __be16 port = ti->port; u8 next_idx; u8 idx;
@@@ -8670,7 -8682,7 +8682,7 @@@
/* Check if port already exists */ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "vxlan port %d already offloaded\n", + netdev_info(netdev, "port %d already offloaded\n", ntohs(port)); return; } @@@ -8679,131 -8691,75 +8691,75 @@@ next_idx = i40e_get_udp_port_idx(pf, 0);
if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n", - ntohs(port)); - return; - } - - /* New port: add it and mark its index in the bitmap */ - pf->udp_ports[next_idx].index = port; - pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; - pf->pending_udp_bitmap |= BIT_ULL(next_idx); - pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; - } - - /** - * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away - * @netdev: This physical port's netdev - * @sa_family: Socket Family that VXLAN is notifying us about - * @port: UDP port number that VXLAN stopped listening to - **/ - static void i40e_del_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) - { - struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; - u8 idx; - - idx = i40e_get_udp_port_idx(pf, port); - - /* Check if port already exists */ - if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - /* if port exists, set it to 0 (mark for deletion) - * and make it pending - */ - pf->udp_ports[idx].index = 0; - pf->pending_udp_bitmap |= BIT_ULL(idx); - pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; - } else { - netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", - ntohs(port)); - } - } - #endif - - #if IS_ENABLED(CONFIG_GENEVE) - /** - * i40e_add_geneve_port - Get notifications about GENEVE ports that come up - * @netdev: This physical port's netdev - * @sa_family: Socket Family that GENEVE is notifying us about - * @port: New UDP port number that GENEVE started listening to - **/ - static void i40e_add_geneve_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) - { - struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; - u8 next_idx; - u8 idx; - - if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) - return; - - idx = i40e_get_udp_port_idx(pf, port); - - /* Check if port already exists */ - if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "udp port %d already offloaded\n", + netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n", ntohs(port)); return; }
- /* Now check if there is space to add the new port */ - next_idx = i40e_get_udp_port_idx(pf, 0); - - if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "maximum number of UDP ports reached, not adding port %d\n", - ntohs(port)); + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) + return; + pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE; + break; + default: return; }
/* New port: add it and mark its index in the bitmap */ pf->udp_ports[next_idx].index = port; - pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE; pf->pending_udp_bitmap |= BIT_ULL(next_idx); pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; - - dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port)); }
/** - * i40e_del_geneve_port - Get notifications about GENEVE ports that go away + * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away * @netdev: This physical port's netdev - * @sa_family: Socket Family that GENEVE is notifying us about - * @port: UDP port number that GENEVE stopped listening to + * @ti: Tunnel endpoint information **/ - static void i40e_del_geneve_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + static void i40e_udp_tunnel_del(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + __be16 port = ti->port; u8 idx;
- if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) - return; - idx = i40e_get_udp_port_idx(pf, port);
/* Check if port already exists */ - if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - /* if port exists, set it to 0 (mark for deletion) - * and make it pending - */ - pf->udp_ports[idx].index = 0; - pf->pending_udp_bitmap |= BIT_ULL(idx); - pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; + if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS) + goto not_found;
- dev_info(&pf->pdev->dev, "deleting geneve port %d\n", - ntohs(port)); - } else { - netdev_warn(netdev, "geneve port %d was not found, not deleting\n", - ntohs(port)); + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN) + goto not_found; + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE) + goto not_found; + break; + default: + goto not_found; } + + /* if port exists, set it to 0 (mark for deletion) + * and make it pending + */ + pf->udp_ports[idx].index = 0; + pf->pending_udp_bitmap |= BIT_ULL(idx); + pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; + + return; + not_found: + netdev_warn(netdev, "UDP port %d was not found, not deleting\n", + ntohs(port)); } - #endif
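The i40e hunks above collapse the per-protocol vxlan/geneve callbacks into one ndo_udp_tunnel_add/del pair driven by struct udp_tunnel_info. A minimal sketch of that shape for a hypothetical driver; on open, drivers then call udp_tunnel_get_rx_info(netdev) to replay already-known ports, as the hunks above do:

#include <linux/netdevice.h>
#include <net/udp_tunnel.h>

static void example_udp_tunnel_add(struct net_device *netdev,
				   struct udp_tunnel_info *ti)
{
	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
	case UDP_TUNNEL_TYPE_GENEVE:
		netdev_info(netdev, "offloading UDP port %d\n",
			    ntohs(ti->port));
		/* program ti->port into the hardware filter table here */
		break;
	default:
		break;
	}
}

static void example_udp_tunnel_del(struct net_device *netdev,
				   struct udp_tunnel_info *ti)
{
	/* remove ti->port from the hardware filter table */
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_udp_tunnel_add	= example_udp_tunnel_add,
	.ndo_udp_tunnel_del	= example_udp_tunnel_del,
};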
static int i40e_get_phys_port_id(struct net_device *netdev, struct netdev_phys_item_id *ppid) @@@ -9033,14 -8989,8 +8989,8 @@@ static const struct net_device_ops i40e .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, .ndo_set_vf_trust = i40e_ndo_set_vf_trust, - #if IS_ENABLED(CONFIG_VXLAN) - .ndo_add_vxlan_port = i40e_add_vxlan_port, - .ndo_del_vxlan_port = i40e_del_vxlan_port, - #endif - #if IS_ENABLED(CONFIG_GENEVE) - .ndo_add_geneve_port = i40e_add_geneve_port, - .ndo_del_geneve_port = i40e_del_geneve_port, - #endif + .ndo_udp_tunnel_add = i40e_udp_tunnel_add, + .ndo_udp_tunnel_del = i40e_udp_tunnel_del, .ndo_get_phys_port_id = i40e_get_phys_port_id, .ndo_fdb_add = i40e_ndo_fdb_add, .ndo_features_check = i40e_features_check, @@@ -10133,14 -10083,14 +10083,14 @@@ void i40e_veb_release(struct i40e_veb * static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) { struct i40e_pf *pf = veb->pf; - bool is_default = veb->pf->cur_promisc; bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); int ret;
- /* get a VEB from the hardware */ ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, - veb->enabled_tc, is_default, + veb->enabled_tc, false, &veb->seid, enable_stats, NULL); + + /* get a VEB from the hardware */ if (ret) { dev_info(&pf->pdev->dev, "couldn't add VEB, err %s aq_err %s\n", @@@ -10689,12 -10639,8 +10639,8 @@@ static void i40e_print_features(struct } if (pf->flags & I40E_FLAG_DCB_CAPABLE) i += snprintf(&buf[i], REMAIN(i), " DCB"); - #if IS_ENABLED(CONFIG_VXLAN) i += snprintf(&buf[i], REMAIN(i), " VxLAN"); - #endif - #if IS_ENABLED(CONFIG_GENEVE) i += snprintf(&buf[i], REMAIN(i), " Geneve"); - #endif if (pf->flags & I40E_FLAG_PTP) i += snprintf(&buf[i], REMAIN(i), " PTP"); #ifdef I40E_FCOE @@@ -10769,7 -10715,8 +10715,7 @@@ static int i40e_probe(struct pci_dev *p }
/* set up pci connections */ - err = pci_request_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM), i40e_driver_name); + err = pci_request_mem_regions(pdev, i40e_driver_name); if (err) { dev_info(&pdev->dev, "pci_request_selected_regions failed %d\n", err); @@@ -11266,7 -11213,8 +11212,7 @@@ err_ioremap kfree(pf); err_pf_alloc: pci_disable_pcie_error_reporting(pdev); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); @@@ -11377,7 -11325,8 +11323,7 @@@ static void i40e_remove(struct pci_dev
iounmap(hw->hw_addr); kfree(pf); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev);
pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); @@@ -11522,6 -11471,7 +11468,7 @@@ static int i40e_suspend(struct pci_dev { struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_hw *hw = &pf->hw; + int retval = 0;
set_bit(__I40E_SUSPENDED, &pf->state); set_bit(__I40E_DOWN, &pf->state); @@@ -11533,10 -11483,16 +11480,16 @@@ wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+ i40e_stop_misc_vector(pf); + + retval = pci_save_state(pdev); + if (retval) + return retval; + pci_wake_from_d3(pdev, pf->wol_en); pci_set_power_state(pdev, PCI_D3hot);
- return 0; + return retval; }
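The i40e_suspend() hunk above saves PCI config space before dropping to D3 and propagates any failure. A sketch of that legacy-PM ordering with hypothetical names (the WoL policy shown is only illustrative):

#include <linux/pci.h>

static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	bool wol_enabled = true;	/* illustrative policy only */
	int retval;

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	pci_wake_from_d3(pdev, wol_enabled);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}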
/** diff --combined drivers/net/ethernet/intel/igb/igb_main.c index 1c96fe8,9bcba42..942a89f --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@@ -2027,7 -2027,8 +2027,8 @@@ void igb_reset(struct igb_adapter *adap wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
/* Re-enable PTP, where applicable. */ - igb_ptp_reset(adapter); + if (adapter->ptp_flags & IGB_PTP_ENABLED) + igb_ptp_reset(adapter);
igb_get_phy_info(hw); } @@@ -2323,7 -2324,9 +2324,7 @@@ static int igb_probe(struct pci_dev *pd } }
- err = pci_request_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM), - igb_driver_name); + err = pci_request_mem_regions(pdev, igb_driver_name); if (err) goto err_pci_reg;
@@@ -2747,7 -2750,8 +2748,7 @@@ err_sw_init err_ioremap: free_netdev(netdev); err_alloc_etherdev: - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); @@@ -2912,7 -2916,8 +2913,7 @@@ static void igb_remove(struct pci_dev * pci_iounmap(pdev, adapter->io_addr); if (hw->flash_address) iounmap(hw->flash_address); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev);
kfree(adapter->shadow_vfta); free_netdev(netdev); @@@ -6851,12 -6856,12 +6852,12 @@@ static bool igb_can_reuse_rx_page(struc **/ static bool igb_add_rx_frag(struct igb_ring *rx_ring, struct igb_rx_buffer *rx_buffer, + unsigned int size, union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) { struct page *page = rx_buffer->page; unsigned char *va = page_address(page) + rx_buffer->page_offset; - unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) unsigned int truesize = IGB_RX_BUFSZ; #else @@@ -6908,6 -6913,7 +6909,7 @@@ static struct sk_buff *igb_fetch_rx_buf union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) { + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); struct igb_rx_buffer *rx_buffer; struct page *page;
@@@ -6943,11 -6949,11 +6945,11 @@@ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, - IGB_RX_BUFSZ, + size, DMA_FROM_DEVICE);
/* pull page into skb */ - if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) { /* hand second half of page back to the ring */ igb_reuse_rx_page(rx_ring, rx_buffer); } else { @@@ -7523,6 -7529,8 +7525,8 @@@ static int __igb_shutdown(struct pci_de if (netif_running(netdev)) __igb_close(netdev, true);
+ igb_ptp_suspend(adapter); + igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PM diff --combined drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 1629468,918b94b..ef82cf9 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@@ -50,7 -50,7 +50,7 @@@ #include <linux/if_bridge.h> #include <linux/prefetch.h> #include <scsi/fc/fc_fcoe.h> - #include <net/vxlan.h> + #include <net/udp_tunnel.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_gact.h> #include <net/tc_act/tc_mirred.h> @@@ -5722,9 -5722,7 +5722,7 @@@ static int ixgbe_sw_init(struct ixgbe_a #ifdef CONFIG_IXGBE_DCA adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; #endif - #ifdef CONFIG_IXGBE_VXLAN adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE; - #endif break; default: break; @@@ -6158,9 -6156,7 +6156,7 @@@ int ixgbe_open(struct net_device *netde ixgbe_up_complete(adapter);
ixgbe_clear_vxlan_port(adapter); - #ifdef CONFIG_IXGBE_VXLAN - vxlan_get_rx_port(netdev); - #endif + udp_tunnel_get_rx_info(netdev);
return 0;
@@@ -7262,14 -7258,12 +7258,12 @@@ static void ixgbe_service_task(struct w ixgbe_service_event_complete(adapter); return; } - #ifdef CONFIG_IXGBE_VXLAN - rtnl_lock(); if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) { + rtnl_lock(); adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED; - vxlan_get_rx_port(adapter->netdev); + udp_tunnel_get_rx_info(adapter->netdev); + rtnl_unlock(); } - rtnl_unlock(); - #endif /* CONFIG_IXGBE_VXLAN */ ixgbe_reset_subtask(adapter); ixgbe_phy_interrupt_subtask(adapter); ixgbe_sfp_detection_subtask(adapter); @@@ -7697,7 -7691,6 +7691,6 @@@ static void ixgbe_atr(struct ixgbe_rin /* snag network header to get L4 type and address */ skb = first->skb; hdr.network = skb_network_header(skb); - #ifdef CONFIG_IXGBE_VXLAN if (skb->encapsulation && first->protocol == htons(ETH_P_IP) && hdr.ipv4->protocol != IPPROTO_UDP) { @@@ -7708,7 -7701,6 +7701,6 @@@ udp_hdr(skb)->dest == adapter->vxlan_port) hdr.network = skb_inner_network_header(skb); } - #endif /* CONFIG_IXGBE_VXLAN */
/* Currently only IPv4/IPv6 with TCP is supported */ switch (hdr.ipv4->version) { @@@ -8308,14 -8300,53 +8300,53 @@@ int ixgbe_setup_tc(struct net_device *d static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, struct tc_cls_u32_offload *cls) { + u32 hdl = cls->knode.handle; u32 uhtid = TC_U32_USERHTID(cls->knode.handle); - u32 loc; - int err; + u32 loc = cls->knode.handle & 0xfffff; + int err = 0, i, j; + struct ixgbe_jump_table *jump = NULL; + + if (loc > IXGBE_MAX_HW_ENTRIES) + return -EINVAL;
if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE)) return -EINVAL;
- loc = cls->knode.handle & 0xfffff; + /* Clear this filter in the link data it is associated with */ + if (uhtid != 0x800) { + jump = adapter->jump_tables[uhtid]; + if (!jump) + return -EINVAL; + if (!test_bit(loc - 1, jump->child_loc_map)) + return -EINVAL; + clear_bit(loc - 1, jump->child_loc_map); + } + + /* Check if the filter being deleted is a link */ + for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { + jump = adapter->jump_tables[i]; + if (jump && jump->link_hdl == hdl) { + /* Delete filters in the hardware in the child hash + * table associated with this link + */ + for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) { + if (!test_bit(j, jump->child_loc_map)) + continue; + spin_lock(&adapter->fdir_perfect_lock); + err = ixgbe_update_ethtool_fdir_entry(adapter, + NULL, + j + 1); + spin_unlock(&adapter->fdir_perfect_lock); + clear_bit(j, jump->child_loc_map); + } + /* Remove resources for this link */ + kfree(jump->input); + kfree(jump->mask); + kfree(jump); + adapter->jump_tables[i] = NULL; + return err; + } + }
spin_lock(&adapter->fdir_perfect_lock); err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc); @@@ -8549,6 -8580,18 +8580,18 @@@ static int ixgbe_configure_clsu32(struc if (!test_bit(link_uhtid - 1, &adapter->tables)) return err;
+ /* Multiple filters as links to the same hash table are not + * supported. To add a new filter with the same next header + * but different match/jump conditions, create a new hash table + * and link to it. + */ + if (adapter->jump_tables[link_uhtid] && + (adapter->jump_tables[link_uhtid])->link_hdl) { + e_err(drv, "Link filter exists for link: %x\n", + link_uhtid); + return err; + } + for (i = 0; nexthdr[i].jump; i++) { if (nexthdr[i].o != cls->knode.sel->offoff || nexthdr[i].s != cls->knode.sel->offshift || @@@ -8570,6 -8613,8 +8613,8 @@@ } jump->input = input; jump->mask = mask; + jump->link_hdl = cls->knode.handle; + err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, &nexthdr[i]); if (!err) { @@@ -8597,6 -8642,20 +8642,20 @@@ if ((adapter->jump_tables[uhtid])->mask) memcpy(mask, (adapter->jump_tables[uhtid])->mask, sizeof(*mask)); + + /* Lookup in all child hash tables if this location is already + * filled with a filter + */ + for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { + struct ixgbe_jump_table *link = adapter->jump_tables[i]; + + if (link && (test_bit(loc - 1, link->child_loc_map))) { + e_err(drv, "Filter exists in location: %x\n", + loc); + err = -EINVAL; + goto err_out; + } + } } err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL); if (err) @@@ -8628,6 -8687,9 +8687,9 @@@ ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); spin_unlock(&adapter->fdir_perfect_lock);
+ if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) + set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map); + kfree(mask); return err; err_out_w_lock: @@@ -8770,14 -8832,12 +8832,12 @@@ static int ixgbe_set_features(struct ne
netdev->features = features;
- #ifdef CONFIG_IXGBE_VXLAN if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { if (features & NETIF_F_RXCSUM) adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; else ixgbe_clear_vxlan_port(adapter); } - #endif /* CONFIG_IXGBE_VXLAN */
if (need_reset) ixgbe_do_reset(netdev); @@@ -8788,23 -8848,25 +8848,25 @@@ return 0; }
- #ifdef CONFIG_IXGBE_VXLAN /** * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up * @dev: The port's netdev - * @sa_family: Socket Family that VXLAN is notifiying us about - * @port: New UDP port number that VXLAN started listening to + * @ti: Tunnel endpoint information **/ - static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, - __be16 port) + static void ixgbe_add_vxlan_port(struct net_device *dev, + struct udp_tunnel_info *ti) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; + __be16 port = ti->port;
- if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return;
- if (sa_family == AF_INET6) + if (ti->sa_family != AF_INET) + return; + + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) return;
if (adapter->vxlan_port == port) @@@ -8824,30 -8886,31 +8886,31 @@@ /** * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away * @dev: The port's netdev - * @sa_family: Socket Family that VXLAN is notifying us about - * @port: UDP port number that VXLAN stopped listening to + * @ti: Tunnel endpoint information **/ - static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, - __be16 port) + static void ixgbe_del_vxlan_port(struct net_device *dev, + struct udp_tunnel_info *ti) { struct ixgbe_adapter *adapter = netdev_priv(dev);
- if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return;
- if (sa_family == AF_INET6) + if (ti->sa_family != AF_INET) return;
- if (adapter->vxlan_port != port) { + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + + if (adapter->vxlan_port != ti->port) { netdev_info(dev, "Port %d was not found, not deleting\n", - ntohs(port)); + ntohs(ti->port)); return; }
ixgbe_clear_vxlan_port(adapter); adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; } - #endif /* CONFIG_IXGBE_VXLAN */
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, @@@ -9160,10 -9223,8 +9223,8 @@@ static const struct net_device_ops ixgb .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, .ndo_dfwd_add_station = ixgbe_fwd_add, .ndo_dfwd_del_station = ixgbe_fwd_del, - #ifdef CONFIG_IXGBE_VXLAN - .ndo_add_vxlan_port = ixgbe_add_vxlan_port, - .ndo_del_vxlan_port = ixgbe_del_vxlan_port, - #endif /* CONFIG_IXGBE_VXLAN */ + .ndo_udp_tunnel_add = ixgbe_add_vxlan_port, + .ndo_udp_tunnel_del = ixgbe_del_vxlan_port, .ndo_features_check = ixgbe_features_check, };
@@@ -9331,7 -9392,8 +9392,7 @@@ static int ixgbe_probe(struct pci_dev * pci_using_dac = 0; }
- err = pci_request_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM), ixgbe_driver_name); + err = pci_request_mem_regions(pdev, ixgbe_driver_name); if (err) { dev_err(&pdev->dev, "pci_request_selected_regions failed 0x%x\n", err); @@@ -9717,7 -9779,8 +9778,7 @@@ err_ioremap disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); err_alloc_etherdev: - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: if (!adapter || disable_dev) @@@ -9784,7 -9847,8 +9845,7 @@@ static void ixgbe_remove(struct pci_de
#endif iounmap(adapter->io_addr); - pci_release_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM)); + pci_release_mem_regions(pdev);
e_dev_info("complete\n");
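The e1000e, i40e, igb and ixgbe probe/remove hunks above all switch to the pci_request_mem_regions() helper, which claims every memory BAR in one call and replaces the pci_request_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM), name) idiom. A sketch for a hypothetical driver, paired with pci_release_mem_regions() on the remove and error paths:

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	err = pci_request_mem_regions(pdev, "example_driver");
	if (err)
		goto err_disable;

	/* map BARs, allocate the netdev, register it, ... */
	return 0;

err_disable:
	pci_disable_device(pdev);
	return err;
}

static void example_remove(struct pci_dev *pdev)
{
	pci_release_mem_regions(pdev);	/* mirrors the request in probe */
	pci_disable_device(pdev);
}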
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5a4d88c,611ab55..bdcb699 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@@ -47,8 -47,9 +47,9 @@@ enum };
struct mlx5e_rq_param { - u32 rqc[MLX5_ST_SZ_DW(rqc)]; - struct mlx5_wq_param wq; + u32 rqc[MLX5_ST_SZ_DW(rqc)]; + struct mlx5_wq_param wq; + bool am_enabled; };
struct mlx5e_sq_param { @@@ -62,6 -63,7 +63,7 @@@ struct mlx5e_cq_param u32 cqc[MLX5_ST_SZ_DW(cqc)]; struct mlx5_wq_param wq; u16 eq_ix; + u8 cq_period_mode; };
struct mlx5e_channel_param { @@@ -254,14 -256,14 +256,14 @@@ void mlx5e_update_stats(struct mlx5e_pr mlx5e_update_sw_counters(priv); }
- static void mlx5e_update_stats_work(struct work_struct *work) + void mlx5e_update_stats_work(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv, update_stats_work); mutex_lock(&priv->state_lock); if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { - mlx5e_update_stats(priv); + priv->profile->update_stats(priv); queue_delayed_work(priv->wq, dwork, msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL)); } @@@ -367,6 -369,9 +369,9 @@@ static int mlx5e_create_rq(struct mlx5e wqe->data.byte_count = cpu_to_be32(byte_count); }
+ INIT_WORK(&rq->am.work, mlx5e_rx_am_work); + rq->am.mode = priv->params.rx_cq_period_mode; + rq->wq_type = priv->params.rq_wq_type; rq->pdev = c->pdev; rq->netdev = c->netdev; @@@ -539,6 -544,9 +544,9 @@@ static int mlx5e_open_rq(struct mlx5e_c if (err) goto err_disable_rq;
+ if (param->am_enabled) + set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); + set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP; @@@ -574,6 -582,8 +582,8 @@@ static void mlx5e_close_rq(struct mlx5e /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */ napi_synchronize(&rq->channel->napi);
+ cancel_work_sync(&rq->am.work); + mlx5e_disable_rq(rq); mlx5e_free_rx_descs(rq); mlx5e_destroy_rq(rq); @@@ -741,7 -751,8 +751,8 @@@ static int mlx5e_enable_sq(struct mlx5e return err; }
- static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state) + static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, + int next_state, bool update_rl, int rl_index) { struct mlx5e_channel *c = sq->channel; struct mlx5e_priv *priv = c->priv; @@@ -761,6 -772,10 +772,10 @@@
MLX5_SET(modify_sq_in, in, sq_state, curr_state); MLX5_SET(sqc, sqc, state, next_state); + if (update_rl && next_state == MLX5_SQC_STATE_RDY) { + MLX5_SET64(modify_sq_in, in, modify_bitmask, 1); + MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index); + }
err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
@@@ -776,6 -791,8 +791,8 @@@ static void mlx5e_disable_sq(struct mlx struct mlx5_core_dev *mdev = priv->mdev;
mlx5_core_destroy_sq(mdev, sq->sqn); + if (sq->rate_limit) + mlx5_rl_remove_rate(mdev, sq->rate_limit); }
static int mlx5e_open_sq(struct mlx5e_channel *c, @@@ -793,7 -810,8 +810,8 @@@ if (err) goto err_destroy_sq;
- err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY); + err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY, + false, 0); if (err) goto err_disable_sq;
@@@ -836,7 -854,7 +854,7 @@@ static void mlx5e_close_sq(struct mlx5e mlx5e_send_nop(sq, true);
err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, - MLX5_SQC_STATE_ERR); + MLX5_SQC_STATE_ERR, false, 0); if (err) set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state); } @@@ -891,7 -909,7 +909,7 @@@ static int mlx5e_create_cq(struct mlx5e mcq->comp = mlx5e_completion_event; mcq->event = mlx5e_cq_error_event; mcq->irqn = irqn; - mcq->uar = &priv->cq_uar; + mcq->uar = &mdev->mlx5e_res.cq_uar;
for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); @@@ -938,6 -956,7 +956,7 @@@ static int mlx5e_enable_cq(struct mlx5e
mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+ MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - @@@ -967,8 -986,7 +986,7 @@@ static void mlx5e_disable_cq(struct mlx static int mlx5e_open_cq(struct mlx5e_channel *c, struct mlx5e_cq_param *param, struct mlx5e_cq *cq, - u16 moderation_usecs, - u16 moderation_frames) + struct mlx5e_cq_moder moderation) { int err; struct mlx5e_priv *priv = c->priv; @@@ -984,8 -1002,8 +1002,8 @@@
if (MLX5_CAP_GEN(mdev, cq_moderation)) mlx5_core_modify_cq_moderation(mdev, &cq->mcq, - moderation_usecs, - moderation_frames); + moderation.usec, + moderation.pkts); return 0;
err_destroy_cq: @@@ -1014,8 -1032,7 +1032,7 @@@ static int mlx5e_open_tx_cqs(struct mlx
for (tc = 0; tc < c->num_tc; tc++) { err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq, - priv->params.tx_cq_moderation_usec, - priv->params.tx_cq_moderation_pkts); + priv->params.tx_cq_moderation); if (err) goto err_close_tx_cqs; } @@@ -1070,19 -1087,96 +1087,96 @@@ static void mlx5e_build_channeltc_to_tx { int i;
- for (i = 0; i < MLX5E_MAX_NUM_TC; i++) + for (i = 0; i < priv->profile->max_tc; i++) priv->channeltc_to_txq_map[ix][i] = ix + i * priv->params.num_channels; }
+ static int mlx5e_set_sq_maxrate(struct net_device *dev, + struct mlx5e_sq *sq, u32 rate) + { + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + u16 rl_index = 0; + int err; + + if (rate == sq->rate_limit) + /* nothing to do */ + return 0; + + if (sq->rate_limit) + /* remove current rl index to free space to next ones */ + mlx5_rl_remove_rate(mdev, sq->rate_limit); + + sq->rate_limit = 0; + + if (rate) { + err = mlx5_rl_add_rate(mdev, rate, &rl_index); + if (err) { + netdev_err(dev, "Failed configuring rate %u: %d\n", + rate, err); + return err; + } + } + + err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, + MLX5_SQC_STATE_RDY, true, rl_index); + if (err) { + netdev_err(dev, "Failed configuring rate %u: %d\n", + rate, err); + /* remove the rate from the table */ + if (rate) + mlx5_rl_remove_rate(mdev, rate); + return err; + } + + sq->rate_limit = rate; + return 0; + } + + static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate) + { + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_sq *sq = priv->txq_to_sq_map[index]; + int err = 0; + + if (!mlx5_rl_is_supported(mdev)) { + netdev_err(dev, "Rate limiting is not supported on this device\n"); + return -EINVAL; + } + + /* rate is given in Mb/sec, HW config is in Kb/sec */ + rate = rate << 10; + + /* Check whether rate in valid range, 0 is always valid */ + if (rate && !mlx5_rl_is_in_range(mdev, rate)) { + netdev_err(dev, "TX rate %u, is not in range\n", rate); + return -ERANGE; + } + + mutex_lock(&priv->state_lock); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + err = mlx5e_set_sq_maxrate(dev, sq, rate); + if (!err) + priv->tx_rates[index] = rate; + mutex_unlock(&priv->state_lock); + + return err; + } + static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct mlx5e_channel_param *cparam, struct mlx5e_channel **cp) { + struct mlx5e_cq_moder icosq_cq_moder = {0, 0}; struct net_device *netdev = priv->netdev; + struct mlx5e_cq_moder rx_cq_profile; int cpu = mlx5e_get_cpu(priv, ix); struct mlx5e_channel *c; + struct mlx5e_sq *sq; int err; + int i;
c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); if (!c) @@@ -1093,14 -1187,19 +1187,19 @@@ c->cpu = cpu; c->pdev = &priv->mdev->pdev->dev; c->netdev = priv->netdev; - c->mkey_be = cpu_to_be32(priv->mkey.key); + c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); c->num_tc = priv->params.num_tc;
+ if (priv->params.rx_am_enabled) + rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode); + else + rx_cq_profile = priv->params.rx_cq_moderation; + mlx5e_build_channeltc_to_txq_map(priv, ix);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
- err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, 0, 0); + err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder); if (err) goto err_napi_del;
@@@ -1109,8 -1208,7 +1208,7 @@@ goto err_close_icosq_cq;
err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq, - priv->params.rx_cq_moderation_usec, - priv->params.rx_cq_moderation_pkts); + rx_cq_profile); if (err) goto err_close_tx_cqs;
@@@ -1124,6 -1222,16 +1222,16 @@@ if (err) goto err_close_icosq;
+ for (i = 0; i < priv->params.num_tc; i++) { + u32 txq_ix = priv->channeltc_to_txq_map[ix][i]; + + if (priv->tx_rates[txq_ix]) { + sq = priv->txq_to_sq_map[txq_ix]; + mlx5e_set_sq_maxrate(priv->netdev, sq, + priv->tx_rates[txq_ix]); + } + } + err = mlx5e_open_rq(c, &cparam->rq, &c->rq); if (err) goto err_close_sqs; @@@ -1195,11 -1303,13 +1303,13 @@@ static void mlx5e_build_rq_param(struc MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size); - MLX5_SET(wq, wq, pd, priv->pdn); + MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn); MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); param->wq.linear = 1; + + param->am_enabled = priv->params.rx_am_enabled; }
static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param) @@@ -1218,7 -1328,7 +1328,7 @@@ static void mlx5e_build_sq_param_common void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); - MLX5_SET(wq, wq, pd, priv->pdn); + MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); } @@@ -1240,7 -1350,7 +1350,7 @@@ static void mlx5e_build_common_cq_param { void *cqc = param->cqc;
- MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index); + MLX5_SET(cqc, cqc, uar_page, priv->mdev->mlx5e_res.cq_uar.index); }
static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, @@@ -1265,6 -1375,8 +1375,8 @@@ }
mlx5e_build_common_cq_param(priv, param); + + param->cq_period_mode = priv->params.rx_cq_period_mode; }
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, @@@ -1275,6 -1387,8 +1387,8 @@@ MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
mlx5e_build_common_cq_param(priv, param); + + param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; }
static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv, @@@ -1286,6 -1400,8 +1400,8 @@@ MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
mlx5e_build_common_cq_param(priv, param); + + param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; }
static void mlx5e_build_icosq_param(struct mlx5e_priv *priv, @@@ -1348,11 -1464,6 +1464,11 @@@ static int mlx5e_open_channels(struct m goto err_close_channels; }
+ /* FIXME: This is a W/A for tx timeout watch dog false alarm when + * polling for inactive tx queues. + */ + netif_tx_start_all_queues(priv->netdev); + kfree(cparam); return 0;
@@@ -1372,12 -1483,6 +1488,12 @@@ static void mlx5e_close_channels(struc { int i;
+ /* FIXME: This is a W/A only for tx timeout watch dog false alarm when + * polling for inactive tx queues. + */ + netif_tx_stop_all_queues(priv->netdev); + netif_tx_disable(priv->netdev); + for (i = 0; i < priv->params.num_channels; i++) mlx5e_close_channel(priv->channel[i]);
@@@ -1432,7 -1537,8 +1548,8 @@@ static void mlx5e_fill_direct_rqt_rqn(s MLX5_SET(rqtc, rqtc, rq_num[0], rqn); }
- static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, int ix, u32 *rqtn) + static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, + int ix, struct mlx5e_rqt *rqt) { struct mlx5_core_dev *mdev = priv->mdev; void *rqtc; @@@ -1455,34 -1561,36 +1572,36 @@@ else mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
- err = mlx5_core_create_rqt(mdev, in, inlen, rqtn); + err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn); + if (!err) + rqt->enabled = true;
kvfree(in); return err; }
- static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, u32 rqtn) + void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt) { - mlx5_core_destroy_rqt(priv->mdev, rqtn); + rqt->enabled = false; + mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn); }
- static int mlx5e_create_rqts(struct mlx5e_priv *priv) + static int mlx5e_create_indirect_rqts(struct mlx5e_priv *priv) { - int nch = mlx5e_get_max_num_channels(priv->mdev); - u32 *rqtn; + struct mlx5e_rqt *rqt = &priv->indir_rqt; + + return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqt); + } + + int mlx5e_create_direct_rqts(struct mlx5e_priv *priv) + { + struct mlx5e_rqt *rqt; int err; int ix;
- /* Indirect RQT */ - rqtn = &priv->indir_rqtn; - err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqtn); - if (err) - return err; - - /* Direct RQTs */ - for (ix = 0; ix < nch; ix++) { - rqtn = &priv->direct_tir[ix].rqtn; - err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqtn); + for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) { + rqt = &priv->direct_tir[ix].rqt; + err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqt); if (err) goto err_destroy_rqts; } @@@ -1491,24 -1599,11 +1610,11 @@@
err_destroy_rqts: for (ix--; ix >= 0; ix--) - mlx5e_destroy_rqt(priv, priv->direct_tir[ix].rqtn); - - mlx5e_destroy_rqt(priv, priv->indir_rqtn); + mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);
return err; }
- static void mlx5e_destroy_rqts(struct mlx5e_priv *priv) - { - int nch = mlx5e_get_max_num_channels(priv->mdev); - int i; - - for (i = 0; i < nch; i++) - mlx5e_destroy_rqt(priv, priv->direct_tir[i].rqtn); - - mlx5e_destroy_rqt(priv, priv->indir_rqtn); - } - int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix) { struct mlx5_core_dev *mdev = priv->mdev; @@@ -1544,10 -1639,15 +1650,15 @@@ static void mlx5e_redirect_rqts(struct u32 rqtn; int ix;
- rqtn = priv->indir_rqtn; - mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0); + if (priv->indir_rqt.enabled) { + rqtn = priv->indir_rqt.rqtn; + mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0); + } + for (ix = 0; ix < priv->params.num_channels; ix++) { - rqtn = priv->direct_tir[ix].rqtn; + if (!priv->direct_tir[ix].rqt.enabled) + continue; + rqtn = priv->direct_tir[ix].rqt.rqtn; mlx5e_redirect_rqt(priv, rqtn, 1, ix); } } @@@ -1607,13 -1707,13 +1718,13 @@@ static int mlx5e_modify_tirs_lro(struc mlx5e_build_tir_ctx_lro(tirc, priv);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { - err = mlx5_core_modify_tir(mdev, priv->indir_tirn[tt], in, + err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen); if (err) goto free_in; }
- for (ix = 0; ix < mlx5e_get_max_num_channels(mdev); ix++) { + for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) { err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, in, inlen); if (err) @@@ -1626,40 -1726,6 +1737,6 @@@ free_in return err; }
- static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv) - { - void *in; - int inlen; - int err; - int i; - - inlen = MLX5_ST_SZ_BYTES(modify_tir_in); - in = mlx5_vzalloc(inlen); - if (!in) - return -ENOMEM; - - MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); - - for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) { - err = mlx5_core_modify_tir(priv->mdev, priv->indir_tirn[i], in, - inlen); - if (err) - return err; - } - - for (i = 0; i < priv->params.num_channels; i++) { - err = mlx5_core_modify_tir(priv->mdev, - priv->direct_tir[i].tirn, in, - inlen); - if (err) - return err; - } - - kvfree(in); - - return 0; - } - static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu) { struct mlx5_core_dev *mdev = priv->mdev; @@@ -1731,6 -1797,7 +1808,7 @@@ static void mlx5e_netdev_set_tcs(struc int mlx5e_open_locked(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; int num_txqs; int err;
@@@ -1753,7 -1820,7 +1831,7 @@@ goto err_clear_state_opened_flag; }
- err = mlx5e_refresh_tirs_self_loopback_enable(priv); + err = mlx5e_refresh_tirs_self_loopback_enable(priv->mdev); if (err) { netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n", __func__, err); @@@ -1766,9 -1833,14 +1844,14 @@@ #ifdef CONFIG_RFS_ACCEL priv->netdev->rx_cpu_rmap = priv->mdev->rmap; #endif + if (priv->profile->update_stats) + queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
- queue_delayed_work(priv->wq, &priv->update_stats_work, 0); - + if (MLX5_CAP_GEN(mdev, vport_group_manager)) { + err = mlx5e_add_sqs_fwd_rules(priv); + if (err) + goto err_close_channels; + } return 0;
err_close_channels: @@@ -1778,7 -1850,7 +1861,7 @@@ err_clear_state_opened_flag return err; }
- static int mlx5e_open(struct net_device *netdev) + int mlx5e_open(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); int err; @@@ -1793,6 -1865,7 +1876,7 @@@ int mlx5e_close_locked(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev;
/* May already be CLOSED in case a previous configuration operation * (e.g RX/TX queue size change) that involves close&open failed. @@@ -1802,6 -1875,9 +1886,9 @@@
clear_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (MLX5_CAP_GEN(mdev, vport_group_manager)) + mlx5e_remove_sqs_fwd_rules(priv); + mlx5e_timestamp_cleanup(priv); netif_carrier_off(priv->netdev); mlx5e_redirect_rqts(priv); @@@ -1810,7 -1886,7 +1897,7 @@@ return 0; }
- static int mlx5e_close(struct net_device *netdev) + int mlx5e_close(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); int err; @@@ -1869,7 -1945,7 +1956,7 @@@ static int mlx5e_create_drop_cq(struct mcq->comp = mlx5e_completion_event; mcq->event = mlx5e_cq_error_event; mcq->irqn = irqn; - mcq->uar = &priv->cq_uar; + mcq->uar = &mdev->mlx5e_res.cq_uar;
cq->priv = priv;
@@@ -1935,7 -2011,7 +2022,7 @@@ static int mlx5e_create_tis(struct mlx5 memset(in, 0, sizeof(in));
MLX5_SET(tisc, tisc, prio, tc << 1); - MLX5_SET(tisc, tisc, transport_domain, priv->tdn); + MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]); } @@@ -1945,12 -2021,12 +2032,12 @@@ static void mlx5e_destroy_tis(struct ml mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]); }
- static int mlx5e_create_tises(struct mlx5e_priv *priv) + int mlx5e_create_tises(struct mlx5e_priv *priv) { int err; int tc;
- for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++) { + for (tc = 0; tc < priv->profile->max_tc; tc++) { err = mlx5e_create_tis(priv, tc); if (err) goto err_close_tises; @@@ -1965,11 -2041,11 +2052,11 @@@ err_close_tises return err; }
- static void mlx5e_destroy_tises(struct mlx5e_priv *priv) + void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) { int tc;
- for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++) + for (tc = 0; tc < priv->profile->max_tc; tc++) mlx5e_destroy_tis(priv, tc); }
@@@ -1978,7 -2054,7 +2065,7 @@@ static void mlx5e_build_indir_tir_ctx(s { void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
- MLX5_SET(tirc, tirc, transport_domain, priv->tdn); + MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ MLX5_HASH_FIELD_SEL_DST_IP) @@@ -1995,7 -2071,7 +2082,7 @@@ mlx5e_build_tir_ctx_lro(tirc, priv);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); - MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqtn); + MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn); mlx5e_build_tir_ctx_hash(tirc, priv);
switch (tt) { @@@ -2085,7 -2161,7 +2172,7 @@@ static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, u32 rqtn) { - MLX5_SET(tirc, tirc, transport_domain, priv->tdn); + MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
mlx5e_build_tir_ctx_lro(tirc, priv);
@@@ -2094,15 -2170,13 +2181,13 @@@ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); }
- static int mlx5e_create_tirs(struct mlx5e_priv *priv) + static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv) { - int nch = mlx5e_get_max_num_channels(priv->mdev); + struct mlx5e_tir *tir; void *tirc; int inlen; - u32 *tirn; int err; u32 *in; - int ix; int tt;
inlen = MLX5_ST_SZ_BYTES(create_tir_in); @@@ -2110,25 -2184,51 +2195,51 @@@ if (!in) return -ENOMEM;
- /* indirect tirs */ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { memset(in, 0, inlen); - tirn = &priv->indir_tirn[tt]; + tir = &priv->indir_tir[tt]; tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); mlx5e_build_indir_tir_ctx(priv, tirc, tt); - err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn); + err = mlx5e_create_tir(priv->mdev, tir, in, inlen); if (err) goto err_destroy_tirs; }
- /* direct tirs */ + kvfree(in); + + return 0; + + err_destroy_tirs: + for (tt--; tt >= 0; tt--) + mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]); + + kvfree(in); + + return err; + } + + int mlx5e_create_direct_tirs(struct mlx5e_priv *priv) + { + int nch = priv->profile->max_nch(priv->mdev); + struct mlx5e_tir *tir; + void *tirc; + int inlen; + int err; + u32 *in; + int ix; + + inlen = MLX5_ST_SZ_BYTES(create_tir_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + for (ix = 0; ix < nch; ix++) { memset(in, 0, inlen); - tirn = &priv->direct_tir[ix].tirn; + tir = &priv->direct_tir[ix]; tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); mlx5e_build_direct_tir_ctx(priv, tirc, - priv->direct_tir[ix].rqtn); - err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn); + priv->direct_tir[ix].rqt.rqtn); + err = mlx5e_create_tir(priv->mdev, tir, in, inlen); if (err) goto err_destroy_ch_tirs; } @@@ -2139,27 -2239,28 +2250,28 @@@
err_destroy_ch_tirs: for (ix--; ix >= 0; ix--) - mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[ix].tirn); - - err_destroy_tirs: - for (tt--; tt >= 0; tt--) - mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[tt]); + mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);
kvfree(in);
return err; }
- static void mlx5e_destroy_tirs(struct mlx5e_priv *priv) + static void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv) { - int nch = mlx5e_get_max_num_channels(priv->mdev); int i;
- for (i = 0; i < nch; i++) - mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[i].tirn); - for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) - mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[i]); + mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]); + } + + void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv) + { + int nch = priv->profile->max_nch(priv->mdev); + int i; + + for (i = 0; i < nch; i++) + mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]); }
int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd) @@@ -2233,7 -2334,7 +2345,7 @@@ mqprio return mlx5e_setup_tc(dev, tc->tc); }
- static struct rtnl_link_stats64 * + struct rtnl_link_stats64 * mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); @@@ -2585,25 -2686,31 +2697,31 @@@ static int mlx5e_get_vf_stats(struct ne }
static void mlx5e_add_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct mlx5e_priv *priv = netdev_priv(netdev);
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + if (!mlx5e_vxlan_allowed(priv->mdev)) return;
- mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1); + mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1); }
static void mlx5e_del_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct mlx5e_priv *priv = netdev_priv(netdev);
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + if (!mlx5e_vxlan_allowed(priv->mdev)) return;
- mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0); + mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0); }
static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv, @@@ -2667,7 -2774,7 +2785,7 @@@ static void mlx5e_tx_timeout(struct net for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) { struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
- if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i))) + if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i))) continue; sched_work = true; set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state); @@@ -2693,6 -2800,7 +2811,7 @@@ static const struct net_device_ops mlx5 .ndo_set_features = mlx5e_set_features, .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, + .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx5e_rx_flow_steer, #endif @@@ -2713,8 -2821,9 +2832,9 @@@ static const struct net_device_ops mlx5 .ndo_set_features = mlx5e_set_features, .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, - .ndo_add_vxlan_port = mlx5e_add_vxlan_port, - .ndo_del_vxlan_port = mlx5e_del_vxlan_port, + .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, + .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, + .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, .ndo_features_check = mlx5e_features_check, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx5e_rx_flow_steer, @@@ -2844,13 -2953,31 +2964,31 @@@ static bool cqe_compress_heuristic(u32 (pci_bw < 40000) && (pci_bw < link_speed)); }
- static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, - struct net_device *netdev, - int num_channels) + void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) + { + params->rx_cq_period_mode = cq_period_mode; + + params->rx_cq_moderation.pkts = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; + params->rx_cq_moderation.usec = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; + + if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) + params->rx_cq_moderation.usec = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; + } + + static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, + void *ppriv) { struct mlx5e_priv *priv = netdev_priv(netdev); u32 link_speed = 0; u32 pci_bw = 0; + u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? + MLX5_CQ_PERIOD_MODE_START_FROM_CQE : + MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; @@@ -2896,13 -3023,13 +3034,13 @@@
priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type, BIT(priv->params.log_rq_size)); - priv->params.rx_cq_moderation_usec = - MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; - priv->params.rx_cq_moderation_pkts = - MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; - priv->params.tx_cq_moderation_usec = + + priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation); + mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode); + + priv->params.tx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; - priv->params.tx_cq_moderation_pkts = + priv->params.tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); priv->params.num_tc = 1; @@@ -2912,14 -3039,20 +3050,20 @@@ sizeof(priv->params.toeplitz_hash_key));
mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt, - MLX5E_INDIR_RQT_SIZE, num_channels); + MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ /* Initialize pflags */ + MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER, + priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE); + priv->mdev = mdev; priv->netdev = netdev; - priv->params.num_channels = num_channels; + priv->params.num_channels = profile->max_nch(mdev); + priv->profile = profile; + priv->ppriv = ppriv;
#ifdef CONFIG_MLX5_CORE_EN_DCB mlx5e_ets_init(priv); @@@ -2945,7 -3078,11 +3089,11 @@@ static void mlx5e_set_netdev_dev_addr(s } }
- static void mlx5e_build_netdev(struct net_device *netdev) + static const struct switchdev_ops mlx5e_switchdev_ops = { + .switchdev_port_attr_get = mlx5e_attr_get, + }; + + static void mlx5e_build_nic_netdev(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; @@@ -3026,31 -3163,11 +3174,11 @@@ netdev->priv_flags |= IFF_UNICAST_FLT;
mlx5e_set_netdev_dev_addr(netdev); - } - - static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn, - struct mlx5_core_mkey *mkey) - { - struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5_create_mkey_mbox_in *in; - int err; - - in = mlx5_vzalloc(sizeof(*in)); - if (!in) - return -ENOMEM; - - in->seg.flags = MLX5_PERM_LOCAL_WRITE | - MLX5_PERM_LOCAL_READ | - MLX5_ACCESS_MODE_PA; - in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); - in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); - - err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL, - NULL);
- kvfree(in); - - return err; + #ifdef CONFIG_NET_SWITCHDEV + if (MLX5_CAP_GEN(mdev, vport_group_manager)) + netdev->switchdev_ops = &mlx5e_switchdev_ops; + #endif }
static void mlx5e_create_q_counter(struct mlx5e_priv *priv) @@@ -3080,7 -3197,7 +3208,7 @@@ static int mlx5e_create_umr_mkey(struc struct mlx5_mkey_seg *mkc; int inlen = sizeof(*in); u64 npages = - mlx5e_get_max_num_channels(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS; + priv->profile->max_nch(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS; int err;
in = mlx5_vzalloc(inlen); @@@ -3095,7 -3212,7 +3223,7 @@@ MLX5_ACCESS_MODE_MTT;
mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); - mkc->flags_pd = cpu_to_be32(priv->pdn); + mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn); mkc->len = cpu_to_be64(npages << PAGE_SHIFT); mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages)); mkc->log2_page_size = PAGE_SHIFT; @@@ -3108,160 -3225,233 +3236,233 @@@ return err; }
- static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) + static void mlx5e_nic_init(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, + void *ppriv) { - struct net_device *netdev; - struct mlx5e_priv *priv; - int nch = mlx5e_get_max_num_channels(mdev); - int err; - - if (mlx5e_check_required_hca_cap(mdev)) - return NULL; + struct mlx5e_priv *priv = netdev_priv(netdev);
- netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), - nch * MLX5E_MAX_NUM_TC, - nch); - if (!netdev) { - mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); - return NULL; - } + mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv); + mlx5e_build_nic_netdev(netdev); + mlx5e_vxlan_init(priv); + }
- mlx5e_build_netdev_priv(mdev, netdev, nch); - mlx5e_build_netdev(netdev); + static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) + { + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_eswitch *esw = mdev->priv.eswitch;
- netif_carrier_off(netdev); + mlx5e_vxlan_cleanup(priv);
- priv = netdev_priv(netdev); + if (MLX5_CAP_GEN(mdev, vport_group_manager)) + mlx5_eswitch_unregister_vport_rep(esw, 0); + }
- priv->wq = create_singlethread_workqueue("mlx5e"); - if (!priv->wq) - goto err_free_netdev; + static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) + { + struct mlx5_core_dev *mdev = priv->mdev; + int err; + int i;
- err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false); + err = mlx5e_create_indirect_rqts(priv); if (err) { - mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err); - goto err_destroy_wq; + mlx5_core_warn(mdev, "create indirect rqts failed, %d\n", err); + return err; }
- err = mlx5_core_alloc_pd(mdev, &priv->pdn); + err = mlx5e_create_direct_rqts(priv); if (err) { - mlx5_core_err(mdev, "alloc pd failed, %d\n", err); - goto err_unmap_free_uar; + mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err); + goto err_destroy_indirect_rqts; }
- err = mlx5_core_alloc_transport_domain(mdev, &priv->tdn); + err = mlx5e_create_indirect_tirs(priv); if (err) { - mlx5_core_err(mdev, "alloc td failed, %d\n", err); - goto err_dealloc_pd; + mlx5_core_warn(mdev, "create indirect tirs failed, %d\n", err); + goto err_destroy_direct_rqts; }
- err = mlx5e_create_mkey(priv, priv->pdn, &priv->mkey); + err = mlx5e_create_direct_tirs(priv); if (err) { - mlx5_core_err(mdev, "create mkey failed, %d\n", err); - goto err_dealloc_transport_domain; + mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err); + goto err_destroy_indirect_tirs; }
- err = mlx5e_create_umr_mkey(priv); + err = mlx5e_create_flow_steering(priv); if (err) { - mlx5_core_err(mdev, "create umr mkey failed, %d\n", err); - goto err_destroy_mkey; + mlx5_core_warn(mdev, "create flow steering failed, %d\n", err); + goto err_destroy_direct_tirs; }
+ err = mlx5e_tc_init(priv); + if (err) + goto err_destroy_flow_steering; + + return 0; + + err_destroy_flow_steering: + mlx5e_destroy_flow_steering(priv); + err_destroy_direct_tirs: + mlx5e_destroy_direct_tirs(priv); + err_destroy_indirect_tirs: + mlx5e_destroy_indirect_tirs(priv); + err_destroy_direct_rqts: + for (i = 0; i < priv->profile->max_nch(mdev); i++) + mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt); + err_destroy_indirect_rqts: + mlx5e_destroy_rqt(priv, &priv->indir_rqt); + return err; + } + + static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) + { + int i; + + mlx5e_tc_cleanup(priv); + mlx5e_destroy_flow_steering(priv); + mlx5e_destroy_direct_tirs(priv); + mlx5e_destroy_indirect_tirs(priv); + for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) + mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt); + mlx5e_destroy_rqt(priv, &priv->indir_rqt); + } + + static int mlx5e_init_nic_tx(struct mlx5e_priv *priv) + { + int err; + err = mlx5e_create_tises(priv); if (err) { - mlx5_core_warn(mdev, "create tises failed, %d\n", err); - goto err_destroy_umr_mkey; + mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err); + return err; }
- err = mlx5e_open_drop_rq(priv); - if (err) { - mlx5_core_err(mdev, "open drop rq failed, %d\n", err); - goto err_destroy_tises; + #ifdef CONFIG_MLX5_CORE_EN_DCB + mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets); + #endif + return 0; + } + + static void mlx5e_nic_enable(struct mlx5e_priv *priv) + { + struct net_device *netdev = priv->netdev; + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_eswitch *esw = mdev->priv.eswitch; + struct mlx5_eswitch_rep rep; + + if (mlx5e_vxlan_allowed(mdev)) { + rtnl_lock(); + udp_tunnel_get_rx_info(netdev); + rtnl_unlock(); }
- err = mlx5e_create_rqts(priv); - if (err) { - mlx5_core_warn(mdev, "create rqts failed, %d\n", err); - goto err_close_drop_rq; + mlx5e_enable_async_events(priv); + queue_work(priv->wq, &priv->set_rx_mode_work); + + if (MLX5_CAP_GEN(mdev, vport_group_manager)) { + rep.load = mlx5e_nic_rep_load; + rep.unload = mlx5e_nic_rep_unload; + rep.vport = 0; + rep.priv_data = priv; + mlx5_eswitch_register_vport_rep(esw, &rep); } + }
- err = mlx5e_create_tirs(priv); - if (err) { - mlx5_core_warn(mdev, "create tirs failed, %d\n", err); - goto err_destroy_rqts; + static void mlx5e_nic_disable(struct mlx5e_priv *priv) + { + queue_work(priv->wq, &priv->set_rx_mode_work); + mlx5e_disable_async_events(priv); + } + + static const struct mlx5e_profile mlx5e_nic_profile = { + .init = mlx5e_nic_init, + .cleanup = mlx5e_nic_cleanup, + .init_rx = mlx5e_init_nic_rx, + .cleanup_rx = mlx5e_cleanup_nic_rx, + .init_tx = mlx5e_init_nic_tx, + .cleanup_tx = mlx5e_cleanup_nic_tx, + .enable = mlx5e_nic_enable, + .disable = mlx5e_nic_disable, + .update_stats = mlx5e_update_stats, + .max_nch = mlx5e_get_max_num_channels, + .max_tc = MLX5E_MAX_NUM_TC, + }; + + void *mlx5e_create_netdev(struct mlx5_core_dev *mdev, + const struct mlx5e_profile *profile, void *ppriv) + { + struct net_device *netdev; + struct mlx5e_priv *priv; + int nch = profile->max_nch(mdev); + int err; + + netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), + nch * profile->max_tc, + nch); + if (!netdev) { + mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); + return NULL; }
- err = mlx5e_create_flow_steering(priv); + profile->init(mdev, netdev, profile, ppriv); + + netif_carrier_off(netdev); + + priv = netdev_priv(netdev); + + priv->wq = create_singlethread_workqueue("mlx5e"); + if (!priv->wq) + goto err_free_netdev; + + err = mlx5e_create_umr_mkey(priv); if (err) { - mlx5_core_warn(mdev, "create flow steering failed, %d\n", err); - goto err_destroy_tirs; + mlx5_core_err(mdev, "create umr mkey failed, %d\n", err); + goto err_destroy_wq; }
- mlx5e_create_q_counter(priv); - - mlx5e_init_l2_addr(priv); + err = profile->init_tx(priv); + if (err) + goto err_destroy_umr_mkey;
- mlx5e_vxlan_init(priv); + err = mlx5e_open_drop_rq(priv); + if (err) { + mlx5_core_err(mdev, "open drop rq failed, %d\n", err); + goto err_cleanup_tx; + }
- err = mlx5e_tc_init(priv); + err = profile->init_rx(priv); if (err) - goto err_dealloc_q_counters; + goto err_close_drop_rq;
- #ifdef CONFIG_MLX5_CORE_EN_DCB - mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets); - #endif + mlx5e_create_q_counter(priv); + + mlx5e_init_l2_addr(priv);
err = register_netdev(netdev); if (err) { mlx5_core_err(mdev, "register_netdev failed, %d\n", err); - goto err_tc_cleanup; - } - - if (mlx5e_vxlan_allowed(mdev)) { - rtnl_lock(); - vxlan_get_rx_port(netdev); - rtnl_unlock(); + goto err_dealloc_q_counters; }
- mlx5e_enable_async_events(priv); - queue_work(priv->wq, &priv->set_rx_mode_work); + if (profile->enable) + profile->enable(priv);
return priv;
- err_tc_cleanup: - mlx5e_tc_cleanup(priv); - err_dealloc_q_counters: mlx5e_destroy_q_counter(priv); - mlx5e_destroy_flow_steering(priv); - - err_destroy_tirs: - mlx5e_destroy_tirs(priv); - - err_destroy_rqts: - mlx5e_destroy_rqts(priv); + profile->cleanup_rx(priv);
err_close_drop_rq: mlx5e_close_drop_rq(priv);
- err_destroy_tises: - mlx5e_destroy_tises(priv); + err_cleanup_tx: + profile->cleanup_tx(priv);
err_destroy_umr_mkey: mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);
- err_destroy_mkey: - mlx5_core_destroy_mkey(mdev, &priv->mkey); - - err_dealloc_transport_domain: - mlx5_core_dealloc_transport_domain(mdev, priv->tdn); - - err_dealloc_pd: - mlx5_core_dealloc_pd(mdev, priv->pdn); - - err_unmap_free_uar: - mlx5_unmap_free_uar(mdev, &priv->cq_uar); - err_destroy_wq: destroy_workqueue(priv->wq);
@@@ -3271,15 -3461,59 +3472,59 @@@ err_free_netdev return NULL; }
- static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) + static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev) { - struct mlx5e_priv *priv = vpriv; + struct mlx5_eswitch *esw = mdev->priv.eswitch; + int total_vfs = MLX5_TOTAL_VPORTS(mdev); + int vport; + + if (!MLX5_CAP_GEN(mdev, vport_group_manager)) + return; + + for (vport = 1; vport < total_vfs; vport++) { + struct mlx5_eswitch_rep rep; + + rep.load = mlx5e_vport_rep_load; + rep.unload = mlx5e_vport_rep_unload; + rep.vport = vport; + mlx5_eswitch_register_vport_rep(esw, &rep); + } + } + + static void *mlx5e_add(struct mlx5_core_dev *mdev) + { + struct mlx5_eswitch *esw = mdev->priv.eswitch; + void *ppriv = NULL; + void *ret; + + if (mlx5e_check_required_hca_cap(mdev)) + return NULL; + + if (mlx5e_create_mdev_resources(mdev)) + return NULL; + + mlx5e_register_vport_rep(mdev); + + if (MLX5_CAP_GEN(mdev, vport_group_manager)) + ppriv = &esw->offloads.vport_reps[0]; + + ret = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv); + if (!ret) { + mlx5e_destroy_mdev_resources(mdev); + return NULL; + } + return ret; + } + + void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv) + { + const struct mlx5e_profile *profile = priv->profile; struct net_device *netdev = priv->netdev;
set_bit(MLX5E_STATE_DESTROYING, &priv->state); + if (profile->disable) + profile->disable(priv);
- queue_work(priv->wq, &priv->set_rx_mode_work); - mlx5e_disable_async_events(priv); flush_workqueue(priv->wq); if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) { netif_device_detach(netdev); @@@ -3288,26 -3522,35 +3533,35 @@@ unregister_netdev(netdev); }
- mlx5e_tc_cleanup(priv); - mlx5e_vxlan_cleanup(priv); mlx5e_destroy_q_counter(priv); - mlx5e_destroy_flow_steering(priv); - mlx5e_destroy_tirs(priv); - mlx5e_destroy_rqts(priv); + profile->cleanup_rx(priv); mlx5e_close_drop_rq(priv); - mlx5e_destroy_tises(priv); + profile->cleanup_tx(priv); mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey); - mlx5_core_destroy_mkey(priv->mdev, &priv->mkey); - mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn); - mlx5_core_dealloc_pd(priv->mdev, priv->pdn); - mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); cancel_delayed_work_sync(&priv->update_stats_work); destroy_workqueue(priv->wq); + if (profile->cleanup) + profile->cleanup(priv);
if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) free_netdev(netdev); }
+ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv) + { + struct mlx5_eswitch *esw = mdev->priv.eswitch; + int total_vfs = MLX5_TOTAL_VPORTS(mdev); + struct mlx5e_priv *priv = vpriv; + int vport; + + mlx5e_destroy_netdev(mdev, priv); + + for (vport = 1; vport < total_vfs; vport++) + mlx5_eswitch_unregister_vport_rep(esw, vport); + + mlx5e_destroy_mdev_resources(mdev); + } + static void *mlx5e_get_netdev(void *vpriv) { struct mlx5e_priv *priv = vpriv; @@@ -3316,8 -3559,8 +3570,8 @@@ }
static struct mlx5_interface mlx5e_interface = { - .add = mlx5e_create_netdev, - .remove = mlx5e_destroy_netdev, + .add = mlx5e_add, + .remove = mlx5e_remove, .event = mlx5e_async_event, .protocol = MLX5_INTERFACE_PROTOCOL_ETH, .get_dev = mlx5e_get_netdev, @@@ -3325,6 -3568,7 +3579,7 @@@
void mlx5e_init(void) { + mlx5e_build_ptys2ethtool_map(); mlx5_register_interface(&mlx5e_interface); }
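For orientation, the netdev bring-up above is now profile-driven. The following is a condensed, illustrative sketch of the call order only (not part of this commit): error unwinding, netif_carrier_off, q-counter and L2-address setup are elided, the helper name sketch_create_netdev is made up, and it assumes the driver's en.h declarations shown in the hunks above.

static struct mlx5e_priv *sketch_create_netdev(struct mlx5_core_dev *mdev,
					       const struct mlx5e_profile *profile,
					       void *ppriv)
{
	int nch = profile->max_nch(mdev);
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = alloc_etherdev_mqs(sizeof(*priv), nch * profile->max_tc, nch);
	profile->init(mdev, netdev, profile, ppriv);	/* params, netdev ops, vxlan init */
	priv = netdev_priv(netdev);
	priv->wq = create_singlethread_workqueue("mlx5e");
	mlx5e_create_umr_mkey(priv);
	profile->init_tx(priv);		/* TISes (plus DCB ETS for the NIC profile) */
	mlx5e_open_drop_rq(priv);
	profile->init_rx(priv);		/* RQTs, TIRs, flow steering, TC tables */
	register_netdev(netdev);
	if (profile->enable)
		profile->enable(priv);	/* async events, RX mode, vport rep, VXLAN ports */
	return priv;
}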
diff --combined drivers/net/ppp/ppp_generic.c index a30ee42,17953ab..f226db4 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@@ -1312,10 -1312,9 +1312,9 @@@ ppp_get_stats64(struct net_device *dev return stats64; }
- static struct lock_class_key ppp_tx_busylock; static int ppp_dev_init(struct net_device *dev) { - dev->qdisc_tx_busylock = &ppp_tx_busylock; + netdev_lockdep_set_classes(dev); return 0; }
@@@ -2601,6 -2600,8 +2600,6 @@@ ppp_unregister_channel(struct ppp_chann spin_lock_bh(&pn->all_channels_lock); list_del(&pch->list); spin_unlock_bh(&pn->all_channels_lock); - put_net(pch->chan_net); - pch->chan_net = NULL;
pch->file.dead = 1; wake_up_interruptible(&pch->file.rwait); @@@ -3134,9 -3135,6 +3133,9 @@@ ppp_disconnect_channel(struct channel * */ static void ppp_destroy_channel(struct channel *pch) { + put_net(pch->chan_net); + pch->chan_net = NULL; + atomic_dec(&channel_count);
if (!pch->file.dead) { diff --combined drivers/net/usb/r8152.c index 63f4018,168a8e2..669f625 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@@ -26,7 -26,6 +26,7 @@@ #include <linux/mdio.h> #include <linux/usb/cdc.h> #include <linux/suspend.h> +#include <linux/acpi.h>
/* Information for net-next */ #define NETNEXT_VERSION "08" @@@ -461,11 -460,6 +461,11 @@@ /* SRAM_IMPEDANCE */ #define RX_DRIVING_MASK 0x6000
+/* MAC PASSTHRU */ +#define AD_MASK 0xfee0 +#define EFUSE 0xcfdb +#define PASS_THRU_MASK 0x1 + enum rtl_register_content { _1000bps = 0x10, _100bps = 0x08, @@@ -613,7 -607,7 +613,7 @@@ struct r8152 struct list_head rx_done, tx_free; struct sk_buff_head tx_queue, rx_queue; spinlock_t rx_lock, tx_lock; - struct delayed_work schedule; + struct delayed_work schedule, hw_phy_work; struct mii_if_info mii; struct mutex control; /* use for hw setting */ #ifdef CONFIG_PM_SLEEP @@@ -630,6 -624,7 +630,7 @@@ int (*eee_get)(struct r8152 *, struct ethtool_eee *); int (*eee_set)(struct r8152 *, struct ethtool_eee *); bool (*in_nway)(struct r8152 *); + void (*hw_phy_cfg)(struct r8152 *); void (*autosuspend_en)(struct r8152 *tp, bool enable); } rtl_ops;
@@@ -639,8 -634,11 +640,11 @@@ u32 tx_qlen; u32 coalesce; u16 ocp_base; + u16 speed; u8 *intr_buff; u8 version; + u8 duplex; + u8 autoneg; };
enum rtl_version { @@@ -1042,65 -1040,6 +1046,65 @@@ out1 return ret; }
+/* Devices containing RTL8153-AD can support a persistent + * host system provided MAC address. + * Examples of this are Dell TB15 and Dell WD15 docks + */ +static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa) +{ + acpi_status status; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj; + int ret = -EINVAL; + u32 ocp_data; + unsigned char buf[6]; + + /* test for -AD variant of RTL8153 */ + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); + if ((ocp_data & AD_MASK) != 0x1000) + return -ENODEV; + + /* test for MAC address pass-through bit */ + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE); + if ((ocp_data & PASS_THRU_MASK) != 1) + return -ENODEV; + + /* returns _AUXMAC_#AABBCCDDEEFF# */ + status = acpi_evaluate_object(NULL, "\_SB.AMAC", NULL, &buffer); + obj = (union acpi_object *)buffer.pointer; + if (!ACPI_SUCCESS(status)) + return -ENODEV; + if (obj->type != ACPI_TYPE_BUFFER || obj->string.length != 0x17) { + netif_warn(tp, probe, tp->netdev, + "Invalid buffer when reading pass-thru MAC addr: " + "(%d, %d)\n", + obj->type, obj->string.length); + goto amacout; + } + if (strncmp(obj->string.pointer, "_AUXMAC_#", 9) != 0 || + strncmp(obj->string.pointer + 0x15, "#", 1) != 0) { + netif_warn(tp, probe, tp->netdev, + "Invalid header when reading pass-thru MAC addr\n"); + goto amacout; + } + ret = hex2bin(buf, obj->string.pointer + 9, 6); + if (!(ret == 0 && is_valid_ether_addr(buf))) { + netif_warn(tp, probe, tp->netdev, + "Invalid MAC when reading pass-thru MAC addr: " + "%d, %pM\n", ret, buf); + ret = -EINVAL; + goto amacout; + } + memcpy(sa->sa_data, buf, 6); + ether_addr_copy(tp->netdev->dev_addr, sa->sa_data); + netif_info(tp, probe, tp->netdev, + "Using pass-thru MAC addr %pM\n", sa->sa_data); + +amacout: + kfree(obj); + return ret; +} + static int set_ethernet_addr(struct r8152 *tp) { struct net_device *dev = tp->netdev; @@@ -1109,15 -1048,8 +1113,15 @@@
if (tp->version == RTL_VER_01) ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data); - else - ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data); + else { + /* if this is not an RTL8153-AD, no eFuse mac pass thru is set, + * or the system doesn't provide a valid _SB.AMAC, this is + * expected to return non-zero + */ + ret = vendor_mac_passthru_addr_read(tp, &sa); + if (ret < 0) + ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data); + }
if (ret < 0) { netif_err(tp, probe, dev, "Get ether addr fail\n"); @@@ -1820,7 -1752,7 +1824,7 @@@ static int rx_bottom(struct r8152 *tp, pkt_len -= CRC_SIZE; rx_data += sizeof(struct rx_desc);
- skb = netdev_alloc_skb_ip_align(netdev, pkt_len); + skb = napi_alloc_skb(&tp->napi, pkt_len); if (!skb) { stats->rx_dropped++; goto find_next_rx; @@@ -2368,6 -2300,10 +2372,6 @@@ static u32 __rtl_get_wol(struct r8152 * u32 ocp_data; u32 wolopts = 0;
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5); - if (!(ocp_data & LAN_WAKE_EN)) - return 0; - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34); if (ocp_data & LINK_ON_WAKE_EN) wolopts |= WAKE_PHY; @@@ -2400,13 -2336,15 +2404,13 @@@ static void __rtl_set_wol(struct r8152 ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5); - ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN | LAN_WAKE_EN); + ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN); if (wolopts & WAKE_UCAST) ocp_data |= UWF_EN; if (wolopts & WAKE_BCAST) ocp_data |= BWF_EN; if (wolopts & WAKE_MCAST) ocp_data |= MWF_EN; - if (wolopts & WAKE_ANY) - ocp_data |= LAN_WAKE_EN; ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); @@@ -2512,27 -2450,6 +2516,6 @@@ static void rtl8153_runtime_enable(stru } }
- static void rtl_phy_reset(struct r8152 *tp) - { - u16 data; - int i; - - data = r8152_mdio_read(tp, MII_BMCR); - - /* don't reset again before the previous one complete */ - if (data & BMCR_RESET) - return; - - data |= BMCR_RESET; - r8152_mdio_write(tp, MII_BMCR, data); - - for (i = 0; i < 50; i++) { - msleep(20); - if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0) - break; - } - } - static void r8153_teredo_off(struct r8152 *tp) { u32 ocp_data; @@@ -2600,8 -2517,6 +2583,6 @@@ static void r8152b_exit_oob(struct r815
rxdy_gated_en(tp, true); r8153_teredo_off(tp); - r8152b_hw_phy_cfg(tp); - ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00);
@@@ -2779,8 -2694,6 +2760,6 @@@ static void r8153_first_init(struct r81 ocp_data &= ~RCR_ACPT_ALL; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
- r8153_hw_phy_cfg(tp); - rtl8152_nic_reset(tp); rtl_reset_bmu(tp);
@@@ -2916,7 -2829,6 +2895,6 @@@ static int rtl8152_set_speed(struct r81 u16 bmcr, anar, gbcr; int ret = 0;
- cancel_delayed_work_sync(&tp->schedule); anar = r8152_mdio_read(tp, MII_ADVERTISE); anar &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL | ADVERTISE_100HALF | ADVERTISE_100FULL); @@@ -2976,7 -2888,7 +2954,7 @@@ bmcr = BMCR_ANENABLE | BMCR_ANRESTART; }
- if (test_bit(PHY_RESET, &tp->flags)) + if (test_and_clear_bit(PHY_RESET, &tp->flags)) bmcr |= BMCR_RESET;
if (tp->mii.supports_gmii) @@@ -2985,7 -2897,7 +2963,7 @@@ r8152_mdio_write(tp, MII_ADVERTISE, anar); r8152_mdio_write(tp, MII_BMCR, bmcr);
- if (test_and_clear_bit(PHY_RESET, &tp->flags)) { + if (bmcr & BMCR_RESET) { int i;
for (i = 0; i < 50; i++) { @@@ -3135,15 -3047,33 +3113,33 @@@ static void rtl_work_func_t(struct work netif_carrier_ok(tp->netdev)) napi_schedule(&tp->napi);
- if (test_and_clear_bit(PHY_RESET, &tp->flags)) - rtl_phy_reset(tp); - mutex_unlock(&tp->control);
out1: usb_autopm_put_interface(tp->intf); }
+ static void rtl_hw_phy_work_func_t(struct work_struct *work) + { + struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work); + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + if (usb_autopm_get_interface(tp->intf) < 0) + return; + + mutex_lock(&tp->control); + + tp->rtl_ops.hw_phy_cfg(tp); + + rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex); + + mutex_unlock(&tp->control); + + usb_autopm_put_interface(tp->intf); + } + #ifdef CONFIG_PM_SLEEP static int rtl_notifier(struct notifier_block *nb, unsigned long action, void *data) @@@ -3180,8 -3110,6 +3176,6 @@@ static int rtl8152_open(struct net_devi if (res) goto out;
- netif_carrier_off(netdev); - res = usb_autopm_get_interface(tp->intf); if (res < 0) { free_all_mem(tp); @@@ -3192,9 -3120,6 +3186,6 @@@
tp->rtl_ops.up(tp);
- rtl8152_set_speed(tp, AUTONEG_ENABLE, - tp->mii.supports_gmii ? SPEED_1000 : SPEED_100, - DUPLEX_FULL); netif_carrier_off(netdev); netif_start_queue(netdev); set_bit(WORK_ENABLE, &tp->flags); @@@ -3618,6 -3543,7 +3609,7 @@@ static int rtl8152_resume(struct usb_in
if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) { tp->rtl_ops.init(tp); + queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); netif_device_attach(tp->netdev); }
@@@ -3632,10 -3558,6 +3624,6 @@@ napi_enable(&tp->napi); } else { tp->rtl_ops.up(tp); - rtl8152_set_speed(tp, AUTONEG_ENABLE, - tp->mii.supports_gmii ? - SPEED_1000 : SPEED_100, - DUPLEX_FULL); netif_carrier_off(tp->netdev); set_bit(WORK_ENABLE, &tp->flags); } @@@ -3765,6 -3687,11 +3753,11 @@@ static int rtl8152_set_settings(struct mutex_lock(&tp->control);
ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex); + if (!ret) { + tp->autoneg = cmd->autoneg; + tp->speed = cmd->speed; + tp->duplex = cmd->duplex; + }
mutex_unlock(&tp->control);
@@@ -4222,6 -4149,7 +4215,7 @@@ static int rtl_ops_init(struct r8152 *t ops->eee_get = r8152_get_eee; ops->eee_set = r8152_set_eee; ops->in_nway = rtl8152_in_nway; + ops->hw_phy_cfg = r8152b_hw_phy_cfg; ops->autosuspend_en = rtl_runtime_suspend_enable; break;
@@@ -4238,6 -4166,7 +4232,7 @@@ ops->eee_get = r8153_get_eee; ops->eee_set = r8153_set_eee; ops->in_nway = rtl8153_in_nway; + ops->hw_phy_cfg = r8153_hw_phy_cfg; ops->autosuspend_en = rtl8153_runtime_enable; break;
@@@ -4285,6 -4214,7 +4280,7 @@@ static int rtl8152_probe(struct usb_int
mutex_init(&tp->control); INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t); + INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t);
netdev->netdev_ops = &rtl8152_netdev_ops; netdev->watchdog_timeo = RTL8152_TX_TIMEOUT; @@@ -4324,9 -4254,14 +4320,14 @@@ break; }
+ tp->autoneg = AUTONEG_ENABLE; + tp->speed = tp->mii.supports_gmii ? SPEED_1000 : SPEED_100; + tp->duplex = DUPLEX_FULL; + intf->needs_remote_wakeup = 1;
tp->rtl_ops.init(tp); + queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); set_ethernet_addr(tp);
usb_set_intfdata(intf, tp); @@@ -4372,6 -4307,7 +4373,7 @@@ static void rtl8152_disconnect(struct u
netif_napi_del(&tp->napi); unregister_netdev(tp->netdev); + cancel_delayed_work_sync(&tp->hw_phy_work); tp->rtl_ops.unload(tp); free_netdev(tp->netdev); } diff --combined include/linux/acpi.h index 08235a6,4d4bb49..94e5665 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@@ -208,6 -208,7 +208,6 @@@ void acpi_boot_table_init (void) int acpi_mps_check (void); int acpi_numa_init (void);
-void early_acpi_table_init(void *data, size_t size); int acpi_table_init (void); int acpi_table_parse(char *id, acpi_tbl_table_handler handler); int __init acpi_parse_entries(char *id, unsigned long table_size, @@@ -231,26 -232,12 +231,26 @@@ int acpi_table_parse_madt(enum acpi_mad int acpi_parse_mcfg (struct acpi_table_header *header); void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
-/* the following four functions are architecture-dependent */ +/* the following numa functions are architecture-dependent */ void acpi_numa_slit_init (struct acpi_table_slit *slit); + +#if defined(CONFIG_X86) || defined(CONFIG_IA64) void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); +#else +static inline void +acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { } +#endif + void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); + +#ifdef CONFIG_ARM64 +void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa); +#else +static inline void +acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { } +#endif + int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); -void acpi_numa_arch_fixup(void);
#ifndef PHYS_CPUID_INVALID typedef u32 phys_cpuid_t; @@@ -545,24 -532,6 +545,24 @@@ void acpi_walk_dep_device_list(acpi_han struct platform_device *acpi_create_platform_device(struct acpi_device *); #define ACPI_PTR(_ptr) (_ptr)
+static inline void acpi_device_set_enumerated(struct acpi_device *adev) +{ + adev->flags.visited = true; +} + +static inline void acpi_device_clear_enumerated(struct acpi_device *adev) +{ + adev->flags.visited = false; +} + +enum acpi_reconfig_event { + ACPI_RECONFIG_DEVICE_ADD = 0, + ACPI_RECONFIG_DEVICE_REMOVE, +}; + +int acpi_reconfig_notifier_register(struct notifier_block *nb); +int acpi_reconfig_notifier_unregister(struct notifier_block *nb); + #else /* !CONFIG_ACPI */
#define acpi_disabled 1 @@@ -574,6 -543,11 +574,11 @@@
struct fwnode_handle;
+ static inline bool acpi_dev_found(const char *hid) + { + return false; + } + static inline bool is_acpi_node(struct fwnode_handle *fwnode) { return false; @@@ -619,6 -593,7 +624,6 @@@ static inline const char *acpi_dev_name return NULL; }
-static inline void early_acpi_table_init(void *data, size_t size) { } static inline void acpi_early_init(void) { } static inline void acpi_subsystem_init(void) { }
@@@ -684,6 -659,14 +689,14 @@@ static inline bool acpi_driver_match_de return false; }
+ static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle, + const u8 *uuid, + int rev, int func, + union acpi_object *argv4) + { + return NULL; + } + static inline int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env) { @@@ -708,24 -691,6 +721,24 @@@ static inline enum dev_dma_attr acpi_ge
#define ACPI_PTR(_ptr) (NULL)
+static inline void acpi_device_set_enumerated(struct acpi_device *adev) +{ +} + +static inline void acpi_device_clear_enumerated(struct acpi_device *adev) +{ +} + +static inline int acpi_reconfig_notifier_register(struct notifier_block *nb) +{ + return -EINVAL; +} + +static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb) +{ + return -EINVAL; +} + #endif /* !CONFIG_ACPI */
#ifdef CONFIG_ACPI @@@ -1045,10 -1010,4 +1058,10 @@@ static inline struct fwnode_handle *acp #define acpi_probe_device_table(t) ({ int __r = 0; __r;}) #endif
+#ifdef CONFIG_ACPI_TABLE_UPGRADE +void acpi_table_upgrade(void); +#else +static inline void acpi_table_upgrade(void) { } +#endif + #endif /*_LINUX_ACPI_H*/ diff --combined include/net/netfilter/nf_conntrack.h index b6083c3,5d3397f..092ca19 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@@ -85,6 -85,9 +85,9 @@@ struct nf_conn spinlock_t lock; u16 cpu;
+ #ifdef CONFIG_NF_CONNTRACK_ZONES + struct nf_conntrack_zone zone; + #endif /* XXX should I move this to the tail ? - Y.K */ /* These are my tuples; original and reply */ struct nf_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX]; @@@ -284,17 -287,10 +287,18 @@@ static inline bool nf_is_loopback_packe return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK; }
+/* jiffies until ct expires, 0 if already expired */ +static inline unsigned long nf_ct_expires(const struct nf_conn *ct) +{ + long timeout = (long)ct->timeout.expires - (long)jiffies; + + return timeout > 0 ? timeout : 0; +} + struct kernel_param;
int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp); + int nf_conntrack_hash_resize(unsigned int hashsize); extern unsigned int nf_conntrack_htable_size; extern unsigned int nf_conntrack_max;
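As a quick illustration of the nf_ct_expires() helper added above (not part of this commit; the wrapper name is made up), a caller wanting the remaining conntrack lifetime in seconds would simply scale the returned jiffies:

static unsigned long example_ct_seconds_left(const struct nf_conn *ct)
{
	/* nf_ct_expires() already clamps already-expired entries to 0 jiffies */
	return nf_ct_expires(ct) / HZ;
}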
diff --combined kernel/events/core.c index 43d43a2d,9c51ec3..61cab45 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@@ -1678,33 -1678,12 +1678,33 @@@ static bool is_orphaned_event(struct pe return event->state == PERF_EVENT_STATE_DEAD; }
-static inline int pmu_filter_match(struct perf_event *event) +static inline int __pmu_filter_match(struct perf_event *event) { struct pmu *pmu = event->pmu; return pmu->filter_match ? pmu->filter_match(event) : 1; }
+/* + * Check whether we should attempt to schedule an event group based on + * PMU-specific filtering. An event group can consist of HW and SW events, + * potentially with a SW leader, so we must check all the filters, to + * determine whether a group is schedulable: + */ +static inline int pmu_filter_match(struct perf_event *event) +{ + struct perf_event *child; + + if (!__pmu_filter_match(event)) + return 0; + + list_for_each_entry(child, &event->sibling_list, group_entry) { + if (!__pmu_filter_match(child)) + return 0; + } + + return 1; +} + static inline int event_filter_match(struct perf_event *event) { @@@ -7550,7 -7529,7 +7550,7 @@@ static void perf_event_free_bpf_prog(st prog = event->tp_event->prog; if (prog) { event->tp_event->prog = NULL; - bpf_prog_put_rcu(prog); + bpf_prog_put(prog); } }
diff --combined net/batman-adv/bridge_loop_avoidance.c index 825a5cd,e4f7494..ad2ffe1 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@@ -48,6 -48,7 +48,7 @@@
#include "hard-interface.h" #include "hash.h" + #include "log.h" #include "originator.h" #include "packet.h" #include "sysfs.h" @@@ -177,21 -178,10 +178,21 @@@ static void batadv_backbone_gw_put(stru static void batadv_claim_release(struct kref *ref) { struct batadv_bla_claim *claim; + struct batadv_bla_backbone_gw *old_backbone_gw;
claim = container_of(ref, struct batadv_bla_claim, refcount);
- batadv_backbone_gw_put(claim->backbone_gw); + spin_lock_bh(&claim->backbone_lock); + old_backbone_gw = claim->backbone_gw; + claim->backbone_gw = NULL; + spin_unlock_bh(&claim->backbone_lock); + + spin_lock_bh(&old_backbone_gw->crc_lock); + old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); + spin_unlock_bh(&old_backbone_gw->crc_lock); + + batadv_backbone_gw_put(old_backbone_gw); + kfree_rcu(claim, rcu); }
@@@ -429,12 -419,9 +430,12 @@@ static void batadv_bla_send_claim(struc break; }
- if (vid & BATADV_VLAN_HAS_TAG) + if (vid & BATADV_VLAN_HAS_TAG) { skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), vid & VLAN_VID_MASK); + if (!skb) + goto out; + }
skb_reset_mac_header(skb); skb->protocol = eth_type_trans(skb, soft_iface); @@@ -688,10 -675,8 +689,10 @@@ static void batadv_bla_add_claim(struc const u8 *mac, const unsigned short vid, struct batadv_bla_backbone_gw *backbone_gw) { + struct batadv_bla_backbone_gw *old_backbone_gw; struct batadv_bla_claim *claim; struct batadv_bla_claim search_claim; + bool remove_crc = false; int hash_added;
ether_addr_copy(search_claim.addr, mac); @@@ -705,10 -690,8 +706,10 @@@ return;
ether_addr_copy(claim->addr, mac); + spin_lock_init(&claim->backbone_lock); claim->vid = vid; claim->lasttime = jiffies; + kref_get(&backbone_gw->refcount); claim->backbone_gw = backbone_gw;
kref_init(&claim->refcount); @@@ -736,26 -719,15 +737,26 @@@ "bla_add_claim(): changing ownership for %pM, vid %d\n", mac, BATADV_PRINT_VID(vid));
- spin_lock_bh(&claim->backbone_gw->crc_lock); - claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); - spin_unlock_bh(&claim->backbone_gw->crc_lock); - batadv_backbone_gw_put(claim->backbone_gw); + remove_crc = true; } - /* set (new) backbone gw */ + + /* replace backbone_gw atomically and adjust reference counters */ + spin_lock_bh(&claim->backbone_lock); + old_backbone_gw = claim->backbone_gw; kref_get(&backbone_gw->refcount); claim->backbone_gw = backbone_gw; + spin_unlock_bh(&claim->backbone_lock); + + if (remove_crc) { + /* remove claim address from old backbone_gw */ + spin_lock_bh(&old_backbone_gw->crc_lock); + old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); + spin_unlock_bh(&old_backbone_gw->crc_lock); + }
+ batadv_backbone_gw_put(old_backbone_gw); + + /* add claim address to new backbone_gw */ spin_lock_bh(&backbone_gw->crc_lock); backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); spin_unlock_bh(&backbone_gw->crc_lock); @@@ -766,26 -738,6 +767,26 @@@ claim_free_ref }
/** + * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of + * claim + * @claim: claim whose backbone_gw should be returned + * + * Return: valid reference to claim::backbone_gw + */ +static struct batadv_bla_backbone_gw * +batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim) +{ + struct batadv_bla_backbone_gw *backbone_gw; + + spin_lock_bh(&claim->backbone_lock); + backbone_gw = claim->backbone_gw; + kref_get(&backbone_gw->refcount); + spin_unlock_bh(&claim->backbone_lock); + + return backbone_gw; +} + +/** * batadv_bla_del_claim - delete a claim from the claim hash * @bat_priv: the bat priv with all the soft interface information * @mac: mac address of the claim to be removed @@@ -809,6 -761,10 +810,6 @@@ static void batadv_bla_del_claim(struc batadv_choose_claim, claim); batadv_claim_put(claim); /* reference from the hash is gone */
- spin_lock_bh(&claim->backbone_gw->crc_lock); - claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); - spin_unlock_bh(&claim->backbone_gw->crc_lock); - /* don't need the reference from hash_find() anymore */ batadv_claim_put(claim); } @@@ -1261,7 -1217,6 +1262,7 @@@ static void batadv_bla_purge_claims(str struct batadv_hard_iface *primary_if, int now) { + struct batadv_bla_backbone_gw *backbone_gw; struct batadv_bla_claim *claim; struct hlist_head *head; struct batadv_hashtable *hash; @@@ -1276,17 -1231,14 +1277,17 @@@
rcu_read_lock(); hlist_for_each_entry_rcu(claim, head, hash_entry) { + backbone_gw = batadv_bla_claim_get_backbone_gw(claim); if (now) goto purge_now; - if (!batadv_compare_eth(claim->backbone_gw->orig, + + if (!batadv_compare_eth(backbone_gw->orig, primary_if->net_dev->dev_addr)) - continue; + goto skip; + if (!batadv_has_timed_out(claim->lasttime, BATADV_BLA_CLAIM_TIMEOUT)) - continue; + goto skip;
batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_purge_claims(): %pM, vid %d, time out\n", @@@ -1294,10 -1246,8 +1295,10 @@@
purge_now: batadv_handle_unclaim(bat_priv, primary_if, - claim->backbone_gw->orig, + backbone_gw->orig, claim->addr, claim->vid); +skip: + batadv_backbone_gw_put(backbone_gw); } rcu_read_unlock(); } @@@ -1808,11 -1758,9 +1809,11 @@@ batadv_bla_loopdetect_check(struct bata bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid, bool is_bcast) { + struct batadv_bla_backbone_gw *backbone_gw; struct ethhdr *ethhdr; struct batadv_bla_claim search_claim, *claim = NULL; struct batadv_hard_iface *primary_if; + bool own_claim; bool ret;
ethhdr = eth_hdr(skb); @@@ -1847,12 -1795,8 +1848,12 @@@ }
/* if it is our own claim ... */ - if (batadv_compare_eth(claim->backbone_gw->orig, - primary_if->net_dev->dev_addr)) { + backbone_gw = batadv_bla_claim_get_backbone_gw(claim); + own_claim = batadv_compare_eth(backbone_gw->orig, + primary_if->net_dev->dev_addr); + batadv_backbone_gw_put(backbone_gw); + + if (own_claim) { /* ... allow it in any case */ claim->lasttime = jiffies; goto allow; @@@ -1916,9 -1860,7 +1917,9 @@@ bool batadv_bla_tx(struct batadv_priv * { struct ethhdr *ethhdr; struct batadv_bla_claim search_claim, *claim = NULL; + struct batadv_bla_backbone_gw *backbone_gw; struct batadv_hard_iface *primary_if; + bool client_roamed; bool ret = false;
primary_if = batadv_primary_if_get_selected(bat_priv); @@@ -1948,12 -1890,8 +1949,12 @@@ goto allow;
/* check if we are responsible. */ - if (batadv_compare_eth(claim->backbone_gw->orig, - primary_if->net_dev->dev_addr)) { + backbone_gw = batadv_bla_claim_get_backbone_gw(claim); + client_roamed = batadv_compare_eth(backbone_gw->orig, + primary_if->net_dev->dev_addr); + batadv_backbone_gw_put(backbone_gw); + + if (client_roamed) { /* if yes, the client has roamed and we have * to unclaim it. */ @@@ -2001,7 -1939,6 +2002,7 @@@ int batadv_bla_claim_table_seq_print_te struct net_device *net_dev = (struct net_device *)seq->private; struct batadv_priv *bat_priv = netdev_priv(net_dev); struct batadv_hashtable *hash = bat_priv->bla.claim_hash; + struct batadv_bla_backbone_gw *backbone_gw; struct batadv_bla_claim *claim; struct batadv_hard_iface *primary_if; struct hlist_head *head; @@@ -2026,21 -1963,17 +2027,21 @@@
rcu_read_lock(); hlist_for_each_entry_rcu(claim, head, hash_entry) { - is_own = batadv_compare_eth(claim->backbone_gw->orig, + backbone_gw = batadv_bla_claim_get_backbone_gw(claim); + + is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);
- spin_lock_bh(&claim->backbone_gw->crc_lock); - backbone_crc = claim->backbone_gw->crc; - spin_unlock_bh(&claim->backbone_gw->crc_lock); + spin_lock_bh(&backbone_gw->crc_lock); + backbone_crc = backbone_gw->crc; + spin_unlock_bh(&backbone_gw->crc_lock); seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n", claim->addr, BATADV_PRINT_VID(claim->vid), - claim->backbone_gw->orig, + backbone_gw->orig, (is_own ? 'x' : ' '), backbone_crc); + + batadv_backbone_gw_put(backbone_gw); } rcu_read_unlock(); } diff --combined net/batman-adv/distributed-arp-table.c index aee3b39,fa76465..b1cc8bf --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@@ -45,9 -45,11 +45,11 @@@
#include "hard-interface.h" #include "hash.h" + #include "log.h" #include "originator.h" #include "send.h" #include "translation-table.h" + #include "tvlv.h"
static void batadv_dat_purge(struct work_struct *work);
@@@ -1009,12 -1011,9 +1011,12 @@@ bool batadv_dat_snoop_outgoing_arp_requ if (!skb_new) goto out;
- if (vid & BATADV_VLAN_HAS_TAG) + if (vid & BATADV_VLAN_HAS_TAG) { skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q), vid & VLAN_VID_MASK); + if (!skb_new) + goto out; + }
skb_reset_mac_header(skb_new); skb_new->protocol = eth_type_trans(skb_new, @@@ -1092,12 -1091,9 +1094,12 @@@ bool batadv_dat_snoop_incoming_arp_requ */ skb_reset_mac_header(skb_new);
- if (vid & BATADV_VLAN_HAS_TAG) + if (vid & BATADV_VLAN_HAS_TAG) { skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q), vid & VLAN_VID_MASK); + if (!skb_new) + goto out; + }
/* To preserve backwards compatibility, the node has to choose the outgoing * format based on the incoming request packet type. The assumption is diff --combined net/batman-adv/originator.c index ab8c4f9,7d1e542..3940b5d --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@@ -34,11 -34,13 +34,13 @@@ #include <linux/spinlock.h> #include <linux/workqueue.h>
+ #include "bat_algo.h" #include "distributed-arp-table.h" #include "fragmentation.h" #include "gateway_client.h" #include "hard-interface.h" #include "hash.h" + #include "log.h" #include "multicast.h" #include "network-coding.h" #include "routing.h" @@@ -251,10 -253,8 +253,8 @@@ static void batadv_neigh_node_release(s struct hlist_node *node_tmp; struct batadv_neigh_node *neigh_node; struct batadv_neigh_ifinfo *neigh_ifinfo; - struct batadv_algo_ops *bao;
neigh_node = container_of(ref, struct batadv_neigh_node, refcount); - bao = neigh_node->orig_node->bat_priv->bat_algo_ops;
hlist_for_each_entry_safe(neigh_ifinfo, node_tmp, &neigh_node->ifinfo_list, list) { @@@ -263,9 -263,6 +263,6 @@@
batadv_hardif_neigh_put(neigh_node->hardif_neigh);
- if (bao->bat_neigh_free) - bao->bat_neigh_free(neigh_node); - batadv_hardif_put(neigh_node->if_incoming);
kfree_rcu(neigh_node, rcu); @@@ -537,8 -534,8 +534,8 @@@ batadv_hardif_neigh_create(struct batad
kref_init(&hardif_neigh->refcount);
- if (bat_priv->bat_algo_ops->bat_hardif_neigh_init) - bat_priv->bat_algo_ops->bat_hardif_neigh_init(hardif_neigh); + if (bat_priv->algo_ops->neigh.hardif_init) + bat_priv->algo_ops->neigh.hardif_init(hardif_neigh);
hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);
@@@ -602,19 -599,19 +599,19 @@@ batadv_hardif_neigh_get(const struct ba }
/** - * batadv_neigh_node_new - create and init a new neigh_node object + * batadv_neigh_node_create - create a neigh node object * @orig_node: originator object representing the neighbour * @hard_iface: the interface where the neighbour is connected to * @neigh_addr: the mac address of the neighbour interface * * Allocates a new neigh_node object and initialises all the generic fields. * - * Return: neighbor when found. Othwerwise NULL + * Return: the neighbour node if found or created or NULL otherwise. */ - struct batadv_neigh_node * - batadv_neigh_node_new(struct batadv_orig_node *orig_node, - struct batadv_hard_iface *hard_iface, - const u8 *neigh_addr) + static struct batadv_neigh_node * + batadv_neigh_node_create(struct batadv_orig_node *orig_node, + struct batadv_hard_iface *hard_iface, + const u8 *neigh_addr) { struct batadv_neigh_node *neigh_node; struct batadv_hardif_neigh_node *hardif_neigh = NULL; @@@ -667,6 -664,29 +664,29 @@@ out }
/** + * batadv_neigh_node_get_or_create - retrieve or create a neigh node object + * @orig_node: originator object representing the neighbour + * @hard_iface: the interface where the neighbour is connected to + * @neigh_addr: the mac address of the neighbour interface + * + * Return: the neighbour node if found or created or NULL otherwise. + */ + struct batadv_neigh_node * + batadv_neigh_node_get_or_create(struct batadv_orig_node *orig_node, + struct batadv_hard_iface *hard_iface, + const u8 *neigh_addr) + { + struct batadv_neigh_node *neigh_node = NULL; + + /* first check without locking to avoid the overhead */ + neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr); + if (neigh_node) + return neigh_node; + + return batadv_neigh_node_create(orig_node, hard_iface, neigh_addr); + } + + /** * batadv_hardif_neigh_seq_print_text - print the single hop neighbour list * @seq: neighbour table seq_file struct * @offset: not used @@@ -686,17 -706,17 +706,17 @@@ int batadv_hardif_neigh_seq_print_text( seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n", BATADV_SOURCE_VERSION, primary_if->net_dev->name, primary_if->net_dev->dev_addr, net_dev->name, - bat_priv->bat_algo_ops->name); + bat_priv->algo_ops->name);
batadv_hardif_put(primary_if);
- if (!bat_priv->bat_algo_ops->bat_neigh_print) { + if (!bat_priv->algo_ops->neigh.print) { seq_puts(seq, "No printing function for this routing protocol\n"); return 0; }
- bat_priv->bat_algo_ops->bat_neigh_print(bat_priv, seq); + bat_priv->algo_ops->neigh.print(bat_priv, seq); return 0; }
@@@ -747,8 -767,8 +767,8 @@@ static void batadv_orig_node_free_rcu(s
batadv_frag_purge_orig(orig_node, NULL);
- if (orig_node->bat_priv->bat_algo_ops->bat_orig_free) - orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node); + if (orig_node->bat_priv->algo_ops->orig.free) + orig_node->bat_priv->algo_ops->orig.free(orig_node);
kfree(orig_node->tt_buff); kfree(orig_node); @@@ -765,8 -785,6 +785,8 @@@ static void batadv_orig_node_release(st struct batadv_neigh_node *neigh_node; struct batadv_orig_node *orig_node; struct batadv_orig_ifinfo *orig_ifinfo; + struct batadv_orig_node_vlan *vlan; + struct batadv_orig_ifinfo *last_candidate;
orig_node = container_of(ref, struct batadv_orig_node, refcount);
@@@ -784,21 -802,8 +804,21 @@@ hlist_del_rcu(&orig_ifinfo->list); batadv_orig_ifinfo_put(orig_ifinfo); } + + last_candidate = orig_node->last_bonding_candidate; + orig_node->last_bonding_candidate = NULL; spin_unlock_bh(&orig_node->neigh_list_lock);
+ if (last_candidate) + batadv_orig_ifinfo_put(last_candidate); + + spin_lock_bh(&orig_node->vlan_list_lock); + hlist_for_each_entry_safe(vlan, node_tmp, &orig_node->vlan_list, list) { + hlist_del_rcu(&vlan->list); + batadv_orig_node_vlan_put(vlan); + } + spin_unlock_bh(&orig_node->vlan_list_lock); + /* Free nc_nodes */ batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
@@@ -1092,12 -1097,12 +1112,12 @@@ batadv_find_best_neighbor(struct batadv struct batadv_hard_iface *if_outgoing) { struct batadv_neigh_node *best = NULL, *neigh; - struct batadv_algo_ops *bao = bat_priv->bat_algo_ops; + struct batadv_algo_ops *bao = bat_priv->algo_ops;
rcu_read_lock(); hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) { - if (best && (bao->bat_neigh_cmp(neigh, if_outgoing, - best, if_outgoing) <= 0)) + if (best && (bao->neigh.cmp(neigh, if_outgoing, best, + if_outgoing) <= 0)) continue;
if (!kref_get_unless_zero(&neigh->refcount)) @@@ -1249,18 -1254,17 +1269,17 @@@ int batadv_orig_seq_print_text(struct s seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n", BATADV_SOURCE_VERSION, primary_if->net_dev->name, primary_if->net_dev->dev_addr, net_dev->name, - bat_priv->bat_algo_ops->name); + bat_priv->algo_ops->name);
batadv_hardif_put(primary_if);
- if (!bat_priv->bat_algo_ops->bat_orig_print) { + if (!bat_priv->algo_ops->orig.print) { seq_puts(seq, "No printing function for this routing protocol\n"); return 0; }
- bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, - BATADV_IF_DEFAULT); + bat_priv->algo_ops->orig.print(bat_priv, seq, BATADV_IF_DEFAULT);
return 0; } @@@ -1287,7 -1291,7 +1306,7 @@@ int batadv_orig_hardif_seq_print_text(s }
bat_priv = netdev_priv(hard_iface->soft_iface); - if (!bat_priv->bat_algo_ops->bat_orig_print) { + if (!bat_priv->algo_ops->orig.print) { seq_puts(seq, "No printing function for this routing protocol\n"); goto out; @@@ -1301,9 -1305,9 +1320,9 @@@ seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n", BATADV_SOURCE_VERSION, hard_iface->net_dev->name, hard_iface->net_dev->dev_addr, - hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name); + hard_iface->soft_iface->name, bat_priv->algo_ops->name);
- bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface); + bat_priv->algo_ops->orig.print(bat_priv, seq, hard_iface);
out: if (hard_iface) @@@ -1315,7 -1319,7 +1334,7 @@@ int batadv_orig_hash_add_if(struct bata int max_if_num) { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); - struct batadv_algo_ops *bao = bat_priv->bat_algo_ops; + struct batadv_algo_ops *bao = bat_priv->algo_ops; struct batadv_hashtable *hash = bat_priv->orig_hash; struct hlist_head *head; struct batadv_orig_node *orig_node; @@@ -1331,9 -1335,8 +1350,8 @@@ rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, head, hash_entry) { ret = 0; - if (bao->bat_orig_add_if) - ret = bao->bat_orig_add_if(orig_node, - max_if_num); + if (bao->orig.add_if) + ret = bao->orig.add_if(orig_node, max_if_num); if (ret == -ENOMEM) goto err; } @@@ -1355,7 -1358,7 +1373,7 @@@ int batadv_orig_hash_del_if(struct bata struct hlist_head *head; struct batadv_hard_iface *hard_iface_tmp; struct batadv_orig_node *orig_node; - struct batadv_algo_ops *bao = bat_priv->bat_algo_ops; + struct batadv_algo_ops *bao = bat_priv->algo_ops; u32 i; int ret;
@@@ -1368,10 -1371,9 +1386,9 @@@ rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, head, hash_entry) { ret = 0; - if (bao->bat_orig_del_if) - ret = bao->bat_orig_del_if(orig_node, - max_if_num, - hard_iface->if_num); + if (bao->orig.del_if) + ret = bao->orig.del_if(orig_node, max_if_num, + hard_iface->if_num); if (ret == -ENOMEM) goto err; } diff --combined net/batman-adv/routing.c index bfac086,af8e119..7602c00 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@@ -40,12 -40,15 +40,15 @@@ #include "fragmentation.h" #include "hard-interface.h" #include "icmp_socket.h" + #include "log.h" #include "network-coding.h" #include "originator.h" #include "packet.h" #include "send.h" #include "soft-interface.h" + #include "tp_meter.h" #include "translation-table.h" + #include "tvlv.h"
static int batadv_route_unicast_packet(struct sk_buff *skb, struct batadv_hard_iface *recv_if); @@@ -268,10 -271,19 +271,19 @@@ static int batadv_recv_my_icmp_packet(s icmph->ttl = BATADV_TTL;
res = batadv_send_skb_to_orig(skb, orig_node, NULL); - if (res != NET_XMIT_DROP) - ret = NET_RX_SUCCESS; + if (res == -1) + goto out; + + ret = NET_RX_SUCCESS;
break; + case BATADV_TP: + if (!pskb_may_pull(skb, sizeof(struct batadv_icmp_tp_packet))) + goto out; + + batadv_tp_meter_recv(bat_priv, skb); + ret = NET_RX_SUCCESS; + goto out; default: /* drop unknown type */ goto out; @@@ -290,7 -302,7 +302,7 @@@ static int batadv_recv_icmp_ttl_exceede struct batadv_hard_iface *primary_if = NULL; struct batadv_orig_node *orig_node = NULL; struct batadv_icmp_packet *icmp_packet; - int ret = NET_RX_DROP; + int res, ret = NET_RX_DROP;
icmp_packet = (struct batadv_icmp_packet *)skb->data;
@@@ -321,7 -333,8 +333,8 @@@ icmp_packet->msg_type = BATADV_TTL_EXCEEDED; icmp_packet->ttl = BATADV_TTL;
- if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP) + res = batadv_send_skb_to_orig(skb, orig_node, NULL); + if (res != -1) ret = NET_RX_SUCCESS;
out: @@@ -341,7 -354,7 +354,7 @@@ int batadv_recv_icmp_packet(struct sk_b struct ethhdr *ethhdr; struct batadv_orig_node *orig_node = NULL; int hdr_size = sizeof(struct batadv_icmp_header); - int ret = NET_RX_DROP; + int res, ret = NET_RX_DROP;
/* drop packet if it has not necessary minimum size */ if (unlikely(!pskb_may_pull(skb, hdr_size))) @@@ -408,7 -421,8 +421,8 @@@ icmph->ttl--;
/* route it */ - if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP) + res = batadv_send_skb_to_orig(skb, orig_node, recv_if); + if (res != -1) ret = NET_RX_SUCCESS;
out: @@@ -456,29 -470,6 +470,29 @@@ static int batadv_check_unicast_packet( }
/** + * batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node + * @orig_node: originator node whose bonding candidates should be replaced + * @new_candidate: new bonding candidate or NULL + */ +static void +batadv_last_bonding_replace(struct batadv_orig_node *orig_node, + struct batadv_orig_ifinfo *new_candidate) +{ + struct batadv_orig_ifinfo *old_candidate; + + spin_lock_bh(&orig_node->neigh_list_lock); + old_candidate = orig_node->last_bonding_candidate; + + if (new_candidate) + kref_get(&new_candidate->refcount); + orig_node->last_bonding_candidate = new_candidate; + spin_unlock_bh(&orig_node->neigh_list_lock); + + if (old_candidate) + batadv_orig_ifinfo_put(old_candidate); +} + +/** * batadv_find_router - find a suitable router for this originator * @bat_priv: the bat priv with all the soft interface information * @orig_node: the destination node @@@ -492,7 -483,7 +506,7 @@@ batadv_find_router(struct batadv_priv * struct batadv_orig_node *orig_node, struct batadv_hard_iface *recv_if) { - struct batadv_algo_ops *bao = bat_priv->bat_algo_ops; + struct batadv_algo_ops *bao = bat_priv->algo_ops; struct batadv_neigh_node *first_candidate_router = NULL; struct batadv_neigh_node *next_candidate_router = NULL; struct batadv_neigh_node *router, *cand_router = NULL; @@@ -546,9 -537,9 +560,9 @@@ /* alternative candidate should be good enough to be * considered */ - if (!bao->bat_neigh_is_similar_or_better(cand_router, - cand->if_outgoing, - router, recv_if)) + if (!bao->neigh.is_similar_or_better(cand_router, + cand->if_outgoing, router, + recv_if)) goto next;
/* don't use the same router twice */ @@@ -585,6 -576,10 +599,6 @@@ next } rcu_read_unlock();
- /* last_bonding_candidate is reset below, remove the old reference. */ - if (orig_node->last_bonding_candidate) - batadv_orig_ifinfo_put(orig_node->last_bonding_candidate); - /* After finding candidates, handle the three cases: * 1) there is a next candidate, use that * 2) there is no next candidate, use the first of the list @@@ -593,28 -588,21 +607,28 @@@ if (next_candidate) { batadv_neigh_node_put(router);
- /* remove references to first candidate, we don't need it. */ - if (first_candidate) { - batadv_neigh_node_put(first_candidate_router); - batadv_orig_ifinfo_put(first_candidate); - } + kref_get(&next_candidate_router->refcount); router = next_candidate_router; - orig_node->last_bonding_candidate = next_candidate; + batadv_last_bonding_replace(orig_node, next_candidate); } else if (first_candidate) { batadv_neigh_node_put(router);
- /* refcounting has already been done in the loop above. */ + kref_get(&first_candidate_router->refcount); router = first_candidate_router; - orig_node->last_bonding_candidate = first_candidate; + batadv_last_bonding_replace(orig_node, first_candidate); } else { - orig_node->last_bonding_candidate = NULL; + batadv_last_bonding_replace(orig_node, NULL); + } + + /* cleanup of candidates */ + if (first_candidate) { + batadv_neigh_node_put(first_candidate_router); + batadv_orig_ifinfo_put(first_candidate); + } + + if (next_candidate) { + batadv_neigh_node_put(next_candidate_router); + batadv_orig_ifinfo_put(next_candidate); }
return router; @@@ -671,6 -659,8 +685,8 @@@ static int batadv_route_unicast_packet(
len = skb->len; res = batadv_send_skb_to_orig(skb, orig_node, recv_if); + if (res == -1) + goto out;
/* translate transmit result into receive result */ if (res == NET_XMIT_SUCCESS) { @@@ -678,13 -668,10 +694,10 @@@ batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD); batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES, len + ETH_HLEN); - - ret = NET_RX_SUCCESS; - } else if (res == NET_XMIT_POLICED) { - /* skb was buffered and consumed */ - ret = NET_RX_SUCCESS; }
+ ret = NET_RX_SUCCESS; + out: if (orig_node) batadv_orig_node_put(orig_node); @@@ -1033,6 -1020,8 +1046,8 @@@ int batadv_recv_frag_packet(struct sk_b if (!orig_node_src) goto out;
+ skb->priority = frag_packet->priority + 256; + /* Route the fragment if it is not for us and too big to be merged. */ if (!batadv_is_my_mac(bat_priv, frag_packet->dest) && batadv_frag_skb_fwd(skb, recv_if, orig_node_src)) { diff --combined net/batman-adv/send.c index 0103976,3a10d87..6191159 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@@ -20,10 -20,11 +20,11 @@@
#include <linux/atomic.h> #include <linux/byteorder/generic.h> + #include <linux/errno.h> #include <linux/etherdevice.h> #include <linux/fs.h> - #include <linux/if_ether.h> #include <linux/if.h> + #include <linux/if_ether.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/kref.h> @@@ -42,6 -43,7 +43,7 @@@ #include "fragmentation.h" #include "gateway_client.h" #include "hard-interface.h" + #include "log.h" #include "network-coding.h" #include "originator.h" #include "routing.h" @@@ -71,6 -73,7 +73,7 @@@ int batadv_send_skb_packet(struct sk_bu { struct batadv_priv *bat_priv; struct ethhdr *ethhdr; + int ret;
bat_priv = netdev_priv(hard_iface->soft_iface);
@@@ -108,8 -111,15 +111,15 @@@ /* dev_queue_xmit() returns a negative result on error. However on * congestion and traffic shaping, it drops and returns NET_XMIT_DROP * (which is > 0). This will not be treated as an error. + * + * a negative value cannot be returned because it could be interpreted + * as an unconsumed skb by callers of batadv_send_skb_to_orig. */ - return dev_queue_xmit(skb); + ret = dev_queue_xmit(skb); + if (ret < 0) + ret = NET_XMIT_DROP; + + return ret; send_skb_err: kfree_skb(skb); return NET_XMIT_DROP; @@@ -155,8 -165,11 +165,11 @@@ int batadv_send_unicast_skb(struct sk_b * host, NULL can be passed as recv_if and no interface alternating is * attempted. * - * Return: NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or - * NET_XMIT_POLICED if the skb is buffered for later transmit. + * Return: -1 on failure (and the skb is not consumed), -EINPROGRESS if the + * skb is buffered for later transmit or the NET_XMIT status returned by the + * lower routine if the packet has been passed down. + * + * If the return value is not -1, the skb has been consumed. */ int batadv_send_skb_to_orig(struct sk_buff *skb, struct batadv_orig_node *orig_node, @@@ -164,7 -177,7 +177,7 @@@ { struct batadv_priv *bat_priv = orig_node->bat_priv; struct batadv_neigh_node *neigh_node; - int ret = NET_XMIT_DROP; + int ret = -1;
/* batadv_find_router() increases neigh_nodes refcount if found. */ neigh_node = batadv_find_router(bat_priv, orig_node, recv_if); @@@ -177,8 -190,7 +190,7 @@@ if (atomic_read(&bat_priv->fragmentation) && skb->len > neigh_node->if_incoming->net_dev->mtu) { /* Fragment and send packet. */ - if (batadv_frag_send_packet(skb, orig_node, neigh_node)) - ret = NET_XMIT_SUCCESS; + ret = batadv_frag_send_packet(skb, orig_node, neigh_node);
goto out; } @@@ -187,12 -199,10 +199,10 @@@ * (i.e. being forwarded). If the packet originates from this node or if * network coding fails, then send the packet as usual. */ - if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) { - ret = NET_XMIT_POLICED; - } else { - batadv_send_unicast_skb(skb, neigh_node); - ret = NET_XMIT_SUCCESS; - } + if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) + ret = -EINPROGRESS; + else + ret = batadv_send_unicast_skb(skb, neigh_node);
out: if (neigh_node) @@@ -318,7 -328,7 +328,7 @@@ int batadv_send_skb_unicast(struct bata { struct batadv_unicast_packet *unicast_packet; struct ethhdr *ethhdr; - int ret = NET_XMIT_DROP; + int res, ret = NET_XMIT_DROP;
if (!orig_node) goto out; @@@ -355,7 -365,8 +365,8 @@@ if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) unicast_packet->ttvn = unicast_packet->ttvn - 1;
- if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP) + res = batadv_send_skb_to_orig(skb, orig_node, NULL); + if (res != -1) ret = NET_XMIT_SUCCESS;
out: @@@ -424,31 -435,11 +435,11 @@@ int batadv_send_skb_via_gw(struct batad struct batadv_orig_node *orig_node;
orig_node = batadv_gw_get_selected_orig(bat_priv); - return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0, - orig_node, vid); + return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR, + BATADV_P_DATA, orig_node, vid); }
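For reference, the hunks above change batadv_send_skb_to_orig() so that -1 is the only return value for which the caller still owns the skb; -EINPROGRESS (buffered by the network coding path) and the NET_XMIT_* codes all mean the skb has been consumed. A minimal, hypothetical caller sketch of the pattern used throughout routing.c (not part of this commit; whether the caller frees directly or lets its rx path do it is the caller's own policy):

static int example_forward(struct sk_buff *skb,
			   struct batadv_orig_node *orig_node)
{
	int res;

	res = batadv_send_skb_to_orig(skb, orig_node, NULL);
	if (res == -1) {
		/* skb was not consumed, this caller still owns and frees it */
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* -EINPROGRESS and NET_XMIT_* both mean the skb is gone */
	return NET_RX_SUCCESS;
}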
- void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface) - { - struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); - - if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) || - (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED)) - return; - - /* the interface gets activated here to avoid race conditions between - * the moment of activating the interface in - * hardif_activate_interface() where the originator mac is set and - * outdated packets (especially uninitialized mac addresses) in the - * packet queue - */ - if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED) - hard_iface->if_status = BATADV_IF_ACTIVE; - - bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface); - } - - static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet) + void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet) { kfree_skb(forw_packet->skb); if (forw_packet->if_incoming) @@@ -604,45 -595,6 +595,6 @@@ out atomic_inc(&bat_priv->bcast_queue_left); }
- void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work) - { - struct delayed_work *delayed_work; - struct batadv_forw_packet *forw_packet; - struct batadv_priv *bat_priv; - - delayed_work = to_delayed_work(work); - forw_packet = container_of(delayed_work, struct batadv_forw_packet, - delayed_work); - bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface); - spin_lock_bh(&bat_priv->forw_bat_list_lock); - hlist_del(&forw_packet->list); - spin_unlock_bh(&bat_priv->forw_bat_list_lock); - - if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) - goto out; - - bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet); - - /* we have to have at least one packet in the queue to determine the - * queues wake up time unless we are shutting down. - * - * only re-schedule if this is the "original" copy, e.g. the OGM of the - * primary interface should only be rescheduled once per period, but - * this function will be called for the forw_packet instances of the - * other secondary interfaces as well. - */ - if (forw_packet->own && - forw_packet->if_incoming == forw_packet->if_outgoing) - batadv_schedule_bat_ogm(forw_packet->if_incoming); - - out: - /* don't count own packet */ - if (!forw_packet->own) - atomic_inc(&bat_priv->batman_queue_left); - - batadv_forw_packet_free(forw_packet); - } - void batadv_purge_outstanding_packets(struct batadv_priv *bat_priv, const struct batadv_hard_iface *hard_iface) diff --combined net/batman-adv/types.h index 74d865a,43db7b6..a64522c --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@@ -33,6 -33,7 +33,7 @@@ #include <linux/types.h> #include <linux/wait.h> #include <linux/workqueue.h> + #include <uapi/linux/batman_adv.h>
#include "packet.h"
@@@ -330,9 -331,7 +331,9 @@@ struct batadv_orig_node DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE); u32 last_bcast_seqno; struct hlist_head neigh_list; - /* neigh_list_lock protects: neigh_list and router */ + /* neigh_list_lock protects: neigh_list, ifinfo_list, + * last_bonding_candidate and router + */ spinlock_t neigh_list_lock; struct hlist_node hash_entry; struct batadv_priv *bat_priv; @@@ -709,6 -708,8 +710,8 @@@ struct batadv_priv_debug_log * @list: list of available gateway nodes * @list_lock: lock protecting gw_list & curr_gw * @curr_gw: pointer to currently selected gateway node + * @mode: gateway operation: off, client or server (see batadv_gw_modes) + * @sel_class: gateway selection class (applies if gw_mode client) * @bandwidth_down: advertised uplink download bandwidth (if gw_mode server) * @bandwidth_up: advertised uplink upload bandwidth (if gw_mode server) * @reselect: bool indicating a gateway re-selection is in progress @@@ -717,6 -718,8 +720,8 @@@ struct batadv_priv_gw struct hlist_head list; spinlock_t list_lock; /* protects gw_list & curr_gw */ struct batadv_gw_node __rcu *curr_gw; /* rcu protected pointer */ + atomic_t mode; + atomic_t sel_class; atomic_t bandwidth_down; atomic_t bandwidth_up; atomic_t reselect; @@@ -753,14 -756,28 +758,28 @@@ struct batadv_priv_dat
#ifdef CONFIG_BATMAN_ADV_MCAST /** + * struct batadv_mcast_querier_state - IGMP/MLD querier state when bridged + * @exists: whether a querier exists in the mesh + * @shadowing: if a querier exists, whether it is potentially shadowing + * multicast listeners (i.e. querier is behind our own bridge segment) + */ + struct batadv_mcast_querier_state { + bool exists; + bool shadowing; + }; + + /** * struct batadv_priv_mcast - per mesh interface mcast data * @mla_list: list of multicast addresses we are currently announcing via TT * @want_all_unsnoopables_list: a list of orig_nodes wanting all unsnoopable * multicast traffic * @want_all_ipv4_list: a list of orig_nodes wanting all IPv4 multicast traffic * @want_all_ipv6_list: a list of orig_nodes wanting all IPv6 multicast traffic + * @querier_ipv4: the current state of an IGMP querier in the mesh + * @querier_ipv6: the current state of an MLD querier in the mesh * @flags: the flags we have last sent in our mcast tvlv * @enabled: whether the multicast tvlv is currently enabled + * @bridged: whether the soft interface has a bridge on top * @num_disabled: number of nodes that have no mcast tvlv * @num_want_all_unsnoopables: number of nodes wanting unsnoopable IP traffic * @num_want_all_ipv4: counter for items in want_all_ipv4_list @@@ -773,8 -790,11 +792,11 @@@ struct batadv_priv_mcast struct hlist_head want_all_unsnoopables_list; struct hlist_head want_all_ipv4_list; struct hlist_head want_all_ipv6_list; + struct batadv_mcast_querier_state querier_ipv4; + struct batadv_mcast_querier_state querier_ipv6; u8 flags; bool enabled; + bool bridged; atomic_t num_disabled; atomic_t num_want_all_unsnoopables; atomic_t num_want_all_ipv4; @@@ -814,6 -834,111 +836,111 @@@ struct batadv_priv_nc };
/** + * struct batadv_tp_unacked - unacked packet meta-information + * @seqno: seqno of the unacked packet + * @len: length of the packet + * @list: list node for batadv_tp_vars::unacked_list + * + * This struct is supposed to represent a buffer unacked packet. However, since + * the purpose of the TP meter is to count the traffic only, there is no need to + * store the entire sk_buff, the starting offset and the length are enough + */ + struct batadv_tp_unacked { + u32 seqno; + u16 len; + struct list_head list; + }; + + /** + * enum batadv_tp_meter_role - Modus in tp meter session + * @BATADV_TP_RECEIVER: Initialized as receiver + * @BATADV_TP_SENDER: Initialized as sender + */ + enum batadv_tp_meter_role { + BATADV_TP_RECEIVER, + BATADV_TP_SENDER + }; + + /** + * struct batadv_tp_vars - tp meter private variables per session + * @list: list node for bat_priv::tp_list + * @timer: timer for ack (receiver) and retry (sender) + * @bat_priv: pointer to the mesh object + * @start_time: start time in jiffies + * @other_end: mac address of remote + * @role: receiver/sender modi + * @sending: sending binary semaphore: 1 if sending, 0 is not + * @reason: reason for a stopped session + * @finish_work: work item for the finishing procedure + * @test_length: test length in milliseconds + * @session: TP session identifier + * @icmp_uid: local ICMP "socket" index + * @dec_cwnd: decimal part of the cwnd used during linear growth + * @cwnd: current size of the congestion window + * @cwnd_lock: lock do protect @cwnd & @dec_cwnd + * @ss_threshold: Slow Start threshold. Once cwnd exceeds this value the + * connection switches to the Congestion Avoidance state + * @last_acked: last acked byte + * @last_sent: last sent byte, not yet acked + * @tot_sent: amount of data sent/ACKed so far + * @dup_acks: duplicate ACKs counter + * @fast_recovery: true if in Fast Recovery mode + * @recover: last sent seqno when entering Fast Recovery + * @rto: sender timeout + * @srtt: smoothed RTT scaled by 2^3 + * @rttvar: RTT variation scaled by 2^2 + * @more_bytes: waiting queue anchor when waiting for more ack/retry timeout + * @prerandom_offset: offset inside the prerandom buffer + * @prerandom_lock: spinlock protecting access to prerandom_offset + * @last_recv: last in-order received packet + * @unacked_list: list of unacked packets (meta-info only) + * @unacked_lock: protect unacked_list + * @last_recv_time: time time (jiffies) a msg was received + * @refcount: number of context where the object is used + * @rcu: struct used for freeing in an RCU-safe manner + */ + struct batadv_tp_vars { + struct hlist_node list; + struct timer_list timer; + struct batadv_priv *bat_priv; + unsigned long start_time; + u8 other_end[ETH_ALEN]; + enum batadv_tp_meter_role role; + atomic_t sending; + enum batadv_tp_meter_reason reason; + struct delayed_work finish_work; + u32 test_length; + u8 session[2]; + u8 icmp_uid; + + /* sender variables */ + u16 dec_cwnd; + u32 cwnd; + spinlock_t cwnd_lock; /* Protects cwnd & dec_cwnd */ + u32 ss_threshold; + atomic_t last_acked; + u32 last_sent; + atomic64_t tot_sent; + atomic_t dup_acks; + bool fast_recovery; + u32 recover; + u32 rto; + u32 srtt; + u32 rttvar; + wait_queue_head_t more_bytes; + u32 prerandom_offset; + spinlock_t prerandom_lock; /* Protects prerandom_offset */ + + /* receiver variables */ + u32 last_recv; + struct list_head unacked_list; + spinlock_t unacked_lock; /* Protects unacked_list */ + unsigned long last_recv_time; + struct kref refcount; + struct rcu_head rcu; + }; + + /** * 
struct batadv_softif_vlan - per VLAN attributes set * @bat_priv: pointer to the mesh object * @vid: VLAN identifier @@@ -867,8 -992,6 +994,6 @@@ struct batadv_priv_bat_v * enabled * @multicast_mode: Enable or disable multicast optimizations on this node's * sender/originating side - * @gw_mode: gateway operation: off, client or server (see batadv_gw_modes) - * @gw_sel_class: gateway selection class (applies if gw_mode client) * @orig_interval: OGM broadcast interval in milliseconds * @hop_penalty: penalty which will be applied to an OGM's tq-field on every hop * @log_level: configured log level (see batadv_dbg_level) @@@ -883,14 -1006,17 +1008,17 @@@ * @debug_dir: dentry for debugfs batman-adv subdirectory * @forw_bat_list: list of aggregated OGMs that will be forwarded * @forw_bcast_list: list of broadcast packets that will be rebroadcasted + * @tp_list: list of tp sessions + * @tp_num: number of currently active tp sessions * @orig_hash: hash table containing mesh participants (orig nodes) * @forw_bat_list_lock: lock protecting forw_bat_list * @forw_bcast_list_lock: lock protecting forw_bcast_list + * @tp_list_lock: spinlock protecting @tp_list * @orig_work: work queue callback item for orig node purging * @cleanup_work: work queue callback item for soft-interface deinit * @primary_if: one of the hard-interfaces assigned to this mesh interface * becomes the primary interface - * @bat_algo_ops: routing algorithm used by this mesh interface + * @algo_ops: routing algorithm used by this mesh interface * @softif_vlan_list: a list of softif_vlan structs, one per VLAN created on top * of the mesh interface represented by this object * @softif_vlan_list_lock: lock protecting softif_vlan_list @@@ -924,8 -1050,6 +1052,6 @@@ struct batadv_priv #ifdef CONFIG_BATMAN_ADV_MCAST atomic_t multicast_mode; #endif - atomic_t gw_mode; - atomic_t gw_sel_class; atomic_t orig_interval; atomic_t hop_penalty; #ifdef CONFIG_BATMAN_ADV_DEBUG @@@ -941,13 -1065,16 +1067,16 @@@ struct dentry *debug_dir; struct hlist_head forw_bat_list; struct hlist_head forw_bcast_list; + struct hlist_head tp_list; struct batadv_hashtable *orig_hash; spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */ + spinlock_t tp_list_lock; /* protects tp_list */ + atomic_t tp_num; struct delayed_work orig_work; struct work_struct cleanup_work; struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */ - struct batadv_algo_ops *bat_algo_ops; + struct batadv_algo_ops *algo_ops; struct hlist_head softif_vlan_list; spinlock_t softif_vlan_list_lock; /* protects softif_vlan_list */ #ifdef CONFIG_BATMAN_ADV_BLA @@@ -1044,7 -1171,6 +1173,7 @@@ struct batadv_bla_backbone_gw * @addr: mac address of claimed non-mesh client * @vid: vlan id this client was detected on * @backbone_gw: pointer to backbone gw claiming this client + * @backbone_lock: lock protecting backbone_gw pointer * @lasttime: last time we heard of claim (locals only) * @hash_entry: hlist node for batadv_priv_bla::claim_hash * @refcount: number of contexts the object is used @@@ -1054,7 -1180,6 +1183,7 @@@ struct batadv_bla_claim u8 addr[ETH_ALEN]; unsigned short vid; struct batadv_bla_backbone_gw *backbone_gw; + spinlock_t backbone_lock; /* protects backbone_gw */ unsigned long lasttime; struct hlist_node hash_entry; struct rcu_head rcu; @@@ -1265,66 -1390,77 +1394,77 @@@ struct batadv_forw_packet };
/** + * struct batadv_algo_iface_ops - mesh algorithm callbacks (interface specific) + * @activate: start routing mechanisms when hard-interface is brought up + * @enable: init routing info when hard-interface is enabled + * @disable: de-init routing info when hard-interface is disabled + * @update_mac: (re-)init mac addresses of the protocol information + * belonging to this hard-interface + * @primary_set: called when primary interface is selected / changed + */ + struct batadv_algo_iface_ops { + void (*activate)(struct batadv_hard_iface *hard_iface); + int (*enable)(struct batadv_hard_iface *hard_iface); + void (*disable)(struct batadv_hard_iface *hard_iface); + void (*update_mac)(struct batadv_hard_iface *hard_iface); + void (*primary_set)(struct batadv_hard_iface *hard_iface); + }; + + /** + * struct batadv_algo_neigh_ops - mesh algorithm callbacks (neighbour specific) + * @hardif_init: called on creation of single hop entry + * @cmp: compare the metrics of two neighbors for their respective outgoing + * interfaces + * @is_similar_or_better: check if neigh1 is equally similar or better than + * neigh2 for their respective outgoing interface from the metric prospective + * @print: print the single hop neighbor list (optional) + */ + struct batadv_algo_neigh_ops { + void (*hardif_init)(struct batadv_hardif_neigh_node *neigh); + int (*cmp)(struct batadv_neigh_node *neigh1, + struct batadv_hard_iface *if_outgoing1, + struct batadv_neigh_node *neigh2, + struct batadv_hard_iface *if_outgoing2); + bool (*is_similar_or_better)(struct batadv_neigh_node *neigh1, + struct batadv_hard_iface *if_outgoing1, + struct batadv_neigh_node *neigh2, + struct batadv_hard_iface *if_outgoing2); + void (*print)(struct batadv_priv *priv, struct seq_file *seq); + }; + + /** + * struct batadv_algo_orig_ops - mesh algorithm callbacks (originator specific) + * @free: free the resources allocated by the routing algorithm for an orig_node + * object + * @add_if: ask the routing algorithm to apply the needed changes to the + * orig_node due to a new hard-interface being added into the mesh + * @del_if: ask the routing algorithm to apply the needed changes to the + * orig_node due to an hard-interface being removed from the mesh + * @print: print the originator table (optional) + */ + struct batadv_algo_orig_ops { + void (*free)(struct batadv_orig_node *orig_node); + int (*add_if)(struct batadv_orig_node *orig_node, int max_if_num); + int (*del_if)(struct batadv_orig_node *orig_node, int max_if_num, + int del_if_num); + void (*print)(struct batadv_priv *priv, struct seq_file *seq, + struct batadv_hard_iface *hard_iface); + }; + + /** * struct batadv_algo_ops - mesh algorithm callbacks * @list: list node for the batadv_algo_list * @name: name of the algorithm - * @bat_iface_activate: start routing mechanisms when hard-interface is brought - * up - * @bat_iface_enable: init routing info when hard-interface is enabled - * @bat_iface_disable: de-init routing info when hard-interface is disabled - * @bat_iface_update_mac: (re-)init mac addresses of the protocol information - * belonging to this hard-interface - * @bat_primary_iface_set: called when primary interface is selected / changed - * @bat_ogm_schedule: prepare a new outgoing OGM for the send queue - * @bat_ogm_emit: send scheduled OGM - * @bat_hardif_neigh_init: called on creation of single hop entry - * @bat_neigh_cmp: compare the metrics of two neighbors for their respective - * outgoing interfaces - * @bat_neigh_is_similar_or_better: check if neigh1 is equally 
similar or - * better than neigh2 for their respective outgoing interface from the metric - * prospective - * @bat_neigh_print: print the single hop neighbor list (optional) - * @bat_neigh_free: free the resources allocated by the routing algorithm for a - * neigh_node object - * @bat_orig_print: print the originator table (optional) - * @bat_orig_free: free the resources allocated by the routing algorithm for an - * orig_node object - * @bat_orig_add_if: ask the routing algorithm to apply the needed changes to - * the orig_node due to a new hard-interface being added into the mesh - * @bat_orig_del_if: ask the routing algorithm to apply the needed changes to - * the orig_node due to an hard-interface being removed from the mesh + * @iface: callbacks related to interface handling + * @neigh: callbacks related to neighbors handling + * @orig: callbacks related to originators handling */ struct batadv_algo_ops { struct hlist_node list; char *name; - void (*bat_iface_activate)(struct batadv_hard_iface *hard_iface); - int (*bat_iface_enable)(struct batadv_hard_iface *hard_iface); - void (*bat_iface_disable)(struct batadv_hard_iface *hard_iface); - void (*bat_iface_update_mac)(struct batadv_hard_iface *hard_iface); - void (*bat_primary_iface_set)(struct batadv_hard_iface *hard_iface); - void (*bat_ogm_schedule)(struct batadv_hard_iface *hard_iface); - void (*bat_ogm_emit)(struct batadv_forw_packet *forw_packet); - /* neigh_node handling API */ - void (*bat_hardif_neigh_init)(struct batadv_hardif_neigh_node *neigh); - int (*bat_neigh_cmp)(struct batadv_neigh_node *neigh1, - struct batadv_hard_iface *if_outgoing1, - struct batadv_neigh_node *neigh2, - struct batadv_hard_iface *if_outgoing2); - bool (*bat_neigh_is_similar_or_better) - (struct batadv_neigh_node *neigh1, - struct batadv_hard_iface *if_outgoing1, - struct batadv_neigh_node *neigh2, - struct batadv_hard_iface *if_outgoing2); - void (*bat_neigh_print)(struct batadv_priv *priv, struct seq_file *seq); - void (*bat_neigh_free)(struct batadv_neigh_node *neigh); - /* orig_node handling API */ - void (*bat_orig_print)(struct batadv_priv *priv, struct seq_file *seq, - struct batadv_hard_iface *hard_iface); - void (*bat_orig_free)(struct batadv_orig_node *orig_node); - int (*bat_orig_add_if)(struct batadv_orig_node *orig_node, - int max_if_num); - int (*bat_orig_del_if)(struct batadv_orig_node *orig_node, - int max_if_num, int del_if_num); + struct batadv_algo_iface_ops iface; + struct batadv_algo_neigh_ops neigh; + struct batadv_algo_orig_ops orig; };
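With the callbacks regrouped into iface/neigh/orig sub-structures, a routing algorithm now fills the nested fields when registering itself. A hypothetical registration sketch (the my_* callbacks are trivial placeholders, not part of this commit; batadv_algo_register() is assumed to keep its existing signature, and the core may require more callbacks than shown):

static int my_iface_enable(struct batadv_hard_iface *hard_iface)
{
	return 0;
}

static void my_iface_disable(struct batadv_hard_iface *hard_iface)
{
}

static void my_iface_update_mac(struct batadv_hard_iface *hard_iface)
{
}

static void my_primary_set(struct batadv_hard_iface *hard_iface)
{
}

static int my_neigh_cmp(struct batadv_neigh_node *neigh1,
			struct batadv_hard_iface *if_outgoing1,
			struct batadv_neigh_node *neigh2,
			struct batadv_hard_iface *if_outgoing2)
{
	return 0;
}

static bool my_neigh_is_sob(struct batadv_neigh_node *neigh1,
			    struct batadv_hard_iface *if_outgoing1,
			    struct batadv_neigh_node *neigh2,
			    struct batadv_hard_iface *if_outgoing2)
{
	return true;
}

static struct batadv_algo_ops my_algo_ops = {
	.name = "EXAMPLE",
	.iface = {
		.enable      = my_iface_enable,
		.disable     = my_iface_disable,
		.update_mac  = my_iface_update_mac,
		.primary_set = my_primary_set,
	},
	.neigh = {
		.cmp                  = my_neigh_cmp,
		.is_similar_or_better = my_neigh_is_sob,
	},
	/* .orig.print and friends are optional */
};

static int __init my_algo_init(void)
{
	return batadv_algo_register(&my_algo_ops);
}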
/** diff --combined net/core/filter.c index e759d90,10c4a2f..d405f7a --- a/net/core/filter.c +++ b/net/core/filter.c @@@ -53,10 -53,9 +53,10 @@@ #include <net/sock_reuseport.h>
/** - * sk_filter - run a packet through a socket filter + * sk_filter_trim_cap - run a packet through a socket filter * @sk: sock associated with &sk_buff * @skb: buffer to filter + * @cap: limit on how short the eBPF program may trim the packet * * Run the eBPF program and then cut skb->data to correct size returned by * the program. If pkt_len is 0 we toss packet. If skb->len is smaller @@@ -65,7 -64,7 +65,7 @@@ * be accepted or -EPERM if the packet should be tossed. * */ -int sk_filter(struct sock *sk, struct sk_buff *skb) +int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap) { int err; struct sk_filter *filter; @@@ -86,13 -85,14 +86,13 @@@ filter = rcu_dereference(sk->sk_filter); if (filter) { unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb); - - err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; + err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM; } rcu_read_unlock();
return err; } -EXPORT_SYMBOL(sk_filter); +EXPORT_SYMBOL(sk_filter_trim_cap);
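The cap parameter lets callers bound how far the attached filter may trim the packet; sk_filter() itself is expected to become a thin wrapper around the new function with a one-byte cap, roughly as in this sketch (the real definition lives in include/linux/filter.h and may differ in detail):

static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	return sk_filter_trim_cap(sk, skb, 1);
}

Callers that must keep a protocol header intact can instead pass that header length as cap, so a socket filter can still drop the packet but can no longer trim it below the header.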
static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) { @@@ -150,6 -150,12 +150,12 @@@ static u64 __get_raw_cpu_id(u64 ctx, u6 return raw_smp_processor_id(); }
+ static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = { + .func = __get_raw_cpu_id, + .gpl_only = false, + .ret_type = RET_INTEGER, + }; + static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg, struct bpf_insn *insn_buf) { @@@ -748,6 -754,17 +754,17 @@@ static bool chk_code_allowed(u16 code_t return codes[code_to_probe]; }
+ static bool bpf_check_basics_ok(const struct sock_filter *filter, + unsigned int flen) + { + if (filter == NULL) + return false; + if (flen == 0 || flen > BPF_MAXINSNS) + return false; + + return true; + } + /** * bpf_check_classic - verify socket filter code * @filter: filter to verify @@@ -768,9 -785,6 +785,6 @@@ static int bpf_check_classic(const stru bool anc_found; int pc;
- if (flen == 0 || flen > BPF_MAXINSNS) - return -EINVAL; - /* Check the filter code now */ for (pc = 0; pc < flen; pc++) { const struct sock_filter *ftest = &filter[pc]; @@@ -1065,7 -1079,7 +1079,7 @@@ int bpf_prog_create(struct bpf_prog **p struct bpf_prog *fp;
/* Make sure new filter is there and in the right amounts. */ - if (fprog->filter == NULL) + if (!bpf_check_basics_ok(fprog->filter, fprog->len)) return -EINVAL;
fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); @@@ -1112,7 -1126,7 +1126,7 @@@ int bpf_prog_create_from_user(struct bp int err;
/* Make sure new filter is there and in the right amounts. */ - if (fprog->filter == NULL) + if (!bpf_check_basics_ok(fprog->filter, fprog->len)) return -EINVAL;
fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); @@@ -1207,7 -1221,6 +1221,6 @@@ stati struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk) { unsigned int fsize = bpf_classic_proglen(fprog); - unsigned int bpf_fsize = bpf_prog_size(fprog->len); struct bpf_prog *prog; int err;
@@@ -1215,10 -1228,10 +1228,10 @@@ return ERR_PTR(-EPERM);
/* Make sure new filter is there and in the right amounts. */ - if (fprog->filter == NULL) + if (!bpf_check_basics_ok(fprog->filter, fprog->len)) return ERR_PTR(-EINVAL);
- prog = bpf_prog_alloc(bpf_fsize, 0); + prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); if (!prog) return ERR_PTR(-ENOMEM);
@@@ -1288,21 -1301,10 +1301,10 @@@ int sk_reuseport_attach_filter(struct s
static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk) { - struct bpf_prog *prog; - if (sock_flag(sk, SOCK_FILTER_LOCKED)) return ERR_PTR(-EPERM);
- prog = bpf_prog_get(ufd); - if (IS_ERR(prog)) - return prog; - - if (prog->type != BPF_PROG_TYPE_SOCKET_FILTER) { - bpf_prog_put(prog); - return ERR_PTR(-EINVAL); - } - - return prog; + return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER); }
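bpf_prog_get_type() folds the fetch-and-type-check sequence removed above into a single helper; conceptually it is equivalent to the following sketch (the real helper lives in kernel/bpf/syscall.c and may differ in details):

static struct bpf_prog *bpf_prog_get_type_sketch(u32 ufd,
						 enum bpf_prog_type type)
{
	struct bpf_prog *prog = bpf_prog_get(ufd);

	if (IS_ERR(prog))
		return prog;

	if (prog->type != type) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}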
int sk_attach_bpf(u32 ufd, struct sock *sk) @@@ -1603,9 -1605,36 +1605,36 @@@ static const struct bpf_func_proto bpf_ .arg5_type = ARG_ANYTHING, };
+ static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) + { + if (skb_at_tc_ingress(skb)) + skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len); + + return dev_forward_skb(dev, skb); + } + + static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) + { + int ret; + + if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) { + net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); + kfree_skb(skb); + return -ENETDOWN; + } + + skb->dev = dev; + + __this_cpu_inc(xmit_recursion); + ret = dev_queue_xmit(skb); + __this_cpu_dec(xmit_recursion); + + return ret; + } + static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5) { - struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2; + struct sk_buff *skb = (struct sk_buff *) (long) r1; struct net_device *dev;
if (unlikely(flags & ~(BPF_F_INGRESS))) @@@ -1615,19 -1644,12 +1644,12 @@@ if (unlikely(!dev)) return -EINVAL;
- skb2 = skb_clone(skb, GFP_ATOMIC); - if (unlikely(!skb2)) + skb = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!skb)) return -ENOMEM;
- if (flags & BPF_F_INGRESS) { - if (skb_at_tc_ingress(skb2)) - skb_postpush_rcsum(skb2, skb_mac_header(skb2), - skb2->mac_len); - return dev_forward_skb(dev, skb2); - } - - skb2->dev = dev; - return dev_queue_xmit(skb2); + return flags & BPF_F_INGRESS ? + __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); }
static const struct bpf_func_proto bpf_clone_redirect_proto = { @@@ -1671,15 -1693,8 +1693,8 @@@ int skb_do_redirect(struct sk_buff *skb return -EINVAL; }
- if (ri->flags & BPF_F_INGRESS) { - if (skb_at_tc_ingress(skb)) - skb_postpush_rcsum(skb, skb_mac_header(skb), - skb->mac_len); - return dev_forward_skb(dev, skb); - } - - skb->dev = dev; - return dev_queue_xmit(skb); + return ri->flags & BPF_F_INGRESS ? + __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); }
static const struct bpf_func_proto bpf_redirect_proto = { @@@ -1714,6 -1729,23 +1729,23 @@@ static const struct bpf_func_proto bpf_ .arg1_type = ARG_PTR_TO_CTX, };
+ static u64 bpf_get_hash_recalc(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) + { + /* If skb_clear_hash() was called due to mangling, we can + * trigger SW recalculation here. Later access to hash + * can then use the inline skb->hash via context directly + * instead of calling this helper again. + */ + return skb_get_hash((struct sk_buff *) (unsigned long) r1); + } + + static const struct bpf_func_proto bpf_get_hash_recalc_proto = { + .func = bpf_get_hash_recalc, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + }; + static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5) { struct sk_buff *skb = (struct sk_buff *) (long) r1; @@@ -1757,6 -1789,224 +1789,224 @@@ const struct bpf_func_proto bpf_skb_vla }; EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto);
+ static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len) + { + /* Caller already did skb_cow() with len as headroom, + * so no need to do it here. + */ + skb_push(skb, len); + memmove(skb->data, skb->data + len, off); + memset(skb->data + off, 0, len); + + /* No skb_postpush_rcsum(skb, skb->data + off, len) + * needed here as it does not change the skb->csum + * result for checksum complete when summing over + * zeroed blocks. + */ + return 0; + } + + static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len) + { + /* skb_ensure_writable() is not needed here, as we're + * already working on an uncloned skb. + */ + if (unlikely(!pskb_may_pull(skb, off + len))) + return -ENOMEM; + + skb_postpull_rcsum(skb, skb->data + off, len); + memmove(skb->data + len, skb->data, off); + __skb_pull(skb, len); + + return 0; + } + + static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len) + { + bool trans_same = skb->transport_header == skb->network_header; + int ret; + + /* There's no need for __skb_push()/__skb_pull() pair to + * get to the start of the mac header as we're guaranteed + * to always start from here under eBPF. + */ + ret = bpf_skb_generic_push(skb, off, len); + if (likely(!ret)) { + skb->mac_header -= len; + skb->network_header -= len; + if (trans_same) + skb->transport_header = skb->network_header; + } + + return ret; + } + + static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len) + { + bool trans_same = skb->transport_header == skb->network_header; + int ret; + + /* Same here, __skb_push()/__skb_pull() pair not needed. */ + ret = bpf_skb_generic_pop(skb, off, len); + if (likely(!ret)) { + skb->mac_header += len; + skb->network_header += len; + if (trans_same) + skb->transport_header = skb->network_header; + } + + return ret; + } + + static int bpf_skb_proto_4_to_6(struct sk_buff *skb) + { + const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr); + u32 off = skb->network_header - skb->mac_header; + int ret; + + ret = skb_cow(skb, len_diff); + if (unlikely(ret < 0)) + return ret; + + ret = bpf_skb_net_hdr_push(skb, off, len_diff); + if (unlikely(ret < 0)) + return ret; + + if (skb_is_gso(skb)) { + /* SKB_GSO_UDP stays as is. SKB_GSO_TCPV4 needs to + * be changed into SKB_GSO_TCPV6. + */ + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { + skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV4; + skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; + } + + /* Due to IPv6 header, MSS needs to be downgraded. */ + skb_shinfo(skb)->gso_size -= len_diff; + /* Header must be checked, and gso_segs recomputed. */ + skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; + skb_shinfo(skb)->gso_segs = 0; + } + + skb->protocol = htons(ETH_P_IPV6); + skb_clear_hash(skb); + + return 0; + } + + static int bpf_skb_proto_6_to_4(struct sk_buff *skb) + { + const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr); + u32 off = skb->network_header - skb->mac_header; + int ret; + + ret = skb_unclone(skb, GFP_ATOMIC); + if (unlikely(ret < 0)) + return ret; + + ret = bpf_skb_net_hdr_pop(skb, off, len_diff); + if (unlikely(ret < 0)) + return ret; + + if (skb_is_gso(skb)) { + /* SKB_GSO_UDP stays as is. SKB_GSO_TCPV6 needs to + * be changed into SKB_GSO_TCPV4. + */ + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { + skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV6; + skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; + } + + /* Due to IPv4 header, MSS can be upgraded. */ + skb_shinfo(skb)->gso_size += len_diff; + /* Header must be checked, and gso_segs recomputed. 
*/ + skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; + skb_shinfo(skb)->gso_segs = 0; + } + + skb->protocol = htons(ETH_P_IP); + skb_clear_hash(skb); + + return 0; + } + + static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto) + { + __be16 from_proto = skb->protocol; + + if (from_proto == htons(ETH_P_IP) && + to_proto == htons(ETH_P_IPV6)) + return bpf_skb_proto_4_to_6(skb); + + if (from_proto == htons(ETH_P_IPV6) && + to_proto == htons(ETH_P_IP)) + return bpf_skb_proto_6_to_4(skb); + + return -ENOTSUPP; + } + + static u64 bpf_skb_change_proto(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5) + { + struct sk_buff *skb = (struct sk_buff *) (long) r1; + __be16 proto = (__force __be16) r2; + int ret; + + if (unlikely(flags)) + return -EINVAL; + + /* General idea is that this helper does the basic groundwork + * needed for changing the protocol, and eBPF program fills the + * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace() + * and other helpers, rather than passing a raw buffer here. + * + * The rationale is to keep this minimal and without a need to + * deal with raw packet data. F.e. even if we would pass buffers + * here, the program still needs to call the bpf_lX_csum_replace() + * helpers anyway. Plus, this way we keep also separation of + * concerns, since f.e. bpf_skb_store_bytes() should only take + * care of stores. + * + * Currently, additional options and extension header space are + * not supported, but flags register is reserved so we can adapt + * that. For offloads, we mark packet as dodgy, so that headers + * need to be verified first. + */ + ret = bpf_skb_proto_xlat(skb, proto); + bpf_compute_data_end(skb); + return ret; + } + + static const struct bpf_func_proto bpf_skb_change_proto_proto = { + .func = bpf_skb_change_proto, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, + }; + + static u64 bpf_skb_change_type(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) + { + struct sk_buff *skb = (struct sk_buff *) (long) r1; + u32 pkt_type = r2; + + /* We only allow a restricted subset to be changed for now. */ + if (unlikely(skb->pkt_type > PACKET_OTHERHOST || + pkt_type > PACKET_OTHERHOST)) + return -EINVAL; + + skb->pkt_type = pkt_type; + return 0; + } + + static const struct bpf_func_proto bpf_skb_change_type_proto = { + .func = bpf_skb_change_type, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + }; + bool bpf_helper_changes_skb_data(void *func) { if (func == bpf_skb_vlan_push) @@@ -1765,6 -2015,8 +2015,8 @@@ return true; if (func == bpf_skb_store_bytes) return true; + if (func == bpf_skb_change_proto) + return true; if (func == bpf_l3_csum_replace) return true; if (func == bpf_l4_csum_replace) @@@ -2004,6 -2256,40 +2256,40 @@@ bpf_get_skb_set_tunnel_proto(enum bpf_f } }
+ #ifdef CONFIG_SOCK_CGROUP_DATA + static u64 bpf_skb_in_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) + { + struct sk_buff *skb = (struct sk_buff *)(long)r1; + struct bpf_map *map = (struct bpf_map *)(long)r2; + struct bpf_array *array = container_of(map, struct bpf_array, map); + struct cgroup *cgrp; + struct sock *sk; + u32 i = (u32)r3; + + sk = skb->sk; + if (!sk || !sk_fullsock(sk)) + return -ENOENT; + + if (unlikely(i >= array->map.max_entries)) + return -E2BIG; + + cgrp = READ_ONCE(array->ptrs[i]); + if (unlikely(!cgrp)) + return -EAGAIN; + + return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), cgrp); + } + + static const struct bpf_func_proto bpf_skb_in_cgroup_proto = { + .func = bpf_skb_in_cgroup, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, + }; + #endif + static const struct bpf_func_proto * sk_filter_func_proto(enum bpf_func_id func_id) { @@@ -2017,7 -2303,7 +2303,7 @@@ case BPF_FUNC_get_prandom_u32: return &bpf_get_prandom_u32_proto; case BPF_FUNC_get_smp_processor_id: - return &bpf_get_smp_processor_id_proto; + return &bpf_get_raw_smp_processor_id_proto; case BPF_FUNC_tail_call: return &bpf_tail_call_proto; case BPF_FUNC_ktime_get_ns: @@@ -2052,6 -2338,10 +2338,10 @@@ tc_cls_act_func_proto(enum bpf_func_id return &bpf_skb_vlan_push_proto; case BPF_FUNC_skb_vlan_pop: return &bpf_skb_vlan_pop_proto; + case BPF_FUNC_skb_change_proto: + return &bpf_skb_change_proto_proto; + case BPF_FUNC_skb_change_type: + return &bpf_skb_change_type_proto; case BPF_FUNC_skb_get_tunnel_key: return &bpf_skb_get_tunnel_key_proto; case BPF_FUNC_skb_set_tunnel_key: @@@ -2064,8 -2354,16 +2354,16 @@@ return &bpf_redirect_proto; case BPF_FUNC_get_route_realm: return &bpf_get_route_realm_proto; + case BPF_FUNC_get_hash_recalc: + return &bpf_get_hash_recalc_proto; case BPF_FUNC_perf_event_output: return bpf_get_event_output_proto(); + case BPF_FUNC_get_smp_processor_id: + return &bpf_get_smp_processor_id_proto; + #ifdef CONFIG_SOCK_CGROUP_DATA + case BPF_FUNC_skb_in_cgroup: + return &bpf_skb_in_cgroup_proto; + #endif default: return sk_filter_func_proto(func_id); } diff --combined net/ipv4/tcp_input.c index 91868bb,94d4aff..fda0844 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@@ -87,7 -87,7 +87,7 @@@ int sysctl_tcp_adv_win_scale __read_mos EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
/* rfc5961 challenge ack rate limiting */ -int sysctl_tcp_challenge_ack_limit = 100; +int sysctl_tcp_challenge_ack_limit = 1000;
int sysctl_tcp_stdurg __read_mostly; int sysctl_tcp_rfc1337 __read_mostly; @@@ -3115,6 -3115,7 +3115,7 @@@ static int tcp_clean_rtx_queue(struct s long ca_rtt_us = -1L; struct sk_buff *skb; u32 pkts_acked = 0; + u32 last_in_flight = 0; bool rtt_update; int flag = 0;
@@@ -3154,6 -3155,7 +3155,7 @@@ if (!first_ackt.v64) first_ackt = last_ackt;
+ last_in_flight = TCP_SKB_CB(skb)->tx.in_flight; reord = min(pkts_acked, reord); if (!after(scb->end_seq, tp->high_seq)) flag |= FLAG_ORIG_SACK_ACKED; @@@ -3250,7 -3252,8 +3252,8 @@@
if (icsk->icsk_ca_ops->pkts_acked) { struct ack_sample sample = { .pkts_acked = pkts_acked, - .rtt_us = ca_rtt_us }; + .rtt_us = ca_rtt_us, + .in_flight = last_in_flight };
icsk->icsk_ca_ops->pkts_acked(sk, &sample); } @@@ -3458,7 -3461,7 +3461,7 @@@ static void tcp_send_challenge_ack(stru static u32 challenge_timestamp; static unsigned int challenge_count; struct tcp_sock *tp = tcp_sk(sk); - u32 now; + u32 count, now;
/* First check our per-socket dupack rate limit. */ if (tcp_oow_rate_limited(sock_net(sk), skb, @@@ -3466,18 -3469,13 +3469,18 @@@ &tp->last_oow_ack_time)) return;
- /* Then check the check host-wide RFC 5961 rate limit. */ + /* Then check host-wide RFC 5961 rate limit. */ now = jiffies / HZ; if (now != challenge_timestamp) { + u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1; + challenge_timestamp = now; - challenge_count = 0; + WRITE_ONCE(challenge_count, half + + prandom_u32_max(sysctl_tcp_challenge_ack_limit)); } - if (++challenge_count <= sysctl_tcp_challenge_ack_limit) { + count = READ_ONCE(challenge_count); + if (count > 0) { + WRITE_ONCE(challenge_count, count - 1); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK); tcp_send_ack(sk); } @@@ -5164,6 -5162,7 +5167,7 @@@ static bool tcp_validate_incoming(struc const struct tcphdr *th, int syn_inerr) { struct tcp_sock *tp = tcp_sk(sk); + bool rst_seq_match = false;
/* RFC1323: H1. Apply PAWS check first. */ if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && @@@ -5200,13 -5199,32 +5204,32 @@@
/* Step 2: check RST bit */ if (th->rst) { - /* RFC 5961 3.2 : - * If sequence number exactly matches RCV.NXT, then + /* RFC 5961 3.2 (extend to match against SACK too if available): + * If seq num matches RCV.NXT or the right-most SACK block, + * then * RESET the connection * else * Send a challenge ACK */ - if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) + if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { + rst_seq_match = true; + } else if (tcp_is_sack(tp) && tp->rx_opt.num_sacks > 0) { + struct tcp_sack_block *sp = &tp->selective_acks[0]; + int max_sack = sp[0].end_seq; + int this_sack; + + for (this_sack = 1; this_sack < tp->rx_opt.num_sacks; + ++this_sack) { + max_sack = after(sp[this_sack].end_seq, + max_sack) ? + sp[this_sack].end_seq : max_sack; + } + + if (TCP_SKB_CB(skb)->seq == max_sack) + rst_seq_match = true; + } + + if (rst_seq_match) tcp_reset(sk); else tcp_send_challenge_ack(sk, skb); diff --combined net/ipv6/udp.c index acc09705,0a71a312d..ad5292b --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@@ -620,8 -620,6 +620,8 @@@ int udpv6_queue_rcv_skb(struct sock *sk
if (sk_filter(sk, skb)) goto drop; + if (unlikely(skb->len < sizeof(struct udphdr))) + goto drop;
udp_csum_pull_header(skb); if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { @@@ -1209,6 -1207,11 +1209,11 @@@ do_udp_sendmsg
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ if (ipc6.tclass < 0) + ipc6.tclass = np->tclass; + + fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); + dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); @@@ -1219,9 -1222,6 +1224,6 @@@ if (ipc6.hlimit < 0) ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
- if (ipc6.tclass < 0) - ipc6.tclass = np->tclass; - if (msg->msg_flags&MSG_CONFIRM) goto do_confirm; back_from_confirm: diff --combined net/netfilter/nf_conntrack_core.c index 9f530ad,153e33f..0ad9368 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@@ -327,16 -327,10 +327,10 @@@ struct nf_conn *nf_ct_tmpl_alloc(struc
tmpl->status = IPS_TEMPLATE; write_pnet(&tmpl->ct_net, net); - - if (nf_ct_zone_add(tmpl, flags, zone) < 0) - goto out_free; - + nf_ct_zone_add(tmpl, zone); atomic_set(&tmpl->ct_general.use, 0);
return tmpl; - out_free: - kfree(tmpl); - return NULL; } EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
@@@ -646,7 -640,6 +640,7 @@@ static int nf_ct_resolve_clash(struct n
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); if (l4proto->allow_clash && + !nfct_nat(ct) && !nf_ct_is_dying(ct) && atomic_inc_not_zero(&ct->ct_general.use)) { nf_ct_acct_merge(ct, ctinfo, (struct nf_conn *)skb->nfct); @@@ -930,16 -923,13 +924,13 @@@ __nf_conntrack_alloc(struct net *net offsetof(struct nf_conn, proto) - offsetof(struct nf_conn, __nfct_init_offset[0]));
- if (zone && nf_ct_zone_add(ct, GFP_ATOMIC, zone) < 0) - goto out_free; + nf_ct_zone_add(ct, zone);
/* Because we use RCU lookups, we set ct_general.use to zero before * this is inserted in any list. */ atomic_set(&ct->ct_general.use, 0); return ct; - out_free: - kmem_cache_free(nf_conntrack_cachep, ct); out: atomic_dec(&net->ct.count); return ERR_PTR(-ENOMEM); @@@ -1343,14 -1333,6 +1334,6 @@@ bool __nf_ct_kill_acct(struct nf_conn * } EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
- #ifdef CONFIG_NF_CONNTRACK_ZONES - static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = { - .len = sizeof(struct nf_conntrack_zone), - .align = __alignof__(struct nf_conntrack_zone), - .id = NF_CT_EXT_ZONE, - }; - #endif - #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
#include <linux/netfilter/nfnetlink.h> @@@ -1533,9 -1515,6 +1516,6 @@@ void nf_conntrack_cleanup_end(void
nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_htable_size);
- #ifdef CONFIG_NF_CONNTRACK_ZONES - nf_ct_extend_unregister(&nf_ct_zone_extend); - #endif nf_conntrack_proto_fini(); nf_conntrack_seqadj_fini(); nf_conntrack_labels_fini(); @@@ -1602,15 -1581,8 +1582,15 @@@ void *nf_ct_alloc_hashtable(unsigned in unsigned int nr_slots, i; size_t sz;
+ if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head))) + return NULL; + BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head)); nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head)); + + if (nr_slots > (UINT_MAX / sizeof(struct hlist_nulls_head))) + return NULL; + sz = nr_slots * sizeof(struct hlist_nulls_head); hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, get_order(sz)); @@@ -1625,24 -1597,14 +1605,14 @@@ } EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
- int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) + int nf_conntrack_hash_resize(unsigned int hashsize) { - int i, bucket, rc; - unsigned int hashsize, old_size; + int i, bucket; + unsigned int old_size; struct hlist_nulls_head *hash, *old_hash; struct nf_conntrack_tuple_hash *h; struct nf_conn *ct;
- if (current->nsproxy->net_ns != &init_net) - return -EOPNOTSUPP; - - /* On boot, we can set this without any fancy locking. */ - if (!nf_conntrack_htable_size) - return param_set_uint(val, kp); - - rc = kstrtouint(val, 0, &hashsize); - if (rc) - return rc; if (!hashsize) return -EINVAL;
@@@ -1650,6 -1612,12 +1620,12 @@@ if (!hash) return -ENOMEM;
+ old_size = nf_conntrack_htable_size; + if (old_size == hashsize) { + nf_ct_free_hashtable(hash, hashsize); + return 0; + } + local_bh_disable(); nf_conntrack_all_lock(); write_seqcount_begin(&nf_conntrack_generation); @@@ -1685,6 -1653,25 +1661,25 @@@ nf_ct_free_hashtable(old_hash, old_size); return 0; } + + int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) + { + unsigned int hashsize; + int rc; + + if (current->nsproxy->net_ns != &init_net) + return -EOPNOTSUPP; + + /* On boot, we can set this without any fancy locking. */ + if (!nf_conntrack_htable_size) + return param_set_uint(val, kp); + + rc = kstrtouint(val, 0, &hashsize); + if (rc) + return rc; + + return nf_conntrack_hash_resize(hashsize); + } EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
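nf_conntrack_set_hashsize() stays wired to the "hashsize" module parameter below, so a runtime resize request now ends up in the new nf_conntrack_hash_resize(). A small userspace illustration of triggering it (assumes sysfs, a loaded nf_conntrack module and sufficient privileges; the value 65536 is arbitrary):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/module/nf_conntrack/parameters/hashsize", "w");

	if (!f) {
		perror("hashsize");
		return 1;
	}
	/* kernel side: param_set -> nf_conntrack_set_hashsize()
	 *                        -> nf_conntrack_hash_resize()
	 */
	fprintf(f, "%u\n", 65536U);
	fclose(f);
	return 0;
}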
module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint, @@@ -1741,7 -1728,7 +1736,7 @@@ int nf_conntrack_init_start(void
nf_conntrack_cachep = kmem_cache_create("nf_conntrack", sizeof(struct nf_conn), 0, - SLAB_DESTROY_BY_RCU, NULL); + SLAB_DESTROY_BY_RCU | SLAB_HWCACHE_ALIGN, NULL); if (!nf_conntrack_cachep) goto err_cachep;
@@@ -1781,11 -1768,6 +1776,6 @@@ if (ret < 0) goto err_seqadj;
- #ifdef CONFIG_NF_CONNTRACK_ZONES - ret = nf_ct_extend_register(&nf_ct_zone_extend); - if (ret < 0) - goto err_extend; - #endif ret = nf_conntrack_proto_init(); if (ret < 0) goto err_proto; @@@ -1801,10 -1783,6 +1791,6 @@@ return 0;
err_proto: - #ifdef CONFIG_NF_CONNTRACK_ZONES - nf_ct_extend_unregister(&nf_ct_zone_extend); - err_extend: - #endif nf_conntrack_seqadj_fini(); err_seqadj: nf_conntrack_labels_fini(); diff --combined net/netfilter/nf_tables_api.c index cf7c745,18b7f85..f24bed0 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@@ -131,29 -131,8 +131,8 @@@ static void nft_trans_destroy(struct nf kfree(trans); }
- static int nft_register_basechain(struct nft_base_chain *basechain, - unsigned int hook_nops) - { - struct net *net = read_pnet(&basechain->pnet); - - if (basechain->flags & NFT_BASECHAIN_DISABLED) - return 0; - - return nf_register_net_hooks(net, basechain->ops, hook_nops); - } - - static void nft_unregister_basechain(struct nft_base_chain *basechain, - unsigned int hook_nops) - { - struct net *net = read_pnet(&basechain->pnet); - - if (basechain->flags & NFT_BASECHAIN_DISABLED) - return; - - nf_unregister_net_hooks(net, basechain->ops, hook_nops); - } - - static int nf_tables_register_hooks(const struct nft_table *table, + static int nf_tables_register_hooks(struct net *net, + const struct nft_table *table, struct nft_chain *chain, unsigned int hook_nops) { @@@ -161,10 -140,12 +140,12 @@@ !(chain->flags & NFT_BASE_CHAIN)) return 0;
- return nft_register_basechain(nft_base_chain(chain), hook_nops); + return nf_register_net_hooks(net, nft_base_chain(chain)->ops, + hook_nops); }
- static void nf_tables_unregister_hooks(const struct nft_table *table, + static void nf_tables_unregister_hooks(struct net *net, + const struct nft_table *table, struct nft_chain *chain, unsigned int hook_nops) { @@@ -172,12 -153,9 +153,9 @@@ !(chain->flags & NFT_BASE_CHAIN)) return;
- nft_unregister_basechain(nft_base_chain(chain), hook_nops); + nf_unregister_net_hooks(net, nft_base_chain(chain)->ops, hook_nops); }
- /* Internal table flags */ - #define NFT_TABLE_INACTIVE (1 << 15) - static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type) { struct nft_trans *trans; @@@ -187,7 -165,7 +165,7 @@@ return -ENOMEM;
if (msg_type == NFT_MSG_NEWTABLE) - ctx->table->flags |= NFT_TABLE_INACTIVE; + nft_activate_next(ctx->net, ctx->table);
list_add_tail(&trans->list, &ctx->net->nft.commit_list); return 0; @@@ -201,7 -179,7 +179,7 @@@ static int nft_deltable(struct nft_ctx if (err < 0) return err;
- list_del_rcu(&ctx->table->list); + nft_deactivate_next(ctx->net, ctx->table); return err; }
@@@ -214,7 -192,7 +192,7 @@@ static int nft_trans_chain_add(struct n return -ENOMEM;
if (msg_type == NFT_MSG_NEWCHAIN) - ctx->chain->flags |= NFT_CHAIN_INACTIVE; + nft_activate_next(ctx->net, ctx->chain);
list_add_tail(&trans->list, &ctx->net->nft.commit_list); return 0; @@@ -229,47 -207,17 +207,17 @@@ static int nft_delchain(struct nft_ctx return err;
ctx->table->use--; - list_del_rcu(&ctx->chain->list); + nft_deactivate_next(ctx->net, ctx->chain);
return err; }
- static inline bool - nft_rule_is_active(struct net *net, const struct nft_rule *rule) - { - return (rule->genmask & nft_genmask_cur(net)) == 0; - } - - static inline int - nft_rule_is_active_next(struct net *net, const struct nft_rule *rule) - { - return (rule->genmask & nft_genmask_next(net)) == 0; - } - - static inline void - nft_rule_activate_next(struct net *net, struct nft_rule *rule) - { - /* Now inactive, will be active in the future */ - rule->genmask = nft_genmask_cur(net); - } - - static inline void - nft_rule_deactivate_next(struct net *net, struct nft_rule *rule) - { - rule->genmask = nft_genmask_next(net); - } - - static inline void nft_rule_clear(struct net *net, struct nft_rule *rule) - { - rule->genmask &= ~nft_genmask_next(net); - } - static int nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule) { /* You cannot delete the same rule twice */ - if (nft_rule_is_active_next(ctx->net, rule)) { - nft_rule_deactivate_next(ctx->net, rule); + if (nft_is_active_next(ctx->net, rule)) { + nft_deactivate_next(ctx->net, rule); ctx->chain->use--; return 0; } @@@ -322,9 -270,6 +270,6 @@@ static int nft_delrule_by_chain(struct return 0; }
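The per-object NFT_*_INACTIVE flags and the rule-only helpers removed above give way to generic two-generation genmask accessors (nft_activate_next(), nft_deactivate_next(), nft_is_active_next(), ...) shared by tables, chains, sets and rules. Modelled directly on the removed rule helpers, they behave roughly like this sketch (the real definitions live in include/net/netfilter/nf_tables.h and may differ in form):

/* each object carries a 2-bit genmask; a cleared bit means "active in
 * that generation", and nft_genmask_cur()/nft_genmask_next() select the
 * bit of the current resp. next generation
 */
#define nft_is_active(__net, __obj) \
	(((__obj)->genmask & nft_genmask_cur(__net)) == 0)

#define nft_is_active_next(__net, __obj) \
	(((__obj)->genmask & nft_genmask_next(__net)) == 0)

/* inactive now, becomes active with the next generation flip */
#define nft_activate_next(__net, __obj) \
	((__obj)->genmask = nft_genmask_cur(__net))

/* stays active now, becomes inactive with the next generation flip */
#define nft_deactivate_next(__net, __obj) \
	((__obj)->genmask = nft_genmask_next(__net))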
- /* Internal set flag */ - #define NFT_SET_INACTIVE (1 << 15) - static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type, struct nft_set *set) { @@@ -337,7 -282,7 +282,7 @@@ if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] != NULL) { nft_trans_set_id(trans) = ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID])); - set->flags |= NFT_SET_INACTIVE; + nft_activate_next(ctx->net, set); } nft_trans_set(trans) = set; list_add_tail(&trans->list, &ctx->net->nft.commit_list); @@@ -353,7 -298,7 +298,7 @@@ static int nft_delset(struct nft_ctx *c if (err < 0) return err;
- list_del_rcu(&set->list); + nft_deactivate_next(ctx->net, set); ctx->table->use--;
return err; @@@ -364,26 -309,29 +309,29 @@@ */
static struct nft_table *nft_table_lookup(const struct nft_af_info *afi, - const struct nlattr *nla) + const struct nlattr *nla, + u8 genmask) { struct nft_table *table;
list_for_each_entry(table, &afi->tables, list) { - if (!nla_strcmp(nla, table->name)) + if (!nla_strcmp(nla, table->name) && + nft_active_genmask(table, genmask)) return table; } return NULL; }
static struct nft_table *nf_tables_table_lookup(const struct nft_af_info *afi, - const struct nlattr *nla) + const struct nlattr *nla, + u8 genmask) { struct nft_table *table;
if (nla == NULL) return ERR_PTR(-EINVAL);
- table = nft_table_lookup(afi, nla); + table = nft_table_lookup(afi, nla, genmask); if (table != NULL) return table;
@@@ -524,6 -472,8 +472,8 @@@ static int nf_tables_dump_tables(struc if (idx > s_idx) memset(&cb->args[1], 0, sizeof(cb->args) - sizeof(cb->args[0])); + if (!nft_is_active(net, table)) + continue; if (nf_tables_fill_table_info(skb, net, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, @@@ -548,6 -498,7 +498,7 @@@ static int nf_tables_gettable(struct ne const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); + u8 genmask = nft_genmask_cur(net); const struct nft_af_info *afi; const struct nft_table *table; struct sk_buff *skb2; @@@ -565,11 -516,9 +516,9 @@@ if (IS_ERR(afi)) return PTR_ERR(afi);
- table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]); + table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME], genmask); if (IS_ERR(table)) return PTR_ERR(table); - if (table->flags & NFT_TABLE_INACTIVE) - return -ENOENT;
skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb2) @@@ -588,17 -537,21 +537,21 @@@ err return err; }
- static int nf_tables_table_enable(const struct nft_af_info *afi, + static int nf_tables_table_enable(struct net *net, + const struct nft_af_info *afi, struct nft_table *table) { struct nft_chain *chain; int err, i = 0;
list_for_each_entry(chain, &table->chains, list) { + if (!nft_is_active_next(net, chain)) + continue; if (!(chain->flags & NFT_BASE_CHAIN)) continue;
- err = nft_register_basechain(nft_base_chain(chain), afi->nops); + err = nf_register_net_hooks(net, nft_base_chain(chain)->ops, + afi->nops); if (err < 0) goto err;
@@@ -607,26 -560,34 +560,34 @@@ return 0; err: list_for_each_entry(chain, &table->chains, list) { + if (!nft_is_active_next(net, chain)) + continue; if (!(chain->flags & NFT_BASE_CHAIN)) continue;
if (i-- <= 0) break;
- nft_unregister_basechain(nft_base_chain(chain), afi->nops); + nf_unregister_net_hooks(net, nft_base_chain(chain)->ops, + afi->nops); } return err; }
- static void nf_tables_table_disable(const struct nft_af_info *afi, + static void nf_tables_table_disable(struct net *net, + const struct nft_af_info *afi, struct nft_table *table) { struct nft_chain *chain;
list_for_each_entry(chain, &table->chains, list) { - if (chain->flags & NFT_BASE_CHAIN) - nft_unregister_basechain(nft_base_chain(chain), - afi->nops); + if (!nft_is_active_next(net, chain)) + continue; + if (!(chain->flags & NFT_BASE_CHAIN)) + continue; + + nf_unregister_net_hooks(net, nft_base_chain(chain)->ops, + afi->nops); } }
@@@ -656,7 -617,7 +617,7 @@@ static int nf_tables_updtable(struct nf nft_trans_table_enable(trans) = false; } else if (!(flags & NFT_TABLE_F_DORMANT) && ctx->table->flags & NFT_TABLE_F_DORMANT) { - ret = nf_tables_table_enable(ctx->afi, ctx->table); + ret = nf_tables_table_enable(ctx->net, ctx->afi, ctx->table); if (ret >= 0) { ctx->table->flags &= ~NFT_TABLE_F_DORMANT; nft_trans_table_enable(trans) = true; @@@ -678,6 -639,7 +639,7 @@@ static int nf_tables_newtable(struct ne const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); + u8 genmask = nft_genmask_next(net); const struct nlattr *name; struct nft_af_info *afi; struct nft_table *table; @@@ -691,7 -653,7 +653,7 @@@ return PTR_ERR(afi);
name = nla[NFTA_TABLE_NAME]; - table = nf_tables_table_lookup(afi, name); + table = nf_tables_table_lookup(afi, name, genmask); if (IS_ERR(table)) { if (PTR_ERR(table) != -ENOENT) return PTR_ERR(table); @@@ -699,8 -661,6 +661,6 @@@ }
if (table != NULL) { - if (table->flags & NFT_TABLE_INACTIVE) - return -ENOENT; if (nlh->nlmsg_flags & NLM_F_EXCL) return -EEXIST; if (nlh->nlmsg_flags & NLM_F_REPLACE) @@@ -752,6 -712,9 +712,9 @@@ static int nft_flush_table(struct nft_c struct nft_set *set, *ns;
list_for_each_entry(chain, &ctx->table->chains, list) { + if (!nft_is_active_next(ctx->net, chain)) + continue; + ctx->chain = chain;
err = nft_delrule_by_chain(ctx); @@@ -760,6 -723,9 +723,9 @@@ }
list_for_each_entry_safe(set, ns, &ctx->table->sets, list) { + if (!nft_is_active_next(ctx->net, set)) + continue; + if (set->flags & NFT_SET_ANONYMOUS && !list_empty(&set->bindings)) continue; @@@ -770,6 -736,9 +736,9 @@@ }
list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) { + if (!nft_is_active_next(ctx->net, chain)) + continue; + ctx->chain = chain;
err = nft_delchain(ctx); @@@ -795,6 -764,9 +764,9 @@@ static int nft_flush(struct nft_ctx *ct
ctx->afi = afi; list_for_each_entry_safe(table, nt, &afi->tables, list) { + if (!nft_is_active_next(ctx->net, table)) + continue; + if (nla[NFTA_TABLE_NAME] && nla_strcmp(nla[NFTA_TABLE_NAME], table->name) != 0) continue; @@@ -815,6 -787,7 +787,7 @@@ static int nf_tables_deltable(struct ne const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); + u8 genmask = nft_genmask_next(net); struct nft_af_info *afi; struct nft_table *table; int family = nfmsg->nfgen_family; @@@ -828,7 -801,7 +801,7 @@@ if (IS_ERR(afi)) return PTR_ERR(afi);
- table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]); + table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME], genmask); if (IS_ERR(table)) return PTR_ERR(table);
@@@ -875,12 -848,14 +848,14 @@@ EXPORT_SYMBOL_GPL(nft_unregister_chain_ */
static struct nft_chain * - nf_tables_chain_lookup_byhandle(const struct nft_table *table, u64 handle) + nf_tables_chain_lookup_byhandle(const struct nft_table *table, u64 handle, + u8 genmask) { struct nft_chain *chain;
list_for_each_entry(chain, &table->chains, list) { - if (chain->handle == handle) + if (chain->handle == handle && + nft_active_genmask(chain, genmask)) return chain; }
@@@ -888,7 -863,8 +863,8 @@@ }
static struct nft_chain *nf_tables_chain_lookup(const struct nft_table *table, - const struct nlattr *nla) + const struct nlattr *nla, + u8 genmask) { struct nft_chain *chain;
@@@ -896,7 -872,8 +872,8 @@@ return ERR_PTR(-EINVAL);
list_for_each_entry(chain, &table->chains, list) { - if (!nla_strcmp(nla, chain->name)) + if (!nla_strcmp(nla, chain->name) && + nft_active_genmask(chain, genmask)) return chain; }
@@@ -1079,6 -1056,8 +1056,8 @@@ static int nf_tables_dump_chains(struc if (idx > s_idx) memset(&cb->args[1], 0, sizeof(cb->args) - sizeof(cb->args[0])); + if (!nft_is_active(net, chain)) + continue; if (nf_tables_fill_chain_info(skb, net, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, @@@ -1104,6 -1083,7 +1083,7 @@@ static int nf_tables_getchain(struct ne const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); + u8 genmask = nft_genmask_cur(net); const struct nft_af_info *afi; const struct nft_table *table; const struct nft_chain *chain; @@@ -1122,17 -1102,13 +1102,13 @@@ if (IS_ERR(afi)) return PTR_ERR(afi);
- table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]); + table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE], genmask); if (IS_ERR(table)) return PTR_ERR(table); - if (table->flags & NFT_TABLE_INACTIVE) - return -ENOENT;
- chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]); + chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME], genmask); if (IS_ERR(chain)) return PTR_ERR(chain); - if (chain->flags & NFT_CHAIN_INACTIVE) - return -ENOENT;
skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb2) @@@ -1231,6 -1207,7 +1207,7 @@@ static int nf_tables_newchain(struct ne struct nft_chain *chain; struct nft_base_chain *basechain = NULL; struct nlattr *ha[NFTA_HOOK_MAX + 1]; + u8 genmask = nft_genmask_next(net); int family = nfmsg->nfgen_family; struct net_device *dev = NULL; u8 policy = NF_ACCEPT; @@@ -1247,7 -1224,7 +1224,7 @@@ if (IS_ERR(afi)) return PTR_ERR(afi);
- table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]); + table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE], genmask); if (IS_ERR(table)) return PTR_ERR(table);
@@@ -1256,11 -1233,11 +1233,11 @@@
if (nla[NFTA_CHAIN_HANDLE]) { handle = be64_to_cpu(nla_get_be64(nla[NFTA_CHAIN_HANDLE])); - chain = nf_tables_chain_lookup_byhandle(table, handle); + chain = nf_tables_chain_lookup_byhandle(table, handle, genmask); if (IS_ERR(chain)) return PTR_ERR(chain); } else { - chain = nf_tables_chain_lookup(table, name); + chain = nf_tables_chain_lookup(table, name, genmask); if (IS_ERR(chain)) { if (PTR_ERR(chain) != -ENOENT) return PTR_ERR(chain); @@@ -1291,16 -1268,20 +1268,20 @@@ struct nft_stats *stats = NULL; struct nft_trans *trans;
- if (chain->flags & NFT_CHAIN_INACTIVE) - return -ENOENT; if (nlh->nlmsg_flags & NLM_F_EXCL) return -EEXIST; if (nlh->nlmsg_flags & NLM_F_REPLACE) return -EOPNOTSUPP;
- if (nla[NFTA_CHAIN_HANDLE] && name && - !IS_ERR(nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]))) - return -EEXIST; + if (nla[NFTA_CHAIN_HANDLE] && name) { + struct nft_chain *chain2; + + chain2 = nf_tables_chain_lookup(table, + nla[NFTA_CHAIN_NAME], + genmask); + if (IS_ERR(chain2)) + return PTR_ERR(chain2); + }
if (nla[NFTA_CHAIN_COUNTERS]) { if (!(chain->flags & NFT_BASE_CHAIN)) @@@ -1455,7 -1436,7 +1436,7 @@@ chain->table = table; nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN);
- err = nf_tables_register_hooks(table, chain, afi->nops); + err = nf_tables_register_hooks(net, table, chain, afi->nops); if (err < 0) goto err1;
@@@ -1468,7 -1449,7 +1449,7 @@@ list_add_tail_rcu(&chain->list, &table->chains); return 0; err2: - nf_tables_unregister_hooks(table, chain, afi->nops); + nf_tables_unregister_hooks(net, table, chain, afi->nops); err1: nf_tables_chain_destroy(chain); return err; @@@ -1479,6 -1460,7 +1460,7 @@@ static int nf_tables_delchain(struct ne const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); + u8 genmask = nft_genmask_next(net); struct nft_af_info *afi; struct nft_table *table; struct nft_chain *chain; @@@ -1489,11 -1471,11 +1471,11 @@@ if (IS_ERR(afi)) return PTR_ERR(afi);
- table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]); + table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE], genmask); if (IS_ERR(table)) return PTR_ERR(table);
- chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]); + chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME], genmask); if (IS_ERR(chain)) return PTR_ERR(chain); if (chain->use > 0) @@@ -1724,11 -1706,9 +1706,11 @@@ struct nft_expr *nft_expr_init(const st
err = nf_tables_newexpr(ctx, &info, expr); if (err < 0) - goto err2; + goto err3;
return expr; +err3: + kfree(expr); err2: module_put(info.ops->type->owner); err1: @@@ -1900,7 -1880,7 +1882,7 @@@ static int nf_tables_dump_rules(struct list_for_each_entry_rcu(table, &afi->tables, list) { list_for_each_entry_rcu(chain, &table->chains, list) { list_for_each_entry_rcu(rule, &chain->rules, list) { - if (!nft_rule_is_active(net, rule)) + if (!nft_is_active(net, rule)) goto cont; if (idx < s_idx) goto cont; @@@ -1933,6 -1913,7 +1915,7 @@@ static int nf_tables_getrule(struct ne const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); + u8 genmask = nft_genmask_cur(net); const struct nft_af_info *afi; const struct nft_table *table; const struct nft_chain *chain; @@@ -1952,17 -1933,13 +1935,13 @@@ if (IS_ERR(afi)) return PTR_ERR(afi);
- table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]); + table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE], genmask); if (IS_ERR(table)) return PTR_ERR(table); - if (table->flags & NFT_TABLE_INACTIVE) - return -ENOENT;
- chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]); + chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN], genmask); if (IS_ERR(chain)) return PTR_ERR(chain); - if (chain->flags & NFT_CHAIN_INACTIVE) - return -ENOENT;
rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]); if (IS_ERR(rule)) @@@ -2011,6 -1988,7 +1990,7 @@@ static int nf_tables_newrule(struct ne const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); + u8 genmask = nft_genmask_next(net); struct nft_af_info *afi; struct nft_table *table; struct nft_chain *chain; @@@ -2031,11 -2009,11 +2011,11 @@@ if (IS_ERR(afi)) return PTR_ERR(afi);
- table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]); + table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE], genmask); if (IS_ERR(table)) return PTR_ERR(table);
- chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]); + chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN], genmask); if (IS_ERR(chain)) return PTR_ERR(chain);
@@@ -2104,7 -2082,7 +2084,7 @@@ if (rule == NULL) goto err1;
- nft_rule_activate_next(net, rule); + nft_activate_next(net, rule);
rule->handle = handle; rule->dlen = size; @@@ -2126,14 -2104,14 +2106,14 @@@ }
if (nlh->nlmsg_flags & NLM_F_REPLACE) { - if (nft_rule_is_active_next(net, old_rule)) { + if (nft_is_active_next(net, old_rule)) { trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE, old_rule); if (trans == NULL) { err = -ENOMEM; goto err2; } - nft_rule_deactivate_next(net, old_rule); + nft_deactivate_next(net, old_rule); chain->use--; list_add_tail_rcu(&rule->list, &old_rule->list); } else { @@@ -2176,6 -2154,7 +2156,7 @@@ static int nf_tables_delrule(struct ne const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); + u8 genmask = nft_genmask_next(net); struct nft_af_info *afi; struct nft_table *table; struct nft_chain *chain = NULL; @@@ -2187,12 -2166,13 +2168,13 @@@ if (IS_ERR(afi)) return PTR_ERR(afi);
- table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]); + table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE], genmask); if (IS_ERR(table)) return PTR_ERR(table);
if (nla[NFTA_RULE_CHAIN]) { - chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]); + chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN], + genmask); if (IS_ERR(chain)) return PTR_ERR(chain); } @@@ -2212,6 -2192,9 +2194,9 @@@ } } else { list_for_each_entry(chain, &table->chains, list) { + if (!nft_is_active_next(net, chain)) + continue; + ctx.chain = chain; err = nft_delrule_by_chain(&ctx); if (err < 0) @@@ -2341,7 -2324,8 +2326,8 @@@ static const struct nla_policy nft_set_ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net, const struct sk_buff *skb, const struct nlmsghdr *nlh, - const struct nlattr * const nla[]) + const struct nlattr * const nla[], + u8 genmask) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); struct nft_af_info *afi = NULL; @@@ -2357,7 -2341,8 +2343,8 @@@ if (afi == NULL) return -EAFNOSUPPORT;
- table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]); + table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE], + genmask); if (IS_ERR(table)) return PTR_ERR(table); } @@@ -2367,7 -2352,7 +2354,7 @@@ }
struct nft_set *nf_tables_set_lookup(const struct nft_table *table, - const struct nlattr *nla) + const struct nlattr *nla, u8 genmask) { struct nft_set *set;
@@@ -2375,22 -2360,27 +2362,27 @@@ return ERR_PTR(-EINVAL);
list_for_each_entry(set, &table->sets, list) { - if (!nla_strcmp(nla, set->name)) + if (!nla_strcmp(nla, set->name) && + nft_active_genmask(set, genmask)) return set; } return ERR_PTR(-ENOENT); }
struct nft_set *nf_tables_set_lookup_byid(const struct net *net, - const struct nlattr *nla) + const struct nlattr *nla, + u8 genmask) { struct nft_trans *trans; u32 id = ntohl(nla_get_be32(nla));
list_for_each_entry(trans, &net->nft.commit_list, list) { + struct nft_set *set = nft_trans_set(trans); + if (trans->msg_type == NFT_MSG_NEWSET && - id == nft_trans_set_id(trans)) - return nft_trans_set(trans); + id == nft_trans_set_id(trans) && + nft_active_genmask(set, genmask)) + return set; } return ERR_PTR(-ENOENT); } @@@ -2415,6 -2405,8 +2407,8 @@@ cont list_for_each_entry(i, &ctx->table->sets, list) { int tmp;
+ if (!nft_is_active_next(ctx->net, set)) + continue; if (!sscanf(i->name, name, &tmp)) continue; if (tmp < min || tmp >= min + BITS_PER_BYTE * PAGE_SIZE) @@@ -2434,6 -2426,8 +2428,8 @@@
snprintf(set->name, sizeof(set->name), name, min + n); list_for_each_entry(i, &ctx->table->sets, list) { + if (!nft_is_active_next(ctx->net, i)) + continue; if (!strcmp(set->name, i->name)) return -ENFILE; } @@@ -2582,6 -2576,8 +2578,8 @@@ static int nf_tables_dump_sets(struct s list_for_each_entry_rcu(set, &table->sets, list) { if (idx < s_idx) goto cont; + if (!nft_is_active(net, set)) + goto cont;
ctx_set = *ctx; ctx_set.table = table; @@@ -2618,6 -2614,7 +2616,7 @@@ static int nf_tables_getset(struct net struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { + u8 genmask = nft_genmask_cur(net); const struct nft_set *set; struct nft_ctx ctx; struct sk_buff *skb2; @@@ -2625,7 -2622,7 +2624,7 @@@ int err;
/* Verify existence before starting dump */ - err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla); + err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla, genmask); if (err < 0) return err;
@@@ -2652,11 -2649,9 +2651,9 @@@ if (!nla[NFTA_SET_TABLE]) return -EINVAL;
- set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]); + set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME], genmask); if (IS_ERR(set)) return PTR_ERR(set); - if (set->flags & NFT_SET_INACTIVE) - return -ENOENT;
skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (skb2 == NULL) @@@ -2695,6 -2690,7 +2692,7 @@@ static int nf_tables_newset(struct net const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); + u8 genmask = nft_genmask_next(net); const struct nft_set_ops *ops; struct nft_af_info *afi; struct nft_table *table; @@@ -2792,13 -2788,13 +2790,13 @@@ if (IS_ERR(afi)) return PTR_ERR(afi);
- table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]); + table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE], genmask); if (IS_ERR(table)) return PTR_ERR(table);
nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
- set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME]); + set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME], genmask); if (IS_ERR(set)) { if (PTR_ERR(set) != -ENOENT) return PTR_ERR(set); @@@ -2897,6 -2893,7 +2895,7 @@@ static int nf_tables_delset(struct net const struct nlattr * const nla[]) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); + u8 genmask = nft_genmask_next(net); struct nft_set *set; struct nft_ctx ctx; int err; @@@ -2906,11 -2903,11 +2905,11 @@@ if (nla[NFTA_SET_TABLE] == NULL) return -EINVAL;
- err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla); + err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla, genmask); if (err < 0) return err;
- set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]); + set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME], genmask); if (IS_ERR(set)) return PTR_ERR(set); if (!list_empty(&set->bindings)) @@@ -2975,7 -2972,7 +2974,7 @@@ void nf_tables_unbind_set(const struct list_del_rcu(&binding->list);
if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS && - !(set->flags & NFT_SET_INACTIVE)) + nft_is_active(ctx->net, set)) nf_tables_set_destroy(ctx, set); }
@@@ -3031,7 -3028,8 +3030,8 @@@ static const struct nla_policy nft_set_ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, struct net *net, const struct sk_buff *skb, const struct nlmsghdr *nlh, - const struct nlattr * const nla[]) + const struct nlattr * const nla[], + u8 genmask) { const struct nfgenmsg *nfmsg = nlmsg_data(nlh); struct nft_af_info *afi; @@@ -3041,7 -3039,8 +3041,8 @@@ if (IS_ERR(afi)) return PTR_ERR(afi);
- table = nf_tables_table_lookup(afi, nla[NFTA_SET_ELEM_LIST_TABLE]); + table = nf_tables_table_lookup(afi, nla[NFTA_SET_ELEM_LIST_TABLE], + genmask); if (IS_ERR(table)) return PTR_ERR(table);
@@@ -3138,6 -3137,7 +3139,7 @@@ static int nf_tables_dump_setelem(cons static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); + u8 genmask = nft_genmask_cur(net); const struct nft_set *set; struct nft_set_dump_args args; struct nft_ctx ctx; @@@ -3154,17 -3154,14 +3156,14 @@@ return err;
err = nft_ctx_init_from_elemattr(&ctx, net, cb->skb, cb->nlh, - (void *)nla); + (void *)nla, genmask); if (err < 0) return err; - if (ctx.table->flags & NFT_TABLE_INACTIVE) - return -ENOENT;
- set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]); + set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], + genmask); if (IS_ERR(set)) return PTR_ERR(set); - if (set->flags & NFT_SET_INACTIVE) - return -ENOENT;
event = NFT_MSG_NEWSETELEM; event |= NFNL_SUBSYS_NFTABLES << 8; @@@ -3218,21 -3215,19 +3217,19 @@@ static int nf_tables_getsetelem(struct struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { + u8 genmask = nft_genmask_cur(net); const struct nft_set *set; struct nft_ctx ctx; int err;
- err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla); + err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, genmask); if (err < 0) return err; - if (ctx.table->flags & NFT_TABLE_INACTIVE) - return -ENOENT;
- set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]); + set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], + genmask); if (IS_ERR(set)) return PTR_ERR(set); - if (set->flags & NFT_SET_INACTIVE) - return -ENOENT;
if (nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { @@@ -3550,6 -3545,7 +3547,7 @@@ static int nf_tables_newsetelem(struct struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { + u8 genmask = nft_genmask_next(net); const struct nlattr *attr; struct nft_set *set; struct nft_ctx ctx; @@@ -3558,15 -3554,17 +3556,17 @@@ if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) return -EINVAL;
- err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla); + err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, genmask); if (err < 0) return err;
- set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]); + set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], + genmask); if (IS_ERR(set)) { if (nla[NFTA_SET_ELEM_LIST_SET_ID]) { set = nf_tables_set_lookup_byid(net, - nla[NFTA_SET_ELEM_LIST_SET_ID]); + nla[NFTA_SET_ELEM_LIST_SET_ID], + genmask); } if (IS_ERR(set)) return PTR_ERR(set); @@@ -3672,6 -3670,7 +3672,7 @@@ static int nf_tables_delsetelem(struct struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[]) { + u8 genmask = nft_genmask_next(net); const struct nlattr *attr; struct nft_set *set; struct nft_ctx ctx; @@@ -3680,11 -3679,12 +3681,12 @@@ if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) return -EINVAL;
- err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla); + err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, genmask); if (err < 0) return err;
- set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]); + set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], + genmask); if (IS_ERR(set)) return PTR_ERR(set); if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT) @@@ -3952,36 -3952,40 +3954,40 @@@ static int nf_tables_commit(struct net case NFT_MSG_NEWTABLE: if (nft_trans_table_update(trans)) { if (!nft_trans_table_enable(trans)) { - nf_tables_table_disable(trans->ctx.afi, + nf_tables_table_disable(net, + trans->ctx.afi, trans->ctx.table); trans->ctx.table->flags |= NFT_TABLE_F_DORMANT; } } else { - trans->ctx.table->flags &= ~NFT_TABLE_INACTIVE; + nft_clear(net, trans->ctx.table); } nf_tables_table_notify(&trans->ctx, NFT_MSG_NEWTABLE); nft_trans_destroy(trans); break; case NFT_MSG_DELTABLE: + list_del_rcu(&trans->ctx.table->list); nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE); break; case NFT_MSG_NEWCHAIN: if (nft_trans_chain_update(trans)) nft_chain_commit_update(trans); else - trans->ctx.chain->flags &= ~NFT_CHAIN_INACTIVE; + nft_clear(net, trans->ctx.chain);
nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); nft_trans_destroy(trans); break; case NFT_MSG_DELCHAIN: + list_del_rcu(&trans->ctx.chain->list); nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN); - nf_tables_unregister_hooks(trans->ctx.table, + nf_tables_unregister_hooks(trans->ctx.net, + trans->ctx.table, trans->ctx.chain, trans->ctx.afi->nops); break; case NFT_MSG_NEWRULE: - nft_rule_clear(trans->ctx.net, nft_trans_rule(trans)); + nft_clear(trans->ctx.net, nft_trans_rule(trans)); nf_tables_rule_notify(&trans->ctx, nft_trans_rule(trans), NFT_MSG_NEWRULE); @@@ -3994,7 -3998,7 +4000,7 @@@ NFT_MSG_DELRULE); break; case NFT_MSG_NEWSET: - nft_trans_set(trans)->flags &= ~NFT_SET_INACTIVE; + nft_clear(net, nft_trans_set(trans)); /* This avoids hitting -EBUSY when deleting the table * from the transaction. */ @@@ -4007,6 -4011,7 +4013,7 @@@ nft_trans_destroy(trans); break; case NFT_MSG_DELSET: + list_del_rcu(&nft_trans_set(trans)->list); nf_tables_set_notify(&trans->ctx, nft_trans_set(trans), NFT_MSG_DELSET, GFP_KERNEL); break; @@@ -4078,7 -4083,8 +4085,8 @@@ static int nf_tables_abort(struct net * case NFT_MSG_NEWTABLE: if (nft_trans_table_update(trans)) { if (nft_trans_table_enable(trans)) { - nf_tables_table_disable(trans->ctx.afi, + nf_tables_table_disable(net, + trans->ctx.afi, trans->ctx.table); trans->ctx.table->flags |= NFT_TABLE_F_DORMANT; } @@@ -4088,8 -4094,7 +4096,7 @@@ } break; case NFT_MSG_DELTABLE: - list_add_tail_rcu(&trans->ctx.table->list, - &trans->ctx.afi->tables); + nft_clear(trans->ctx.net, trans->ctx.table); nft_trans_destroy(trans); break; case NFT_MSG_NEWCHAIN: @@@ -4100,15 -4105,15 +4107,15 @@@ } else { trans->ctx.table->use--; list_del_rcu(&trans->ctx.chain->list); - nf_tables_unregister_hooks(trans->ctx.table, + nf_tables_unregister_hooks(trans->ctx.net, + trans->ctx.table, trans->ctx.chain, trans->ctx.afi->nops); } break; case NFT_MSG_DELCHAIN: trans->ctx.table->use++; - list_add_tail_rcu(&trans->ctx.chain->list, - &trans->ctx.table->chains); + nft_clear(trans->ctx.net, trans->ctx.chain); nft_trans_destroy(trans); break; case NFT_MSG_NEWRULE: @@@ -4117,7 -4122,7 +4124,7 @@@ break; case NFT_MSG_DELRULE: trans->ctx.chain->use++; - nft_rule_clear(trans->ctx.net, nft_trans_rule(trans)); + nft_clear(trans->ctx.net, nft_trans_rule(trans)); nft_trans_destroy(trans); break; case NFT_MSG_NEWSET: @@@ -4126,8 -4131,7 +4133,7 @@@ break; case NFT_MSG_DELSET: trans->ctx.table->use++; - list_add_tail_rcu(&nft_trans_set(trans)->list, - &trans->ctx.table->sets); + nft_clear(trans->ctx.net, nft_trans_set(trans)); nft_trans_destroy(trans); break; case NFT_MSG_NEWSETELEM: @@@ -4274,6 -4278,8 +4280,8 @@@ static int nf_tables_check_loops(const }
list_for_each_entry(set, &ctx->table->sets, list) { + if (!nft_is_active_next(ctx->net, set)) + continue; if (!(set->flags & NFT_SET_MAP) || set->dtype != NFT_DATA_VERDICT) continue; @@@ -4432,6 -4438,7 +4440,7 @@@ static const struct nla_policy nft_verd static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, struct nft_data_desc *desc, const struct nlattr *nla) { + u8 genmask = nft_genmask_next(ctx->net); struct nlattr *tb[NFTA_VERDICT_MAX + 1]; struct nft_chain *chain; int err; @@@ -4464,7 -4471,7 +4473,7 @@@ if (!tb[NFTA_VERDICT_CHAIN]) return -EINVAL; chain = nf_tables_chain_lookup(ctx->table, - tb[NFTA_VERDICT_CHAIN]); + tb[NFTA_VERDICT_CHAIN], genmask); if (IS_ERR(chain)) return PTR_ERR(chain); if (chain->flags & NFT_BASE_CHAIN) @@@ -4642,7 -4649,7 +4651,7 @@@ int __nft_release_basechain(struct nft_
BUG_ON(!(ctx->chain->flags & NFT_BASE_CHAIN));
- nf_tables_unregister_hooks(ctx->chain->table, ctx->chain, + nf_tables_unregister_hooks(ctx->net, ctx->chain->table, ctx->chain, ctx->afi->nops); list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) { list_del(&rule->list); @@@ -4671,7 -4678,8 +4680,8 @@@ static void __nft_release_afinfo(struc
list_for_each_entry_safe(table, nt, &afi->tables, list) { list_for_each_entry(chain, &table->chains, list) - nf_tables_unregister_hooks(table, chain, afi->nops); + nf_tables_unregister_hooks(net, table, chain, + afi->nops); /* No packets are walking on these chains anymore. */ ctx.table = table; list_for_each_entry(chain, &table->chains, list) { diff --combined net/netfilter/nft_meta.c index f4bad9d,03e5e33..2863f34 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@@ -199,13 -199,6 +199,6 @@@ err } EXPORT_SYMBOL_GPL(nft_meta_get_eval);
- /* don't change or set _LOOPBACK, _USER, etc. */ - static bool pkt_type_ok(u32 p) - { - return p == PACKET_HOST || p == PACKET_BROADCAST || - p == PACKET_MULTICAST || p == PACKET_OTHERHOST; - } - void nft_meta_set_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) @@@ -223,11 -216,11 +216,11 @@@ break; case NFT_META_PKTTYPE: if (skb->pkt_type != value && - pkt_type_ok(value) && pkt_type_ok(skb->pkt_type)) + skb_pkt_type_ok(value) && skb_pkt_type_ok(skb->pkt_type)) skb->pkt_type = value; break; case NFT_META_NFTRACE: - skb->nf_trace = 1; + skb->nf_trace = !!value; break; default: WARN_ON(1); diff --combined net/tipc/bearer.c index a597708,8584cc4..4131d5a --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@@ -1,7 -1,7 +1,7 @@@ /* * net/tipc/bearer.c: TIPC bearer code * - * Copyright (c) 1996-2006, 2013-2014, Ericsson AB + * Copyright (c) 1996-2006, 2013-2016, Ericsson AB * Copyright (c) 2004-2006, 2010-2013, Wind River Systems * All rights reserved. * @@@ -39,6 -39,7 +39,7 @@@ #include "bearer.h" #include "link.h" #include "discover.h" + #include "monitor.h" #include "bcast.h" #include "netlink.h"
@@@ -313,6 -314,10 +314,10 @@@ restart rcu_assign_pointer(tn->bearer_list[bearer_id], b); if (skb) tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr); + + if (tipc_mon_create(net, bearer_id)) + return -ENOMEM; + pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n", name, tipc_addr_string_fill(addr_string, disc_domain), priority); @@@ -330,21 -335,6 +335,21 @@@ static int tipc_reset_bearer(struct ne return 0; }
+/* tipc_bearer_reset_all - reset all links on all bearers + */ +void tipc_bearer_reset_all(struct net *net) +{ + struct tipc_net *tn = tipc_net(net); + struct tipc_bearer *b; + int i; + + for (i = 0; i < MAX_BEARERS; i++) { + b = rcu_dereference_rtnl(tn->bearer_list[i]); + if (b) + tipc_reset_bearer(net, b); + } +} + /** * bearer_disable * @@@ -363,6 -353,7 +368,7 @@@ static void bearer_disable(struct net * tipc_disc_delete(b->link_req); RCU_INIT_POINTER(tn->bearer_list[bearer_id], NULL); kfree_rcu(b, rcu); + tipc_mon_delete(net, bearer_id); }
int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, diff --combined net/tipc/bearer.h index 60e49c3,0d337c7..f1e6db5 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h @@@ -1,7 -1,7 +1,7 @@@ /* * net/tipc/bearer.h: Include file for TIPC bearer code * - * Copyright (c) 1996-2006, 2013-2014, Ericsson AB + * Copyright (c) 1996-2006, 2013-2016, Ericsson AB * Copyright (c) 2005, 2010-2011, Wind River Systems * All rights reserved. * @@@ -198,7 -198,6 +198,7 @@@ void tipc_bearer_add_dest(struct net *n void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest); struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name); struct tipc_media *tipc_media_find(const char *name); +void tipc_bearer_reset_all(struct net *net); int tipc_bearer_setup(void); void tipc_bearer_cleanup(void); void tipc_bearer_stop(struct net *net); diff --combined net/tipc/link.c index 7d89f87,c1df33f..877d94f --- a/net/tipc/link.c +++ b/net/tipc/link.c @@@ -42,6 -42,7 +42,7 @@@ #include "name_distr.h" #include "discover.h" #include "netlink.h" + #include "monitor.h"
#include <linux/pkt_sched.h>
@@@ -87,7 -88,6 +88,6 @@@ struct tipc_stats * @peer_bearer_id: bearer id used by link's peer endpoint * @bearer_id: local bearer id used by link * @tolerance: minimum link continuity loss needed to reset link [in ms] - * @keepalive_intv: link keepalive timer interval * @abort_limit: # of unacknowledged continuity probes needed to reset link * @state: current state of link FSM * @peer_caps: bitmap describing capabilities of peer node @@@ -96,6 -96,7 +96,7 @@@ * @pmsg: convenience pointer to "proto_msg" field * @priority: current link priority * @net_plane: current link network plane ('A' through 'H') + * @mon_state: cookie with information needed by link monitor * @backlog_limit: backlog queue congestion thresholds (indexed by importance) * @exp_msg_count: # of tunnelled messages expected during link changeover * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset @@@ -131,7 -132,6 +132,6 @@@ struct tipc_link u32 peer_bearer_id; u32 bearer_id; u32 tolerance; - unsigned long keepalive_intv; u32 abort_limit; u32 state; u16 peer_caps; @@@ -140,6 -140,7 +140,7 @@@ char if_name[TIPC_MAX_IF_NAME]; u32 priority; char net_plane; + struct tipc_mon_state mon_state; u16 rst_cnt;
/* Failover/synch */ @@@ -349,8 -350,6 +350,8 @@@ void tipc_link_remove_bc_peer(struct ti u16 ack = snd_l->snd_nxt - 1;
snd_l->ackers--; + rcv_l->bc_peer_is_up = true; + rcv_l->state = LINK_ESTABLISHED; tipc_link_bc_ack_rcv(rcv_l, ack, xmitq); tipc_link_reset(rcv_l); rcv_l->state = LINK_RESET; @@@ -713,18 -712,25 +714,25 @@@ int tipc_link_timeout(struct tipc_link bool setup = false; u16 bc_snt = l->bc_sndlink->snd_nxt - 1; u16 bc_acked = l->bc_rcvlink->acked; - - link_profile_stats(l); + struct tipc_mon_state *mstate = &l->mon_state;
switch (l->state) { case LINK_ESTABLISHED: case LINK_SYNCHING: - if (l->silent_intv_cnt > l->abort_limit) - return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); mtyp = STATE_MSG; + link_profile_stats(l); + tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id); + if (mstate->reset || (l->silent_intv_cnt > l->abort_limit)) + return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); state = bc_acked != bc_snt; - probe = l->silent_intv_cnt; - l->silent_intv_cnt++; + state |= l->bc_rcvlink->rcv_unacked; + state |= l->rcv_unacked; + state |= !skb_queue_empty(&l->transmq); + state |= !skb_queue_empty(&l->deferdq); + probe = mstate->probing; + probe |= l->silent_intv_cnt; + if (probe || mstate->monitoring) + l->silent_intv_cnt++; break; case LINK_RESET: setup = l->rst_cnt++ <= 4; @@@ -835,6 -841,7 +843,7 @@@ void tipc_link_reset(struct tipc_link * l->stats.recv_info = 0; l->stale_count = 0; l->bc_peer_is_up = false; + memset(&l->mon_state, 0, sizeof(l->mon_state)); tipc_link_reset_stats(l); }
@@@ -1243,6 -1250,9 +1252,9 @@@ static void tipc_link_build_proto_msg(s struct tipc_msg *hdr; struct sk_buff_head *dfq = &l->deferdq; bool node_up = link_is_up(l->bc_rcvlink); + struct tipc_mon_state *mstate = &l->mon_state; + int dlen = 0; + void *data;
/* Don't send protocol message during reset or link failover */ if (tipc_link_is_blocked(l)) @@@ -1255,12 -1265,13 +1267,13 @@@ rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE, - TIPC_MAX_IF_NAME, l->addr, + tipc_max_domain_size, l->addr, tipc_own_addr(l->net), 0, 0, 0); if (!skb) return;
hdr = buf_msg(skb); + data = msg_data(hdr); msg_set_session(hdr, l->session); msg_set_bearer_id(hdr, l->bearer_id); msg_set_net_plane(hdr, l->net_plane); @@@ -1276,14 -1287,18 +1289,18 @@@
if (mtyp == STATE_MSG) { msg_set_seq_gap(hdr, rcvgap); - msg_set_size(hdr, INT_H_SIZE); msg_set_probe(hdr, probe); + tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id); + msg_set_size(hdr, INT_H_SIZE + dlen); + skb_trim(skb, INT_H_SIZE + dlen); l->stats.sent_states++; l->rcv_unacked = 0; } else { /* RESET_MSG or ACTIVATE_MSG */ msg_set_max_pkt(hdr, l->advertised_mtu); - strcpy(msg_data(hdr), l->if_name); + strcpy(data, l->if_name); + msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME); + skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME); } if (probe) l->stats.sent_probes++; @@@ -1376,7 -1391,9 +1393,9 @@@ static int tipc_link_proto_rcv(struct t u16 peers_tol = msg_link_tolerance(hdr); u16 peers_prio = msg_linkprio(hdr); u16 rcv_nxt = l->rcv_nxt; + u16 dlen = msg_data_sz(hdr); int mtyp = msg_type(hdr); + void *data; char *if_name; int rc = 0;
@@@ -1386,6 -1403,10 +1405,10 @@@ if (tipc_own_addr(l->net) > msg_prevnode(hdr)) l->net_plane = msg_net_plane(hdr);
+ skb_linearize(skb); + hdr = buf_msg(skb); + data = msg_data(hdr); + switch (mtyp) { case RESET_MSG:
@@@ -1396,8 -1417,6 +1419,6 @@@ /* fall thru' */
case ACTIVATE_MSG: - skb_linearize(skb); - hdr = buf_msg(skb);
/* Complete own link name with peer's interface name */ if_name = strrchr(l->name, ':') + 1; @@@ -1405,7 -1424,7 +1426,7 @@@ break; if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME) break; - strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME); + strncpy(if_name, data, TIPC_MAX_IF_NAME);
/* Update own tolerance if peer indicates a non-zero value */ if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) @@@ -1453,6 -1472,8 +1474,8 @@@ rc = TIPC_LINK_UP_EVT; break; } + tipc_mon_rcv(l->net, data, dlen, l->addr, + &l->mon_state, l->bearer_id);
/* Send NACK if peer has sent pkts we haven't received yet */ if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l)) @@@ -1561,12 -1582,7 +1584,12 @@@ void tipc_link_bc_sync_rcv(struct tipc_ if (!msg_peer_node_is_up(hdr)) return;
- l->bc_peer_is_up = true; + /* Open when peer acknowledges our bcast init msg (pkt #1) */ + if (msg_ack(hdr)) + l->bc_peer_is_up = true; + + if (!l->bc_peer_is_up) + return;
/* Ignore if peers_snd_nxt goes beyond receive window */ if (more(peers_snd_nxt, l->rcv_nxt + l->window)) diff --combined net/tipc/node.c index 23d4761,a3fc0a3..95cc78b --- a/net/tipc/node.c +++ b/net/tipc/node.c @@@ -40,6 -40,7 +40,7 @@@ #include "name_distr.h" #include "socket.h" #include "bcast.h" + #include "monitor.h" #include "discover.h" #include "netlink.h"
@@@ -205,17 -206,6 +206,6 @@@ u16 tipc_node_get_capabilities(struct n return caps; }
- /* - * A trivial power-of-two bitmask technique is used for speed, since this - * operation is done for every incoming TIPC packet. The number of hash table - * entries has been chosen so that no hash chain exceeds 8 nodes and will - * usually be much smaller (typically only a single node). - */ - static unsigned int tipc_hashfn(u32 addr) - { - return addr & (NODE_HTABLE_SIZE - 1); - } - static void tipc_node_kref_release(struct kref *kref) { struct tipc_node *n = container_of(kref, struct tipc_node, kref); @@@ -279,6 -269,7 +269,7 @@@ static void tipc_node_write_unlock(stru u32 addr = 0; u32 flags = n->action_flags; u32 link_id = 0; + u32 bearer_id; struct list_head *publ_list;
if (likely(!flags)) { @@@ -288,6 -279,7 +279,7 @@@
addr = n->addr; link_id = n->link_id; + bearer_id = link_id & 0xffff; publ_list = &n->publ_list;
n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP | @@@ -301,13 -293,16 +293,16 @@@ if (flags & TIPC_NOTIFY_NODE_UP) tipc_named_node_up(net, addr);
- if (flags & TIPC_NOTIFY_LINK_UP) + if (flags & TIPC_NOTIFY_LINK_UP) { + tipc_mon_peer_up(net, addr, bearer_id); tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr, TIPC_NODE_SCOPE, link_id, addr); - - if (flags & TIPC_NOTIFY_LINK_DOWN) + } + if (flags & TIPC_NOTIFY_LINK_DOWN) { + tipc_mon_peer_down(net, addr, bearer_id); tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr, link_id, addr); + } }
struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities) @@@ -378,14 -373,13 +373,13 @@@ static void tipc_node_calculate_timer(s { unsigned long tol = tipc_link_tolerance(l); unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4; - unsigned long keepalive_intv = msecs_to_jiffies(intv);
/* Link with lowest tolerance determines timer interval */ - if (keepalive_intv < n->keepalive_intv) - n->keepalive_intv = keepalive_intv; + if (intv < n->keepalive_intv) + n->keepalive_intv = intv;
- /* Ensure link's abort limit corresponds to current interval */ - tipc_link_set_abort_limit(l, tol / jiffies_to_msecs(n->keepalive_intv)); + /* Ensure link's abort limit corresponds to current tolerance */ + tipc_link_set_abort_limit(l, tol / n->keepalive_intv); }
static void tipc_node_delete(struct tipc_node *node) @@@ -526,7 -520,7 +520,7 @@@ static void tipc_node_timeout(unsigned if (rc & TIPC_LINK_DOWN_EVT) tipc_node_link_down(n, bearer_id, false); } - mod_timer(&n->timer, jiffies + n->keepalive_intv); + mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv)); }
/** @@@ -692,6 -686,7 +686,7 @@@ static void tipc_node_link_down(struct struct tipc_link *l = le->link; struct tipc_media_addr *maddr; struct sk_buff_head xmitq; + int old_bearer_id = bearer_id;
if (!l) return; @@@ -711,6 -706,8 +706,8 @@@ tipc_link_fsm_evt(l, LINK_RESET_EVT); } tipc_node_write_unlock(n); + if (delete) + tipc_mon_remove_peer(n->net, n->addr, old_bearer_id); tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr); tipc_sk_rcv(n->net, &le->inputq); } @@@ -735,6 -732,7 +732,7 @@@ void tipc_node_check_dest(struct net *n bool accept_addr = false; bool reset = true; char *if_name; + unsigned long intv;
*dupl_addr = false; *respond = false; @@@ -840,9 -838,11 +838,11 @@@ le->link = l; n->link_cnt++; tipc_node_calculate_timer(n, l); - if (n->link_cnt == 1) - if (!mod_timer(&n->timer, jiffies + n->keepalive_intv)) + if (n->link_cnt == 1) { + intv = jiffies + msecs_to_jiffies(n->keepalive_intv); + if (!mod_timer(&n->timer, intv)) tipc_node_get(n); + } } memcpy(&le->maddr, maddr, sizeof(*maddr)); exit: @@@ -950,7 -950,7 +950,7 @@@ static void tipc_node_fsm_evt(struct ti state = SELF_UP_PEER_UP; break; case SELF_LOST_CONTACT_EVT: - state = SELF_DOWN_PEER_LEAVING; + state = SELF_DOWN_PEER_DOWN; break; case SELF_ESTABL_CONTACT_EVT: case PEER_LOST_CONTACT_EVT: @@@ -969,7 -969,7 +969,7 @@@ state = SELF_UP_PEER_UP; break; case PEER_LOST_CONTACT_EVT: - state = SELF_LEAVING_PEER_DOWN; + state = SELF_DOWN_PEER_DOWN; break; case SELF_LOST_CONTACT_EVT: case PEER_ESTABL_CONTACT_EVT: @@@ -1297,6 -1297,10 +1297,6 @@@ static void tipc_node_bc_rcv(struct ne
rc = tipc_bcast_rcv(net, be->link, skb);
- /* Broadcast link reset may happen at reassembly failure */ - if (rc & TIPC_LINK_DOWN_EVT) - tipc_node_reset_links(n); - /* Broadcast ACKs are sent on a unicast link */ if (rc & TIPC_LINK_SND_BC_ACK) { tipc_node_read_lock(n); @@@ -1316,17 -1320,6 +1316,17 @@@ spin_unlock_bh(&be->inputq2.lock); tipc_sk_mcast_rcv(net, &be->arrvq, &be->inputq2); } + + if (rc & TIPC_LINK_DOWN_EVT) { + /* Reception reassembly failure => reset all links to peer */ + if (!tipc_link_is_up(be->link)) + tipc_node_reset_links(n); + + /* Retransmission failure => reset all links to all peers */ + if (!tipc_link_is_up(tipc_bc_sndlink(net))) + tipc_bearer_reset_all(net); + } + tipc_node_put(n); }
diff --combined net/wireless/nl80211.c index 7d72283,5782f71..46417f9 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@@ -167,6 -167,7 +167,7 @@@ __cfg80211_rdev_from_attrs(struct net *
if (attrs[NL80211_ATTR_IFINDEX]) { int ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]); + netdev = __dev_get_by_index(netns, ifindex); if (netdev) { if (netdev->ieee80211_ptr) @@@ -404,6 -405,10 +405,10 @@@ static const struct nla_policy nl80211_ [NL80211_ATTR_PBSS] = { .type = NLA_FLAG }, [NL80211_ATTR_BSS_SELECT] = { .type = NLA_NESTED }, [NL80211_ATTR_STA_SUPPORT_P2P_PS] = { .type = NLA_U8 }, + [NL80211_ATTR_MU_MIMO_GROUP_DATA] = { + .len = VHT_MUMIMO_GROUPS_DATA_LEN + }, + [NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR] = { .len = ETH_ALEN }, };
/* policy for the key attributes */ @@@ -731,6 -736,7 +736,7 @@@ static int nl80211_parse_key_new(struc
if (tb[NL80211_KEY_DEFAULT_TYPES]) { struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES]; + err = nla_parse_nested(kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1, tb[NL80211_KEY_DEFAULT_TYPES], nl80211_key_default_policy); @@@ -1264,7 -1270,7 +1270,7 @@@ nl80211_send_mgmt_stypes(struct sk_buf struct nl80211_dump_wiphy_state { s64 filter_wiphy; long start; - long split_start, band_start, chan_start; + long split_start, band_start, chan_start, capa_start; bool split; };
@@@ -1382,6 -1388,7 +1388,7 @@@ static int nl80211_send_wiphy(struct cf rdev->ops->get_antenna) { u32 tx_ant = 0, rx_ant = 0; int res; + res = rdev_get_antenna(rdev, &tx_ant, &rx_ant); if (!res) { if (nla_put_u32(msg, @@@ -1761,6 -1768,47 +1768,47 @@@ nla_nest_end(msg, nested); }
+ state->split_start++; + break; + case 13: + if (rdev->wiphy.num_iftype_ext_capab && + rdev->wiphy.iftype_ext_capab) { + struct nlattr *nested_ext_capab, *nested; + + nested = nla_nest_start(msg, + NL80211_ATTR_IFTYPE_EXT_CAPA); + if (!nested) + goto nla_put_failure; + + for (i = state->capa_start; + i < rdev->wiphy.num_iftype_ext_capab; i++) { + const struct wiphy_iftype_ext_capab *capab; + + capab = &rdev->wiphy.iftype_ext_capab[i]; + + nested_ext_capab = nla_nest_start(msg, i); + if (!nested_ext_capab || + nla_put_u32(msg, NL80211_ATTR_IFTYPE, + capab->iftype) || + nla_put(msg, NL80211_ATTR_EXT_CAPA, + capab->extended_capabilities_len, + capab->extended_capabilities) || + nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK, + capab->extended_capabilities_len, + capab->extended_capabilities_mask)) + goto nla_put_failure; + + nla_nest_end(msg, nested_ext_capab); + if (state->split) + break; + } + nla_nest_end(msg, nested); + if (i < rdev->wiphy.num_iftype_ext_capab) { + state->capa_start = i + 1; + break; + } + } + /* done */ state->split_start = 0; break; @@@ -2116,7 -2164,6 +2164,6 @@@ static int nl80211_set_wds_peer(struct return rdev_set_wds_peer(rdev, dev, bssid); }
- static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; @@@ -2251,6 -2298,7 +2298,7 @@@ if (info->attrs[NL80211_ATTR_WIPHY_ANTENNA_TX] && info->attrs[NL80211_ATTR_WIPHY_ANTENNA_RX]) { u32 tx_ant, rx_ant; + if ((!rdev->wiphy.available_antennas_tx && !rdev->wiphy.available_antennas_rx) || !rdev->ops->set_antenna) @@@ -2651,6 -2699,38 +2699,38 @@@ static int nl80211_set_interface(struc change = true; }
+ if (info->attrs[NL80211_ATTR_MU_MIMO_GROUP_DATA]) { + const u8 *mumimo_groups; + u32 cap_flag = NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER; + + if (!wiphy_ext_feature_isset(&rdev->wiphy, cap_flag)) + return -EOPNOTSUPP; + + mumimo_groups = + nla_data(info->attrs[NL80211_ATTR_MU_MIMO_GROUP_DATA]); + + /* bits 0 and 63 are reserved and must be zero */ + if ((mumimo_groups[0] & BIT(7)) || + (mumimo_groups[VHT_MUMIMO_GROUPS_DATA_LEN - 1] & BIT(0))) + return -EINVAL; + + memcpy(params.vht_mumimo_groups, mumimo_groups, + VHT_MUMIMO_GROUPS_DATA_LEN); + change = true; + } + + if (info->attrs[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR]) { + u32 cap_flag = NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER; + + if (!wiphy_ext_feature_isset(&rdev->wiphy, cap_flag)) + return -EOPNOTSUPP; + + nla_memcpy(params.macaddr, + info->attrs[NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR], + ETH_ALEN); + change = true; + } + if (flags && (*flags & MONITOR_FLAG_ACTIVE) && !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) return -EOPNOTSUPP; @@@ -2919,6 -2999,7 +2999,7 @@@ static int nl80211_get_key(struct sk_bu pairwise = !!mac_addr; if (info->attrs[NL80211_ATTR_KEY_TYPE]) { u32 kt = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]); + if (kt >= NUM_NL80211_KEYTYPES) return -EINVAL; if (kt != NL80211_KEYTYPE_GROUP && @@@ -3487,16 -3568,16 +3568,16 @@@ static int nl80211_start_ap(struct sk_b params.smps_mode = NL80211_SMPS_OFF; }
+ params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]); + if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ]) + return -EOPNOTSUPP; + if (info->attrs[NL80211_ATTR_ACL_POLICY]) { params.acl = parse_acl_data(&rdev->wiphy, info); if (IS_ERR(params.acl)) return PTR_ERR(params.acl); }
- params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]); - if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ]) - return -EOPNOTSUPP; - wdev_lock(wdev); err = rdev_start_ap(rdev, dev, ¶ms); if (!err) { @@@ -3962,7 -4043,6 +4043,6 @@@ static int nl80211_dump_station(struct sta_idx++; }
- out: cb->args[2] = sta_idx; err = skb->len; @@@ -4366,6 -4446,12 +4446,12 @@@ static int nl80211_set_station(struct s nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]); if (params.plink_state >= NUM_NL80211_PLINK_STATES) return -EINVAL; + if (info->attrs[NL80211_ATTR_MESH_PEER_AID]) { + params.peer_aid = nla_get_u16( + info->attrs[NL80211_ATTR_MESH_PEER_AID]); + if (params.peer_aid > IEEE80211_MAX_AID) + return -EINVAL; + } params.sta_modify_mask |= STATION_PARAM_APPLY_PLINK_STATE; }
@@@ -4763,7 -4849,6 +4849,6 @@@ static int nl80211_dump_mpath(struct sk path_idx++; }
- out: cb->args[2] = path_idx; err = skb->len; @@@ -5053,7 -5138,6 +5138,6 @@@ static int nl80211_req_set_reg(struct s enum nl80211_user_reg_hint_type user_reg_hint_type; u32 owner_nlportid;
- /* * You should only get this when cfg80211 hasn't yet initialized * completely when built-in to the kernel right between the time @@@ -5245,6 -5329,51 +5329,51 @@@ static const struct nla_polic [NL80211_MESH_SETUP_USERSPACE_AMPE] = { .type = NLA_FLAG }, };
+ static int nl80211_check_bool(const struct nlattr *nla, u8 min, u8 max, bool *out) + { + u8 val = nla_get_u8(nla); + if (val < min || val > max) + return -EINVAL; + *out = val; + return 0; + } + + static int nl80211_check_u8(const struct nlattr *nla, u8 min, u8 max, u8 *out) + { + u8 val = nla_get_u8(nla); + if (val < min || val > max) + return -EINVAL; + *out = val; + return 0; + } + + static int nl80211_check_u16(const struct nlattr *nla, u16 min, u16 max, u16 *out) + { + u16 val = nla_get_u16(nla); + if (val < min || val > max) + return -EINVAL; + *out = val; + return 0; + } + + static int nl80211_check_u32(const struct nlattr *nla, u32 min, u32 max, u32 *out) + { + u32 val = nla_get_u32(nla); + if (val < min || val > max) + return -EINVAL; + *out = val; + return 0; + } + + static int nl80211_check_s32(const struct nlattr *nla, s32 min, s32 max, s32 *out) + { + s32 val = nla_get_s32(nla); + if (val < min || val > max) + return -EINVAL; + *out = val; + return 0; + } + static int nl80211_parse_mesh_config(struct genl_info *info, struct mesh_config *cfg, u32 *mask_out) @@@ -5255,14 -5384,12 +5384,12 @@@ #define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, min, max, mask, attr, fn) \ do { \ if (tb[attr]) { \ - if (fn(tb[attr]) < min || fn(tb[attr]) > max) \ + if (fn(tb[attr], min, max, &cfg->param)) \ return -EINVAL; \ - cfg->param = fn(tb[attr]); \ mask |= (1 << (attr - 1)); \ } \ } while (0)
- if (!info->attrs[NL80211_ATTR_MESH_CONFIG]) return -EINVAL; if (nla_parse_nested(tb, NL80211_MESHCONF_ATTR_MAX, @@@ -5277,99 -5404,99 +5404,99 @@@ /* Fill in the params struct */ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout, 1, 255, mask, NL80211_MESHCONF_RETRY_TIMEOUT, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout, 1, 255, mask, NL80211_MESHCONF_CONFIRM_TIMEOUT, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout, 1, 255, mask, NL80211_MESHCONF_HOLDING_TIMEOUT, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks, 0, 255, mask, NL80211_MESHCONF_MAX_PEER_LINKS, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries, 0, 16, mask, NL80211_MESHCONF_MAX_RETRIES, - nla_get_u8); + nl80211_check_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL, 1, 255, - mask, NL80211_MESHCONF_TTL, nla_get_u8); + mask, NL80211_MESHCONF_TTL, nl80211_check_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, element_ttl, 1, 255, mask, NL80211_MESHCONF_ELEMENT_TTL, - nla_get_u8); + nl80211_check_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks, 0, 1, mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS, - nla_get_u8); + nl80211_check_bool); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor, 1, 255, mask, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, - nla_get_u32); + nl80211_check_u32); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries, 0, 255, mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, - nla_get_u8); + nl80211_check_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time, 1, 65535, mask, NL80211_MESHCONF_PATH_REFRESH_TIME, - nla_get_u32); + nl80211_check_u32); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout, 1, 65535, mask, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout, 1, 65535, mask, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, - nla_get_u32); + nl80211_check_u32); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval, 1, 65535, mask, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval, 1, 65535, mask, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPnetDiameterTraversalTime, 1, 65535, mask, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRootMode, 0, 4, mask, NL80211_MESHCONF_HWMP_ROOTMODE, - nla_get_u8); + nl80211_check_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRannInterval, 1, 65535, mask, NL80211_MESHCONF_HWMP_RANN_INTERVAL, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshGateAnnouncementProtocol, 0, 1, mask, NL80211_MESHCONF_GATE_ANNOUNCEMENTS, - nla_get_u8); + nl80211_check_bool); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding, 0, 1, mask, NL80211_MESHCONF_FORWARDING, - nla_get_u8); + nl80211_check_bool); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, -255, 0, mask, NL80211_MESHCONF_RSSI_THRESHOLD, - nla_get_s32); + nl80211_check_s32); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode, 0, 16, mask, NL80211_MESHCONF_HT_OPMODE, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout, 1, 65535, mask, NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, - 
nla_get_u32); + nl80211_check_u32); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMProotInterval, 1, 65535, mask, NL80211_MESHCONF_HWMP_ROOT_INTERVAL, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPconfirmationInterval, 1, 65535, mask, NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, - nla_get_u16); + nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, power_mode, NL80211_MESH_POWER_ACTIVE, NL80211_MESH_POWER_MAX, mask, NL80211_MESHCONF_POWER_MODE, - nla_get_u32); + nl80211_check_u32); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshAwakeWindowDuration, 0, 65535, mask, - NL80211_MESHCONF_AWAKE_WINDOW, nla_get_u16); + NL80211_MESHCONF_AWAKE_WINDOW, nl80211_check_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, 0, 0xffffffff, mask, NL80211_MESHCONF_PLINK_TIMEOUT, - nla_get_u32); + nl80211_check_u32); if (mask_out) *mask_out = mask;
@@@ -5409,7 -5536,6 +5536,6 @@@ static int nl80211_parse_mesh_setup(str IEEE80211_PATH_METRIC_VENDOR : IEEE80211_PATH_METRIC_AIRTIME;
- if (tb[NL80211_MESH_SETUP_IE]) { struct nlattr *ieattr = tb[NL80211_MESH_SETUP_IE]; @@@ -5796,10 -5922,8 +5922,8 @@@ static int nl80211_set_reg(struct sk_bu } }
- r = set_regdom(rd, REGD_SOURCE_CRDA); - /* set_regdom took ownership */ - rd = NULL; - + /* set_regdom takes ownership of rd */ + return set_regdom(rd, REGD_SOURCE_CRDA); bad_reg: kfree(rd); return r; @@@ -6033,6 -6157,7 +6157,7 @@@ static int nl80211_trigger_scan(struct /* all channels */ for (band = 0; band < NUM_NL80211_BANDS; band++) { int j; + if (!wiphy->bands[band]) continue; for (j = 0; j < wiphy->bands[band]->n_channels; j++) { @@@ -6104,6 -6229,19 +6229,19 @@@ } }
+ if (info->attrs[NL80211_ATTR_MEASUREMENT_DURATION]) { + if (!wiphy_ext_feature_isset(wiphy, + NL80211_EXT_FEATURE_SET_SCAN_DWELL)) { + err = -EOPNOTSUPP; + goto out_free; + } + + request->duration = + nla_get_u16(info->attrs[NL80211_ATTR_MEASUREMENT_DURATION]); + request->duration_mandatory = + nla_get_flag(info->attrs[NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY]); + } + if (info->attrs[NL80211_ATTR_SCAN_FLAGS]) { request->flags = nla_get_u32( info->attrs[NL80211_ATTR_SCAN_FLAGS]); @@@ -6442,6 -6580,7 +6580,7 @@@ nl80211_parse_sched_scan(struct wiphy * /* all channels */ for (band = 0; band < NUM_NL80211_BANDS; band++) { int j; + if (!wiphy->bands[band]) continue; for (j = 0; j < wiphy->bands[band]->n_channels; j++) { @@@ -6511,7 -6650,7 +6650,7 @@@ nla_data(ssid), nla_len(ssid)); request->match_sets[i].ssid.ssid_len = nla_len(ssid); - /* special attribute - old implemenation w/a */ + /* special attribute - old implementation w/a */ request->match_sets[i].rssi_thold = default_match_rssi; rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI]; @@@ -6936,6 -7075,13 +7075,13 @@@ static int nl80211_send_bss(struct sk_b jiffies_to_msecs(jiffies - intbss->ts))) goto nla_put_failure;
+ if (intbss->parent_tsf && + (nla_put_u64_64bit(msg, NL80211_BSS_PARENT_TSF, + intbss->parent_tsf, NL80211_BSS_PAD) || + nla_put(msg, NL80211_BSS_PARENT_BSSID, ETH_ALEN, + intbss->parent_bssid))) + goto nla_put_failure; + if (intbss->ts_boottime && nla_put_u64_64bit(msg, NL80211_BSS_LAST_SEEN_BOOTTIME, intbss->ts_boottime, NL80211_BSS_PAD)) @@@ -7204,6 -7350,7 +7350,7 @@@ static int nl80211_authenticate(struct if (key.idx >= 0) { int i; bool ok = false; + for (i = 0; i < rdev->wiphy.n_cipher_suites; i++) { if (key.p.cipher == rdev->wiphy.cipher_suites[i]) { ok = true; @@@ -7282,6 -7429,7 +7429,7 @@@ static int nl80211_crypto_settings(stru
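The nl80211_send_bss() hunk above exports two new per-BSS attributes, NL80211_BSS_PARENT_TSF and NL80211_BSS_PARENT_BSSID, when the stored entry carries them. A sketch of how a driver might supply the corresponding values when reporting a received frame follows; the parent_tsf/parent_bssid fields mirror the attribute names but their presence in struct cfg80211_inform_bss, like the whole call site, is an assumption rather than something shown in this diff.

#include <linux/string.h>
#include <net/cfg80211.h>

/* Sketch: report a scan result together with the TSF of the receiving
 * ("parent") interface. Parameter and field names assumed. */
static struct cfg80211_bss *
example_report_bss(struct wiphy *wiphy, struct ieee80211_channel *chan,
		   struct ieee80211_mgmt *mgmt, size_t len, s32 signal_mbm,
		   u64 parent_tsf, const u8 *parent_bssid)
{
	struct cfg80211_inform_bss data = {
		.chan = chan,
		.signal = signal_mbm,
		.parent_tsf = parent_tsf,	/* assumed field, matches NL80211_BSS_PARENT_TSF */
	};

	memcpy(data.parent_bssid, parent_bssid, ETH_ALEN);	/* assumed field */
	return cfg80211_inform_bss_frame_data(wiphy, &data, mgmt, len,
					      GFP_KERNEL);
}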
if (info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]) { u16 proto; + proto = nla_get_u16( info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]); settings->control_port_ethertype = cpu_to_be16(proto); @@@ -8435,6 -8583,7 +8583,7 @@@ static u32 rateset_to_mask(struct ieee8 for (i = 0; i < rates_len; i++) { int rate = (rates[i] & 0x7f) * 5; int ridx; + for (ridx = 0; ridx < sband->n_bitrates; ridx++) { struct ieee80211_rate *srate = &sband->bitrates[ridx]; @@@ -8743,7 -8892,6 +8892,6 @@@ static int nl80211_tx_mgmt(struct sk_bu if (params.wait < NL80211_MIN_REMAIN_ON_CHANNEL_TIME || params.wait > rdev->wiphy.max_remain_on_channel_duration) return -EINVAL; - }
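For the rateset_to_mask() context above: rates in the supported-rates element are stored in 500 kbit/s units with the high bit marking a basic rate, while struct ieee80211_rate.bitrate uses 100 kbit/s units, which is what "(rates[i] & 0x7f) * 5" converts between. A minimal standalone illustration:

#include <linux/types.h>

/* Convert one supported-rates IE octet to cfg80211 bitrate units.
 * Example: 0x96 (basic rate, 11 Mbit/s) -> 0x96 & 0x7f = 22,
 * 22 * 5 = 110 in units of 100 kbit/s, i.e. 11.0 Mbit/s. */
static int ie_rate_to_bitrate(u8 ie_rate)
{
	return (ie_rate & 0x7f) * 5;
}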
params.offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK]; @@@ -10590,7 -10738,6 +10738,6 @@@ int cfg80211_vendor_cmd_reply(struct sk } EXPORT_SYMBOL_GPL(cfg80211_vendor_cmd_reply);
- static int nl80211_set_qos_map(struct sk_buff *skb, struct genl_info *info) { @@@ -10945,7 -11092,7 +11092,7 @@@ static const struct genl_ops nl80211_op .cmd = NL80211_CMD_SET_WIPHY, .doit = nl80211_set_wiphy, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_RTNL, }, { @@@ -10961,7 -11108,7 +11108,7 @@@ .cmd = NL80211_CMD_SET_INTERFACE, .doit = nl80211_set_interface, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -10969,7 -11116,7 +11116,7 @@@ .cmd = NL80211_CMD_NEW_INTERFACE, .doit = nl80211_new_interface, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, @@@ -10977,7 -11124,7 +11124,7 @@@ .cmd = NL80211_CMD_DEL_INTERFACE, .doit = nl80211_del_interface, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -10985,7 -11132,7 +11132,7 @@@ .cmd = NL80211_CMD_GET_KEY, .doit = nl80211_get_key, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -10993,7 -11140,7 +11140,7 @@@ .cmd = NL80211_CMD_SET_KEY, .doit = nl80211_set_key, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL | NL80211_FLAG_CLEAR_SKB, @@@ -11002,7 -11149,7 +11149,7 @@@ .cmd = NL80211_CMD_NEW_KEY, .doit = nl80211_new_key, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL | NL80211_FLAG_CLEAR_SKB, @@@ -11011,14 -11158,14 +11158,14 @@@ .cmd = NL80211_CMD_DEL_KEY, .doit = nl80211_del_key, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, { .cmd = NL80211_CMD_SET_BEACON, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .doit = nl80211_set_beacon, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, @@@ -11026,7 -11173,7 +11173,7 @@@ { .cmd = NL80211_CMD_START_AP, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .doit = nl80211_start_ap, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, @@@ -11034,7 -11181,7 +11181,7 @@@ { .cmd = NL80211_CMD_STOP_AP, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .doit = nl80211_stop_ap, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, @@@ -11051,7 -11198,7 +11198,7 @@@ .cmd = NL80211_CMD_SET_STATION, .doit = nl80211_set_station, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11059,7 -11206,7 +11206,7 @@@ .cmd = NL80211_CMD_NEW_STATION, .doit = nl80211_new_station, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11067,7 -11214,7 +11214,7 @@@ .cmd = NL80211_CMD_DEL_STATION, .doit = nl80211_del_station, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = 
GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11076,7 -11223,7 +11223,7 @@@ .doit = nl80211_get_mpath, .dumpit = nl80211_dump_mpath, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11085,7 -11232,7 +11232,7 @@@ .doit = nl80211_get_mpp, .dumpit = nl80211_dump_mpp, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11093,7 -11240,7 +11240,7 @@@ .cmd = NL80211_CMD_SET_MPATH, .doit = nl80211_set_mpath, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11101,7 -11248,7 +11248,7 @@@ .cmd = NL80211_CMD_NEW_MPATH, .doit = nl80211_new_mpath, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11109,7 -11256,7 +11256,7 @@@ .cmd = NL80211_CMD_DEL_MPATH, .doit = nl80211_del_mpath, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11117,7 -11264,7 +11264,7 @@@ .cmd = NL80211_CMD_SET_BSS, .doit = nl80211_set_bss, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11156,7 -11303,7 +11303,7 @@@ .cmd = NL80211_CMD_SET_MESH_CONFIG, .doit = nl80211_update_mesh_config, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11164,7 -11311,7 +11311,7 @@@ .cmd = NL80211_CMD_TRIGGER_SCAN, .doit = nl80211_trigger_scan, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11172,7 -11319,7 +11319,7 @@@ .cmd = NL80211_CMD_ABORT_SCAN, .doit = nl80211_abort_scan, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11185,7 -11332,7 +11332,7 @@@ .cmd = NL80211_CMD_START_SCHED_SCAN, .doit = nl80211_start_sched_scan, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11193,7 -11340,7 +11340,7 @@@ .cmd = NL80211_CMD_STOP_SCHED_SCAN, .doit = nl80211_stop_sched_scan, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11201,7 -11348,7 +11348,7 @@@ .cmd = NL80211_CMD_AUTHENTICATE, .doit = nl80211_authenticate, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL | NL80211_FLAG_CLEAR_SKB, @@@ -11210,7 -11357,7 +11357,7 @@@ .cmd = NL80211_CMD_ASSOCIATE, .doit = nl80211_associate, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11218,7 -11365,7 +11365,7 @@@ .cmd = NL80211_CMD_DEAUTHENTICATE, .doit = nl80211_deauthenticate, 
.policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11226,7 -11373,7 +11373,7 @@@ .cmd = NL80211_CMD_DISASSOCIATE, .doit = nl80211_disassociate, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11234,7 -11381,7 +11381,7 @@@ .cmd = NL80211_CMD_JOIN_IBSS, .doit = nl80211_join_ibss, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11242,7 -11389,7 +11389,7 @@@ .cmd = NL80211_CMD_LEAVE_IBSS, .doit = nl80211_leave_ibss, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11252,7 -11399,7 +11399,7 @@@ .doit = nl80211_testmode_do, .dumpit = nl80211_testmode_dump, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, @@@ -11261,7 -11408,7 +11408,7 @@@ .cmd = NL80211_CMD_CONNECT, .doit = nl80211_connect, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11269,7 -11416,7 +11416,7 @@@ .cmd = NL80211_CMD_DISCONNECT, .doit = nl80211_disconnect, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11277,7 -11424,7 +11424,7 @@@ .cmd = NL80211_CMD_SET_WIPHY_NETNS, .doit = nl80211_wiphy_netns, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, @@@ -11290,7 -11437,7 +11437,7 @@@ .cmd = NL80211_CMD_SET_PMKSA, .doit = nl80211_setdel_pmksa, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11298,7 -11445,7 +11445,7 @@@ .cmd = NL80211_CMD_DEL_PMKSA, .doit = nl80211_setdel_pmksa, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11306,7 -11453,7 +11453,7 @@@ .cmd = NL80211_CMD_FLUSH_PMKSA, .doit = nl80211_flush_pmksa, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11314,7 -11461,7 +11461,7 @@@ .cmd = NL80211_CMD_REMAIN_ON_CHANNEL, .doit = nl80211_remain_on_channel, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11322,7 -11469,7 +11469,7 @@@ .cmd = NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL, .doit = nl80211_cancel_remain_on_channel, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11330,7 -11477,7 +11477,7 @@@ .cmd = NL80211_CMD_SET_TX_BITRATE_MASK, .doit = nl80211_set_tx_bitrate_mask, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -11338,7 -11485,7 
+11485,7 @@@ .cmd = NL80211_CMD_REGISTER_FRAME, .doit = nl80211_register_mgmt, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -11346,7 -11493,7 +11493,7 @@@ .cmd = NL80211_CMD_FRAME, .doit = nl80211_tx_mgmt, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11354,7 -11501,7 +11501,7 @@@ .cmd = NL80211_CMD_FRAME_WAIT_CANCEL, .doit = nl80211_tx_mgmt_cancel_wait, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11362,7 -11509,7 +11509,7 @@@ .cmd = NL80211_CMD_SET_POWER_SAVE, .doit = nl80211_set_power_save, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -11378,7 -11525,7 +11525,7 @@@ .cmd = NL80211_CMD_SET_CQM, .doit = nl80211_set_cqm, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -11386,7 -11533,7 +11533,7 @@@ .cmd = NL80211_CMD_SET_CHANNEL, .doit = nl80211_set_channel, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -11394,7 -11541,7 +11541,7 @@@ .cmd = NL80211_CMD_SET_WDS_PEER, .doit = nl80211_set_wds_peer, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -11402,7 -11549,7 +11549,7 @@@ .cmd = NL80211_CMD_JOIN_MESH, .doit = nl80211_join_mesh, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11410,7 -11557,7 +11557,7 @@@ .cmd = NL80211_CMD_LEAVE_MESH, .doit = nl80211_leave_mesh, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11418,7 -11565,7 +11565,7 @@@ .cmd = NL80211_CMD_JOIN_OCB, .doit = nl80211_join_ocb, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11426,7 -11573,7 +11573,7 @@@ .cmd = NL80211_CMD_LEAVE_OCB, .doit = nl80211_leave_ocb, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11443,7 -11590,7 +11590,7 @@@ .cmd = NL80211_CMD_SET_WOWLAN, .doit = nl80211_set_wowlan, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, @@@ -11452,7 -11599,7 +11599,7 @@@ .cmd = NL80211_CMD_SET_REKEY_OFFLOAD, .doit = nl80211_set_rekey_data, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL | NL80211_FLAG_CLEAR_SKB, @@@ -11461,7 -11608,7 +11608,7 @@@ .cmd = NL80211_CMD_TDLS_MGMT, .doit = nl80211_tdls_mgmt, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 
NL80211_FLAG_NEED_RTNL, }, @@@ -11469,7 -11616,7 +11616,7 @@@ .cmd = NL80211_CMD_TDLS_OPER, .doit = nl80211_tdls_oper, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11477,7 -11624,7 +11624,7 @@@ .cmd = NL80211_CMD_UNEXPECTED_FRAME, .doit = nl80211_register_unexpected_frame, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -11485,7 -11632,7 +11632,7 @@@ .cmd = NL80211_CMD_PROBE_CLIENT, .doit = nl80211_probe_client, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11493,7 -11640,7 +11640,7 @@@ .cmd = NL80211_CMD_REGISTER_BEACONS, .doit = nl80211_register_beacons, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, @@@ -11501,7 -11648,7 +11648,7 @@@ .cmd = NL80211_CMD_SET_NOACK_MAP, .doit = nl80211_set_noack_map, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -11509,7 -11656,7 +11656,7 @@@ .cmd = NL80211_CMD_START_P2P_DEVICE, .doit = nl80211_start_p2p_device, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -11517,7 -11664,7 +11664,7 @@@ .cmd = NL80211_CMD_STOP_P2P_DEVICE, .doit = nl80211_stop_p2p_device, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11525,7 -11672,7 +11672,7 @@@ .cmd = NL80211_CMD_SET_MCAST_RATE, .doit = nl80211_set_mcast_rate, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -11533,7 -11680,7 +11680,7 @@@ .cmd = NL80211_CMD_SET_MAC_ACL, .doit = nl80211_set_mac_acl, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, }, @@@ -11541,7 -11688,7 +11688,7 @@@ .cmd = NL80211_CMD_RADAR_DETECT, .doit = nl80211_start_radar_detection, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11554,7 -11701,7 +11701,7 @@@ .cmd = NL80211_CMD_UPDATE_FT_IES, .doit = nl80211_update_ft_ies, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11562,7 -11709,7 +11709,7 @@@ .cmd = NL80211_CMD_CRIT_PROTOCOL_START, .doit = nl80211_crit_protocol_start, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11570,7 -11717,7 +11717,7 @@@ .cmd = NL80211_CMD_CRIT_PROTOCOL_STOP, .doit = nl80211_crit_protocol_stop, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11585,7 -11732,7 +11732,7 @@@ .cmd = NL80211_CMD_SET_COALESCE, .doit = nl80211_set_coalesce, .policy = 
nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, @@@ -11593,7 -11740,7 +11740,7 @@@ .cmd = NL80211_CMD_CHANNEL_SWITCH, .doit = nl80211_channel_switch, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11602,7 -11749,7 +11749,7 @@@ .doit = nl80211_vendor_cmd, .dumpit = nl80211_vendor_cmd_dump, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, }, @@@ -11610,7 -11757,7 +11757,7 @@@ .cmd = NL80211_CMD_SET_QOS_MAP, .doit = nl80211_set_qos_map, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11618,7 -11765,7 +11765,7 @@@ .cmd = NL80211_CMD_ADD_TX_TS, .doit = nl80211_add_tx_ts, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11626,7 -11773,7 +11773,7 @@@ .cmd = NL80211_CMD_DEL_TX_TS, .doit = nl80211_del_tx_ts, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11634,7 -11781,7 +11781,7 @@@ .cmd = NL80211_CMD_TDLS_CHANNEL_SWITCH, .doit = nl80211_tdls_channel_switch, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11642,7 -11789,7 +11789,7 @@@ .cmd = NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH, .doit = nl80211_tdls_cancel_channel_switch, .policy = nl80211_policy, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, }, @@@ -11708,6 -11855,13 +11855,13 @@@ static int nl80211_add_scan_req(struct nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->flags)) goto nla_put_failure;
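The long run of .flags changes above converts these nl80211 commands from GENL_ADMIN_PERM to GENL_UNS_ADMIN_PERM. The practical difference is which capability check the generic netlink core applies before calling .doit: GENL_ADMIN_PERM effectively requires CAP_NET_ADMIN in the initial user namespace, while GENL_UNS_ADMIN_PERM accepts CAP_NET_ADMIN in the user namespace owning the network namespace the request came from, so a wiphy moved into a container can be managed from inside it. The sketch below paraphrases that distinction from memory of the genetlink core; it is not part of this diff and the exact in-tree code may differ.

#include <linux/capability.h>
#include <linux/netlink.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>

/* Paraphrase of the permission check that makes the two flags differ. */
static int example_genl_perm_check(const struct genl_ops *ops,
				   struct sk_buff *skb, struct net *net)
{
	if ((ops->flags & GENL_ADMIN_PERM) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if ((ops->flags & GENL_UNS_ADMIN_PERM) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	return 0;
}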
+ if (req->info.scan_start_tsf && + (nla_put_u64_64bit(msg, NL80211_ATTR_SCAN_START_TIME_TSF, + req->info.scan_start_tsf, NL80211_BSS_PAD) || + nla_put(msg, NL80211_ATTR_SCAN_START_TIME_TSF_BSSID, ETH_ALEN, + req->info.tsf_bssid))) + goto nla_put_failure; + return 0; nla_put_failure: return -ENOBUFS; @@@ -12092,7 -12246,7 +12246,7 @@@ void nl80211_send_connect_result(struc struct net_device *netdev, const u8 *bssid, const u8 *req_ie, size_t req_ie_len, const u8 *resp_ie, size_t resp_ie_len, - u16 status, gfp_t gfp) + int status, gfp_t gfp) { struct sk_buff *msg; void *hdr; @@@ -12110,7 -12264,10 +12264,10 @@@ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || (bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) || - nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, status) || + nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, + status < 0 ? WLAN_STATUS_UNSPECIFIED_FAILURE : + status) || + (status < 0 && nla_put_flag(msg, NL80211_ATTR_TIMED_OUT)) || (req_ie && nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) || (resp_ie && @@@ -12126,7 -12283,6 +12283,6 @@@ nla_put_failure: genlmsg_cancel(msg, hdr); nlmsg_free(msg); - }
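nl80211_send_connect_result() above now takes an int status: non-negative values remain IEEE 802.11 status codes, while a negative value means the connect attempt timed out, in which case userspace receives WLAN_STATUS_UNSPECIFIED_FAILURE as a placeholder plus the NL80211_ATTR_TIMED_OUT flag. A userspace-side sketch of interpreting the event is below; it assumes libnl-style attribute helpers and that the surrounding message parsing has already filled the attrs array.

#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/attr.h>	/* libnl: nla_get_u16() */
#include <linux/nl80211.h>

/* Sketch: distinguish a local timeout from an AP-supplied status code in a
 * parsed NL80211_CMD_CONNECT event. */
static void example_handle_connect(struct nlattr *attrs[])
{
	if (attrs[NL80211_ATTR_TIMED_OUT]) {
		/* status code attribute is only a placeholder here */
		printf("connect attempt timed out\n");
	} else if (attrs[NL80211_ATTR_STATUS_CODE]) {
		printf("connect status code %u\n",
		       nla_get_u16(attrs[NL80211_ATTR_STATUS_CODE]));
	}
}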
void nl80211_send_roamed(struct cfg80211_registered_device *rdev, @@@ -12165,7 -12321,6 +12321,6 @@@ nla_put_failure: genlmsg_cancel(msg, hdr); nlmsg_free(msg); - }
void nl80211_send_disconnected(struct cfg80211_registered_device *rdev, @@@ -12203,7 -12358,6 +12358,6 @@@ nla_put_failure: genlmsg_cancel(msg, hdr); nlmsg_free(msg); - }
void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev, @@@ -13545,7 -13699,6 +13699,6 @@@ void cfg80211_crit_proto_stopped(struc if (hdr) genlmsg_cancel(msg, hdr); nlmsg_free(msg); - } EXPORT_SYMBOL(cfg80211_crit_proto_stopped);