The following commit has been merged in the master branch: commit 5a2172cd83399965162c4bfda9d37cfd0706bdbd Merge: 50c0dd4b386e89cdca69410edef2e00f9e7188c9 6f30e8b022c8e3a722928ddb1a2ae0be852fcc0e Author: Stephen Rothwell sfr@canb.auug.org.au Date: Thu Jun 30 11:01:12 2016 +1000
Merge remote-tracking branch 'net-next/master'
diff --combined MAINTAINERS index d8b83bf,f5ddaa9..cd2fc9f --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -595,10 -595,6 +595,10 @@@ S: Odd Fixe L: linux-alpha@vger.kernel.org F: arch/alpha/
+ALPS PS/2 TOUCHPAD DRIVER +R: Pali Rohár pali.rohar@gmail.com +F: drivers/input/mouse/alps.* + ALTERA MAILBOX DRIVER M: Ley Foon Tan lftan@altera.com L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers) @@@ -1163,7 -1159,6 +1163,7 @@@ F: arch/arm/mach-footbridge ARM/FREESCALE IMX / MXC ARM ARCHITECTURE M: Shawn Guo shawnguo@kernel.org M: Sascha Hauer kernel@pengutronix.de +R: Fabio Estevam fabio.estevam@nxp.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git @@@ -1526,7 -1521,6 +1526,7 @@@ M: David Brown <david.brown@linaro.org L: linux-arm-msm@vger.kernel.org L: linux-soc@vger.kernel.org S: Maintained +F: Documentation/devicetree/bindings/soc/qcom/ F: arch/arm/boot/dts/qcom-*.dts F: arch/arm/boot/dts/qcom-*.dtsi F: arch/arm/mach-qcom/ @@@ -1604,10 -1598,8 +1604,10 @@@ F: arch/arm/mach-s3c24* F: arch/arm/mach-s3c64xx/ F: arch/arm/mach-s5p*/ F: arch/arm/mach-exynos*/ -F: drivers/*/*s3c2410* -F: drivers/*/*/*s3c2410* +F: drivers/*/*s3c24* +F: drivers/*/*/*s3c24* +F: drivers/*/*s3c64xx* +F: drivers/*/*s5pv210* F: drivers/memory/samsung/* F: drivers/soc/samsung/* F: drivers/spi/spi-s3c* @@@ -1650,13 -1642,6 +1650,13 @@@ L: linux-media@vger.kernel.or S: Maintained F: drivers/media/platform/s5p-tv/
+ARM/SAMSUNG S5P SERIES HDMI CEC SUBSYSTEM SUPPORT +M: Kyungmin Park kyungmin.park@samsung.com +L: linux-arm-kernel@lists.infradead.org +L: linux-media@vger.kernel.org +S: Maintained +F: drivers/staging/media/platform/s5p-cec/ + ARM/SAMSUNG S5P SERIES JPEG CODEC SUPPORT M: Andrzej Pietrasiewicz andrzej.p@samsung.com M: Jacek Anaszewski j.anaszewski@samsung.com @@@ -1679,6 -1664,7 +1679,6 @@@ F: arch/arm/boot/dts/sh F: arch/arm/configs/shmobile_defconfig F: arch/arm/include/debug/renesas-scif.S F: arch/arm/mach-shmobile/ -F: drivers/sh/ F: drivers/soc/renesas/ F: include/linux/soc/renesas/
@@@ -2256,8 -2242,7 +2256,8 @@@ F: include/net/ax25. F: net/ax25/
AZ6007 DVB DRIVER -M: Mauro Carvalho Chehab mchehab@osg.samsung.com +M: Mauro Carvalho Chehab mchehab@s-opensource.com +M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git @@@ -2469,6 -2454,14 +2469,14 @@@ L: netdev@vger.kernel.or S: Supported F: drivers/net/ethernet/broadcom/b44.*
+ BROADCOM B53 ETHERNET SWITCH DRIVER + M: Florian Fainelli f.fainelli@gmail.com + L: netdev@vger.kernel.org + L: openwrt-devel@lists.openwrt.org (subscribers-only) + S: Supported + F: drivers/net/dsa/b53/* + F: include/linux/platform_data/b53.h + BROADCOM GENET ETHERNET DRIVER M: Florian Fainelli f.fainelli@gmail.com L: netdev@vger.kernel.org @@@ -2504,7 -2497,6 +2512,7 @@@ F: arch/arm64/boot/dts/broadcom F: arch/arm/configs/bcm_defconfig F: drivers/mmc/host/sdhci-bcm-kona.c F: drivers/clocksource/bcm_kona_timer.c +F: drivers/power/reset/brcm-kona-reset.c
BROADCOM BCM2835 ARM ARCHITECTURE M: Stephen Warren swarren@wwwdotorg.org @@@ -2586,12 -2578,11 +2594,11 @@@ S: Supporte F: drivers/net/ethernet/broadcom/tg3.*
BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER - M: Brett Rudley brudley@broadcom.com - M: Arend van Spriel arend@broadcom.com - M: Franky (Zhenhui) Lin frankyl@broadcom.com - M: Hante Meuleman meuleman@broadcom.com + M: Arend van Spriel arend.vanspriel@broadcom.com + M: Franky Lin franky.lin@broadcom.com + M: Hante Meuleman hante.meuleman@broadcom.com L: linux-wireless@vger.kernel.org - L: brcm80211-dev-list@broadcom.com + L: brcm80211-dev-list.pdl@broadcom.com S: Supported F: drivers/net/wireless/broadcom/brcm80211/
@@@ -2725,8 -2716,7 +2732,8 @@@ F: Documentation/filesystems/btrfs.tx F: fs/btrfs/
BTTV VIDEO4LINUX DRIVER -M: Mauro Carvalho Chehab mchehab@osg.samsung.com +M: Mauro Carvalho Chehab mchehab@s-opensource.com +M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git @@@ -2790,9 -2780,9 +2797,9 @@@ F: include/net/caif F: net/caif/
CALGARY x86-64 IOMMU -M: Muli Ben-Yehuda muli@il.ibm.com -M: "Jon D. Mason" jdmason@kudzu.us -L: discuss@x86-64.org +M: Muli Ben-Yehuda mulix@mulix.org +M: Jon Mason jdmason@kudzu.us +L: iommu@lists.linux-foundation.org S: Maintained F: arch/x86/kernel/pci-calgary_64.c F: arch/x86/kernel/tce_64.c @@@ -2823,6 -2813,7 +2830,7 @@@ W: https://github.com/linux-ca T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git S: Maintained + F: Documentation/devicetree/bindings/net/can/ F: drivers/net/can/ F: include/linux/can/dev.h F: include/linux/can/platform/ @@@ -2862,22 -2853,6 +2870,22 @@@ F: drivers/net/ieee802154/cc2520. F: include/linux/spi/cc2520.h F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
+CEC DRIVER +M: Hans Verkuil hans.verkuil@cisco.com +L: linux-media@vger.kernel.org +T: git git://linuxtv.org/media_tree.git +W: http://linuxtv.org +S: Supported +F: Documentation/cec.txt +F: Documentation/DocBook/media/v4l/cec* +F: drivers/staging/media/cec/ +F: drivers/media/cec-edid.c +F: drivers/media/rc/keymaps/rc-cec.c +F: include/media/cec.h +F: include/media/cec-edid.h +F: include/linux/cec.h +F: include/linux/cec-funcs.h + CELL BROADBAND ENGINE ARCHITECTURE M: Arnd Bergmann arnd@arndb.de L: linuxppc-dev@lists.ozlabs.org @@@ -3377,8 -3352,7 +3385,8 @@@ S: Maintaine F: drivers/media/dvb-frontends/cx24120*
CX88 VIDEO4LINUX DRIVER -M: Mauro Carvalho Chehab mchehab@osg.samsung.com +M: Mauro Carvalho Chehab mchehab@s-opensource.com +M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git @@@ -3808,7 -3782,6 +3816,7 @@@ Q: https://patchwork.kernel.org/project S: Maintained F: drivers/dma/ F: include/linux/dmaengine.h +F: Documentation/devicetree/bindings/dma/ F: Documentation/dmaengine/ T: git git://git.infradead.org/users/vkoul/slave-dma.git
@@@ -4326,8 -4299,7 +4334,8 @@@ F: fs/ecryptfs EDAC-CORE M: Doug Thompson dougthompson@xmission.com M: Borislav Petkov bp@alien8.de -M: Mauro Carvalho Chehab mchehab@osg.samsung.com +M: Mauro Carvalho Chehab mchehab@s-opensource.com +M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-edac@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac.git linux_next @@@ -4372,8 -4344,7 +4380,8 @@@ S: Maintaine F: drivers/edac/e7xxx_edac.c
EDAC-GHES -M: Mauro Carvalho Chehab mchehab@osg.samsung.com +M: Mauro Carvalho Chehab mchehab@s-opensource.com +M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/ghes_edac.c @@@ -4397,22 -4368,19 +4405,22 @@@ S: Maintaine F: drivers/edac/i5000_edac.c
EDAC-I5400 -M: Mauro Carvalho Chehab mchehab@osg.samsung.com +M: Mauro Carvalho Chehab mchehab@s-opensource.com +M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/i5400_edac.c
EDAC-I7300 -M: Mauro Carvalho Chehab mchehab@osg.samsung.com +M: Mauro Carvalho Chehab mchehab@s-opensource.com +M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/i7300_edac.c
EDAC-I7CORE -M: Mauro Carvalho Chehab mchehab@osg.samsung.com +M: Mauro Carvalho Chehab mchehab@s-opensource.com +M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/i7core_edac.c @@@ -4449,8 -4417,7 +4457,8 @@@ S: Maintaine F: drivers/edac/r82600_edac.c
EDAC-SBRIDGE -M: Mauro Carvalho Chehab mchehab@osg.samsung.com +M: Mauro Carvalho Chehab mchehab@s-opensource.com +M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/sb_edac.c @@@ -4509,8 -4476,7 +4517,8 @@@ S: Maintaine F: drivers/net/ethernet/ibm/ehea/
EM28XX VIDEO4LINUX DRIVER -M: Mauro Carvalho Chehab mchehab@osg.samsung.com +M: Mauro Carvalho Chehab mchehab@s-opensource.com +M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git @@@ -4913,6 -4879,13 +4921,13 @@@ F: drivers/net/ethernet/freescale/gianf X: drivers/net/ethernet/freescale/gianfar_ptp.c F: Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
+ FREESCALE QUICC ENGINE UCC HDLC DRIVER + M: Zhao Qiang qiang.zhao@nxp.com + L: netdev@vger.kernel.org + L: linuxppc-dev@lists.ozlabs.org + S: Maintained + F: drivers/net/wan/fsl_ucc_hdlc* + FREESCALE QUICC ENGINE UCC UART DRIVER M: Timur Tabi timur@tabi.org L: linuxppc-dev@lists.ozlabs.org @@@ -6529,7 -6502,6 +6544,7 @@@ F: include/uapi/linux/sunrpc
KERNEL SELFTEST FRAMEWORK M: Shuah Khan shuahkh@osg.samsung.com +M: Shuah Khan shuah@kernel.org L: linux-kselftest@vger.kernel.org T: git git://git.kernel.org/pub/scm/shuah/linux-kselftest S: Maintained @@@ -6900,7 -6872,6 +6915,7 @@@ F: drivers/crypto/nx F: drivers/crypto/vmx/ F: drivers/net/ethernet/ibm/ibmveth.* F: drivers/net/ethernet/ibm/ibmvnic.* +F: drivers/pci/hotplug/pnv_php.c F: drivers/pci/hotplug/rpa* F: drivers/scsi/ibmvscsi/ N: opal @@@ -7200,6 -7171,12 +7215,12 @@@ W: http://www.kernel.org/doc/man-page L: linux-man@vger.kernel.org S: Maintained
+ MARVELL 88E6XXX ETHERNET SWITCH FABRIC DRIVER + M: Andrew Lunn andrew@lunn.ch + M: Vivien Didelot vivien.didelot@savoirfairelinux.com + S: Maintained + F: drivers/net/dsa/mv88e6xxx/ + MARVELL ARMADA DRM SUPPORT M: Russell King rmk+kernel@armlinux.org.uk S: Maintained @@@ -7207,11 -7184,6 +7228,6 @@@ F: drivers/gpu/drm/armada F: include/uapi/drm/armada_drm.h F: Documentation/devicetree/bindings/display/armada/
- MARVELL 88E6352 DSA support - M: Guenter Roeck linux@roeck-us.net - S: Maintained - F: drivers/net/dsa/mv88e6352.c - MARVELL CRYPTO DRIVER M: Boris Brezillon boris.brezillon@free-electrons.com M: Arnaud Ebalard arno@natisbad.org @@@ -7347,16 -7319,6 +7363,16 @@@ L: linux-iio@vger.kernel.or S: Maintained F: drivers/iio/potentiometer/mcp4531.c
+MEDIA DRIVERS FOR RENESAS - FCP +M: Laurent Pinchart laurent.pinchart@ideasonboard.com +L: linux-media@vger.kernel.org +L: linux-renesas-soc@vger.kernel.org +T: git git://linuxtv.org/media_tree.git +S: Supported +F: Documentation/devicetree/bindings/media/renesas,fcp.txt +F: drivers/media/platform/rcar-fcp.c +F: include/media/rcar-fcp.h + MEDIA DRIVERS FOR RENESAS - VSP1 M: Laurent Pinchart laurent.pinchart@ideasonboard.com L: linux-media@vger.kernel.org @@@ -7366,18 -7328,8 +7382,18 @@@ S: Supporte F: Documentation/devicetree/bindings/media/renesas,vsp1.txt F: drivers/media/platform/vsp1/
+MEDIA DRIVERS FOR HELENE +M: Abylay Ospan aospan@netup.ru +L: linux-media@vger.kernel.org +W: https://linuxtv.org +W: http://netup.tv/ +T: git git://linuxtv.org/media_tree.git +S: Supported +F: drivers/media/dvb-frontends/helene* + MEDIA DRIVERS FOR ASCOT2E M: Sergey Kozlov serjk@netup.ru +M: Abylay Ospan aospan@netup.ru L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://netup.tv/ @@@ -7387,7 -7339,6 +7403,7 @@@ F: drivers/media/dvb-frontends/ascot2e
MEDIA DRIVERS FOR CXD2841ER M: Sergey Kozlov serjk@netup.ru +M: Abylay Ospan aospan@netup.ru L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://netup.tv/ @@@ -7397,7 -7348,6 +7413,7 @@@ F: drivers/media/dvb-frontends/cxd2841e
MEDIA DRIVERS FOR HORUS3A M: Sergey Kozlov serjk@netup.ru +M: Abylay Ospan aospan@netup.ru L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://netup.tv/ @@@ -7407,7 -7357,6 +7423,7 @@@ F: drivers/media/dvb-frontends/horus3a
MEDIA DRIVERS FOR LNBH25 M: Sergey Kozlov serjk@netup.ru +M: Abylay Ospan aospan@netup.ru L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://netup.tv/ @@@ -7417,7 -7366,6 +7433,7 @@@ F: drivers/media/dvb-frontends/lnbh25
MEDIA DRIVERS FOR NETUP PCI UNIVERSAL DVB devices M: Sergey Kozlov serjk@netup.ru +M: Abylay Ospan aospan@netup.ru L: linux-media@vger.kernel.org W: https://linuxtv.org W: http://netup.tv/ @@@ -7426,8 -7374,7 +7442,8 @@@ S: Supporte F: drivers/media/pci/netup_unidvb/*
MEDIA INPUT INFRASTRUCTURE (V4L/DVB) -M: Mauro Carvalho Chehab mchehab@osg.samsung.com +M: Mauro Carvalho Chehab mchehab@s-opensource.com +M: Mauro Carvalho Chehab mchehab@kernel.org P: LinuxTV.org Project L: linux-media@vger.kernel.org W: https://linuxtv.org @@@ -7475,7 -7422,7 +7491,7 @@@ F: drivers/scsi/megaraid. F: drivers/scsi/megaraid/
MELLANOX ETHERNET DRIVER (mlx4_en) -M: Eugenia Emantayev eugenia@mellanox.com +M: Tariq Toukan tariqt@mellanox.com L: netdev@vger.kernel.org S: Supported W: http://www.mellanox.com @@@ -7666,8 -7613,10 +7682,8 @@@ L: linux-media@vger.kernel.or W: https://linuxtv.org W: http://palosaari.fi/linux/ Q: http://patchwork.linuxtv.org/project/linux-media/list/ -T: git git://linuxtv.org/anttip/media_tree.git S: Maintained -F: drivers/staging/media/mn88472/ -F: drivers/media/dvb-frontends/mn88472.h +F: drivers/media/dvb-frontends/mn88472*
MN88473 MEDIA DRIVER M: Antti Palosaari crope@iki.fi @@@ -8474,9 -8423,10 +8490,9 @@@ F: drivers/i2c/busses/i2c-ocores. OPEN FIRMWARE AND FLATTENED DEVICE TREE M: Rob Herring robh+dt@kernel.org M: Frank Rowand frowand.list@gmail.com -M: Grant Likely grant.likely@linaro.org L: devicetree@vger.kernel.org W: http://www.devicetree.org/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/glikely/linux.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git S: Maintained F: drivers/of/ F: include/linux/of*.h @@@ -8484,10 -8434,12 +8500,10 @@@ F: scripts/dtc
OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS M: Rob Herring robh+dt@kernel.org -M: Pawel Moll pawel.moll@arm.com M: Mark Rutland mark.rutland@arm.com -M: Ian Campbell ijc+devicetree@hellion.org.uk -M: Kumar Gala galak@codeaurora.org L: devicetree@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git +Q: http://patchwork.ozlabs.org/project/devicetree-bindings/list/ S: Maintained F: Documentation/devicetree/ F: arch/*/boot/dts/ @@@ -8744,7 -8696,6 +8760,7 @@@ L: linux-pci@vger.kernel.or Q: http://patchwork.ozlabs.org/project/linux-pci/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git S: Supported +F: Documentation/devicetree/bindings/pci/ F: Documentation/PCI/ F: drivers/pci/ F: include/linux/pci* @@@ -8890,15 -8841,6 +8906,15 @@@ S: Maintaine F: Documentation/devicetree/bindings/pci/xgene-pci-msi.txt F: drivers/pci/host/pci-xgene-msi.c
+PCIE DRIVER FOR AXIS ARTPEC +M: Niklas Cassel niklas.cassel@axis.com +M: Jesper Nilsson jesper.nilsson@axis.com +L: linux-arm-kernel@axis.com +L: linux-pci@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/pci/axis,artpec* +F: drivers/pci/host/*artpec* + PCIE DRIVER FOR HISILICON M: Zhou Wang wangzhou1@hisilicon.com M: Gabriele Paoloni gabriele.paoloni@huawei.com @@@ -9022,7 -8964,6 +9038,7 @@@ L: linux-gpio@vger.kernel.or T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git S: Maintained F: Documentation/devicetree/bindings/pinctrl/ +F: Documentation/pinctrl.txt F: drivers/pinctrl/ F: include/linux/pinctrl/
@@@ -9930,8 -9871,7 +9946,8 @@@ S: Odd Fixe F: drivers/media/i2c/saa6588*
SAA7134 VIDEO4LINUX DRIVER -M: Mauro Carvalho Chehab mchehab@osg.samsung.com +M: Mauro Carvalho Chehab mchehab@s-opensource.com +M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git @@@ -10340,10 -10280,9 +10356,9 @@@ W: http://www.avagotech.co S: Supported F: drivers/scsi/be2iscsi/
- Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER + Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER (be2net) M: Sathya Perla sathya.perla@broadcom.com M: Ajit Khaparde ajit.khaparde@broadcom.com - M: Padmanabh Ratnakar padmanabh.ratnakar@broadcom.com M: Sriharsha Basavapatna sriharsha.basavapatna@broadcom.com M: Somnath Kotur somnath.kotur@broadcom.com L: netdev@vger.kernel.org @@@ -10450,8 -10389,7 +10465,8 @@@ S: Maintaine F: drivers/media/radio/si4713/radio-usb-si4713.c
SIANO DVB DRIVER -M: Mauro Carvalho Chehab mchehab@osg.samsung.com +M: Mauro Carvalho Chehab mchehab@s-opensource.com +M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git @@@ -11217,8 -11155,7 +11232,8 @@@ S: Maintaine F: drivers/media/i2c/tda9840*
TEA5761 TUNER DRIVER -M: Mauro Carvalho Chehab mchehab@osg.samsung.com +M: Mauro Carvalho Chehab mchehab@s-opensource.com +M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git @@@ -11226,8 -11163,7 +11241,8 @@@ S: Odd fixe F: drivers/media/tuners/tea5761.*
TEA5767 TUNER DRIVER -M: Mauro Carvalho Chehab mchehab@osg.samsung.com +M: Mauro Carvalho Chehab mchehab@s-opensource.com +M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git @@@ -11374,6 -11310,11 +11389,6 @@@ F: Documentation/thermal/cpu-cooling-ap F: drivers/thermal/cpu_cooling.c F: include/linux/cpu_cooling.h
-THINGM BLINK(1) USB RGB LED DRIVER -M: Vivien Didelot vivien.didelot@savoirfairelinux.com -S: Maintained -F: drivers/hid/hid-thingm.c - THINKPAD ACPI EXTRAS DRIVER M: Henrique de Moraes Holschuh ibm-acpi@hmh.eng.br L: ibm-acpi-devel@lists.sourceforge.net @@@ -11609,8 -11550,7 +11624,8 @@@ F: include/linux/shmem_fs. F: mm/shmem.c
TM6000 VIDEO4LINUX DRIVER -M: Mauro Carvalho Chehab mchehab@osg.samsung.com +M: Mauro Carvalho Chehab mchehab@s-opensource.com +M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git @@@ -11964,8 -11904,7 +11979,8 @@@ F: drivers/usb/common/usb-otg-fsm.
USB OVER IP DRIVER M: Valentina Manea valentina.manea.m@gmail.com -M: Shuah Khan shuah.kh@samsung.com +M: Shuah Khan shuahkh@osg.samsung.com +M: Shuah Khan shuah@kernel.org L: linux-usb@vger.kernel.org S: Maintained F: Documentation/usb/usbip_protocol.txt @@@ -12036,7 -11975,6 +12051,7 @@@ L: linux-usb@vger.kernel.or W: http://www.linux-usb.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git S: Supported +F: Documentation/devicetree/bindings/usb/ F: Documentation/usb/ F: drivers/usb/ F: include/linux/usb.h @@@ -12210,7 -12148,6 +12225,7 @@@ VIRTIO CORE, NET AND BLOCK DRIVER M: "Michael S. Tsirkin" mst@redhat.com L: virtualization@lists.linux-foundation.org S: Maintained +F: Documentation/devicetree/bindings/virtio/ F: drivers/virtio/ F: tools/virtio/ F: drivers/net/virtio_net.c @@@ -12599,8 -12536,7 +12614,8 @@@ S: Maintaine F: arch/x86/entry/vdso/
XC2028/3028 TUNER DRIVER -M: Mauro Carvalho Chehab mchehab@osg.samsung.com +M: Mauro Carvalho Chehab mchehab@s-opensource.com +M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git diff --combined arch/arm/boot/dts/am33xx.dtsi index af18c47,7fa2951..98748c6 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@@ -45,9 -45,19 +45,9 @@@ device_type = "cpu"; reg = <0>;
- /* - * To consider voltage drop between PMIC and SoC, - * tolerance value is reduced to 2% from 4% and - * voltage value is increased as a precaution. - */ - operating-points = < - /* kHz uV */ - 720000 1285000 - 600000 1225000 - 500000 1125000 - 275000 1125000 - >; - voltage-tolerance = <2>; /* 2 percentage */ + operating-points-v2 = <&cpu0_opp_table>; + ti,syscon-efuse = <&scm_conf 0x7fc 0x1fff 0>; + ti,syscon-rev = <&scm_conf 0x600>;
clocks = <&dpll_mpu_ck>; clock-names = "cpu"; @@@ -56,78 -66,6 +56,78 @@@ }; };
+ cpu0_opp_table: opp_table0 { + compatible = "operating-points-v2"; + + /* + * The three following nodes are marked with opp-suspend + * because they can not be enabled simultaneously on a + * single SoC. + */ + opp50@300000000 { + opp-hz = /bits/ 64 <300000000>; + opp-microvolt = <950000 931000 969000>; + opp-supported-hw = <0x06 0x0010>; + opp-suspend; + }; + + opp100@275000000 { + opp-hz = /bits/ 64 <275000000>; + opp-microvolt = <1100000 1078000 1122000>; + opp-supported-hw = <0x01 0x00FF>; + opp-suspend; + }; + + opp100@300000000 { + opp-hz = /bits/ 64 <300000000>; + opp-microvolt = <1100000 1078000 1122000>; + opp-supported-hw = <0x06 0x0020>; + opp-suspend; + }; + + opp100@500000000 { + opp-hz = /bits/ 64 <500000000>; + opp-microvolt = <1100000 1078000 1122000>; + opp-supported-hw = <0x01 0xFFFF>; + }; + + opp100@600000000 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <1100000 1078000 1122000>; + opp-supported-hw = <0x06 0x0040>; + }; + + opp120@600000000 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <1200000 1176000 1224000>; + opp-supported-hw = <0x01 0xFFFF>; + }; + + opp120@720000000 { + opp-hz = /bits/ 64 <720000000>; + opp-microvolt = <1200000 1176000 1224000>; + opp-supported-hw = <0x06 0x0080>; + }; + + oppturbo@720000000 { + opp-hz = /bits/ 64 <720000000>; + opp-microvolt = <1260000 1234800 1285200>; + opp-supported-hw = <0x01 0xFFFF>; + }; + + oppturbo@800000000 { + opp-hz = /bits/ 64 <800000000>; + opp-microvolt = <1260000 1234800 1285200>; + opp-supported-hw = <0x06 0x0100>; + }; + + oppnitro@1000000000 { + opp-hz = /bits/ 64 <1000000000>; + opp-microvolt = <1325000 1298500 1351500>; + opp-supported-hw = <0x04 0x0200>; + }; + }; + pmu { compatible = "arm,cortex-a8-pmu"; interrupts = <3>; @@@ -249,7 -187,7 +249,7 @@@ reg = <0x49000000 0x10000>; reg-names = "edma3_cc"; interrupts = <12 13 14>; - interrupt-names = "edma3_ccint", "emda3_mperr", + interrupt-names = "edma3_ccint", "edma3_mperr", "edma3_ccerrint"; dma-requests = <64>; #dma-cells = <2>; @@@ -741,24 -679,20 +741,24 @@@ 0x48300200 0x48300200 0x80>; /* EHRPWM */
ecap0: ecap@48300100 { - compatible = "ti,am33xx-ecap"; + compatible = "ti,am3352-ecap", + "ti,am33xx-ecap"; #pwm-cells = <3>; reg = <0x48300100 0x80>; + clocks = <&l4ls_gclk>; + clock-names = "fck"; interrupts = <31>; interrupt-names = "ecap0"; - ti,hwmods = "ecap0"; status = "disabled"; };
ehrpwm0: pwm@48300200 { - compatible = "ti,am33xx-ehrpwm"; + compatible = "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48300200 0x80>; - ti,hwmods = "ehrpwm0"; + clocks = <&ehrpwm0_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -775,24 -709,20 +775,24 @@@ 0x48302200 0x48302200 0x80>; /* EHRPWM */
ecap1: ecap@48302100 { - compatible = "ti,am33xx-ecap"; + compatible = "ti,am3352-ecap", + "ti,am33xx-ecap"; #pwm-cells = <3>; reg = <0x48302100 0x80>; + clocks = <&l4ls_gclk>; + clock-names = "fck"; interrupts = <47>; interrupt-names = "ecap1"; - ti,hwmods = "ecap1"; status = "disabled"; };
ehrpwm1: pwm@48302200 { - compatible = "ti,am33xx-ehrpwm"; + compatible = "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48302200 0x80>; - ti,hwmods = "ehrpwm1"; + clocks = <&ehrpwm1_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -809,24 -739,20 +809,24 @@@ 0x48304200 0x48304200 0x80>; /* EHRPWM */
ecap2: ecap@48304100 { - compatible = "ti,am33xx-ecap"; + compatible = "ti,am3352-ecap", + "ti,am33xx-ecap"; #pwm-cells = <3>; reg = <0x48304100 0x80>; + clocks = <&l4ls_gclk>; + clock-names = "fck"; interrupts = <61>; interrupt-names = "ecap2"; - ti,hwmods = "ecap2"; status = "disabled"; };
ehrpwm2: pwm@48304200 { - compatible = "ti,am33xx-ehrpwm"; + compatible = "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48304200 0x80>; - ti,hwmods = "ehrpwm2"; + clocks = <&ehrpwm2_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -840,7 -766,6 +840,6 @@@ ale_entries = <1024>; bd_ram_size = <0x2000>; no_bd_ram = <0>; - rx_descs = <64>; mac_control = <0x20>; slaves = <2>; active_slave = <0>; @@@ -863,7 -788,7 +862,7 @@@ status = "disabled";
davinci_mdio: mdio@4a101000 { - compatible = "ti,davinci_mdio"; + compatible = "ti,cpsw-mdio","ti,davinci_mdio"; #address-cells = <1>; #size-cells = <0>; ti,hwmods = "davinci_mdio"; diff --combined arch/arm/boot/dts/am4372.dtsi index e7b53ef,cd81ecf..0fadae5 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@@ -44,49 -44,10 +44,49 @@@ clocks = <&dpll_mpu_ck>; clock-names = "cpu";
+ operating-points-v2 = <&cpu0_opp_table>; + ti,syscon-efuse = <&scm_conf 0x610 0x3f 0>; + ti,syscon-rev = <&scm_conf 0x600>; + clock-latency = <300000>; /* From omap-cpufreq driver */ }; };
+ cpu0_opp_table: opp_table0 { + compatible = "operating-points-v2"; + + opp50@300000000 { + opp-hz = /bits/ 64 <300000000>; + opp-microvolt = <950000 931000 969000>; + opp-supported-hw = <0xFF 0x01>; + opp-suspend; + }; + + opp100@600000000 { + opp-hz = /bits/ 64 <600000000>; + opp-microvolt = <1100000 1078000 1122000>; + opp-supported-hw = <0xFF 0x04>; + }; + + opp120@720000000 { + opp-hz = /bits/ 64 <720000000>; + opp-microvolt = <1200000 1176000 1224000>; + opp-supported-hw = <0xFF 0x08>; + }; + + oppturbo@800000000 { + opp-hz = /bits/ 64 <800000000>; + opp-microvolt = <1260000 1234800 1285200>; + opp-supported-hw = <0xFF 0x10>; + }; + + oppnitro@1000000000 { + opp-hz = /bits/ 64 <1000000000>; + opp-microvolt = <1325000 1298500 1351500>; + opp-supported-hw = <0xFF 0x20>; + }; + }; + gic: interrupt-controller@48241000 { compatible = "arm,cortex-a9-gic"; interrupt-controller; @@@ -238,7 -199,7 +238,7 @@@ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "edma3_ccint", "emda3_mperr", + interrupt-names = "edma3_ccint", "edma3_mperr", "edma3_ccerrint"; dma-requests = <64>; #dma-cells = <2>; @@@ -665,7 -626,6 +665,6 @@@ ale_entries = <1024>; bd_ram_size = <0x2000>; no_bd_ram = <0>; - rx_descs = <64>; mac_control = <0x20>; slaves = <2>; active_slave = <0>; @@@ -675,7 -635,7 +674,7 @@@ syscon = <&scm_conf>;
davinci_mdio: mdio@4a101000 { - compatible = "ti,am4372-mdio","ti,davinci_mdio"; + compatible = "ti,am4372-mdio","ti,cpsw-mdio","ti,davinci_mdio"; reg = <0x4a101000 0x100>; #address-cells = <1>; #size-cells = <0>; @@@ -711,24 -671,18 +710,24 @@@ status = "disabled";
ecap0: ecap@48300100 { - compatible = "ti,am4372-ecap","ti,am33xx-ecap"; + compatible = "ti,am4372-ecap", + "ti,am3352-ecap", + "ti,am33xx-ecap"; #pwm-cells = <3>; reg = <0x48300100 0x80>; - ti,hwmods = "ecap0"; + clocks = <&l4ls_gclk>; + clock-names = "fck"; status = "disabled"; };
ehrpwm0: pwm@48300200 { - compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm"; + compatible = "ti,am4372-ehrpwm", + "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48300200 0x80>; - ti,hwmods = "ehrpwm0"; + clocks = <&ehrpwm0_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -743,24 -697,18 +742,24 @@@ status = "disabled";
ecap1: ecap@48302100 { - compatible = "ti,am4372-ecap","ti,am33xx-ecap"; + compatible = "ti,am4372-ecap", + "ti,am3352-ecap", + "ti,am33xx-ecap"; #pwm-cells = <3>; reg = <0x48302100 0x80>; - ti,hwmods = "ecap1"; + clocks = <&l4ls_gclk>; + clock-names = "fck"; status = "disabled"; };
ehrpwm1: pwm@48302200 { - compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm"; + compatible = "ti,am4372-ehrpwm", + "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48302200 0x80>; - ti,hwmods = "ehrpwm1"; + clocks = <&ehrpwm1_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -775,24 -723,18 +774,24 @@@ status = "disabled";
ecap2: ecap@48304100 { - compatible = "ti,am4372-ecap","ti,am33xx-ecap"; + compatible = "ti,am4372-ecap", + "ti,am3352-ecap", + "ti,am33xx-ecap"; #pwm-cells = <3>; reg = <0x48304100 0x80>; - ti,hwmods = "ecap2"; + clocks = <&l4ls_gclk>; + clock-names = "fck"; status = "disabled"; };
ehrpwm2: pwm@48304200 { - compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm"; + compatible = "ti,am4372-ehrpwm", + "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48304200 0x80>; - ti,hwmods = "ehrpwm2"; + clocks = <&ehrpwm2_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -807,13 -749,10 +806,13 @@@ status = "disabled";
ehrpwm3: pwm@48306200 { - compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm"; + compatible = "ti,am4372-ehrpwm", + "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48306200 0x80>; - ti,hwmods = "ehrpwm3"; + clocks = <&ehrpwm3_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -828,13 -767,10 +827,13 @@@ status = "disabled";
ehrpwm4: pwm@48308200 { - compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm"; + compatible = "ti,am4372-ehrpwm", + "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x48308200 0x80>; - ti,hwmods = "ehrpwm4"; + clocks = <&ehrpwm4_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -849,13 -785,10 +848,13 @@@ status = "disabled";
ehrpwm5: pwm@4830a200 { - compatible = "ti,am4372-ehrpwm","ti,am33xx-ehrpwm"; + compatible = "ti,am4372-ehrpwm", + "ti,am3352-ehrpwm", + "ti,am33xx-ehrpwm"; #pwm-cells = <3>; reg = <0x4830a200 0x80>; - ti,hwmods = "ehrpwm5"; + clocks = <&ehrpwm5_tbclk>, <&l4ls_gclk>; + clock-names = "tbclk", "fck"; status = "disabled"; }; }; @@@ -909,13 -842,6 +908,13 @@@ dma-names = "tx", "rx"; };
+ rng: rng@48310000 { + compatible = "ti,omap4-rng"; + ti,hwmods = "rng"; + reg = <0x48310000 0x2000>; + interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>; + }; + mcasp0: mcasp@48038000 { compatible = "ti,am33xx-mcasp-audio"; ti,hwmods = "mcasp0"; diff --combined arch/arm/boot/dts/dm814x.dtsi index a6e0aebc,f23cae0c..68e412c --- a/arch/arm/boot/dts/dm814x.dtsi +++ b/arch/arm/boot/dts/dm814x.dtsi @@@ -448,7 -448,7 +448,7 @@@ reg = <0x49000000 0x10000>; reg-names = "edma3_cc"; interrupts = <12 13 14>; - interrupt-names = "edma3_ccint", "emda3_mperr", + interrupt-names = "edma3_ccint", "edma3_mperr", "edma3_ccerrint"; dma-requests = <64>; #dma-cells = <2>; @@@ -509,7 -509,6 +509,6 @@@ ale_entries = <1024>; bd_ram_size = <0x2000>; no_bd_ram = <0>; - rx_descs = <64>; mac_control = <0x20>; slaves = <2>; active_slave = <0>; diff --combined arch/arm/boot/dts/dra7.dtsi index 40b69a5,f8b39a5..d9bfb94 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@@ -73,49 -73,6 +73,49 @@@ interrupt-parent = <&gic>; };
+ cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a15"; + reg = <0>; + + operating-points-v2 = <&cpu0_opp_table>; + ti,syscon-efuse = <&scm_wkup 0x20c 0xf80000 19>; + ti,syscon-rev = <&scm_wkup 0x204>; + + clocks = <&dpll_mpu_ck>; + clock-names = "cpu"; + + clock-latency = <300000>; /* From omap-cpufreq driver */ + + /* cooling options */ + cooling-min-level = <0>; + cooling-max-level = <2>; + #cooling-cells = <2>; /* min followed by max */ + }; + }; + + cpu0_opp_table: opp_table0 { + compatible = "operating-points-v2"; + opp-shared; + + opp_nom@1000000000 { + opp-hz = /bits/ 64 <1000000000>; + opp-microvolt = <1060000 850000 1150000>; + opp-supported-hw = <0xFF 0x01>; + opp-suspend; + }; + + opp_od@1176000000 { + opp-hz = /bits/ 64 <1176000000>; + opp-microvolt = <1160000 885000 1160000>; + opp-supported-hw = <0xFF 0x02>; + }; + }; + /* * The soc node represents the soc top level view. It is used for IPs * that are not memory mapped in the MPU view or for the MPU itself. @@@ -276,11 -233,6 +276,11 @@@ prm_clockdomains: clockdomains { }; }; + + scm_wkup: scm_conf@c000 { + compatible = "syscon"; + reg = <0xc000 0x1000>; + }; };
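The opp-supported-hw cells in the table above pair with the new ti,syscon-efuse and ti,syscon-rev entries: platform code derives one value per cell level from those registers, and an OPP stays available only if every cell ANDs non-zero with the matching value. A minimal sketch of that per-cell rule, assuming the two-cell layout used here (it mirrors the OPP core's matching semantics, not the kernel source):

    #include <stdbool.h>
    #include <stdint.h>

    /* hw[]: per-level values derived from the revision/efuse registers;
     * cells[]: the opp-supported-hw cells of one OPP node. */
    static bool opp_is_supported(const uint32_t hw[2], const uint32_t cells[2])
    {
        for (int i = 0; i < 2; i++)
            if (!(hw[i] & cells[i]))   /* no bit in common: OPP filtered out */
                return false;
        return true;
    }

With a hypothetical hw of {0x02, 0x01}, only opp_nom@1000000000 (<0xFF 0x01>) passes; opp_od@1176000000 fails on its second cell.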
axi@0 { @@@ -324,7 -276,7 +324,7 @@@ ranges = <0x51800000 0x51800000 0x3000 0x0 0x30000000 0x10000000>; status = "disabled"; - pcie@51000000 { + pcie@51800000 { compatible = "ti,dra7-pcie"; reg = <0x51800000 0x2000>, <0x51802000 0x14c>, <0x1000 0x2000>; reg-names = "rc_dbics", "ti_conf", "config"; @@@ -352,53 -304,6 +352,53 @@@ }; };
+ ocmcram1: ocmcram@40300000 { + compatible = "mmio-sram"; + reg = <0x40300000 0x80000>; + ranges = <0x0 0x40300000 0x80000>; + #address-cells = <1>; + #size-cells = <1>; + /* + * This is a placeholder for an optional reserved + * region for use by secure software. The size + * of this region is not known until runtime so it + * is set as zero to either be updated to reserve + * space or left unchanged to leave all SRAM for use. + * On HS parts that require the reserved region + * either the bootloader can update the size to + * the required amount or the node can be overridden + * from the board dts file for the secure platform. + */ + sram-hs@0 { + compatible = "ti,secure-ram"; + reg = <0x0 0x0>; + }; + }; + + /* + * NOTE: ocmcram2 and ocmcram3 are not available on all + * DRA7xx and AM57xx variants. Confirm availability in + * the data manual for the exact part number in use + * before enabling these nodes in the board dts file. + */ + ocmcram2: ocmcram@40400000 { + status = "disabled"; + compatible = "mmio-sram"; + reg = <0x40400000 0x100000>; + ranges = <0x0 0x40400000 0x100000>; + #address-cells = <1>; + #size-cells = <1>; + }; + + ocmcram3: ocmcram@40500000 { + status = "disabled"; + compatible = "mmio-sram"; + reg = <0x40500000 0x100000>; + ranges = <0x0 0x40500000 0x100000>; + #address-cells = <1>; + #size-cells = <1>; + }; + bandgap: bandgap@4a0021e0 { reg = <0x4a0021e0 0xc 0x4a00232c 0xc @@@ -436,7 -341,7 +436,7 @@@ interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>, - interrupt-names = "edma3_ccint", "emda3_mperr", + interrupt-names = "edma3_ccint", "edma3_mperr", "edma3_ccerrint"; dma-requests = <64>; #dma-cells = <2>; @@@ -1546,8 -1451,6 +1546,8 @@@ ti,hwmods = "gpmc"; reg = <0x50000000 0x37c>; /* device IO registers */ interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; + dmas = <&edma_xbar 4 0>; + dma-names = "rxtx"; gpmc,num-cs = <8>; gpmc,num-waitpins = <2>; #address-cells = <2>; @@@ -1723,7 -1626,6 +1723,6 @@@ ale_entries = <1024>; bd_ram_size = <0x2000>; no_bd_ram = <0>; - rx_descs = <64>; mac_control = <0x20>; slaves = <2>; active_slave = <0>; @@@ -1758,7 -1660,7 +1757,7 @@@ status = "disabled";
davinci_mdio: mdio@48485000 { - compatible = "ti,davinci_mdio"; + compatible = "ti,cpsw-mdio","ti,davinci_mdio"; #address-cells = <1>; #size-cells = <0>; ti,hwmods = "davinci_mdio"; @@@ -1840,149 -1742,6 +1839,149 @@@ clock-names = "fck", "sys_clk"; }; }; + + epwmss0: epwmss@4843e000 { + compatible = "ti,dra746-pwmss", "ti,am33xx-pwmss"; + reg = <0x4843e000 0x30>; + ti,hwmods = "epwmss0"; + #address-cells = <1>; + #size-cells = <1>; + status = "disabled"; + ranges; + + ehrpwm0: pwm@4843e200 { + compatible = "ti,dra746-ehrpwm", + "ti,am3352-ehrpwm"; + #pwm-cells = <3>; + reg = <0x4843e200 0x80>; + clocks = <&ehrpwm0_tbclk>, <&l4_root_clk_div>; + clock-names = "tbclk", "fck"; + status = "disabled"; + }; + + ecap0: ecap@4843e100 { + compatible = "ti,dra746-ecap", + "ti,am3352-ecap"; + #pwm-cells = <3>; + reg = <0x4843e100 0x80>; + clocks = <&l4_root_clk_div>; + clock-names = "fck"; + status = "disabled"; + }; + }; + + epwmss1: epwmss@48440000 { + compatible = "ti,dra746-pwmss", "ti,am33xx-pwmss"; + reg = <0x48440000 0x30>; + ti,hwmods = "epwmss1"; + #address-cells = <1>; + #size-cells = <1>; + status = "disabled"; + ranges; + + ehrpwm1: pwm@48440200 { + compatible = "ti,dra746-ehrpwm", + "ti,am3352-ehrpwm"; + #pwm-cells = <3>; + reg = <0x48440200 0x80>; + clocks = <&ehrpwm1_tbclk>, <&l4_root_clk_div>; + clock-names = "tbclk", "fck"; + status = "disabled"; + }; + + ecap1: ecap@48440100 { + compatible = "ti,dra746-ecap", + "ti,am3352-ecap"; + #pwm-cells = <3>; + reg = <0x48440100 0x80>; + clocks = <&l4_root_clk_div>; + clock-names = "fck"; + status = "disabled"; + }; + }; + + epwmss2: epwmss@48442000 { + compatible = "ti,dra746-pwmss", "ti,am33xx-pwmss"; + reg = <0x48442000 0x30>; + ti,hwmods = "epwmss2"; + #address-cells = <1>; + #size-cells = <1>; + status = "disabled"; + ranges; + + ehrpwm2: pwm@48442200 { + compatible = "ti,dra746-ehrpwm", + "ti,am3352-ehrpwm"; + #pwm-cells = <3>; + reg = <0x48442200 0x80>; + clocks = <&ehrpwm2_tbclk>, <&l4_root_clk_div>; + clock-names = "tbclk", "fck"; + status = "disabled"; + }; + + ecap2: ecap@48442100 { + compatible = "ti,dra746-ecap", + "ti,am3352-ecap"; + #pwm-cells = <3>; + reg = <0x48442100 0x80>; + clocks = <&l4_root_clk_div>; + clock-names = "fck"; + status = "disabled"; + }; + }; + + aes1: aes@4b500000 { + compatible = "ti,omap4-aes"; + ti,hwmods = "aes1"; + reg = <0x4b500000 0xa0>; + interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>; + dmas = <&edma_xbar 111 0>, <&edma_xbar 110 0>; + dma-names = "tx", "rx"; + clocks = <&l3_iclk_div>; + clock-names = "fck"; + }; + + aes2: aes@4b700000 { + compatible = "ti,omap4-aes"; + ti,hwmods = "aes2"; + reg = <0x4b700000 0xa0>; + interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>; + dmas = <&edma_xbar 114 0>, <&edma_xbar 113 0>; + dma-names = "tx", "rx"; + clocks = <&l3_iclk_div>; + clock-names = "fck"; + }; + + des: des@480a5000 { + compatible = "ti,omap4-des"; + ti,hwmods = "des"; + reg = <0x480a5000 0xa0>; + interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>; + dmas = <&sdma_xbar 117>, <&sdma_xbar 116>; + dma-names = "tx", "rx"; + clocks = <&l3_iclk_div>; + clock-names = "fck"; + }; + + sham: sham@53100000 { + compatible = "ti,omap5-sham"; + ti,hwmods = "sham"; + reg = <0x4b101000 0x300>; + interrupts = <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>; + dmas = <&edma_xbar 119 0>; + dma-names = "rx"; + clocks = <&l3_iclk_div>; + clock-names = "fck"; + }; + + rng: rng@48090000 { + compatible = "ti,omap4-rng"; + ti,hwmods = "rng"; + reg = <0x48090000 0x2000>; + interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&l3_iclk_div>; + 
clock-names = "fck"; + }; };
thermal_zones: thermal-zones { diff --combined arch/arm/boot/dts/rk3288.dtsi index 7fa932f,3ebee53..cd33f01 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi @@@ -539,8 -539,9 +539,9 @@@ gmac: ethernet@ff290000 { compatible = "rockchip,rk3288-gmac"; reg = <0xff290000 0x10000>; - interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "macirq"; + interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "macirq", "eth_wake_irq"; rockchip,grf = <&grf>; clocks = <&cru SCLK_MAC>, <&cru SCLK_MAC_RX>, <&cru SCLK_MAC_TX>, @@@ -826,11 -827,6 +827,11 @@@ #phy-cells = <0>; status = "disabled"; }; + + io_domains: io-domains { + compatible = "rockchip,rk3288-io-voltage-domain"; + status = "disabled"; + }; };
wdt: watchdog@ff800000 { diff --combined arch/arm64/boot/dts/broadcom/ns2-svk.dts index b062a44,ea5603f..0862b3e --- a/arch/arm64/boot/dts/broadcom/ns2-svk.dts +++ b/arch/arm64/boot/dts/broadcom/ns2-svk.dts @@@ -40,14 -40,10 +40,14 @@@
aliases { serial0 = &uart3; + serial1 = &uart0; + serial2 = &uart1; + serial3 = &uart2; };
chosen { stdout-path = "serial0:115200n8"; + bootargs = "earlycon=uart8250,mmio32,0x66130000"; };
memory { @@@ -56,6 -52,14 +56,14 @@@ }; };
+ &pci_phy0 { + status = "ok"; + }; + + &pci_phy1 { + status = "ok"; + }; + &pcie0 { status = "ok"; }; @@@ -72,18 -76,6 +80,18 @@@ status = "ok"; };
+&uart0 { + status = "ok"; +}; + +&uart1 { + status = "ok"; +}; + +&uart2 { + status = "ok"; +}; + &uart3 { status = "ok"; }; @@@ -133,18 -125,6 +141,18 @@@ }; };
+&sata_phy0 { + status = "ok"; +}; + +&sata_phy1 { + status = "ok"; +}; + +&sata { + status = "ok"; +}; + &sdio0 { status = "ok"; }; @@@ -161,11 -141,10 +169,19 @@@ }; };
+&pinctrl { + pinctrl-names = "default"; + pinctrl-0 = <&nand_sel>; + nand_sel: nand_sel { + function = "nand"; + groups = "nand_grp"; + }; +}; ++ + &mdio_mux_iproc { + mdio@10 { + gphy0: eth-phy@10 { + reg = <0x10>; + }; + }; + }; diff --combined arch/arm64/boot/dts/broadcom/ns2.dtsi index d1dc812,46b78fa..f53b095 --- a/arch/arm64/boot/dts/broadcom/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi @@@ -251,22 -251,6 +251,22 @@@ mmu-masters; };
+ pinctrl: pinctrl@6501d130 { + compatible = "brcm,ns2-pinmux"; + reg = <0x6501d130 0x08>, + <0x660a0028 0x04>, + <0x660009b0 0x40>; + }; + + gpio_aon: gpio@65024800 { + compatible = "brcm,iproc-gpio"; + reg = <0x65024800 0x50>, + <0x65024008 0x18>; + ngpios = <6>; + #gpio-cells = <2>; + gpio-controller; + }; + gic: interrupt-controller@65210000 { compatible = "arm,gic-400"; #interrupt-cells = <3>; @@@ -279,26 -263,45 +279,65 @@@ IRQ_TYPE_LEVEL_HIGH)>; };
+ cci@65590000 { + compatible = "arm,cci-400"; + #address-cells = <1>; + #size-cells = <1>; + reg = <0x65590000 0x1000>; + ranges = <0 0x65590000 0x10000>; + + pmu@9000 { + compatible = "arm,cci-400-pmu,r1", + "arm,cci-400-pmu"; + reg = <0x9000 0x4000>; + interrupts = <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 348 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 349 IRQ_TYPE_LEVEL_HIGH>; + }; + }; + + mdio_mux_iproc: mdio-mux@6602023c { + compatible = "brcm,mdio-mux-iproc"; + reg = <0x6602023c 0x14>; + #address-cells = <1>; + #size-cells = <0>; + + mdio@0 { + reg = <0x0>; + #address-cells = <1>; + #size-cells = <0>; + + pci_phy0: pci-phy@0 { + compatible = "brcm,ns2-pcie-phy"; + reg = <0x0>; + #phy-cells = <0>; + status = "disabled"; + }; + }; + + mdio@7 { + reg = <0x7>; + #address-cells = <1>; + #size-cells = <0>; + + pci_phy1: pci-phy@0 { + compatible = "brcm,ns2-pcie-phy"; + reg = <0x0>; + #phy-cells = <0>; + status = "disabled"; + }; + }; + + mdio@10 { + reg = <0x10>; + #address-cells = <1>; + #size-cells = <0>; + }; + }; + timer0: timer@66030000 { compatible = "arm,sp804", "arm,primecell"; reg = <0x66030000 0x1000>; @@@ -357,16 -360,6 +396,16 @@@ clock-names = "wdogclk", "apb_pclk"; };
+ gpio_g: gpio@660a0000 { + compatible = "brcm,iproc-gpio"; + reg = <0x660a0000 0x50>; + ngpios = <32>; + #gpio-cells = <2>; + gpio-controller; + interrupt-controller; + interrupts = <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>; + }; + i2c1: i2c@660b0000 { compatible = "brcm,iproc-i2c"; reg = <0x660b0000 0x100>; @@@ -377,36 -370,6 +416,36 @@@ status = "disabled"; };
+ uart0: serial@66100000 { + compatible = "snps,dw-apb-uart"; + reg = <0x66100000 0x100>; + interrupts = <GIC_SPI 390 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&iprocslow>; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + uart1: serial@66110000 { + compatible = "snps,dw-apb-uart"; + reg = <0x66110000 0x100>; + interrupts = <GIC_SPI 391 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&iprocslow>; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + uart2: serial@66120000 { + compatible = "snps,dw-apb-uart"; + reg = <0x66120000 0x100>; + interrupts = <GIC_SPI 392 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&iprocslow>; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + uart3: serial@66130000 { compatible = "snps,dw-apb-uart"; reg = <0x66130000 0x100>; @@@ -444,49 -407,6 +483,49 @@@ reg = <0x66220000 0x28>; };
+ sata_phy: sata_phy@663f0100 { + compatible = "brcm,iproc-ns2-sata-phy"; + reg = <0x663f0100 0x1f00>, + <0x663f004c 0x10>; + reg-names = "phy", "phy-ctrl"; + #address-cells = <1>; + #size-cells = <0>; + + sata_phy0: sata-phy@0 { + reg = <0>; + #phy-cells = <0>; + status = "disabled"; + }; + + sata_phy1: sata-phy@1 { + reg = <1>; + #phy-cells = <0>; + status = "disabled"; + }; + }; + + sata: ahci@663f2000 { + compatible = "brcm,iproc-ahci", "generic-ahci"; + reg = <0x663f2000 0x1000>; + reg-names = "ahci"; + interrupts = <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>; + #address-cells = <1>; + #size-cells = <0>; + status = "disabled"; + + sata0: sata-port@0 { + reg = <0>; + phys = <&sata_phy0>; + phy-names = "sata-phy"; + }; + + sata1: sata-port@1 { + reg = <1>; + phys = <&sata_phy1>; + phy-names = "sata-phy"; + }; + }; + sdio0: sdhci@66420000 { compatible = "brcm,sdhci-iproc-cygnus"; reg = <0x66420000 0x100>; diff --combined drivers/net/can/dev.c index ad535a8,7188137..e21f7cc --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@@ -69,6 -69,7 +69,7 @@@ EXPORT_SYMBOL_GPL(can_len2dlc)
#ifdef CONFIG_CAN_CALC_BITTIMING #define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */ + #define CAN_CALC_SYNC_SEG 1
/* * Bit-timing calculation derived from: @@@ -83,98 -84,126 +84,126 @@@ * registers of the CAN controller. You can find more information * in the header file linux/can/netlink.h. */ - static int can_update_spt(const struct can_bittiming_const *btc, - int sampl_pt, int tseg, int *tseg1, int *tseg2) + static int can_update_sample_point(const struct can_bittiming_const *btc, + unsigned int sample_point_nominal, unsigned int tseg, + unsigned int *tseg1_ptr, unsigned int *tseg2_ptr, + unsigned int *sample_point_error_ptr) { - *tseg2 = tseg + 1 - (sampl_pt * (tseg + 1)) / 1000; - if (*tseg2 < btc->tseg2_min) - *tseg2 = btc->tseg2_min; - if (*tseg2 > btc->tseg2_max) - *tseg2 = btc->tseg2_max; - *tseg1 = tseg - *tseg2; - if (*tseg1 > btc->tseg1_max) { - *tseg1 = btc->tseg1_max; - *tseg2 = tseg - *tseg1; + unsigned int sample_point_error, best_sample_point_error = UINT_MAX; + unsigned int sample_point, best_sample_point = 0; + unsigned int tseg1, tseg2; + int i; + + for (i = 0; i <= 1; i++) { + tseg2 = tseg + CAN_CALC_SYNC_SEG - (sample_point_nominal * (tseg + CAN_CALC_SYNC_SEG)) / 1000 - i; + tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max); + tseg1 = tseg - tseg2; + if (tseg1 > btc->tseg1_max) { + tseg1 = btc->tseg1_max; + tseg2 = tseg - tseg1; + } + + sample_point = 1000 * (tseg + CAN_CALC_SYNC_SEG - tseg2) / (tseg + CAN_CALC_SYNC_SEG); + sample_point_error = abs(sample_point_nominal - sample_point); + + if ((sample_point <= sample_point_nominal) && (sample_point_error < best_sample_point_error)) { + best_sample_point = sample_point; + best_sample_point_error = sample_point_error; + *tseg1_ptr = tseg1; + *tseg2_ptr = tseg2; + } } - return 1000 * (tseg + 1 - *tseg2) / (tseg + 1); + + if (sample_point_error_ptr) + *sample_point_error_ptr = best_sample_point_error; + + return best_sample_point; }
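The arithmetic the rewritten helpers implement is compact: with the one sync quantum now named CAN_CALC_SYNC_SEG, the real bitrate is clock / (brp * (sync + tseg1 + tseg2)) and the sample point sits after sync + tseg1 time quanta. A standalone worked example with assumed values (24 MHz CAN clock, brp 3, tseg1 13, tseg2 2 -- chosen for illustration, not taken from any particular controller); it lands exactly on the 87.5% point the CiA recommendation in the code below uses for 500 kbit/s:

    /* worked example of the bit-timing formulas above; all values assumed */
    #include <stdio.h>

    int main(void)
    {
        unsigned int clock = 24000000;  /* CAN clock in Hz (assumed) */
        unsigned int brp = 3;           /* bit-rate prescaler */
        unsigned int tseg1 = 13;        /* prop_seg + phase_seg1 */
        unsigned int tseg2 = 2;         /* phase_seg2 */
        unsigned int sync = 1;          /* CAN_CALC_SYNC_SEG */
        unsigned int total = sync + tseg1 + tseg2;   /* 16 tq per bit */

        unsigned int bitrate = clock / (brp * total);
        unsigned int sample_point = 1000 * (sync + tseg1) / total;

        /* prints: bitrate 500000 bit/s, sample point 87.5% */
        printf("bitrate %u bit/s, sample point %u.%u%%\n",
               bitrate, sample_point / 10, sample_point % 10);
        return 0;
    }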
static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, const struct can_bittiming_const *btc) { struct can_priv *priv = netdev_priv(dev); - long best_error = 1000000000, error = 0; - int best_tseg = 0, best_brp = 0, brp = 0; - int tsegall, tseg = 0, tseg1 = 0, tseg2 = 0; - int spt_error = 1000, spt = 0, sampl_pt; - long rate; + unsigned int bitrate; /* current bitrate */ + unsigned int bitrate_error; /* difference between current and nominal value */ + unsigned int best_bitrate_error = UINT_MAX; + unsigned int sample_point_error; /* difference between current and nominal value */ + unsigned int best_sample_point_error = UINT_MAX; + unsigned int sample_point_nominal; /* nominal sample point */ + unsigned int best_tseg = 0; /* current best value for tseg */ + unsigned int best_brp = 0; /* current best value for brp */ + unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0; u64 v64;
/* Use CiA recommended sample points */ if (bt->sample_point) { - sampl_pt = bt->sample_point; + sample_point_nominal = bt->sample_point; } else { if (bt->bitrate > 800000) - sampl_pt = 750; + sample_point_nominal = 750; else if (bt->bitrate > 500000) - sampl_pt = 800; + sample_point_nominal = 800; else - sampl_pt = 875; + sample_point_nominal = 875; }
/* tseg even = round down, odd = round up */ for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1; tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) { - tsegall = 1 + tseg / 2; + tsegall = CAN_CALC_SYNC_SEG + tseg / 2; + /* Compute all possible tseg choices (tseg=tseg1+tseg2) */ brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2; - /* chose brp step which is possible in system */ + + /* choose brp step which is possible in system */ brp = (brp / btc->brp_inc) * btc->brp_inc; if ((brp < btc->brp_min) || (brp > btc->brp_max)) continue; - rate = priv->clock.freq / (brp * tsegall); - error = bt->bitrate - rate; + + bitrate = priv->clock.freq / (brp * tsegall); + bitrate_error = abs(bt->bitrate - bitrate); + /* tseg brp biterror */ - if (error < 0) - error = -error; - if (error > best_error) + if (bitrate_error > best_bitrate_error) continue; - best_error = error; - if (error == 0) { - spt = can_update_spt(btc, sampl_pt, tseg / 2, - &tseg1, &tseg2); - error = sampl_pt - spt; - if (error < 0) - error = -error; - if (error > spt_error) - continue; - spt_error = error; - } + + /* reset sample point error if we have a better bitrate */ + if (bitrate_error < best_bitrate_error) + best_sample_point_error = UINT_MAX; + + can_update_sample_point(btc, sample_point_nominal, tseg / 2, &tseg1, &tseg2, &sample_point_error); + if (sample_point_error > best_sample_point_error) + continue; + + best_sample_point_error = sample_point_error; + best_bitrate_error = bitrate_error; best_tseg = tseg / 2; best_brp = brp; - if (error == 0) + + if (bitrate_error == 0 && sample_point_error == 0) break; }
- if (best_error) { + if (best_bitrate_error) { /* Error in one-tenth of a percent */ - error = (best_error * 1000) / bt->bitrate; - if (error > CAN_CALC_MAX_ERROR) { + v64 = (u64)best_bitrate_error * 1000; + do_div(v64, bt->bitrate); + bitrate_error = (u32)v64; + if (bitrate_error > CAN_CALC_MAX_ERROR) { netdev_err(dev, - "bitrate error %ld.%ld%% too high\n", - error / 10, error % 10); + "bitrate error %d.%d%% too high\n", + bitrate_error / 10, bitrate_error % 10); return -EDOM; - } else { - netdev_warn(dev, "bitrate error %ld.%ld%%\n", - error / 10, error % 10); } + netdev_warn(dev, "bitrate error %d.%d%%\n", + bitrate_error / 10, bitrate_error % 10); }
/* real sample point */ - bt->sample_point = can_update_spt(btc, sampl_pt, best_tseg, - &tseg1, &tseg2); + bt->sample_point = can_update_sample_point(btc, sample_point_nominal, best_tseg, + &tseg1, &tseg2, NULL);
- v64 = (u64)best_brp * 1000000000UL; + v64 = (u64)best_brp * 1000 * 1000 * 1000; do_div(v64, priv->clock.freq); bt->tq = (u32)v64; bt->prop_seg = tseg1 / 2; @@@ -182,9 -211,9 +211,9 @@@ bt->phase_seg2 = tseg2;
/* check for sjw user settings */ - if (!bt->sjw || !btc->sjw_max) + if (!bt->sjw || !btc->sjw_max) { bt->sjw = 1; - else { + } else { /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */ if (bt->sjw > btc->sjw_max) bt->sjw = btc->sjw_max; @@@ -194,8 -223,9 +223,9 @@@ }
bt->brp = best_brp; - /* real bit-rate */ - bt->bitrate = priv->clock.freq / (bt->brp * (tseg1 + tseg2 + 1)); + + /* real bitrate */ + bt->bitrate = priv->clock.freq / (bt->brp * (CAN_CALC_SYNC_SEG + tseg1 + tseg2));
return 0; } @@@ -798,9 -828,6 +828,9 @@@ static int can_validate(struct nlattr * * - control mode with CAN_CTRLMODE_FD set */
+ if (!data) + return 0; + if (data[IFLA_CAN_CTRLMODE]) { struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
@@@ -1011,11 -1038,6 +1041,11 @@@ static int can_newlink(struct net *src_ return -EOPNOTSUPP; }
+static void can_dellink(struct net_device *dev, struct list_head *head) +{ + return; +} + static struct rtnl_link_ops can_link_ops __read_mostly = { .kind = "can", .maxtype = IFLA_CAN_MAX, @@@ -1024,7 -1046,6 +1054,7 @@@ .validate = can_validate, .newlink = can_newlink, .changelink = can_changelink, + .dellink = can_dellink, .get_size = can_get_size, .fill_info = can_fill_info, .get_xstats_size = can_get_xstats_size, diff --combined drivers/net/can/usb/gs_usb.c index acb0c84,3607643..6f0cbc3 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@@ -1,9 -1,7 +1,9 @@@ -/* CAN driver for Geschwister Schneider USB/CAN devices. +/* CAN driver for Geschwister Schneider USB/CAN devices + * and bytewerk.org candleLight USB CAN interfaces. * - * Copyright (C) 2013 Geschwister Schneider Technologie-, + * Copyright (C) 2013-2016 Geschwister Schneider Technologie-, * Entwicklungs- und Vertriebs UG (Haftungsbeschränkt). + * Copyright (C) 2016 Hubert Denkmair * * Many thanks to all socketcan devs! * @@@ -31,9 -29,6 +31,9 @@@ #define USB_GSUSB_1_VENDOR_ID 0x1d50 #define USB_GSUSB_1_PRODUCT_ID 0x606f
+#define USB_CANDLELIGHT_VENDOR_ID 0x1209 +#define USB_CANDLELIGHT_PRODUCT_ID 0x2323 + #define GSUSB_ENDPOINT_IN 1 #define GSUSB_ENDPOINT_OUT 2
@@@ -44,7 -39,9 +44,9 @@@ enum gs_usb_breq GS_USB_BREQ_MODE, GS_USB_BREQ_BERR, GS_USB_BREQ_BT_CONST, - GS_USB_BREQ_DEVICE_CONFIG + GS_USB_BREQ_DEVICE_CONFIG, + GS_USB_BREQ_TIMESTAMP, + GS_USB_BREQ_IDENTIFY, };
enum gs_can_mode { @@@ -63,6 -60,11 +65,11 @@@ enum gs_can_state GS_CAN_STATE_SLEEPING };
+ enum gs_can_identify_mode { + GS_CAN_IDENTIFY_OFF = 0, + GS_CAN_IDENTIFY_ON + }; + /* data types passed between host and device */ struct gs_host_config { u32 byte_order; @@@ -82,10 -84,10 +89,10 @@@ struct gs_device_config } __packed;
#define GS_CAN_MODE_NORMAL 0 - #define GS_CAN_MODE_LISTEN_ONLY (1<<0) - #define GS_CAN_MODE_LOOP_BACK (1<<1) - #define GS_CAN_MODE_TRIPLE_SAMPLE (1<<2) - #define GS_CAN_MODE_ONE_SHOT (1<<3) + #define GS_CAN_MODE_LISTEN_ONLY BIT(0) + #define GS_CAN_MODE_LOOP_BACK BIT(1) + #define GS_CAN_MODE_TRIPLE_SAMPLE BIT(2) + #define GS_CAN_MODE_ONE_SHOT BIT(3)
struct gs_device_mode { u32 mode; @@@ -106,10 -108,16 +113,16 @@@ struct gs_device_bittiming u32 brp; } __packed;
- #define GS_CAN_FEATURE_LISTEN_ONLY (1<<0) - #define GS_CAN_FEATURE_LOOP_BACK (1<<1) - #define GS_CAN_FEATURE_TRIPLE_SAMPLE (1<<2) - #define GS_CAN_FEATURE_ONE_SHOT (1<<3) + struct gs_identify_mode { + u32 mode; + } __packed; + + #define GS_CAN_FEATURE_LISTEN_ONLY BIT(0) + #define GS_CAN_FEATURE_LOOP_BACK BIT(1) + #define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2) + #define GS_CAN_FEATURE_ONE_SHOT BIT(3) + #define GS_CAN_FEATURE_HW_TIMESTAMP BIT(4) + #define GS_CAN_FEATURE_IDENTIFY BIT(5)
struct gs_device_bt_const { u32 feature; @@@ -214,7 -222,8 +227,8 @@@ static void gs_free_tx_context(struct g
/* Get a tx context by id. */ - static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev, unsigned int id) + static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev, + unsigned int id) { unsigned long flags;
@@@ -457,7 -466,8 +471,8 @@@ static void gs_usb_xmit_callback(struc netif_wake_queue(netdev); }
- static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, struct net_device *netdev) + static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, + struct net_device *netdev) { struct gs_can *dev = netdev_priv(netdev); struct net_device_stats *stats = &dev->netdev->stats; @@@ -663,7 -673,8 +678,8 @@@ static int gs_can_open(struct net_devic rc = usb_control_msg(interface_to_usbdev(dev->iface), usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0), GS_USB_BREQ_MODE, - USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, + USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_INTERFACE, dev->channel, 0, dm, @@@ -726,7 -737,59 +742,59 @@@ static const struct net_device_ops gs_u .ndo_change_mtu = can_change_mtu, };
- static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface *intf) + static int gs_usb_set_identify(struct net_device *netdev, bool do_identify) + { + struct gs_can *dev = netdev_priv(netdev); + struct gs_identify_mode imode; + int rc; + + if (do_identify) + imode.mode = GS_CAN_IDENTIFY_ON; + else + imode.mode = GS_CAN_IDENTIFY_OFF; + + rc = usb_control_msg(interface_to_usbdev(dev->iface), + usb_sndctrlpipe(interface_to_usbdev(dev->iface), + 0), + GS_USB_BREQ_IDENTIFY, + USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_INTERFACE, + dev->channel, + 0, + &imode, + sizeof(imode), + 100); + + return (rc > 0) ? 0 : rc; + } + + /* blink LEDs for finding this interface */ + static int gs_usb_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) + { + int rc = 0; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_ON); + break; + case ETHTOOL_ID_INACTIVE: + rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_OFF); + break; + default: + break; + } + + return rc; + } + + static const struct ethtool_ops gs_usb_ethtool_ops = { + .set_phys_id = gs_usb_set_phys_id, + }; + + static struct gs_can *gs_make_candev(unsigned int channel, + struct usb_interface *intf, + struct gs_device_config *dconf) { struct gs_can *dev; struct net_device *netdev; @@@ -814,10 -877,14 +882,14 @@@ if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT) dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
- kfree(bt_const); - SET_NETDEV_DEV(netdev, &intf->dev);
+ if (dconf->sw_version > 1) + if (bt_const->feature & GS_CAN_FEATURE_IDENTIFY) + netdev->ethtool_ops = &gs_usb_ethtool_ops; + + kfree(bt_const); + rc = register_candev(dev->netdev); if (rc) { free_candev(dev->netdev); @@@ -835,19 -902,16 +907,16 @@@ static void gs_destroy_candev(struct gs free_candev(dev->netdev); }
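Because the identify feature is wired to ethtool's .set_phys_id, userspace needs nothing device-specific once the firmware advertises GS_CAN_FEATURE_IDENTIFY: "ethtool -p can0 5" blinks the LED for five seconds. The same request through the legacy ioctl, as a minimal sketch (the interface name "can0" is an assumption):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <net/if.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
        struct ethtool_value ev = { .cmd = ETHTOOL_PHYS_ID, .data = 5 };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
            perror("socket");
            return 1;
        }
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);  /* assumed ifname */
        ifr.ifr_data = (char *)&ev;

        /* the ethtool core drives .set_phys_id for ev.data seconds */
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
            perror("SIOCETHTOOL");
        close(fd);
        return 0;
    }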
- static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) + static int gs_usb_probe(struct usb_interface *intf, + const struct usb_device_id *id) { struct gs_usb *dev; int rc = -ENOMEM; unsigned int icount, i; - struct gs_host_config *hconf; - struct gs_device_config *dconf; - - hconf = kmalloc(sizeof(*hconf), GFP_KERNEL); - if (!hconf) - return -ENOMEM; - - hconf->byte_order = 0x0000beef; + struct gs_host_config hconf = { + .byte_order = 0x0000beef, + }; + struct gs_device_config dconf;
/* send host config */ rc = usb_control_msg(interface_to_usbdev(intf), @@@ -856,22 -920,16 +925,16 @@@ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 1, intf->altsetting[0].desc.bInterfaceNumber, - hconf, - sizeof(*hconf), + &hconf, + sizeof(hconf), 1000);
- kfree(hconf); - if (rc < 0) { dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", rc); return rc; }
- dconf = kmalloc(sizeof(*dconf), GFP_KERNEL); - if (!dconf) - return -ENOMEM; - /* read device config */ rc = usb_control_msg(interface_to_usbdev(intf), usb_rcvctrlpipe(interface_to_usbdev(intf), 0), @@@ -879,22 -937,16 +942,16 @@@ USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 1, intf->altsetting[0].desc.bInterfaceNumber, - dconf, - sizeof(*dconf), + &dconf, + sizeof(dconf), 1000); if (rc < 0) { dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n", rc); - - kfree(dconf); - return rc; }
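Both probe transfers go through usb_control_msg(), whose long argument list is easy to misread. An annotated sketch of the device-config read above, with udev, breq, wvalue and ifnum as illustrative placeholder names:

    rc = usb_control_msg(udev,                      /* target usb_device */
                         usb_rcvctrlpipe(udev, 0),  /* default control pipe, IN */
                         breq,                      /* bRequest (vendor-defined) */
                         USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
                                                    /* bmRequestType */
                         wvalue,                    /* wValue */
                         ifnum,                     /* wIndex: interface number */
                         &dconf, sizeof(dconf),     /* data stage buffer/length */
                         1000);                     /* timeout in milliseconds */

usb_control_msg() returns the number of bytes transferred on success or a negative errno, hence the 'rc < 0' checks in the surrounding code.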
- icount = dconf->icount+1; - - kfree(dconf); - + icount = dconf.icount + 1; dev_info(&intf->dev, "Configuring for %d interfaces\n", icount);
if (icount > GS_MAX_INTF) { @@@ -915,7 -967,7 +972,7 @@@ dev->udev = interface_to_usbdev(intf);
for (i = 0; i < icount; i++) { - dev->canch[i] = gs_make_candev(i, intf); + dev->canch[i] = gs_make_candev(i, intf, &dconf); if (IS_ERR_OR_NULL(dev->canch[i])) { /* save error code to return later */ rc = PTR_ERR(dev->canch[i]); @@@ -957,8 -1009,6 +1014,8 @@@ static void gs_usb_disconnect(struct us static const struct usb_device_id gs_usb_table[] = { { USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID, USB_GSUSB_1_PRODUCT_ID, 0) }, + { USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID, + USB_CANDLELIGHT_PRODUCT_ID, 0) }, {} /* Terminating entry */ };
@@@ -976,6 -1026,5 +1033,6 @@@ module_usb_driver(gs_usb_driver) MODULE_AUTHOR("Maximilian Schneider mws@schneidersoft.net"); MODULE_DESCRIPTION( "Socket CAN device driver for Geschwister Schneider Technologie-, " -"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces."); +"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces\n" +"and bytewerk.org candleLight USB CAN interfaces."); MODULE_LICENSE("GPL v2"); diff --combined drivers/net/ethernet/broadcom/bgmac.c index a6333d3,e6e74ca..b045dc0 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@@ -246,6 -246,8 +246,8 @@@ err_dma_head
err_drop: dev_kfree_skb(skb); + net_dev->stats.tx_dropped++; + net_dev->stats.tx_errors++; return NETDEV_TX_OK; }
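The bgmac hunk charges failed transmits to both tx_dropped and tx_errors while still returning NETDEV_TX_OK. That combination is deliberate: once the driver has freed the skb it must not return NETDEV_TX_BUSY, or the core would try to retransmit a buffer that no longer exists. The general shape of such an error path (a sketch; my_map_and_queue() is a hypothetical helper):

    static netdev_tx_t my_start_xmit(struct sk_buff *skb,
                                     struct net_device *net_dev)
    {
            if (my_map_and_queue(skb, net_dev) < 0) {
                    dev_kfree_skb_any(skb);          /* skb is consumed here */
                    net_dev->stats.tx_dropped++;
                    net_dev->stats.tx_errors++;
            }
            return NETDEV_TX_OK;    /* consumed either way, never requeued */
    }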
@@@ -267,16 -269,15 +269,16 @@@ static void bgmac_dma_tx_free(struct bg while (ring->start != ring->end) { int slot_idx = ring->start % BGMAC_TX_RING_SLOTS; struct bgmac_slot_info *slot = &ring->slots[slot_idx]; - u32 ctl1; + u32 ctl0, ctl1; int len;
if (slot_idx == empty_slot) break;
+ ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0); ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1); len = ctl1 & BGMAC_DESC_CTL1_LEN; - if (ctl1 & BGMAC_DESC_CTL0_SOF) + if (ctl0 & BGMAC_DESC_CTL0_SOF) /* Unmap no longer used buffer */ dma_unmap_single(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE); @@@ -285,6 -286,8 +287,8 @@@ DMA_TO_DEVICE);
if (slot->skb) { + bgmac->net_dev->stats.tx_bytes += slot->skb->len; + bgmac->net_dev->stats.tx_packets++; bytes_compl += slot->skb->len; pkts_compl++;
@@@ -465,6 -468,7 +469,7 @@@ static int bgmac_dma_rx_read(struct bgm bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n", ring->start); put_page(virt_to_head_page(buf)); + bgmac->net_dev->stats.rx_errors++; break; }
@@@ -472,6 -476,8 +477,8 @@@ bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n", ring->start); put_page(virt_to_head_page(buf)); + bgmac->net_dev->stats.rx_length_errors++; + bgmac->net_dev->stats.rx_errors++; break; }
@@@ -482,6 -488,7 +489,7 @@@ if (unlikely(!skb)) { bgmac_err(bgmac, "build_skb failed\n"); put_page(virt_to_head_page(buf)); + bgmac->net_dev->stats.rx_errors++; break; } skb_put(skb, BGMAC_RX_FRAME_OFFSET + @@@ -491,6 -498,8 +499,8 @@@
skb_checksum_none_assert(skb); skb->protocol = eth_type_trans(skb, bgmac->net_dev); + bgmac->net_dev->stats.rx_bytes += len; + bgmac->net_dev->stats.rx_packets++; napi_gro_receive(&bgmac->napi, skb); handled++; } while (0); @@@ -1311,10 -1320,9 +1321,10 @@@ static int bgmac_open(struct net_devic } napi_enable(&bgmac->napi);
- phy_start(bgmac->phy_dev); + phy_start(net_dev->phydev);
- netif_carrier_on(net_dev); + netif_start_queue(net_dev); + return 0; }
@@@ -1324,7 -1332,7 +1334,7 @@@ static int bgmac_stop(struct net_devic
netif_carrier_off(net_dev);
- phy_stop(bgmac->phy_dev); + phy_stop(net_dev->phydev);
napi_disable(&bgmac->napi); bgmac_chip_intrs_off(bgmac); @@@ -1362,12 -1370,10 +1372,10 @@@ static int bgmac_set_mac_address(struc
static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) { - struct bgmac *bgmac = netdev_priv(net_dev); - if (!netif_running(net_dev)) return -EINVAL;
- return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd); + return phy_mii_ioctl(net_dev->phydev, ifr, cmd); }
static const struct net_device_ops bgmac_netdev_ops = { @@@ -1384,20 -1390,125 +1392,125 @@@ * ethtool_ops **************************************************/
- static int bgmac_get_settings(struct net_device *net_dev, - struct ethtool_cmd *cmd) + struct bgmac_stat { + u8 size; + u32 offset; + const char *name; + }; + + static struct bgmac_stat bgmac_get_strings_stats[] = { + { 8, BGMAC_TX_GOOD_OCTETS, "tx_good_octets" }, + { 4, BGMAC_TX_GOOD_PKTS, "tx_good" }, + { 8, BGMAC_TX_OCTETS, "tx_octets" }, + { 4, BGMAC_TX_PKTS, "tx_pkts" }, + { 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" }, + { 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" }, + { 4, BGMAC_TX_LEN_64, "tx_64" }, + { 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" }, + { 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" }, + { 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" }, + { 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" }, + { 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" }, + { 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" }, + { 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" }, + { 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" }, + { 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" }, + { 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" }, + { 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" }, + { 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" }, + { 4, BGMAC_TX_UNDERRUNS, "tx_underruns" }, + { 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" }, + { 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" }, + { 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" }, + { 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" }, + { 4, BGMAC_TX_LATE_COLS, "tx_late_cols" }, + { 4, BGMAC_TX_DEFERED, "tx_defered" }, + { 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" }, + { 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" }, + { 4, BGMAC_TX_UNI_PKTS, "tx_unicast" }, + { 4, BGMAC_TX_Q0_PKTS, "tx_q0" }, + { 8, BGMAC_TX_Q0_OCTETS, "tx_q0_octets" }, + { 4, BGMAC_TX_Q1_PKTS, "tx_q1" }, + { 8, BGMAC_TX_Q1_OCTETS, "tx_q1_octets" }, + { 4, BGMAC_TX_Q2_PKTS, "tx_q2" }, + { 8, BGMAC_TX_Q2_OCTETS, "tx_q2_octets" }, + { 4, BGMAC_TX_Q3_PKTS, "tx_q3" }, + { 8, BGMAC_TX_Q3_OCTETS, "tx_q3_octets" }, + { 8, BGMAC_RX_GOOD_OCTETS, "rx_good_octets" }, + { 4, BGMAC_RX_GOOD_PKTS, "rx_good" }, + { 8, BGMAC_RX_OCTETS, "rx_octets" }, + { 4, BGMAC_RX_PKTS, "rx_pkts" }, + { 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" }, + { 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" }, + { 4, BGMAC_RX_LEN_64, "rx_64" }, + { 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" }, + { 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" }, + { 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" }, + { 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" }, + { 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" }, + { 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" }, + { 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" }, + { 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" }, + { 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" }, + { 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" }, + { 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" }, + { 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" }, + { 4, BGMAC_RX_MISSED_PKTS, "rx_missed" }, + { 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" }, + { 4, BGMAC_RX_UNDERSIZE, "rx_undersize" }, + { 4, BGMAC_RX_CRC_ERRS, "rx_crc" }, + { 4, BGMAC_RX_ALIGN_ERRS, "rx_align" }, + { 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" }, + { 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" }, + { 4, BGMAC_RX_NONPAUSE_PKTS, "rx_nonpause" }, + { 4, BGMAC_RX_SACHANGES, "rx_sa_changes" }, + { 4, BGMAC_RX_UNI_PKTS, "rx_unicast" }, + }; + + #define BGMAC_STATS_LEN ARRAY_SIZE(bgmac_get_strings_stats) + + static int bgmac_get_sset_count(struct net_device *dev, int string_set) { - struct bgmac *bgmac = netdev_priv(net_dev); + switch (string_set) { + case ETH_SS_STATS: + return BGMAC_STATS_LEN; + }
- return phy_ethtool_gset(bgmac->phy_dev, cmd); + return -EOPNOTSUPP; }
- static int bgmac_set_settings(struct net_device *net_dev, - struct ethtool_cmd *cmd) + static void bgmac_get_strings(struct net_device *dev, u32 stringset, + u8 *data) { - struct bgmac *bgmac = netdev_priv(net_dev); + int i;
- return phy_ethtool_sset(bgmac->phy_dev, cmd); + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < BGMAC_STATS_LEN; i++) + strlcpy(data + i * ETH_GSTRING_LEN, + bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN); + } + + static void bgmac_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *ss, uint64_t *data) + { + struct bgmac *bgmac = netdev_priv(dev); + const struct bgmac_stat *s; + unsigned int i; + u64 val; + + if (!netif_running(dev)) + return; + + for (i = 0; i < BGMAC_STATS_LEN; i++) { + s = &bgmac_get_strings_stats[i]; + val = 0; + if (s->size == 8) + val = (u64)bgmac_read(bgmac, s->offset + 4) << 32; + val |= bgmac_read(bgmac, s->offset); + data[i] = val; + } }
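Taken together, the three new callbacks form ethtool's standard statistics contract: .get_sset_count reports how many counters exist, .get_strings emits their names (ETH_GSTRING_LEN bytes apart, in table order), and .get_ethtool_stats fills the values in the same order -- 'ethtool -S <iface>' simply walks that triple. The count side of the contract, in outline (a sketch; my_stats is a hypothetical table like bgmac_get_strings_stats above):

    static int my_get_sset_count(struct net_device *dev, int sset)
    {
            /* Only ETH_SS_STATS is handled; other string sets are refused. */
            return sset == ETH_SS_STATS ? ARRAY_SIZE(my_stats) : -EOPNOTSUPP;
    }

Note also how the 64-bit hardware counters are assembled from two 32-bit register reads, high word shifted up first and then OR'ed with the low word.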
static void bgmac_get_drvinfo(struct net_device *net_dev, @@@ -1408,9 -1519,12 +1521,12 @@@ }
static const struct ethtool_ops bgmac_ethtool_ops = { - .get_settings = bgmac_get_settings, - .set_settings = bgmac_set_settings, + .get_strings = bgmac_get_strings, + .get_sset_count = bgmac_get_sset_count, + .get_ethtool_stats = bgmac_get_ethtool_stats, .get_drvinfo = bgmac_get_drvinfo, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, };
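The two legacy callbacks are not merely renamed: .get_settings/.set_settings operate on struct ethtool_cmd with 32-bit link-mode masks, while .get_link_ksettings/.set_link_ksettings use struct ethtool_link_ksettings with arbitrarily wide link-mode bitmaps. Because bgmac now keeps its PHY in net_dev->phydev, it can delegate straight to the generic phylib helpers, so the whole conversion reduces to the ops table (this mirrors the diff; the same two lines work for any phylib-backed driver):

    static const struct ethtool_ops my_ethtool_ops = {
            /* legacy .get_settings/.set_settings dropped */
            .get_link_ksettings = phy_ethtool_get_link_ksettings,
            .set_link_ksettings = phy_ethtool_set_link_ksettings,
    };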
/************************************************** @@@ -1431,7 -1545,7 +1547,7 @@@ static int bgmac_mii_write(struct mii_b static void bgmac_adjust_link(struct net_device *net_dev) { struct bgmac *bgmac = netdev_priv(net_dev); - struct phy_device *phy_dev = bgmac->phy_dev; + struct phy_device *phy_dev = net_dev->phydev; bool update = false;
if (phy_dev->link) { @@@ -1475,8 -1589,6 +1591,6 @@@ static int bgmac_fixed_phy_register(str return err; }
- bgmac->phy_dev = phy_dev; - return err; }
@@@ -1521,7 -1633,6 +1635,6 @@@ static int bgmac_mii_register(struct bg err = PTR_ERR(phy_dev); goto err_unregister_bus; } - bgmac->phy_dev = phy_dev;
return err;
@@@ -1590,6 -1701,7 +1703,7 @@@ static int bgmac_probe(struct bcma_devi bgmac->net_dev = net_dev; bgmac->core = core; bcma_set_drvdata(core, bgmac); + SET_NETDEV_DEV(net_dev, &core->dev);
/* Defaults */ memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN); diff --combined drivers/net/ethernet/freescale/fec.h index dc71a88,92fd5c0..4e98b24 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@@ -442,8 -442,8 +442,10 @@@ struct bufdesc_ex #define FEC_QUIRK_SINGLE_MDIO (1 << 11) /* Controller supports RACC register */ #define FEC_QUIRK_HAS_RACC (1 << 12) +/* Interrupt doesn't wake CPU from deep idle */ +#define FEC_QUIRK_ERR006687 (1 << 13) + /* Controller supports interrupt coalescing */ -#define FEC_QUIRK_HAS_COALESCE (1 << 13) ++#define FEC_QUIRK_HAS_COALESCE (1 << 14)
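The double '++' markers flag this as a merge resolution: one parent branch introduced FEC_QUIRK_ERR006687 as bit 13 while the other used bit 13 for FEC_QUIRK_HAS_COALESCE, so the merge renumbers the latter to bit 14. Both quirks are consumed as simple mask tests, as the fec_main.c hunks below show; in outline:

    /* Sketch of quirk consumption; 'fep' is the fec_enet_private instance. */
    if (fep->quirks & FEC_QUIRK_ERR006687)
            imx6q_cpuidle_fec_irqs_used();      /* deep-idle IRQ workaround */

    if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
            return -EOPNOTSUPP;                 /* no interrupt coalescing */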
struct bufdesc_prop { int qid; diff --combined drivers/net/ethernet/freescale/fec_main.c index d9ecc30,4040003..01f7e81 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@@ -60,7 -60,6 +60,7 @@@ #include <linux/if_vlan.h> #include <linux/pinctrl/consumer.h> #include <linux/prefetch.h> +#include <soc/imx/cpuidle.h>
#include <asm/cacheflush.h>
@@@ -112,7 -111,13 +112,13 @@@ static struct platform_device_id fec_de FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | - FEC_QUIRK_HAS_RACC, + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE, + }, { + .name = "imx6ul-fec", + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_BUG_CAPTURE | + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE, }, { /* sentinel */ } @@@ -126,6 -131,7 +132,7 @@@ enum imx_fec_type IMX6Q_FEC, MVF600_FEC, IMX6SX_FEC, + IMX6UL_FEC, };
static const struct of_device_id fec_dt_ids[] = { @@@ -135,6 -141,7 +142,7 @@@ { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], }, + { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fec_dt_ids); @@@ -2359,9 -2366,6 +2367,6 @@@ static void fec_enet_itr_coal_set(struc struct fec_enet_private *fep = netdev_priv(ndev); int rx_itr, tx_itr;
- if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) - return; - /* Must be greater than zero to avoid unpredictable behavior */ if (!fep->rx_time_itr || !fep->rx_pkts_itr || !fep->tx_time_itr || !fep->tx_pkts_itr) @@@ -2384,10 -2388,12 +2389,12 @@@
writel(tx_itr, fep->hwp + FEC_TXIC0); writel(rx_itr, fep->hwp + FEC_RXIC0); - writel(tx_itr, fep->hwp + FEC_TXIC1); - writel(rx_itr, fep->hwp + FEC_RXIC1); - writel(tx_itr, fep->hwp + FEC_TXIC2); - writel(rx_itr, fep->hwp + FEC_RXIC2); + if (fep->quirks & FEC_QUIRK_HAS_AVB) { + writel(tx_itr, fep->hwp + FEC_TXIC1); + writel(rx_itr, fep->hwp + FEC_RXIC1); + writel(tx_itr, fep->hwp + FEC_TXIC2); + writel(rx_itr, fep->hwp + FEC_RXIC2); + } }
static int @@@ -2395,7 -2401,7 +2402,7 @@@ fec_enet_get_coalesce(struct net_devic { struct fec_enet_private *fep = netdev_priv(ndev);
- if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) + if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) return -EOPNOTSUPP;
ec->rx_coalesce_usecs = fep->rx_time_itr; @@@ -2413,7 -2419,7 +2420,7 @@@ fec_enet_set_coalesce(struct net_devic struct fec_enet_private *fep = netdev_priv(ndev); unsigned int cycle;
- if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) + if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) return -EOPNOTSUPP;
if (ec->rx_max_coalesced_frames > 255) { @@@ -2819,9 -2825,6 +2826,9 @@@ fec_enet_open(struct net_device *ndev if (ret) goto err_enet_mii_probe;
+ if (fep->quirks & FEC_QUIRK_ERR006687) + imx6q_cpuidle_fec_irqs_used(); + napi_enable(&fep->napi); phy_start(ndev->phydev); netif_tx_start_all_queues(ndev); @@@ -2857,9 -2860,6 +2864,9 @@@ fec_enet_close(struct net_device *ndev
phy_disconnect(ndev->phydev);
+ if (fep->quirks & FEC_QUIRK_ERR006687) + imx6q_cpuidle_fec_irqs_unused(); + fec_enet_clk_enable(ndev, false); pinctrl_pm_select_sleep_state(&fep->pdev->dev); pm_runtime_mark_last_busy(&fep->pdev->dev); @@@ -3198,7 -3198,12 +3205,12 @@@ static void fec_reset_phy(struct platfo dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); return; } - msleep(msec); + + if (msec > 20) + msleep(msec); + else + usleep_range(msec * 1000, msec * 1000 + 1000); + gpio_set_value_cansleep(phy_reset, !active_high); } #else /* CONFIG_OF */ @@@ -3299,11 -3304,6 +3311,11 @@@ fec_probe(struct platform_device *pdev
platform_set_drvdata(pdev, ndev);
+ if ((of_machine_is_compatible("fsl,imx6q") || + of_machine_is_compatible("fsl,imx6dl")) && + !of_property_read_bool(np, "fsl,err006687-workaround-present")) + fep->quirks |= FEC_QUIRK_ERR006687; + if (of_get_property(np, "fsl,magic-packet", NULL)) fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
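The reworked delay in fec_reset_phy() follows the kernel's sleeping guidance (Documentation/timers/timers-howto.txt): msleep() is jiffy-based and can overshoot short delays by a large factor, so requests of roughly 20 ms and below are better served by the hrtimer-based usleep_range(). The pattern generalizes:

    /* Sketch of the short-vs-long sleep split; 'msec' is the requested
     * delay in milliseconds. */
    if (msec > 20)
            msleep(msec);                         /* long: jiffy accuracy ok */
    else
            usleep_range(msec * 1000,             /* min, in microseconds */
                         msec * 1000 + 1000);     /* max: 1 ms of slack */

Giving usleep_range() a real window (rather than min == max) lets the scheduler coalesce nearby timer wakeups.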
diff --combined drivers/net/ethernet/intel/i40e/i40e_main.c index 2e10d23,2b11405..545b15a --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@@ -31,12 -31,7 +31,7 @@@ /* Local includes */ #include "i40e.h" #include "i40e_diag.h" - #if IS_ENABLED(CONFIG_VXLAN) - #include <net/vxlan.h> - #endif - #if IS_ENABLED(CONFIG_GENEVE) - #include <net/geneve.h> - #endif + #include <net/udp_tunnel.h>
const char i40e_driver_name[] = "i40e"; static const char i40e_driver_string[] = @@@ -45,8 -40,8 +40,8 @@@ #define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 1 - #define DRV_VERSION_MINOR 5 - #define DRV_VERSION_BUILD 16 + #define DRV_VERSION_MINOR 6 + #define DRV_VERSION_BUILD 4 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@@ -1584,14 -1579,8 +1579,8 @@@ static void i40e_vsi_setup_queue_map(st vsi->tc_config.numtc = numtc; vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; /* Number of queues per enabled TC */ - /* In MFP case we can have a much lower count of MSIx - * vectors available and so we need to lower the used - * q count. - */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) - qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix); - else - qcount = vsi->alloc_queue_pairs; + qcount = vsi->alloc_queue_pairs; + num_tc_qps = qcount / numtc; num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
@@@ -1845,8 -1834,10 +1834,10 @@@ int i40e_sync_vsi_filters(struct i40e_v { struct list_head tmp_del_list, tmp_add_list; struct i40e_mac_filter *f, *ftmp, *fclone; + struct i40e_hw *hw = &vsi->back->hw; bool promisc_forced_on = false; bool add_happened = false; + char vsi_name[16] = "PF"; int filter_list_len = 0; u32 changed_flags = 0; i40e_status aq_ret = 0; @@@ -1874,6 -1865,11 +1865,11 @@@ INIT_LIST_HEAD(&tmp_del_list); INIT_LIST_HEAD(&tmp_add_list);
+ if (vsi->type == I40E_VSI_SRIOV) + snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id); + else if (vsi->type != I40E_VSI_MAIN) + snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid); + if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
@@@ -1925,7 -1921,7 +1921,7 @@@ if (!list_empty(&tmp_del_list)) { int del_list_size;
- filter_list_len = pf->hw.aq.asq_buf_size / + filter_list_len = hw->aq.asq_buf_size / sizeof(struct i40e_aqc_remove_macvlan_element_data); del_list_size = filter_list_len * sizeof(struct i40e_aqc_remove_macvlan_element_data); @@@ -1957,21 -1953,21 +1953,21 @@@
/* flush a full buffer */ if (num_del == filter_list_len) { - aq_ret = i40e_aq_remove_macvlan(&pf->hw, - vsi->seid, - del_list, - num_del, - NULL); - aq_err = pf->hw.aq.asq_last_status; + aq_ret = + i40e_aq_remove_macvlan(hw, vsi->seid, + del_list, + num_del, NULL); + aq_err = hw->aq.asq_last_status; num_del = 0; memset(del_list, 0, del_list_size);
if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) { retval = -EIO; dev_err(&pf->pdev->dev, - "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n", - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); + "ignoring delete macvlan error on %s, err %s, aq_err %s while flushing a full buffer\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, aq_err)); } } /* Release memory for MAC filter entries which were @@@ -1982,17 -1978,17 +1978,17 @@@ }
if (num_del) { - aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, - del_list, num_del, - NULL); - aq_err = pf->hw.aq.asq_last_status; + aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list, + num_del, NULL); + aq_err = hw->aq.asq_last_status; num_del = 0;
if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) dev_info(&pf->pdev->dev, - "ignoring delete macvlan error, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); + "ignoring delete macvlan error on %s, err %s aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, aq_err)); }
kfree(del_list); @@@ -2003,7 -1999,7 +1999,7 @@@ int add_list_size;
/* do all the adds now */ - filter_list_len = pf->hw.aq.asq_buf_size / + filter_list_len = hw->aq.asq_buf_size / sizeof(struct i40e_aqc_add_macvlan_element_data), add_list_size = filter_list_len * sizeof(struct i40e_aqc_add_macvlan_element_data); @@@ -2038,10 -2034,10 +2034,10 @@@
/* flush a full buffer */ if (num_add == filter_list_len) { - aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + aq_ret = i40e_aq_add_macvlan(hw, vsi->seid, add_list, num_add, NULL); - aq_err = pf->hw.aq.asq_last_status; + aq_err = hw->aq.asq_last_status; num_add = 0;
if (aq_ret) @@@ -2056,9 -2052,9 +2052,9 @@@ }
if (num_add) { - aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + aq_ret = i40e_aq_add_macvlan(hw, vsi->seid, add_list, num_add, NULL); - aq_err = pf->hw.aq.asq_last_status; + aq_err = hw->aq.asq_last_status; num_add = 0; } kfree(add_list); @@@ -2067,16 -2063,18 +2063,18 @@@ if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) { retval = i40e_aq_rc_to_posix(aq_ret, aq_err); dev_info(&pf->pdev->dev, - "add filter failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); - if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && + "add filter failed on %s, err %s aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, aq_err)); + if ((hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) && !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)) { promisc_forced_on = true; set_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); - dev_info(&pf->pdev->dev, "promiscuous mode forced on\n"); + dev_info(&pf->pdev->dev, "promiscuous mode forced on %s\n", + vsi_name); } } } @@@ -2098,12 -2096,12 +2096,12 @@@ NULL); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, - pf->hw.aq.asq_last_status); + hw->aq.asq_last_status); dev_info(&pf->pdev->dev, - "set multi promisc failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + "set multi promisc failed on %s, err %s aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); } } if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { @@@ -2122,33 -2120,58 +2120,58 @@@ */ if (pf->cur_promisc != cur_promisc) { pf->cur_promisc = cur_promisc; - set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + if (cur_promisc) + aq_ret = + i40e_aq_set_default_vsi(hw, + vsi->seid, + NULL); + else + aq_ret = + i40e_aq_clear_default_vsi(hw, + vsi->seid, + NULL); + if (aq_ret) { + retval = i40e_aq_rc_to_posix(aq_ret, + hw->aq.asq_last_status); + dev_info(&pf->pdev->dev, + "Set default VSI failed on %s, err %s, aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, + hw->aq.asq_last_status)); + } } } else { aq_ret = i40e_aq_set_vsi_unicast_promiscuous( - &vsi->back->hw, + hw, vsi->seid, cur_promisc, NULL, true); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, - pf->hw.aq.asq_last_status); + hw->aq.asq_last_status); dev_info(&pf->pdev->dev, - "set unicast promisc failed, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "set unicast promisc failed on %s, err %s, aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, + hw->aq.asq_last_status)); } aq_ret = i40e_aq_set_vsi_multicast_promiscuous( - &vsi->back->hw, + hw, vsi->seid, cur_promisc, NULL); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, - pf->hw.aq.asq_last_status); + hw->aq.asq_last_status); dev_info(&pf->pdev->dev, - "set multicast promisc failed, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "set multicast promisc failed on %s, err %s, aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, + hw->aq.asq_last_status)); } } aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, @@@ -2159,9 -2182,9 +2182,9 @@@ pf->hw.aq.asq_last_status); dev_info(&pf->pdev->dev, "set brdcast promisc failed, err %s, aq_err %s\n", - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, + hw->aq.asq_last_status)); } } out: @@@ -3952,6 -3975,7 +3975,7 @@@ static void i40e_vsi_free_irq(struct i4 /* clear the affinity_mask in the IRQ 
descriptor */ irq_set_affinity_hint(pf->msix_entries[vector].vector, NULL); + synchronize_irq(pf->msix_entries[vector].vector); free_irq(pf->msix_entries[vector].vector, vsi->q_vectors[i]);
@@@ -4958,7 -4982,6 +4982,6 @@@ static void i40e_dcb_reconfigure(struc if (pf->vsi[v]->netdev) i40e_dcbnl_set_all(pf->vsi[v]); } - i40e_notify_client_of_l2_param_changes(pf->vsi[v]); } }
@@@ -5183,12 -5206,6 +5206,6 @@@ static void i40e_vsi_reinit_locked(stru usleep_range(1000, 2000); i40e_down(vsi);
- /* Give a VF some time to respond to the reset. The - * two second wait is based upon the watchdog cycle in - * the VF driver. - */ - if (vsi->type == I40E_VSI_SRIOV) - msleep(2000); i40e_up(vsi); clear_bit(__I40E_CONFIG_BUSY, &pf->state); } @@@ -5231,6 -5248,9 +5248,9 @@@ void i40e_down(struct i40e_vsi *vsi i40e_clean_tx_ring(vsi->tx_rings[i]); i40e_clean_rx_ring(vsi->rx_rings[i]); } + + i40e_notify_client_of_netdev_close(vsi, false); + }
/** @@@ -5342,14 -5362,7 +5362,7 @@@ int i40e_open(struct net_device *netdev TCP_FLAG_CWR) >> 16); wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
- #ifdef CONFIG_I40E_VXLAN - vxlan_get_rx_port(netdev); - #endif - #ifdef CONFIG_I40E_GENEVE - if (pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE) - geneve_get_rx_port(netdev); - #endif - + udp_tunnel_get_rx_info(netdev); i40e_notify_client_of_netdev_open(vsi);
return 0; @@@ -5716,6 -5729,8 +5729,8 @@@ static int i40e_handle_lldp_event(struc i40e_service_event_schedule(pf); } else { i40e_pf_unquiesce_all_vsi(pf); + /* Notify the client for the DCB changes */ + i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]); }
exit: @@@ -5940,7 -5955,6 +5955,6 @@@ static void i40e_fdir_flush_and_replay( if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); } - }
/** @@@ -7057,7 -7071,6 +7071,6 @@@ static void i40e_handle_mdd_event(struc **/ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) { - #if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) struct i40e_hw *hw = &pf->hw; i40e_status ret; __be16 port; @@@ -7092,7 -7105,6 +7105,6 @@@ } } } - #endif }
/** @@@ -7174,7 -7186,7 +7186,7 @@@ static int i40e_set_num_rings_in_vsi(st vsi->alloc_queue_pairs = 1; vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT, I40E_REQ_DESCRIPTOR_MULTIPLE); - vsi->num_q_vectors = 1; + vsi->num_q_vectors = pf->num_fdsb_msix; break;
case I40E_VSI_VMDQ2: @@@ -7558,9 -7570,11 +7570,11 @@@ static int i40e_init_msix(struct i40e_p /* reserve one vector for sideband flow director */ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { if (vectors_left) { + pf->num_fdsb_msix = 1; v_budget++; vectors_left--; } else { + pf->num_fdsb_msix = 0; pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; } } @@@ -8579,7 -8593,9 +8593,9 @@@ bool i40e_set_ntuple(struct i40e_pf *pf /* Enable filters and mark for reset */ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) need_reset = true; - pf->flags |= I40E_FLAG_FD_SB_ENABLED; + /* enable FD_SB only if there is MSI-X vector */ + if (pf->num_fdsb_msix > 0) + pf->flags |= I40E_FLAG_FD_SB_ENABLED; } else { /* turn off filters, mark for reset and clear SW filter list */ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { @@@ -8628,7 -8644,6 +8644,6 @@@ static int i40e_set_features(struct net return 0; }
- #if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) /** * i40e_get_udp_port_idx - Look up a possibly offloaded Rx UDP port * @pf: board private structure @@@ -8648,21 -8663,18 +8663,18 @@@ static u8 i40e_get_udp_port_idx(struct return i; }
- #endif - - #if IS_ENABLED(CONFIG_VXLAN) /** - * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up + * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up * @netdev: This physical port's netdev - * @sa_family: Socket Family that VXLAN is notifying us about - * @port: New UDP port number that VXLAN started listening to + * @ti: Tunnel endpoint information **/ - static void i40e_add_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + static void i40e_udp_tunnel_add(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + __be16 port = ti->port; u8 next_idx; u8 idx;
@@@ -8670,7 -8682,7 +8682,7 @@@
/* Check if port already exists */ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "vxlan port %d already offloaded\n", + netdev_info(netdev, "port %d already offloaded\n", ntohs(port)); return; } @@@ -8679,131 -8691,75 +8691,75 @@@ next_idx = i40e_get_udp_port_idx(pf, 0);
if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n", - ntohs(port)); - return; - } - - /* New port: add it and mark its index in the bitmap */ - pf->udp_ports[next_idx].index = port; - pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; - pf->pending_udp_bitmap |= BIT_ULL(next_idx); - pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; - } - - /** - * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away - * @netdev: This physical port's netdev - * @sa_family: Socket Family that VXLAN is notifying us about - * @port: UDP port number that VXLAN stopped listening to - **/ - static void i40e_del_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) - { - struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; - u8 idx; - - idx = i40e_get_udp_port_idx(pf, port); - - /* Check if port already exists */ - if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - /* if port exists, set it to 0 (mark for deletion) - * and make it pending - */ - pf->udp_ports[idx].index = 0; - pf->pending_udp_bitmap |= BIT_ULL(idx); - pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; - } else { - netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", - ntohs(port)); - } - } - #endif - - #if IS_ENABLED(CONFIG_GENEVE) - /** - * i40e_add_geneve_port - Get notifications about GENEVE ports that come up - * @netdev: This physical port's netdev - * @sa_family: Socket Family that GENEVE is notifying us about - * @port: New UDP port number that GENEVE started listening to - **/ - static void i40e_add_geneve_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) - { - struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; - u8 next_idx; - u8 idx; - - if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) - return; - - idx = i40e_get_udp_port_idx(pf, port); - - /* Check if port already exists */ - if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "udp port %d already offloaded\n", + netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n", ntohs(port)); return; }
- /* Now check if there is space to add the new port */ - next_idx = i40e_get_udp_port_idx(pf, 0); - - if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "maximum number of UDP ports reached, not adding port %d\n", - ntohs(port)); + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) + return; + pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE; + break; + default: return; }
/* New port: add it and mark its index in the bitmap */ pf->udp_ports[next_idx].index = port; - pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE; pf->pending_udp_bitmap |= BIT_ULL(next_idx); pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; - - dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port)); }
/** - * i40e_del_geneve_port - Get notifications about GENEVE ports that go away + * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away * @netdev: This physical port's netdev - * @sa_family: Socket Family that GENEVE is notifying us about - * @port: UDP port number that GENEVE stopped listening to + * @ti: Tunnel endpoint information **/ - static void i40e_del_geneve_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + static void i40e_udp_tunnel_del(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + __be16 port = ti->port; u8 idx;
- if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) - return; - idx = i40e_get_udp_port_idx(pf, port);
/* Check if port already exists */ - if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - /* if port exists, set it to 0 (mark for deletion) - * and make it pending - */ - pf->udp_ports[idx].index = 0; - pf->pending_udp_bitmap |= BIT_ULL(idx); - pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; + if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS) + goto not_found;
- dev_info(&pf->pdev->dev, "deleting geneve port %d\n", - ntohs(port)); - } else { - netdev_warn(netdev, "geneve port %d was not found, not deleting\n", - ntohs(port)); + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN) + goto not_found; + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE) + goto not_found; + break; + default: + goto not_found; } + + /* if port exists, set it to 0 (mark for deletion) + * and make it pending + */ + pf->udp_ports[idx].index = 0; + pf->pending_udp_bitmap |= BIT_ULL(idx); + pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; + + return; + not_found: + netdev_warn(netdev, "UDP port %d was not found, not deleting\n", + ntohs(port)); } - #endif
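This is the heart of the i40e conversion: the four VXLAN/GENEVE-specific notifiers collapse into one ndo_udp_tunnel_add/ndo_udp_tunnel_del pair, with struct udp_tunnel_info carrying the tunnel type, address family and port, and udp_tunnel_get_rx_info() (called from i40e_open above) replaying all currently bound ports. The skeleton a driver implements looks like this (a sketch; the my_* names are placeholders):

    static void my_udp_tunnel_add(struct net_device *netdev,
                                  struct udp_tunnel_info *ti)
    {
            switch (ti->type) {
            case UDP_TUNNEL_TYPE_VXLAN:
                    /* program ti->port as a VXLAN UDP destination port */
                    break;
            case UDP_TUNNEL_TYPE_GENEVE:
                    /* program ti->port as a GENEVE UDP destination port */
                    break;
            default:
                    return;         /* unknown tunnel type: ignore */
            }
    }

One subtlety the new i40e code gets right: deletion checks that the recorded tunnel type matches the notification's type, so a GENEVE removal cannot tear down an entry registered for VXLAN on the same port number.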
static int i40e_get_phys_port_id(struct net_device *netdev, struct netdev_phys_item_id *ppid) @@@ -9033,14 -8989,8 +8989,8 @@@ static const struct net_device_ops i40e .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, .ndo_set_vf_trust = i40e_ndo_set_vf_trust, - #if IS_ENABLED(CONFIG_VXLAN) - .ndo_add_vxlan_port = i40e_add_vxlan_port, - .ndo_del_vxlan_port = i40e_del_vxlan_port, - #endif - #if IS_ENABLED(CONFIG_GENEVE) - .ndo_add_geneve_port = i40e_add_geneve_port, - .ndo_del_geneve_port = i40e_del_geneve_port, - #endif + .ndo_udp_tunnel_add = i40e_udp_tunnel_add, + .ndo_udp_tunnel_del = i40e_udp_tunnel_del, .ndo_get_phys_port_id = i40e_get_phys_port_id, .ndo_fdb_add = i40e_ndo_fdb_add, .ndo_features_check = i40e_features_check, @@@ -10133,14 -10083,14 +10083,14 @@@ void i40e_veb_release(struct i40e_veb * static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) { struct i40e_pf *pf = veb->pf; - bool is_default = veb->pf->cur_promisc; bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); int ret;
- /* get a VEB from the hardware */ ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, - veb->enabled_tc, is_default, + veb->enabled_tc, false, &veb->seid, enable_stats, NULL); + + /* get a VEB from the hardware */ if (ret) { dev_info(&pf->pdev->dev, "couldn't add VEB, err %s aq_err %s\n", @@@ -10689,12 -10639,8 +10639,8 @@@ static void i40e_print_features(struct } if (pf->flags & I40E_FLAG_DCB_CAPABLE) i += snprintf(&buf[i], REMAIN(i), " DCB"); - #if IS_ENABLED(CONFIG_VXLAN) i += snprintf(&buf[i], REMAIN(i), " VxLAN"); - #endif - #if IS_ENABLED(CONFIG_GENEVE) i += snprintf(&buf[i], REMAIN(i), " Geneve"); - #endif if (pf->flags & I40E_FLAG_PTP) i += snprintf(&buf[i], REMAIN(i), " PTP"); #ifdef I40E_FCOE @@@ -10769,7 -10715,8 +10715,7 @@@ static int i40e_probe(struct pci_dev *p }
/* set up pci connections */ - err = pci_request_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM), i40e_driver_name); + err = pci_request_mem_regions(pdev, i40e_driver_name); if (err) { dev_info(&pdev->dev, "pci_request_selected_regions failed %d\n", err); @@@ -11266,7 -11213,8 +11212,7 @@@ err_ioremap kfree(pf); err_pf_alloc: pci_disable_pcie_error_reporting(pdev); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); @@@ -11377,7 -11325,8 +11323,7 @@@ static void i40e_remove(struct pci_dev
iounmap(hw->hw_addr); kfree(pf); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev);
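pci_request_mem_regions() and pci_release_mem_regions() used here are convenience wrappers that fold the old two-call idiom into one; their definitions amount to the following (paraphrasing the include/linux/pci.h helpers of this era):

    static inline int pci_request_mem_regions(struct pci_dev *pdev,
                                              const char *name)
    {
            return pci_request_selected_regions(pdev,
                            pci_select_bars(pdev, IORESOURCE_MEM), name);
    }

    static inline void pci_release_mem_regions(struct pci_dev *pdev)
    {
            return pci_release_selected_regions(pdev,
                            pci_select_bars(pdev, IORESOURCE_MEM));
    }

The same substitution appears again in the ixgbe hunks further down.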
pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); @@@ -11522,6 -11471,7 +11468,7 @@@ static int i40e_suspend(struct pci_dev { struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_hw *hw = &pf->hw; + int retval = 0;
set_bit(__I40E_SUSPENDED, &pf->state); set_bit(__I40E_DOWN, &pf->state); @@@ -11533,10 -11483,16 +11480,16 @@@ wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+ i40e_stop_misc_vector(pf); + + retval = pci_save_state(pdev); + if (retval) + return retval; + pci_wake_from_d3(pdev, pf->wol_en); pci_set_power_state(pdev, PCI_D3hot);
- return 0; + return retval; }
/** diff --combined drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 1629468,468fa9d..04dcf67 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@@ -50,7 -50,7 +50,7 @@@ #include <linux/if_bridge.h> #include <linux/prefetch.h> #include <scsi/fc/fc_fcoe.h> - #include <net/vxlan.h> + #include <net/udp_tunnel.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_gact.h> #include <net/tc_act/tc_mirred.h> @@@ -5722,9 -5722,7 +5722,7 @@@ static int ixgbe_sw_init(struct ixgbe_a #ifdef CONFIG_IXGBE_DCA adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; #endif - #ifdef CONFIG_IXGBE_VXLAN adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE; - #endif break; default: break; @@@ -6158,9 -6156,7 +6156,7 @@@ int ixgbe_open(struct net_device *netde ixgbe_up_complete(adapter);
ixgbe_clear_vxlan_port(adapter); - #ifdef CONFIG_IXGBE_VXLAN - vxlan_get_rx_port(netdev); - #endif + udp_tunnel_get_rx_info(netdev);
return 0;
@@@ -7262,14 -7258,12 +7258,12 @@@ static void ixgbe_service_task(struct w ixgbe_service_event_complete(adapter); return; } - #ifdef CONFIG_IXGBE_VXLAN - rtnl_lock(); if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) { + rtnl_lock(); adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED; - vxlan_get_rx_port(adapter->netdev); + udp_tunnel_get_rx_info(adapter->netdev); + rtnl_unlock(); } - rtnl_unlock(); - #endif /* CONFIG_IXGBE_VXLAN */ ixgbe_reset_subtask(adapter); ixgbe_phy_interrupt_subtask(adapter); ixgbe_sfp_detection_subtask(adapter); @@@ -7697,7 -7691,6 +7691,6 @@@ static void ixgbe_atr(struct ixgbe_rin /* snag network header to get L4 type and address */ skb = first->skb; hdr.network = skb_network_header(skb); - #ifdef CONFIG_IXGBE_VXLAN if (skb->encapsulation && first->protocol == htons(ETH_P_IP) && hdr.ipv4->protocol != IPPROTO_UDP) { @@@ -7708,7 -7701,6 +7701,6 @@@ udp_hdr(skb)->dest == adapter->vxlan_port) hdr.network = skb_inner_network_header(skb); } - #endif /* CONFIG_IXGBE_VXLAN */
/* Currently only IPv4/IPv6 with TCP is supported */ switch (hdr.ipv4->version) { @@@ -8770,14 -8762,12 +8762,12 @@@ static int ixgbe_set_features(struct ne
netdev->features = features;
- #ifdef CONFIG_IXGBE_VXLAN if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { if (features & NETIF_F_RXCSUM) adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; else ixgbe_clear_vxlan_port(adapter); } - #endif /* CONFIG_IXGBE_VXLAN */
if (need_reset) ixgbe_do_reset(netdev); @@@ -8788,23 -8778,27 +8778,27 @@@ return 0; }
- #ifdef CONFIG_IXGBE_VXLAN /** * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up * @dev: The port's netdev * @sa_family: Socket Family that VXLAN is notifying us about * @port: New UDP port number that VXLAN started listening to + * @type: Enumerated type specifying UDP tunnel type **/ - static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, - __be16 port) + static void ixgbe_add_vxlan_port(struct net_device *dev, + struct udp_tunnel_info *ti) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; + __be16 port = ti->port;
- if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return;
- if (sa_family == AF_INET6) + if (ti->sa_family != AF_INET) + return; + + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) return;
if (adapter->vxlan_port == port) @@@ -8826,28 -8820,31 +8820,31 @@@ * @dev: The port's netdev * @sa_family: Socket Family that VXLAN is notifying us about * @port: UDP port number that VXLAN stopped listening to + * @type: Enumerated type specifying UDP tunnel type **/ - static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, - __be16 port) + static void ixgbe_del_vxlan_port(struct net_device *dev, + struct udp_tunnel_info *ti) { struct ixgbe_adapter *adapter = netdev_priv(dev);
- if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return;
- if (sa_family == AF_INET6) + if (ti->sa_family != AF_INET) return;
- if (adapter->vxlan_port != port) { + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + + if (adapter->vxlan_port != ti->port) { netdev_info(dev, "Port %d was not found, not deleting\n", - ntohs(port)); + ntohs(ti->port)); return; }
ixgbe_clear_vxlan_port(adapter); adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; } - #endif /* CONFIG_IXGBE_VXLAN */
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, @@@ -9160,10 -9157,8 +9157,8 @@@ static const struct net_device_ops ixgb .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, .ndo_dfwd_add_station = ixgbe_fwd_add, .ndo_dfwd_del_station = ixgbe_fwd_del, - #ifdef CONFIG_IXGBE_VXLAN - .ndo_add_vxlan_port = ixgbe_add_vxlan_port, - .ndo_del_vxlan_port = ixgbe_del_vxlan_port, - #endif /* CONFIG_IXGBE_VXLAN */ + .ndo_udp_tunnel_add = ixgbe_add_vxlan_port, + .ndo_udp_tunnel_del = ixgbe_del_vxlan_port, .ndo_features_check = ixgbe_features_check, };
@@@ -9331,7 -9326,8 +9326,7 @@@ static int ixgbe_probe(struct pci_dev * pci_using_dac = 0; }
- err = pci_request_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM), ixgbe_driver_name); + err = pci_request_mem_regions(pdev, ixgbe_driver_name); if (err) { dev_err(&pdev->dev, "pci_request_selected_regions failed 0x%x\n", err); @@@ -9717,7 -9713,8 +9712,7 @@@ err_ioremap disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); err_alloc_etherdev: - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: if (!adapter || disable_dev) @@@ -9784,7 -9781,8 +9779,7 @@@ static void ixgbe_remove(struct pci_de
#endif iounmap(adapter->io_addr); - pci_release_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM)); + pci_release_mem_regions(pdev);
e_dev_info("complete\n");
diff --combined drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 0c0dfd6,d42083a..6083775 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@@ -67,6 -67,17 +67,17 @@@ int mlx4_en_setup_tc(struct net_device offset += priv->num_tx_rings_p_up; }
+ #ifdef CONFIG_MLX4_EN_DCB + if (!mlx4_is_slave(priv->mdev->dev)) { + if (up) { + priv->flags |= MLX4_EN_FLAG_DCB_ENABLED; + } else { + priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED; + priv->cee_params.dcb_cfg.pfc_state = false; + } + } + #endif /* CONFIG_MLX4_EN_DCB */ + return 0; }
@@@ -406,18 -417,14 +417,18 @@@ static int mlx4_en_vlan_rx_add_vid(stru mutex_lock(&mdev->state_lock); if (mdev->device_up && priv->port_up) { err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); - if (err) + if (err) { en_err(priv, "Failed configuring VLAN filter\n"); + goto out; + } } - if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx)) - en_dbg(HW, priv, "failed adding vlan %d\n", vid); - mutex_unlock(&mdev->state_lock); + err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx); + if (err) + en_dbg(HW, priv, "Failed adding vlan %d\n", vid);
- return 0; +out: + mutex_unlock(&mdev->state_lock); + return err; }
static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, @@@ -425,7 -432,7 +436,7 @@@ { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; - int err; + int err = 0;
en_dbg(HW, priv, "Killing VID:%d\n", vid);
@@@ -442,7 -449,7 +453,7 @@@ } mutex_unlock(&mdev->state_lock);
- return 0; + return err; }
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) @@@ -1201,8 -1208,8 +1212,8 @@@ static void mlx4_en_netpoll(struct net_ struct mlx4_en_cq *cq; int i;
- for (i = 0; i < priv->rx_ring_num; i++) { - cq = priv->rx_cq[i]; + for (i = 0; i < priv->tx_ring_num; i++) { + cq = priv->tx_cq[i]; napi_schedule(&cq->napi); } } @@@ -1696,10 -1703,9 +1707,9 @@@ int mlx4_en_start_port(struct net_devic /* Schedule multicast task to populate multicast list */ queue_work(mdev->workqueue, &priv->rx_mode_task);
- #ifdef CONFIG_MLX4_EN_VXLAN if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) - vxlan_get_rx_port(dev); - #endif + udp_tunnel_get_rx_info(dev); + priv->port_up = true; netif_tx_start_all_queues(dev); netif_device_attach(dev); @@@ -2036,20 -2042,11 +2046,20 @@@ err return -ENOMEM; }
+static void mlx4_en_shutdown(struct net_device *dev) +{ + rtnl_lock(); + netif_device_detach(dev); + mlx4_en_close(dev); + rtnl_unlock(); +}
void mlx4_en_destroy_netdev(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; + bool shutdown = mdev->dev->persist->interface_state & + MLX4_INTERFACE_STATE_SHUTDOWN;
en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
@@@ -2057,10 -2054,7 +2067,10 @@@ if (priv->registered) { devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev, priv->port)); - unregister_netdev(dev); + if (shutdown) + mlx4_en_shutdown(dev); + else + unregister_netdev(dev); }
if (priv->allocated) @@@ -2085,8 -2079,7 +2095,8 @@@ kfree(priv->tx_ring); kfree(priv->tx_cq);
- free_netdev(dev); + if (!shutdown) + free_netdev(dev); }
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) @@@ -2359,7 -2352,6 +2369,6 @@@ static int mlx4_en_get_phys_port_id(str return 0; }
- #ifdef CONFIG_MLX4_EN_VXLAN static void mlx4_en_add_vxlan_offloads(struct work_struct *work) { int ret; @@@ -2409,15 -2401,19 +2418,19 @@@ static void mlx4_en_del_vxlan_offloads( }
static void mlx4_en_add_vxlan_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct mlx4_en_priv *priv = netdev_priv(dev); + __be16 port = ti->port; __be16 current_port;
- if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + if (ti->sa_family != AF_INET) return;
- if (sa_family == AF_INET6) + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) return;
current_port = priv->vxlan_port; @@@ -2432,15 -2428,19 +2445,19 @@@ }
static void mlx4_en_del_vxlan_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct mlx4_en_priv *priv = netdev_priv(dev); + __be16 port = ti->port; __be16 current_port;
- if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return;
- if (sa_family == AF_INET6) + if (ti->sa_family != AF_INET) + return; + + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) return;
current_port = priv->vxlan_port; @@@ -2464,18 -2464,12 +2481,17 @@@ static netdev_features_t mlx4_en_featur * strip that feature if this is an IPv6 encapsulated frame. */ if (skb->encapsulation && - (skb->ip_summed == CHECKSUM_PARTIAL) && - (ip_hdr(skb)->version != 4)) - features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + (skb->ip_summed == CHECKSUM_PARTIAL)) { + struct mlx4_en_priv *priv = netdev_priv(dev); + + if (!priv->vxlan_port || + (ip_hdr(skb)->version != 4) || + (udp_hdr(skb)->dest != priv->vxlan_port)) + features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + }
return features; } - #endif
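mlx4's .ndo_features_check previously stripped checksum/GSO offload from every non-IPv4 encapsulated frame; the rewrite only strips it when the hardware genuinely cannot handle the packet (no offloaded VXLAN port, non-IPv4 outer header, or a UDP destination other than the offloaded port). The hook's general contract: take the feature set the core proposes for this skb and mask out whatever the device cannot honour. A sketch with placeholder logic:

    static netdev_features_t my_features_check(struct sk_buff *skb,
                                               struct net_device *dev,
                                               netdev_features_t features)
    {
            /* my_hw_can_offload() is a hypothetical capability test. */
            if (skb->encapsulation && !my_hw_can_offload(skb, dev))
                    features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
            return features;
    }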
static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate) { @@@ -2528,11 -2522,9 +2544,9 @@@ static const struct net_device_ops mlx4 .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, - #ifdef CONFIG_MLX4_EN_VXLAN - .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, - .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, + .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port, + .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, .ndo_features_check = mlx4_en_features_check, - #endif .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, };
@@@ -2566,11 -2558,9 +2580,9 @@@ static const struct net_device_ops mlx4 .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, - #ifdef CONFIG_MLX4_EN_VXLAN - .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, - .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, + .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port, + .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, .ndo_features_check = mlx4_en_features_check, - #endif .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, };
@@@ -2836,6 -2826,9 +2848,9 @@@ int mlx4_en_init_netdev(struct mlx4_en_ struct mlx4_en_priv *priv; int i; int err; + #ifdef CONFIG_MLX4_EN_DCB + struct tc_configuration *tc; + #endif
dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), MAX_TX_RINGS, MAX_RX_RINGS); @@@ -2861,10 -2854,8 +2876,8 @@@ INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); - #ifdef CONFIG_MLX4_EN_VXLAN INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); - #endif #ifdef CONFIG_RFS_ACCEL INIT_LIST_HEAD(&priv->filters); spin_lock_init(&priv->filters_lock); @@@ -2904,6 -2895,17 +2917,17 @@@ priv->msg_enable = MLX4_EN_MSG_LEVEL; #ifdef CONFIG_MLX4_EN_DCB if (!mlx4_is_slave(priv->mdev->dev)) { + priv->cee_params.dcbx_cap = DCB_CAP_DCBX_VER_CEE | + DCB_CAP_DCBX_HOST | + DCB_CAP_DCBX_VER_IEEE; + priv->flags |= MLX4_EN_DCB_ENABLED; + priv->cee_params.dcb_cfg.pfc_state = false; + + for (i = 0; i < MLX4_EN_NUM_UP; i++) { + tc = &priv->cee_params.dcb_cfg.tc_config[i]; + tc->dcb_pfc = pfc_disabled; + } + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { dev->dcbnl_ops = &mlx4_en_dcbnl_ops; } else { diff --combined drivers/net/ethernet/mellanox/mlx4/main.c index 546fab0,3564aad..b673a5f --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@@ -292,6 -292,7 +292,7 @@@ static int _mlx4_dev_port(struct mlx4_d dev->caps.pkey_table_len[port] = port_cap->max_pkeys; dev->caps.port_width_cap[port] = port_cap->max_port_width; dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu; + dev->caps.max_tc_eth = port_cap->max_tc_eth; dev->caps.def_mac[port] = port_cap->def_mac; dev->caps.supported_type[port] = port_cap->supported_port_types; dev->caps.suggested_type[port] = port_cap->suggested_type; @@@ -3222,7 -3223,6 +3223,7 @@@ static int mlx4_load_one(struct pci_de
INIT_LIST_HEAD(&priv->pgdir_list); mutex_init(&priv->pgdir_mutex); + spin_lock_init(&priv->cmd.context_lock);
INIT_LIST_HEAD(&priv->bf_list); mutex_init(&priv->bf_mutex); @@@ -4135,11 -4135,8 +4136,11 @@@ static void mlx4_shutdown(struct pci_de
mlx4_info(persist->dev, "mlx4_shutdown was called\n"); mutex_lock(&persist->interface_state_mutex); - if (persist->interface_state & MLX4_INTERFACE_STATE_UP) + if (persist->interface_state & MLX4_INTERFACE_STATE_UP) { + /* Notify mlx4 clients that the kernel is being shut down */ + persist->interface_state |= MLX4_INTERFACE_STATE_SHUTDOWN; mlx4_unload_one(pdev); + } mutex_unlock(&persist->interface_state_mutex); }
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en.h index baa991a,da885c0..b97511b --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@@ -79,6 -79,7 +79,7 @@@
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10 + #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20 @@@ -88,6 -89,7 +89,7 @@@ #define MLX5E_LOG_INDIR_RQT_SIZE 0x7 #define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE) #define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1) + #define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC) #define MLX5E_TX_CQ_POLL_BUDGET 128 #define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */ #define MLX5E_SQ_BF_BUDGET 16 @@@ -143,11 -145,32 +145,32 @@@ struct mlx5e_umr_wqe struct mlx5_wqe_data_seg data; };
+ static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = { + "rx_cqe_moder", + }; + + enum mlx5e_priv_flag { + MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0), + }; + + #define MLX5E_SET_PRIV_FLAG(priv, pflag, enable) \ + do { \ + if (enable) \ + priv->pflags |= pflag; \ + else \ + priv->pflags &= ~pflag; \ + } while (0) + #ifdef CONFIG_MLX5_CORE_EN_DCB #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ #define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */ #endif
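MLX5E_SET_PRIV_FLAG wraps its two-branch body in do { ... } while (0), the standard idiom that makes a multi-statement macro expand to exactly one statement. That matters in unbraced conditionals (illustrative only):

    if (user_requested)
            MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER, true);
    else
            MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER, false);
    /* Compiles only because the macro body is do { ... } while (0);
     * with a plain { ... } expansion, the ';' after the first call
     * would terminate the if and leave the else dangling. */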
+ struct mlx5e_cq_moder { + u16 usec; + u16 pkts; + }; + struct mlx5e_params { u8 log_sq_size; u8 rq_wq_type; @@@ -156,12 -179,11 +179,11 @@@ u8 log_rq_size; u16 num_channels; u8 num_tc; + u8 rx_cq_period_mode; bool rx_cqe_compress_admin; bool rx_cqe_compress; - u16 rx_cq_moderation_usec; - u16 rx_cq_moderation_pkts; - u16 tx_cq_moderation_usec; - u16 tx_cq_moderation_pkts; + struct mlx5e_cq_moder rx_cq_moderation; + struct mlx5e_cq_moder tx_cq_moderation; u16 min_rx_wqes; bool lro_en; u32 lro_wqe_sz; @@@ -173,6 -195,7 +195,7 @@@ #ifdef CONFIG_MLX5_CORE_EN_DCB struct ieee_ets ets; #endif + bool rx_am_enabled; };
struct mlx5e_tstamp { @@@ -191,6 -214,7 +214,7 @@@ enum { MLX5E_RQ_STATE_POST_WQES_ENABLE, MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, + MLX5E_RQ_STATE_AM, };
struct mlx5e_cq { @@@ -198,6 -222,7 +222,7 @@@ struct mlx5_cqwq wq;
/* data path - accessed per napi poll */ + u16 event_ctr; struct napi_struct *napi; struct mlx5_core_cq mcq; struct mlx5e_channel *channel; @@@ -225,6 -250,30 +250,30 @@@ struct mlx5e_dma_info dma_addr_t addr; };
+ struct mlx5e_rx_am_stats { + int ppms; /* packets per msec */ + int epms; /* events per msec */ + }; + + struct mlx5e_rx_am_sample { + ktime_t time; + unsigned int pkt_ctr; + u16 event_ctr; + }; + + struct mlx5e_rx_am { /* Adaptive Moderation */ + u8 state; + struct mlx5e_rx_am_stats prev_stats; + struct mlx5e_rx_am_sample start_sample; + struct work_struct work; + u8 profile_ix; + u8 mode; + u8 tune_state; + u8 steps_right; + u8 steps_left; + u8 tired; + }; + struct mlx5e_rq { /* data path */ struct mlx5_wq_ll wq; @@@ -245,6 -294,8 +294,8 @@@ unsigned long state; int ix;
+ struct mlx5e_rx_am am; /* Adaptive Moderation */ + /* control */ struct mlx5_wq_ctrl wq_ctrl; u8 wq_type; @@@ -354,6 -405,7 +405,7 @@@ struct mlx5e_sq struct mlx5e_channel *channel; int tc; struct mlx5e_ico_wqe_info *ico_wqe_info; + u32 rate_limit; } ____cacheline_aligned_in_smp;
static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n) @@@ -401,7 -453,7 +453,7 @@@ enum mlx5e_traffic_types };
enum { - MLX5E_STATE_ASYNC_EVENTS_ENABLE, + MLX5E_STATE_ASYNC_EVENTS_ENABLED, MLX5E_STATE_OPENED, MLX5E_STATE_DESTROYING, }; @@@ -530,6 -582,7 +582,7 @@@ struct mlx5e_priv u32 indir_rqtn; u32 indir_tirn[MLX5E_NUM_INDIR_TIRS]; struct mlx5e_direct_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; + u32 tx_rates[MLX5E_MAX_NUM_SQS];
struct mlx5e_flow_steering fs; struct mlx5e_vxlan_db vxlan; @@@ -540,6 -593,7 +593,7 @@@ struct work_struct set_rx_mode_work; struct delayed_work update_stats_work;
+ u32 pflags; struct mlx5_core_dev *mdev; struct net_device *netdev; struct mlx5e_stats stats; @@@ -562,6 -616,7 +616,7 @@@ enum mlx5e_link_mode MLX5E_10GBASE_ER = 14, MLX5E_40GBASE_SR4 = 15, MLX5E_40GBASE_LR4 = 16, + MLX5E_50GBASE_SR2 = 18, MLX5E_100GBASE_CR4 = 20, MLX5E_100GBASE_SR4 = 21, MLX5E_100GBASE_KR4 = 22, @@@ -579,6 -634,9 +634,9 @@@
#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+ + void mlx5e_build_ptys2ethtool_map(void); + void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw); u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback); @@@ -612,6 -670,10 +670,10 @@@ void mlx5e_free_rx_fragmented_mpwqe(str struct mlx5e_mpw_info *wi); struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
+ void mlx5e_rx_am(struct mlx5e_rq *rq); + void mlx5e_rx_am_work(struct work_struct *work); + struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode); + void mlx5e_update_stats(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv); @@@ -647,6 -709,9 +709,9 @@@ void mlx5e_build_default_indir_rqt(stru int num_channels); int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, + u8 cq_period_mode); + static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz) { diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index e667a87,39a4d96..b29684d --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@@ -48,123 -48,85 +48,85 @@@ static void mlx5e_get_drvinfo(struct ne sizeof(drvinfo->bus_info)); }
- static const struct { - u32 supported; - u32 advertised; + struct ptys2ethtool_config { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised); u32 speed; - } ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER] = { - [MLX5E_1000BASE_CX_SGMII] = { - .supported = SUPPORTED_1000baseKX_Full, - .advertised = ADVERTISED_1000baseKX_Full, - .speed = 1000, - }, - [MLX5E_1000BASE_KX] = { - .supported = SUPPORTED_1000baseKX_Full, - .advertised = ADVERTISED_1000baseKX_Full, - .speed = 1000, - }, - [MLX5E_10GBASE_CX4] = { - .supported = SUPPORTED_10000baseKX4_Full, - .advertised = ADVERTISED_10000baseKX4_Full, - .speed = 10000, - }, - [MLX5E_10GBASE_KX4] = { - .supported = SUPPORTED_10000baseKX4_Full, - .advertised = ADVERTISED_10000baseKX4_Full, - .speed = 10000, - }, - [MLX5E_10GBASE_KR] = { - .supported = SUPPORTED_10000baseKR_Full, - .advertised = ADVERTISED_10000baseKR_Full, - .speed = 10000, - }, - [MLX5E_20GBASE_KR2] = { - .supported = SUPPORTED_20000baseKR2_Full, - .advertised = ADVERTISED_20000baseKR2_Full, - .speed = 20000, - }, - [MLX5E_40GBASE_CR4] = { - .supported = SUPPORTED_40000baseCR4_Full, - .advertised = ADVERTISED_40000baseCR4_Full, - .speed = 40000, - }, - [MLX5E_40GBASE_KR4] = { - .supported = SUPPORTED_40000baseKR4_Full, - .advertised = ADVERTISED_40000baseKR4_Full, - .speed = 40000, - }, - [MLX5E_56GBASE_R4] = { - .supported = SUPPORTED_56000baseKR4_Full, - .advertised = ADVERTISED_56000baseKR4_Full, - .speed = 56000, - }, - [MLX5E_10GBASE_CR] = { - .supported = SUPPORTED_10000baseKR_Full, - .advertised = ADVERTISED_10000baseKR_Full, - .speed = 10000, - }, - [MLX5E_10GBASE_SR] = { - .supported = SUPPORTED_10000baseKR_Full, - .advertised = ADVERTISED_10000baseKR_Full, - .speed = 10000, - }, - [MLX5E_10GBASE_ER] = { - .supported = SUPPORTED_10000baseKR_Full, - .advertised = ADVERTISED_10000baseKR_Full, - .speed = 10000, - }, - [MLX5E_40GBASE_SR4] = { - .supported = SUPPORTED_40000baseSR4_Full, - .advertised = ADVERTISED_40000baseSR4_Full, - .speed = 40000, - }, - [MLX5E_40GBASE_LR4] = { - .supported = SUPPORTED_40000baseLR4_Full, - .advertised = ADVERTISED_40000baseLR4_Full, - .speed = 40000, - }, - [MLX5E_100GBASE_CR4] = { - .speed = 100000, - }, - [MLX5E_100GBASE_SR4] = { - .speed = 100000, - }, - [MLX5E_100GBASE_KR4] = { - .speed = 100000, - }, - [MLX5E_100GBASE_LR4] = { - .speed = 100000, - }, - [MLX5E_100BASE_TX] = { - .speed = 100, - }, - [MLX5E_1000BASE_T] = { - .supported = SUPPORTED_1000baseT_Full, - .advertised = ADVERTISED_1000baseT_Full, - .speed = 1000, - }, - [MLX5E_10GBASE_T] = { - .supported = SUPPORTED_10000baseT_Full, - .advertised = ADVERTISED_10000baseT_Full, - .speed = 1000, - }, - [MLX5E_25GBASE_CR] = { - .speed = 25000, - }, - [MLX5E_25GBASE_KR] = { - .speed = 25000, - }, - [MLX5E_25GBASE_SR] = { - .speed = 25000, - }, - [MLX5E_50GBASE_CR2] = { - .speed = 50000, - }, - [MLX5E_50GBASE_KR2] = { - .speed = 50000, - }, };
+ static struct ptys2ethtool_config ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER]; + + #define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...) \ + ({ \ + struct ptys2ethtool_config *cfg; \ + const unsigned int modes[] = { __VA_ARGS__ }; \ + unsigned int i; \ + cfg = &ptys2ethtool_table[reg_]; \ + cfg->speed = speed_; \ + bitmap_zero(cfg->supported, \ + __ETHTOOL_LINK_MODE_MASK_NBITS); \ + bitmap_zero(cfg->advertised, \ + __ETHTOOL_LINK_MODE_MASK_NBITS); \ + for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \ + __set_bit(modes[i], cfg->supported); \ + __set_bit(modes[i], cfg->advertised); \ + } \ + }) + + void mlx5e_build_ptys2ethtool_map(void) + { + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, SPEED_1000, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, SPEED_1000, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, SPEED_20000, + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, SPEED_56000, + ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, SPEED_50000, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, SPEED_100000, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, SPEED_100000, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, SPEED_100000, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, SPEED_100000, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, SPEED_25000, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, SPEED_25000, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, SPEED_25000, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, SPEED_50000, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, SPEED_50000, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT); + } + static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; @@@ -184,9 -146,7 +146,9 @@@ #define MLX5E_NUM_SQ_STATS(priv) 
\ (NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \ test_bit(MLX5E_STATE_OPENED, &priv->state)) -#define MLX5E_NUM_PFC_COUNTERS(priv) hweight8(mlx5e_query_pfc_combined(priv)) +#define MLX5E_NUM_PFC_COUNTERS(priv) \ + (hweight8(mlx5e_query_pfc_combined(priv)) * \ + NUM_PPORT_PER_PRIO_PFC_COUNTERS)
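The MLX5_BUILD_PTYS2ETHTOOL_CONFIG rework above is there because the legacy u32 supported/advertised words run out of room past bit 31 — note how the old table could only fill in .speed for the 25G/50G/100G entries — so the table now stores full ethtool link-mode bitmaps populated once at init. A simplified standalone model of the expansion, using a u64 in place of the real __ETHTOOL_DECLARE_LINK_MODE_MASK bitmap and invented bit positions:

#include <stdio.h>

#define LINK_MODES_NUMBER 3

struct mode_cfg {
    unsigned int speed;
    unsigned long long supported; /* stand-in for the ethtool bitmap */
};

static struct mode_cfg table[LINK_MODES_NUMBER];

static void build_map(void)
{
    table[0] = (struct mode_cfg){ 1000,  1ULL << 5 };  /* hypothetical bits */
    table[1] = (struct mode_cfg){ 10000, 1ULL << 12 };
    table[2] = (struct mode_cfg){ 40000, 1ULL << 23 };
}

int main(void)
{
    unsigned int proto_cap = (1 << 0) | (1 << 2); /* PTYS modes 0 and 2 set */
    unsigned long long supported = 0;
    int i;

    build_map();
    /* OR together the entry of every set capability bit, like
     * ptys2ethtool_supported_link() does with bitmap_or(). */
    for (i = 0; i < LINK_MODES_NUMBER; i++)
        if (proto_cap & (1u << i))
            supported |= table[i].supported;
    printf("supported mask: 0x%llx\n", supported); /* bits 5 and 23 */
    return 0;
}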
static int mlx5e_get_sset_count(struct net_device *dev, int sset) { @@@ -200,6 -160,8 +162,8 @@@ MLX5E_NUM_RQ_STATS(priv) + MLX5E_NUM_SQ_STATS(priv) + MLX5E_NUM_PFC_COUNTERS(priv); + case ETH_SS_PRIV_FLAGS: + return ARRAY_SIZE(mlx5e_priv_flags); /* fallthrough */ default: return -EOPNOTSUPP; @@@ -213,41 -175,42 +177,41 @@@ static void mlx5e_fill_stats_strings(st
/* SW counters */ for (i = 0; i < NUM_SW_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].name); + strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
/* Q counters */ for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].name); + strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
/* VPORT counters */ for (i = 0; i < NUM_VPORT_COUNTERS; i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, - vport_stats_desc[i].name); + vport_stats_desc[i].format);
/* PPORT counters */ for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, - pport_802_3_stats_desc[i].name); + pport_802_3_stats_desc[i].format);
for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, - pport_2863_stats_desc[i].name); + pport_2863_stats_desc[i].format);
for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, - pport_2819_stats_desc[i].name); + pport_2819_stats_desc[i].format);
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s", - prio, - pport_per_prio_traffic_stats_desc[i].name); + sprintf(data + (idx++) * ETH_GSTRING_LEN, + pport_per_prio_traffic_stats_desc[i].format, prio); }
pfc_combined = mlx5e_query_pfc_combined(priv); for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) { for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { - sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s", - prio, pport_per_prio_pfc_stats_desc[i].name); + sprintf(data + (idx++) * ETH_GSTRING_LEN, + pport_per_prio_pfc_stats_desc[i].format, prio); } }
@@@ -257,24 -220,28 +221,27 @@@ /* per channel counters */ for (i = 0; i < priv->params.num_channels; i++) for (j = 0; j < NUM_RQ_STATS; j++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, "rx%d_%s", i, - rq_stats_desc[j].name); + sprintf(data + (idx++) * ETH_GSTRING_LEN, + rq_stats_desc[j].format, i);
for (tc = 0; tc < priv->params.num_tc; tc++) for (i = 0; i < priv->params.num_channels; i++) for (j = 0; j < NUM_SQ_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, - "tx%d_%s", - priv->channeltc_to_txq_map[i][tc], - sq_stats_desc[j].name); + sq_stats_desc[j].format, + priv->channeltc_to_txq_map[i][tc]); }
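The string-table change running through this function swaps per-counter .name fields for printf-style .format fields, so each per-channel or per-prio string is produced by a single sprintf with the index as the argument instead of pasting a "rx%d_" prefix in front of a name. A toy illustration, with invented format strings:

#include <stdio.h>

#define GSTRING_LEN 32

/* Hypothetical descriptor formats, one sprintf per emitted string. */
static const char *rq_formats[] = { "rx%d_packets", "rx%d_bytes" };

int main(void)
{
    char data[4][GSTRING_LEN];
    int idx = 0, ch, j;

    for (ch = 0; ch < 2; ch++)
        for (j = 0; j < 2; j++)
            snprintf(data[idx++], GSTRING_LEN, rq_formats[j], ch);
    for (j = 0; j < idx; j++)
        printf("%s\n", data[j]); /* rx0_packets, rx0_bytes, rx1_... */
    return 0;
}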
static void mlx5e_get_strings(struct net_device *dev, uint32_t stringset, uint8_t *data) { struct mlx5e_priv *priv = netdev_priv(dev); + int i;
switch (stringset) { case ETH_SS_PRIV_FLAGS: + for (i = 0; i < ARRAY_SIZE(mlx5e_priv_flags); i++) + strcpy(data + i * ETH_GSTRING_LEN, mlx5e_priv_flags[i]); break;
case ETH_SS_TEST: @@@ -519,10 -486,11 +486,11 @@@ static int mlx5e_get_coalesce(struct ne if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) return -ENOTSUPP;
- coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec; - coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts; - coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec; - coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation_pkts; + coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec; + coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts; + coal->tx_coalesce_usecs = priv->params.tx_cq_moderation.usec; + coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation.pkts; + coal->use_adaptive_rx_coalesce = priv->params.rx_am_enabled;
return 0; } @@@ -533,6 -501,10 +501,10 @@@ static int mlx5e_set_coalesce(struct ne struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_channel *c; + bool restart = + !!coal->use_adaptive_rx_coalesce != priv->params.rx_am_enabled; + bool was_opened; + int err = 0; int tc; int i;
@@@ -540,12 -512,19 +512,19 @@@ return -ENOTSUPP;
mutex_lock(&priv->state_lock); - priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs; - priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames; - priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs; - priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (was_opened && restart) { + mlx5e_close_locked(netdev); + priv->params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce; + } + + priv->params.tx_cq_moderation.usec = coal->tx_coalesce_usecs; + priv->params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames; + priv->params.rx_cq_moderation.usec = coal->rx_coalesce_usecs; + priv->params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames; + + if (!was_opened || restart) goto out;
for (i = 0; i < priv->params.num_channels; ++i) { @@@ -564,35 -543,37 +543,37 @@@ }
out: + if (was_opened && restart) + err = mlx5e_open_locked(netdev); + mutex_unlock(&priv->state_lock); - return 0; + return err; }
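The reworked mlx5e_set_coalesce above now restarts the interface when use_adaptive_rx_coalesce flips, apparently because adaptive moderation has to be wired into the RQs at open time rather than patched onto live queues. The shape of that close/apply/reopen dance under the state lock, modeled in plain C (not the driver code):

#include <stdio.h>

struct dev_state {
    int opened;
    int rx_am_enabled;
};

static void close_locked(struct dev_state *d) { d->opened = 0; }
static int  open_locked(struct dev_state *d)  { d->opened = 1; return 0; }

static int set_adaptive_rx(struct dev_state *d, int want_am)
{
    int restart = (want_am != d->rx_am_enabled);
    int was_opened = d->opened;
    int err = 0;

    if (was_opened && restart)
        close_locked(d);
    d->rx_am_enabled = want_am;      /* apply new parameters */
    if (was_opened && restart)
        err = open_locked(d);        /* propagate the reopen error */
    return err;
}

int main(void)
{
    struct dev_state d = { 1, 0 };

    printf("err=%d opened=%d am=%d\n",
           set_adaptive_rx(&d, 1), d.opened, d.rx_am_enabled);
    return 0;
}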
- static u32 ptys2ethtool_supported_link(u32 eth_proto_cap) + static void ptys2ethtool_supported_link(unsigned long *supported_modes, + u32 eth_proto_cap) { - int i; - u32 supported_modes = 0; + int proto;
- for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (eth_proto_cap & MLX5E_PROT_MASK(i)) - supported_modes |= ptys2ethtool_table[i].supported; - } - return supported_modes; + for_each_set_bit(proto, (unsigned long *)ð_proto_cap, MLX5E_LINK_MODES_NUMBER) + bitmap_or(supported_modes, supported_modes, + ptys2ethtool_table[proto].supported, + __ETHTOOL_LINK_MODE_MASK_NBITS); }
- static u32 ptys2ethtool_adver_link(u32 eth_proto_cap) + static void ptys2ethtool_adver_link(unsigned long *advertising_modes, + u32 eth_proto_cap) { - int i; - u32 advertising_modes = 0; + int proto;
- for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (eth_proto_cap & MLX5E_PROT_MASK(i)) - advertising_modes |= ptys2ethtool_table[i].advertised; - } - return advertising_modes; + for_each_set_bit(proto, (unsigned long *)ð_proto_cap, MLX5E_LINK_MODES_NUMBER) + bitmap_or(advertising_modes, advertising_modes, + ptys2ethtool_table[proto].advertised, + __ETHTOOL_LINK_MODE_MASK_NBITS); }
- static u32 ptys2ethtool_supported_port(u32 eth_proto_cap) + static void ptys2ethtool_supported_port(struct ethtool_link_ksettings *link_ksettings, + u32 eth_proto_cap) { if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR) | MLX5E_PROT_MASK(MLX5E_10GBASE_SR) @@@ -600,7 -581,7 +581,7 @@@ | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4) | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4) | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) { - return SUPPORTED_FIBRE; + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, FIBRE); }
if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4) @@@ -608,9 -589,8 +589,8 @@@ | MLX5E_PROT_MASK(MLX5E_10GBASE_KR) | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4) | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) { - return SUPPORTED_Backplane; + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Backplane); } - return 0; }
int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) @@@ -634,7 -614,7 +614,7 @@@
static void get_speed_duplex(struct net_device *netdev, u32 eth_proto_oper, - struct ethtool_cmd *cmd) + struct ethtool_link_ksettings *link_ksettings) { int i; u32 speed = SPEED_UNKNOWN; @@@ -651,23 -631,32 +631,32 @@@ } } out: - ethtool_cmd_speed_set(cmd, speed); - cmd->duplex = duplex; + link_ksettings->base.speed = speed; + link_ksettings->base.duplex = duplex; }
- static void get_supported(u32 eth_proto_cap, u32 *supported) + static void get_supported(u32 eth_proto_cap, + struct ethtool_link_ksettings *link_ksettings) { - *supported |= ptys2ethtool_supported_port(eth_proto_cap); - *supported |= ptys2ethtool_supported_link(eth_proto_cap); - *supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + unsigned long *supported = link_ksettings->link_modes.supported; + + ptys2ethtool_supported_port(link_ksettings, eth_proto_cap); + ptys2ethtool_supported_link(supported, eth_proto_cap); + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Asym_Pause); }
static void get_advertising(u32 eth_proto_cap, u8 tx_pause, - u8 rx_pause, u32 *advertising) + u8 rx_pause, + struct ethtool_link_ksettings *link_ksettings) { - *advertising |= ptys2ethtool_adver_link(eth_proto_cap); - *advertising |= tx_pause ? ADVERTISED_Pause : 0; - *advertising |= (tx_pause ^ rx_pause) ? ADVERTISED_Asym_Pause : 0; + unsigned long *advertising = link_ksettings->link_modes.advertising; + + ptys2ethtool_adver_link(advertising, eth_proto_cap); + if (tx_pause) + ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); + if (tx_pause ^ rx_pause) + ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause); }
static u8 get_connector_port(u32 eth_proto) @@@ -695,13 -684,16 +684,16 @@@ return PORT_OTHER; }
- static void get_lp_advertising(u32 eth_proto_lp, u32 *lp_advertising) + static void get_lp_advertising(u32 eth_proto_lp, + struct ethtool_link_ksettings *link_ksettings) { - *lp_advertising = ptys2ethtool_adver_link(eth_proto_lp); + unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; + + ptys2ethtool_adver_link(lp_advertising, eth_proto_lp); }
- static int mlx5e_get_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) + static int mlx5e_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *link_ksettings) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; @@@ -710,6 -702,8 +702,8 @@@ u32 eth_proto_admin; u32 eth_proto_lp; u32 eth_proto_oper; + u8 an_disable_admin; + u8 an_status; int err;
err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); @@@ -720,35 -714,49 +714,49 @@@ goto err_query_ptys; }
- eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); - eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); - eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); - eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); + eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); + eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); + eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); + eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); + an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); + an_status = MLX5_GET(ptys_reg, out, an_status);
- cmd->supported = 0; - cmd->advertising = 0; + ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); + ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
- get_supported(eth_proto_cap, &cmd->supported); - get_advertising(eth_proto_admin, 0, 0, &cmd->advertising); - get_speed_duplex(netdev, eth_proto_oper, cmd); + get_supported(eth_proto_cap, link_ksettings); + get_advertising(eth_proto_admin, 0, 0, link_ksettings); + get_speed_duplex(netdev, eth_proto_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
- cmd->port = get_connector_port(eth_proto_oper); - get_lp_advertising(eth_proto_lp, &cmd->lp_advertising); + link_ksettings->base.port = get_connector_port(eth_proto_oper); + get_lp_advertising(eth_proto_lp, link_ksettings); + + if (an_status == MLX5_AN_COMPLETE) + ethtool_link_ksettings_add_link_mode(link_ksettings, + lp_advertising, Autoneg);
- cmd->transceiver = XCVR_INTERNAL; + link_ksettings->base.autoneg = an_disable_admin ? AUTONEG_DISABLE : + AUTONEG_ENABLE; + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, + Autoneg); + if (!an_disable_admin) + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, Autoneg);
err_query_ptys: return err; }
- static u32 mlx5e_ethtool2ptys_adver_link(u32 link_modes) + static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes) { u32 i, ptys_modes = 0;
for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (ptys2ethtool_table[i].advertised & link_modes) + if (bitmap_intersects(ptys2ethtool_table[i].advertised, + link_modes, + __ETHTOOL_LINK_MODE_MASK_NBITS)) ptys_modes |= MLX5E_PROT_MASK(i); }
@@@ -767,21 -775,25 +775,25 @@@ static u32 mlx5e_ethtool2ptys_speed_lin return speed_links; }
- static int mlx5e_set_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) + static int mlx5e_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *link_ksettings) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; + u32 eth_proto_cap, eth_proto_admin; + bool an_changes = false; + u8 an_disable_admin; + u8 an_disable_cap; + bool an_disable; u32 link_modes; + u8 an_status; u32 speed; - u32 eth_proto_cap, eth_proto_admin; - enum mlx5_port_status ps; int err;
- speed = ethtool_cmd_speed(cmd); + speed = link_ksettings->base.speed;
- link_modes = cmd->autoneg == AUTONEG_ENABLE ? - mlx5e_ethtool2ptys_adver_link(cmd->advertising) : + link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ? + mlx5e_ethtool2ptys_adver_link(link_ksettings->link_modes.advertising) : mlx5e_ethtool2ptys_speed_link(speed);
err = mlx5_query_port_proto_cap(mdev, ð_proto_cap, MLX5_PTYS_EN); @@@ -806,15 -818,18 +818,18 @@@ goto out; }
- if (link_modes == eth_proto_admin) + mlx5_query_port_autoneg(mdev, MLX5_PTYS_EN, &an_status, + &an_disable_cap, &an_disable_admin); + + an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE; + an_changes = ((!an_disable && an_disable_admin) || + (an_disable && !an_disable_admin)); + + if (!an_changes && link_modes == eth_proto_admin) goto out;
- mlx5_query_port_admin_status(mdev, &ps); - if (ps == MLX5_PORT_UP) - mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN); - mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN); - if (ps == MLX5_PORT_UP) - mlx5_set_port_admin_status(mdev, MLX5_PORT_UP); + mlx5_set_port_ptys(mdev, an_disable, link_modes, MLX5_PTYS_EN); + mlx5_toggle_port_link(mdev);
out: return err; @@@ -1272,6 -1287,87 +1287,87 @@@ static int mlx5e_get_module_eeprom(stru return 0; }
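In the mlx5e_set_link_ksettings hunk above, the port is only touched when the requested link modes or the autoneg admin state actually change; the two-clause an_changes test is an inequality in disguise. A quick truth-table check:

#include <stdio.h>

static int an_changed(int an_disable, int an_disable_admin)
{
    return (!an_disable && an_disable_admin) ||
           (an_disable && !an_disable_admin); /* == (an_disable != an_disable_admin) */
}

int main(void)
{
    printf("%d %d %d %d\n",
           an_changed(0, 0), an_changed(0, 1),
           an_changed(1, 0), an_changed(1, 1)); /* 0 1 1 0 */
    return 0;
}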
+ typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable); + + static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable) + { + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + bool rx_mode_changed; + u8 rx_cq_period_mode; + int err = 0; + bool reset; + + rx_cq_period_mode = enable ? + MLX5_CQ_PERIOD_MODE_START_FROM_CQE : + MLX5_CQ_PERIOD_MODE_START_FROM_EQE; + rx_mode_changed = rx_cq_period_mode != priv->params.rx_cq_period_mode; + + if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE && + !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe)) + return -ENOTSUPP; + + if (!rx_mode_changed) + return 0; + + reset = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (reset) + mlx5e_close_locked(netdev); + + mlx5e_set_rx_cq_mode_params(&priv->params, rx_cq_period_mode); + + if (reset) + err = mlx5e_open_locked(netdev); + + return err; + } + + static int mlx5e_handle_pflag(struct net_device *netdev, + u32 wanted_flags, + enum mlx5e_priv_flag flag, + mlx5e_pflag_handler pflag_handler) + { + struct mlx5e_priv *priv = netdev_priv(netdev); + bool enable = !!(wanted_flags & flag); + u32 changes = wanted_flags ^ priv->pflags; + int err; + + if (!(changes & flag)) + return 0; + + err = pflag_handler(netdev, enable); + if (err) { + netdev_err(netdev, "%s private flag 0x%x failed err %d\n", + enable ? "Enable" : "Disable", flag, err); + return err; + } + + MLX5E_SET_PRIV_FLAG(priv, flag, enable); + return 0; + } + + static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags) + { + struct mlx5e_priv *priv = netdev_priv(netdev); + int err; + + mutex_lock(&priv->state_lock); + + err = mlx5e_handle_pflag(netdev, pflags, + MLX5E_PFLAG_RX_CQE_BASED_MODER, + set_pflag_rx_cqe_based_moder); + + mutex_unlock(&priv->state_lock); + return err ? -EINVAL : 0; + } + + static u32 mlx5e_get_priv_flags(struct net_device *netdev) + { + struct mlx5e_priv *priv = netdev_priv(netdev); + + return priv->pflags; + } + const struct ethtool_ops mlx5e_ethtool_ops = { .get_drvinfo = mlx5e_get_drvinfo, .get_link = ethtool_op_get_link, @@@ -1284,8 -1380,8 +1380,8 @@@ .set_channels = mlx5e_set_channels, .get_coalesce = mlx5e_get_coalesce, .set_coalesce = mlx5e_set_coalesce, - .get_settings = mlx5e_get_settings, - .set_settings = mlx5e_set_settings, + .get_link_ksettings = mlx5e_get_link_ksettings, + .set_link_ksettings = mlx5e_set_link_ksettings, .get_rxfh_key_size = mlx5e_get_rxfh_key_size, .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size, .get_rxfh = mlx5e_get_rxfh, @@@ -1301,4 -1397,6 +1397,6 @@@ .set_wol = mlx5e_set_wol, .get_module_info = mlx5e_get_module_info, .get_module_eeprom = mlx5e_get_module_eeprom, + .get_priv_flags = mlx5e_get_priv_flags, + .set_priv_flags = mlx5e_set_priv_flags }; diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_main.c index cb6defd,02a0f17..a64ce5d --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@@ -40,8 -40,9 +40,9 @@@ #include "vxlan.h"
struct mlx5e_rq_param { - u32 rqc[MLX5_ST_SZ_DW(rqc)]; - struct mlx5_wq_param wq; + u32 rqc[MLX5_ST_SZ_DW(rqc)]; + struct mlx5_wq_param wq; + bool am_enabled; };
struct mlx5e_sq_param { @@@ -55,6 -56,7 +56,7 @@@ struct mlx5e_cq_param u32 cqc[MLX5_ST_SZ_DW(cqc)]; struct mlx5_wq_param wq; u16 eq_ix; + u8 cq_period_mode; };
struct mlx5e_channel_param { @@@ -105,11 -107,11 +107,11 @@@ static void mlx5e_update_sw_counters(st
s->rx_packets += rq_stats->packets; s->rx_bytes += rq_stats->bytes; - s->lro_packets += rq_stats->lro_packets; - s->lro_bytes += rq_stats->lro_bytes; + s->rx_lro_packets += rq_stats->lro_packets; + s->rx_lro_bytes += rq_stats->lro_bytes; s->rx_csum_none += rq_stats->csum_none; - s->rx_csum_sw += rq_stats->csum_sw; - s->rx_csum_inner += rq_stats->csum_inner; + s->rx_csum_complete += rq_stats->csum_complete; + s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; s->rx_wqe_err += rq_stats->wqe_err; s->rx_mpwqe_filler += rq_stats->mpwqe_filler; s->rx_mpwqe_frag += rq_stats->mpwqe_frag; @@@ -122,23 -124,24 +124,23 @@@
s->tx_packets += sq_stats->packets; s->tx_bytes += sq_stats->bytes; - s->tso_packets += sq_stats->tso_packets; - s->tso_bytes += sq_stats->tso_bytes; - s->tso_inner_packets += sq_stats->tso_inner_packets; - s->tso_inner_bytes += sq_stats->tso_inner_bytes; + s->tx_tso_packets += sq_stats->tso_packets; + s->tx_tso_bytes += sq_stats->tso_bytes; + s->tx_tso_inner_packets += sq_stats->tso_inner_packets; + s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes; s->tx_queue_stopped += sq_stats->stopped; s->tx_queue_wake += sq_stats->wake; s->tx_queue_dropped += sq_stats->dropped; - s->tx_csum_inner += sq_stats->csum_offload_inner; - tx_offload_none += sq_stats->csum_offload_none; + s->tx_csum_partial_inner += sq_stats->csum_partial_inner; + tx_offload_none += sq_stats->csum_none; } }
/* Update calculated offload counters */ - s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner; - s->rx_csum_good = s->rx_packets - s->rx_csum_none - - s->rx_csum_sw; + s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner; + s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
- s->link_down_events = MLX5_GET(ppcnt_reg, + s->link_down_events_phy = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters, counter_set.phys_layer_cntrs.link_down_events); } @@@ -243,7 -246,7 +245,7 @@@ static void mlx5e_async_event(struct ml { struct mlx5e_priv *priv = vpriv;
- if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state)) + if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state)) return;
switch (event) { @@@ -259,12 -262,12 +261,12 @@@
static void mlx5e_enable_async_events(struct mlx5e_priv *priv) { - set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); + set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state); }
static void mlx5e_disable_async_events(struct mlx5e_priv *priv) { - clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); + clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state); synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC)); }
@@@ -335,6 -338,9 +337,9 @@@ static int mlx5e_create_rq(struct mlx5e wqe->data.byte_count = cpu_to_be32(byte_count); }
+ INIT_WORK(&rq->am.work, mlx5e_rx_am_work); + rq->am.mode = priv->params.rx_cq_period_mode; + rq->wq_type = priv->params.rq_wq_type; rq->pdev = c->pdev; rq->netdev = c->netdev; @@@ -507,6 -513,9 +512,9 @@@ static int mlx5e_open_rq(struct mlx5e_c if (err) goto err_disable_rq;
+ if (param->am_enabled) + set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); + set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP; @@@ -535,6 -544,8 +543,8 @@@ static void mlx5e_close_rq(struct mlx5e /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */ napi_synchronize(&rq->channel->napi);
+ cancel_work_sync(&rq->am.work); + mlx5e_disable_rq(rq); mlx5e_destroy_rq(rq); } @@@ -579,7 -590,7 +589,7 @@@ static int mlx5e_create_sq(struct mlx5e void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq); int err;
- err = mlx5_alloc_map_uar(mdev, &sq->uar, true); + err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf)); if (err) return err;
@@@ -701,7 -712,8 +711,8 @@@ static int mlx5e_enable_sq(struct mlx5e return err; }
- static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state) + static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, + int next_state, bool update_rl, int rl_index) { struct mlx5e_channel *c = sq->channel; struct mlx5e_priv *priv = c->priv; @@@ -721,6 -733,10 +732,10 @@@
MLX5_SET(modify_sq_in, in, sq_state, curr_state); MLX5_SET(sqc, sqc, state, next_state); + if (update_rl && next_state == MLX5_SQC_STATE_RDY) { + MLX5_SET64(modify_sq_in, in, modify_bitmask, 1); + MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index); + }
err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
@@@ -736,6 -752,8 +751,8 @@@ static void mlx5e_disable_sq(struct mlx struct mlx5_core_dev *mdev = priv->mdev;
mlx5_core_destroy_sq(mdev, sq->sqn); + if (sq->rate_limit) + mlx5_rl_remove_rate(mdev, sq->rate_limit); }
static int mlx5e_open_sq(struct mlx5e_channel *c, @@@ -753,7 -771,8 +770,8 @@@ if (err) goto err_destroy_sq;
- err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY); + err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY, + false, 0); if (err) goto err_disable_sq;
@@@ -792,7 -811,8 +810,8 @@@ static void mlx5e_close_sq(struct mlx5e if (mlx5e_sq_has_room_for(sq, 1)) mlx5e_send_nop(sq, true);
- mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR); + mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR, + false, 0); }
while (sq->cc != sq->pc) /* wait till sq is empty */ @@@ -886,6 -906,7 +905,7 @@@ static int mlx5e_enable_cq(struct mlx5e
mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+ MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - @@@ -915,8 -936,7 +935,7 @@@ static void mlx5e_disable_cq(struct mlx static int mlx5e_open_cq(struct mlx5e_channel *c, struct mlx5e_cq_param *param, struct mlx5e_cq *cq, - u16 moderation_usecs, - u16 moderation_frames) + struct mlx5e_cq_moder moderation) { int err; struct mlx5e_priv *priv = c->priv; @@@ -932,8 -952,8 +951,8 @@@
if (MLX5_CAP_GEN(mdev, cq_moderation)) mlx5_core_modify_cq_moderation(mdev, &cq->mcq, - moderation_usecs, - moderation_frames); + moderation.usec, + moderation.pkts); return 0;
err_destroy_cq: @@@ -962,8 -982,7 +981,7 @@@ static int mlx5e_open_tx_cqs(struct mlx
for (tc = 0; tc < c->num_tc; tc++) { err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq, - priv->params.tx_cq_moderation_usec, - priv->params.tx_cq_moderation_pkts); + priv->params.tx_cq_moderation); if (err) goto err_close_tx_cqs; } @@@ -1023,14 -1042,91 +1041,91 @@@ static void mlx5e_build_channeltc_to_tx ix + i * priv->params.num_channels; }
+ static int mlx5e_set_sq_maxrate(struct net_device *dev, + struct mlx5e_sq *sq, u32 rate) + { + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + u16 rl_index = 0; + int err; + + if (rate == sq->rate_limit) + /* nothing to do */ + return 0; + + if (sq->rate_limit) + /* remove current rl index to free space to next ones */ + mlx5_rl_remove_rate(mdev, sq->rate_limit); + + sq->rate_limit = 0; + + if (rate) { + err = mlx5_rl_add_rate(mdev, rate, &rl_index); + if (err) { + netdev_err(dev, "Failed configuring rate %u: %d\n", + rate, err); + return err; + } + } + + err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, + MLX5_SQC_STATE_RDY, true, rl_index); + if (err) { + netdev_err(dev, "Failed configuring rate %u: %d\n", + rate, err); + /* remove the rate from the table */ + if (rate) + mlx5_rl_remove_rate(mdev, rate); + return err; + } + + sq->rate_limit = rate; + return 0; + } + + static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate) + { + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_sq *sq = priv->txq_to_sq_map[index]; + int err = 0; + + if (!mlx5_rl_is_supported(mdev)) { + netdev_err(dev, "Rate limiting is not supported on this device\n"); + return -EINVAL; + } + + /* rate is given in Mb/sec, HW config is in Kb/sec */ + rate = rate << 10; + + /* Check whether rate in valid range, 0 is always valid */ + if (rate && !mlx5_rl_is_in_range(mdev, rate)) { + netdev_err(dev, "TX rate %u, is not in range\n", rate); + return -ERANGE; + } + + mutex_lock(&priv->state_lock); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + err = mlx5e_set_sq_maxrate(dev, sq, rate); + if (!err) + priv->tx_rates[index] = rate; + mutex_unlock(&priv->state_lock); + + return err; + } + static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct mlx5e_channel_param *cparam, struct mlx5e_channel **cp) { + struct mlx5e_cq_moder icosq_cq_moder = {0, 0}; struct net_device *netdev = priv->netdev; + struct mlx5e_cq_moder rx_cq_profile; int cpu = mlx5e_get_cpu(priv, ix); struct mlx5e_channel *c; + struct mlx5e_sq *sq; int err; + int i;
c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); if (!c) @@@ -1044,11 -1140,16 +1139,16 @@@ c->mkey_be = cpu_to_be32(priv->mkey.key); c->num_tc = priv->params.num_tc;
+ if (priv->params.rx_am_enabled) + rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode); + else + rx_cq_profile = priv->params.rx_cq_moderation; + mlx5e_build_channeltc_to_txq_map(priv, ix);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
- err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, 0, 0); + err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder); if (err) goto err_napi_del;
@@@ -1057,8 -1158,7 +1157,7 @@@ goto err_close_icosq_cq;
err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq, - priv->params.rx_cq_moderation_usec, - priv->params.rx_cq_moderation_pkts); + rx_cq_profile); if (err) goto err_close_tx_cqs;
@@@ -1072,6 -1172,16 +1171,16 @@@ if (err) goto err_close_icosq;
+ for (i = 0; i < priv->params.num_tc; i++) { + u32 txq_ix = priv->channeltc_to_txq_map[ix][i]; + + if (priv->tx_rates[txq_ix]) { + sq = priv->txq_to_sq_map[txq_ix]; + mlx5e_set_sq_maxrate(priv->netdev, sq, + priv->tx_rates[txq_ix]); + } + } + err = mlx5e_open_rq(c, &cparam->rq, &c->rq); if (err) goto err_close_sqs; @@@ -1148,6 -1258,8 +1257,8 @@@ static void mlx5e_build_rq_param(struc
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); param->wq.linear = 1; + + param->am_enabled = priv->params.rx_am_enabled; }
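Back in the rate-limiting hunk (mlx5e_set_tx_maxrate above), note the unit conversion: ndo_set_tx_maxrate hands the driver Mb/s while the firmware rate table takes Kb/s, and the driver converts with a shift — x1024 rather than an exact x1000. Worked numbers:

#include <stdio.h>

int main(void)
{
    unsigned int rate_mbps = 100;
    unsigned int rate_kbps = rate_mbps << 10; /* driver's conversion */

    printf("%u Mb/s -> %u Kb/s (shift)\n", rate_mbps, rate_kbps);   /* 102400 */
    printf("%u Mb/s -> %u Kb/s (exact)\n", rate_mbps, rate_mbps * 1000);
    return 0;
}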
static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param) @@@ -1213,6 -1325,8 +1324,8 @@@ static void mlx5e_build_rx_cq_param(str }
mlx5e_build_common_cq_param(priv, param); + + param->cq_period_mode = priv->params.rx_cq_period_mode; }
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, @@@ -1223,6 -1337,8 +1336,8 @@@ MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
mlx5e_build_common_cq_param(priv, param); + + param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; }
static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv, @@@ -1234,6 -1350,8 +1349,8 @@@ MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
mlx5e_build_common_cq_param(priv, param); + + param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; }
static void mlx5e_build_icosq_param(struct mlx5e_priv *priv, @@@ -2519,25 -2637,31 +2636,31 @@@ static int mlx5e_get_vf_stats(struct ne }
static void mlx5e_add_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct mlx5e_priv *priv = netdev_priv(netdev);
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + if (!mlx5e_vxlan_allowed(priv->mdev)) return;
- mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1); + mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1); }
static void mlx5e_del_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct mlx5e_priv *priv = netdev_priv(netdev);
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + if (!mlx5e_vxlan_allowed(priv->mdev)) return;
- mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0); + mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0); }
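The two hunks above are part of the tree-wide move from vxlan-specific ndo callbacks to the generic udp_tunnel_info ones; since the generic hook also fires for other tunnel types, the driver now has to filter on ti->type before queueing work. A minimal model of that shape (enum values and port numbers invented for the demo):

#include <stdio.h>

enum tunnel_type { TUNNEL_VXLAN, TUNNEL_GENEVE };

struct udp_tunnel_info_model {
    enum tunnel_type type;
    unsigned short port; /* host order for the demo */
};

static void add_tunnel_port(const struct udp_tunnel_info_model *ti)
{
    if (ti->type != TUNNEL_VXLAN)
        return; /* only VXLAN offload is supported */
    printf("queue vxlan add for port %u\n", ti->port);
}

int main(void)
{
    struct udp_tunnel_info_model vxlan = { TUNNEL_VXLAN, 4789 };
    struct udp_tunnel_info_model geneve = { TUNNEL_GENEVE, 6081 };

    add_tunnel_port(&vxlan);  /* queued */
    add_tunnel_port(&geneve); /* ignored */
    return 0;
}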
static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv, @@@ -2604,6 -2728,7 +2727,7 @@@ static const struct net_device_ops mlx5 .ndo_set_features = mlx5e_set_features, .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, + .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx5e_rx_flow_steer, #endif @@@ -2623,8 -2748,9 +2747,9 @@@ static const struct net_device_ops mlx5 .ndo_set_features = mlx5e_set_features, .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, - .ndo_add_vxlan_port = mlx5e_add_vxlan_port, - .ndo_del_vxlan_port = mlx5e_del_vxlan_port, + .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, + .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, + .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, .ndo_features_check = mlx5e_features_check, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx5e_rx_flow_steer, @@@ -2753,6 -2879,20 +2878,20 @@@ static bool cqe_compress_heuristic(u32 (pci_bw < 40000) && (pci_bw < link_speed)); }
+ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) + { + params->rx_cq_period_mode = cq_period_mode; + + params->rx_cq_moderation.pkts = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; + params->rx_cq_moderation.usec = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; + + if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) + params->rx_cq_moderation.usec = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; + } + static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, struct net_device *netdev, int num_channels) @@@ -2760,6 -2900,9 +2899,9 @@@ struct mlx5e_priv *priv = netdev_priv(netdev); u32 link_speed = 0; u32 pci_bw = 0; + u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? + MLX5_CQ_PERIOD_MODE_START_FROM_CQE : + MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; @@@ -2805,13 -2948,13 +2947,13 @@@
priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type, BIT(priv->params.log_rq_size)); - priv->params.rx_cq_moderation_usec = - MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; - priv->params.rx_cq_moderation_pkts = - MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; - priv->params.tx_cq_moderation_usec = + + priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation); + mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode); + + priv->params.tx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; - priv->params.tx_cq_moderation_pkts = + priv->params.tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); priv->params.num_tc = 1; @@@ -2826,6 -2969,10 +2968,10 @@@ priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ /* Initialize pflags */ + MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER, + priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE); + priv->mdev = mdev; priv->netdev = netdev; priv->params.num_channels = num_channels; @@@ -3127,7 -3274,7 +3273,7 @@@ static void *mlx5e_create_netdev(struc
if (mlx5e_vxlan_allowed(mdev)) { rtnl_lock(); - vxlan_get_rx_port(netdev); + udp_tunnel_get_rx_info(netdev); rtnl_unlock(); }
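Tying the defaults together: mlx5e_set_rx_cq_mode_params earlier in this file gives the RX CQ a mode-dependent usec budget — CQE-based moderation gets its own default — while the packet budget stays shared. A sketch of that selection, with placeholder constants standing in for the MLX5E_PARAMS_DEFAULT_* values:

#include <stdio.h>

enum { PERIOD_MODE_FROM_EQE, PERIOD_MODE_FROM_CQE };

struct rx_moder { unsigned short usec, pkts; };

static void set_rx_cq_mode_params(struct rx_moder *m, int cq_period_mode)
{
    m->pkts = 64;                 /* placeholder shared default */
    m->usec = 32;                 /* placeholder EQE-mode default */
    if (cq_period_mode == PERIOD_MODE_FROM_CQE)
        m->usec = 8;              /* placeholder CQE-mode default */
}

int main(void)
{
    struct rx_moder m;

    set_rx_cq_mode_params(&m, PERIOD_MODE_FROM_CQE);
    printf("usec=%u pkts=%u\n", m.usec, m.pkts);
    return 0;
}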
@@@ -3233,6 -3380,7 +3379,7 @@@ static struct mlx5_interface mlx5e_inte
void mlx5e_init(void) { + mlx5e_build_ptys2ethtool_map(); mlx5_register_interface(&mlx5e_interface); }
diff --combined drivers/net/ethernet/mellanox/mlx5/core/main.c index c65f4a1,08cae34..1f3b6d6 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@@ -1144,6 -1144,13 +1144,13 @@@ static int mlx5_load_one(struct mlx5_co dev_err(&pdev->dev, "Failed to init flow steering\n"); goto err_fs; } + + err = mlx5_init_rl_table(dev); + if (err) { + dev_err(&pdev->dev, "Failed to init rate limiting\n"); + goto err_rl; + } + #ifdef CONFIG_MLX5_CORE_EN err = mlx5_eswitch_init(dev); if (err) { @@@ -1183,6 -1190,8 +1190,8 @@@ err_sriov mlx5_eswitch_cleanup(dev->priv.eswitch); #endif err_reg_dev: + mlx5_cleanup_rl_table(dev); + err_rl: mlx5_cleanup_fs(dev); err_fs: mlx5_cleanup_mkey_table(dev); @@@ -1253,6 -1262,7 +1262,7 @@@ static int mlx5_unload_one(struct mlx5_ mlx5_eswitch_cleanup(dev->priv.eswitch); #endif
+ mlx5_cleanup_rl_table(dev); mlx5_cleanup_fs(dev); mlx5_cleanup_mkey_table(dev); mlx5_cleanup_srq_table(dev); @@@ -1508,9 -1518,8 +1518,9 @@@ static const struct pci_device_id mlx5_ { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */ { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */ { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */ - { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5 */ + { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5, PCIe 3.0 */ { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */ + { PCI_VDEVICE(MELLANOX, 0x1019) }, /* ConnectX-5, PCIe 4.0 */ { 0, } };
diff --combined drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 660429e,d23948b..a453fff --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@@ -49,6 -49,7 +49,7 @@@ #include <linux/jiffies.h> #include <linux/bitops.h> #include <linux/list.h> + #include <linux/notifier.h> #include <linux/dcbnl.h> #include <net/switchdev.h> #include <generated/utsrelease.h> @@@ -408,11 -409,7 +409,11 @@@ static netdev_tx_t mlxsw_sp_port_xmit(s }
mlxsw_sp_txhdr_construct(skb, &tx_info); - len = skb->len; + /* TX header is consumed by HW on the way so we shouldn't count its + * bytes as being sent. + */ + len = skb->len - MLXSW_TXHDR_LEN; + /* Due to a race we might fail here because of a full queue. In that * unlikely case we simply drop the packet. */ @@@ -636,14 -633,14 +637,14 @@@ static int mlxsw_sp_port_vlan_mode_tran return 0; }
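The len fix at the top of this hunk stops the prepended TX header from inflating the byte counters: the ASIC strips it on the way out, so only the real frame bytes hit the wire. Worked numbers, assuming a 16-byte TX header for the arithmetic (treat the constant as illustrative, not a statement of MLXSW_TXHDR_LEN's value):

#include <stdio.h>

#define TXHDR_LEN 16 /* assumed header size for the demo */

int main(void)
{
    unsigned int skb_len = 1514 + TXHDR_LEN; /* frame + prepended header */
    unsigned int counted = skb_len - TXHDR_LEN;

    printf("on-wire bytes counted: %u\n", counted); /* 1514 */
    return 0;
}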
- static struct mlxsw_sp_vfid * + static struct mlxsw_sp_fid * mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid) { - struct mlxsw_sp_vfid *vfid; + struct mlxsw_sp_fid *f;
- list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) { - if (vfid->vid == vid) - return vfid; + list_for_each_entry(f, &mlxsw_sp->port_vfids.list, list) { + if (f->vid == vid) + return f; }
return NULL; @@@ -655,75 -652,70 +656,70 @@@ static u16 mlxsw_sp_avail_vfid_get(cons MLXSW_SP_VFID_PORT_MAX); }
- static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid) + static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create) { - u16 fid = mlxsw_sp_vfid_to_fid(vfid); char sfmr_pl[MLXSW_REG_SFMR_LEN];
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0); + mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); }
- static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid) - { - u16 fid = mlxsw_sp_vfid_to_fid(vfid); - char sfmr_pl[MLXSW_REG_SFMR_LEN]; + static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); - } - - static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, - u16 vid) + static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, + u16 vid) { struct device *dev = mlxsw_sp->bus_info->dev; - struct mlxsw_sp_vfid *vfid; - u16 n_vfid; + struct mlxsw_sp_fid *f; + u16 vfid, fid; int err;
- n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp); - if (n_vfid == MLXSW_SP_VFID_PORT_MAX) { + vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp); + if (vfid == MLXSW_SP_VFID_PORT_MAX) { dev_err(dev, "No available vFIDs\n"); return ERR_PTR(-ERANGE); }
- err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid); + fid = mlxsw_sp_vfid_to_fid(vfid); + err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true); if (err) { - dev_err(dev, "Failed to create vFID=%d\n", n_vfid); + dev_err(dev, "Failed to create FID=%d\n", fid); return ERR_PTR(err); }
- vfid = kzalloc(sizeof(*vfid), GFP_KERNEL); - if (!vfid) + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (!f) goto err_allocate_vfid;
- vfid->vfid = n_vfid; - vfid->vid = vid; + f->leave = mlxsw_sp_vport_vfid_leave; + f->fid = fid; + f->vid = vid;
- list_add(&vfid->list, &mlxsw_sp->port_vfids.list); - set_bit(n_vfid, mlxsw_sp->port_vfids.mapped); + list_add(&f->list, &mlxsw_sp->port_vfids.list); + set_bit(vfid, mlxsw_sp->port_vfids.mapped);
- return vfid; + return f;
err_allocate_vfid: - __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid); + mlxsw_sp_vfid_op(mlxsw_sp, fid, false); return ERR_PTR(-ENOMEM); }
static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_vfid *vfid) + struct mlxsw_sp_fid *f) { - clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped); - list_del(&vfid->list); + u16 vfid = mlxsw_sp_fid_to_vfid(f->fid); + + clear_bit(vfid, mlxsw_sp->port_vfids.mapped); + list_del(&f->list);
- __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid); + mlxsw_sp_vfid_op(mlxsw_sp, f->fid, false);
- kfree(vfid); + kfree(f); }
static struct mlxsw_sp_port * - mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_vfid *vfid) + mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_vport;
@@@ -741,8 -733,7 +737,7 @@@ mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING; mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged; mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id; - mlxsw_sp_vport->vport.vfid = vfid; - mlxsw_sp_vport->vport.vid = vfid->vid; + mlxsw_sp_vport->vport.vid = vid;
list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
@@@ -755,13 -746,72 +750,72 @@@ static void mlxsw_sp_port_vport_destroy kfree(mlxsw_sp_vport); }
+ static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, + bool valid) + { + enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; + u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + + return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid, + vid); + } + + static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport) + { + u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + struct mlxsw_sp_fid *f; + int err; + + f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, vid); + if (!f) { + f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, vid); + if (IS_ERR(f)) + return PTR_ERR(f); + } + + if (!f->ref_count) { + err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true); + if (err) + goto err_vport_flood_set; + } + + err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true); + if (err) + goto err_vport_fid_map; + + mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f); + f->ref_count++; + + return 0; + + err_vport_fid_map: + if (!f->ref_count) + mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false); + err_vport_flood_set: + if (!f->ref_count) + mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f); + return err; + } + + static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport) + { + struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + + mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL); + + mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false); + + if (--f->ref_count == 0) { + mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false); + mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f); + } + } + int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_port *mlxsw_sp_vport; - struct mlxsw_sp_vfid *vfid; int err;
/* VLAN 0 is added to HW filter when device goes up, but it is @@@ -775,31 -825,10 +829,10 @@@ return 0; }
- vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid); - if (!vfid) { - vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid); - if (IS_ERR(vfid)) { - netdev_err(dev, "Failed to create vFID for VID=%d\n", - vid); - return PTR_ERR(vfid); - } - } - - mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid); + mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid); if (!mlxsw_sp_vport) { netdev_err(dev, "Failed to create vPort for VID=%d\n", vid); - err = -ENOMEM; - goto err_port_vport_create; - } - - if (!vfid->nr_vports) { - err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, - true, false); - if (err) { - netdev_err(dev, "Failed to setup flooding for vFID=%d\n", - vfid->vfid); - goto err_vport_flood_set; - } + return -ENOMEM; }
/* When adding the first VLAN interface on a bridged port we need to @@@ -814,15 -843,10 +847,10 @@@ } }
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - true, - mlxsw_sp_vfid_to_fid(vfid->vfid), - vid); + err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport); if (err) { - netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n", - vid, vfid->vfid); - goto err_port_vid_to_fid_set; + netdev_err(dev, "Failed to join vFID\n"); + goto err_vport_vfid_join; }
err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); @@@ -845,8 -869,6 +873,6 @@@ goto err_port_stp_state_set; }
- vfid->nr_vports++; - return 0;
err_port_stp_state_set: @@@ -854,21 -876,12 +880,12 @@@ err_port_add_vid: mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); err_port_vid_learning_set: - mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, - mlxsw_sp_vfid_to_fid(vfid->vfid), vid); - err_port_vid_to_fid_set: + mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport); + err_vport_vfid_join: if (list_is_singular(&mlxsw_sp_port->vports_list)) mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); err_port_vp_mode_trans: - if (!vfid->nr_vports) - mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, - false); - err_vport_flood_set: mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); - err_port_vport_create: - if (!vfid->nr_vports) - mlxsw_sp_vfid_destroy(mlxsw_sp, vfid); return err; }
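The vFID rework running through these hunks replaces per-vPort bookkeeping with reference-counted FID objects: the first vPort to join a FID creates it and enables flooding, later joiners only take a reference, and the f->leave() callback on the last reference tears everything down. A self-contained model of that life cycle (not the driver's data structures):

#include <stdio.h>
#include <stdlib.h>

struct fid {
    int fid;
    int ref_count;
};

static struct fid *fid_create(int id)
{
    struct fid *f = calloc(1, sizeof(*f));

    if (f)
        f->fid = id;
    return f;
}

static void fid_join(struct fid *f)
{
    if (!f->ref_count)
        printf("FID %d: create + enable flooding\n", f->fid);
    f->ref_count++;
}

static void fid_leave(struct fid *f)
{
    if (--f->ref_count == 0) {
        printf("FID %d: disable flooding + destroy\n", f->fid);
        free(f);
    }
}

int main(void)
{
    struct fid *f = fid_create(5000);

    if (!f)
        return 1;
    fid_join(f);  /* first joiner sets everything up */
    fid_join(f);  /* second joiner only takes a reference */
    fid_leave(f);
    fid_leave(f); /* last leaver frees the FID */
    return 0;
}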
@@@ -877,7 -890,7 +894,7 @@@ int mlxsw_sp_port_kill_vid(struct net_d { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp_port *mlxsw_sp_vport; - struct mlxsw_sp_vfid *vfid; + struct mlxsw_sp_fid *f; int err;
/* VLAN 0 is removed from HW filter when device goes down, but @@@ -892,8 -905,6 +909,6 @@@ return 0; }
- vfid = mlxsw_sp_vport->vport.vfid; - err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, MLXSW_REG_SPMS_STATE_DISCARDING); if (err) { @@@ -914,16 -925,12 +929,12 @@@ return err; }
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - false, - mlxsw_sp_vfid_to_fid(vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n", - vid, vfid->vfid); - return err; - } + /* Drop FID reference. If this was the last reference the + * resources will be freed. + */ + f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + if (f && !WARN_ON(!f->leave)) + f->leave(mlxsw_sp_vport);
/* When removing the last VLAN interface on a bridged port we need to * transition all active 802.1Q bridge VLANs to use VID to FID @@@ -937,13 -944,8 +948,8 @@@ } }
- vfid->nr_vports--; mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
- /* Destroy the vFID if no vPorts are assigned to it anymore. */ - if (!vfid->nr_vports) - mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid); - return 0; }
@@@ -2403,6 -2405,7 +2409,7 @@@ static int mlxsw_sp_init(struct mlxsw_c
mlxsw_sp->core = mlxsw_core; mlxsw_sp->bus_info = mlxsw_bus_info; + INIT_LIST_HEAD(&mlxsw_sp->fids); INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list); INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list); INIT_LIST_HEAD(&mlxsw_sp->br_mids.list); @@@ -2479,6 -2482,7 +2486,7 @@@ static void mlxsw_sp_fini(struct mlxsw_ mlxsw_sp_traps_fini(mlxsw_sp); mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); mlxsw_sp_ports_remove(mlxsw_sp); + WARN_ON(!list_empty(&mlxsw_sp->fids)); }
static struct mlxsw_config_profile mlxsw_sp_config_profile = { @@@ -2540,16 -2544,37 +2548,37 @@@ static struct mlxsw_driver mlxsw_sp_dri .profile = &mlxsw_sp_config_profile, };
- static int - mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port) + static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port, + u16 fid) + { + if (mlxsw_sp_fid_is_vfid(fid)) + return mlxsw_sp_port_vport_find_by_fid(lag_port, fid); + else + return test_bit(fid, lag_port->active_vlans); + } + + static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char sfdf_pl[MLXSW_REG_SFDF_LEN]; + u8 local_port = mlxsw_sp_port->local_port; + u16 lag_id = mlxsw_sp_port->lag_id; + int i, count = 0;
- mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT); - mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port); + if (!mlxsw_sp_port->lagged) + return true;
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); + for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) { + struct mlxsw_sp_port *lag_port; + + lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i); + if (!lag_port || lag_port->local_port == local_port) + continue; + if (mlxsw_sp_lag_port_fid_member(lag_port, fid)) + count++; + } + + return !count; }
static int @@@ -2564,17 -2589,8 +2593,8 @@@ mlxsw_sp_port_fdb_flush_by_port_fid(con mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); - } - - static int - mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port) - { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char sfdf_pl[MLXSW_REG_SFDF_LEN]; - - mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG); - mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); + netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n", + mlxsw_sp_port->local_port, fid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); } @@@ -2590,71 -2606,51 +2610,51 @@@ mlxsw_sp_port_fdb_flush_by_lag_id_fid(c mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
+ netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n", + mlxsw_sp_port->lag_id, fid); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); }
- static int - __mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port) + int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) { - int err, last_err = 0; - u16 vid; - - for (vid = 1; vid < VLAN_N_VID - 1; vid++) { - err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid); - if (err) - last_err = err; - } + if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid)) + return 0;
- return last_err; + if (mlxsw_sp_port->lagged) + return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, + fid); + else + return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid); }
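mlxsw_sp_port_fdb_should_flush above encodes the subtle LAG rule behind this rewrite: {LAG, FID} FDB entries are shared by every LAG member, so a leaving port may flush them only when no other member port is still in that FID. A toy version of the membership count:

#include <stdio.h>

#define PORTS_PER_LAG 4

/* Which LAG members are currently in the FID (demo data). */
static int lag_member_in_fid[PORTS_PER_LAG] = { 1, 0, 1, 0 };

static int should_flush(int local_idx)
{
    int i, count = 0;

    for (i = 0; i < PORTS_PER_LAG; i++) {
        if (i == local_idx)
            continue;
        if (lag_member_in_fid[i])
            count++;
    }
    return count == 0; /* flush only if we are the last member */
}

int main(void)
{
    printf("port0 should flush: %d\n", should_flush(0)); /* 0: port2 remains */
    lag_member_in_fid[2] = 0;
    printf("port0 should flush: %d\n", should_flush(0)); /* 1: last member */
    return 0;
}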
- static int - __mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port) + static bool mlxsw_sp_port_dev_check(const struct net_device *dev) { - int err, last_err = 0; - u16 vid; - - for (vid = 1; vid < VLAN_N_VID - 1; vid++) { - err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid); - if (err) - last_err = err; - } - - return last_err; + return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; }
- static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port) + static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) { - if (!list_empty(&mlxsw_sp_port->vports_list)) - if (mlxsw_sp_port->lagged) - return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port); - else - return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port); - else - if (mlxsw_sp_port->lagged) - return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port); - else - return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port); + return !mlxsw_sp->master_bridge.dev || + mlxsw_sp->master_bridge.dev == br_dev; }
- static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport) + static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) { - u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport); - u16 fid = mlxsw_sp_vfid_to_fid(vfid); - - if (mlxsw_sp_vport->lagged) - return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport, - fid); - else - return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid); + mlxsw_sp->master_bridge.dev = br_dev; + mlxsw_sp->master_bridge.ref_count++; }
- static bool mlxsw_sp_port_dev_check(const struct net_device *dev) + static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp) { - return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; + if (--mlxsw_sp->master_bridge.ref_count == 0) + mlxsw_sp->master_bridge.dev = NULL; }
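mlxsw_sp_master_bridge_check/inc/dec implement the single-master-bridge rule as a plain reference count: the first port to join pins the bridge device, later joiners must match it, and the last leaver clears the slot. A compilable model of the same bookkeeping (names are illustrative, not the driver's):

#include <assert.h>
#include <stddef.h>

struct master_bridge {
        void *dev;              /* the one bridge device the ASIC serves */
        unsigned int ref_count;
};

/* First joiner records the bridge; later joiners are checked
 * against it (the _check helper) before this is called.
 */
static void master_bridge_inc(struct master_bridge *mb, void *br_dev)
{
        mb->dev = br_dev;
        mb->ref_count++;
}

/* Last leaver releases the slot so a different bridge can claim it */
static void master_bridge_dec(struct master_bridge *mb)
{
        if (--mb->ref_count == 0)
                mb->dev = NULL;
}

int main(void)
{
        struct master_bridge mb = { 0 };
        int br0;

        master_bridge_inc(&mb, &br0);
        master_bridge_inc(&mb, &br0);
        master_bridge_dec(&mb);
        assert(mb.dev == &br0);         /* still pinned by one port */
        master_bridge_dec(&mb);
        assert(mb.dev == NULL);         /* slot free again */
        return 0;
}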
- static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port) + static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *br_dev) { struct net_device *dev = mlxsw_sp_port->dev; int err; @@@ -2668,6 -2664,8 +2668,8 @@@ if (err) return err;
+ mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev); + mlxsw_sp_port->learning = 1; mlxsw_sp_port->learning_sync = 1; mlxsw_sp_port->uc_flood = 1; @@@ -2676,16 -2674,14 +2678,14 @@@ return 0; }
- static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, - bool flush_fdb) + static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port) { struct net_device *dev = mlxsw_sp_port->dev;
- if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port)) - netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); - mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
+ mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp); + mlxsw_sp_port->learning = 0; mlxsw_sp_port->learning_sync = 0; mlxsw_sp_port->uc_flood = 0; @@@ -2694,28 -2690,7 +2694,7 @@@ /* Add implicit VLAN interface in the device, so that untagged * packets will be classified to the default vFID. */ - return mlxsw_sp_port_add_vid(dev, 0, 1); - } - - static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp, - struct net_device *br_dev) - { - return !mlxsw_sp->master_bridge.dev || - mlxsw_sp->master_bridge.dev == br_dev; - } - - static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp, - struct net_device *br_dev) - { - mlxsw_sp->master_bridge.dev = br_dev; - mlxsw_sp->master_bridge.ref_count++; - } - - static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp, - struct net_device *br_dev) - { - if (--mlxsw_sp->master_bridge.ref_count == 0) - mlxsw_sp->master_bridge.dev = NULL; + mlxsw_sp_port_add_vid(dev, 0, 1); }
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) @@@ -2876,65 -2851,33 +2855,33 @@@ err_col_port_add return err; }
- static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, - struct net_device *br_dev, - bool flush_fdb); - - static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, - struct net_device *lag_dev) + static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *lag_dev) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - struct mlxsw_sp_port *mlxsw_sp_vport; - struct mlxsw_sp_upper *lag; u16 lag_id = mlxsw_sp_port->lag_id; - int err; + struct mlxsw_sp_upper *lag;
if (!mlxsw_sp_port->lagged) - return 0; + return; lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); WARN_ON(lag->ref_count == 0);
- err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id); - if (err) - return err; - err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); - if (err) - return err; - - /* In case we leave a LAG device that has bridges built on top, - * then their teardown sequence is never issued and we need to - * invoke the necessary cleanup routines ourselves. - */ - list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, - vport.list) { - struct net_device *br_dev; - - if (!mlxsw_sp_vport->bridged) - continue; - - br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); - mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false); - } + mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id); + mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
if (mlxsw_sp_port->bridged) { mlxsw_sp_port_active_vlans_del(mlxsw_sp_port); - mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false); - mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL); + mlxsw_sp_port_bridge_leave(mlxsw_sp_port); }
- if (lag->ref_count == 1) { - if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port)) - netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); - err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); - if (err) - return err; - } + if (lag->ref_count == 1) + mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, mlxsw_sp_port->local_port); mlxsw_sp_port->lagged = 0; lag->ref_count--; - return 0; }
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, @@@ -2983,42 -2926,25 +2930,25 @@@ static int mlxsw_sp_port_vlan_link(stru u16 vid = vlan_dev_vlan_id(vlan_dev);
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); - if (!mlxsw_sp_vport) { - WARN_ON(!mlxsw_sp_vport); + if (WARN_ON(!mlxsw_sp_vport)) return -EINVAL; - }
mlxsw_sp_vport->dev = vlan_dev;
return 0; }
- static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port, - struct net_device *vlan_dev) + static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *vlan_dev) { struct mlxsw_sp_port *mlxsw_sp_vport; u16 vid = vlan_dev_vlan_id(vlan_dev);
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); - if (!mlxsw_sp_vport) { - WARN_ON(!mlxsw_sp_vport); - return -EINVAL; - } - - /* When removing a VLAN device while still bridged we should first - * remove it from the bridge, as we receive the bridge's notification - * when the vPort is already gone. - */ - if (mlxsw_sp_vport->bridged) { - struct net_device *br_dev; - - br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); - mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true); - } + if (WARN_ON(!mlxsw_sp_vport)) + return;
mlxsw_sp_vport->dev = mlxsw_sp_port->dev; - - return 0; }
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, @@@ -3028,7 -2954,7 +2958,7 @@@ struct mlxsw_sp_port *mlxsw_sp_port; struct net_device *upper_dev; struct mlxsw_sp *mlxsw_sp; - int err; + int err = 0;
mlxsw_sp_port = netdev_priv(dev); mlxsw_sp = mlxsw_sp_port->mlxsw_sp; @@@ -3037,73 -2963,56 +2967,56 @@@ switch (event) { case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; - if (!info->master || !info->linking) + if (!is_vlan_dev(upper_dev) && + !netif_is_lag_master(upper_dev) && + !netif_is_bridge_master(upper_dev)) + return -EINVAL; + if (!info->linking) break; /* HW limitation forbids to put ports to multiple bridges. */ if (netif_is_bridge_master(upper_dev) && !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev)) - return NOTIFY_BAD; + return -EINVAL; if (netif_is_lag_master(upper_dev) && !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, info->upper_info)) - return NOTIFY_BAD; + return -EINVAL; + if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) + return -EINVAL; + if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && + !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) + return -EINVAL; break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; if (is_vlan_dev(upper_dev)) { - if (info->linking) { + if (info->linking) err = mlxsw_sp_port_vlan_link(mlxsw_sp_port, upper_dev); - if (err) { - netdev_err(dev, "Failed to link VLAN device\n"); - return NOTIFY_BAD; - } - } else { - err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port, - upper_dev); - if (err) { - netdev_err(dev, "Failed to unlink VLAN device\n"); - return NOTIFY_BAD; - } - } + else + mlxsw_sp_port_vlan_unlink(mlxsw_sp_port, + upper_dev); } else if (netif_is_bridge_master(upper_dev)) { - if (info->linking) { - err = mlxsw_sp_port_bridge_join(mlxsw_sp_port); - if (err) { - netdev_err(dev, "Failed to join bridge\n"); - return NOTIFY_BAD; - } - mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev); - } else { - err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port, - true); - mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev); - if (err) { - netdev_err(dev, "Failed to leave bridge\n"); - return NOTIFY_BAD; - } - } + if (info->linking) + err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, + upper_dev); + else + mlxsw_sp_port_bridge_leave(mlxsw_sp_port); } else if (netif_is_lag_master(upper_dev)) { - if (info->linking) { + if (info->linking) err = mlxsw_sp_port_lag_join(mlxsw_sp_port, upper_dev); - if (err) { - netdev_err(dev, "Failed to join link aggregation\n"); - return NOTIFY_BAD; - } - } else { - err = mlxsw_sp_port_lag_leave(mlxsw_sp_port, - upper_dev); - if (err) { - netdev_err(dev, "Failed to leave link aggregation\n"); - return NOTIFY_BAD; - } - } + else + mlxsw_sp_port_lag_leave(mlxsw_sp_port, + upper_dev); + } else { + err = -EINVAL; + WARN_ON(1); } break; }
- return NOTIFY_DONE; + return err; }
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, @@@ -3127,7 -3036,7 +3040,7 @@@ break; }
- return NOTIFY_DONE; + return 0; }
static int mlxsw_sp_netdevice_port_event(struct net_device *dev, @@@ -3141,7 -3050,7 +3054,7 @@@ return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr); }
- return NOTIFY_DONE; + return 0; }
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, @@@ -3154,23 -3063,23 +3067,23 @@@ netdev_for_each_lower_dev(lag_dev, dev, iter) { if (mlxsw_sp_port_dev_check(dev)) { ret = mlxsw_sp_netdevice_port_event(dev, event, ptr); - if (ret == NOTIFY_BAD) + if (ret) return ret; } }
- return NOTIFY_DONE; + return 0; }
- static struct mlxsw_sp_vfid * + static struct mlxsw_sp_fid * mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp, const struct net_device *br_dev) { - struct mlxsw_sp_vfid *vfid; + struct mlxsw_sp_fid *f;
- list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) { - if (vfid->br_dev == br_dev) - return vfid; + list_for_each_entry(f, &mlxsw_sp->br_vfids.list, list) { + if (f->dev == br_dev) + return f; }
return NULL; @@@ -3192,180 -3101,127 +3105,127 @@@ static u16 mlxsw_sp_avail_br_vfid_get(c MLXSW_SP_VFID_BR_MAX); }
- static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp, - struct net_device *br_dev) + static void mlxsw_sp_vport_br_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport); + + static struct mlxsw_sp_fid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) { struct device *dev = mlxsw_sp->bus_info->dev; - struct mlxsw_sp_vfid *vfid; - u16 n_vfid; + struct mlxsw_sp_fid *f; + u16 vfid, fid; int err;
- n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp)); - if (n_vfid == MLXSW_SP_VFID_MAX) { + vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp)); + if (vfid == MLXSW_SP_VFID_MAX) { dev_err(dev, "No available vFIDs\n"); return ERR_PTR(-ERANGE); }
- err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid); + fid = mlxsw_sp_vfid_to_fid(vfid); + err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true); if (err) { - dev_err(dev, "Failed to create vFID=%d\n", n_vfid); + dev_err(dev, "Failed to create FID=%d\n", fid); return ERR_PTR(err); }
- vfid = kzalloc(sizeof(*vfid), GFP_KERNEL); - if (!vfid) + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (!f) goto err_allocate_vfid;
- vfid->vfid = n_vfid; - vfid->br_dev = br_dev; + f->leave = mlxsw_sp_vport_br_vfid_leave; + f->fid = fid; + f->dev = br_dev;
- list_add(&vfid->list, &mlxsw_sp->br_vfids.list); - set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped); + list_add(&f->list, &mlxsw_sp->br_vfids.list); + set_bit(mlxsw_sp_vfid_to_br_vfid(vfid), mlxsw_sp->br_vfids.mapped);
- return vfid; + return f;
err_allocate_vfid: - __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid); + mlxsw_sp_vfid_op(mlxsw_sp, fid, false); return ERR_PTR(-ENOMEM); }
static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_vfid *vfid) + struct mlxsw_sp_fid *f) { - u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid); + u16 vfid = mlxsw_sp_fid_to_vfid(f->fid); + u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid);
clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped); - list_del(&vfid->list); + list_del(&f->list);
- __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid); + mlxsw_sp_vfid_op(mlxsw_sp, f->fid, false);
- kfree(vfid); + kfree(f); }
- static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, - struct net_device *br_dev, - bool flush_fdb) + static int mlxsw_sp_vport_br_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *br_dev) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; - u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); - struct net_device *dev = mlxsw_sp_vport->dev; - struct mlxsw_sp_vfid *vfid, *new_vfid; + struct mlxsw_sp_fid *f; int err;
- vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev); - if (!vfid) { - WARN_ON(!vfid); - return -EINVAL; + f = mlxsw_sp_br_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev); + if (!f) { + f = mlxsw_sp_br_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev); + if (IS_ERR(f)) + return PTR_ERR(f); }
- /* We need a vFID to go back to after leaving the bridge's vFID. */ - new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid); - if (!new_vfid) { - new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid); - if (IS_ERR(new_vfid)) { - netdev_err(dev, "Failed to create vFID for VID=%d\n", - vid); - return PTR_ERR(new_vfid); - } - } + err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true); + if (err) + goto err_vport_flood_set;
- /* Invalidate existing {Port, VID} to vFID mapping and create a new - * one for the new vFID. - */ - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - false, - mlxsw_sp_vfid_to_fid(vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n", - vfid->vfid); - goto err_port_vid_to_fid_invalidate; - } + err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true); + if (err) + goto err_vport_fid_map;
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - true, - mlxsw_sp_vfid_to_fid(new_vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n", - new_vfid->vfid); - goto err_port_vid_to_fid_validate; - } + mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f); + f->ref_count++;
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); - if (err) { - netdev_err(dev, "Failed to disable learning\n"); - goto err_port_vid_learning_set; - } + netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);
- err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, - false); - if (err) { - netdev_err(dev, "Failed to clear flooding\n"); - goto err_vport_flood_set; - } + return 0;
- err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, - MLXSW_REG_SPMS_STATE_FORWARDING); - if (err) { - netdev_err(dev, "Failed to set STP state\n"); - goto err_port_stp_state_set; - } + err_vport_fid_map: + mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false); + err_vport_flood_set: + if (!f->ref_count) + mlxsw_sp_br_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f); + return err; + } + + static void mlxsw_sp_vport_br_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport) + { + struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
- if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport)) - netdev_err(dev, "Failed to flush FDB\n"); + netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
- /* Switch between the vFIDs and destroy the old one if needed. */ - new_vfid->nr_vports++; - mlxsw_sp_vport->vport.vfid = new_vfid; - vfid->nr_vports--; - if (!vfid->nr_vports) - mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid); + mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);
- mlxsw_sp_vport->learning = 0; - mlxsw_sp_vport->learning_sync = 0; - mlxsw_sp_vport->uc_flood = 0; - mlxsw_sp_vport->bridged = 0; + mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
- return 0; + mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);
- err_port_stp_state_set: - err_vport_flood_set: - err_port_vid_learning_set: - err_port_vid_to_fid_validate: - err_port_vid_to_fid_invalidate: - /* Rollback vFID only if new. */ - if (!new_vfid->nr_vports) - mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid); - return err; + mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL); + if (--f->ref_count == 0) + mlxsw_sp_br_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f); }
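The br_vfid join/leave pair follows a get-or-create pattern: joining looks up the FID for the bridge device, creates it on first use and takes a reference; leaving drops the reference and destroys the FID when the count hits zero. A simplified standalone sketch of that lifetime rule (hypothetical struct, a single slot instead of the driver's list):

#include <stdio.h>
#include <stdlib.h>

struct fid {
        int id;
        unsigned int ref_count;
};

/* Get-or-create on join; each joiner holds one reference */
static struct fid *fid_join(struct fid **slot, int id)
{
        if (!*slot) {
                *slot = calloc(1, sizeof(**slot));
                if (!*slot)
                        return NULL;
                (*slot)->id = id;
        }
        (*slot)->ref_count++;
        return *slot;
}

/* Drop one reference; destroy when the last user leaves */
static void fid_leave(struct fid **slot)
{
        if (--(*slot)->ref_count == 0) {
                free(*slot);
                *slot = NULL;
        }
}

int main(void)
{
        struct fid *f = NULL;

        fid_join(&f, 5100);
        fid_join(&f, 5100);
        fid_leave(&f);
        printf("refs=%u\n", f->ref_count);      /* refs=1 */
        fid_leave(&f);
        printf("destroyed=%d\n", f == NULL);    /* destroyed=1 */
        return 0;
}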
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport, struct net_device *br_dev) { - struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid; - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); struct net_device *dev = mlxsw_sp_vport->dev; - struct mlxsw_sp_vfid *vfid; int err;
- vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev); - if (!vfid) { - vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev); - if (IS_ERR(vfid)) { - netdev_err(dev, "Failed to create bridge vFID\n"); - return PTR_ERR(vfid); - } - } + mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
- err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false); + err = mlxsw_sp_vport_br_vfid_join(mlxsw_sp_vport, br_dev); if (err) { - netdev_err(dev, "Failed to setup flooding for vFID=%d\n", - vfid->vfid); - goto err_port_flood_set; + netdev_err(dev, "Failed to join vFID\n"); + goto err_vport_br_vfid_join; }
err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); @@@ -3374,38 -3230,6 +3234,6 @@@ goto err_port_vid_learning_set; }
- /* We need to invalidate existing {Port, VID} to vFID mapping and - * create a new one for the bridge's vFID. - */ - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - false, - mlxsw_sp_vfid_to_fid(old_vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n", - old_vfid->vfid); - goto err_port_vid_to_fid_invalidate; - } - - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - true, - mlxsw_sp_vfid_to_fid(vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n", - vfid->vfid); - goto err_port_vid_to_fid_validate; - } - - /* Switch between the vFIDs and destroy the old one if needed. */ - vfid->nr_vports++; - mlxsw_sp_vport->vport.vfid = vfid; - old_vfid->nr_vports--; - if (!old_vfid->nr_vports) - mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid); - mlxsw_sp_vport->learning = 1; mlxsw_sp_vport->learning_sync = 1; mlxsw_sp_vport->uc_flood = 1; @@@ -3413,20 -3237,32 +3241,32 @@@
return 0;
- err_port_vid_to_fid_validate: - mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, - mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid); - err_port_vid_to_fid_invalidate: - mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); err_port_vid_learning_set: - mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false); - err_port_flood_set: - if (!vfid->nr_vports) - mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid); + mlxsw_sp_vport_br_vfid_leave(mlxsw_sp_vport); + err_vport_br_vfid_join: + mlxsw_sp_vport_vfid_join(mlxsw_sp_vport); return err; }
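The rewritten mlxsw_sp_vport_bridge_join() error path is the usual kernel goto-unwind ladder: each failure label undoes exactly the steps that succeeded, in reverse order (leave the bridge vFID, then rejoin the original vFID). A minimal standalone illustration of the idiom, with made-up step names:

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return -1; }  /* force a failure */
static void undo_a(void) { puts("undo a"); }

/* Acquire in order; on failure, release in exact reverse order */
static int do_join(void)
{
        int err;

        err = step_a();
        if (err)
                return err;

        err = step_b();
        if (err)
                goto err_step_b;

        return 0;

err_step_b:
        undo_a();
        return err;
}

int main(void)
{
        printf("join: %d\n", do_join());
        return 0;
}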
+ static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport) + { + u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + + mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); + + mlxsw_sp_vport_br_vfid_leave(mlxsw_sp_vport); + + mlxsw_sp_vport_vfid_join(mlxsw_sp_vport); + + mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, + MLXSW_REG_SPMS_STATE_FORWARDING); + + mlxsw_sp_vport->learning = 0; + mlxsw_sp_vport->learning_sync = 0; + mlxsw_sp_vport->uc_flood = 0; + mlxsw_sp_vport->bridged = 0; + } + static bool mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port, const struct net_device *br_dev) @@@ -3435,7 -3271,9 +3275,9 @@@
list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, vport.list) { - if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev) + struct net_device *dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); + + if (dev && dev == br_dev) return false; }
@@@ -3450,56 -3288,39 +3292,39 @@@ static int mlxsw_sp_netdevice_vport_eve struct netdev_notifier_changeupper_info *info = ptr; struct mlxsw_sp_port *mlxsw_sp_vport; struct net_device *upper_dev; - int err; + int err = 0;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
switch (event) { case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; - if (!info->master || !info->linking) - break; if (!netif_is_bridge_master(upper_dev)) - return NOTIFY_BAD; + return -EINVAL; + if (!info->linking) + break; /* We can't have multiple VLAN interfaces configured on * the same port and being members in the same bridge. */ if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port, upper_dev)) - return NOTIFY_BAD; + return -EINVAL; break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; - if (!info->master) - break; if (info->linking) { - if (!mlxsw_sp_vport) { - WARN_ON(!mlxsw_sp_vport); - return NOTIFY_BAD; - } + if (WARN_ON(!mlxsw_sp_vport)) + return -EINVAL; err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport, upper_dev); - if (err) { - netdev_err(dev, "Failed to join bridge\n"); - return NOTIFY_BAD; - } } else { - /* We ignore bridge's unlinking notifications if vPort - * is gone, since we already left the bridge when the - * VLAN device was unlinked from the real device. - */ if (!mlxsw_sp_vport) - return NOTIFY_DONE; - err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, - upper_dev, true); - if (err) { - netdev_err(dev, "Failed to leave bridge\n"); - return NOTIFY_BAD; - } + return 0; + mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport); } }
- return NOTIFY_DONE; + return err; }
static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev, @@@ -3514,12 -3335,12 +3339,12 @@@ if (mlxsw_sp_port_dev_check(dev)) { ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr, vid); - if (ret == NOTIFY_BAD) + if (ret) return ret; } }
- return NOTIFY_DONE; + return 0; }
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, @@@ -3535,24 -3356,23 +3360,23 @@@ return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr, vid);
- return NOTIFY_DONE; + return 0; }
static int mlxsw_sp_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); + int err = 0;
if (mlxsw_sp_port_dev_check(dev)) - return mlxsw_sp_netdevice_port_event(dev, event, ptr); - - if (netif_is_lag_master(dev)) - return mlxsw_sp_netdevice_lag_event(dev, event, ptr); - - if (is_vlan_dev(dev)) - return mlxsw_sp_netdevice_vlan_event(dev, event, ptr); + err = mlxsw_sp_netdevice_port_event(dev, event, ptr); + else if (netif_is_lag_master(dev)) + err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); + else if (is_vlan_dev(dev)) + err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
- return NOTIFY_DONE; + return notifier_from_errno(err); }
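With this change the per-device handlers return 0 or a negative errno, and only this outermost notifier callback converts the result, via notifier_from_errno(), instead of sprinkling NOTIFY_BAD/NOTIFY_DONE through every branch. A simplified standalone model of that single conversion point (the two macros below are stand-ins; the kernel packs the errno into the notifier return value with notifier_from_errno()/notifier_to_errno()):

#include <errno.h>
#include <stdio.h>

/* Simplified stand-ins, not the kernel's actual encoding */
#define NOTIFY_DONE     0
#define NOTIFY_BAD      1

/* Inner handlers return 0 or a negative errno... */
static int port_event(int linking)
{
        return linking ? 0 : -EINVAL;
}

/* ...and only the outermost notifier callback converts, once */
static int netdevice_event(int linking)
{
        int err = port_event(linking);

        return err ? NOTIFY_BAD : NOTIFY_DONE;
}

int main(void)
{
        printf("%d %d\n", netdevice_event(1), netdevice_event(0));
        return 0;
}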
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = { diff --combined drivers/net/ethernet/netronome/nfp/nfp_net_common.c index ba26bb3,2195ed3..c25a8ba --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@@ -1979,7 -1979,7 +1979,7 @@@ static int __nfp_net_set_config_and_ena if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) { memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports)); memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt)); - vxlan_get_rx_port(nn->netdev); + udp_tunnel_get_rx_info(nn->netdev); }
return err; @@@ -2015,7 -2015,7 +2015,7 @@@ static void nfp_net_open_stack(struct n
netif_tx_wake_all_queues(nn->netdev);
- enable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector); + enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); nfp_net_read_link_status(nn); }
@@@ -2044,7 -2044,7 +2044,7 @@@ static int nfp_net_netdev_open(struct n NFP_NET_IRQ_LSC_IDX, nn->lsc_handler); if (err) goto err_free_exn; - disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector); + disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings), GFP_KERNEL); @@@ -2133,7 -2133,7 +2133,7 @@@ static void nfp_net_close_stack(struct { unsigned int r;
- disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector); + disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); netif_carrier_off(nn->netdev); nn->link_up = false;
@@@ -2551,26 -2551,32 +2551,32 @@@ static int nfp_net_find_vxlan_idx(struc }
static void nfp_net_add_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct nfp_net *nn = netdev_priv(netdev); int idx;
- idx = nfp_net_find_vxlan_idx(nn, port); + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + idx = nfp_net_find_vxlan_idx(nn, ti->port); if (idx == -ENOSPC) return;
if (!nn->vxlan_usecnt[idx]++) - nfp_net_set_vxlan_port(nn, idx, port); + nfp_net_set_vxlan_port(nn, idx, ti->port); }
static void nfp_net_del_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct nfp_net *nn = netdev_priv(netdev); int idx;
- idx = nfp_net_find_vxlan_idx(nn, port); + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + idx = nfp_net_find_vxlan_idx(nn, ti->port); if (!nn->vxlan_usecnt[idx] || idx == -ENOSPC) return;
@@@ -2589,8 -2595,8 +2595,8 @@@ static const struct net_device_ops nfp_ .ndo_set_mac_address = eth_mac_addr, .ndo_set_features = nfp_net_set_features, .ndo_features_check = nfp_net_features_check, - .ndo_add_vxlan_port = nfp_net_add_vxlan_port, - .ndo_del_vxlan_port = nfp_net_del_vxlan_port, + .ndo_udp_tunnel_add = nfp_net_add_vxlan_port, + .ndo_udp_tunnel_del = nfp_net_del_vxlan_port, };
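The conversion from ndo_add_vxlan_port/ndo_del_vxlan_port to ndo_udp_tunnel_add/del means a VXLAN-only device like nfp now receives offers for every UDP tunnel type and must filter on ti->type itself. A standalone sketch of that shape (a hypothetical mirror of struct udp_tunnel_info, which in the kernel carries the type, address family and a network-order port):

#include <stdio.h>
#include <stdint.h>

enum tunnel_type { TUNNEL_VXLAN, TUNNEL_GENEVE };

/* Hypothetical mirror of the fields the driver cares about */
struct tunnel_info {
        enum tunnel_type type;
        uint16_t port;
};

/* A VXLAN-only device now sees every tunnel type and must filter */
static void tunnel_add(const struct tunnel_info *ti)
{
        if (ti->type != TUNNEL_VXLAN)
                return;         /* silently ignore geneve etc. */
        printf("offloading VXLAN port %u\n", (unsigned int)ti->port);
}

int main(void)
{
        struct tunnel_info vxlan = { TUNNEL_VXLAN, 4789 };
        struct tunnel_info geneve = { TUNNEL_GENEVE, 6081 };

        tunnel_add(&vxlan);     /* accepted */
        tunnel_add(&geneve);    /* ignored, no hw slot consumed */
        return 0;
}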
/** diff --combined drivers/net/ethernet/qlogic/qed/qed_l2.c index aada4c7,d121a8b..a12c6ca --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@@ -72,7 -72,6 +72,7 @@@ int qed_sp_eth_vport_start(struct qed_h p_ramrod->mtu = cpu_to_le16(p_params->mtu); p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan; p_ramrod->drop_ttl0_en = p_params->drop_ttl0; + p_ramrod->untagged = p_params->only_untagged;
SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1); SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1); @@@ -248,6 -247,10 +248,6 @@@ qed_sp_update_accept_mode(struct qed_hw SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL, !!(accept_filter & QED_ACCEPT_NONE));
- SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL, - (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) && - !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED))); - SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL, !!(accept_filter & QED_ACCEPT_NONE));
@@@ -572,9 -575,12 +572,12 @@@ int qed_sp_eth_rxq_start_ramrod(struct p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
- rc = qed_spq_post(p_hwfn, p_ent, NULL); + p_ramrod->vf_rx_prod_index = params->vf_qid; + if (params->vf_qid) + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Queue is meant for VF rxq[%04x]\n", params->vf_qid);
- return rc; + return qed_spq_post(p_hwfn, p_ent, NULL); }
static int @@@ -612,7 -618,7 +615,7 @@@ qed_sp_eth_rx_queue_start(struct qed_hw
*pp_prod = (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM + - MSTORM_PRODS_OFFSET(abs_l2_queue); + MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue);
/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64), @@@ -756,9 -762,9 +759,9 @@@ int qed_sp_eth_txq_start_ramrod(struct struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; struct qed_hw_cid_data *p_tx_cid; - u8 abs_vport_id; + u16 pq_id, abs_tx_q_id = 0; int rc = -EINVAL; - u16 pq_id; + u8 abs_vport_id;
/* Store information for the stop */ p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id]; @@@ -769,6 -775,10 +772,10 @@@ if (rc) return rc;
+ rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id); + if (rc) + return rc; + /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = cid; @@@ -788,6 -798,7 +795,7 @@@ p_ramrod->sb_index = p_params->sb_idx; p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id); p_ramrod->pbl_size = cpu_to_le16(pbl_size); DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
@@@ -1482,51 -1493,51 +1490,51 @@@ static void __qed_get_vport_port_stats( offsetof(struct public_port, stats), sizeof(port_stats));
- p_stats->rx_64_byte_packets += port_stats.pmm.r64; - p_stats->rx_65_to_127_byte_packets += port_stats.pmm.r127; - p_stats->rx_128_to_255_byte_packets += port_stats.pmm.r255; - p_stats->rx_256_to_511_byte_packets += port_stats.pmm.r511; - p_stats->rx_512_to_1023_byte_packets += port_stats.pmm.r1023; - p_stats->rx_1024_to_1518_byte_packets += port_stats.pmm.r1518; - p_stats->rx_1519_to_1522_byte_packets += port_stats.pmm.r1522; - p_stats->rx_1519_to_2047_byte_packets += port_stats.pmm.r2047; - p_stats->rx_2048_to_4095_byte_packets += port_stats.pmm.r4095; - p_stats->rx_4096_to_9216_byte_packets += port_stats.pmm.r9216; - p_stats->rx_9217_to_16383_byte_packets += port_stats.pmm.r16383; - p_stats->rx_crc_errors += port_stats.pmm.rfcs; - p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf; - p_stats->rx_pause_frames += port_stats.pmm.rxpf; - p_stats->rx_pfc_frames += port_stats.pmm.rxpp; - p_stats->rx_align_errors += port_stats.pmm.raln; - p_stats->rx_carrier_errors += port_stats.pmm.rfcr; - p_stats->rx_oversize_packets += port_stats.pmm.rovr; - p_stats->rx_jabbers += port_stats.pmm.rjbr; - p_stats->rx_undersize_packets += port_stats.pmm.rund; - p_stats->rx_fragments += port_stats.pmm.rfrg; - p_stats->tx_64_byte_packets += port_stats.pmm.t64; - p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127; - p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255; - p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511; - p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023; - p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518; - p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047; - p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095; - p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216; - p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383; - p_stats->tx_pause_frames += port_stats.pmm.txpf; - p_stats->tx_pfc_frames += port_stats.pmm.txpp; - p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec; - p_stats->tx_total_collisions += port_stats.pmm.tncl; - p_stats->rx_mac_bytes += port_stats.pmm.rbyte; - p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca; - p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca; - p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca; - p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok; - p_stats->tx_mac_bytes += port_stats.pmm.tbyte; - p_stats->tx_mac_uc_packets += port_stats.pmm.txuca; - p_stats->tx_mac_mc_packets += port_stats.pmm.txmca; - p_stats->tx_mac_bc_packets += port_stats.pmm.txbca; - p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf; + p_stats->rx_64_byte_packets += port_stats.eth.r64; + p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127; + p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255; + p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511; + p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023; + p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518; + p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522; + p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047; + p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095; + p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216; + p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383; + p_stats->rx_crc_errors += port_stats.eth.rfcs; + p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf; + p_stats->rx_pause_frames += port_stats.eth.rxpf; + p_stats->rx_pfc_frames += port_stats.eth.rxpp; + p_stats->rx_align_errors += port_stats.eth.raln; + p_stats->rx_carrier_errors += port_stats.eth.rfcr;
+ p_stats->rx_oversize_packets += port_stats.eth.rovr; + p_stats->rx_jabbers += port_stats.eth.rjbr; + p_stats->rx_undersize_packets += port_stats.eth.rund; + p_stats->rx_fragments += port_stats.eth.rfrg; + p_stats->tx_64_byte_packets += port_stats.eth.t64; + p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127; + p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255; + p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511; + p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023; + p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518; + p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047; + p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095; + p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216; + p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383; + p_stats->tx_pause_frames += port_stats.eth.txpf; + p_stats->tx_pfc_frames += port_stats.eth.txpp; + p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec; + p_stats->tx_total_collisions += port_stats.eth.tncl; + p_stats->rx_mac_bytes += port_stats.eth.rbyte; + p_stats->rx_mac_uc_packets += port_stats.eth.rxuca; + p_stats->rx_mac_mc_packets += port_stats.eth.rxmca; + p_stats->rx_mac_bc_packets += port_stats.eth.rxbca; + p_stats->rx_mac_frames_ok += port_stats.eth.rxpok; + p_stats->tx_mac_bytes += port_stats.eth.tbyte; + p_stats->tx_mac_uc_packets += port_stats.eth.txuca; + p_stats->tx_mac_mc_packets += port_stats.eth.txmca; + p_stats->tx_mac_bc_packets += port_stats.eth.txbca; + p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf; for (j = 0; j < 8; j++) { p_stats->brb_truncates += port_stats.brb.brb_truncate[j]; p_stats->brb_discards += port_stats.brb.brb_discard[j]; @@@ -1745,8 -1756,7 +1753,8 @@@ static int qed_start_vport(struct qed_d start.vport_id, start.mtu); }
- qed_reset_vport_stats(cdev); + if (params->clear_stats) + qed_reset_vport_stats(cdev);
return 0; } @@@ -2156,11 -2166,18 +2164,18 @@@ static int qed_fp_cqe_completion(struc extern const struct qed_iov_hv_ops qed_iov_ops_pass; #endif
+ #ifdef CONFIG_DCB + extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass; + #endif + static const struct qed_eth_ops qed_eth_ops_pass = { .common = &qed_common_ops_pass, #ifdef CONFIG_QED_SRIOV .iov = &qed_iov_ops_pass, #endif + #ifdef CONFIG_DCB + .dcb = &qed_dcbnl_ops_pass, + #endif .fill_dev_info = &qed_fill_eth_dev_info, .register_ops = &qed_register_eth_ops, .check_mac = &qed_check_mac, diff --combined drivers/net/ethernet/qlogic/qed/qed_main.c index c7e01b3,e32ee57..1f13abb --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@@ -207,6 -207,8 +207,8 @@@ int qed_fill_dev_info(struct qed_dev *c dev_info->pci_mem_start = cdev->pci_params.mem_start; dev_info->pci_mem_end = cdev->pci_params.mem_end; dev_info->pci_irq = cdev->pci_params.irq; + dev_info->rdma_supported = + (cdev->hwfns[0].hw_info.personality == QED_PCI_ETH_ROCE); dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
@@@ -832,7 -834,8 +834,8 @@@ static int qed_slowpath_start(struct qe goto err2; }
- data = cdev->firmware->data; + /* First Dword used to differentiate between various sources */ + data = cdev->firmware->data + sizeof(u32); }
memset(&tunn_info, 0, sizeof(tunn_info)); @@@ -900,7 -903,8 +903,8 @@@ static int qed_slowpath_stop(struct qed
if (IS_PF(cdev)) { qed_free_stream_mem(cdev); - qed_sriov_disable(cdev, true); + if (IS_QED_ETH_IF(cdev)) + qed_sriov_disable(cdev, true);
qed_nic_stop(cdev); qed_slowpath_irq_free(cdev); @@@ -991,8 -995,7 +995,7 @@@ static bool qed_can_link_change(struct return true; }
- static int qed_set_link(struct qed_dev *cdev, - struct qed_link_params *params) + static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) { struct qed_hwfn *hwfn; struct qed_mcp_link_params *link_params; @@@ -1032,7 -1035,7 +1035,7 @@@ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; if (params->adv_speeds & 0) link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G; + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; } if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) link_params->speed.forced_speed = params->forced_speed; @@@ -1053,19 -1056,19 +1056,19 @@@ if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) { switch (params->loopback_mode) { case QED_LINK_LOOPBACK_INT_PHY: - link_params->loopback_mode = PMM_LOOPBACK_INT_PHY; + link_params->loopback_mode = ETH_LOOPBACK_INT_PHY; break; case QED_LINK_LOOPBACK_EXT_PHY: - link_params->loopback_mode = PMM_LOOPBACK_EXT_PHY; + link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY; break; case QED_LINK_LOOPBACK_EXT: - link_params->loopback_mode = PMM_LOOPBACK_EXT; + link_params->loopback_mode = ETH_LOOPBACK_EXT; break; case QED_LINK_LOOPBACK_MAC: - link_params->loopback_mode = PMM_LOOPBACK_MAC; + link_params->loopback_mode = ETH_LOOPBACK_MAC; break; default: - link_params->loopback_mode = PMM_LOOPBACK_NONE; + link_params->loopback_mode = ETH_LOOPBACK_NONE; break; } } @@@ -1085,7 -1088,6 +1088,7 @@@ static int qed_get_port_type(u32 media_ case MEDIA_SFPP_10G_FIBER: case MEDIA_SFP_1G_FIBER: case MEDIA_XFP_FIBER: + case MEDIA_MODULE_FIBER: case MEDIA_KR: port_type = PORT_FIBRE; break; @@@ -1185,7 -1187,7 +1188,7 @@@ static void qed_fill_link(struct qed_hw NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) if_link->advertised_caps |= 0; if (params.speed.advertised_speeds & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G) + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) if_link->advertised_caps |= 0;
if (link_caps.speed_capabilities & @@@ -1202,7 -1204,7 +1205,7 @@@ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) if_link->supported_caps |= 0; if (link_caps.speed_capabilities & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G) + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) if_link->supported_caps |= 0;
if (link.link_up) @@@ -1301,6 -1303,38 +1304,38 @@@ static int qed_drain(struct qed_dev *cd return 0; }
+ static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal) + { + *rx_coal = cdev->rx_coalesce_usecs; + *tx_coal = cdev->tx_coalesce_usecs; + } + + static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, + u8 qid, u16 sb_id) + { + struct qed_hwfn *hwfn; + struct qed_ptt *ptt; + int hwfn_index; + int status = 0; + + hwfn_index = qid % cdev->num_hwfns; + hwfn = &cdev->hwfns[hwfn_index]; + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal, + qid / cdev->num_hwfns, sb_id); + if (status) + goto out; + status = qed_set_txq_coalesce(hwfn, ptt, tx_coal, + qid / cdev->num_hwfns, sb_id); + out: + qed_ptt_release(hwfn, ptt); + + return status; + } + static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); @@@ -1347,5 -1381,7 +1382,7 @@@ const struct qed_common_ops qed_common_ .update_msglvl = &qed_init_dp, .chain_alloc = &qed_chain_alloc, .chain_free = &qed_chain_free, + .get_coalesce = &qed_get_coalesce, + .set_coalesce = &qed_set_coalesce, .set_led = &qed_set_led, }; diff --combined drivers/net/ethernet/qlogic/qed/qed_spq.c index b122f60,ad9bf5c..97ffeae --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@@ -213,15 -213,19 +213,15 @@@ static int qed_spq_hw_post(struct qed_h SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL, DQ_XCM_CORE_SPQ_PROD_CMD); db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD; - - /* validate producer is up to-date */ - rmb(); - db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
- /* do not reorder */ - barrier(); + /* make sure the SPQE is updated before the doorbell */ + wmb();
DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
/* make sure doorbell is rung */ - mmiowb(); + wmb();
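The barrier rework above drops the rmb()/barrier()/mmiowb() mix for two wmb() calls: the first orders the SPQ element write before the doorbell write, the second makes sure the doorbell store itself is posted. A rough userspace analogy using a C11 release fence (only an analogy: the kernel's wmb() additionally orders the MMIO store, which C11 fences do not model):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t spq_element;
static _Atomic uint32_t doorbell;

/* Publish the element, then ring the doorbell: the release fence
 * keeps the element store visible before the doorbell store, the
 * role the first wmb() plays ahead of DOORBELL() in the driver.
 */
static void post(uint32_t elem, uint32_t prod)
{
        spq_element = elem;
        atomic_thread_fence(memory_order_release);
        atomic_store_explicit(&doorbell, prod, memory_order_relaxed);
}

int main(void)
{
        post(0xabcd, 1);
        printf("prod=%u\n", (unsigned int)atomic_load(&doorbell));
        return 0;
}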
DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n", @@@ -339,6 -343,7 +339,7 @@@ struct qed_eq *qed_eq_alloc(struct qed_ if (qed_chain_alloc(p_hwfn->cdev, QED_CHAIN_USE_TO_PRODUCE, QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U16, num_elem, sizeof(union event_ring_element), &p_eq->chain)) { @@@ -412,10 -417,10 +413,10 @@@ int qed_eth_cqe_completion(struct qed_h ***************************************************************************/ void qed_spq_setup(struct qed_hwfn *p_hwfn) { - struct qed_spq *p_spq = p_hwfn->p_spq; - struct qed_spq_entry *p_virt = NULL; - dma_addr_t p_phys = 0; - unsigned int i = 0; + struct qed_spq *p_spq = p_hwfn->p_spq; + struct qed_spq_entry *p_virt = NULL; + dma_addr_t p_phys = 0; + u32 i, capacity;
INIT_LIST_HEAD(&p_spq->pending); INIT_LIST_HEAD(&p_spq->completion_pending); @@@ -427,7 -432,8 +428,8 @@@ p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod); p_virt = p_spq->p_virt;
- for (i = 0; i < p_spq->chain.capacity; i++) { + capacity = qed_chain_get_capacity(&p_spq->chain); + for (i = 0; i < capacity; i++) { DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
list_add_tail(&p_virt->list, &p_spq->free_pool); @@@ -455,9 -461,10 +457,10 @@@
int qed_spq_alloc(struct qed_hwfn *p_hwfn) { - struct qed_spq *p_spq = NULL; - dma_addr_t p_phys = 0; - struct qed_spq_entry *p_virt = NULL; + struct qed_spq_entry *p_virt = NULL; + struct qed_spq *p_spq = NULL; + dma_addr_t p_phys = 0; + u32 capacity;
/* SPQ struct */ p_spq = @@@ -471,6 -478,7 +474,7 @@@ if (qed_chain_alloc(p_hwfn->cdev, QED_CHAIN_USE_TO_PRODUCE, QED_CHAIN_MODE_SINGLE, + QED_CHAIN_CNT_TYPE_U16, 0, /* N/A when the mode is SINGLE */ sizeof(struct slow_path_element), &p_spq->chain)) { @@@ -479,11 -487,11 +483,11 @@@ }
/* allocate and fill the SPQ elements (incl. ramrod data list) */ + capacity = qed_chain_get_capacity(&p_spq->chain); p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, - p_spq->chain.capacity * + capacity * sizeof(struct qed_spq_entry), - &p_phys, - GFP_KERNEL); + &p_phys, GFP_KERNEL);
if (!p_virt) goto spq_allocate_fail; @@@ -503,16 -511,18 +507,18 @@@ spq_allocate_fail void qed_spq_free(struct qed_hwfn *p_hwfn) { struct qed_spq *p_spq = p_hwfn->p_spq; + u32 capacity;
if (!p_spq) return;
- if (p_spq->p_virt) + if (p_spq->p_virt) { + capacity = qed_chain_get_capacity(&p_spq->chain); dma_free_coherent(&p_hwfn->cdev->pdev->dev, - p_spq->chain.capacity * + capacity * sizeof(struct qed_spq_entry), - p_spq->p_virt, - p_spq->p_phys); + p_spq->p_virt, p_spq->p_phys); + }
qed_chain_free(p_hwfn->cdev, &p_spq->chain); ; @@@ -610,9 -620,7 +616,9 @@@ qed_spq_add_entry(struct qed_hwfn *p_hw
*p_en2 = *p_ent;
- kfree(p_ent); + /* EBLOCK responsible to free the allocated p_ent */ + if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK) + kfree(p_ent);
p_ent = p_en2; } @@@ -747,15 -755,6 +753,15 @@@ int qed_spq_post(struct qed_hwfn *p_hwf * Thus, after gaining the answer perform the cleanup here. */ rc = qed_spq_block(p_hwfn, p_ent, fw_return_code); + + if (p_ent->queue == &p_spq->unlimited_pending) { + /* This is an allocated p_ent which does not need to + * return to pool. + */ + kfree(p_ent); + return rc; + } + if (rc) goto spq_post_fail2;
@@@ -851,12 -850,8 +857,12 @@@ int qed_spq_completion(struct qed_hwfn found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data, fw_return_code);
- if (found->comp_mode != QED_SPQ_MODE_EBLOCK) - /* EBLOCK is responsible for freeing its own entry */ + if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) || + (found->queue == &p_spq->unlimited_pending)) + /* EBLOCK is responsible for returning its own entry into the + * free list, unless it originally added the entry into the + * unlimited pending list. + */ qed_spq_return_entry(p_hwfn, found);
/* Attempt to post pending requests */ @@@ -882,9 -877,9 +888,9 @@@ struct qed_consq *qed_consq_alloc(struc if (qed_chain_alloc(p_hwfn->cdev, QED_CHAIN_USE_TO_PRODUCE, QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U16, QED_CHAIN_PAGE_SIZE / 0x80, - 0x80, - &p_consq->chain)) { + 0x80, &p_consq->chain)) { DP_NOTICE(p_hwfn, "Failed to allocate consq chain"); goto consq_allocate_fail; } diff --combined drivers/net/ethernet/qlogic/qede/qede_main.c index f8e11f9,2972742..19bc631 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@@ -24,12 -24,7 +24,7 @@@ #include <linux/netdev_features.h> #include <linux/udp.h> #include <linux/tcp.h> - #ifdef CONFIG_QEDE_VXLAN - #include <net/vxlan.h> - #endif - #ifdef CONFIG_QEDE_GENEVE - #include <net/geneve.h> - #endif + #include <net/udp_tunnel.h> #include <linux/ip.h> #include <net/ipv6.h> #include <net/tcp.h> @@@ -579,8 -574,6 +574,6 @@@ netdev_tx_t qede_start_xmit(struct sk_b
/* Fill the parsing flags & params according to the requested offload */ if (xmit_type & XMIT_L4_CSUM) { - u16 temp = 1 << ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT; - /* We don't re-calculate IP checksum as it is already done by * the upper stack */ @@@ -590,14 -583,8 +583,8 @@@ if (xmit_type & XMIT_ENC) { first_bd->data.bd_flags.bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; - } else { - /* In cases when OS doesn't indicate for inner offloads - * when packet is tunnelled, we need to override the HW - * tunnel configuration so that packets are treated as - * regular non tunnelled packets and no inner offloads - * are done by the hardware. - */ - first_bd->data.bitfields |= cpu_to_le16(temp); + first_bd->data.bitfields |= + 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; }
/* If the packet is IPv6 with extension header, indicate that @@@ -655,6 -642,10 +642,10 @@@ tx_data_bd = (struct eth_tx_bd *)third_bd; data_split = true; } + } else { + first_bd->data.bitfields |= + (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << + ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; }
/* Handle fragmented skb */ @@@ -2116,75 -2107,75 +2107,75 @@@ int qede_set_features(struct net_devic return 0; }
- #ifdef CONFIG_QEDE_VXLAN - static void qede_add_vxlan_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) + static void qede_udp_tunnel_add(struct net_device *dev, + struct udp_tunnel_info *ti) { struct qede_dev *edev = netdev_priv(dev); - u16 t_port = ntohs(port); + u16 t_port = ntohs(ti->port);
- if (edev->vxlan_dst_port) - return; + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (edev->vxlan_dst_port) + return;
- edev->vxlan_dst_port = t_port; + edev->vxlan_dst_port = t_port;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", t_port); + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", + t_port);
- set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); - schedule_delayed_work(&edev->sp_task, 0); - } + set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (edev->geneve_dst_port) + return;
- static void qede_del_vxlan_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) - { - struct qede_dev *edev = netdev_priv(dev); - u16 t_port = ntohs(port); + edev->geneve_dst_port = t_port;
- if (t_port != edev->vxlan_dst_port) + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", + t_port); + set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); + break; + default: return; + }
- edev->vxlan_dst_port = 0; - - DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", t_port); - - set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); schedule_delayed_work(&edev->sp_task, 0); } - #endif
- #ifdef CONFIG_QEDE_GENEVE - static void qede_add_geneve_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) + static void qede_udp_tunnel_del(struct net_device *dev, + struct udp_tunnel_info *ti) { struct qede_dev *edev = netdev_priv(dev); - u16 t_port = ntohs(port); + u16 t_port = ntohs(ti->port);
- if (edev->geneve_dst_port) - return; + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (t_port != edev->vxlan_dst_port) + return;
- edev->geneve_dst_port = t_port; + edev->vxlan_dst_port = 0;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", t_port); - set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); - schedule_delayed_work(&edev->sp_task, 0); - } + DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", + t_port);
- static void qede_del_geneve_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) - { - struct qede_dev *edev = netdev_priv(dev); - u16 t_port = ntohs(port); + set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (t_port != edev->geneve_dst_port) + return;
- if (t_port != edev->geneve_dst_port) - return; + edev->geneve_dst_port = 0;
- edev->geneve_dst_port = 0; + DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", + t_port); + set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); + break; + default: + return; + }
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", t_port); - set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); schedule_delayed_work(&edev->sp_task, 0); } - #endif
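Both qede handlers follow the same record-then-defer pattern: the ndo just stores the port, sets a flag bit and schedules sp_task, because the actual device reconfiguration cannot run in the (possibly atomic) notifier context. A compilable model of that split (a bool stands in for the sp_flags bit, a direct call for the workqueue):

#include <stdbool.h>
#include <stdio.h>

struct edev {
        unsigned short vxlan_dst_port;
        bool cfg_pending;       /* stands in for the sp_flags bit */
};

/* ndo context: just record the port and mark work pending */
static void tunnel_add(struct edev *edev, unsigned short port)
{
        if (edev->vxlan_dst_port)
                return;                 /* one offloaded port at a time */
        edev->vxlan_dst_port = port;
        edev->cfg_pending = true;       /* sp_task picks this up later */
}

/* deferred worker: pushes the recorded port to the device */
static void sp_task(struct edev *edev)
{
        if (!edev->cfg_pending)
                return;
        edev->cfg_pending = false;
        printf("configuring VXLAN port %u\n",
               (unsigned int)edev->vxlan_dst_port);
}

int main(void)
{
        struct edev edev = { 0 };

        tunnel_add(&edev, 4789);
        sp_task(&edev);
        return 0;
}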
static const struct net_device_ops qede_netdev_ops = { .ndo_open = qede_open, @@@ -2208,14 -2199,8 +2199,8 @@@ .ndo_get_vf_config = qede_get_vf_config, .ndo_set_vf_rate = qede_set_vf_rate, #endif - #ifdef CONFIG_QEDE_VXLAN - .ndo_add_vxlan_port = qede_add_vxlan_port, - .ndo_del_vxlan_port = qede_del_vxlan_port, - #endif - #ifdef CONFIG_QEDE_GENEVE - .ndo_add_geneve_port = qede_add_geneve_port, - .ndo_del_geneve_port = qede_del_geneve_port, - #endif + .ndo_udp_tunnel_add = qede_udp_tunnel_add, + .ndo_udp_tunnel_del = qede_udp_tunnel_del, };
/* ------------------------------------------------------------------------- @@@ -2505,6 -2490,10 +2490,10 @@@ static int __qede_probe(struct pci_dev
edev->ops->register_ops(cdev, &qede_ll_ops, edev);
+ #ifdef CONFIG_DCB + qede_set_dcbnl_ops(edev->ndev); + #endif + INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); mutex_init(&edev->qede_lock);
@@@ -2823,6 -2812,7 +2812,7 @@@ static int qede_alloc_mem_rxq(struct qe rc = edev->ops->common->chain_alloc(edev->cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE, QED_CHAIN_MODE_NEXT_PTR, + QED_CHAIN_CNT_TYPE_U16, RX_RING_SIZE, sizeof(struct eth_rx_bd), &rxq->rx_bd_ring); @@@ -2834,6 -2824,7 +2824,7 @@@ rc = edev->ops->common->chain_alloc(edev->cdev, QED_CHAIN_USE_TO_CONSUME, QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U16, RX_RING_SIZE, sizeof(union eth_rx_cqe), &rxq->rx_comp_ring); @@@ -2885,9 -2876,9 +2876,9 @@@ static int qede_alloc_mem_txq(struct qe rc = edev->ops->common->chain_alloc(edev->cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE, QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U16, NUM_TX_BDS_MAX, - sizeof(*p_virt), - &txq->tx_pbl); + sizeof(*p_virt), &txq->tx_pbl); if (rc) goto err;
@@@ -3231,7 -3222,7 +3222,7 @@@ static int qede_stop_queues(struct qede return rc; }
-static int qede_start_queues(struct qede_dev *edev) +static int qede_start_queues(struct qede_dev *edev, bool clear_stats) { int rc, tc, i; int vlan_removal_en = 1; @@@ -3462,7 -3453,6 +3453,7 @@@ out
enum qede_load_mode { QEDE_LOAD_NORMAL, + QEDE_LOAD_RELOAD, };
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode) @@@ -3501,7 -3491,7 +3492,7 @@@ goto err3; DP_INFO(edev, "Setup IRQs succeeded\n");
- rc = qede_start_queues(edev); + rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD); if (rc) goto err4; DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n"); @@@ -3556,7 -3546,7 +3547,7 @@@ void qede_reload(struct qede_dev *edev if (func) func(edev, args);
- qede_load(edev, QEDE_LOAD_NORMAL); + qede_load(edev, QEDE_LOAD_RELOAD);
mutex_lock(&edev->qede_lock); qede_config_rx_mode(edev->ndev); @@@ -3578,12 -3568,8 +3569,8 @@@ static int qede_open(struct net_device if (rc) return rc;
- #ifdef CONFIG_QEDE_VXLAN - vxlan_get_rx_port(ndev); - #endif - #ifdef CONFIG_QEDE_GENEVE - geneve_get_rx_port(ndev); - #endif + udp_tunnel_get_rx_info(ndev); + return 0; }
diff --combined drivers/net/ethernet/ti/cpsw.c index 5319089,6d0c5a0..1a93a1f --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@@ -364,7 -364,6 +364,6 @@@ static inline void slave_write(struct c }
struct cpsw_priv { - spinlock_t lock; struct platform_device *pdev; struct net_device *ndev; struct napi_struct napi_rx; @@@ -1244,6 -1243,7 +1243,7 @@@ static void cpsw_slave_stop(struct cpsw slave->phy = NULL; cpsw_ale_control_set(priv->ale, slave_port, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); + soft_reset_slave(slave); }
static int cpsw_ndo_open(struct net_device *ndev) @@@ -1252,7 -1252,11 +1252,11 @@@ int i, ret; u32 reg;
- pm_runtime_get_sync(&priv->pdev->dev); + ret = pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&priv->pdev->dev); + return ret; + }
if (!cpsw_common_res_usage_state(priv)) cpsw_intr_disable(priv); @@@ -1278,6 -1282,7 +1282,7 @@@
if (!cpsw_common_res_usage_state(priv)) { struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0); + int buf_num;
/* setup tx dma to fixed prio and zero offset */ cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1); @@@ -1305,10 -1310,8 +1310,8 @@@ enable_irq(priv->irqs_table[0]); }
- if (WARN_ON(!priv->data.rx_descs)) - priv->data.rx_descs = 128; - - for (i = 0; i < priv->data.rx_descs; i++) { + buf_num = cpdma_chan_get_rx_buf_num(priv->dma); + for (i = 0; i < buf_num; i++) { struct sk_buff *skb;
ret = -ENOMEM; @@@ -1611,10 -1614,17 +1614,17 @@@ static int cpsw_ndo_set_mac_address(str struct sockaddr *addr = (struct sockaddr *)p; int flags = 0; u16 vid = 0; + int ret;
if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL;
+ ret = pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&priv->pdev->dev); + return ret; + } + if (priv->data.dual_emac) { vid = priv->slaves[priv->emac_port].port_vlan; flags = ALE_VLAN; @@@ -1629,6 -1639,8 +1639,8 @@@ memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); for_each_slave(priv, cpsw_set_slave_mac, priv);
+ pm_runtime_put(&priv->pdev->dev); + return 0; }
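The cpsw changes wrap every register-touching entry point (ndo_open, set_mac_address, the VLAN add/kill paths, and the new ethtool begin/complete hooks further down) in the same runtime-PM bracket. One subtlety worth noting: a failed pm_runtime_get_sync() has still taken a usage-count reference, so the error path must drop it with pm_runtime_put_noidle(). A sketch of the pattern, assuming a normal kernel driver context:

#include <linux/pm_runtime.h>

/* A failed pm_runtime_get_sync() still took a usage-count
 * reference, so the error path must drop it with
 * pm_runtime_put_noidle() before bailing out.
 */
static int do_register_access(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                pm_runtime_put_noidle(dev);
                return ret;
        }

        /* ... MMIO is safe between get and put ... */

        pm_runtime_put(dev);
        return 0;
}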
@@@ -1693,10 -1705,17 +1705,17 @@@ static int cpsw_ndo_vlan_rx_add_vid(str __be16 proto, u16 vid) { struct cpsw_priv *priv = netdev_priv(ndev); + int ret;
if (vid == priv->data.default_vlan) return 0;
+ ret = pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&priv->pdev->dev); + return ret; + } + if (priv->data.dual_emac) { /* In dual EMAC, reserved VLAN id should not be used for * creating VLAN interfaces as this can break the dual @@@ -1711,7 -1730,10 +1730,10 @@@ }
dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); - return cpsw_add_vlan_ale_entry(priv, vid); + ret = cpsw_add_vlan_ale_entry(priv, vid); + + pm_runtime_put(&priv->pdev->dev); + return ret; }
static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, @@@ -1723,6 -1745,12 +1745,12 @@@ if (vid == priv->data.default_vlan) return 0;
+ ret = pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&priv->pdev->dev); + return ret; + } + if (priv->data.dual_emac) { int i;
@@@ -1742,8 -1770,10 +1770,10 @@@ if (ret != 0) return ret;
- return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast, - 0, ALE_VLAN, vid); + ret = cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast, + 0, ALE_VLAN, vid); + pm_runtime_put(&priv->pdev->dev); + return ret; }
static const struct net_device_ops cpsw_netdev_ops = { @@@ -1902,10 -1932,33 +1932,33 @@@ static int cpsw_set_pauseparam(struct n priv->tx_pause = pause->tx_pause ? true : false;
for_each_slave(priv, _cpsw_adjust_link, priv, &link); - return 0; }
+ static int cpsw_ethtool_op_begin(struct net_device *ndev) + { + struct cpsw_priv *priv = netdev_priv(ndev); + int ret; + + ret = pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + cpsw_err(priv, drv, "ethtool begin failed %d\n", ret); + pm_runtime_put_noidle(&priv->pdev->dev); + } + + return ret; + } + + static void cpsw_ethtool_op_complete(struct net_device *ndev) + { + struct cpsw_priv *priv = netdev_priv(ndev); + int ret; + + ret = pm_runtime_put(&priv->pdev->dev); + if (ret < 0) + cpsw_err(priv, drv, "ethtool complete failed %d\n", ret); + } + static const struct ethtool_ops cpsw_ethtool_ops = { .get_drvinfo = cpsw_get_drvinfo, .get_msglevel = cpsw_get_msglevel, @@@ -1925,6 -1978,8 +1978,8 @@@ .set_wol = cpsw_set_wol, .get_regs_len = cpsw_get_regs_len, .get_regs = cpsw_get_regs, + .begin = cpsw_ethtool_op_begin, + .complete = cpsw_ethtool_op_complete, };
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, @@@ -1999,12 -2054,6 +2054,6 @@@ static int cpsw_probe_dt(struct cpsw_pl } data->bd_ram_size = prop;
- if (of_property_read_u32(node, "rx_descs", &prop)) { - dev_err(&pdev->dev, "Missing rx_descs property in the DT.\n"); - return -EINVAL; - } - data->rx_descs = prop; - if (of_property_read_u32(node, "mac_control", &prop)) { dev_err(&pdev->dev, "Missing mac_control property in the DT.\n"); return -EINVAL; @@@ -2022,7 -2071,7 +2071,7 @@@ if (ret) dev_warn(&pdev->dev, "Doesn't have any child node\n");
- for_each_child_of_node(node, slave_node) { + for_each_available_child_of_node(node, slave_node) { struct cpsw_slave_data *slave_data = data->slave_data + i; const void *mac_addr = NULL; int lenp; @@@ -2124,7 -2173,6 +2173,6 @@@ static int cpsw_probe_dual_emac(struct }
priv_sl2 = netdev_priv(ndev); - spin_lock_init(&priv_sl2->lock); priv_sl2->data = *data; priv_sl2->pdev = pdev; priv_sl2->ndev = ndev; @@@ -2243,7 -2291,6 +2291,6 @@@ static int cpsw_probe(struct platform_d
platform_set_drvdata(pdev, ndev); priv = netdev_priv(ndev); - spin_lock_init(&priv->lock); priv->pdev = pdev; priv->ndev = ndev; priv->dev = &ndev->dev; @@@ -2321,7 -2368,11 +2368,11 @@@ /* Need to enable clocks with runtime PM api to access module * registers */ - pm_runtime_get_sync(&pdev->dev); + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); + goto clean_runtime_disable_ret; + } priv->version = readl(&priv->regs->id_ver); pm_runtime_put_sync(&pdev->dev);
@@@ -2505,6 -2556,8 +2556,6 @@@ clean_ale_ret: cpsw_ale_destroy(priv->ale); clean_dma_ret: - cpdma_chan_destroy(priv->txch); - cpdma_chan_destroy(priv->rxch); cpdma_ctlr_destroy(priv->dma); clean_runtime_disable_ret: pm_runtime_disable(&pdev->dev); @@@ -2532,6 -2585,8 +2583,6 @@@ static int cpsw_remove(struct platform_ unregister_netdev(ndev);
cpsw_ale_destroy(priv->ale); - cpdma_chan_destroy(priv->txch); - cpdma_chan_destroy(priv->rxch); cpdma_ctlr_destroy(priv->dma); pm_runtime_disable(&pdev->dev); device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device); @@@ -2554,16 -2609,12 +2605,12 @@@ static int cpsw_suspend(struct device * for (i = 0; i < priv->data.slaves; i++) { if (netif_running(priv->slaves[i].ndev)) cpsw_ndo_stop(priv->slaves[i].ndev); - soft_reset_slave(priv->slaves + i); } } else { if (netif_running(ndev)) cpsw_ndo_stop(ndev); - for_each_slave(priv, soft_reset_slave); }
- pm_runtime_put_sync(&pdev->dev); - /* Select sleep pin state */ pinctrl_pm_select_sleep_state(&pdev->dev);
@@@ -2576,8 -2627,6 +2623,6 @@@ static int cpsw_resume(struct device *d struct net_device *ndev = platform_get_drvdata(pdev); struct cpsw_priv *priv = netdev_priv(ndev);
- pm_runtime_get_sync(&pdev->dev); - /* Select default pin state */ pinctrl_pm_select_default_state(&pdev->dev);
diff --combined drivers/net/geneve.c index cc39cef,aa61708..310e0b9c --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@@ -12,7 -12,6 +12,6 @@@
#include <linux/kernel.h> #include <linux/module.h> - #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/hash.h> #include <net/dst_metadata.h> @@@ -397,23 -396,6 +396,6 @@@ static struct socket *geneve_create_soc return sock; }
- static void geneve_notify_add_rx_port(struct geneve_sock *gs) - { - struct net_device *dev; - struct sock *sk = gs->sock->sk; - struct net *net = sock_net(sk); - sa_family_t sa_family = geneve_get_sk_family(gs); - __be16 port = inet_sk(sk)->inet_sport; - - rcu_read_lock(); - for_each_netdev_rcu(net, dev) { - if (dev->netdev_ops->ndo_add_geneve_port) - dev->netdev_ops->ndo_add_geneve_port(dev, sa_family, - port); - } - rcu_read_unlock(); - } - static int geneve_hlen(struct genevehdr *gh) { return sizeof(*gh) + gh->opt_len * 4; @@@ -533,7 -515,7 +515,7 @@@ static struct geneve_sock *geneve_socke INIT_HLIST_HEAD(&gs->vni_list[h]);
/* Initialize the geneve udp offloads structure */ - geneve_notify_add_rx_port(gs); + udp_tunnel_notify_add_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE);
/* Mark socket as an encapsulation socket */ memset(&tunnel_cfg, 0, sizeof(tunnel_cfg)); @@@ -548,31 -530,13 +530,13 @@@ return gs; }
- static void geneve_notify_del_rx_port(struct geneve_sock *gs) - { - struct net_device *dev; - struct sock *sk = gs->sock->sk; - struct net *net = sock_net(sk); - sa_family_t sa_family = geneve_get_sk_family(gs); - __be16 port = inet_sk(sk)->inet_sport; - - rcu_read_lock(); - for_each_netdev_rcu(net, dev) { - if (dev->netdev_ops->ndo_del_geneve_port) - dev->netdev_ops->ndo_del_geneve_port(dev, sa_family, - port); - } - - rcu_read_unlock(); - } - static void __geneve_sock_release(struct geneve_sock *gs) { if (!gs || --gs->refcnt) return;
list_del(&gs->list); - geneve_notify_del_rx_port(gs); + udp_tunnel_notify_del_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE); udp_tunnel_sock_release(gs->sock); kfree_rcu(gs, rcu); } @@@ -958,8 -922,8 +922,8 @@@ tx_error dev->stats.collisions++; else if (err == -ENETUNREACH) dev->stats.tx_carrier_errors++; - else - dev->stats.tx_errors++; + + dev->stats.tx_errors++; return NETDEV_TX_OK; }
@@@ -1048,8 -1012,8 +1012,8 @@@ tx_error dev->stats.collisions++; else if (err == -ENETUNREACH) dev->stats.tx_carrier_errors++; - else - dev->stats.tx_errors++; + + dev->stats.tx_errors++; return NETDEV_TX_OK; } #endif @@@ -1165,29 -1129,20 +1129,20 @@@ static struct device_type geneve_type .name = "geneve", };
- /* Calls the ndo_add_geneve_port of the caller in order to + /* Calls the ndo_add_udp_enc_port of the caller in order to * supply the listening GENEVE udp ports. Callers are expected - * to implement the ndo_add_geneve_port. + * to implement the ndo_add_udp_enc_port. */ static void geneve_push_rx_ports(struct net_device *dev) { struct net *net = dev_net(dev); struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_sock *gs; - sa_family_t sa_family; - struct sock *sk; - __be16 port; - - if (!dev->netdev_ops->ndo_add_geneve_port) - return;
rcu_read_lock(); - list_for_each_entry_rcu(gs, &gn->sock_list, list) { - sk = gs->sock->sk; - sa_family = sk->sk_family; - port = inet_sk(sk)->inet_sport; - dev->netdev_ops->ndo_add_geneve_port(dev, sa_family, port); - } + list_for_each_entry_rcu(gs, &gn->sock_list, list) + udp_tunnel_push_rx_port(dev, gs->sock, + UDP_TUNNEL_TYPE_GENEVE); rcu_read_unlock(); }
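The driver-private notifiers removed here, and the matching loop that used to live in geneve_push_rx_ports(), collapse into the shared udp_tunnel helpers: the core now carries the socket's address family and source port plus a tunnel-type enum to every device that implements the new ndo_udp_tunnel_add()/ndo_udp_tunnel_del() callbacks. A conceptual sketch of the per-device push helper (the real one lives in net/ipv4/udp_tunnel.c):

void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock,
			     unsigned short type)
{
	struct sock *sk = sock->sk;
	struct udp_tunnel_info ti = {
		.type		= type,		/* e.g. UDP_TUNNEL_TYPE_GENEVE */
		.sa_family	= sk->sk_family,
		.port		= inet_sk(sk)->inet_sport,
	};

	if (dev->netdev_ops->ndo_udp_tunnel_add)
		dev->netdev_ops->ndo_udp_tunnel_add(dev, &ti);
}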
@@@ -1508,7 -1463,6 +1463,7 @@@ struct net_device *geneve_dev_create_fb { struct nlattr *tb[IFLA_MAX + 1]; struct net_device *dev; + LIST_HEAD(list_kill); int err;
memset(tb, 0, sizeof(tb)); @@@ -1520,10 -1474,8 +1475,10 @@@ err = geneve_configure(net, dev, &geneve_remote_unspec, 0, 0, 0, 0, htons(dst_port), true, GENEVE_F_UDP_ZERO_CSUM6_RX); - if (err) - goto err; + if (err) { + free_netdev(dev); + return ERR_PTR(err); + }
/* openvswitch users expect packet sizes to be unrestricted, * so set the largest MTU we can. @@@ -1532,15 -1484,10 +1487,15 @@@ if (err) goto err;
+ err = rtnl_configure_link(dev, NULL); + if (err < 0) + goto err; + return dev;
err: - free_netdev(dev); + geneve_dellink(dev, &list_kill); + unregister_netdevice_many(&list_kill); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(geneve_dev_create_fb); @@@ -1550,7 -1497,7 +1505,7 @@@ static int geneve_netdevice_event(struc { struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- if (event == NETDEV_OFFLOAD_PUSH_GENEVE) + if (event == NETDEV_UDP_TUNNEL_PUSH_INFO) geneve_push_rx_ports(dev);
return NOTIFY_DONE; diff --combined drivers/net/phy/fixed_phy.c index 9ec7f73,b376ada..c649c10 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@@ -23,9 -23,9 +23,10 @@@ #include <linux/slab.h> #include <linux/of.h> #include <linux/gpio.h> + #include <linux/seqlock.h> +#include <linux/idr.h>
- #define MII_REGS_NUM 29 + #include "swphy.h"
struct fixed_mdio_bus { struct mii_bus *mii_bus; @@@ -34,8 -34,8 +35,8 @@@
struct fixed_phy { int addr; - u16 regs[MII_REGS_NUM]; struct phy_device *phydev; + seqcount_t seqcount; struct fixed_phy_status status; int (*link_update)(struct net_device *, struct fixed_phy_status *); struct list_head node; @@@ -47,103 -47,10 +48,10 @@@ static struct fixed_mdio_bus platform_f .phys = LIST_HEAD_INIT(platform_fmb.phys), };
- static int fixed_phy_update_regs(struct fixed_phy *fp) + static void fixed_phy_update(struct fixed_phy *fp) { - u16 bmsr = BMSR_ANEGCAPABLE; - u16 bmcr = 0; - u16 lpagb = 0; - u16 lpa = 0; - if (gpio_is_valid(fp->link_gpio)) fp->status.link = !!gpio_get_value_cansleep(fp->link_gpio); - - if (fp->status.duplex) { - switch (fp->status.speed) { - case 1000: - bmsr |= BMSR_ESTATEN; - break; - case 100: - bmsr |= BMSR_100FULL; - break; - case 10: - bmsr |= BMSR_10FULL; - break; - default: - break; - } - } else { - switch (fp->status.speed) { - case 1000: - bmsr |= BMSR_ESTATEN; - break; - case 100: - bmsr |= BMSR_100HALF; - break; - case 10: - bmsr |= BMSR_10HALF; - break; - default: - break; - } - } - - if (fp->status.link) { - bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; - - if (fp->status.duplex) { - bmcr |= BMCR_FULLDPLX; - - switch (fp->status.speed) { - case 1000: - bmcr |= BMCR_SPEED1000; - lpagb |= LPA_1000FULL; - break; - case 100: - bmcr |= BMCR_SPEED100; - lpa |= LPA_100FULL; - break; - case 10: - lpa |= LPA_10FULL; - break; - default: - pr_warn("fixed phy: unknown speed\n"); - return -EINVAL; - } - } else { - switch (fp->status.speed) { - case 1000: - bmcr |= BMCR_SPEED1000; - lpagb |= LPA_1000HALF; - break; - case 100: - bmcr |= BMCR_SPEED100; - lpa |= LPA_100HALF; - break; - case 10: - lpa |= LPA_10HALF; - break; - default: - pr_warn("fixed phy: unknown speed\n"); - return -EINVAL; - } - } - - if (fp->status.pause) - lpa |= LPA_PAUSE_CAP; - - if (fp->status.asym_pause) - lpa |= LPA_PAUSE_ASYM; - } - - fp->regs[MII_PHYSID1] = 0; - fp->regs[MII_PHYSID2] = 0; - - fp->regs[MII_BMSR] = bmsr; - fp->regs[MII_BMCR] = bmcr; - fp->regs[MII_LPA] = lpa; - fp->regs[MII_STAT1000] = lpagb; - - return 0; }
static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) @@@ -151,29 -58,23 +59,23 @@@ struct fixed_mdio_bus *fmb = bus->priv; struct fixed_phy *fp;
- if (reg_num >= MII_REGS_NUM) - return -1; - - /* We do not support emulating Clause 45 over Clause 22 register reads - * return an error instead of bogus data. - */ - switch (reg_num) { - case MII_MMD_CTRL: - case MII_MMD_DATA: - return -1; - default: - break; - } - list_for_each_entry(fp, &fmb->phys, node) { if (fp->addr == phy_addr) { - /* Issue callback if user registered it. */ - if (fp->link_update) { - fp->link_update(fp->phydev->attached_dev, - &fp->status); - fixed_phy_update_regs(fp); - } - return fp->regs[reg_num]; + struct fixed_phy_status state; + int s; + + do { + s = read_seqcount_begin(&fp->seqcount); + /* Issue callback if user registered it. */ + if (fp->link_update) { + fp->link_update(fp->phydev->attached_dev, + &fp->status); + fixed_phy_update(fp); + } + state = fp->status; + } while (read_seqcount_retry(&fp->seqcount, s)); + + return swphy_read_reg(reg_num, &state); } }
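With the register array gone, fixed_mdio_read() computes register values on the fly from a snapshot of fixed_phy_status, and the seqcount is what keeps that snapshot consistent against a concurrent fixed_phy_update_state() without a reader-side lock. The idiom in isolation (a minimal sketch; the struct stands in for fixed_phy_status, and writers are assumed to be serialized against one another, as they are here):

#include <linux/seqlock.h>

struct status { int link, speed, duplex; };

static seqcount_t seq;		/* seqcount_init(&seq) during setup */
static struct status shared;

static void writer_update(int link)
{
	write_seqcount_begin(&seq);	/* overlapping readers will retry */
	shared.link = link;
	write_seqcount_end(&seq);
}

static struct status reader_snapshot(void)
{
	struct status snap;
	unsigned int s;

	do {
		s = read_seqcount_begin(&seq);
		snap = shared;		/* may race; the loop catches it */
	} while (read_seqcount_retry(&seq, s));

	return snap;
}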
@@@ -225,6 -126,7 +127,7 @@@ int fixed_phy_update_state(struct phy_d
list_for_each_entry(fp, &fmb->phys, node) { if (fp->addr == phydev->mdio.addr) { + write_seqcount_begin(&fp->seqcount); #define _UPD(x) if (changed->x) \ fp->status.x = status->x _UPD(link); @@@ -233,7 -135,8 +136,8 @@@ _UPD(pause); _UPD(asym_pause); #undef _UPD - fixed_phy_update_regs(fp); + fixed_phy_update(fp); + write_seqcount_end(&fp->seqcount); return 0; } } @@@ -250,11 -153,15 +154,15 @@@ int fixed_phy_add(unsigned int irq, in struct fixed_mdio_bus *fmb = &platform_fmb; struct fixed_phy *fp;
+ ret = swphy_validate_state(status); + if (ret < 0) + return ret; + fp = kzalloc(sizeof(*fp), GFP_KERNEL); if (!fp) return -ENOMEM;
- memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM); + seqcount_init(&fp->seqcount);
if (irq != PHY_POLL) fmb->mii_bus->irq[phy_addr] = irq; @@@ -270,25 -177,18 +178,20 @@@ goto err_regs; }
- ret = fixed_phy_update_regs(fp); - if (ret) - goto err_gpio; + fixed_phy_update(fp);
list_add_tail(&fp->node, &fmb->phys);
return 0;
- err_gpio: - if (gpio_is_valid(fp->link_gpio)) - gpio_free(fp->link_gpio); err_regs: kfree(fp); return ret; } EXPORT_SYMBOL_GPL(fixed_phy_add);
+static DEFINE_IDA(phy_fixed_ida); + static void fixed_phy_del(int phy_addr) { struct fixed_mdio_bus *fmb = &platform_fmb; @@@ -300,12 -200,14 +203,12 @@@ if (gpio_is_valid(fp->link_gpio)) gpio_free(fp->link_gpio); kfree(fp); + ida_simple_remove(&phy_fixed_ida, phy_addr); return; } } }
-static int phy_fixed_addr; -static DEFINE_SPINLOCK(phy_fixed_addr_lock); - struct phy_device *fixed_phy_register(unsigned int irq, struct fixed_phy_status *status, int link_gpio, @@@ -320,15 -222,17 +223,15 @@@ return ERR_PTR(-EPROBE_DEFER);
/* Get the next available PHY address, up to PHY_MAX_ADDR */ - spin_lock(&phy_fixed_addr_lock); - if (phy_fixed_addr == PHY_MAX_ADDR) { - spin_unlock(&phy_fixed_addr_lock); - return ERR_PTR(-ENOSPC); - } - phy_addr = phy_fixed_addr++; - spin_unlock(&phy_fixed_addr_lock); + phy_addr = ida_simple_get(&phy_fixed_ida, 0, PHY_MAX_ADDR, GFP_KERNEL); + if (phy_addr < 0) + return ERR_PTR(phy_addr);
ret = fixed_phy_add(irq, phy_addr, status, link_gpio); - if (ret < 0) + if (ret < 0) { + ida_simple_remove(&phy_fixed_ida, phy_addr); return ERR_PTR(ret); + }
phy = get_phy_device(fmb->mii_bus, phy_addr, false); if (IS_ERR(phy)) { @@@ -433,7 -337,6 +336,7 @@@ static void __exit fixed_mdio_bus_exit( list_del(&fp->node); kfree(fp); } + ida_destroy(&phy_fixed_ida); } module_exit(fixed_mdio_bus_exit);
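Swapping the ever-increasing phy_fixed_addr counter for an IDA fixes address exhaustion: fixed_phy_del() now returns the MDIO address to the pool via ida_simple_remove(), and the dedicated spinlock goes away because the IDA does its own locking. The allocate/free pairing in miniature (a sketch; PHY_MAX_ADDR bounds valid MDIO addresses):

#include <linux/idr.h>
#include <linux/phy.h>		/* PHY_MAX_ADDR */

static DEFINE_IDA(addr_ida);

static int claim_phy_addr(void)
{
	/* lowest free id in [0, PHY_MAX_ADDR); negative errno when full */
	return ida_simple_get(&addr_ida, 0, PHY_MAX_ADDR, GFP_KERNEL);
}

static void release_phy_addr(int addr)
{
	ida_simple_remove(&addr_ida, addr);	/* recycle for the next caller */
}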
diff --combined drivers/net/team/team.c index fdee772,0a1bb83..f9eebea --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@@ -1203,10 -1203,8 +1203,10 @@@ static int team_port_add(struct team *t goto err_dev_open; }
+ netif_addr_lock_bh(dev); dev_uc_sync_multiple(port_dev, dev); dev_mc_sync_multiple(port_dev, dev); + netif_addr_unlock_bh(dev);
err = vlan_vids_add_by_dev(port_dev, dev); if (err) { @@@ -1576,23 -1574,6 +1576,6 @@@ static const struct team_option team_op }, };
- static struct lock_class_key team_netdev_xmit_lock_key; - static struct lock_class_key team_netdev_addr_lock_key; - static struct lock_class_key team_tx_busylock_key; - - static void team_set_lockdep_class_one(struct net_device *dev, - struct netdev_queue *txq, - void *unused) - { - lockdep_set_class(&txq->_xmit_lock, &team_netdev_xmit_lock_key); - } - - static void team_set_lockdep_class(struct net_device *dev) - { - lockdep_set_class(&dev->addr_list_lock, &team_netdev_addr_lock_key); - netdev_for_each_tx_queue(dev, team_set_lockdep_class_one, NULL); - dev->qdisc_tx_busylock = &team_tx_busylock_key; - }
static int team_init(struct net_device *dev) { @@@ -1628,7 -1609,7 +1611,7 @@@ goto err_options_register; netif_carrier_off(dev);
- team_set_lockdep_class(dev); + netdev_lockdep_set_classes(dev);
return 0;
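Two team fixes share these hunks. The addr-list one brackets dev_uc_sync_multiple()/dev_mc_sync_multiple() with netif_addr_lock_bh(), which those helpers expect to be held on the source device. The other drops team's hand-rolled lockdep keys for the generic netdev_lockdep_set_classes() helper; in spirit that helper packages exactly the boilerplate deleted above (a sketch, not the verbatim macro from <linux/netdevice.h>):

static struct lock_class_key xmit_lock_key;
static struct lock_class_key addr_lock_key;
static struct lock_class_key busylock_key;

static void set_one_txq(struct net_device *dev, struct netdev_queue *txq,
			void *unused)
{
	lockdep_set_class(&txq->_xmit_lock, &xmit_lock_key);
}

static void set_lockdep_classes(struct net_device *dev)
{
	/* stacked devices need their own lock classes so lockdep does
	 * not see false recursion against the lower device's locks */
	lockdep_set_class(&dev->addr_list_lock, &addr_lock_key);
	netdev_for_each_tx_queue(dev, set_one_txq, NULL);
	dev->qdisc_tx_busylock = &busylock_key;
}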
diff --combined drivers/net/usb/r8152.c index 4e257b8,11178f9..24d36728 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@@ -31,7 -31,7 +31,7 @@@ #define NETNEXT_VERSION "08"
/* Information for net */ -#define NET_VERSION "3" +#define NET_VERSION "4"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION #define DRIVER_AUTHOR "Realtek linux nic maintainers nic_swsd@realtek.com" @@@ -116,7 -116,6 +116,7 @@@ #define USB_TX_DMA 0xd434 #define USB_TOLERANCE 0xd490 #define USB_LPM_CTRL 0xd41a +#define USB_BMU_RESET 0xd4b0 #define USB_UPS_CTRL 0xd800 #define USB_MISC_0 0xd81a #define USB_POWER_CUT 0xd80a @@@ -339,10 -338,6 +339,10 @@@ #define TEST_MODE_DISABLE 0x00000001 #define TX_SIZE_ADJUST1 0x00000100
+/* USB_BMU_RESET */ +#define BMU_RESET_EP_IN 0x01 +#define BMU_RESET_EP_OUT 0x02 + /* USB_UPS_CTRL */ #define POWER_CUT 0x0100
@@@ -607,7 -602,7 +607,7 @@@ struct r8152 struct list_head rx_done, tx_free; struct sk_buff_head tx_queue, rx_queue; spinlock_t rx_lock, tx_lock; - struct delayed_work schedule; + struct delayed_work schedule, hw_phy_work; struct mii_if_info mii; struct mutex control; /* use for hw setting */ #ifdef CONFIG_PM_SLEEP @@@ -624,6 -619,7 +624,7 @@@ int (*eee_get)(struct r8152 *, struct ethtool_eee *); int (*eee_set)(struct r8152 *, struct ethtool_eee *); bool (*in_nway)(struct r8152 *); + void (*hw_phy_cfg)(struct r8152 *); } rtl_ops;
int intr_interval; @@@ -632,8 -628,11 +633,11 @@@ u32 tx_qlen; u32 coalesce; u16 ocp_base; + u16 speed; u8 *intr_buff; u8 version; + u8 duplex; + u8 autoneg; };
enum rtl_version { @@@ -1747,7 -1746,7 +1751,7 @@@ static int rx_bottom(struct r8152 *tp, pkt_len -= CRC_SIZE; rx_data += sizeof(struct rx_desc);
- skb = netdev_alloc_skb_ip_align(netdev, pkt_len); + skb = napi_alloc_skb(&tp->napi, pkt_len); if (!skb) { stats->rx_dropped++; goto find_next_rx; @@@ -2174,7 -2173,7 +2178,7 @@@ static void r8153_set_rx_early_timeout( static void r8153_set_rx_early_size(struct r8152 *tp) { u32 mtu = tp->netdev->mtu; - u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 4; + u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8;
ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data); } @@@ -2461,17 -2460,6 +2465,17 @@@ static void r8153_teredo_off(struct r81 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0); }
+static void rtl_reset_bmu(struct r8152 *tp) +{ + u32 ocp_data; + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_BMU_RESET); + ocp_data &= ~(BMU_RESET_EP_IN | BMU_RESET_EP_OUT); + ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); + ocp_data |= BMU_RESET_EP_IN | BMU_RESET_EP_OUT; + ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); +} + static void r8152_aldps_en(struct r8152 *tp, bool enable) { if (enable) { @@@ -2515,8 -2503,6 +2519,6 @@@ static void r8152b_exit_oob(struct r815
rxdy_gated_en(tp, true); r8153_teredo_off(tp); - r8152b_hw_phy_cfg(tp); - ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00);
@@@ -2694,10 -2680,7 +2696,8 @@@ static void r8153_first_init(struct r81 ocp_data &= ~RCR_ACPT_ALL; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
- r8153_hw_phy_cfg(tp); - rtl8152_nic_reset(tp); + rtl_reset_bmu(tp);
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; @@@ -2759,7 -2742,6 +2759,7 @@@ static void r8153_enter_oob(struct r815 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
rtl_disable(tp); + rtl_reset_bmu(tp);
for (i = 0; i < 1000; i++) { ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); @@@ -2821,7 -2803,6 +2821,7 @@@ static void rtl8153_disable(struct r815 { r8153_aldps_en(tp, false); rtl_disable(tp); + rtl_reset_bmu(tp); r8153_aldps_en(tp, true); usb_enable_lpm(tp->udev); } @@@ -2891,7 -2872,7 +2891,7 @@@ static int rtl8152_set_speed(struct r81 bmcr = BMCR_ANENABLE | BMCR_ANRESTART; }
- if (test_bit(PHY_RESET, &tp->flags)) + if (test_and_clear_bit(PHY_RESET, &tp->flags)) bmcr |= BMCR_RESET;
if (tp->mii.supports_gmii) @@@ -2900,7 -2881,7 +2900,7 @@@ r8152_mdio_write(tp, MII_ADVERTISE, anar); r8152_mdio_write(tp, MII_BMCR, bmcr);
- if (test_and_clear_bit(PHY_RESET, &tp->flags)) { + if (bmcr & BMCR_RESET) { int i;
for (i = 0; i < 50; i++) { @@@ -3059,6 -3040,27 +3059,27 @@@ out1 usb_autopm_put_interface(tp->intf); }
+ static void rtl_hw_phy_work_func_t(struct work_struct *work) + { + struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work); + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + if (usb_autopm_get_interface(tp->intf) < 0) + return; + + mutex_lock(&tp->control); + + tp->rtl_ops.hw_phy_cfg(tp); + + rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex); + + mutex_unlock(&tp->control); + + usb_autopm_put_interface(tp->intf); + } + #ifdef CONFIG_PM_SLEEP static int rtl_notifier(struct notifier_block *nb, unsigned long action, void *data) @@@ -3107,9 -3109,6 +3128,6 @@@ static int rtl8152_open(struct net_devi
tp->rtl_ops.up(tp);
- rtl8152_set_speed(tp, AUTONEG_ENABLE, - tp->mii.supports_gmii ? SPEED_1000 : SPEED_100, - DUPLEX_FULL); netif_carrier_off(netdev); netif_start_queue(netdev); set_bit(WORK_ENABLE, &tp->flags); @@@ -3401,11 -3400,15 +3419,11 @@@ static void r8153_init(struct r8152 *tp r8153_power_cut_en(tp, false); r8153_u1u2en(tp, true);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, - PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN | - U1U2_SPDWN_EN | L1_SPDWN_EN); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, - PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN | - TP100_SPDWN_EN | TP500_SPDWN_EN | TP1000_SPDWN_EN | - EEE_SPDWN_EN); + /* MAC clock speed down */ + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
r8153_enable_eee(tp); r8153_aldps_en(tp, true); @@@ -3533,6 -3536,7 +3551,7 @@@ static int rtl8152_resume(struct usb_in
if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) { tp->rtl_ops.init(tp); + queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); netif_device_attach(tp->netdev); }
@@@ -3547,10 -3551,6 +3566,6 @@@ napi_enable(&tp->napi); } else { tp->rtl_ops.up(tp); - rtl8152_set_speed(tp, AUTONEG_ENABLE, - tp->mii.supports_gmii ? - SPEED_1000 : SPEED_100, - DUPLEX_FULL); netif_carrier_off(tp->netdev); set_bit(WORK_ENABLE, &tp->flags); } @@@ -3680,6 -3680,11 +3695,11 @@@ static int rtl8152_set_settings(struct mutex_lock(&tp->control);
ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex); + if (!ret) { + tp->autoneg = cmd->autoneg; + tp->speed = cmd->speed; + tp->duplex = cmd->duplex; + }
mutex_unlock(&tp->control);
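Taken together, the r8152 hunks defer PHY bring-up to a delayed work item: probe caches the default link settings, rtl8152_set_settings() updates that cache only when the hardware accepted the change, and rtl_hw_phy_work_func_t() later applies hw_phy_cfg plus the cached autoneg/speed/duplex under the control mutex, replacing the rtl8152_set_speed() calls removed from open and resume. The lifecycle in general form (a sketch with illustrative names):

#include <linux/workqueue.h>

struct my_nic {
	struct delayed_work phy_work;
	u8 autoneg, duplex;		/* cached settings the work applies */
	u16 speed;
};

static void my_phy_work(struct work_struct *work)
{
	struct my_nic *nic = container_of(work, struct my_nic,
					  phy_work.work);

	/* configure the PHY, then apply nic->autoneg/speed/duplex */
}

static void my_probe_tail(struct my_nic *nic)
{
	INIT_DELAYED_WORK(&nic->phy_work, my_phy_work);
	queue_delayed_work(system_long_wq, &nic->phy_work, 0);
}

static void my_disconnect(struct my_nic *nic)
{
	/* never free the device while the work may still run */
	cancel_delayed_work_sync(&nic->phy_work);
}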
@@@ -4137,6 -4142,7 +4157,7 @@@ static int rtl_ops_init(struct r8152 *t ops->eee_get = r8152_get_eee; ops->eee_set = r8152_set_eee; ops->in_nway = rtl8152_in_nway; + ops->hw_phy_cfg = r8152b_hw_phy_cfg; break;
case RTL_VER_03: @@@ -4152,6 -4158,7 +4173,7 @@@ ops->eee_get = r8153_get_eee; ops->eee_set = r8153_set_eee; ops->in_nway = rtl8153_in_nway; + ops->hw_phy_cfg = r8153_hw_phy_cfg; break;
default: @@@ -4198,6 -4205,7 +4220,7 @@@ static int rtl8152_probe(struct usb_int
mutex_init(&tp->control); INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t); + INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t);
netdev->netdev_ops = &rtl8152_netdev_ops; netdev->watchdog_timeo = RTL8152_TX_TIMEOUT; @@@ -4237,9 -4245,14 +4260,14 @@@ break; }
+ tp->autoneg = AUTONEG_ENABLE; + tp->speed = tp->mii.supports_gmii ? SPEED_1000 : SPEED_100; + tp->duplex = DUPLEX_FULL; + intf->needs_remote_wakeup = 1;
tp->rtl_ops.init(tp); + queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); set_ethernet_addr(tp);
usb_set_intfdata(intf, tp); @@@ -4285,6 -4298,7 +4313,7 @@@ static void rtl8152_disconnect(struct u
netif_napi_del(&tp->napi); unregister_netdev(tp->netdev); + cancel_delayed_work_sync(&tp->hw_phy_work); tp->rtl_ops.unload(tp); free_netdev(tp->netdev); } diff --combined drivers/net/vrf.c index 8bd8c7e,b376282..db79fc5 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@@ -35,6 -35,7 +35,7 @@@ #include <net/route.h> #include <net/addrconf.h> #include <net/l3mdev.h> + #include <net/fib_rules.h>
#define RT_FL_TOS(oldflp4) \ ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)) @@@ -42,9 -43,14 +43,14 @@@ #define DRV_NAME "vrf" #define DRV_VERSION "1.0"
+ #define FIB_RULE_PREF 1000 /* default preference for FIB rules */ + static bool add_fib_rules = true; + struct net_vrf { struct rtable __rcu *rth; + struct rtable __rcu *rth_local; struct rt6_info __rcu *rt6; + struct rt6_info __rcu *rt6_local; u32 tb_id; };
@@@ -54,9 -60,20 +60,20 @@@ struct pcpu_dstats u64 tx_drps; u64 rx_pkts; u64 rx_bytes; + u64 rx_drps; struct u64_stats_sync syncp; };
+ static void vrf_rx_stats(struct net_device *dev, int len) + { + struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); + + u64_stats_update_begin(&dstats->syncp); + dstats->rx_pkts++; + dstats->rx_bytes += len; + u64_stats_update_end(&dstats->syncp); + } + static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb) { vrf_dev->stats.tx_errors++; @@@ -91,6 -108,34 +108,34 @@@ static struct rtnl_link_stats64 *vrf_ge return stats; }
+ /* Local traffic destined to local address. Reinsert the packet to rx + * path, similar to loopback handling. + */ + static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev, + struct dst_entry *dst) + { + int len = skb->len; + + skb_orphan(skb); + + skb_dst_set(skb, dst); + skb_dst_force(skb); + + /* set pkt_type to avoid skb hitting packet taps twice - + * once on Tx and again in Rx processing + */ + skb->pkt_type = PACKET_LOOPBACK; + + skb->protocol = eth_type_trans(skb, dev); + + if (likely(netif_rx(skb) == NET_RX_SUCCESS)) + vrf_rx_stats(dev, len); + else + this_cpu_inc(dev->dstats->rx_drps); + + return NETDEV_TX_OK; + } + #if IS_ENABLED(CONFIG_IPV6) static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, struct net_device *dev) @@@ -117,8 -162,51 +162,51 @@@ goto err;
skb_dst_drop(skb); + + /* if dst.dev is loopback or the VRF device again this is locally + * originated traffic destined to a local address. Short circuit + * to Rx path using our local dst + */ + if (dst->dev == net->loopback_dev || dst->dev == dev) { + struct net_vrf *vrf = netdev_priv(dev); + struct rt6_info *rt6_local; + + /* release looked up dst and use cached local dst */ + dst_release(dst); + + rcu_read_lock(); + + rt6_local = rcu_dereference(vrf->rt6_local); + if (unlikely(!rt6_local)) { + rcu_read_unlock(); + goto err; + } + + /* Ordering issue: cached local dst is created on newlink + * before the IPv6 initialization. Using the local dst + * requires rt6i_idev to be set so make sure it is. + */ + if (unlikely(!rt6_local->rt6i_idev)) { + rt6_local->rt6i_idev = in6_dev_get(dev); + if (!rt6_local->rt6i_idev) { + rcu_read_unlock(); + goto err; + } + } + + dst = &rt6_local->dst; + dst_hold(dst); + + rcu_read_unlock(); + + return vrf_local_xmit(skb, dev, &rt6_local->dst); + } + skb_dst_set(skb, dst);
+ /* strip the ethernet header added for pass through VRF device */ + __skb_pull(skb, skb_network_offset(skb)); + ret = ip6_local_out(net, skb->sk, skb); if (unlikely(net_xmit_eval(ret))) dev->stats.tx_errors++; @@@ -139,29 -227,6 +227,6 @@@ static netdev_tx_t vrf_process_v6_outbo } #endif
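The IPv6 output path above (and the IPv4 path just below) now special-cases locally destined traffic: when the route lookup resolves to the loopback device or to the VRF device itself, the packet is re-injected into the Rx path through a dst cached on the VRF (rt6_local/rth_local) rather than leaving the stack at all. Reduced to its core, the re-injection is (a sketch of vrf_local_xmit() with stats and refcount details trimmed):

static int local_xmit(struct sk_buff *skb, struct net_device *dev,
		      struct dst_entry *local_dst)
{
	skb_orphan(skb);
	skb_dst_set(skb, local_dst);
	skb_dst_force(skb);

	/* mark the skb so vrf_ip_rcv()/vrf_ip6_rcv() skip the packet
	 * taps, which already saw it on the Tx side */
	skb->pkt_type = PACKET_LOOPBACK;
	skb->protocol = eth_type_trans(skb, dev);

	netif_rx(skb);
	return NETDEV_TX_OK;
}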
- static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4, - struct net_device *vrf_dev) - { - struct rtable *rt; - int err = 1; - - rt = ip_route_output_flow(dev_net(vrf_dev), fl4, NULL); - if (IS_ERR(rt)) - goto out; - - /* TO-DO: what about broadcast ? */ - if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) { - ip_rt_put(rt); - goto out; - } - - skb_dst_drop(skb); - skb_dst_set(skb, &rt->dst); - err = 0; - out: - return err; - } - static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, struct net_device *vrf_dev) { @@@ -176,9 -241,51 +241,51 @@@ FLOWI_FLAG_SKIP_NH_OIF, .daddr = ip4h->daddr, }; + struct net *net = dev_net(vrf_dev); + struct rtable *rt; + + rt = ip_route_output_flow(net, &fl4, NULL); + if (IS_ERR(rt)) + goto err;
- if (vrf_send_v4_prep(skb, &fl4, vrf_dev)) + if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) { + ip_rt_put(rt); goto err; + } + + skb_dst_drop(skb); + + /* if dst.dev is loopback or the VRF device again this is locally + * originated traffic destined to a local address. Short circuit + * to Rx path using our local dst + */ + if (rt->dst.dev == net->loopback_dev || rt->dst.dev == vrf_dev) { + struct net_vrf *vrf = netdev_priv(vrf_dev); + struct rtable *rth_local; + struct dst_entry *dst = NULL; + + ip_rt_put(rt); + + rcu_read_lock(); + + rth_local = rcu_dereference(vrf->rth_local); + if (likely(rth_local)) { + dst = &rth_local->dst; + dst_hold(dst); + } + + rcu_read_unlock(); + + if (unlikely(!dst)) + goto err; + + return vrf_local_xmit(skb, vrf_dev, dst); + } + + skb_dst_set(skb, &rt->dst); + + /* strip the ethernet header added for pass through VRF device */ + __skb_pull(skb, skb_network_offset(skb));
if (!ip4h->saddr) { ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0, @@@ -200,9 -307,6 +307,6 @@@ err
static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev) { - /* strip the ethernet header added for pass through VRF device */ - __skb_pull(skb, skb_network_offset(skb)); - switch (skb->protocol) { case htons(ETH_P_IP): return vrf_process_v4_outbound(skb, dev); @@@ -274,45 -378,92 +378,92 @@@ static int vrf_output6(struct net *net }
/* holding rtnl */ - static void vrf_rt6_release(struct net_vrf *vrf) + static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf) { struct rt6_info *rt6 = rtnl_dereference(vrf->rt6); + struct rt6_info *rt6_local = rtnl_dereference(vrf->rt6_local); + struct net *net = dev_net(dev); + struct dst_entry *dst;
- rcu_assign_pointer(vrf->rt6, NULL); + RCU_INIT_POINTER(vrf->rt6, NULL); + RCU_INIT_POINTER(vrf->rt6_local, NULL); + synchronize_rcu(); + + /* move dev in dst's to loopback so this VRF device can be deleted + * - based on dst_ifdown + */ + if (rt6) { + dst = &rt6->dst; + dev_put(dst->dev); + dst->dev = net->loopback_dev; + dev_hold(dst->dev); + dst_release(dst); + }
- if (rt6) - dst_release(&rt6->dst); + if (rt6_local) { + if (rt6_local->rt6i_idev) + in6_dev_put(rt6_local->rt6i_idev); + + dst = &rt6_local->dst; + dev_put(dst->dev); + dst->dev = net->loopback_dev; + dev_hold(dst->dev); + dst_release(dst); + } }
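Releasing the cached dsts follows the dst_ifdown pattern the comment cites: other code may still hold references, so each dst is retargeted at the loopback device before the VRF drops its own reference, letting the VRF netdev actually unregister. The same shape repeats for IPv4 in vrf_rtable_release() below. The move in isolation (sketch):

static void retarget_dst_to_loopback(struct dst_entry *dst, struct net *net)
{
	dev_put(dst->dev);		/* release the dying VRF device */
	dst->dev = net->loopback_dev;	/* park remaining holders on lo */
	dev_hold(dst->dev);
	dst_release(dst);		/* finally drop our own reference */
}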
static int vrf_rt6_create(struct net_device *dev) { + int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE; struct net_vrf *vrf = netdev_priv(dev); struct net *net = dev_net(dev); struct fib6_table *rt6i_table; - struct rt6_info *rt6; + struct rt6_info *rt6, *rt6_local; int rc = -ENOMEM;
+ /* IPv6 can be CONFIG enabled and then disabled runtime */ + if (!ipv6_mod_enabled()) + return 0; + rt6i_table = fib6_new_table(net, vrf->tb_id); if (!rt6i_table) goto out;
- rt6 = ip6_dst_alloc(net, dev, - DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE); + /* create a dst for routing packets out a VRF device */ + rt6 = ip6_dst_alloc(net, dev, flags); if (!rt6) goto out;
dst_hold(&rt6->dst);
rt6->rt6i_table = rt6i_table; - rt6->dst.output = vrf_output6; + rt6->dst.output = vrf_output6; + + /* create a dst for local routing - packets sent locally + * to local address via the VRF device as a loopback + */ + rt6_local = ip6_dst_alloc(net, dev, flags); + if (!rt6_local) { + dst_release(&rt6->dst); + goto out; + } + + dst_hold(&rt6_local->dst); + + rt6_local->rt6i_idev = in6_dev_get(dev); + rt6_local->rt6i_flags = RTF_UP | RTF_NONEXTHOP | RTF_LOCAL; + rt6_local->rt6i_table = rt6i_table; + rt6_local->dst.input = ip6_input; + rcu_assign_pointer(vrf->rt6, rt6); + rcu_assign_pointer(vrf->rt6_local, rt6_local);
rc = 0; out: return rc; } #else - static void vrf_rt6_release(struct net_vrf *vrf) + static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf) { }
@@@ -381,32 -532,66 +532,66 @@@ static int vrf_output(struct net *net, }
/* holding rtnl */ - static void vrf_rtable_release(struct net_vrf *vrf) + static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf) { struct rtable *rth = rtnl_dereference(vrf->rth); + struct rtable *rth_local = rtnl_dereference(vrf->rth_local); + struct net *net = dev_net(dev); + struct dst_entry *dst;
- rcu_assign_pointer(vrf->rth, NULL); + RCU_INIT_POINTER(vrf->rth, NULL); + RCU_INIT_POINTER(vrf->rth_local, NULL); + synchronize_rcu(); + + /* move dev in dst's to loopback so this VRF device can be deleted + * - based on dst_ifdown + */ + if (rth) { + dst = &rth->dst; + dev_put(dst->dev); + dst->dev = net->loopback_dev; + dev_hold(dst->dev); + dst_release(dst); + }
- if (rth) - dst_release(&rth->dst); + if (rth_local) { + dst = &rth_local->dst; + dev_put(dst->dev); + dst->dev = net->loopback_dev; + dev_hold(dst->dev); + dst_release(dst); + } }
static int vrf_rtable_create(struct net_device *dev) { struct net_vrf *vrf = netdev_priv(dev); - struct rtable *rth; + struct rtable *rth, *rth_local;
if (!fib_new_table(dev_net(dev), vrf->tb_id)) return -ENOMEM;
+ /* create a dst for routing packets out through a VRF device */ rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0); if (!rth) return -ENOMEM;
+ /* create a dst for local ingress routing - packets sent locally + * to local address via the VRF device as a loopback + */ + rth_local = rt_dst_alloc(dev, RTCF_LOCAL, RTN_LOCAL, 1, 1, 0); + if (!rth_local) { + dst_release(&rth->dst); + return -ENOMEM; + } + - rth->dst.output = vrf_output; + rth->dst.output = vrf_output; rth->rt_table_id = vrf->tb_id;
+ rth_local->rt_table_id = vrf->tb_id; + rcu_assign_pointer(vrf->rth, rth); + rcu_assign_pointer(vrf->rth_local, rth_local);
return 0; } @@@ -477,8 -662,8 +662,8 @@@ static void vrf_dev_uninit(struct net_d struct net_device *port_dev; struct list_head *iter;
- vrf_rtable_release(vrf); - vrf_rt6_release(vrf); + vrf_rtable_release(dev, vrf); + vrf_rt6_release(dev, vrf);
netdev_for_each_lower_dev(dev, port_dev, iter) vrf_del_slave(dev, port_dev); @@@ -504,10 -689,16 +689,16 @@@ static int vrf_dev_init(struct net_devi
dev->flags = IFF_MASTER | IFF_NOARP;
+ /* MTU is irrelevant for VRF device; set to 64k similar to lo */ + dev->mtu = 64 * 1024; + + /* similarly, oper state is irrelevant; set to up to avoid confusion */ + dev->operstate = IF_OPER_UP; + netdev_lockdep_set_classes(dev); return 0;
out_rth: - vrf_rtable_release(vrf); + vrf_rtable_release(dev, vrf); out_stats: free_percpu(dev->dstats); dev->dstats = NULL; @@@ -623,11 -814,78 +814,78 @@@ out return rc; }
+ static struct rt6_info *vrf_ip6_route_lookup(struct net *net, + const struct net_device *dev, + struct flowi6 *fl6, + int ifindex, + int flags) + { + struct net_vrf *vrf = netdev_priv(dev); + struct fib6_table *table = NULL; + struct rt6_info *rt6; + + rcu_read_lock(); + + /* fib6_table does not have a refcnt and can not be freed */ + rt6 = rcu_dereference(vrf->rt6); + if (likely(rt6)) + table = rt6->rt6i_table; + + rcu_read_unlock(); + + if (!table) + return NULL; + + return ip6_pol_route(net, table, ifindex, fl6, flags); + } + + static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev, + int ifindex) + { + const struct ipv6hdr *iph = ipv6_hdr(skb); + struct flowi6 fl6 = { + .daddr = iph->daddr, + .saddr = iph->saddr, + .flowlabel = ip6_flowinfo(iph), + .flowi6_mark = skb->mark, + .flowi6_proto = iph->nexthdr, + .flowi6_iif = ifindex, + }; + struct net *net = dev_net(vrf_dev); + struct rt6_info *rt6; + + rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, + RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE); + if (unlikely(!rt6)) + return; + + if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst)) + return; + + skb_dst_set(skb, &rt6->dst); + } + static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, struct sk_buff *skb) { - /* if packet is NDISC keep the ingress interface */ - if (!ipv6_ndisc_frame(skb)) { + int orig_iif = skb->skb_iif; + bool need_strict; + + /* loopback traffic; do not push through packet taps again. + * Reset pkt_type for upper layers to process skb + */ + if (skb->pkt_type == PACKET_LOOPBACK) { + skb->dev = vrf_dev; + skb->skb_iif = vrf_dev->ifindex; + skb->pkt_type = PACKET_HOST; + goto out; + } + + /* if packet is NDISC or addressed to multicast or link-local + * then keep the ingress interface + */ + need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr); + if (!ipv6_ndisc_frame(skb) && !need_strict) { skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex;
@@@ -638,6 -896,10 +896,10 @@@ IP6CB(skb)->flags |= IP6SKB_L3SLAVE; }
+ if (need_strict) + vrf_ip6_input_dst(skb, vrf_dev, orig_iif); + + out: return skb; }
@@@ -655,10 -917,19 +917,19 @@@ static struct sk_buff *vrf_ip_rcv(struc skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex;
+ /* loopback traffic; do not push through packet taps again. + * Reset pkt_type for upper layers to process skb + */ + if (skb->pkt_type == PACKET_LOOPBACK) { + skb->pkt_type = PACKET_HOST; + goto out; + } + skb_push(skb, skb->mac_len); dev_queue_xmit_nit(skb, vrf_dev); skb_pull(skb, skb->mac_len);
+ out: return skb; }
@@@ -679,13 -950,37 +950,37 @@@ static struct sk_buff *vrf_l3_rcv(struc
#if IS_ENABLED(CONFIG_IPV6) static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev, - const struct flowi6 *fl6) + struct flowi6 *fl6) { + bool need_strict = rt6_need_strict(&fl6->daddr); + struct net_vrf *vrf = netdev_priv(dev); + struct net *net = dev_net(dev); struct dst_entry *dst = NULL; + struct rt6_info *rt;
- if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) { - struct net_vrf *vrf = netdev_priv(dev); - struct rt6_info *rt; + /* send to link-local or multicast address */ + if (need_strict) { + int flags = RT6_LOOKUP_F_IFACE; + + /* VRF device does not have a link-local address and + * sending packets to link-local or mcast addresses over + * a VRF device does not make sense + */ + if (fl6->flowi6_oif == dev->ifindex) { + struct dst_entry *dst = &net->ipv6.ip6_null_entry->dst; + + dst_hold(dst); + return dst; + } + + if (!ipv6_addr_any(&fl6->saddr)) + flags |= RT6_LOOKUP_F_HAS_SADDR; + + rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, flags); + if (rt) + dst = &rt->dst; + + } else if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) {
rcu_read_lock();
@@@ -698,8 -993,52 +993,52 @@@ rcu_read_unlock(); }
+ /* make sure oif is set to VRF device for lookup */ + if (!need_strict) + fl6->flowi6_oif = dev->ifindex; + return dst; } + + /* called under rcu_read_lock */ + static int vrf_get_saddr6(struct net_device *dev, const struct sock *sk, + struct flowi6 *fl6) + { + struct net *net = dev_net(dev); + struct dst_entry *dst; + struct rt6_info *rt; + int err; + + if (rt6_need_strict(&fl6->daddr)) { + rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, + RT6_LOOKUP_F_IFACE); + if (unlikely(!rt)) + return 0; + + dst = &rt->dst; + } else { + __u8 flags = fl6->flowi6_flags; + + fl6->flowi6_flags |= FLOWI_FLAG_L3MDEV_SRC; + fl6->flowi6_flags |= FLOWI_FLAG_SKIP_NH_OIF; + + dst = ip6_route_output(net, sk, fl6); + rt = (struct rt6_info *)dst; + + fl6->flowi6_flags = flags; + } + + err = dst->error; + if (!err) { + err = ip6_route_get_saddr(net, rt, &fl6->daddr, + sk ? inet6_sk(sk)->srcprefs : 0, + &fl6->saddr); + } + + dst_release(dst); + + return err; + } #endif
static const struct l3mdev_ops vrf_l3mdev_ops = { @@@ -709,6 -1048,7 +1048,7 @@@ .l3mdev_l3_rcv = vrf_l3_rcv, #if IS_ENABLED(CONFIG_IPV6) .l3mdev_get_rt6_dst = vrf_get_rt6_dst, + .l3mdev_get_saddr6 = vrf_get_saddr6, #endif };
@@@ -723,6 -1063,94 +1063,94 @@@ static const struct ethtool_ops vrf_eth .get_drvinfo = vrf_get_drvinfo, };
+ static inline size_t vrf_fib_rule_nl_size(void) + { + size_t sz; + + sz = NLMSG_ALIGN(sizeof(struct fib_rule_hdr)); + sz += nla_total_size(sizeof(u8)); /* FRA_L3MDEV */ + sz += nla_total_size(sizeof(u32)); /* FRA_PRIORITY */ + + return sz; + } + + static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it) + { + struct fib_rule_hdr *frh; + struct nlmsghdr *nlh; + struct sk_buff *skb; + int err; + + if (family == AF_INET6 && !ipv6_mod_enabled()) + return 0; + + skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0); + if (!nlh) + goto nla_put_failure; + + /* rule only needs to appear once */ + nlh->nlmsg_flags &= NLM_F_EXCL; + + frh = nlmsg_data(nlh); + memset(frh, 0, sizeof(*frh)); + frh->family = family; + frh->action = FR_ACT_TO_TBL; + + if (nla_put_u32(skb, FRA_L3MDEV, 1)) + goto nla_put_failure; + + if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF)) + goto nla_put_failure; + + nlmsg_end(skb, nlh); + + /* fib_nl_{new,del}rule handling looks for net from skb->sk */ + skb->sk = dev_net(dev)->rtnl; + if (add_it) { + err = fib_nl_newrule(skb, nlh); + if (err == -EEXIST) + err = 0; + } else { + err = fib_nl_delrule(skb, nlh); + if (err == -ENOENT) + err = 0; + } + nlmsg_free(skb); + + return err; + + nla_put_failure: + nlmsg_free(skb); + + return -EMSGSIZE; + } + + static int vrf_add_fib_rules(const struct net_device *dev) + { + int err; + + err = vrf_fib_rule(dev, AF_INET, true); + if (err < 0) + goto out_err; + + err = vrf_fib_rule(dev, AF_INET6, true); + if (err < 0) + goto ipv6_err; + + return 0; + + ipv6_err: + vrf_fib_rule(dev, AF_INET, false); + + out_err: + netdev_err(dev, "Failed to add FIB rules.\n"); + return err; + } + static void vrf_setup(struct net_device *dev) { ether_setup(dev); @@@ -741,6 -1169,20 +1169,20 @@@
/* don't allow vrf devices to change network namespaces. */ dev->features |= NETIF_F_NETNS_LOCAL; + + /* does not make sense for a VLAN to be added to a vrf device */ + dev->features |= NETIF_F_VLAN_CHALLENGED; + + /* enable offload features */ + dev->features |= NETIF_F_GSO_SOFTWARE; + dev->features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM; + dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA; + + dev->hw_features = dev->features; + dev->hw_enc_features = dev->features; + + /* default to no qdisc; user can add if desired */ + dev->priv_flags |= IFF_NO_QUEUE; }
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[]) @@@ -763,6 -1205,7 +1205,7 @@@ static int vrf_newlink(struct net *src_ struct nlattr *tb[], struct nlattr *data[]) { struct net_vrf *vrf = netdev_priv(dev); + int err;
if (!data || !data[IFLA_VRF_TABLE]) return -EINVAL; @@@ -771,7 -1214,21 +1214,21 @@@
dev->priv_flags |= IFF_L3MDEV_MASTER;
- return register_netdevice(dev); + err = register_netdevice(dev); + if (err) + goto out; + + if (add_fib_rules) { + err = vrf_add_fib_rules(dev); + if (err) { + unregister_netdevice(dev); + goto out; + } + add_fib_rules = false; + } + + out: + return err; }
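vrf_newlink() now installs the l3mdev FIB rules itself: it builds an RTM_NEWRULE request in-kernel and feeds it directly to fib_nl_newrule(). The add_fib_rules latch means only the first VRF device ever does this, and treating -EEXIST/-ENOENT as success keeps it idempotent against an admin who has already added equivalent rules. The construction skeleton (error checks trimmed; vrf_fib_rule() above is the full version):

	skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
	nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
	frh = nlmsg_data(nlh);
	memset(frh, 0, sizeof(*frh));
	frh->family = family;			/* AF_INET, then AF_INET6 */
	frh->action = FR_ACT_TO_TBL;
	nla_put_u32(skb, FRA_L3MDEV, 1);	/* table comes from the l3mdev */
	nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF);
	nlmsg_end(skb, nlh);

	skb->sk = dev_net(dev)->rtnl;	/* fib_nl_*rule derive the net from skb->sk */
	err = fib_nl_newrule(skb, nlh);	/* -EEXIST counts as success */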
static size_t vrf_nl_getsize(const struct net_device *dev) diff --combined drivers/net/vxlan.c index b3b9db6,abb9cd2..ae7455d --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@@ -11,32 -11,18 +11,18 @@@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h> - #include <linux/types.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/slab.h> - #include <linux/skbuff.h> - #include <linux/rculist.h> - #include <linux/netdevice.h> - #include <linux/in.h> - #include <linux/ip.h> #include <linux/udp.h> #include <linux/igmp.h> - #include <linux/etherdevice.h> #include <linux/if_ether.h> - #include <linux/if_vlan.h> - #include <linux/hash.h> #include <linux/ethtool.h> #include <net/arp.h> #include <net/ndisc.h> #include <net/ip.h> - #include <net/ip_tunnels.h> #include <net/icmp.h> - #include <net/udp.h> - #include <net/udp_tunnel.h> #include <net/rtnetlink.h> - #include <net/route.h> - #include <net/dsfield.h> #include <net/inet_ecn.h> #include <net/net_namespace.h> #include <net/netns/generic.h> @@@ -44,12 -30,9 +30,9 @@@ #include <net/protocol.h>
#if IS_ENABLED(CONFIG_IPV6) - #include <net/ipv6.h> - #include <net/addrconf.h> #include <net/ip6_tunnel.h> #include <net/ip6_checksum.h> #endif - #include <net/dst_metadata.h>
#define VXLAN_VERSION "0.1"
@@@ -619,42 -602,6 +602,6 @@@ static int vxlan_gro_complete(struct so return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); }
- /* Notify netdevs that UDP port started listening */ - static void vxlan_notify_add_rx_port(struct vxlan_sock *vs) - { - struct net_device *dev; - struct sock *sk = vs->sock->sk; - struct net *net = sock_net(sk); - sa_family_t sa_family = vxlan_get_sk_family(vs); - __be16 port = inet_sk(sk)->inet_sport; - - rcu_read_lock(); - for_each_netdev_rcu(net, dev) { - if (dev->netdev_ops->ndo_add_vxlan_port) - dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family, - port); - } - rcu_read_unlock(); - } - - /* Notify netdevs that UDP port is no more listening */ - static void vxlan_notify_del_rx_port(struct vxlan_sock *vs) - { - struct net_device *dev; - struct sock *sk = vs->sock->sk; - struct net *net = sock_net(sk); - sa_family_t sa_family = vxlan_get_sk_family(vs); - __be16 port = inet_sk(sk)->inet_sport; - - rcu_read_lock(); - for_each_netdev_rcu(net, dev) { - if (dev->netdev_ops->ndo_del_vxlan_port) - dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family, - port); - } - rcu_read_unlock(); - } - /* Add new entry to forwarding table -- assumes lock held */ static int vxlan_fdb_create(struct vxlan_dev *vxlan, const u8 *mac, union vxlan_addr *ip, @@@ -1050,7 -997,10 +997,10 @@@ static bool __vxlan_sock_release_prep(s vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id); spin_lock(&vn->sock_lock); hlist_del_rcu(&vs->hlist); - vxlan_notify_del_rx_port(vs); + udp_tunnel_notify_del_rx_port(vs->sock, + (vs->flags & VXLAN_F_GPE) ? + UDP_TUNNEL_TYPE_VXLAN_GPE : + UDP_TUNNEL_TYPE_VXLAN); spin_unlock(&vn->sock_lock);
return true; @@@ -2525,30 -2475,24 +2475,24 @@@ static struct device_type vxlan_type = .name = "vxlan", };
- /* Calls the ndo_add_vxlan_port of the caller in order to + /* Calls the ndo_add_udp_enc_port of the caller in order to * supply the listening VXLAN udp ports. Callers are expected - * to implement the ndo_add_vxlan_port. + * to implement the ndo_add_udp_enc_port. */ static void vxlan_push_rx_ports(struct net_device *dev) { struct vxlan_sock *vs; struct net *net = dev_net(dev); struct vxlan_net *vn = net_generic(net, vxlan_net_id); - sa_family_t sa_family; - __be16 port; unsigned int i;
- if (!dev->netdev_ops->ndo_add_vxlan_port) - return; - spin_lock(&vn->sock_lock); for (i = 0; i < PORT_HASH_SIZE; ++i) { - hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) { - port = inet_sk(vs->sock->sk)->inet_sport; - sa_family = vxlan_get_sk_family(vs); - dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family, - port); - } + hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) + udp_tunnel_push_rx_port(dev, vs->sock, + (vs->flags & VXLAN_F_GPE) ? + UDP_TUNNEL_TYPE_VXLAN_GPE : + UDP_TUNNEL_TYPE_VXLAN); } spin_unlock(&vn->sock_lock); } @@@ -2750,7 -2694,10 +2694,10 @@@ static struct vxlan_sock *vxlan_socket_
spin_lock(&vn->sock_lock); hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); - vxlan_notify_add_rx_port(vs); + udp_tunnel_notify_add_rx_port(sock, + (vs->flags & VXLAN_F_GPE) ? + UDP_TUNNEL_TYPE_VXLAN_GPE : + UDP_TUNNEL_TYPE_VXLAN); spin_unlock(&vn->sock_lock);
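vxlan's conversion mirrors geneve's, with one refinement: GPE sockets are advertised as UDP_TUNNEL_TYPE_VXLAN_GPE, so a NIC that only parses classic VXLAN is never told to offload ports it would misinterpret. On the consuming side a driver's callback can discriminate by type; a sketch of a hypothetical consumer:

static void nic_udp_tunnel_add(struct net_device *dev,
			       struct udp_tunnel_info *ti)
{
	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		/* program ti->port into the VXLAN parser */
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		/* program ti->port into the Geneve parser */
		break;
	default:
		/* e.g. VXLAN-GPE: leave it un-offloaded */
		break;
	}
}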
/* Mark socket as an encapsulation socket. */ @@@ -2952,6 -2899,30 +2899,6 @@@ static int vxlan_dev_configure(struct n return 0; }
-struct net_device *vxlan_dev_create(struct net *net, const char *name, - u8 name_assign_type, struct vxlan_config *conf) -{ - struct nlattr *tb[IFLA_MAX+1]; - struct net_device *dev; - int err; - - memset(&tb, 0, sizeof(tb)); - - dev = rtnl_create_link(net, name, name_assign_type, - &vxlan_link_ops, tb); - if (IS_ERR(dev)) - return dev; - - err = vxlan_dev_configure(net, dev, conf); - if (err < 0) { - free_netdev(dev); - return ERR_PTR(err); - } - - return dev; -} -EXPORT_SYMBOL_GPL(vxlan_dev_create); - static int vxlan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { @@@ -3244,40 -3215,6 +3191,40 @@@ static struct rtnl_link_ops vxlan_link_ .get_link_net = vxlan_get_link_net, };
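The block below re-adds vxlan_dev_create() after vxlan_link_ops so it can reference the ops, and adopts the same unwind rule geneve_dev_create_fb() gained earlier in this merge: once the netdev has been registered, a late failure must be torn down through dellink plus unregister_netdevice_many(), not free_netdev(). The pattern, abstracted (the driver hooks are illustrative names):

	dev = rtnl_create_link(net, name, name_assign_type, &my_link_ops, tb);
	if (IS_ERR(dev))
		return dev;

	err = my_dev_configure(net, dev, conf);	/* registers the netdev */
	if (err < 0) {
		free_netdev(dev);		/* not registered yet: plain free */
		return ERR_PTR(err);
	}

	err = rtnl_configure_link(dev, NULL);	/* finish setup, notify userspace */
	if (err < 0) {
		LIST_HEAD(list_kill);

		my_dellink(dev, &list_kill);	/* registered: full teardown */
		unregister_netdevice_many(&list_kill);
		return ERR_PTR(err);
	}

	return dev;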
+struct net_device *vxlan_dev_create(struct net *net, const char *name, + u8 name_assign_type, + struct vxlan_config *conf) +{ + struct nlattr *tb[IFLA_MAX + 1]; + struct net_device *dev; + int err; + + memset(&tb, 0, sizeof(tb)); + + dev = rtnl_create_link(net, name, name_assign_type, + &vxlan_link_ops, tb); + if (IS_ERR(dev)) + return dev; + + err = vxlan_dev_configure(net, dev, conf); + if (err < 0) { + free_netdev(dev); + return ERR_PTR(err); + } + + err = rtnl_configure_link(dev, NULL); + if (err < 0) { + LIST_HEAD(list_kill); + + vxlan_dellink(dev, &list_kill); + unregister_netdevice_many(&list_kill); + return ERR_PTR(err); + } + + return dev; +} +EXPORT_SYMBOL_GPL(vxlan_dev_create); + static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn, struct net_device *dev) { @@@ -3308,7 -3245,7 +3255,7 @@@ static int vxlan_netdevice_event(struc
if (event == NETDEV_UNREGISTER) vxlan_handle_lowerdev_unregister(vn, dev); - else if (event == NETDEV_OFFLOAD_PUSH_VXLAN) + else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO) vxlan_push_rx_ports(dev);
return NOTIFY_DONE; diff --combined drivers/net/wireless/ath/ath10k/core.c index a92a0ba,c6291c2..dfb3db0 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@@ -18,6 -18,7 +18,7 @@@ #include <linux/module.h> #include <linux/firmware.h> #include <linux/of.h> + #include <asm/byteorder.h>
#include "core.h" #include "mac.h" @@@ -55,7 -56,7 +56,7 @@@ static const struct ath10k_hw_params at .name = "qca988x hw2.0", .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, .uart_pin = 7, - .has_shifted_cc_wraparound = true, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, @@@ -69,6 -70,25 +70,25 @@@ }, }, { + .id = QCA9887_HW_1_0_VERSION, + .dev_id = QCA9887_1_0_DEVICE_ID, + .name = "qca9887 hw1.0", + .patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, + .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER, + .cal_data_len = 2116, + .fw = { + .dir = QCA9887_HW_1_0_FW_DIR, + .board = QCA9887_HW_1_0_BOARD_DATA_FILE, + .board_size = QCA9887_BOARD_DATA_SZ, + .board_ext_size = QCA9887_BOARD_EXT_DATA_SZ, + }, + }, + { .id = QCA6174_HW_2_1_VERSION, .dev_id = QCA6164_2_1_DEVICE_ID, .name = "qca6164 hw2.1", @@@ -148,6 -168,7 +168,7 @@@ .uart_pin = 7, .otp_exe_param = 0x00000700, .continuous_frag_desc = true, + .cck_rate_map_rev2 = true, .channel_counters_freq_hz = 150000, .max_probe_resp_desc_thres = 24, .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, @@@ -163,6 -184,29 +184,29 @@@ }, }, { + .id = QCA9984_HW_1_0_DEV_VERSION, + .dev_id = QCA9984_1_0_DEVICE_ID, + .name = "qca9984/qca9994 hw1.0", + .patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .otp_exe_param = 0x00000700, + .continuous_frag_desc = true, + .cck_rate_map_rev2 = true, + .channel_counters_freq_hz = 150000, + .max_probe_resp_desc_thres = 24, + .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, + .tx_chain_mask = 0xf, + .rx_chain_mask = 0xf, + .max_spatial_stream = 4, + .cal_data_len = 12064, + .fw = { + .dir = QCA9984_HW_1_0_FW_DIR, + .board = QCA9984_HW_1_0_BOARD_DATA_FILE, + .board_size = QCA99X0_BOARD_DATA_SZ, + .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ, + }, + }, + { .id = QCA9377_HW_1_0_DEV_VERSION, .dev_id = QCA9377_1_0_DEVICE_ID, .name = "qca9377 hw1.0", @@@ -202,9 -246,10 +246,10 @@@ .name = "qca4019 hw1.0", .patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR, .uart_pin = 7, - .has_shifted_cc_wraparound = true, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH, .otp_exe_param = 0x0010000, .continuous_frag_desc = true, + .cck_rate_map_rev2 = true, .channel_counters_freq_hz = 125000, .max_probe_resp_desc_thres = 24, .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, @@@ -236,6 -281,7 +281,7 @@@ static const char *const ath10k_core_fw [ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca", [ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp", [ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl", + [ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param", };
static unsigned int ath10k_core_get_fw_feature_str(char *buf, @@@ -531,6 -577,35 +577,35 @@@ out return ret; }
+ static int ath10k_download_cal_eeprom(struct ath10k *ar) + { + size_t data_len; + void *data = NULL; + int ret; + + ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &data_len); + if (ret) { + if (ret != -EOPNOTSUPP) + ath10k_warn(ar, "failed to read calibration data from EEPROM: %d\n", + ret); + goto out_free; + } + + ret = ath10k_download_board_data(ar, data, data_len); + if (ret) { + ath10k_warn(ar, "failed to download calibration data from EEPROM: %d\n", + ret); + goto out_free; + } + + ret = 0; + + out_free: + kfree(data); + + return ret; + } + static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) { u32 result, address; @@@ -1083,7 -1158,7 +1158,7 @@@ int ath10k_core_fetch_firmware_api_n(st }
ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "", - ar->running_fw->fw_file.fw_features, + fw_file->fw_features, sizeof(fw_file->fw_features)); break; case ATH10K_FW_IE_FW_IMAGE: @@@ -1293,7 -1368,17 +1368,17 @@@ static int ath10k_download_cal_data(str }
ath10k_dbg(ar, ATH10K_DBG_BOOT, - "boot did not find DT entry, try OTP next: %d\n", + "boot did not find DT entry, try target EEPROM next: %d\n", + ret); + + ret = ath10k_download_cal_eeprom(ar); + if (ret == 0) { + ar->cal_mode = ATH10K_CAL_MODE_EEPROM; + goto done; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot did not find target EEPROM entry, try OTP next: %d\n", ret);
ret = ath10k_download_and_run_otp(ar); @@@ -1733,6 -1818,16 +1818,16 @@@ int ath10k_core_start(struct ath10k *ar if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map)) val |= WMI_10_4_BSS_CHANNEL_INFO_64;
+ /* 10.4 firmware supports BT-Coex without reloading firmware + * via pdev param. To support Bluetooth coexistence pdev param, + * WMI_COEX_GPIO_SUPPORT of extended resource config should be + * enabled always. + */ + if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) && + test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, + ar->running_fw->fw_file.fw_features)) + val |= WMI_10_4_COEX_GPIO_SUPPORT; + status = ath10k_mac_ext_resource_config(ar, val); if (status) { ath10k_err(ar, @@@ -2062,6 -2157,7 +2157,7 @@@ struct ath10k *ath10k_core_create(size_
switch (hw_rev) { case ATH10K_HW_QCA988X: + case ATH10K_HW_QCA9887: ar->regs = &qca988x_regs; ar->hw_values = &qca988x_values; break; @@@ -2071,6 -2167,7 +2167,7 @@@ ar->hw_values = &qca6174_values; break; case ATH10K_HW_QCA99X0: + case ATH10K_HW_QCA9984: ar->regs = &qca99x0_regs; ar->hw_values = &qca99x0_values; break; @@@ -2159,5 -2256,5 +2256,5 @@@ void ath10k_core_destroy(struct ath10k EXPORT_SYMBOL(ath10k_core_destroy);
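Besides the new QCA9887 and QCA9984 hw_params entries (and the cc_wraparound_type enum that replaces the old has_shifted_cc_wraparound bool), core.c gains a calibration source between Device Tree and OTP: a blob fetched over HIF from the target's EEPROM. Per the boot debug messages above, the fallback chain now reads roughly as follows (a sketch of ath10k_download_cal_data(); the earlier cal-file and DT attempts are elided):

	/* ... calibration file and Device Tree attempts come first ... */

	ret = ath10k_download_cal_eeprom(ar);	/* new: ath10k_hif_fetch_cal_eeprom() */
	if (ret == 0) {
		ar->cal_mode = ATH10K_CAL_MODE_EEPROM;
		goto done;
	}

	/* final fallback: download and run the OTP image */
	ret = ath10k_download_and_run_otp(ar);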
MODULE_AUTHOR("Qualcomm Atheros"); - MODULE_DESCRIPTION("Core module for QCA988X PCIe devices."); + MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11ac wireless LAN cards."); MODULE_LICENSE("Dual BSD/GPL"); diff --combined drivers/net/wireless/ath/ath10k/htt_rx.c index 813cdd2,3b35c7a..80e6453 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@@ -748,7 -748,7 +748,7 @@@ ath10k_htt_rx_h_peer_channel(struct ath if (WARN_ON_ONCE(!arvif)) return NULL;
- if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) + if (WARN_ON_ONCE(ath10k_mac_vif_chan(arvif->vif, &def))) return NULL;
return def.chan; @@@ -939,7 -939,8 +939,8 @@@ static void ath10k_process_rx(struct at is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? "mcast" : "ucast", (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, - status->flag == 0 ? "legacy" : "", + (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) == 0 ? + "legacy" : "", status->flag & RX_FLAG_HT ? "ht" : "", status->flag & RX_FLAG_VHT ? "vht" : "", status->flag & RX_FLAG_40MHZ ? "40" : "", @@@ -1904,6 -1905,7 +1905,6 @@@ static void ath10k_htt_rx_in_ord_ind(st return; } } - ath10k_htt_rx_msdu_buff_replenish(htt); }
static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar, @@@ -2181,34 -2183,6 +2182,6 @@@ static void ath10k_htt_rx_tx_mode_switc ath10k_mac_tx_push_pending(ar); }
- static inline enum nl80211_band phy_mode_to_band(u32 phy_mode) - { - enum nl80211_band band; - - switch (phy_mode) { - case MODE_11A: - case MODE_11NA_HT20: - case MODE_11NA_HT40: - case MODE_11AC_VHT20: - case MODE_11AC_VHT40: - case MODE_11AC_VHT80: - band = NL80211_BAND_5GHZ; - break; - case MODE_11G: - case MODE_11B: - case MODE_11GONLY: - case MODE_11NG_HT20: - case MODE_11NG_HT40: - case MODE_11AC_VHT20_2G: - case MODE_11AC_VHT40_2G: - case MODE_11AC_VHT80_2G: - default: - band = NL80211_BAND_2GHZ; - } - - return band; - } - void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) { bool release; @@@ -2290,7 -2264,6 +2263,6 @@@ bool ath10k_htt_t2h_msg_handler(struct ath10k_htt_tx_mgmt_dec_pending(htt); spin_unlock_bh(&htt->tx_lock); } - ath10k_mac_tx_push_pending(ar); break; } case HTT_T2H_MSG_TYPE_TX_COMPL_IND: @@@ -2441,8 -2414,6 +2413,6 @@@ static void ath10k_htt_txrx_compl_task( dev_kfree_skb_any(skb); }
- ath10k_mac_tx_push_pending(ar); - num_mpdus = atomic_read(&htt->num_mpdus_ready);
while (num_mpdus) { diff --combined drivers/net/wireless/ath/ath10k/mac.c index 4040f94,3a170b1..d4b7a16 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@@ -62,6 -62,32 +62,32 @@@ static struct ieee80211_rate ath10k_rat { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M }, };
+ static struct ieee80211_rate ath10k_rates_rev2[] = { + { .bitrate = 10, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M }, + { .bitrate = 20, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M, + .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M, + .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M, + .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + + { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M }, + { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M }, + { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M }, + { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M }, + { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M }, + { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M }, + { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M }, + { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M }, + }; + #define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX) @@@ -70,6 -96,9 +96,9 @@@ #define ath10k_g_rates (ath10k_rates + 0) #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
+ #define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0) + #define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2)) + static bool ath10k_mac_bitrate_is_cck(int bitrate) { switch (bitrate) { @@@ -679,10 -708,10 +708,10 @@@ static int ath10k_peer_create(struct at
peer = ath10k_peer_find(ar, vdev_id, addr); if (!peer) { + spin_unlock_bh(&ar->data_lock); ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n", addr, vdev_id); ath10k_wmi_peer_delete(ar, vdev_id, addr); - spin_unlock_bh(&ar->data_lock); return -ENOENT; }
@@@ -3781,6 -3810,9 +3810,9 @@@ void ath10k_mac_tx_push_pending(struct int ret; int max;
+ if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2)) + return; + spin_lock_bh(&ar->txqs_lock); rcu_read_lock();
@@@ -4051,9 -4083,7 +4083,7 @@@ static void ath10k_mac_op_wake_tx_queue list_add_tail(&artxq->list, &ar->txqs); spin_unlock_bh(&ar->txqs_lock);
- if (ath10k_mac_tx_can_push(hw, txq)) - tasklet_schedule(&ar->htt.txrx_compl_task); - + ath10k_mac_tx_push_pending(ar); ath10k_htt_tx_txq_update(hw, txq); }
@@@ -4467,6 -4497,19 +4497,19 @@@ static int ath10k_start(struct ieee8021 } }
+ param = ar->wmi.pdev_param->enable_btcoex; + if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) && + test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, + ar->running_fw->fw_file.fw_features)) { + ret = ath10k_wmi_pdev_set_param(ar, param, 0); + if (ret) { + ath10k_warn(ar, + "failed to set btcoex param: %d\n", ret); + goto err_core_stop; + } + clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); + } + ar->num_started_vdevs = 0; ath10k_regd_update(ar);
@@@ -7695,8 -7738,14 +7738,14 @@@ int ath10k_mac_register(struct ath10k * band = &ar->mac.sbands[NL80211_BAND_2GHZ]; band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels); band->channels = channels; - band->n_bitrates = ath10k_g_rates_size; - band->bitrates = ath10k_g_rates; + + if (ar->hw_params.cck_rate_map_rev2) { + band->n_bitrates = ath10k_g_rates_rev2_size; + band->bitrates = ath10k_g_rates_rev2; + } else { + band->n_bitrates = ath10k_g_rates_size; + band->bitrates = ath10k_g_rates; + }
ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band; } diff --combined include/linux/acpi.h index ad2564d,4d4bb49..75c7b5b --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@@ -208,6 -208,7 +208,6 @@@ void acpi_boot_table_init (void) int acpi_mps_check (void); int acpi_numa_init (void);
-void early_acpi_table_init(void *data, size_t size); int acpi_table_init (void); int acpi_table_parse(char *id, acpi_tbl_table_handler handler); int __init acpi_parse_entries(char *id, unsigned long table_size, @@@ -231,26 -232,12 +231,26 @@@ int acpi_table_parse_madt(enum acpi_mad int acpi_parse_mcfg (struct acpi_table_header *header); void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
-/* the following four functions are architecture-dependent */ +/* the following numa functions are architecture-dependent */ void acpi_numa_slit_init (struct acpi_table_slit *slit); + +#if defined(CONFIG_X86) || defined(CONFIG_IA64) void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); +#else +static inline void +acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) { } +#endif + void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); + +#ifdef CONFIG_ARM64 +void acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa); +#else +static inline void +acpi_numa_gicc_affinity_init(struct acpi_srat_gicc_affinity *pa) { } +#endif + int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); -void acpi_numa_arch_fixup(void);
#ifndef PHYS_CPUID_INVALID typedef u32 phys_cpuid_t; @@@ -556,6 -543,11 +556,11 @@@ struct platform_device *acpi_create_pla
struct fwnode_handle;
+ static inline bool acpi_dev_found(const char *hid) + { + return false; + } + static inline bool is_acpi_node(struct fwnode_handle *fwnode) { return false; @@@ -601,6 -593,7 +606,6 @@@ static inline const char *acpi_dev_name return NULL; }
-static inline void early_acpi_table_init(void *data, size_t size) { } static inline void acpi_early_init(void) { } static inline void acpi_subsystem_init(void) { }
@@@ -666,6 -659,14 +671,14 @@@ static inline bool acpi_driver_match_de return false; }
+ static inline union acpi_object *acpi_evaluate_dsm(acpi_handle handle, + const u8 *uuid, + int rev, int func, + union acpi_object *argv4) + { + return NULL; + } + static inline int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env) { @@@ -1009,10 -1010,4 +1022,10 @@@ static inline struct fwnode_handle *acp #define acpi_probe_device_table(t) ({ int __r = 0; __r;}) #endif
+#ifdef CONFIG_ACPI_TABLE_UPGRADE +void acpi_table_upgrade(void); +#else +static inline void acpi_table_upgrade(void) { } +#endif + #endif /*_LINUX_ACPI_H*/ diff --combined include/linux/bpf.h index 0de4de6,9adfef6..8411032 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@@ -11,14 -11,17 +11,17 @@@ #include <linux/workqueue.h> #include <linux/file.h> #include <linux/percpu.h> + #include <linux/err.h>
+ struct perf_event; struct bpf_map;
/* map is generic key/value storage optionally accessible by eBPF programs */ struct bpf_map_ops { /* funcs callable from userspace (via syscall) */ struct bpf_map *(*map_alloc)(union bpf_attr *attr); - void (*map_free)(struct bpf_map *); + void (*map_release)(struct bpf_map *map, struct file *map_file); + void (*map_free)(struct bpf_map *map); int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
/* funcs callable from userspace and from eBPF programs */ @@@ -27,8 -30,9 +30,9 @@@ int (*map_delete_elem)(struct bpf_map *map, void *key);
/* funcs called by prog_array and perf_event_array map */ - void *(*map_fd_get_ptr) (struct bpf_map *map, int fd); - void (*map_fd_put_ptr) (void *ptr); + void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, + int fd); + void (*map_fd_put_ptr)(void *ptr); };
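The bpf_map_ops hunk above threads the map's own struct file into the fd-based callbacks. A minimal sketch of how an implementation might use the new map_file argument — all names here are hypothetical, not the patch's actual code:

	static void *example_fd_array_get_ptr(struct bpf_map *map,
					      struct file *map_file, int fd)
	{
		struct file *file = fget(fd);

		if (!file)
			return ERR_PTR(-EBADF);
		/* Remember which map file installed this entry, so it can
		 * be flushed when that file is released via ->map_release().
		 */
		return example_entry_alloc(file, map_file); /* hypothetical */
	}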
struct bpf_map { @@@ -111,31 -115,6 +115,31 @@@ enum bpf_access_type BPF_WRITE = 2 };
+/* types of values stored in eBPF registers */ +enum bpf_reg_type { + NOT_INIT = 0, /* nothing was written into register */ + UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */ + PTR_TO_CTX, /* reg points to bpf_context */ + CONST_PTR_TO_MAP, /* reg points to struct bpf_map */ + PTR_TO_MAP_VALUE, /* reg points to map element value */ + PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */ + FRAME_PTR, /* reg == frame_pointer */ + PTR_TO_STACK, /* reg == frame_pointer + imm */ + CONST_IMM, /* constant integer value */ + + /* PTR_TO_PACKET represents: + * skb->data + * skb->data + imm + * skb->data + (u16) var + * skb->data + (u16) var + imm + * if (range > 0) then [ptr, ptr + range - off) is safe to access + * if (id > 0) means that some 'var' was added + * if (off > 0) means that 'imm' was added + */ + PTR_TO_PACKET, + PTR_TO_PACKET_END, /* skb->data + headlen */ +}; + struct bpf_prog;
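The PTR_TO_PACKET comment above describes the verifier's model for direct packet access. A minimal sketch of the access pattern it is designed to admit, assuming a tc classifier context with the __sk_buff data/data_end fields wired up later in this diff:

	static int cls_example(struct __sk_buff *skb)
	{
		void *data     = (void *)(long)skb->data;     /* PTR_TO_PACKET */
		void *data_end = (void *)(long)skb->data_end; /* PTR_TO_PACKET_END */
		struct ethhdr *eth = data;

		/* the comparison grows the verified 'range' of the packet ptr */
		if (data + sizeof(*eth) > data_end)
			return TC_ACT_OK;
		return eth->h_proto == htons(ETH_P_IP) ? TC_ACT_OK : TC_ACT_SHOT;
	}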
struct bpf_verifier_ops { @@@ -145,8 -124,7 +149,8 @@@ /* return true if 'size' wide access at offset 'off' within bpf_context * with 'type' (read or write) is allowed */ - bool (*is_valid_access)(int off, int size, enum bpf_access_type type); + bool (*is_valid_access)(int off, int size, enum bpf_access_type type, + enum bpf_reg_type *reg_type);
u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg, int src_reg, int ctx_off, @@@ -189,11 -167,19 +193,19 @@@ struct bpf_array void __percpu *pptrs[0] __aligned(8); }; }; + #define MAX_TAIL_CALL_CNT 32
+ struct bpf_event_entry { + struct perf_event *event; + struct file *perf_file; + struct file *map_file; + struct rcu_head rcu; + }; + u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5); u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); - void bpf_fd_array_map_clear(struct bpf_map *map); + bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void); @@@ -231,8 -217,13 +243,13 @@@ int bpf_percpu_hash_update(struct bpf_m u64 flags); int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, u64 flags); + int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
+ int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, + void *key, void *value, u64 map_flags); + void bpf_fd_array_map_clear(struct bpf_map *map); + /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and * forced to use 'long' read/writes to try to atomically copy long counters. * Best-effort only. No barriers here, since it _will_ race with concurrent @@@ -264,10 -255,6 +281,10 @@@ static inline struct bpf_prog *bpf_prog static inline void bpf_prog_put(struct bpf_prog *prog) { } + +static inline void bpf_prog_put_rcu(struct bpf_prog *prog) +{ +} #endif /* CONFIG_BPF_SYSCALL */
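Since the helpers in kernel/trace (hunks below) dereference array->ptrs[] under rcu_read_lock(), a bpf_event_entry must only be freed after a grace period. A hedged sketch of the teardown pattern, with hypothetical function names (the struct carries its own rcu_head for exactly this purpose):

	static void __example_event_entry_free(struct rcu_head *rcu)
	{
		struct bpf_event_entry *ee =
			container_of(rcu, struct bpf_event_entry, rcu);

		fput(ee->perf_file); /* drop the reference taken at update time */
		kfree(ee);
	}

	static void example_event_entry_free_rcu(struct bpf_event_entry *ee)
	{
		/* readers found via READ_ONCE(array->ptrs[i]) under
		 * rcu_read_lock() must never see a freed entry
		 */
		call_rcu(&ee->rcu, __example_event_entry_free);
	}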
/* verifier prototypes for helper functions called from eBPF programs */ diff --combined include/linux/mlx4/device.h index d46a0e7,4dbc145..e6f6910 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@@ -466,7 -466,6 +466,7 @@@ enum enum { MLX4_INTERFACE_STATE_UP = 1 << 0, MLX4_INTERFACE_STATE_DELETION = 1 << 1, + MLX4_INTERFACE_STATE_SHUTDOWN = 1 << 2, };
#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ @@@ -536,6 -535,7 +536,7 @@@ struct mlx4_caps int max_rq_desc_sz; int max_qp_init_rdma; int max_qp_dest_rdma; + int max_tc_eth; u32 *qp0_qkey; u32 *qp0_proxy; u32 *qp1_proxy; @@@ -1495,6 -1495,7 +1496,7 @@@ int mlx4_mr_rereg_mem_write(struct mlx4
int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, u16 offset, u16 size, u8 *data); + int mlx4_max_tc(struct mlx4_dev *dev);
/* Returns true if running in low memory profile (kdump kernel) */ static inline bool mlx4_low_memory_profile(void) diff --combined include/linux/qed/qed_eth_if.h index 6c876a6,71d523b..4475a9d --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@@ -49,7 -49,6 +49,7 @@@ struct qed_start_vport_params bool drop_ttl0; u8 vport_id; u16 mtu; + bool clear_stats; };
struct qed_stop_rxq_params { @@@ -114,6 -113,7 +114,7 @@@ struct qed_queue_start_common_params u8 vport_id; u16 sb; u16 sb_idx; + u16 vf_qid; };
struct qed_tunn_params { @@@ -128,11 -128,73 +129,73 @@@ struct qed_eth_cb_ops void (*force_mac) (void *dev, u8 *mac); };
+ #ifdef CONFIG_DCB + /* Prototype declaration of qed_eth_dcbnl_ops should match with the declaration + * of dcbnl_rtnl_ops structure. + */ + struct qed_eth_dcbnl_ops { + /* IEEE 802.1Qaz std */ + int (*ieee_getpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc); + int (*ieee_setpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc); + int (*ieee_getets)(struct qed_dev *cdev, struct ieee_ets *ets); + int (*ieee_setets)(struct qed_dev *cdev, struct ieee_ets *ets); + int (*ieee_peer_getets)(struct qed_dev *cdev, struct ieee_ets *ets); + int (*ieee_peer_getpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc); + int (*ieee_getapp)(struct qed_dev *cdev, struct dcb_app *app); + int (*ieee_setapp)(struct qed_dev *cdev, struct dcb_app *app); + + /* CEE std */ + u8 (*getstate)(struct qed_dev *cdev); + u8 (*setstate)(struct qed_dev *cdev, u8 state); + void (*getpgtccfgtx)(struct qed_dev *cdev, int prio, u8 *prio_type, + u8 *pgid, u8 *bw_pct, u8 *up_map); + void (*getpgbwgcfgtx)(struct qed_dev *cdev, int pgid, u8 *bw_pct); + void (*getpgtccfgrx)(struct qed_dev *cdev, int prio, u8 *prio_type, + u8 *pgid, u8 *bw_pct, u8 *up_map); + void (*getpgbwgcfgrx)(struct qed_dev *cdev, int pgid, u8 *bw_pct); + void (*getpfccfg)(struct qed_dev *cdev, int prio, u8 *setting); + void (*setpfccfg)(struct qed_dev *cdev, int prio, u8 setting); + u8 (*getcap)(struct qed_dev *cdev, int capid, u8 *cap); + int (*getnumtcs)(struct qed_dev *cdev, int tcid, u8 *num); + u8 (*getpfcstate)(struct qed_dev *cdev); + int (*getapp)(struct qed_dev *cdev, u8 idtype, u16 id); + u8 (*getfeatcfg)(struct qed_dev *cdev, int featid, u8 *flags); + + /* DCBX configuration */ + u8 (*getdcbx)(struct qed_dev *cdev); + void (*setpgtccfgtx)(struct qed_dev *cdev, int prio, + u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map); + void (*setpgtccfgrx)(struct qed_dev *cdev, int prio, + u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map); + void (*setpgbwgcfgtx)(struct qed_dev *cdev, int pgid, u8 bw_pct); + void (*setpgbwgcfgrx)(struct qed_dev *cdev, int pgid, u8 bw_pct); + u8 (*setall)(struct qed_dev *cdev); + int (*setnumtcs)(struct qed_dev *cdev, int tcid, u8 num); + void (*setpfcstate)(struct qed_dev *cdev, u8 state); + int (*setapp)(struct qed_dev *cdev, u8 idtype, u16 idval, u8 up); + u8 (*setdcbx)(struct qed_dev *cdev, u8 state); + u8 (*setfeatcfg)(struct qed_dev *cdev, int featid, u8 flags); + + /* Peer apps */ + int (*peer_getappinfo)(struct qed_dev *cdev, + struct dcb_peer_app_info *info, + u16 *app_count); + int (*peer_getapptable)(struct qed_dev *cdev, struct dcb_app *table); + + /* CEE peer */ + int (*cee_peer_getpfc)(struct qed_dev *cdev, struct cee_pfc *pfc); + int (*cee_peer_getpg)(struct qed_dev *cdev, struct cee_pg *pg); + }; + #endif + struct qed_eth_ops { const struct qed_common_ops *common; #ifdef CONFIG_QED_SRIOV const struct qed_iov_hv_ops *iov; #endif + #ifdef CONFIG_DCB + const struct qed_eth_dcbnl_ops *dcb; + #endif
int (*fill_dev_info)(struct qed_dev *cdev, struct qed_dev_eth_info *info); diff --combined kernel/trace/bpf_trace.c index 26f603d,037ea6e..3de25fb --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@@ -192,27 -192,22 +192,26 @@@ static u64 bpf_perf_event_read(u64 r1, { struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; struct bpf_array *array = container_of(map, struct bpf_array, map); + struct bpf_event_entry *ee; struct perf_event *event; - struct file *file;
if (unlikely(index >= array->map.max_entries)) return -E2BIG;
- file = READ_ONCE(array->ptrs[index]); - if (unlikely(!file)) + ee = READ_ONCE(array->ptrs[index]); + if (unlikely(!ee)) return -ENOENT;
- event = file->private_data; - + event = ee->event; /* make sure event is local and doesn't have pmu::count */ if (event->oncpu != smp_processor_id() || event->pmu->count) return -EINVAL;
+ if (unlikely(event->attr.type != PERF_TYPE_HARDWARE && + event->attr.type != PERF_TYPE_RAW)) + return -EINVAL; + /* * we don't know if the function is run successfully by the * return value. It can be judged in other places, such as @@@ -237,8 -232,8 +236,8 @@@ static u64 bpf_perf_event_output(u64 r1 u64 index = flags & BPF_F_INDEX_MASK; void *data = (void *) (long) r4; struct perf_sample_data sample_data; + struct bpf_event_entry *ee; struct perf_event *event; - struct file *file; struct perf_raw_record raw = { .size = size, .data = data, @@@ -251,12 -246,11 +250,11 @@@ if (unlikely(index >= array->map.max_entries)) return -E2BIG;
- file = READ_ONCE(array->ptrs[index]); - if (unlikely(!file)) + ee = READ_ONCE(array->ptrs[index]); + if (unlikely(!ee)) return -ENOENT;
- event = file->private_data; - + event = ee->event; if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE || event->attr.config != PERF_COUNT_SW_BPF_OUTPUT)) return -EINVAL; @@@ -353,8 -347,7 +351,8 @@@ static const struct bpf_func_proto *kpr }
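The added type checks mean bpf_perf_event_read() now only accepts hardware or raw counters, while bpf_perf_event_output() keeps requiring a PERF_COUNT_SW_BPF_OUTPUT software event. A user-space sketch of a counter the read helper will accept (error handling elided; bpf_map_update_elem() stands in for whichever bpf syscall wrapper is at hand):

	struct perf_event_attr attr = {
		.type   = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
		.size   = sizeof(attr),
	};
	/* CPU-bound event (pid == -1), matching the event->oncpu check above */
	int pfd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
	/* key == cpu so the program reads the counter of the CPU it runs on */
	bpf_map_update_elem(map_fd, &cpu, &pfd, BPF_ANY);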
/* bpf+kprobe programs can access fields of 'struct pt_regs' */ -static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type) +static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type, + enum bpf_reg_type *reg_type) { /* check bounds */ if (off < 0 || off >= sizeof(struct pt_regs)) @@@ -432,8 -425,7 +430,8 @@@ static const struct bpf_func_proto *tp_ } }
-static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type) +static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type, + enum bpf_reg_type *reg_type) { if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE) return false; diff --combined net/batman-adv/routing.c index 6c2901a,f75091c..396c013 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@@ -374,7 -374,6 +374,7 @@@ int batadv_recv_icmp_packet(struct sk_b if (skb_cow(skb, ETH_HLEN) < 0) goto out;
+ ethhdr = eth_hdr(skb); icmph = (struct batadv_icmp_header *)skb->data; icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmph; if (icmp_packet_rr->rr_cur >= BATADV_RR_LEN) @@@ -654,7 -653,7 +654,7 @@@ static int batadv_route_unicast_packet( len + ETH_HLEN);
ret = NET_RX_SUCCESS; - } else if (res == NET_XMIT_POLICED) { + } else if (res == -EINPROGRESS) { /* skb was buffered and consumed */ ret = NET_RX_SUCCESS; } diff --combined net/core/filter.c index c4b330c,df6860c..cb9fc16 --- a/net/core/filter.c +++ b/net/core/filter.c @@@ -748,6 -748,17 +748,17 @@@ static bool chk_code_allowed(u16 code_t return codes[code_to_probe]; }
+ static bool bpf_check_basics_ok(const struct sock_filter *filter, + unsigned int flen) + { + if (filter == NULL) + return false; + if (flen == 0 || flen > BPF_MAXINSNS) + return false; + + return true; + } + /** * bpf_check_classic - verify socket filter code * @filter: filter to verify @@@ -768,9 -779,6 +779,6 @@@ static int bpf_check_classic(const stru bool anc_found; int pc;
- if (flen == 0 || flen > BPF_MAXINSNS) - return -EINVAL; - /* Check the filter code now */ for (pc = 0; pc < flen; pc++) { const struct sock_filter *ftest = &filter[pc]; @@@ -1065,7 -1073,7 +1073,7 @@@ int bpf_prog_create(struct bpf_prog **p struct bpf_prog *fp;
/* Make sure new filter is there and in the right amounts. */ - if (fprog->filter == NULL) + if (!bpf_check_basics_ok(fprog->filter, fprog->len)) return -EINVAL;
fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); @@@ -1112,7 -1120,7 +1120,7 @@@ int bpf_prog_create_from_user(struct bp int err;
/* Make sure new filter is there and in the right amounts. */ - if (fprog->filter == NULL) + if (!bpf_check_basics_ok(fprog->filter, fprog->len)) return -EINVAL;
fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); @@@ -1207,7 -1215,6 +1215,6 @@@ stati struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk) { unsigned int fsize = bpf_classic_proglen(fprog); - unsigned int bpf_fsize = bpf_prog_size(fprog->len); struct bpf_prog *prog; int err;
@@@ -1215,10 -1222,10 +1222,10 @@@ return ERR_PTR(-EPERM);
/* Make sure new filter is there and in the right amounts. */ - if (fprog->filter == NULL) + if (!bpf_check_basics_ok(fprog->filter, fprog->len)) return ERR_PTR(-EINVAL);
- prog = bpf_prog_alloc(bpf_fsize, 0); + prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); if (!prog) return ERR_PTR(-ENOMEM);
@@@ -1603,9 -1610,36 +1610,36 @@@ static const struct bpf_func_proto bpf_ .arg5_type = ARG_ANYTHING, };
+ static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) + { + if (skb_at_tc_ingress(skb)) + skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len); + + return dev_forward_skb(dev, skb); + } + + static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) + { + int ret; + + if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) { + net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); + kfree_skb(skb); + return -ENETDOWN; + } + + skb->dev = dev; + + __this_cpu_inc(xmit_recursion); + ret = dev_queue_xmit(skb); + __this_cpu_dec(xmit_recursion); + + return ret; + } + static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5) { - struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2; + struct sk_buff *skb = (struct sk_buff *) (long) r1; struct net_device *dev;
if (unlikely(flags & ~(BPF_F_INGRESS))) @@@ -1615,19 -1649,12 +1649,12 @@@ if (unlikely(!dev)) return -EINVAL;
- skb2 = skb_clone(skb, GFP_ATOMIC); - if (unlikely(!skb2)) + skb = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!skb)) return -ENOMEM;
- if (flags & BPF_F_INGRESS) { - if (skb_at_tc_ingress(skb2)) - skb_postpush_rcsum(skb2, skb_mac_header(skb2), - skb2->mac_len); - return dev_forward_skb(dev, skb2); - } - - skb2->dev = dev; - return dev_queue_xmit(skb2); + return flags & BPF_F_INGRESS ? + __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); }
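Both redirect helpers now funnel through __bpf_rx_skb()/__bpf_tx_skb(), with the per-cpu xmit_recursion counter guarding the egress path. A sketch of a tc program exercising the rewritten helper (ifindex hard-coded purely for illustration):

	static int cls_mirror(struct __sk_buff *skb)
	{
		/* BPF_F_INGRESS: the clone lands on ifindex 2's ingress path
		 * via __bpf_rx_skb(); without the flag it is transmitted
		 * through __bpf_tx_skb(), where the recursion limit applies.
		 */
		bpf_clone_redirect(skb, 2, BPF_F_INGRESS);
		return TC_ACT_OK;
	}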
static const struct bpf_func_proto bpf_clone_redirect_proto = { @@@ -1671,15 -1698,8 +1698,8 @@@ int skb_do_redirect(struct sk_buff *skb return -EINVAL; }
- if (ri->flags & BPF_F_INGRESS) { - if (skb_at_tc_ingress(skb)) - skb_postpush_rcsum(skb, skb_mac_header(skb), - skb->mac_len); - return dev_forward_skb(dev, skb); - } - - skb->dev = dev; - return dev_queue_xmit(skb); + return ri->flags & BPF_F_INGRESS ? + __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); }
static const struct bpf_func_proto bpf_redirect_proto = { @@@ -2085,8 -2105,7 +2105,8 @@@ static bool __is_valid_access(int off, }
static bool sk_filter_is_valid_access(int off, int size, - enum bpf_access_type type) + enum bpf_access_type type, + enum bpf_reg_type *reg_type) { switch (off) { case offsetof(struct __sk_buff, tc_classid): @@@ -2109,8 -2128,7 +2129,8 @@@ }
static bool tc_cls_act_is_valid_access(int off, int size, - enum bpf_access_type type) + enum bpf_access_type type, + enum bpf_reg_type *reg_type) { if (type == BPF_WRITE) { switch (off) { @@@ -2125,16 -2143,6 +2145,16 @@@ return false; } } + + switch (off) { + case offsetof(struct __sk_buff, data): + *reg_type = PTR_TO_PACKET; + break; + case offsetof(struct __sk_buff, data_end): + *reg_type = PTR_TO_PACKET_END; + break; + } + return __is_valid_access(off, size, type); }
diff --combined net/ipv4/gre_demux.c index de1d119,c4c3e43..b798862 --- a/net/ipv4/gre_demux.c +++ b/net/ipv4/gre_demux.c @@@ -62,26 -62,26 +62,26 @@@ EXPORT_SYMBOL_GPL(gre_del_protocol)
/* Fills in tpi and returns header length to be pulled. */ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, - bool *csum_err, __be16 proto) + bool *csum_err, __be16 proto, int nhs) { const struct gre_base_hdr *greh; __be32 *options; int hdr_len;
- if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr)))) + if (unlikely(!pskb_may_pull(skb, nhs + sizeof(struct gre_base_hdr)))) return -EINVAL;
- greh = (struct gre_base_hdr *)skb_transport_header(skb); + greh = (struct gre_base_hdr *)(skb->data + nhs); if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING))) return -EINVAL;
tpi->flags = gre_flags_to_tnl_flags(greh->flags); hdr_len = gre_calc_hlen(tpi->flags);
- if (!pskb_may_pull(skb, hdr_len)) + if (!pskb_may_pull(skb, nhs + hdr_len)) return -EINVAL;
- greh = (struct gre_base_hdr *)skb_transport_header(skb); + greh = (struct gre_base_hdr *)(skb->data + nhs); tpi->proto = greh->protocol;
options = (__be32 *)(greh + 1); @@@ -117,6 -117,7 +117,7 @@@ if ((*(u8 *)options & 0xF0) != 0x40) hdr_len += 4; } + tpi->hdr_len = hdr_len; return hdr_len; } EXPORT_SYMBOL(gre_parse_header); diff --combined net/ipv4/ip_gre.c index 1d000af,8eec78f..5b1481b --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@@ -49,6 -49,12 +49,6 @@@ #include <net/gre.h> #include <net/dst_metadata.h>
-#if IS_ENABLED(CONFIG_IPV6) -#include <net/ipv6.h> -#include <net/ip6_fib.h> -#include <net/ip6_route.h> -#endif - /* Problems & solutions -------------------- @@@ -138,6 -144,7 +138,7 @@@ static void ipgre_err(struct sk_buff *s const struct iphdr *iph; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; + unsigned int data_len = 0; struct ip_tunnel *t;
switch (type) { @@@ -163,6 -170,7 +164,7 @@@ case ICMP_TIME_EXCEEDED: if (code != ICMP_EXC_TTL) return; + data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */ break;
case ICMP_REDIRECT: @@@ -181,6 -189,13 +183,13 @@@ if (!t) return;
+ #if IS_ENABLED(CONFIG_IPV6) + if (tpi->proto == htons(ETH_P_IPV6) && + !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len, + type, data_len)) + return; + #endif + if (t->parms.iph.daddr == 0 || ipv4_is_multicast(t->parms.iph.daddr)) return; @@@ -211,14 -226,12 +220,14 @@@ static void gre_err(struct sk_buff *skb * by themselves??? */
+ const struct iphdr *iph = (struct iphdr *)skb->data; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct tnl_ptk_info tpi; bool csum_err = false;
- if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP)) < 0) { + if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), + iph->ihl * 4) < 0) { if (!csum_err) /* ignore csum errors. */ return; } @@@ -334,7 -347,7 +343,7 @@@ static int gre_rcv(struct sk_buff *skb } #endif
- hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP)); + hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0); if (hdr_len < 0) goto drop;
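The new nhs argument to gre_parse_header() is the number of outer-header bytes still sitting in front of the GRE header at skb->data. The two call sites in this file illustrate the convention (a recap of the hunks above, not new code):

	/* receive path: outer IP header already pulled, GRE header first */
	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);

	/* ICMP error path: payload still starts at the outer IPv4 header */
	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP),
				   iph->ihl * 4);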
@@@ -837,17 -850,19 +846,19 @@@ out return ipgre_tunnel_validate(tb, data); }
- static void ipgre_netlink_parms(struct net_device *dev, + static int ipgre_netlink_parms(struct net_device *dev, struct nlattr *data[], struct nlattr *tb[], struct ip_tunnel_parm *parms) { + struct ip_tunnel *t = netdev_priv(dev); + memset(parms, 0, sizeof(*parms));
parms->iph.protocol = IPPROTO_GRE;
if (!data) - return; + return 0;
if (data[IFLA_GRE_LINK]) parms->link = nla_get_u32(data[IFLA_GRE_LINK]); @@@ -876,16 -891,26 +887,26 @@@ if (data[IFLA_GRE_TOS]) parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
- if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) + if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) { + if (t->ignore_df) + return -EINVAL; parms->iph.frag_off = htons(IP_DF); + }
if (data[IFLA_GRE_COLLECT_METADATA]) { - struct ip_tunnel *t = netdev_priv(dev); - t->collect_md = true; if (dev->type == ARPHRD_IPGRE) dev->type = ARPHRD_NONE; } + + if (data[IFLA_GRE_IGNORE_DF]) { + if (nla_get_u8(data[IFLA_GRE_IGNORE_DF]) + && (parms->iph.frag_off & htons(IP_DF))) + return -EINVAL; + t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]); + } + + return 0; }
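ipgre_netlink_parms() now enforces, in both directions, that IFLA_GRE_IGNORE_DF and path MTU discovery (DF set) are mutually exclusive. Assuming iproute2 exposes the new attribute as an "ignore-df" keyword (hypothetical spelling), usage would look like:

	ip link add gre1 type gretap local 192.0.2.1 remote 192.0.2.2 \
		nopmtudisc ignore-df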
/* This function returns true when ENCAP attributes are present in the nl msg */ @@@ -956,16 -981,19 +977,19 @@@ static int ipgre_newlink(struct net *sr { struct ip_tunnel_parm p; struct ip_tunnel_encap ipencap; + int err;
if (ipgre_netlink_encap_parms(data, &ipencap)) { struct ip_tunnel *t = netdev_priv(dev); - int err = ip_tunnel_encap_setup(t, &ipencap); + err = ip_tunnel_encap_setup(t, &ipencap);
if (err < 0) return err; }
- ipgre_netlink_parms(dev, data, tb, &p); + err = ipgre_netlink_parms(dev, data, tb, &p); + if (err < 0) + return err; return ip_tunnel_newlink(dev, tb, &p); }
@@@ -974,16 -1002,19 +998,19 @@@ static int ipgre_changelink(struct net_ { struct ip_tunnel_parm p; struct ip_tunnel_encap ipencap; + int err;
if (ipgre_netlink_encap_parms(data, &ipencap)) { struct ip_tunnel *t = netdev_priv(dev); - int err = ip_tunnel_encap_setup(t, &ipencap); + err = ip_tunnel_encap_setup(t, &ipencap);
if (err < 0) return err; }
- ipgre_netlink_parms(dev, data, tb, &p); + err = ipgre_netlink_parms(dev, data, tb, &p); + if (err < 0) + return err; return ip_tunnel_changelink(dev, tb, &p); }
@@@ -1020,6 -1051,8 +1047,8 @@@ static size_t ipgre_get_size(const stru nla_total_size(2) + /* IFLA_GRE_COLLECT_METADATA */ nla_total_size(0) + + /* IFLA_GRE_IGNORE_DF */ + nla_total_size(1) + 0; }
@@@ -1053,6 -1086,9 +1082,9 @@@ static int ipgre_fill_info(struct sk_bu t->encap.flags)) goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df)) + goto nla_put_failure; + if (t->collect_md) { if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA)) goto nla_put_failure; @@@ -1080,6 -1116,7 +1112,7 @@@ static const struct nla_policy ipgre_po [IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 }, [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 }, [IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG }, + [IFLA_GRE_IGNORE_DF] = { .type = NLA_U8 }, };
static struct rtnl_link_ops ipgre_link_ops __read_mostly = { @@@ -1117,7 -1154,6 +1150,7 @@@ struct net_device *gretap_fb_dev_create { struct nlattr *tb[IFLA_MAX + 1]; struct net_device *dev; + LIST_HEAD(list_kill); struct ip_tunnel *t; int err;
@@@ -1133,10 -1169,8 +1166,10 @@@ t->collect_md = true;
err = ipgre_newlink(net, dev, tb, NULL); - if (err < 0) - goto out; + if (err < 0) { + free_netdev(dev); + return ERR_PTR(err); + }
/* openvswitch users expect packet sizes to be unrestricted, * so set the largest MTU we can. @@@ -1145,14 -1179,9 +1178,14 @@@ if (err) goto out;
+ err = rtnl_configure_link(dev, NULL); + if (err < 0) + goto out; + return dev; out: - free_netdev(dev); + ip_tunnel_dellink(dev, &list_kill); + unregister_netdevice_many(&list_kill); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(gretap_fb_dev_create); diff --combined net/ipv4/tcp_output.c index e00e972,b1bcba0..b26aa87 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@@ -911,9 -911,12 +911,12 @@@ static int tcp_transmit_skb(struct soc int err;
BUG_ON(!skb || !tcp_skb_pcount(skb)); + tp = tcp_sk(sk);
if (clone_it) { skb_mstamp_get(&skb->skb_mstamp); + TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq + - tp->snd_una;
if (unlikely(skb_cloned(skb))) skb = pskb_copy(skb, gfp_mask); @@@ -924,7 -927,6 +927,6 @@@ }
inet = inet_sk(sk); - tp = tcp_sk(sk); tcb = TCP_SKB_CB(skb); memset(&opts, 0, sizeof(opts));
@@@ -2751,7 -2753,7 +2753,7 @@@ void tcp_xmit_retransmit_queue(struct s struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; struct sk_buff *hole = NULL; - u32 last_lost; + u32 max_segs, last_lost; int mib_idx; int fwd_rexmitting = 0;
@@@ -2771,7 -2773,6 +2773,7 @@@ last_lost = tp->snd_una; }
+ max_segs = tcp_tso_autosize(sk, tcp_current_mss(sk)); tcp_for_write_queue_from(skb, sk) { __u8 sacked = TCP_SKB_CB(skb)->sacked; int segs; @@@ -2785,10 -2786,6 +2787,10 @@@ segs = tp->snd_cwnd - tcp_packets_in_flight(tp); if (segs <= 0) return; + /* In case tcp_shift_skb_data() has aggregated large skbs, + * we need to make sure we are not sending too big TSO packets + */ + segs = min_t(int, segs, max_segs);
if (fwd_rexmitting) { begin_fwd: diff --combined net/ipv6/icmp.c index a4fa840,fd11f58..bd59c34 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@@ -98,7 -98,7 +98,7 @@@ static void icmpv6_err(struct sk_buff *
if (!(type & ICMPV6_INFOMSG_MASK)) if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST) - ping_err(skb, offset, info); + ping_err(skb, offset, ntohl(info)); }
static int icmpv6_rcv(struct sk_buff *skb); @@@ -388,7 -388,8 +388,8 @@@ relookup_failed /* * Send an ICMP message in response to a packet in error */ - static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) + static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct in6_addr *force_saddr) { struct net *net = dev_net(skb->dev); struct inet6_dev *idev = NULL; @@@ -475,6 -476,8 +476,8 @@@ memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_ICMPV6; fl6.daddr = hdr->saddr; + if (force_saddr) + saddr = force_saddr; if (saddr) fl6.saddr = *saddr; fl6.flowi6_mark = mark; @@@ -502,12 -505,14 +505,14 @@@ else if (!fl6.flowi6_oif) fl6.flowi6_oif = np->ucast_oif;
+ ipc6.tclass = np->tclass; + fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); + dst = icmpv6_route_lookup(net, skb, sk, &fl6); if (IS_ERR(dst)) goto out;
ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); - ipc6.tclass = np->tclass; ipc6.dontfrag = np->dontfrag; ipc6.opt = NULL;
@@@ -549,10 -554,75 +554,75 @@@ out */ void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos) { - icmp6_send(skb, ICMPV6_PARAMPROB, code, pos); + icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL); kfree_skb(skb); }
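The ip6_err_gen_icmpv6_unreach() helper added in the next hunk carries the partial RFC 4884 support: ICMPv4 reports the original-datagram length in 32-bit words (un.reserved[1], see the ipv4 hunks earlier in this diff), while ICMPv6 re-encodes it in 64-bit words in the top byte of the info field. A worked instance of the arithmetic:

	unsigned int data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* 32 -> 128 bytes */
	u32 info = (data_len / 8) << 24;                           /* 128 bytes -> 16 words */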
+ /* Generate icmpv6 with type/code ICMPV6_DEST_UNREACH/ICMPV6_ADDR_UNREACH + * if sufficient data bytes are available + * @nhs is the size of the tunnel header(s) : + * either an IPv4 header for SIT encap, or + * an IPv4 header + GRE header for GRE encap + */ + int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, + unsigned int data_len) + { + struct in6_addr temp_saddr; + struct rt6_info *rt; + struct sk_buff *skb2; + u32 info = 0; + + if (!pskb_may_pull(skb, nhs + sizeof(struct ipv6hdr) + 8)) + return 1; + + /* RFC 4884 (partial) support for ICMP extensions */ + if (data_len < 128 || (data_len & 7) || skb->len < data_len) + data_len = 0; + + skb2 = data_len ? skb_copy(skb, GFP_ATOMIC) : skb_clone(skb, GFP_ATOMIC); + + if (!skb2) + return 1; + + skb_dst_drop(skb2); + skb_pull(skb2, nhs); + skb_reset_network_header(skb2); + + rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0); + + if (rt && rt->dst.dev) + skb2->dev = rt->dst.dev; + + ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, &temp_saddr); + + if (data_len) { + /* RFC 4884 (partial) support : + * insert 0 padding at the end, before the extensions + */ + __skb_push(skb2, nhs); + skb_reset_network_header(skb2); + memmove(skb2->data, skb2->data + nhs, data_len - nhs); + memset(skb2->data + data_len - nhs, 0, nhs); + /* RFC 4884 4.5 : Length is measured in 64-bit words, + * and stored in reserved[0] + */ + info = (data_len/8) << 24; + } + if (type == ICMP_TIME_EXCEEDED) + icmp6_send(skb2, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, + info, &temp_saddr); + else + icmp6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, + info, &temp_saddr); + if (rt) + ip6_rt_put(rt); + + kfree_skb(skb2); + + return 0; + } + EXPORT_SYMBOL(ip6_err_gen_icmpv6_unreach); + static void icmpv6_echo_reply(struct sk_buff *skb) { struct net *net = dev_net(skb->dev); @@@ -585,7 -655,7 +655,7 @@@ fl6.daddr = ipv6_hdr(skb)->saddr; if (saddr) fl6.saddr = *saddr; - fl6.flowi6_oif = l3mdev_fib_oif(skb->dev); + fl6.flowi6_oif = skb->dev->ifindex; fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY; fl6.flowi6_mark = mark; security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); diff --combined net/ipv6/route.c index 520b788,08b77f4..4981755 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@@ -1042,8 -1042,8 +1042,8 @@@ static struct rt6_info *rt6_make_pcpu_r return pcpu_rt; }
- static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif, - struct flowi6 *fl6, int flags) + struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, + int oif, struct flowi6 *fl6, int flags) { struct fib6_node *fn, *saved_fn; struct rt6_info *rt; @@@ -1139,6 -1139,7 +1139,7 @@@ redo_rt6_select
} } + EXPORT_SYMBOL_GPL(ip6_pol_route);
static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table, struct flowi6 *fl6, int flags) @@@ -1782,7 -1783,7 +1783,7 @@@ static struct rt6_info *ip6_nh_lookup_t }; struct fib6_table *table; struct rt6_info *rt; - int flags = 0; + int flags = RT6_LOOKUP_F_IFACE;
table = fib6_get_table(net, cfg->fc_table); if (!table) @@@ -2200,7 -2201,7 +2201,7 @@@ static void rt6_do_redirect(struct dst_ * first-hop router for the specified ICMP Destination Address. */
- if (!ndisc_parse_options(msg->opt, optlen, &ndopts)) { + if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) { net_dbg_ratelimited("rt6_redirect: invalid ND options\n"); return; } @@@ -2235,12 -2236,12 +2236,12 @@@ * We have finally decided to accept it. */
- neigh_update(neigh, lladdr, NUD_STALE, + ndisc_update(skb->dev, neigh, lladdr, NUD_STALE, NEIGH_UPDATE_F_WEAK_OVERRIDE| NEIGH_UPDATE_F_OVERRIDE| (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER| - NEIGH_UPDATE_F_ISROUTER)) - ); + NEIGH_UPDATE_F_ISROUTER)), + NDISC_REDIRECT, &ndopts);
nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL); if (!nrt) @@@ -2585,23 -2586,6 +2586,6 @@@ struct rt6_info *addrconf_dst_alloc(str return rt; }
- int ip6_route_get_saddr(struct net *net, - struct rt6_info *rt, - const struct in6_addr *daddr, - unsigned int prefs, - struct in6_addr *saddr) - { - struct inet6_dev *idev = - rt ? ip6_dst_idev((struct dst_entry *)rt) : NULL; - int err = 0; - if (rt && rt->rt6i_prefsrc.plen) - *saddr = rt->rt6i_prefsrc.addr; - else - err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL, - daddr, prefs, saddr); - return err; - } - /* remove deleted ip from prefsrc entries */ struct arg_dev_net_ip { struct net_device *dev; @@@ -3306,6 -3290,8 +3290,8 @@@ static int inet6_rtm_getroute(struct sk
err = -EINVAL; memset(&fl6, 0, sizeof(fl6)); + rtm = nlmsg_data(nlh); + fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
if (tb[RTA_SRC]) { if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr)) diff --combined net/ipv6/sit.c index 0619ac7,cdd7146..917a5cd --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@@ -479,47 -479,12 +479,12 @@@ static void ipip6_tunnel_uninit(struct dev_put(dev); }
- /* Generate icmpv6 with type/code ICMPV6_DEST_UNREACH/ICMPV6_ADDR_UNREACH - * if sufficient data bytes are available - */ - static int ipip6_err_gen_icmpv6_unreach(struct sk_buff *skb) - { - int ihl = ((const struct iphdr *)skb->data)->ihl*4; - struct rt6_info *rt; - struct sk_buff *skb2; - - if (!pskb_may_pull(skb, ihl + sizeof(struct ipv6hdr) + 8)) - return 1; - - skb2 = skb_clone(skb, GFP_ATOMIC); - - if (!skb2) - return 1; - - skb_dst_drop(skb2); - skb_pull(skb2, ihl); - skb_reset_network_header(skb2); - - rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0); - - if (rt && rt->dst.dev) - skb2->dev = rt->dst.dev; - - icmpv6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); - - if (rt) - ip6_rt_put(rt); - - kfree_skb(skb2); - - return 0; - } - static int ipip6_err(struct sk_buff *skb, u32 info) { const struct iphdr *iph = (const struct iphdr *)skb->data; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; + unsigned int data_len = 0; struct ip_tunnel *t; int err;
@@@ -544,6 -509,7 +509,7 @@@ case ICMP_TIME_EXCEEDED: if (code != ICMP_EXC_TTL) return 0; + data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */ break; case ICMP_REDIRECT: break; @@@ -560,22 -526,22 +526,22 @@@
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { ipv4_update_pmtu(skb, dev_net(skb->dev), info, - t->parms.link, 0, IPPROTO_IPV6, 0); + t->parms.link, 0, iph->protocol, 0); err = 0; goto out; } if (type == ICMP_REDIRECT) { ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0, - IPPROTO_IPV6, 0); + iph->protocol, 0); err = 0; goto out; }
- if (t->parms.iph.daddr == 0) + err = 0; + if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len)) goto out;
- err = 0; - if (!ipip6_err_gen_icmpv6_unreach(skb)) + if (t->parms.iph.daddr == 0) goto out;
if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) @@@ -825,9 -791,6 +791,6 @@@ static netdev_tx_t ipip6_tunnel_xmit(st u8 protocol = IPPROTO_IPV6; int t_hlen = tunnel->hlen + sizeof(struct iphdr);
- if (skb->protocol != htons(ETH_P_IPV6)) - goto tx_error; - if (tos == 1) tos = ipv6_get_dsfield(iph6);
diff --combined net/ipv6/udp.c index 005dc82,4bb5c13..0a71a312d --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@@ -115,10 -115,11 +115,10 @@@ static void udp_v6_rehash(struct sock * udp_lib_rehash(sk, new_hash); }
-static inline int compute_score(struct sock *sk, struct net *net, - unsigned short hnum, - const struct in6_addr *saddr, __be16 sport, - const struct in6_addr *daddr, __be16 dport, - int dif) +static int compute_score(struct sock *sk, struct net *net, + const struct in6_addr *saddr, __be16 sport, + const struct in6_addr *daddr, unsigned short hnum, + int dif) { int score; struct inet_sock *inet; @@@ -161,11 -162,54 +161,11 @@@ return score; }
-static inline int compute_score2(struct sock *sk, struct net *net, - const struct in6_addr *saddr, __be16 sport, - const struct in6_addr *daddr, - unsigned short hnum, int dif) -{ - int score; - struct inet_sock *inet; - - if (!net_eq(sock_net(sk), net) || - udp_sk(sk)->udp_port_hash != hnum || - sk->sk_family != PF_INET6) - return -1; - - if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr)) - return -1; - - score = 0; - inet = inet_sk(sk); - - if (inet->inet_dport) { - if (inet->inet_dport != sport) - return -1; - score++; - } - - if (!ipv6_addr_any(&sk->sk_v6_daddr)) { - if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr)) - return -1; - score++; - } - - if (sk->sk_bound_dev_if) { - if (sk->sk_bound_dev_if != dif) - return -1; - score++; - } - - if (sk->sk_incoming_cpu == raw_smp_processor_id()) - score++; - - return score; -} - -/* called with read_rcu_lock() */ +/* called with rcu_read_lock() */ static struct sock *udp6_lib_lookup2(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, unsigned int hnum, int dif, - struct udp_hslot *hslot2, unsigned int slot2, + struct udp_hslot *hslot2, struct sk_buff *skb) { struct sock *sk, *result; @@@ -175,7 -219,7 +175,7 @@@ result = NULL; badness = -1; udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { - score = compute_score2(sk, net, saddr, sport, + score = compute_score(sk, net, saddr, sport, daddr, hnum, dif); if (score > badness) { reuseport = sk->sk_reuseport; @@@ -224,22 -268,17 +224,22 @@@ struct sock *__udp6_lib_lookup(struct n
result = udp6_lib_lookup2(net, saddr, sport, daddr, hnum, dif, - hslot2, slot2, skb); + hslot2, skb); if (!result) { + unsigned int old_slot2 = slot2; hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum); slot2 = hash2 & udptable->mask; + /* avoid searching the same slot again. */ + if (unlikely(slot2 == old_slot2)) + return result; + hslot2 = &udptable->hash2[slot2]; if (hslot->count < hslot2->count) goto begin;
result = udp6_lib_lookup2(net, saddr, sport, - &in6addr_any, hnum, dif, - hslot2, slot2, skb); + daddr, hnum, dif, + hslot2, skb); } return result; } @@@ -247,7 -286,7 +247,7 @@@ begin result = NULL; badness = -1; sk_for_each_rcu(sk, &hslot->head) { - score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif); + score = compute_score(sk, net, saddr, sport, daddr, hnum, dif); if (score > badness) { reuseport = sk->sk_reuseport; if (reuseport) { @@@ -1207,6 -1246,11 +1207,11 @@@ do_udp_sendmsg
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ if (ipc6.tclass < 0) + ipc6.tclass = np->tclass; + + fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); + dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); @@@ -1217,9 -1261,6 +1222,6 @@@ if (ipc6.hlimit < 0) ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
- if (ipc6.tclass < 0) - ipc6.tclass = np->tclass; - if (msg->msg_flags&MSG_CONFIRM) goto do_confirm; back_from_confirm: diff --combined net/openvswitch/conntrack.c index d843125,52f3b9b..b4069a9 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@@ -818,22 -818,23 +818,33 @@@ static int ovs_ct_lookup(struct net *ne */ state = OVS_CS_F_TRACKED | OVS_CS_F_NEW | OVS_CS_F_RELATED; __ovs_ct_update_key(key, state, &info->zone, exp->master); - } else - return __ovs_ct_lookup(net, key, info, skb); + } else { + struct nf_conn *ct; + int err; + + err = __ovs_ct_lookup(net, key, info, skb); + if (err) + return err; + + ct = (struct nf_conn *)skb->nfct; + if (ct) + nf_ct_deliver_cached_events(ct); + }
return 0; }
+ static bool labels_nonzero(const struct ovs_key_ct_labels *labels) + { + size_t i; + + for (i = 0; i < sizeof(*labels); i++) + if (labels->ct_labels[i]) + return true; + + return false; + } + /* Lookup connection and confirm if unconfirmed. */ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key, const struct ovs_conntrack_info *info, @@@ -844,24 -845,32 +855,32 @@@ err = __ovs_ct_lookup(net, key, info, skb); if (err) return err; - /* This is a no-op if the connection has already been confirmed. */ + + /* Apply changes before confirming the connection so that the initial + * conntrack NEW netlink event carries the values given in the CT + * action. + */ + if (info->mark.mask) { + err = ovs_ct_set_mark(skb, key, info->mark.value, + info->mark.mask); + if (err) + return err; + } + if (labels_nonzero(&info->labels.mask)) { + err = ovs_ct_set_labels(skb, key, &info->labels.value, + &info->labels.mask); + if (err) + return err; + } + /* This will take care of sending queued events even if the connection + * is already confirmed. + */ if (nf_conntrack_confirm(skb) != NF_ACCEPT) return -EINVAL;
return 0; }
- static bool labels_nonzero(const struct ovs_key_ct_labels *labels) - { - size_t i; - - for (i = 0; i < sizeof(*labels); i++) - if (labels->ct_labels[i]) - return true; - - return false; - } - /* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero * value if 'skb' is freed. */ @@@ -886,19 -895,7 +905,7 @@@ int ovs_ct_execute(struct net *net, str err = ovs_ct_commit(net, key, info, skb); else err = ovs_ct_lookup(net, key, info, skb); - if (err) - goto err;
- if (info->mark.mask) { - err = ovs_ct_set_mark(skb, key, info->mark.value, - info->mark.mask); - if (err) - goto err; - } - if (labels_nonzero(&info->labels.mask)) - err = ovs_ct_set_labels(skb, key, &info->labels.value, - &info->labels.mask); - err: skb_push(skb, nh_ofs); if (err) kfree_skb(skb); @@@ -1155,6 -1152,20 +1162,20 @@@ static int parse_ct(const struct nlatt } }
+ #ifdef CONFIG_NF_CONNTRACK_MARK + if (!info->commit && info->mark.mask) { + OVS_NLERR(log, + "Setting conntrack mark requires 'commit' flag."); + return -EINVAL; + } + #endif + #ifdef CONFIG_NF_CONNTRACK_LABELS + if (!info->commit && labels_nonzero(&info->labels.mask)) { + OVS_NLERR(log, + "Setting conntrack labels requires 'commit' flag."); + return -EINVAL; + } + #endif if (rem > 0) { OVS_NLERR(log, "Conntrack attr has %d unknown bytes", rem); return -EINVAL; diff --combined net/rds/ib_cm.c index 7c2a65a,3342876..e48bb1b --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@@ -36,6 -36,7 +36,7 @@@ #include <linux/vmalloc.h> #include <linux/ratelimit.h>
+ #include "rds_single_path.h" #include "rds.h" #include "ib.h"
@@@ -111,7 -112,7 +112,7 @@@ void rds_ib_cm_connect_complete(struct } }
- if (conn->c_version < RDS_PROTOCOL(3,1)) { + if (conn->c_version < RDS_PROTOCOL(3, 1)) { printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed," " no longer supported\n", &conn->c_faddr, @@@ -273,7 -274,7 +274,7 @@@ static void rds_ib_tasklet_fn_send(unsi if (rds_conn_up(conn) && (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) || test_bit(0, &conn->c_map_queued))) - rds_send_xmit(ic->conn); + rds_send_xmit(&ic->conn->c_path[0]); }
static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq, diff --combined net/rds/loop.c index 814173b,268f07f..15f83db --- a/net/rds/loop.c +++ b/net/rds/loop.c @@@ -34,6 -34,7 +34,7 @@@ #include <linux/slab.h> #include <linux/in.h>
+ #include "rds_single_path.h" #include "rds.h" #include "loop.h"
@@@ -95,9 -96,8 +96,9 @@@ out */ static void rds_loop_inc_free(struct rds_incoming *inc) { - struct rds_message *rm = container_of(inc, struct rds_message, m_inc); - rds_message_put(rm); + struct rds_message *rm = container_of(inc, struct rds_message, m_inc); + + rds_message_put(rm); }
/* we need to at least give the thread something to succeed */ diff --combined net/rds/tcp_connect.c index f6e95d6,ba9ec67..96c2c4d --- a/net/rds/tcp_connect.c +++ b/net/rds/tcp_connect.c @@@ -34,6 -34,7 +34,7 @@@ #include <linux/in.h> #include <net/tcp.h>
+ #include "rds_single_path.h" #include "rds.h" #include "tcp.h"
@@@ -54,19 -55,20 +55,20 @@@ void rds_tcp_state_change(struct sock *
rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state);
- switch(sk->sk_state) { - /* ignore connecting sockets as they make progress */ - case TCP_SYN_SENT: - case TCP_SYN_RECV: - break; - case TCP_ESTABLISHED: - rds_connect_path_complete(&conn->c_path[0], - RDS_CONN_CONNECTING); - break; - case TCP_CLOSE_WAIT: - case TCP_CLOSE: - rds_conn_drop(conn); - default: - break; + switch (sk->sk_state) { + /* ignore connecting sockets as they make progress */ + case TCP_SYN_SENT: + case TCP_SYN_RECV: + break; + case TCP_ESTABLISHED: - rds_connect_path_complete(conn, RDS_CONN_CONNECTING); ++ rds_connect_path_complete(&conn->c_path[0], ++ RDS_CONN_CONNECTING); + break; + case TCP_CLOSE_WAIT: + case TCP_CLOSE: + rds_conn_drop(conn); + default: + break; } out: read_unlock_bh(&sk->sk_callback_lock); diff --combined net/rds/tcp_listen.c index 245542c,22d9bb1..f9cc945 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c @@@ -35,6 -35,7 +35,7 @@@ #include <linux/in.h> #include <net/tcp.h>
+ #include "rds_single_path.h" #include "rds.h" #include "tcp.h"
@@@ -132,17 -133,19 +133,19 @@@ int rds_tcp_accept_one(struct socket *s * c_transport_data. */ if (ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr) || - !conn->c_outgoing) { + !conn->c_path[0].cp_outgoing) { goto rst_nsk; } else { rds_tcp_reset_callbacks(new_sock, conn); - conn->c_outgoing = 0; + conn->c_path[0].cp_outgoing = 0; /* rds_connect_path_complete() marks RDS_CONN_UP */ - rds_connect_path_complete(conn, RDS_CONN_RESETTING); + rds_connect_path_complete(&conn->c_path[0], - RDS_CONN_DISCONNECTING); ++ RDS_CONN_RESETTING); } } else { rds_tcp_set_callbacks(new_sock, conn); - rds_connect_path_complete(conn, RDS_CONN_CONNECTING); + rds_connect_path_complete(&conn->c_path[0], + RDS_CONN_CONNECTING); } new_sock = NULL; ret = 0; diff --combined net/rds/tcp_recv.c index 6e6a711,3f8fb38..4a87d9e --- a/net/rds/tcp_recv.c +++ b/net/rds/tcp_recv.c @@@ -34,6 -34,7 +34,7 @@@ #include <linux/slab.h> #include <net/tcp.h>
+ #include "rds_single_path.h" #include "rds.h" #include "tcp.h"
@@@ -171,7 -172,7 +172,7 @@@ static int rds_tcp_data_recv(read_descr while (left) { if (!tinc) { tinc = kmem_cache_alloc(rds_tcp_incoming_slab, - arg->gfp); + arg->gfp); if (!tinc) { desc->error = -ENOMEM; goto out; diff --combined net/rds/tcp_send.c index 618be69,2b3414f..710f1aa --- a/net/rds/tcp_send.c +++ b/net/rds/tcp_send.c @@@ -34,6 -34,7 +34,7 @@@ #include <linux/in.h> #include <net/tcp.h>
+ #include "rds_single_path.h" #include "rds.h" #include "tcp.h"
@@@ -66,19 -67,19 +67,19 @@@ void rds_tcp_xmit_complete(struct rds_c static int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len) { struct kvec vec = { - .iov_base = data, - .iov_len = len, + .iov_base = data, + .iov_len = len, + }; + struct msghdr msg = { + .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL, }; - struct msghdr msg = { - .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL, - };
return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len); }
/* the core send_sem serializes this with other xmit and shutdown */ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, - unsigned int hdr_off, unsigned int sg, unsigned int off) + unsigned int hdr_off, unsigned int sg, unsigned int off) { struct rds_tcp_connection *tc = conn->c_transport_data; int done = 0; @@@ -196,7 -197,7 +197,7 @@@ void rds_tcp_write_space(struct sock *s tc->t_last_seen_una = rds_tcp_snd_una(tc); rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked);
- if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) + if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) queue_delayed_work(rds_wq, &conn->c_send_w, 0);
out: diff --combined net/sched/act_api.c index c7a0b0d,f8c61d2..47ec230 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@@ -224,8 -224,8 +224,8 @@@ int tcf_hash_search(struct tc_action_ne } EXPORT_SYMBOL(tcf_hash_search);
- int tcf_hash_check(struct tc_action_net *tn, u32 index, struct tc_action *a, - int bind) + bool tcf_hash_check(struct tc_action_net *tn, u32 index, struct tc_action *a, + int bind) { struct tcf_hashinfo *hinfo = tn->hinfo; struct tcf_common *p = NULL; @@@ -235,9 -235,9 +235,9 @@@ p->tcfc_refcnt++; a->priv = p; a->hinfo = hinfo; - return 1; + return true; } - return 0; + return false; } EXPORT_SYMBOL(tcf_hash_check);
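With tcf_hash_check() now returning bool, the canonical caller shape becomes the following (mirroring the act_ipt hunk later in this diff; sizeof(*p) is a placeholder for the action's private struct):

	exists = tcf_hash_check(tn, index, a, bind);
	if (!exists) {
		ret = tcf_hash_create(tn, index, est, a, sizeof(*p), bind, false);
		if (ret)
			return ret;
	}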
@@@ -283,10 -283,11 +283,11 @@@ err2 p->tcfc_index = index ? index : tcf_hash_new_index(tn); p->tcfc_tm.install = jiffies; p->tcfc_tm.lastuse = jiffies; + p->tcfc_tm.firstuse = 0; if (est) { err = gen_new_estimator(&p->tcfc_bstats, p->cpu_bstats, &p->tcfc_rate_est, - &p->tcfc_lock, est); + &p->tcfc_lock, NULL, est); if (err) { free_percpu(p->cpu_qstats); goto err2; @@@ -503,8 -504,8 +504,8 @@@ nla_put_failure } EXPORT_SYMBOL(tcf_action_dump_1);
- int - tcf_action_dump(struct sk_buff *skb, struct list_head *actions, int bind, int ref) + int tcf_action_dump(struct sk_buff *skb, struct list_head *actions, + int bind, int ref) { struct tc_action *a; int err = -EINVAL; @@@ -670,7 -671,7 +671,7 @@@ int tcf_action_copy_stats(struct sk_buf if (err < 0) goto errout;
- if (gnet_stats_copy_basic(&d, p->cpu_bstats, &p->tcfc_bstats) < 0 || + if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfc_bstats) < 0 || gnet_stats_copy_rate_est(&d, &p->tcfc_bstats, &p->tcfc_rate_est) < 0 || gnet_stats_copy_queue(&d, p->cpu_qstats, @@@ -687,9 -688,9 +688,9 @@@ errout return -1; }
- static int - tca_get_fill(struct sk_buff *skb, struct list_head *actions, u32 portid, u32 seq, - u16 flags, int event, int bind, int ref) + static int tca_get_fill(struct sk_buff *skb, struct list_head *actions, + u32 portid, u32 seq, u16 flags, int event, int bind, + int ref) { struct tcamsg *t; struct nlmsghdr *nlh; @@@ -730,7 -731,8 +731,8 @@@ act_get_notify(struct net *net, u32 por skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) return -ENOBUFS; - if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event, 0, 0) <= 0) { + if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event, + 0, 0) <= 0) { kfree_skb(skb); return -EINVAL; } @@@ -838,7 -840,8 +840,8 @@@ static int tca_action_flush(struct net if (a.ops == NULL) /* some idiot trying to flush an unknown action */ goto err_out;
- nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0); + nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, + sizeof(*t), 0); if (!nlh) goto out_module_put; t = nlmsg_data(nlh); @@@ -1001,7 -1004,8 +1004,8 @@@ static int tc_ctl_action(struct sk_buf u32 portid = skb ? NETLINK_CB(skb).portid : 0; int ret = 0, ovr = 0;
- if ((n->nlmsg_type != RTM_GETACTION) && !netlink_capable(skb, CAP_NET_ADMIN)) + if ((n->nlmsg_type != RTM_GETACTION) && + !netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM;
ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL); @@@ -1118,7 -1122,7 +1122,7 @@@ tc_dump_action(struct sk_buff *skb, str nla_nest_end(skb, nest); ret = skb->len; } else - nla_nest_cancel(skb, nest); + nlmsg_trim(skb, b);
nlh->nlmsg_len = skb_tail_pointer(skb) - b; if (NETLINK_CB(cb->skb).portid && ret) diff --combined net/sched/act_ife.c index ea4a2fe,b7fa969..845ab51 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@@ -106,9 -106,9 +106,9 @@@ int ife_get_meta_u16(struct sk_buff *sk } EXPORT_SYMBOL_GPL(ife_get_meta_u16);
-int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval) +int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp) { - mi->metaval = kmemdup(metaval, sizeof(u32), GFP_KERNEL); + mi->metaval = kmemdup(metaval, sizeof(u32), gfp); if (!mi->metaval) return -ENOMEM;
@@@ -116,9 -116,9 +116,9 @@@ } EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);
-int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval) +int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp) { - mi->metaval = kmemdup(metaval, sizeof(u16), GFP_KERNEL); + mi->metaval = kmemdup(metaval, sizeof(u16), gfp); if (!mi->metaval) return -ENOMEM;
@@@ -240,10 -240,10 +240,10 @@@ static int ife_validate_metatype(struc }
/* called when adding new meta information - * under ife->tcf_lock + * under ife->tcf_lock for existing action */ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, - void *val, int len) + void *val, int len, bool exists) { struct tcf_meta_ops *ops = find_ife_oplist(metaid); int ret = 0; @@@ -251,13 -251,11 +251,13 @@@ if (!ops) { ret = -ENOENT; #ifdef CONFIG_MODULES - spin_unlock_bh(&ife->tcf_lock); + if (exists) + spin_unlock_bh(&ife->tcf_lock); rtnl_unlock(); request_module("ifemeta%u", metaid); rtnl_lock(); - spin_lock_bh(&ife->tcf_lock); + if (exists) + spin_lock_bh(&ife->tcf_lock); ops = find_ife_oplist(metaid); #endif } @@@ -274,10 -272,10 +274,10 @@@ }
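The conditional locking above exists because request_module() can sleep: the action's spinlock is held only when operating on an already-published action, so it must be dropped around the module load and the lookup retried afterwards. The general shape of the pattern (placeholder names):

	if (exists)
		spin_unlock_bh(&act->lock);   /* request_module() may sleep */
	rtnl_unlock();
	request_module("ifemeta%u", metaid);
	rtnl_lock();
	if (exists)
		spin_lock_bh(&act->lock);
	ops = find_ife_oplist(metaid);        /* re-lookup after the load */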
/* called when adding new meta information - * under ife->tcf_lock + * under ife->tcf_lock for existing action */ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, - int len) + int len, bool atomic) { struct tcf_meta_info *mi = NULL; struct tcf_meta_ops *ops = find_ife_oplist(metaid); @@@ -286,7 -284,7 +286,7 @@@ if (!ops) return -ENOENT;
- mi = kzalloc(sizeof(*mi), GFP_KERNEL); + mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL); if (!mi) { /*put back what find_ife_oplist took */ module_put(ops->owner); @@@ -296,7 -294,7 +296,7 @@@ mi->metaid = metaid; mi->ops = ops; if (len > 0) { - ret = ops->alloc(mi, metaval); + ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL); if (ret != 0) { kfree(mi); module_put(ops->owner); @@@ -315,13 -313,11 +315,13 @@@ static int use_all_metadata(struct tcf_ int rc = 0; int installed = 0;
+ read_lock(&ife_mod_lock); list_for_each_entry(o, &ifeoplist, list) { - rc = add_metainfo(ife, o->metaid, NULL, 0); + rc = add_metainfo(ife, o->metaid, NULL, 0, true); if (rc == 0) installed += 1; } + read_unlock(&ife_mod_lock);
if (installed) return 0; @@@ -389,9 -385,8 +389,9 @@@ static void tcf_ife_cleanup(struct tc_a spin_unlock_bh(&ife->tcf_lock); }
-/* under ife->tcf_lock */ -static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb) +/* under ife->tcf_lock for existing action */ +static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, + bool exists) { int len = 0; int rc = 0; @@@ -403,11 -398,11 +403,11 @@@ val = nla_data(tb[i]); len = nla_len(tb[i]);
- rc = load_metaops_and_vet(ife, i, val, len); + rc = load_metaops_and_vet(ife, i, val, len, exists); if (rc != 0) return rc;
- rc = add_metainfo(ife, i, val, len); + rc = add_metainfo(ife, i, val, len, exists); if (rc) return rc; } @@@ -428,7 -423,8 +428,8 @@@ static int tcf_ife_init(struct net *net u16 ife_type = 0; u8 *daddr = NULL; u8 *saddr = NULL; - int ret = 0, exists = 0; + bool exists = false; + int ret = 0; int err;
err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy); @@@ -479,8 -475,7 +480,8 @@@ saddr = nla_data(tb[TCA_IFE_SMAC]); }
- spin_lock_bh(&ife->tcf_lock); + if (exists) + spin_lock_bh(&ife->tcf_lock); ife->tcf_action = parm->action;
if (parm->flags & IFE_ENCODE) { @@@ -510,12 -505,11 +511,12 @@@ metadata_parse_err if (ret == ACT_P_CREATED) _tcf_ife_cleanup(a, bind);
- spin_unlock_bh(&ife->tcf_lock); + if (exists) + spin_unlock_bh(&ife->tcf_lock); return err; }
- err = populate_metalist(ife, tb2); + err = populate_metalist(ife, tb2, exists); if (err) goto metadata_parse_err;
@@@ -530,14 -524,12 +531,14 @@@ if (ret == ACT_P_CREATED) _tcf_ife_cleanup(a, bind);
- spin_unlock_bh(&ife->tcf_lock); + if (exists) + spin_unlock_bh(&ife->tcf_lock); return err; } }
- spin_unlock_bh(&ife->tcf_lock); + if (exists) + spin_unlock_bh(&ife->tcf_lock);
if (ret == ACT_P_CREATED) tcf_hash_insert(tn, a); @@@ -562,9 -554,7 +563,7 @@@ static int tcf_ife_dump(struct sk_buff if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt)) goto nla_put_failure;
- t.install = jiffies_to_clock_t(jiffies - ife->tcf_tm.install); - t.lastuse = jiffies_to_clock_t(jiffies - ife->tcf_tm.lastuse); - t.expires = jiffies_to_clock_t(ife->tcf_tm.expires); + tcf_tm_dump(&t, &ife->tcf_tm); if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD)) goto nla_put_failure;
@@@ -632,7 -622,7 +631,7 @@@ static int tcf_ife_decode(struct sk_buf
spin_lock(&ife->tcf_lock); bstats_update(&ife->tcf_bstats, skb); - ife->tcf_tm.lastuse = jiffies; + tcf_lastuse_update(&ife->tcf_tm); spin_unlock(&ife->tcf_lock);
ifehdrln = ntohs(ifehdrln); @@@ -720,7 -710,7 +719,7 @@@ static int tcf_ife_encode(struct sk_buf
spin_lock(&ife->tcf_lock); bstats_update(&ife->tcf_bstats, skb); - ife->tcf_tm.lastuse = jiffies; + tcf_lastuse_update(&ife->tcf_tm);
if (!metalen) { /* no metadata to send */ /* abuse overlimits to count when we allow packet @@@ -811,7 -801,7 +810,7 @@@ static int tcf_ife_act(struct sk_buff * pr_info_ratelimited("unknown failure(policy neither de/encode\n"); spin_lock(&ife->tcf_lock); bstats_update(&ife->tcf_bstats, skb); - ife->tcf_tm.lastuse = jiffies; + tcf_lastuse_update(&ife->tcf_tm); ife->tcf_qstats.drops++; spin_unlock(&ife->tcf_lock);
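The act_ife changes above all follow one pattern: ife->tcf_lock is taken only when the action already exists (the new bool exists), and every allocation reachable from that locked path is switched from GFP_KERNEL to GFP_ATOMIC via the plumbed-through gfp_t/atomic arguments, since sleeping under a spinlock is not allowed. A compact userspace sketch of the idea, with invented names (tcf_lock here is a pthread mutex, alloc_meta() a stand-in for kzalloc):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t tcf_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for kzalloc(..., atomic ? GFP_ATOMIC : GFP_KERNEL) */
static void *alloc_meta(bool atomic)
{
    /* userspace malloc never sleeps, so the flag is only recorded */
    printf("alloc in %s context\n", atomic ? "atomic" : "sleepable");
    return malloc(16);
}

static int update_action(bool exists)
{
    void *mi;

    if (exists)                       /* lock only for an existing action */
        pthread_mutex_lock(&tcf_lock);

    mi = alloc_meta(exists);          /* must not sleep while locked */

    if (exists)
        pthread_mutex_unlock(&tcf_lock);

    if (!mi)
        return -1;                    /* cf. -ENOMEM */
    free(mi);
    return 0;
}

int main(void)
{
    update_action(false);             /* newly created action */
    update_action(true);              /* existing action, under lock */
    return 0;
}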
diff --combined net/sched/act_ipt.c index d4bd19e,6148e32..b8c5060 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@@ -34,7 -34,8 +34,8 @@@ static int ipt_net_id
static int xt_net_id;
- static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int hook) + static int ipt_init_target(struct xt_entry_target *t, char *table, + unsigned int hook) { struct xt_tgchk_param par; struct xt_target *target; @@@ -96,7 -97,8 +97,8 @@@ static int __tcf_ipt_init(struct tc_act struct tcf_ipt *ipt; struct xt_entry_target *td, *t; char *tname; - int ret = 0, err, exists = 0; + bool exists = false; + int ret = 0, err; u32 hook = 0; u32 index = 0;
@@@ -121,13 -123,10 +123,13 @@@ }
td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]); - if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) + if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) { + if (exists) + tcf_hash_release(a, bind); return -EINVAL; + }
- if (!tcf_hash_check(tn, index, a, bind)) { + if (!exists) { ret = tcf_hash_create(tn, index, est, a, sizeof(*ipt), bind, false); if (ret) @@@ -215,7 -214,7 +217,7 @@@ static int tcf_ipt(struct sk_buff *skb
spin_lock(&ipt->tcf_lock);
- ipt->tcf_tm.lastuse = jiffies; + tcf_lastuse_update(&ipt->tcf_tm); bstats_update(&ipt->tcf_bstats, skb);
/* yes, we have to worry about both in and out dev @@@ -245,7 -244,7 +247,7 @@@ default: net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n", ret); - result = TC_POLICE_OK; + result = TC_ACT_OK; break; } spin_unlock(&ipt->tcf_lock); @@@ -253,7 -252,8 +255,8 @@@
}
- static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) + static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, + int ref) { unsigned char *b = skb_tail_pointer(skb); struct tcf_ipt *ipt = a->priv; @@@ -280,11 -280,11 +283,11 @@@ nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) || nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname)) goto nla_put_failure; - tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install); - tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse); - tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires); + + tcf_tm_dump(&tm, &ipt->tcf_tm); if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD)) goto nla_put_failure; + kfree(t); return skb->len;
diff --combined net/sched/sch_fifo.c index 2e4bd2c,6ea0db4..baeed6a --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c @@@ -19,36 -19,35 +19,39 @@@
/* 1 band FIFO pseudo-"scheduler" */
- static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) + static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit)) return qdisc_enqueue_tail(skb, sch);
- return qdisc_reshape_fail(skb, sch); + return qdisc_drop(skb, sch, to_free); }
- static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) + static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { if (likely(skb_queue_len(&sch->q) < sch->limit)) return qdisc_enqueue_tail(skb, sch);
- return qdisc_reshape_fail(skb, sch); + return qdisc_drop(skb, sch, to_free); }
- static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch) + static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { + unsigned int prev_backlog; + if (likely(skb_queue_len(&sch->q) < sch->limit)) return qdisc_enqueue_tail(skb, sch);
+ prev_backlog = sch->qstats.backlog; /* queue full, remove one skb to fulfill the limit */ - __qdisc_queue_drop_head(sch, &sch->q); + __qdisc_queue_drop_head(sch, &sch->q, to_free); qdisc_qstats_drop(sch); qdisc_enqueue_tail(skb, sch);
+ qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog); return NET_XMIT_CN; }
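All three fifo enqueue hooks above show the new enqueue signature: instead of freeing a rejected skb inline (qdisc_reshape_fail), qdisc_drop() now chains it onto the caller-provided to_free list so the actual kfree can happen in one batch after the qdisc lock is released; pfifo_tail_enqueue additionally reports the exact backlog delta via qdisc_tree_reduce_backlog(). A self-contained model of the to_free convention, with invented struct pkt/enqueue() names:

#include <stdio.h>
#include <stdlib.h>

struct pkt {
    int id;
    struct pkt *next;
};

#define LIMIT 2
static struct pkt *queue[LIMIT];
static int qlen;

static int enqueue(struct pkt *p, struct pkt **to_free)
{
    if (qlen < LIMIT) {
        queue[qlen++] = p;
        return 0;                 /* cf. NET_XMIT_SUCCESS */
    }
    p->next = *to_free;           /* cf. __qdisc_drop(skb, to_free) */
    *to_free = p;
    return 1;                     /* cf. NET_XMIT_DROP */
}

int main(void)
{
    struct pkt *to_free = NULL;
    for (int i = 0; i < 4; i++) {
        struct pkt *p = calloc(1, sizeof(*p));
        p->id = i;
        if (enqueue(p, &to_free))
            printf("pkt %d chained for batch free\n", i);
    }
    while (to_free) {             /* caller frees outside the hot path */
        struct pkt *n = to_free->next;
        free(to_free);
        to_free = n;
    }
    for (int i = 0; i < qlen; i++)
        free(queue[i]);
    return 0;
}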
@@@ -103,7 -102,6 +106,6 @@@ struct Qdisc_ops pfifo_qdisc_ops __read .enqueue = pfifo_enqueue, .dequeue = qdisc_dequeue_head, .peek = qdisc_peek_head, - .drop = qdisc_queue_drop, .init = fifo_init, .reset = qdisc_reset_queue, .change = fifo_init, @@@ -118,7 -116,6 +120,6 @@@ struct Qdisc_ops bfifo_qdisc_ops __read .enqueue = bfifo_enqueue, .dequeue = qdisc_dequeue_head, .peek = qdisc_peek_head, - .drop = qdisc_queue_drop, .init = fifo_init, .reset = qdisc_reset_queue, .change = fifo_init, @@@ -133,7 -130,6 +134,6 @@@ struct Qdisc_ops pfifo_head_drop_qdisc_ .enqueue = pfifo_tail_enqueue, .dequeue = qdisc_dequeue_head, .peek = qdisc_peek_head, - .drop = qdisc_queue_drop_head, .init = fifo_init, .reset = qdisc_reset_queue, .change = fifo_init, diff --combined net/sched/sch_htb.c index 62f9d81,ba098f2..91982d9 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@@ -117,7 -117,6 +117,6 @@@ struct htb_class * Written often fields */ struct gnet_stats_basic_packed bstats; - struct gnet_stats_queue qstats; struct tc_htb_xstats xstats; /* our special stats */
/* token bucket parameters */ @@@ -140,6 -139,8 +139,8 @@@ enum htb_cmode cmode; /* current mode of the class */ struct rb_node pq_node; /* node for event queue */ struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */ + + unsigned int drops ____cacheline_aligned_in_smp; };
struct htb_level { @@@ -569,7 -570,8 +570,8 @@@ static inline void htb_deactivate(struc list_del_init(&cl->un.leaf.drop_list); }
- static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) + static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { int uninitialized_var(ret); struct htb_sched *q = qdisc_priv(sch); @@@ -581,19 -583,20 +583,20 @@@ __skb_queue_tail(&q->direct_queue, skb); q->direct_pkts++; } else { - return qdisc_drop(skb, sch); + return qdisc_drop(skb, sch, to_free); } #ifdef CONFIG_NET_CLS_ACT } else if (!cl) { if (ret & __NET_XMIT_BYPASS) qdisc_qstats_drop(sch); - kfree_skb(skb); + __qdisc_drop(skb, to_free); return ret; #endif - } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) { + } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q, + to_free)) != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) { qdisc_qstats_drop(sch); - cl->qstats.drops++; + cl->drops++; } return ret; } else { @@@ -889,7 -892,6 +892,6 @@@ static struct sk_buff *htb_dequeue(stru if (skb != NULL) { ok: qdisc_bstats_update(sch, skb); - qdisc_unthrottled(sch); qdisc_qstats_backlog_dec(sch, skb); sch->q.qlen--; return skb; @@@ -929,38 -931,13 +931,13 @@@ } qdisc_qstats_overlimit(sch); if (likely(next_event > q->now)) - qdisc_watchdog_schedule_ns(&q->watchdog, next_event, true); + qdisc_watchdog_schedule_ns(&q->watchdog, next_event); else schedule_work(&q->work); fin: return skb; }
- /* try to drop from each class (by prio) until one succeed */ - static unsigned int htb_drop(struct Qdisc *sch) - { - struct htb_sched *q = qdisc_priv(sch); - int prio; - - for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) { - struct list_head *p; - list_for_each(p, q->drops + prio) { - struct htb_class *cl = list_entry(p, struct htb_class, - un.leaf.drop_list); - unsigned int len; - if (cl->un.leaf.q->ops->drop && - (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) { - sch->qstats.backlog -= len; - sch->q.qlen--; - if (!cl->un.leaf.q->q.qlen) - htb_deactivate(q, cl); - return len; - } - } - } - return 0; - } - /* reset all classes */ /* always caled under BH & queue lock */ static void htb_reset(struct Qdisc *sch) @@@ -983,7 -960,7 +960,7 @@@ } } qdisc_watchdog_cancel(&q->watchdog); - __skb_queue_purge(&q->direct_queue); + __qdisc_reset_queue(&q->direct_queue); sch->q.qlen = 0; sch->qstats.backlog = 0; memset(q->hlevel, 0, sizeof(q->hlevel)); @@@ -1007,9 -984,7 +984,9 @@@ static void htb_work_func(struct work_s struct htb_sched *q = container_of(work, struct htb_sched, work); struct Qdisc *sch = q->watchdog.qdisc;
+ rcu_read_lock(); __netif_schedule(qdisc_root(sch)); + rcu_read_unlock(); }
static int htb_init(struct Qdisc *sch, struct nlattr *opt) @@@ -1136,16 -1111,22 +1113,22 @@@ static in htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) { struct htb_class *cl = (struct htb_class *)arg; + struct gnet_stats_queue qs = { + .drops = cl->drops, + }; __u32 qlen = 0;
- if (!cl->level && cl->un.leaf.q) + if (!cl->level && cl->un.leaf.q) { qlen = cl->un.leaf.q->q.qlen; + qs.backlog = cl->un.leaf.q->qstats.backlog; + } cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens); cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
- if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 || + if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), + d, NULL, &cl->bstats) < 0 || gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 || - gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0) + gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0) return -1;
return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); @@@ -1258,7 -1239,7 +1241,7 @@@ static void htb_destroy(struct Qdisc *s htb_destroy_class(sch, cl); } qdisc_class_hash_destroy(&q->clhash); - __skb_queue_purge(&q->direct_queue); + __qdisc_reset_queue(&q->direct_queue); }
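In the htb hunks above, the per-class gnet_stats_queue is replaced by a single drops counter marked ____cacheline_aligned_in_smp, and htb_dump_class_stats() now assembles a gnet_stats_queue snapshot on the stack at dump time. A userspace sketch of that split between a hot-path counter and a dump-time snapshot (struct names are invented; alignas(64) stands in for the kernel annotation):

#include <stdalign.h>
#include <stdio.h>

struct queue_stats {
    unsigned int drops;
    unsigned int backlog;
};

struct htb_class_model {
    /* written on every drop: keep it off shared cache lines,
     * cf. ____cacheline_aligned_in_smp */
    alignas(64) unsigned int drops;
    unsigned int leaf_backlog;
};

static void dump_class_stats(const struct htb_class_model *cl)
{
    struct queue_stats qs = {
        .drops   = cl->drops,          /* snapshot only at dump time */
        .backlog = cl->leaf_backlog,
    };
    printf("drops=%u backlog=%u\n", qs.drops, qs.backlog);
}

int main(void)
{
    struct htb_class_model cl = { .drops = 3, .leaf_backlog = 1500 };
    dump_class_stats(&cl);
    return 0;
}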
static int htb_delete(struct Qdisc *sch, unsigned long arg) @@@ -1397,7 -1378,8 +1380,8 @@@ static int htb_change_class(struct Qdis if (htb_rate_est || tca[TCA_RATE]) { err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est, - qdisc_root_sleeping_lock(sch), + NULL, + qdisc_root_sleeping_running(sch), tca[TCA_RATE] ? : &est.nla); if (err) { kfree(cl); @@@ -1459,11 -1441,10 +1443,10 @@@ parent->children++; } else { if (tca[TCA_RATE]) { - spinlock_t *lock = qdisc_root_sleeping_lock(sch); - err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est, - lock, + NULL, + qdisc_root_sleeping_running(sch), tca[TCA_RATE]); if (err) return err; @@@ -1601,7 -1582,6 +1584,6 @@@ static struct Qdisc_ops htb_qdisc_ops _ .enqueue = htb_enqueue, .dequeue = htb_dequeue, .peek = qdisc_peek_dequeued, - .drop = htb_drop, .init = htb_init, .reset = htb_reset, .destroy = htb_destroy, diff --combined net/sched/sch_netem.c index 178f163,6eac3d8..aaaf021 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@@ -368,9 -368,7 +368,7 @@@ static void tfifo_reset(struct Qdisc *s struct sk_buff *skb = netem_rb_to_skb(p);
rb_erase(p, &q->t_root); - skb->next = NULL; - skb->prev = NULL; - kfree_skb(skb); + rtnl_kfree_skbs(skb, skb); } }
@@@ -399,7 -397,8 +397,8 @@@ static void tfifo_enqueue(struct sk_buf * when we statistically choose to corrupt one, we instead segment it, returning * the first packet to be corrupted, and re-enqueue the remaining frames */ - static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch) + static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct sk_buff *segs; netdev_features_t features = netif_skb_features(skb); @@@ -407,7 -406,7 +406,7 @@@ segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
if (IS_ERR_OR_NULL(segs)) { - qdisc_reshape_fail(skb, sch); + qdisc_drop(skb, sch, to_free); return NULL; } consume_skb(skb); @@@ -420,7 -419,8 +419,8 @@@ * NET_XMIT_DROP: queue length didn't change. * NET_XMIT_SUCCESS: one skb was queued. */ - static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) + static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct netem_sched_data *q = qdisc_priv(sch); /* We don't fill cb now as skb_unshare() may invalidate it */ @@@ -445,7 -445,7 +445,7 @@@ } if (count == 0) { qdisc_qstats_drop(sch); - kfree_skb(skb); + __qdisc_drop(skb, to_free); return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; }
@@@ -465,7 -465,7 +465,7 @@@ u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
q->duplicate = 0; - rootq->enqueue(skb2, rootq); + rootq->enqueue(skb2, rootq, to_free); q->duplicate = dupsave; }
@@@ -477,7 -477,7 +477,7 @@@ */ if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) { if (skb_is_gso(skb)) { - segs = netem_segment(skb, sch); + segs = netem_segment(skb, sch, to_free); if (!segs) return NET_XMIT_DROP; } else { @@@ -487,10 -487,14 +487,14 @@@ skb = segs; segs = segs->next;
- if (!(skb = skb_unshare(skb, GFP_ATOMIC)) || - (skb->ip_summed == CHECKSUM_PARTIAL && - skb_checksum_help(skb))) { - rc = qdisc_drop(skb, sch); + skb = skb_unshare(skb, GFP_ATOMIC); + if (unlikely(!skb)) { + qdisc_qstats_drop(sch); + goto finish_segs; + } + if (skb->ip_summed == CHECKSUM_PARTIAL && + skb_checksum_help(skb)) { + qdisc_drop(skb, sch, to_free); goto finish_segs; }
@@@ -499,7 -503,7 +503,7 @@@ }
if (unlikely(skb_queue_len(&sch->q) >= sch->limit)) - return qdisc_reshape_fail(skb, sch); + return qdisc_drop(skb, sch, to_free);
qdisc_qstats_backlog_inc(sch, skb);
@@@ -559,7 -563,7 +563,7 @@@ finish_segs segs->next = NULL; qdisc_skb_cb(segs)->pkt_len = segs->len; last_len = segs->len; - rc = qdisc_enqueue(segs, sch); + rc = qdisc_enqueue(segs, sch, to_free); if (rc != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(rc)) qdisc_qstats_drop(sch); @@@ -576,50 -580,17 +580,17 @@@ return NET_XMIT_SUCCESS; }
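The finish_segs path above handles corruption of GSO packets: netem_segment() splits the skb, each segment is then enqueued individually, and a failed segment is dropped on its own while the running packet/byte tally lets the parent's counters be corrected afterwards. An illustrative, self-contained model of that per-segment loop (struct seg and the failure condition are invented):

#include <stdio.h>
#include <stdlib.h>

struct seg { int len; struct seg *next; };

static struct seg *queued[16];
static int nqueued;

static int enqueue_one(struct seg *s)
{
    if (s->len > 1000 || nqueued >= 16)
        return -1;                    /* pretend oversized segs fail */
    queued[nqueued++] = s;
    return 0;
}

static void enqueue_segments(struct seg *segs, int *pkts, int *bytes)
{
    while (segs) {
        struct seg *next = segs->next;
        segs->next = NULL;            /* detach, cf. segs->next = NULL */
        if (enqueue_one(segs) != 0) {
            printf("dropped segment of %d bytes\n", segs->len);
            free(segs);               /* only this segment is lost */
        } else {
            *pkts += 1;               /* cf. the nb / last_len tally */
            *bytes += segs->len;
        }
        segs = next;
    }
}

int main(void)
{
    struct seg *head = NULL;
    int lens[] = { 500, 1400, 700 }, pkts = 0, bytes = 0;

    for (int i = 2; i >= 0; i--) {    /* list ends up 500 -> 1400 -> 700 */
        struct seg *s = calloc(1, sizeof(*s));
        s->len = lens[i];
        s->next = head;
        head = s;
    }
    enqueue_segments(head, &pkts, &bytes);
    printf("queued %d segments, %d bytes\n", pkts, bytes);
    while (nqueued)
        free(queued[--nqueued]);
    return 0;
}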
- static unsigned int netem_drop(struct Qdisc *sch) - { - struct netem_sched_data *q = qdisc_priv(sch); - unsigned int len; - - len = qdisc_queue_drop(sch); - - if (!len) { - struct rb_node *p = rb_first(&q->t_root); - - if (p) { - struct sk_buff *skb = netem_rb_to_skb(p); - - rb_erase(p, &q->t_root); - sch->q.qlen--; - skb->next = NULL; - skb->prev = NULL; - qdisc_qstats_backlog_dec(sch, skb); - kfree_skb(skb); - } - } - if (!len && q->qdisc && q->qdisc->ops->drop) - len = q->qdisc->ops->drop(q->qdisc); - if (len) - qdisc_qstats_drop(sch); - - return len; - } - static struct sk_buff *netem_dequeue(struct Qdisc *sch) { struct netem_sched_data *q = qdisc_priv(sch); struct sk_buff *skb; struct rb_node *p;
- if (qdisc_is_throttled(sch)) - return NULL; - tfifo_dequeue: skb = __skb_dequeue(&sch->q); if (skb) { qdisc_qstats_backlog_dec(sch, skb); deliver: - qdisc_unthrottled(sch); qdisc_bstats_update(sch, skb); return skb; } @@@ -650,14 -621,17 +621,17 @@@ #endif
if (q->qdisc) { + unsigned int pkt_len = qdisc_pkt_len(skb); - int err = qdisc_enqueue(skb, q->qdisc); + struct sk_buff *to_free = NULL; + int err;
+ err = qdisc_enqueue(skb, q->qdisc, &to_free); + kfree_skb_list(to_free); - if (unlikely(err != NET_XMIT_SUCCESS)) { - if (net_xmit_drop_count(err)) { - qdisc_qstats_drop(sch); - qdisc_tree_reduce_backlog(sch, 1, - qdisc_pkt_len(skb)); - } + if (err != NET_XMIT_SUCCESS && + net_xmit_drop_count(err)) { + qdisc_qstats_drop(sch); + qdisc_tree_reduce_backlog(sch, 1, + pkt_len); } goto tfifo_dequeue; } @@@ -1143,7 -1117,6 +1117,6 @@@ static struct Qdisc_ops netem_qdisc_op .enqueue = netem_enqueue, .dequeue = netem_dequeue, .peek = qdisc_peek_dequeued, - .drop = netem_drop, .init = netem_init, .reset = netem_reset, .destroy = netem_destroy, diff --combined net/sched/sch_prio.c index a356450,f4d443a..8f57589 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@@ -67,7 -67,7 +67,7 @@@ prio_classify(struct sk_buff *skb, stru }
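Back in the netem_dequeue hunk above, one detail is worth spelling out: qdisc_pkt_len(skb) is captured into pkt_len before qdisc_enqueue() is called, because a failed enqueue may already have freed the skb through the local to_free list (flushed immediately with kfree_skb_list()). A small model of that capture-before-handoff rule, with invented names:

#include <stdio.h>
#include <stdlib.h>

struct pkt { int len; struct pkt *next; };

static int inner_enqueue(struct pkt *p, struct pkt **to_free)
{
    p->next = *to_free;               /* inner queue is "full": drop */
    *to_free = p;
    return -1;
}

static void free_list(struct pkt *p)  /* cf. kfree_skb_list(to_free) */
{
    while (p) {
        struct pkt *n = p->next;
        free(p);
        p = n;
    }
}

int main(void)
{
    struct pkt *p = calloc(1, sizeof(*p));
    struct pkt *to_free = NULL;
    int err, pkt_len;

    p->len = 1500;
    pkt_len = p->len;                 /* capture before losing ownership */
    err = inner_enqueue(p, &to_free);
    free_list(to_free);               /* p may be gone from here on */
    if (err)
        printf("reduce backlog by %d bytes\n", pkt_len);
    return 0;
}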
static int - prio_enqueue(struct sk_buff *skb, struct Qdisc *sch) + prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct Qdisc *qdisc; int ret; @@@ -83,7 -83,7 +83,7 @@@ } #endif
- ret = qdisc_enqueue(skb, qdisc); + ret = qdisc_enqueue(skb, qdisc, to_free); if (ret == NET_XMIT_SUCCESS) { qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; @@@ -127,25 -127,6 +127,6 @@@ static struct sk_buff *prio_dequeue(str
}
- static unsigned int prio_drop(struct Qdisc *sch) - { - struct prio_sched_data *q = qdisc_priv(sch); - int prio; - unsigned int len; - struct Qdisc *qdisc; - - for (prio = q->bands-1; prio >= 0; prio--) { - qdisc = q->queues[prio]; - if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) { - sch->qstats.backlog -= len; - sch->q.qlen--; - return len; - } - } - return 0; - } - - static void prio_reset(struct Qdisc *sch) { @@@ -172,9 -153,8 +153,9 @@@ prio_destroy(struct Qdisc *sch static int prio_tune(struct Qdisc *sch, struct nlattr *opt) { struct prio_sched_data *q = qdisc_priv(sch); + struct Qdisc *queues[TCQ_PRIO_BANDS]; + int oldbands = q->bands, i; struct tc_prio_qopt *qopt; - int i;
if (nla_len(opt) < sizeof(*qopt)) return -EINVAL; @@@ -188,42 -168,62 +169,42 @@@ return -EINVAL; }
+ /* Before commit, make sure we can allocate all new qdiscs */ + for (i = oldbands; i < qopt->bands; i++) { + queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, + TC_H_MAKE(sch->handle, i + 1)); + if (!queues[i]) { + while (i > oldbands) + qdisc_destroy(queues[--i]); + return -ENOMEM; + } + } + sch_tree_lock(sch); q->bands = qopt->bands; memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
- for (i = q->bands; i < TCQ_PRIO_BANDS; i++) { + for (i = q->bands; i < oldbands; i++) { struct Qdisc *child = q->queues[i]; - q->queues[i] = &noop_qdisc; - if (child != &noop_qdisc) { - qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog); - qdisc_destroy(child); - } - } - sch_tree_unlock(sch);
- for (i = 0; i < q->bands; i++) { - if (q->queues[i] == &noop_qdisc) { - struct Qdisc *child, *old; - - child = qdisc_create_dflt(sch->dev_queue, - &pfifo_qdisc_ops, - TC_H_MAKE(sch->handle, i + 1)); - if (child) { - sch_tree_lock(sch); - old = q->queues[i]; - q->queues[i] = child; - - if (old != &noop_qdisc) { - qdisc_tree_reduce_backlog(old, - old->q.qlen, - old->qstats.backlog); - qdisc_destroy(old); - } - sch_tree_unlock(sch); - } - } + qdisc_tree_reduce_backlog(child, child->q.qlen, + child->qstats.backlog); + qdisc_destroy(child); } + + for (i = oldbands; i < q->bands; i++) + q->queues[i] = queues[i]; + + sch_tree_unlock(sch); return 0; }
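The rewritten prio_tune() above adopts an allocate-then-commit shape: every qdisc the new configuration might need is created before sch_tree_lock() is taken, a mid-way allocation failure unwinds without touching the live bands, and the commit phase under the lock cannot fail. A userspace sketch of the same two-phase pattern (retune(), bands[] and the sizes are invented):

#include <stdio.h>
#include <stdlib.h>

#define MAX_BANDS 16

static void *bands[MAX_BANDS];
static int nbands = 1;

static int retune(int new_bands)
{
    void *fresh[MAX_BANDS];
    int i;

    if (new_bands < 1 || new_bands > MAX_BANDS)
        return -1;

    /* 1. allocate everything that might be needed */
    for (i = nbands; i < new_bands; i++) {
        fresh[i] = malloc(64);
        if (!fresh[i]) {
            while (i > nbands)        /* unwind, old config intact */
                free(fresh[--i]);
            return -1;                /* cf. -ENOMEM */
        }
    }

    /* 2. commit: from here on nothing can fail */
    for (i = new_bands; i < nbands; i++)
        free(bands[i]);               /* cf. qdisc_destroy(child) */
    for (i = nbands; i < new_bands; i++)
        bands[i] = fresh[i];
    nbands = new_bands;
    return 0;
}

int main(void)
{
    bands[0] = malloc(64);
    printf("grow: %d\n", retune(3));
    printf("shrink: %d\n", retune(2));
    for (int i = 0; i < nbands; i++)
        free(bands[i]);
    return 0;
}

This is the standard way to keep an error path from leaving a half-updated configuration behind.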
static int prio_init(struct Qdisc *sch, struct nlattr *opt) { - struct prio_sched_data *q = qdisc_priv(sch); - int i; - - for (i = 0; i < TCQ_PRIO_BANDS; i++) - q->queues[i] = &noop_qdisc; - - if (opt == NULL) { + if (!opt) return -EINVAL; - } else { - int err;
- if ((err = prio_tune(sch, opt)) != 0) - return err; - } - return 0; + return prio_tune(sch, opt); }
static int prio_dump(struct Qdisc *sch, struct sk_buff *skb) @@@ -304,7 -304,8 +285,8 @@@ static int prio_dump_class_stats(struc struct Qdisc *cl_q;
cl_q = q->queues[cl - 1]; - if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 || + if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), + d, NULL, &cl_q->bstats) < 0 || gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0) return -1;
@@@ -363,7 -364,6 +345,6 @@@ static struct Qdisc_ops prio_qdisc_ops .enqueue = prio_enqueue, .dequeue = prio_dequeue, .peek = prio_peek, - .drop = prio_drop, .init = prio_init, .reset = prio_reset, .destroy = prio_destroy, diff --combined net/tipc/bearer.c index bf8f05c,9a70e1d..8584cc4 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@@ -1,7 -1,7 +1,7 @@@ /* * net/tipc/bearer.c: TIPC bearer code * - * Copyright (c) 1996-2006, 2013-2014, Ericsson AB + * Copyright (c) 1996-2006, 2013-2016, Ericsson AB * Copyright (c) 2004-2006, 2010-2013, Wind River Systems * All rights reserved. * @@@ -39,6 -39,7 +39,7 @@@ #include "bearer.h" #include "link.h" #include "discover.h" + #include "monitor.h" #include "bcast.h" #include "netlink.h"
@@@ -313,6 -314,10 +314,10 @@@ restart rcu_assign_pointer(tn->bearer_list[bearer_id], b); if (skb) tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr); + + if (tipc_mon_create(net, bearer_id)) + return -ENOMEM; + pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n", name, tipc_addr_string_fill(addr_string, disc_domain), priority); @@@ -348,6 -353,7 +353,7 @@@ static void bearer_disable(struct net * tipc_disc_delete(b->link_req); RCU_INIT_POINTER(tn->bearer_list[bearer_id], NULL); kfree_rcu(b, rcu); + tipc_mon_delete(net, bearer_id); }
int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, @@@ -405,7 -411,7 +411,7 @@@ int tipc_l2_send_msg(struct net *net, s return 0;
/* Send RESET message even if bearer is detached from device */ - tipc_ptr = rtnl_dereference(dev->tipc_ptr); + tipc_ptr = rcu_dereference_rtnl(dev->tipc_ptr); if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb)))) goto drop;
diff --combined net/tipc/link.c index 67b6ab9,03f8bdf..c1df33f --- a/net/tipc/link.c +++ b/net/tipc/link.c @@@ -42,6 -42,7 +42,7 @@@ #include "name_distr.h" #include "discover.h" #include "netlink.h" + #include "monitor.h"
#include <linux/pkt_sched.h>
@@@ -87,7 -88,6 +88,6 @@@ struct tipc_stats * @peer_bearer_id: bearer id used by link's peer endpoint * @bearer_id: local bearer id used by link * @tolerance: minimum link continuity loss needed to reset link [in ms] - * @keepalive_intv: link keepalive timer interval * @abort_limit: # of unacknowledged continuity probes needed to reset link * @state: current state of link FSM * @peer_caps: bitmap describing capabilities of peer node @@@ -96,6 -96,7 +96,7 @@@ * @pmsg: convenience pointer to "proto_msg" field * @priority: current link priority * @net_plane: current link network plane ('A' through 'H') + * @mon_state: cookie with information needed by link monitor * @backlog_limit: backlog queue congestion thresholds (indexed by importance) * @exp_msg_count: # of tunnelled messages expected during link changeover * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset @@@ -131,7 -132,6 +132,6 @@@ struct tipc_link u32 peer_bearer_id; u32 bearer_id; u32 tolerance; - unsigned long keepalive_intv; u32 abort_limit; u32 state; u16 peer_caps; @@@ -140,6 -140,7 +140,7 @@@ char if_name[TIPC_MAX_IF_NAME]; u32 priority; char net_plane; + struct tipc_mon_state mon_state; u16 rst_cnt;
/* Failover/synch */ @@@ -704,25 -705,31 +705,32 @@@ static void link_profile_stats(struct t */ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) { - int mtyp, rc = 0; + int mtyp = 0; + int rc = 0; bool state = false; bool probe = false; bool setup = false; u16 bc_snt = l->bc_sndlink->snd_nxt - 1; u16 bc_acked = l->bc_rcvlink->acked; - - link_profile_stats(l); + struct tipc_mon_state *mstate = &l->mon_state;
switch (l->state) { case LINK_ESTABLISHED: case LINK_SYNCHING: - if (l->silent_intv_cnt > l->abort_limit) - return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); mtyp = STATE_MSG; + link_profile_stats(l); + tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id); + if (mstate->reset || (l->silent_intv_cnt > l->abort_limit)) + return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); state = bc_acked != bc_snt; - probe = l->silent_intv_cnt; - l->silent_intv_cnt++; + state |= l->bc_rcvlink->rcv_unacked; + state |= l->rcv_unacked; + state |= !skb_queue_empty(&l->transmq); + state |= !skb_queue_empty(&l->deferdq); + probe = mstate->probing; + probe |= l->silent_intv_cnt; + if (probe || mstate->monitoring) + l->silent_intv_cnt++; break; case LINK_RESET: setup = l->rst_cnt++ <= 4; @@@ -833,6 -840,7 +841,7 @@@ void tipc_link_reset(struct tipc_link * l->stats.recv_info = 0; l->stale_count = 0; l->bc_peer_is_up = false; + memset(&l->mon_state, 0, sizeof(l->mon_state)); tipc_link_reset_stats(l); }
@@@ -1241,6 -1249,9 +1250,9 @@@ static void tipc_link_build_proto_msg(s struct tipc_msg *hdr; struct sk_buff_head *dfq = &l->deferdq; bool node_up = link_is_up(l->bc_rcvlink); + struct tipc_mon_state *mstate = &l->mon_state; + int dlen = 0; + void *data;
/* Don't send protocol message during reset or link failover */ if (tipc_link_is_blocked(l)) @@@ -1253,12 -1264,13 +1265,13 @@@ rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE, - TIPC_MAX_IF_NAME, l->addr, + tipc_max_domain_size, l->addr, tipc_own_addr(l->net), 0, 0, 0); if (!skb) return;
hdr = buf_msg(skb); + data = msg_data(hdr); msg_set_session(hdr, l->session); msg_set_bearer_id(hdr, l->bearer_id); msg_set_net_plane(hdr, l->net_plane); @@@ -1274,14 -1286,18 +1287,18 @@@
if (mtyp == STATE_MSG) { msg_set_seq_gap(hdr, rcvgap); - msg_set_size(hdr, INT_H_SIZE); msg_set_probe(hdr, probe); + tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id); + msg_set_size(hdr, INT_H_SIZE + dlen); + skb_trim(skb, INT_H_SIZE + dlen); l->stats.sent_states++; l->rcv_unacked = 0; } else { /* RESET_MSG or ACTIVATE_MSG */ msg_set_max_pkt(hdr, l->advertised_mtu); - strcpy(msg_data(hdr), l->if_name); + strcpy(data, l->if_name); + msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME); + skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME); } if (probe) l->stats.sent_probes++; @@@ -1374,7 -1390,9 +1391,9 @@@ static int tipc_link_proto_rcv(struct t u16 peers_tol = msg_link_tolerance(hdr); u16 peers_prio = msg_linkprio(hdr); u16 rcv_nxt = l->rcv_nxt; + u16 dlen = msg_data_sz(hdr); int mtyp = msg_type(hdr); + void *data; char *if_name; int rc = 0;
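In the tipc_link_build_proto_msg() hunk above, STATE_MSG becomes variable-sized: tipc_mon_prep() writes the monitor payload after the INT_H_SIZE header, msg_set_size() records INT_H_SIZE + dlen, and skb_trim() shrinks the buffer to match. A rough userspace sketch of building such a header-plus-payload message; the 16-byte header and one-byte size field below are invented and bear no relation to the real TIPC header layout:

#include <stdio.h>
#include <string.h>

#define HDR_SIZE 16
#define MAX_MSG  64

struct msg {
    unsigned char buf[MAX_MSG];
    size_t len;                       /* cf. skb length after skb_trim */
};

static void build_state_msg(struct msg *m, const void *data, size_t dlen)
{
    memset(m->buf, 0, HDR_SIZE);      /* fixed header part */
    memcpy(m->buf + HDR_SIZE, data, dlen);

    /* cf. msg_set_size(hdr, INT_H_SIZE + dlen) */
    m->buf[0] = (unsigned char)(HDR_SIZE + dlen);

    m->len = HDR_SIZE + dlen;         /* cf. skb_trim(skb, ...) */
}

int main(void)
{
    struct msg m;
    const char mon[] = "monitor-state";

    build_state_msg(&m, mon, sizeof(mon));
    printf("message is %zu bytes (%d recorded in header)\n",
           m.len, m.buf[0]);
    return 0;
}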
@@@ -1384,6 -1402,10 +1403,10 @@@ if (tipc_own_addr(l->net) > msg_prevnode(hdr)) l->net_plane = msg_net_plane(hdr);
+ skb_linearize(skb); + hdr = buf_msg(skb); + data = msg_data(hdr); + switch (mtyp) { case RESET_MSG:
@@@ -1394,8 -1416,6 +1417,6 @@@ /* fall thru' */
case ACTIVATE_MSG: - skb_linearize(skb); - hdr = buf_msg(skb);
/* Complete own link name with peer's interface name */ if_name = strrchr(l->name, ':') + 1; @@@ -1403,7 -1423,7 +1424,7 @@@ break; if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME) break; - strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME); + strncpy(if_name, data, TIPC_MAX_IF_NAME);
/* Update own tolerance if peer indicates a non-zero value */ if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) @@@ -1451,6 -1471,8 +1472,8 @@@ rc = TIPC_LINK_UP_EVT; break; } + tipc_mon_rcv(l->net, data, dlen, l->addr, + &l->mon_state, l->bearer_id);
/* Send NACK if peer has sent pkts we haven't received yet */ if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l)) diff --combined tools/virtio/ringtest/Makefile index 6173ada,50e086c..877a8a4 --- a/tools/virtio/ringtest/Makefile +++ b/tools/virtio/ringtest/Makefile @@@ -1,6 -1,6 +1,6 @@@ all:
- all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder noring -all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder ptr_ring ++all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder ptr_ring noring
CFLAGS += -Wall CFLAGS += -pthread -O2 -ggdb @@@ -8,6 -8,7 +8,7 @@@ LDFLAGS += -pthread -O2 -ggd
main.o: main.c main.h ring.o: ring.c main.h + ptr_ring.o: ptr_ring.c main.h ../../../include/linux/ptr_ring.h virtio_ring_0_9.o: virtio_ring_0_9.c main.h virtio_ring_poll.o: virtio_ring_poll.c virtio_ring_0_9.c main.h virtio_ring_inorder.o: virtio_ring_inorder.c virtio_ring_0_9.c main.h @@@ -15,13 -16,13 +16,15 @@@ ring: ring.o main. virtio_ring_0_9: virtio_ring_0_9.o main.o virtio_ring_poll: virtio_ring_poll.o main.o virtio_ring_inorder: virtio_ring_inorder.o main.o + ptr_ring: ptr_ring.o main.o +noring: noring.o main.o clean: -rm main.o -rm ring.o ring -rm virtio_ring_0_9.o virtio_ring_0_9 -rm virtio_ring_poll.o virtio_ring_poll -rm virtio_ring_inorder.o virtio_ring_inorder + -rm ptr_ring.o ptr_ring + -rm noring.o noring
.PHONY: all clean