The following commit has been merged in the master branch:

commit ee58b57100ca953da7320c285315a95db2f7053d
Merge: 6f30e8b022c8e3a722928ddb1a2ae0be852fcc0e e7bdea7750eb2a64aea4a08fa5c0a31719c8155d
Author: David S. Miller <davem@davemloft.net>
Date:   Thu Jun 30 05:03:36 2016 -0400
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Several cases of overlapping changes, except for the packet scheduler conflicts, which deal with the addition of the free list parameter to qdisc_enqueue().
Signed-off-by: David S. Miller <davem@davemloft.net>
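For context, the free list parameter mentioned above threads a to-free list through the qdisc enqueue path so that dropped packets can be freed after the root qdisc lock is released. The resulting helper has roughly this shape (a sketch of the net-next side of this merge, not part of the diff below):

	static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
					struct sk_buff **to_free)
	{
		qdisc_calculate_pkt_len(skb, sch);
		return sch->enqueue(skb, sch, to_free);
	}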
diff --combined MAINTAINERS index f5ddaa9,1209323..d8c0784 --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -595,6 -595,10 +595,10 @@@ S: Odd Fixe L: linux-alpha@vger.kernel.org F: arch/alpha/
+ ALPS PS/2 TOUCHPAD DRIVER + R: Pali Rohár pali.rohar@gmail.com + F: drivers/input/mouse/alps.* + ALTERA MAILBOX DRIVER M: Ley Foon Tan lftan@altera.com L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers) @@@ -1159,6 -1163,7 +1163,7 @@@ F: arch/arm/mach-footbridge ARM/FREESCALE IMX / MXC ARM ARCHITECTURE M: Shawn Guo shawnguo@kernel.org M: Sascha Hauer kernel@pengutronix.de + R: Fabio Estevam fabio.estevam@nxp.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git @@@ -2242,7 -2247,8 +2247,8 @@@ F: include/net/ax25. F: net/ax25/
AZ6007 DVB DRIVER - M: Mauro Carvalho Chehab mchehab@osg.samsung.com + M: Mauro Carvalho Chehab mchehab@s-opensource.com + M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git @@@ -2454,14 -2460,6 +2460,14 @@@ L: netdev@vger.kernel.or S: Supported F: drivers/net/ethernet/broadcom/b44.*
+BROADCOM B53 ETHERNET SWITCH DRIVER +M: Florian Fainelli f.fainelli@gmail.com +L: netdev@vger.kernel.org +L: openwrt-devel@lists.openwrt.org (subscribers-only) +S: Supported +F: drivers/net/dsa/b53/* +F: include/linux/platform_data/b53.h + BROADCOM GENET ETHERNET DRIVER M: Florian Fainelli f.fainelli@gmail.com L: netdev@vger.kernel.org @@@ -2578,11 -2576,12 +2584,11 @@@ S: Supporte F: drivers/net/ethernet/broadcom/tg3.*
BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER -M: Brett Rudley brudley@broadcom.com -M: Arend van Spriel arend@broadcom.com -M: Franky (Zhenhui) Lin frankyl@broadcom.com -M: Hante Meuleman meuleman@broadcom.com +M: Arend van Spriel arend.vanspriel@broadcom.com +M: Franky Lin franky.lin@broadcom.com +M: Hante Meuleman hante.meuleman@broadcom.com L: linux-wireless@vger.kernel.org -L: brcm80211-dev-list@broadcom.com +L: brcm80211-dev-list.pdl@broadcom.com S: Supported F: drivers/net/wireless/broadcom/brcm80211/
@@@ -2716,7 -2715,8 +2722,8 @@@ F: Documentation/filesystems/btrfs.tx F: fs/btrfs/
BTTV VIDEO4LINUX DRIVER - M: Mauro Carvalho Chehab mchehab@osg.samsung.com + M: Mauro Carvalho Chehab mchehab@s-opensource.com + M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git @@@ -2780,9 -2780,9 +2787,9 @@@ F: include/net/caif F: net/caif/
CALGARY x86-64 IOMMU - M: Muli Ben-Yehuda muli@il.ibm.com - M: "Jon D. Mason" jdmason@kudzu.us - L: discuss@x86-64.org + M: Muli Ben-Yehuda mulix@mulix.org + M: Jon Mason jdmason@kudzu.us + L: iommu@lists.linux-foundation.org S: Maintained F: arch/x86/kernel/pci-calgary_64.c F: arch/x86/kernel/tce_64.c @@@ -2813,7 -2813,6 +2820,7 @@@ W: https://github.com/linux-ca T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git S: Maintained +F: Documentation/devicetree/bindings/net/can/ F: drivers/net/can/ F: include/linux/can/dev.h F: include/linux/can/platform/ @@@ -3352,7 -3351,8 +3359,8 @@@ S: Maintaine F: drivers/media/dvb-frontends/cx24120*
CX88 VIDEO4LINUX DRIVER - M: Mauro Carvalho Chehab mchehab@osg.samsung.com + M: Mauro Carvalho Chehab mchehab@s-opensource.com + M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git @@@ -3782,6 -3782,7 +3790,7 @@@ Q: https://patchwork.kernel.org/project S: Maintained F: drivers/dma/ F: include/linux/dmaengine.h + F: Documentation/devicetree/bindings/dma/ F: Documentation/dmaengine/ T: git git://git.infradead.org/users/vkoul/slave-dma.git
@@@ -4299,7 -4300,8 +4308,8 @@@ F: fs/ecryptfs EDAC-CORE M: Doug Thompson dougthompson@xmission.com M: Borislav Petkov bp@alien8.de - M: Mauro Carvalho Chehab mchehab@osg.samsung.com + M: Mauro Carvalho Chehab mchehab@s-opensource.com + M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-edac@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac.git linux_next @@@ -4344,7 -4346,8 +4354,8 @@@ S: Maintaine F: drivers/edac/e7xxx_edac.c
EDAC-GHES - M: Mauro Carvalho Chehab mchehab@osg.samsung.com + M: Mauro Carvalho Chehab mchehab@s-opensource.com + M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/ghes_edac.c @@@ -4368,19 -4371,22 +4379,22 @@@ S: Maintaine F: drivers/edac/i5000_edac.c
EDAC-I5400 - M: Mauro Carvalho Chehab mchehab@osg.samsung.com + M: Mauro Carvalho Chehab mchehab@s-opensource.com + M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/i5400_edac.c
EDAC-I7300 - M: Mauro Carvalho Chehab mchehab@osg.samsung.com + M: Mauro Carvalho Chehab mchehab@s-opensource.com + M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/i7300_edac.c
EDAC-I7CORE - M: Mauro Carvalho Chehab mchehab@osg.samsung.com + M: Mauro Carvalho Chehab mchehab@s-opensource.com + M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/i7core_edac.c @@@ -4417,7 -4423,8 +4431,8 @@@ S: Maintaine F: drivers/edac/r82600_edac.c
EDAC-SBRIDGE - M: Mauro Carvalho Chehab mchehab@osg.samsung.com + M: Mauro Carvalho Chehab mchehab@s-opensource.com + M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-edac@vger.kernel.org S: Maintained F: drivers/edac/sb_edac.c @@@ -4476,7 -4483,8 +4491,8 @@@ S: Maintaine F: drivers/net/ethernet/ibm/ehea/
EM28XX VIDEO4LINUX DRIVER - M: Mauro Carvalho Chehab mchehab@osg.samsung.com + M: Mauro Carvalho Chehab mchehab@s-opensource.com + M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git @@@ -4879,13 -4887,6 +4895,13 @@@ F: drivers/net/ethernet/freescale/gianf X: drivers/net/ethernet/freescale/gianfar_ptp.c F: Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
+FREESCALE QUICC ENGINE UCC HDLC DRIVER +M: Zhao Qiang qiang.zhao@nxp.com +L: netdev@vger.kernel.org +L: linuxppc-dev@lists.ozlabs.org +S: Maintained +F: drivers/net/wan/fsl_ucc_hdlc* + FREESCALE QUICC ENGINE UCC UART DRIVER M: Timur Tabi timur@tabi.org L: linuxppc-dev@lists.ozlabs.org @@@ -6502,6 -6503,7 +6518,7 @@@ F: include/uapi/linux/sunrpc
KERNEL SELFTEST FRAMEWORK M: Shuah Khan shuahkh@osg.samsung.com + M: Shuah Khan shuah@kernel.org L: linux-kselftest@vger.kernel.org T: git git://git.kernel.org/pub/scm/shuah/linux-kselftest S: Maintained @@@ -7171,12 -7173,6 +7188,12 @@@ W: http://www.kernel.org/doc/man-page L: linux-man@vger.kernel.org S: Maintained
+MARVELL 88E6XXX ETHERNET SWITCH FABRIC DRIVER +M: Andrew Lunn andrew@lunn.ch +M: Vivien Didelot vivien.didelot@savoirfairelinux.com +S: Maintained +F: drivers/net/dsa/mv88e6xxx/ + MARVELL ARMADA DRM SUPPORT M: Russell King rmk+kernel@armlinux.org.uk S: Maintained @@@ -7184,6 -7180,11 +7201,6 @@@ F: drivers/gpu/drm/armada F: include/uapi/drm/armada_drm.h F: Documentation/devicetree/bindings/display/armada/
-MARVELL 88E6352 DSA support -M: Guenter Roeck linux@roeck-us.net -S: Maintained -F: drivers/net/dsa/mv88e6352.c - MARVELL CRYPTO DRIVER M: Boris Brezillon boris.brezillon@free-electrons.com M: Arnaud Ebalard arno@natisbad.org @@@ -7374,7 -7375,8 +7391,8 @@@ S: Supporte F: drivers/media/pci/netup_unidvb/*
MEDIA INPUT INFRASTRUCTURE (V4L/DVB) - M: Mauro Carvalho Chehab mchehab@osg.samsung.com + M: Mauro Carvalho Chehab mchehab@s-opensource.com + M: Mauro Carvalho Chehab mchehab@kernel.org P: LinuxTV.org Project L: linux-media@vger.kernel.org W: https://linuxtv.org @@@ -7422,7 -7424,7 +7440,7 @@@ F: drivers/scsi/megaraid. F: drivers/scsi/megaraid/
MELLANOX ETHERNET DRIVER (mlx4_en) - M: Eugenia Emantayev eugenia@mellanox.com + M: Tariq Toukan tariqt@mellanox.com L: netdev@vger.kernel.org S: Supported W: http://www.mellanox.com @@@ -8423,10 -8425,9 +8441,9 @@@ F: drivers/i2c/busses/i2c-ocores. OPEN FIRMWARE AND FLATTENED DEVICE TREE M: Rob Herring robh+dt@kernel.org M: Frank Rowand frowand.list@gmail.com - M: Grant Likely grant.likely@linaro.org L: devicetree@vger.kernel.org W: http://www.devicetree.org/ - T: git git://git.kernel.org/pub/scm/linux/kernel/git/glikely/linux.git + T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git S: Maintained F: drivers/of/ F: include/linux/of*.h @@@ -8434,12 -8435,10 +8451,10 @@@ F: scripts/dtc
OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS M: Rob Herring robh+dt@kernel.org - M: Pawel Moll pawel.moll@arm.com M: Mark Rutland mark.rutland@arm.com - M: Ian Campbell ijc+devicetree@hellion.org.uk - M: Kumar Gala galak@codeaurora.org L: devicetree@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git + Q: http://patchwork.ozlabs.org/project/devicetree-bindings/list/ S: Maintained F: Documentation/devicetree/ F: arch/*/boot/dts/ @@@ -8964,6 -8963,7 +8979,7 @@@ L: linux-gpio@vger.kernel.or T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git S: Maintained F: Documentation/devicetree/bindings/pinctrl/ + F: Documentation/pinctrl.txt F: drivers/pinctrl/ F: include/linux/pinctrl/
@@@ -9871,7 -9871,8 +9887,8 @@@ S: Odd Fixe F: drivers/media/i2c/saa6588*
SAA7134 VIDEO4LINUX DRIVER - M: Mauro Carvalho Chehab mchehab@osg.samsung.com + M: Mauro Carvalho Chehab mchehab@s-opensource.com + M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git @@@ -10280,9 -10281,10 +10297,9 @@@ W: http://www.avagotech.co S: Supported F: drivers/scsi/be2iscsi/
-Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER +Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER (be2net) M: Sathya Perla sathya.perla@broadcom.com M: Ajit Khaparde ajit.khaparde@broadcom.com -M: Padmanabh Ratnakar padmanabh.ratnakar@broadcom.com M: Sriharsha Basavapatna sriharsha.basavapatna@broadcom.com M: Somnath Kotur somnath.kotur@broadcom.com L: netdev@vger.kernel.org @@@ -10389,7 -10391,8 +10406,8 @@@ S: Maintaine F: drivers/media/radio/si4713/radio-usb-si4713.c
SIANO DVB DRIVER - M: Mauro Carvalho Chehab mchehab@osg.samsung.com + M: Mauro Carvalho Chehab mchehab@s-opensource.com + M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git @@@ -11155,7 -11158,8 +11173,8 @@@ S: Maintaine F: drivers/media/i2c/tda9840*
TEA5761 TUNER DRIVER - M: Mauro Carvalho Chehab mchehab@osg.samsung.com + M: Mauro Carvalho Chehab mchehab@s-opensource.com + M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git @@@ -11163,7 -11167,8 +11182,8 @@@ S: Odd fixe F: drivers/media/tuners/tea5761.*
TEA5767 TUNER DRIVER - M: Mauro Carvalho Chehab mchehab@osg.samsung.com + M: Mauro Carvalho Chehab mchehab@s-opensource.com + M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git @@@ -11550,7 -11555,8 +11570,8 @@@ F: include/linux/shmem_fs. F: mm/shmem.c
TM6000 VIDEO4LINUX DRIVER - M: Mauro Carvalho Chehab mchehab@osg.samsung.com + M: Mauro Carvalho Chehab mchehab@s-opensource.com + M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git @@@ -11904,7 -11910,8 +11925,8 @@@ F: drivers/usb/common/usb-otg-fsm.
USB OVER IP DRIVER M: Valentina Manea valentina.manea.m@gmail.com - M: Shuah Khan shuah.kh@samsung.com + M: Shuah Khan shuahkh@osg.samsung.com + M: Shuah Khan shuah@kernel.org L: linux-usb@vger.kernel.org S: Maintained F: Documentation/usb/usbip_protocol.txt @@@ -11975,6 -11982,7 +11997,7 @@@ L: linux-usb@vger.kernel.or W: http://www.linux-usb.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git S: Supported + F: Documentation/devicetree/bindings/usb/ F: Documentation/usb/ F: drivers/usb/ F: include/linux/usb.h @@@ -12148,6 -12156,7 +12171,7 @@@ VIRTIO CORE, NET AND BLOCK DRIVER M: "Michael S. Tsirkin" mst@redhat.com L: virtualization@lists.linux-foundation.org S: Maintained + F: Documentation/devicetree/bindings/virtio/ F: drivers/virtio/ F: tools/virtio/ F: drivers/net/virtio_net.c @@@ -12536,7 -12545,8 +12560,8 @@@ S: Maintaine F: arch/x86/entry/vdso/
XC2028/3028 TUNER DRIVER - M: Mauro Carvalho Chehab mchehab@osg.samsung.com + M: Mauro Carvalho Chehab mchehab@s-opensource.com + M: Mauro Carvalho Chehab mchehab@kernel.org L: linux-media@vger.kernel.org W: https://linuxtv.org T: git git://linuxtv.org/media_tree.git diff --combined arch/arm/boot/dts/dra7.dtsi index f8b39a5,3a8f397..de559f6 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@@ -1451,6 -1451,8 +1451,8 @@@ ti,hwmods = "gpmc"; reg = <0x50000000 0x37c>; /* device IO registers */ interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; + dmas = <&edma_xbar 4 0>; + dma-names = "rxtx"; gpmc,num-cs = <8>; gpmc,num-waitpins = <2>; #address-cells = <2>; @@@ -1626,6 -1628,7 +1628,6 @@@ ale_entries = <1024>; bd_ram_size = <0x2000>; no_bd_ram = <0>; - rx_descs = <64>; mac_control = <0x20>; slaves = <2>; active_slave = <0>; @@@ -1660,7 -1663,7 +1662,7 @@@ status = "disabled";
davinci_mdio: mdio@48485000 { - compatible = "ti,davinci_mdio"; + compatible = "ti,cpsw-mdio","ti,davinci_mdio"; #address-cells = <1>; #size-cells = <0>; ti,hwmods = "davinci_mdio"; diff --combined drivers/net/can/dev.c index 7188137,ad535a8..e21f7cc --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@@ -69,7 -69,6 +69,7 @@@ EXPORT_SYMBOL_GPL(can_len2dlc)
 #ifdef CONFIG_CAN_CALC_BITTIMING
 #define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
+#define CAN_CALC_SYNC_SEG 1
/* * Bit-timing calculation derived from: @@@ -84,126 -83,98 +84,126 @@@ * registers of the CAN controller. You can find more information * in the header file linux/can/netlink.h. */ -static int can_update_spt(const struct can_bittiming_const *btc, - int sampl_pt, int tseg, int *tseg1, int *tseg2) +static int can_update_sample_point(const struct can_bittiming_const *btc, + unsigned int sample_point_nominal, unsigned int tseg, + unsigned int *tseg1_ptr, unsigned int *tseg2_ptr, + unsigned int *sample_point_error_ptr) { - *tseg2 = tseg + 1 - (sampl_pt * (tseg + 1)) / 1000; - if (*tseg2 < btc->tseg2_min) - *tseg2 = btc->tseg2_min; - if (*tseg2 > btc->tseg2_max) - *tseg2 = btc->tseg2_max; - *tseg1 = tseg - *tseg2; - if (*tseg1 > btc->tseg1_max) { - *tseg1 = btc->tseg1_max; - *tseg2 = tseg - *tseg1; + unsigned int sample_point_error, best_sample_point_error = UINT_MAX; + unsigned int sample_point, best_sample_point = 0; + unsigned int tseg1, tseg2; + int i; + + for (i = 0; i <= 1; i++) { + tseg2 = tseg + CAN_CALC_SYNC_SEG - (sample_point_nominal * (tseg + CAN_CALC_SYNC_SEG)) / 1000 - i; + tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max); + tseg1 = tseg - tseg2; + if (tseg1 > btc->tseg1_max) { + tseg1 = btc->tseg1_max; + tseg2 = tseg - tseg1; + } + + sample_point = 1000 * (tseg + CAN_CALC_SYNC_SEG - tseg2) / (tseg + CAN_CALC_SYNC_SEG); + sample_point_error = abs(sample_point_nominal - sample_point); + + if ((sample_point <= sample_point_nominal) && (sample_point_error < best_sample_point_error)) { + best_sample_point = sample_point; + best_sample_point_error = sample_point_error; + *tseg1_ptr = tseg1; + *tseg2_ptr = tseg2; + } } - return 1000 * (tseg + 1 - *tseg2) / (tseg + 1); + + if (sample_point_error_ptr) + *sample_point_error_ptr = best_sample_point_error; + + return best_sample_point; }
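The per-mille arithmetic in can_update_sample_point() is easy to check in isolation. A minimal user-space sketch, assuming SYNC_SEG = 1 tq and leaving out the tseg1/tseg2 clamping done above:

	#include <stdio.h>

	/* sample point in per mille of the bit time, same formula as above */
	static unsigned int sample_point(unsigned int tseg, unsigned int tseg2)
	{
		return 1000 * (tseg + 1 - tseg2) / (tseg + 1);
	}

	int main(void)
	{
		unsigned int tseg = 15, nominal = 875;
		unsigned int tseg2 = tseg + 1 - (nominal * (tseg + 1)) / 1000;

		/* prints: tseg2=2 sample_point=875 (i.e. 87.5%) */
		printf("tseg2=%u sample_point=%u\n",
		       tseg2, sample_point(tseg, tseg2));
		return 0;
	}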
static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, const struct can_bittiming_const *btc) { struct can_priv *priv = netdev_priv(dev); - long best_error = 1000000000, error = 0; - int best_tseg = 0, best_brp = 0, brp = 0; - int tsegall, tseg = 0, tseg1 = 0, tseg2 = 0; - int spt_error = 1000, spt = 0, sampl_pt; - long rate; + unsigned int bitrate; /* current bitrate */ + unsigned int bitrate_error; /* difference between current and nominal value */ + unsigned int best_bitrate_error = UINT_MAX; + unsigned int sample_point_error; /* difference between current and nominal value */ + unsigned int best_sample_point_error = UINT_MAX; + unsigned int sample_point_nominal; /* nominal sample point */ + unsigned int best_tseg = 0; /* current best value for tseg */ + unsigned int best_brp = 0; /* current best value for brp */ + unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0; u64 v64;
/* Use CiA recommended sample points */ if (bt->sample_point) { - sampl_pt = bt->sample_point; + sample_point_nominal = bt->sample_point; } else { if (bt->bitrate > 800000) - sampl_pt = 750; + sample_point_nominal = 750; else if (bt->bitrate > 500000) - sampl_pt = 800; + sample_point_nominal = 800; else - sampl_pt = 875; + sample_point_nominal = 875; }
/* tseg even = round down, odd = round up */ for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1; tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) { - tsegall = 1 + tseg / 2; + tsegall = CAN_CALC_SYNC_SEG + tseg / 2; + /* Compute all possible tseg choices (tseg=tseg1+tseg2) */ brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2; - /* chose brp step which is possible in system */ + + /* choose brp step which is possible in system */ brp = (brp / btc->brp_inc) * btc->brp_inc; if ((brp < btc->brp_min) || (brp > btc->brp_max)) continue; - rate = priv->clock.freq / (brp * tsegall); - error = bt->bitrate - rate; + + bitrate = priv->clock.freq / (brp * tsegall); + bitrate_error = abs(bt->bitrate - bitrate); + /* tseg brp biterror */ - if (error < 0) - error = -error; - if (error > best_error) + if (bitrate_error > best_bitrate_error) continue; - best_error = error; - if (error == 0) { - spt = can_update_spt(btc, sampl_pt, tseg / 2, - &tseg1, &tseg2); - error = sampl_pt - spt; - if (error < 0) - error = -error; - if (error > spt_error) - continue; - spt_error = error; - } + + /* reset sample point error if we have a better bitrate */ + if (bitrate_error < best_bitrate_error) + best_sample_point_error = UINT_MAX; + + can_update_sample_point(btc, sample_point_nominal, tseg / 2, &tseg1, &tseg2, &sample_point_error); + if (sample_point_error > best_sample_point_error) + continue; + + best_sample_point_error = sample_point_error; + best_bitrate_error = bitrate_error; best_tseg = tseg / 2; best_brp = brp; - if (error == 0) + + if (bitrate_error == 0 && sample_point_error == 0) break; }
- if (best_error) { + if (best_bitrate_error) { /* Error in one-tenth of a percent */ - error = (best_error * 1000) / bt->bitrate; - if (error > CAN_CALC_MAX_ERROR) { + v64 = (u64)best_bitrate_error * 1000; + do_div(v64, bt->bitrate); + bitrate_error = (u32)v64; + if (bitrate_error > CAN_CALC_MAX_ERROR) { netdev_err(dev, - "bitrate error %ld.%ld%% too high\n", - error / 10, error % 10); + "bitrate error %d.%d%% too high\n", + bitrate_error / 10, bitrate_error % 10); return -EDOM; - } else { - netdev_warn(dev, "bitrate error %ld.%ld%%\n", - error / 10, error % 10); } + netdev_warn(dev, "bitrate error %d.%d%%\n", + bitrate_error / 10, bitrate_error % 10); }
/* real sample point */ - bt->sample_point = can_update_spt(btc, sampl_pt, best_tseg, - &tseg1, &tseg2); + bt->sample_point = can_update_sample_point(btc, sample_point_nominal, best_tseg, + &tseg1, &tseg2, NULL);
- v64 = (u64)best_brp * 1000000000UL; + v64 = (u64)best_brp * 1000 * 1000 * 1000; do_div(v64, priv->clock.freq); bt->tq = (u32)v64; bt->prop_seg = tseg1 / 2; @@@ -211,9 -182,9 +211,9 @@@ bt->phase_seg2 = tseg2;
/* check for sjw user settings */ - if (!bt->sjw || !btc->sjw_max) + if (!bt->sjw || !btc->sjw_max) { bt->sjw = 1; - else { + } else { /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */ if (bt->sjw > btc->sjw_max) bt->sjw = btc->sjw_max; @@@ -223,9 -194,8 +223,9 @@@ }
bt->brp = best_brp; - /* real bit-rate */ - bt->bitrate = priv->clock.freq / (bt->brp * (tseg1 + tseg2 + 1)); + + /* real bitrate */ + bt->bitrate = priv->clock.freq / (bt->brp * (CAN_CALC_SYNC_SEG + tseg1 + tseg2));
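As a worked example of the two formulas above: with a 16 MHz CAN clock and best_brp = 2, bt->tq = 2 * 10^9 / 16,000,000 = 125 ns per time quantum, and with tseg1 = 13 and tseg2 = 2 the real bitrate comes out to 16,000,000 / (2 * (1 + 13 + 2)) = 500 kbit/s, sampled at (1 + 13) / 16 = 87.5% of the bit time.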
return 0; } @@@ -828,6 -798,9 +828,9 @@@ static int can_validate(struct nlattr * * - control mode with CAN_CTRLMODE_FD set */
+ if (!data) + return 0; + if (data[IFLA_CAN_CTRLMODE]) { struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
@@@ -1038,6 -1011,11 +1041,11 @@@ static int can_newlink(struct net *src_ return -EOPNOTSUPP; }
+ static void can_dellink(struct net_device *dev, struct list_head *head) + { + return; + } + static struct rtnl_link_ops can_link_ops __read_mostly = { .kind = "can", .maxtype = IFLA_CAN_MAX, @@@ -1046,6 -1024,7 +1054,7 @@@ .validate = can_validate, .newlink = can_newlink, .changelink = can_changelink, + .dellink = can_dellink, .get_size = can_get_size, .fill_info = can_fill_info, .get_xstats_size = can_get_xstats_size, diff --combined drivers/net/can/usb/gs_usb.c index 3607643,acb0c84..6f0cbc3 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@@ -1,7 -1,9 +1,9 @@@ - /* CAN driver for Geschwister Schneider USB/CAN devices. + /* CAN driver for Geschwister Schneider USB/CAN devices + * and bytewerk.org candleLight USB CAN interfaces. * - * Copyright (C) 2013 Geschwister Schneider Technologie-, + * Copyright (C) 2013-2016 Geschwister Schneider Technologie-, * Entwicklungs- und Vertriebs UG (Haftungsbeschränkt). + * Copyright (C) 2016 Hubert Denkmair * * Many thanks to all socketcan devs! * @@@ -29,6 -31,9 +31,9 @@@ #define USB_GSUSB_1_VENDOR_ID 0x1d50 #define USB_GSUSB_1_PRODUCT_ID 0x606f
+ #define USB_CANDLELIGHT_VENDOR_ID 0x1209 + #define USB_CANDLELIGHT_PRODUCT_ID 0x2323 + #define GSUSB_ENDPOINT_IN 1 #define GSUSB_ENDPOINT_OUT 2
@@@ -39,9 -44,7 +44,9 @@@ enum gs_usb_breq GS_USB_BREQ_MODE, GS_USB_BREQ_BERR, GS_USB_BREQ_BT_CONST, - GS_USB_BREQ_DEVICE_CONFIG + GS_USB_BREQ_DEVICE_CONFIG, + GS_USB_BREQ_TIMESTAMP, + GS_USB_BREQ_IDENTIFY, };
enum gs_can_mode { @@@ -60,11 -63,6 +65,11 @@@ enum gs_can_state GS_CAN_STATE_SLEEPING };
+enum gs_can_identify_mode { + GS_CAN_IDENTIFY_OFF = 0, + GS_CAN_IDENTIFY_ON +}; + /* data types passed between host and device */ struct gs_host_config { u32 byte_order; @@@ -84,10 -82,10 +89,10 @@@ struct gs_device_config } __packed;
#define GS_CAN_MODE_NORMAL 0 -#define GS_CAN_MODE_LISTEN_ONLY (1<<0) -#define GS_CAN_MODE_LOOP_BACK (1<<1) -#define GS_CAN_MODE_TRIPLE_SAMPLE (1<<2) -#define GS_CAN_MODE_ONE_SHOT (1<<3) +#define GS_CAN_MODE_LISTEN_ONLY BIT(0) +#define GS_CAN_MODE_LOOP_BACK BIT(1) +#define GS_CAN_MODE_TRIPLE_SAMPLE BIT(2) +#define GS_CAN_MODE_ONE_SHOT BIT(3)
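The BIT() conversions above are cosmetic; the kernel macro (linux/bitops.h at the time of this merge) is essentially

	#define BIT(nr) (1UL << (nr))

so GS_CAN_MODE_LOOP_BACK == BIT(1) == 0x2, exactly as before.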
struct gs_device_mode { u32 mode; @@@ -108,16 -106,10 +113,16 @@@ struct gs_device_bittiming u32 brp; } __packed;
-#define GS_CAN_FEATURE_LISTEN_ONLY (1<<0) -#define GS_CAN_FEATURE_LOOP_BACK (1<<1) -#define GS_CAN_FEATURE_TRIPLE_SAMPLE (1<<2) -#define GS_CAN_FEATURE_ONE_SHOT (1<<3) +struct gs_identify_mode { + u32 mode; +} __packed; + +#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0) +#define GS_CAN_FEATURE_LOOP_BACK BIT(1) +#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2) +#define GS_CAN_FEATURE_ONE_SHOT BIT(3) +#define GS_CAN_FEATURE_HW_TIMESTAMP BIT(4) +#define GS_CAN_FEATURE_IDENTIFY BIT(5)
struct gs_device_bt_const { u32 feature; @@@ -222,8 -214,7 +227,8 @@@ static void gs_free_tx_context(struct g
/* Get a tx context by id. */ -static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev, unsigned int id) +static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev, + unsigned int id) { unsigned long flags;
@@@ -466,8 -457,7 +471,8 @@@ static void gs_usb_xmit_callback(struc netif_wake_queue(netdev); }
-static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, + struct net_device *netdev) { struct gs_can *dev = netdev_priv(netdev); struct net_device_stats *stats = &dev->netdev->stats; @@@ -673,8 -663,7 +678,8 @@@ static int gs_can_open(struct net_devic rc = usb_control_msg(interface_to_usbdev(dev->iface), usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0), GS_USB_BREQ_MODE, - USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, + USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_INTERFACE, dev->channel, 0, dm, @@@ -737,59 -726,7 +742,59 @@@ static const struct net_device_ops gs_u .ndo_change_mtu = can_change_mtu, };
-static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface *intf)
+static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
+{
+	struct gs_can *dev = netdev_priv(netdev);
+	struct gs_identify_mode imode;
+	int rc;
+
+	if (do_identify)
+		imode.mode = GS_CAN_IDENTIFY_ON;
+	else
+		imode.mode = GS_CAN_IDENTIFY_OFF;
+
+	rc = usb_control_msg(interface_to_usbdev(dev->iface),
+			     usb_sndctrlpipe(interface_to_usbdev(dev->iface),
+					     0),
+			     GS_USB_BREQ_IDENTIFY,
+			     USB_DIR_OUT | USB_TYPE_VENDOR |
+			     USB_RECIP_INTERFACE,
+			     dev->channel,
+			     0,
+			     &imode,
+			     sizeof(imode),
+			     100);
+
+	return (rc > 0) ? 0 : rc;
+}
+
+/* blink LEDs for finding this interface */
+static int gs_usb_set_phys_id(struct net_device *dev,
+			      enum ethtool_phys_id_state state)
+{
+	int rc = 0;
+
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_ON);
+		break;
+	case ETHTOOL_ID_INACTIVE:
+		rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_OFF);
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+static const struct ethtool_ops gs_usb_ethtool_ops = {
+	.set_phys_id = gs_usb_set_phys_id,
+};
+
+static struct gs_can *gs_make_candev(unsigned int channel,
+				     struct usb_interface *intf,
+				     struct gs_device_config *dconf)
 {
 	struct gs_can *dev;
 	struct net_device *netdev;
@@@ -877,14 -814,10 +882,14 @@@
 	if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT)
 		dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
- kfree(bt_const); - SET_NETDEV_DEV(netdev, &intf->dev);
+ if (dconf->sw_version > 1) + if (bt_const->feature & GS_CAN_FEATURE_IDENTIFY) + netdev->ethtool_ops = &gs_usb_ethtool_ops; + + kfree(bt_const); + rc = register_candev(dev->netdev); if (rc) { free_candev(dev->netdev); @@@ -902,16 -835,19 +907,16 @@@ static void gs_destroy_candev(struct gs free_candev(dev->netdev); }
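With gs_usb_ethtool_ops wired up above (on devices whose sw_version is greater than 1 and which advertise GS_CAN_FEATURE_IDENTIFY), the LED blinking is driven through the standard ethtool identify hook, e.g. (assuming the device registered as can0):

	ethtool -p can0 5

which invokes gs_usb_set_phys_id() with ETHTOOL_ID_ACTIVE and, five seconds later, with ETHTOOL_ID_INACTIVE.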
-static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) +static int gs_usb_probe(struct usb_interface *intf, + const struct usb_device_id *id) { struct gs_usb *dev; int rc = -ENOMEM; unsigned int icount, i; - struct gs_host_config *hconf; - struct gs_device_config *dconf; - - hconf = kmalloc(sizeof(*hconf), GFP_KERNEL); - if (!hconf) - return -ENOMEM; - - hconf->byte_order = 0x0000beef; + struct gs_host_config hconf = { + .byte_order = 0x0000beef, + }; + struct gs_device_config dconf;
/* send host config */ rc = usb_control_msg(interface_to_usbdev(intf), @@@ -920,16 -856,22 +925,16 @@@ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 1, intf->altsetting[0].desc.bInterfaceNumber, - hconf, - sizeof(*hconf), + &hconf, + sizeof(hconf), 1000);
- kfree(hconf); - if (rc < 0) { dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", rc); return rc; }
- dconf = kmalloc(sizeof(*dconf), GFP_KERNEL); - if (!dconf) - return -ENOMEM; - /* read device config */ rc = usb_control_msg(interface_to_usbdev(intf), usb_rcvctrlpipe(interface_to_usbdev(intf), 0), @@@ -937,16 -879,22 +942,16 @@@ USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 1, intf->altsetting[0].desc.bInterfaceNumber, - dconf, - sizeof(*dconf), + &dconf, + sizeof(dconf), 1000); if (rc < 0) { dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n", rc); - - kfree(dconf); - return rc; }
- icount = dconf->icount+1; - - kfree(dconf); - + icount = dconf.icount + 1; dev_info(&intf->dev, "Configuring for %d interfaces\n", icount);
if (icount > GS_MAX_INTF) { @@@ -967,7 -915,7 +972,7 @@@ dev->udev = interface_to_usbdev(intf);
for (i = 0; i < icount; i++) { - dev->canch[i] = gs_make_candev(i, intf); + dev->canch[i] = gs_make_candev(i, intf, &dconf); if (IS_ERR_OR_NULL(dev->canch[i])) { /* save error code to return later */ rc = PTR_ERR(dev->canch[i]); @@@ -1009,6 -957,8 +1014,8 @@@ static void gs_usb_disconnect(struct us static const struct usb_device_id gs_usb_table[] = { { USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID, USB_GSUSB_1_PRODUCT_ID, 0) }, + { USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID, + USB_CANDLELIGHT_PRODUCT_ID, 0) }, {} /* Terminating entry */ };
@@@ -1026,5 -976,6 +1033,6 @@@ module_usb_driver(gs_usb_driver) MODULE_AUTHOR("Maximilian Schneider mws@schneidersoft.net"); MODULE_DESCRIPTION( "Socket CAN device driver for Geschwister Schneider Technologie-, " - "Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces."); + "Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces\n" + "and bytewerk.org candleLight USB CAN interfaces."); MODULE_LICENSE("GPL v2"); diff --combined drivers/net/ethernet/broadcom/bgmac.c index e6e74ca,a6333d3..b045dc0 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@@ -246,8 -246,6 +246,8 @@@ err_dma_head
err_drop: dev_kfree_skb(skb); + net_dev->stats.tx_dropped++; + net_dev->stats.tx_errors++; return NETDEV_TX_OK; }
@@@ -269,15 -267,16 +269,16 @@@ static void bgmac_dma_tx_free(struct bg while (ring->start != ring->end) { int slot_idx = ring->start % BGMAC_TX_RING_SLOTS; struct bgmac_slot_info *slot = &ring->slots[slot_idx]; - u32 ctl1; + u32 ctl0, ctl1; int len;
if (slot_idx == empty_slot) break;
+ ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0); ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1); len = ctl1 & BGMAC_DESC_CTL1_LEN; - if (ctl1 & BGMAC_DESC_CTL0_SOF) + if (ctl0 & BGMAC_DESC_CTL0_SOF) /* Unmap no longer used buffer */ dma_unmap_single(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE); @@@ -286,8 -285,6 +287,8 @@@ DMA_TO_DEVICE);
if (slot->skb) { + bgmac->net_dev->stats.tx_bytes += slot->skb->len; + bgmac->net_dev->stats.tx_packets++; bytes_compl += slot->skb->len; pkts_compl++;
@@@ -468,7 -465,6 +469,7 @@@ static int bgmac_dma_rx_read(struct bgm bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n", ring->start); put_page(virt_to_head_page(buf)); + bgmac->net_dev->stats.rx_errors++; break; }
@@@ -476,8 -472,6 +477,8 @@@ bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n", ring->start); put_page(virt_to_head_page(buf)); + bgmac->net_dev->stats.rx_length_errors++; + bgmac->net_dev->stats.rx_errors++; break; }
@@@ -488,7 -482,6 +489,7 @@@ if (unlikely(!skb)) { bgmac_err(bgmac, "build_skb failed\n"); put_page(virt_to_head_page(buf)); + bgmac->net_dev->stats.rx_errors++; break; } skb_put(skb, BGMAC_RX_FRAME_OFFSET + @@@ -498,8 -491,6 +499,8 @@@
skb_checksum_none_assert(skb); skb->protocol = eth_type_trans(skb, bgmac->net_dev); + bgmac->net_dev->stats.rx_bytes += len; + bgmac->net_dev->stats.rx_packets++; napi_gro_receive(&bgmac->napi, skb); handled++; } while (0); @@@ -1320,9 -1311,10 +1321,10 @@@ static int bgmac_open(struct net_devic } napi_enable(&bgmac->napi);
- phy_start(bgmac->phy_dev); + phy_start(net_dev->phydev);
- netif_carrier_on(net_dev); + netif_start_queue(net_dev); + return 0; }
@@@ -1332,7 -1324,7 +1334,7 @@@ static int bgmac_stop(struct net_devic
netif_carrier_off(net_dev);
- phy_stop(bgmac->phy_dev); + phy_stop(net_dev->phydev);
napi_disable(&bgmac->napi); bgmac_chip_intrs_off(bgmac); @@@ -1370,10 -1362,12 +1372,10 @@@ static int bgmac_set_mac_address(struc
static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) { - struct bgmac *bgmac = netdev_priv(net_dev); - if (!netif_running(net_dev)) return -EINVAL;
- return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd); + return phy_mii_ioctl(net_dev->phydev, ifr, cmd); }
static const struct net_device_ops bgmac_netdev_ops = { @@@ -1390,125 -1384,20 +1392,125 @@@ * ethtool_ops **************************************************/
-static int bgmac_get_settings(struct net_device *net_dev, - struct ethtool_cmd *cmd) +struct bgmac_stat { + u8 size; + u32 offset; + const char *name; +}; + +static struct bgmac_stat bgmac_get_strings_stats[] = { + { 8, BGMAC_TX_GOOD_OCTETS, "tx_good_octets" }, + { 4, BGMAC_TX_GOOD_PKTS, "tx_good" }, + { 8, BGMAC_TX_OCTETS, "tx_octets" }, + { 4, BGMAC_TX_PKTS, "tx_pkts" }, + { 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" }, + { 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" }, + { 4, BGMAC_TX_LEN_64, "tx_64" }, + { 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" }, + { 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" }, + { 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" }, + { 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" }, + { 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" }, + { 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" }, + { 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" }, + { 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" }, + { 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" }, + { 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" }, + { 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" }, + { 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" }, + { 4, BGMAC_TX_UNDERRUNS, "tx_underruns" }, + { 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" }, + { 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" }, + { 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" }, + { 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" }, + { 4, BGMAC_TX_LATE_COLS, "tx_late_cols" }, + { 4, BGMAC_TX_DEFERED, "tx_defered" }, + { 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" }, + { 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" }, + { 4, BGMAC_TX_UNI_PKTS, "tx_unicast" }, + { 4, BGMAC_TX_Q0_PKTS, "tx_q0" }, + { 8, BGMAC_TX_Q0_OCTETS, "tx_q0_octets" }, + { 4, BGMAC_TX_Q1_PKTS, "tx_q1" }, + { 8, BGMAC_TX_Q1_OCTETS, "tx_q1_octets" }, + { 4, BGMAC_TX_Q2_PKTS, "tx_q2" }, + { 8, BGMAC_TX_Q2_OCTETS, "tx_q2_octets" }, + { 4, BGMAC_TX_Q3_PKTS, "tx_q3" }, + { 8, BGMAC_TX_Q3_OCTETS, "tx_q3_octets" }, + { 8, BGMAC_RX_GOOD_OCTETS, "rx_good_octets" }, + { 4, BGMAC_RX_GOOD_PKTS, "rx_good" }, + { 8, BGMAC_RX_OCTETS, "rx_octets" }, + { 4, BGMAC_RX_PKTS, "rx_pkts" }, + { 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" }, + { 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" }, + { 4, BGMAC_RX_LEN_64, "rx_64" }, + { 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" }, + { 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" }, + { 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" }, + { 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" }, + { 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" }, + { 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" }, + { 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" }, + { 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" }, + { 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" }, + { 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" }, + { 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" }, + { 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" }, + { 4, BGMAC_RX_MISSED_PKTS, "rx_missed" }, + { 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" }, + { 4, BGMAC_RX_UNDERSIZE, "rx_undersize" }, + { 4, BGMAC_RX_CRC_ERRS, "rx_crc" }, + { 4, BGMAC_RX_ALIGN_ERRS, "rx_align" }, + { 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" }, + { 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" }, + { 4, BGMAC_RX_NONPAUSE_PKTS, "rx_nonpause" }, + { 4, BGMAC_RX_SACHANGES, "rx_sa_changes" }, + { 4, BGMAC_RX_UNI_PKTS, "rx_unicast" }, +}; + +#define BGMAC_STATS_LEN ARRAY_SIZE(bgmac_get_strings_stats) + +static int bgmac_get_sset_count(struct net_device *dev, int string_set) { - struct bgmac *bgmac = netdev_priv(net_dev); + switch (string_set) { + case ETH_SS_STATS: + return BGMAC_STATS_LEN; + }
- return phy_ethtool_gset(bgmac->phy_dev, cmd); + return -EOPNOTSUPP; }
-static int bgmac_set_settings(struct net_device *net_dev, - struct ethtool_cmd *cmd) +static void bgmac_get_strings(struct net_device *dev, u32 stringset, + u8 *data) { - struct bgmac *bgmac = netdev_priv(net_dev); + int i;
- return phy_ethtool_sset(bgmac->phy_dev, cmd); + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < BGMAC_STATS_LEN; i++) + strlcpy(data + i * ETH_GSTRING_LEN, + bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN); +} + +static void bgmac_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *ss, uint64_t *data) +{ + struct bgmac *bgmac = netdev_priv(dev); + const struct bgmac_stat *s; + unsigned int i; + u64 val; + + if (!netif_running(dev)) + return; + + for (i = 0; i < BGMAC_STATS_LEN; i++) { + s = &bgmac_get_strings_stats[i]; + val = 0; + if (s->size == 8) + val = (u64)bgmac_read(bgmac, s->offset + 4) << 32; + val |= bgmac_read(bgmac, s->offset); + data[i] = val; + } }
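bgmac_get_ethtool_stats() above assembles each 64-bit counter from two 32-bit MMIO reads, high word at offset + 4 first. The pattern in isolation (reg_read() standing in for bgmac_read()):

	static u64 read_counter64(u32 (*reg_read)(u32 offset), u32 offset)
	{
		u64 val = (u64)reg_read(offset + 4) << 32;	/* high word */

		return val | reg_read(offset);			/* low word */
	}

The two reads are not atomic, so a counter rolling over in between can yield a torn value; for ethtool statistics that is generally tolerated.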
static void bgmac_get_drvinfo(struct net_device *net_dev, @@@ -1519,12 -1408,9 +1521,12 @@@ }
static const struct ethtool_ops bgmac_ethtool_ops = { - .get_settings = bgmac_get_settings, - .set_settings = bgmac_set_settings, + .get_strings = bgmac_get_strings, + .get_sset_count = bgmac_get_sset_count, + .get_ethtool_stats = bgmac_get_ethtool_stats, .get_drvinfo = bgmac_get_drvinfo, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, };
/************************************************** @@@ -1545,7 -1431,7 +1547,7 @@@ static int bgmac_mii_write(struct mii_b static void bgmac_adjust_link(struct net_device *net_dev) { struct bgmac *bgmac = netdev_priv(net_dev); - struct phy_device *phy_dev = bgmac->phy_dev; + struct phy_device *phy_dev = net_dev->phydev; bool update = false;
if (phy_dev->link) { @@@ -1589,6 -1475,8 +1591,6 @@@ static int bgmac_fixed_phy_register(str return err; }
- bgmac->phy_dev = phy_dev; - return err; }
@@@ -1633,6 -1521,7 +1635,6 @@@ static int bgmac_mii_register(struct bg err = PTR_ERR(phy_dev); goto err_unregister_bus; } - bgmac->phy_dev = phy_dev;
return err;
@@@ -1701,7 -1590,6 +1703,7 @@@ static int bgmac_probe(struct bcma_devi bgmac->net_dev = net_dev; bgmac->core = core; bcma_set_drvdata(core, bgmac); + SET_NETDEV_DEV(net_dev, &core->dev);
/* Defaults */ memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN); diff --combined drivers/net/ethernet/mellanox/mlx4/en_netdev.c index d42083a,0c0dfd6..6083775 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@@ -67,17 -67,6 +67,17 @@@ int mlx4_en_setup_tc(struct net_device offset += priv->num_tx_rings_p_up; }
+#ifdef CONFIG_MLX4_EN_DCB + if (!mlx4_is_slave(priv->mdev->dev)) { + if (up) { + priv->flags |= MLX4_EN_FLAG_DCB_ENABLED; + } else { + priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED; + priv->cee_params.dcb_cfg.pfc_state = false; + } + } +#endif /* CONFIG_MLX4_EN_DCB */ + return 0; }
@@@ -417,14 -406,18 +417,18 @@@ static int mlx4_en_vlan_rx_add_vid(stru mutex_lock(&mdev->state_lock); if (mdev->device_up && priv->port_up) { err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); - if (err) + if (err) { en_err(priv, "Failed configuring VLAN filter\n"); + goto out; + } } - if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx)) - en_dbg(HW, priv, "failed adding vlan %d\n", vid); - mutex_unlock(&mdev->state_lock); + err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx); + if (err) + en_dbg(HW, priv, "Failed adding vlan %d\n", vid);
- return 0; + out: + mutex_unlock(&mdev->state_lock); + return err; }
static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, @@@ -432,7 -425,7 +436,7 @@@ { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; - int err; + int err = 0;
en_dbg(HW, priv, "Killing VID:%d\n", vid);
@@@ -449,7 -442,7 +453,7 @@@ } mutex_unlock(&mdev->state_lock);
- return 0; + return err; }
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) @@@ -1208,8 -1201,8 +1212,8 @@@ static void mlx4_en_netpoll(struct net_ struct mlx4_en_cq *cq; int i;
- for (i = 0; i < priv->rx_ring_num; i++) { - cq = priv->rx_cq[i]; + for (i = 0; i < priv->tx_ring_num; i++) { + cq = priv->tx_cq[i]; napi_schedule(&cq->napi); } } @@@ -1703,9 -1696,10 +1707,9 @@@ int mlx4_en_start_port(struct net_devic /* Schedule multicast task to populate multicast list */ queue_work(mdev->workqueue, &priv->rx_mode_task);
-#ifdef CONFIG_MLX4_EN_VXLAN if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) - vxlan_get_rx_port(dev); -#endif + udp_tunnel_get_rx_info(dev); + priv->port_up = true; netif_tx_start_all_queues(dev); netif_device_attach(dev); @@@ -2042,11 -2036,20 +2046,20 @@@ err return -ENOMEM; }
+ static void mlx4_en_shutdown(struct net_device *dev) + { + rtnl_lock(); + netif_device_detach(dev); + mlx4_en_close(dev); + rtnl_unlock(); + }
void mlx4_en_destroy_netdev(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; + bool shutdown = mdev->dev->persist->interface_state & + MLX4_INTERFACE_STATE_SHUTDOWN;
en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
@@@ -2054,7 -2057,10 +2067,10 @@@ if (priv->registered) { devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev, priv->port)); - unregister_netdev(dev); + if (shutdown) + mlx4_en_shutdown(dev); + else + unregister_netdev(dev); }
if (priv->allocated) @@@ -2079,7 -2085,8 +2095,8 @@@ kfree(priv->tx_ring); kfree(priv->tx_cq);
- free_netdev(dev); + if (!shutdown) + free_netdev(dev); }
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) @@@ -2352,6 -2359,7 +2369,6 @@@ static int mlx4_en_get_phys_port_id(str return 0; }
-#ifdef CONFIG_MLX4_EN_VXLAN static void mlx4_en_add_vxlan_offloads(struct work_struct *work) { int ret; @@@ -2401,19 -2409,15 +2418,19 @@@ static void mlx4_en_del_vxlan_offloads( }
static void mlx4_en_add_vxlan_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct mlx4_en_priv *priv = netdev_priv(dev); + __be16 port = ti->port; __be16 current_port;
- if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + if (ti->sa_family != AF_INET) return;
- if (sa_family == AF_INET6) + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) return;
current_port = priv->vxlan_port; @@@ -2428,19 -2432,15 +2445,19 @@@ }
static void mlx4_en_del_vxlan_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct mlx4_en_priv *priv = netdev_priv(dev); + __be16 port = ti->port; __be16 current_port;
- if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return;
- if (sa_family == AF_INET6) + if (ti->sa_family != AF_INET) + return; + + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) return;
current_port = priv->vxlan_port; @@@ -2464,12 -2464,18 +2481,17 @@@ static netdev_features_t mlx4_en_featur * strip that feature if this is an IPv6 encapsulated frame. */ if (skb->encapsulation && - (skb->ip_summed == CHECKSUM_PARTIAL) && - (ip_hdr(skb)->version != 4)) - features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + (skb->ip_summed == CHECKSUM_PARTIAL)) { + struct mlx4_en_priv *priv = netdev_priv(dev); + + if (!priv->vxlan_port || + (ip_hdr(skb)->version != 4) || + (udp_hdr(skb)->dest != priv->vxlan_port)) + features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + }
return features; } -#endif
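The ndo_udp_tunnel_add/ndo_udp_tunnel_del callbacks used above replace the (sa_family, port) argument pairs of the old VXLAN-specific hooks with a single descriptor; as of this merge the core structure is roughly (a sketch, see include/net/udp_tunnel.h):

	struct udp_tunnel_info {
		unsigned short type;	/* UDP_TUNNEL_TYPE_* */
		sa_family_t sa_family;
		__be16 port;
	};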
static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate) { @@@ -2522,9 -2528,11 +2544,9 @@@ static const struct net_device_ops mlx4 .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, -#ifdef CONFIG_MLX4_EN_VXLAN - .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, - .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, + .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port, + .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, .ndo_features_check = mlx4_en_features_check, -#endif .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, };
@@@ -2558,9 -2566,11 +2580,9 @@@ static const struct net_device_ops mlx4 .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, -#ifdef CONFIG_MLX4_EN_VXLAN - .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, - .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, + .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port, + .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, .ndo_features_check = mlx4_en_features_check, -#endif .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, };
@@@ -2826,9 -2836,6 +2848,9 @@@ int mlx4_en_init_netdev(struct mlx4_en_ struct mlx4_en_priv *priv; int i; int err; +#ifdef CONFIG_MLX4_EN_DCB + struct tc_configuration *tc; +#endif
dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), MAX_TX_RINGS, MAX_RX_RINGS); @@@ -2854,8 -2861,10 +2876,8 @@@ INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); -#ifdef CONFIG_MLX4_EN_VXLAN INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); -#endif #ifdef CONFIG_RFS_ACCEL INIT_LIST_HEAD(&priv->filters); spin_lock_init(&priv->filters_lock); @@@ -2895,17 -2904,6 +2917,17 @@@ priv->msg_enable = MLX4_EN_MSG_LEVEL; #ifdef CONFIG_MLX4_EN_DCB if (!mlx4_is_slave(priv->mdev->dev)) { + priv->cee_params.dcbx_cap = DCB_CAP_DCBX_VER_CEE | + DCB_CAP_DCBX_HOST | + DCB_CAP_DCBX_VER_IEEE; + priv->flags |= MLX4_EN_DCB_ENABLED; + priv->cee_params.dcb_cfg.pfc_state = false; + + for (i = 0; i < MLX4_EN_NUM_UP; i++) { + tc = &priv->cee_params.dcb_cfg.tc_config[i]; + tc->dcb_pfc = pfc_disabled; + } + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { dev->dcbnl_ops = &mlx4_en_dcbnl_ops; } else { diff --combined drivers/net/ethernet/mellanox/mlx4/main.c index 3564aad,546fab0..b673a5f --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@@ -292,7 -292,6 +292,7 @@@ static int _mlx4_dev_port(struct mlx4_d dev->caps.pkey_table_len[port] = port_cap->max_pkeys; dev->caps.port_width_cap[port] = port_cap->max_port_width; dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu; + dev->caps.max_tc_eth = port_cap->max_tc_eth; dev->caps.def_mac[port] = port_cap->def_mac; dev->caps.supported_type[port] = port_cap->supported_port_types; dev->caps.suggested_type[port] = port_cap->suggested_type; @@@ -3223,6 -3222,7 +3223,7 @@@ static int mlx4_load_one(struct pci_de
INIT_LIST_HEAD(&priv->pgdir_list); mutex_init(&priv->pgdir_mutex); + spin_lock_init(&priv->cmd.context_lock);
INIT_LIST_HEAD(&priv->bf_list); mutex_init(&priv->bf_mutex); @@@ -4135,8 -4135,11 +4136,11 @@@ static void mlx4_shutdown(struct pci_de
mlx4_info(persist->dev, "mlx4_shutdown was called\n"); mutex_lock(&persist->interface_state_mutex); - if (persist->interface_state & MLX4_INTERFACE_STATE_UP) + if (persist->interface_state & MLX4_INTERFACE_STATE_UP) { + /* Notify mlx4 clients that the kernel is being shut down */ + persist->interface_state |= MLX4_INTERFACE_STATE_SHUTDOWN; mlx4_unload_one(pdev); + } mutex_unlock(&persist->interface_state_mutex); }
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en.h index da885c0,baa991a..b97511b --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@@ -79,7 -79,6 +79,7 @@@
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10 +#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20 @@@ -89,7 -88,6 +89,7 @@@ #define MLX5E_LOG_INDIR_RQT_SIZE 0x7 #define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE) #define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1) +#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC) #define MLX5E_TX_CQ_POLL_BUDGET 128 #define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */ #define MLX5E_SQ_BF_BUDGET 16 @@@ -145,32 -143,11 +145,32 @@@ struct mlx5e_umr_wqe struct mlx5_wqe_data_seg data; };
+static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = { + "rx_cqe_moder", +}; + +enum mlx5e_priv_flag { + MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0), +}; + +#define MLX5E_SET_PRIV_FLAG(priv, pflag, enable) \ + do { \ + if (enable) \ + priv->pflags |= pflag; \ + else \ + priv->pflags &= ~pflag; \ + } while (0) + #ifdef CONFIG_MLX5_CORE_EN_DCB #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ #define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */ #endif
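MLX5E_SET_PRIV_FLAG() above is a plain set/clear helper, so enabling the rx_cqe_moder private flag reduces to:

	MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER, true);
	/* equivalent to: priv->pflags |= MLX5E_PFLAG_RX_CQE_BASED_MODER; */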
+struct mlx5e_cq_moder { + u16 usec; + u16 pkts; +}; + struct mlx5e_params { u8 log_sq_size; u8 rq_wq_type; @@@ -179,11 -156,12 +179,11 @@@ u8 log_rq_size; u16 num_channels; u8 num_tc; + u8 rx_cq_period_mode; bool rx_cqe_compress_admin; bool rx_cqe_compress; - u16 rx_cq_moderation_usec; - u16 rx_cq_moderation_pkts; - u16 tx_cq_moderation_usec; - u16 tx_cq_moderation_pkts; + struct mlx5e_cq_moder rx_cq_moderation; + struct mlx5e_cq_moder tx_cq_moderation; u16 min_rx_wqes; bool lro_en; u32 lro_wqe_sz; @@@ -195,7 -173,6 +195,7 @@@ #ifdef CONFIG_MLX5_CORE_EN_DCB struct ieee_ets ets; #endif + bool rx_am_enabled; };
struct mlx5e_tstamp { @@@ -214,7 -191,6 +214,7 @@@ enum { MLX5E_RQ_STATE_POST_WQES_ENABLE, MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, + MLX5E_RQ_STATE_AM, };
struct mlx5e_cq { @@@ -222,7 -198,6 +222,7 @@@ struct mlx5_cqwq wq;
/* data path - accessed per napi poll */ + u16 event_ctr; struct napi_struct *napi; struct mlx5_core_cq mcq; struct mlx5e_channel *channel; @@@ -250,30 -225,6 +250,30 @@@ struct mlx5e_dma_info dma_addr_t addr; };
+struct mlx5e_rx_am_stats { + int ppms; /* packets per msec */ + int epms; /* events per msec */ +}; + +struct mlx5e_rx_am_sample { + ktime_t time; + unsigned int pkt_ctr; + u16 event_ctr; +}; + +struct mlx5e_rx_am { /* Adaptive Moderation */ + u8 state; + struct mlx5e_rx_am_stats prev_stats; + struct mlx5e_rx_am_sample start_sample; + struct work_struct work; + u8 profile_ix; + u8 mode; + u8 tune_state; + u8 steps_right; + u8 steps_left; + u8 tired; +}; + struct mlx5e_rq { /* data path */ struct mlx5_wq_ll wq; @@@ -294,8 -245,6 +294,8 @@@ unsigned long state; int ix;
+ struct mlx5e_rx_am am; /* Adaptive Moderation */ + /* control */ struct mlx5_wq_ctrl wq_ctrl; u8 wq_type; @@@ -405,7 -354,6 +405,7 @@@ struct mlx5e_sq struct mlx5e_channel *channel; int tc; struct mlx5e_ico_wqe_info *ico_wqe_info; + u32 rate_limit; } ____cacheline_aligned_in_smp;
static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n) @@@ -453,7 -401,7 +453,7 @@@ enum mlx5e_traffic_types };
enum { - MLX5E_STATE_ASYNC_EVENTS_ENABLE, + MLX5E_STATE_ASYNC_EVENTS_ENABLED, MLX5E_STATE_OPENED, MLX5E_STATE_DESTROYING, }; @@@ -582,7 -530,6 +582,7 @@@ struct mlx5e_priv u32 indir_rqtn; u32 indir_tirn[MLX5E_NUM_INDIR_TIRS]; struct mlx5e_direct_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; + u32 tx_rates[MLX5E_MAX_NUM_SQS];
struct mlx5e_flow_steering fs; struct mlx5e_vxlan_db vxlan; @@@ -593,7 -540,6 +593,7 @@@ struct work_struct set_rx_mode_work; struct delayed_work update_stats_work;
+ u32 pflags; struct mlx5_core_dev *mdev; struct net_device *netdev; struct mlx5e_stats stats; @@@ -616,7 -562,6 +616,7 @@@ enum mlx5e_link_mode MLX5E_10GBASE_ER = 14, MLX5E_40GBASE_SR4 = 15, MLX5E_40GBASE_LR4 = 16, + MLX5E_50GBASE_SR2 = 18, MLX5E_100GBASE_CR4 = 20, MLX5E_100GBASE_SR4 = 21, MLX5E_100GBASE_KR4 = 22, @@@ -634,9 -579,6 +634,9 @@@
#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+ +void mlx5e_build_ptys2ethtool_map(void); + void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw); u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback); @@@ -670,10 -612,6 +670,10 @@@ void mlx5e_free_rx_fragmented_mpwqe(str struct mlx5e_mpw_info *wi); struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
+void mlx5e_rx_am(struct mlx5e_rq *rq); +void mlx5e_rx_am_work(struct work_struct *work); +struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode); + void mlx5e_update_stats(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv); @@@ -709,9 -647,6 +709,9 @@@ void mlx5e_build_default_indir_rqt(stru int num_channels); int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, + u8 cq_period_mode); + static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz) { diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 39a4d96,e667a87..b29684d --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@@ -48,85 -48,123 +48,85 @@@ static void mlx5e_get_drvinfo(struct ne sizeof(drvinfo->bus_info)); }
-static const struct { - u32 supported; - u32 advertised; +struct ptys2ethtool_config { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised); u32 speed; -} ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER] = { - [MLX5E_1000BASE_CX_SGMII] = { - .supported = SUPPORTED_1000baseKX_Full, - .advertised = ADVERTISED_1000baseKX_Full, - .speed = 1000, - }, - [MLX5E_1000BASE_KX] = { - .supported = SUPPORTED_1000baseKX_Full, - .advertised = ADVERTISED_1000baseKX_Full, - .speed = 1000, - }, - [MLX5E_10GBASE_CX4] = { - .supported = SUPPORTED_10000baseKX4_Full, - .advertised = ADVERTISED_10000baseKX4_Full, - .speed = 10000, - }, - [MLX5E_10GBASE_KX4] = { - .supported = SUPPORTED_10000baseKX4_Full, - .advertised = ADVERTISED_10000baseKX4_Full, - .speed = 10000, - }, - [MLX5E_10GBASE_KR] = { - .supported = SUPPORTED_10000baseKR_Full, - .advertised = ADVERTISED_10000baseKR_Full, - .speed = 10000, - }, - [MLX5E_20GBASE_KR2] = { - .supported = SUPPORTED_20000baseKR2_Full, - .advertised = ADVERTISED_20000baseKR2_Full, - .speed = 20000, - }, - [MLX5E_40GBASE_CR4] = { - .supported = SUPPORTED_40000baseCR4_Full, - .advertised = ADVERTISED_40000baseCR4_Full, - .speed = 40000, - }, - [MLX5E_40GBASE_KR4] = { - .supported = SUPPORTED_40000baseKR4_Full, - .advertised = ADVERTISED_40000baseKR4_Full, - .speed = 40000, - }, - [MLX5E_56GBASE_R4] = { - .supported = SUPPORTED_56000baseKR4_Full, - .advertised = ADVERTISED_56000baseKR4_Full, - .speed = 56000, - }, - [MLX5E_10GBASE_CR] = { - .supported = SUPPORTED_10000baseKR_Full, - .advertised = ADVERTISED_10000baseKR_Full, - .speed = 10000, - }, - [MLX5E_10GBASE_SR] = { - .supported = SUPPORTED_10000baseKR_Full, - .advertised = ADVERTISED_10000baseKR_Full, - .speed = 10000, - }, - [MLX5E_10GBASE_ER] = { - .supported = SUPPORTED_10000baseKR_Full, - .advertised = ADVERTISED_10000baseKR_Full, - .speed = 10000, - }, - [MLX5E_40GBASE_SR4] = { - .supported = SUPPORTED_40000baseSR4_Full, - .advertised = ADVERTISED_40000baseSR4_Full, - .speed = 40000, - }, - [MLX5E_40GBASE_LR4] = { - .supported = SUPPORTED_40000baseLR4_Full, - .advertised = ADVERTISED_40000baseLR4_Full, - .speed = 40000, - }, - [MLX5E_100GBASE_CR4] = { - .speed = 100000, - }, - [MLX5E_100GBASE_SR4] = { - .speed = 100000, - }, - [MLX5E_100GBASE_KR4] = { - .speed = 100000, - }, - [MLX5E_100GBASE_LR4] = { - .speed = 100000, - }, - [MLX5E_100BASE_TX] = { - .speed = 100, - }, - [MLX5E_1000BASE_T] = { - .supported = SUPPORTED_1000baseT_Full, - .advertised = ADVERTISED_1000baseT_Full, - .speed = 1000, - }, - [MLX5E_10GBASE_T] = { - .supported = SUPPORTED_10000baseT_Full, - .advertised = ADVERTISED_10000baseT_Full, - .speed = 1000, - }, - [MLX5E_25GBASE_CR] = { - .speed = 25000, - }, - [MLX5E_25GBASE_KR] = { - .speed = 25000, - }, - [MLX5E_25GBASE_SR] = { - .speed = 25000, - }, - [MLX5E_50GBASE_CR2] = { - .speed = 50000, - }, - [MLX5E_50GBASE_KR2] = { - .speed = 50000, - }, };
+static struct ptys2ethtool_config ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER]; + +#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...) \ + ({ \ + struct ptys2ethtool_config *cfg; \ + const unsigned int modes[] = { __VA_ARGS__ }; \ + unsigned int i; \ + cfg = &ptys2ethtool_table[reg_]; \ + cfg->speed = speed_; \ + bitmap_zero(cfg->supported, \ + __ETHTOOL_LINK_MODE_MASK_NBITS); \ + bitmap_zero(cfg->advertised, \ + __ETHTOOL_LINK_MODE_MASK_NBITS); \ + for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \ + __set_bit(modes[i], cfg->supported); \ + __set_bit(modes[i], cfg->advertised); \ + } \ + }) + +void mlx5e_build_ptys2ethtool_map(void) +{ + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, SPEED_1000, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, SPEED_1000, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, SPEED_20000, + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, SPEED_56000, + ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, SPEED_50000, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, SPEED_100000, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, SPEED_100000, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, SPEED_100000, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, SPEED_100000, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, SPEED_25000, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, SPEED_25000, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, SPEED_25000, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, SPEED_50000, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, SPEED_50000, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT); +} + static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; @@@ -146,7 -184,9 +146,9 @@@ #define MLX5E_NUM_SQ_STATS(priv) \ 
(NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \ test_bit(MLX5E_STATE_OPENED, &priv->state)) - #define MLX5E_NUM_PFC_COUNTERS(priv) hweight8(mlx5e_query_pfc_combined(priv)) + #define MLX5E_NUM_PFC_COUNTERS(priv) \ + (hweight8(mlx5e_query_pfc_combined(priv)) * \ + NUM_PPORT_PER_PRIO_PFC_COUNTERS)
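A note on the ptys2ethtool rework above: the removed table used the legacy u32 SUPPORTED_*/ADVERTISED_* flag words, which have no bits left for the newer 25G/50G/100G modes — that is why those entries carried only a speed. The replacement stores full ethtool link-mode bitmaps and fills the table once at module init through MLX5_BUILD_PTYS2ETHTOOL_CONFIG (see the mlx5e_init() hunk further down, which adds the mlx5e_build_ptys2ethtool_map() call). Below is a minimal user-space sketch of the same build-once pattern; the mask width, proto index and mode bit are made-up stand-ins, not the driver's values:

    #include <stdio.h>
    #include <string.h>

    #define MODE_BITS 128 /* stand-in for __ETHTOOL_LINK_MODE_MASK_NBITS */
    #define N_PROTOS  32

    struct cfg {
            unsigned char supported[(MODE_BITS + 7) / 8]; /* bitmap */
            unsigned int speed;                           /* Mb/s */
    };

    static struct cfg table[N_PROTOS];

    static void set_mode(unsigned char *bm, unsigned int bit)
    {
            bm[bit / 8] |= 1u << (bit % 8);
    }

    /* analogue of MLX5_BUILD_PTYS2ETHTOOL_CONFIG: zero the bitmaps,
     * record the speed, then set one bit per ethtool link mode */
    static void build_entry(unsigned int proto, unsigned int speed,
                            const unsigned int *modes, unsigned int n)
    {
            struct cfg *c = &table[proto];
            unsigned int i;

            memset(c->supported, 0, sizeof(c->supported));
            c->speed = speed;
            for (i = 0; i < n; i++)
                    set_mode(c->supported, modes[i]);
    }

    int main(void)
    {
            unsigned int kr4 = 40; /* hypothetical mode bit index */

            build_entry(8, 100000, &kr4, 1);
            printf("proto 8 -> %u Mb/s\n", table[8].speed);
            return 0;
    }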
static int mlx5e_get_sset_count(struct net_device *dev, int sset) { @@@ -160,8 -200,6 +162,8 @@@ MLX5E_NUM_RQ_STATS(priv) + MLX5E_NUM_SQ_STATS(priv) + MLX5E_NUM_PFC_COUNTERS(priv); + case ETH_SS_PRIV_FLAGS: + return ARRAY_SIZE(mlx5e_priv_flags); /* fallthrough */ default: return -EOPNOTSUPP; @@@ -175,42 -213,41 +177,41 @@@ static void mlx5e_fill_stats_strings(st
/* SW counters */ for (i = 0; i < NUM_SW_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].name); + strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
/* Q counters */ for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].name); + strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
/* VPORT counters */ for (i = 0; i < NUM_VPORT_COUNTERS; i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, - vport_stats_desc[i].name); + vport_stats_desc[i].format);
/* PPORT counters */ for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, - pport_802_3_stats_desc[i].name); + pport_802_3_stats_desc[i].format);
for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, - pport_2863_stats_desc[i].name); + pport_2863_stats_desc[i].format);
for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, - pport_2819_stats_desc[i].name); + pport_2819_stats_desc[i].format);
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s", - prio, - pport_per_prio_traffic_stats_desc[i].name); + sprintf(data + (idx++) * ETH_GSTRING_LEN, + pport_per_prio_traffic_stats_desc[i].format, prio); }
pfc_combined = mlx5e_query_pfc_combined(priv); for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) { for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { - sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s", - prio, pport_per_prio_pfc_stats_desc[i].name); + sprintf(data + (idx++) * ETH_GSTRING_LEN, + pport_per_prio_pfc_stats_desc[i].format, prio); } }
@@@ -220,28 -257,24 +221,27 @@@ /* per channel counters */ for (i = 0; i < priv->params.num_channels; i++) for (j = 0; j < NUM_RQ_STATS; j++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, "rx%d_%s", i, - rq_stats_desc[j].name); + sprintf(data + (idx++) * ETH_GSTRING_LEN, + rq_stats_desc[j].format, i);
for (tc = 0; tc < priv->params.num_tc; tc++) for (i = 0; i < priv->params.num_channels; i++) for (j = 0; j < NUM_SQ_STATS; j++) sprintf(data + (idx++) * ETH_GSTRING_LEN, - "tx%d_%s", - priv->channeltc_to_txq_map[i][tc], - sq_stats_desc[j].name); + sq_stats_desc[j].format, + priv->channeltc_to_txq_map[i][tc]); }
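The string-filling hunks above work because the stats descriptors were renamed from .name to .format and now carry printf templates ("rx%d_...", "prio%d_..."), so the channel, TC or priority index is substituted directly instead of being glued on with a hardcoded "rx%d_%s" wrapper. A runnable sketch of the convention, with stand-in descriptor names:

    #include <stdio.h>

    #define ETH_GSTRING_LEN 32

    struct stat_desc { const char *format; };

    static const struct stat_desc rq_desc[] = {
            { "rx%d_packets" }, /* %d is filled with the channel index */
            { "rx%d_bytes" },
    };

    int main(void)
    {
            char data[4][ETH_GSTRING_LEN];
            int idx = 0, ch, j;

            for (ch = 0; ch < 2; ch++)
                    for (j = 0; j < 2; j++)
                            snprintf(data[idx++], ETH_GSTRING_LEN,
                                     rq_desc[j].format, ch);
            printf("%s %s\n", data[0], data[3]); /* rx0_packets rx1_bytes */
            return 0;
    }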
static void mlx5e_get_strings(struct net_device *dev, uint32_t stringset, uint8_t *data) { struct mlx5e_priv *priv = netdev_priv(dev); + int i;
switch (stringset) { case ETH_SS_PRIV_FLAGS: + for (i = 0; i < ARRAY_SIZE(mlx5e_priv_flags); i++) + strcpy(data + i * ETH_GSTRING_LEN, mlx5e_priv_flags[i]); break;
case ETH_SS_TEST: @@@ -486,11 -519,10 +486,11 @@@ static int mlx5e_get_coalesce(struct ne if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) return -ENOTSUPP;
- coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec; - coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts; - coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec; - coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation_pkts; + coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec; + coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts; + coal->tx_coalesce_usecs = priv->params.tx_cq_moderation.usec; + coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation.pkts; + coal->use_adaptive_rx_coalesce = priv->params.rx_am_enabled;
return 0; } @@@ -501,10 -533,6 +501,10 @@@ static int mlx5e_set_coalesce(struct ne struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_channel *c; + bool restart = + !!coal->use_adaptive_rx_coalesce != priv->params.rx_am_enabled; + bool was_opened; + int err = 0; int tc; int i;
@@@ -512,19 -540,12 +512,19 @@@ return -ENOTSUPP;
mutex_lock(&priv->state_lock); - priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs; - priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames; - priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs; - priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (was_opened && restart) { + mlx5e_close_locked(netdev); + priv->params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce; + } + + priv->params.tx_cq_moderation.usec = coal->tx_coalesce_usecs; + priv->params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames; + priv->params.rx_cq_moderation.usec = coal->rx_coalesce_usecs; + priv->params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames; + + if (!was_opened || restart) goto out;
for (i = 0; i < priv->params.num_channels; ++i) { @@@ -543,37 -564,35 +543,37 @@@ }
out: + if (was_opened && restart) + err = mlx5e_open_locked(netdev); + mutex_unlock(&priv->state_lock); - return 0; + return err; }
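mlx5e_set_coalesce above picks up a restart rule: plain usec/pkts values can be pushed to live CQs, but flipping adaptive RX moderation (use_adaptive_rx_coalesce) changes how channels are built, so the device is closed, the parameter flipped, and the device reopened, all under state_lock. A reduced, runnable sketch of that close/flip/reopen pattern (the dev_state type and helpers are stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    struct dev_state { bool opened; bool am_enabled; };

    static void dev_close_locked(struct dev_state *s) { s->opened = false; }
    static int dev_open_locked(struct dev_state *s) { s->opened = true; return 0; }

    static int apply_rx_am(struct dev_state *s, bool new_am)
    {
            bool restart = new_am != s->am_enabled;
            bool was_opened = s->opened;
            int err = 0;

            if (was_opened && restart)
                    dev_close_locked(s);      /* drop channels first */

            s->am_enabled = new_am;           /* safe while closed */

            if (was_opened && restart)
                    err = dev_open_locked(s); /* rebuild with new mode */
            return err;
    }

    int main(void)
    {
            struct dev_state s = { .opened = true, .am_enabled = false };

            printf("err=%d am=%d\n", apply_rx_am(&s, true), s.am_enabled);
            return 0;
    }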
-static u32 ptys2ethtool_supported_link(u32 eth_proto_cap) +static void ptys2ethtool_supported_link(unsigned long *supported_modes, + u32 eth_proto_cap) { - int i; - u32 supported_modes = 0; + int proto;
- for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (eth_proto_cap & MLX5E_PROT_MASK(i)) - supported_modes |= ptys2ethtool_table[i].supported; - } - return supported_modes; + for_each_set_bit(proto, (unsigned long *)&eth_proto_cap, MLX5E_LINK_MODES_NUMBER) + bitmap_or(supported_modes, supported_modes, + ptys2ethtool_table[proto].supported, + __ETHTOOL_LINK_MODE_MASK_NBITS); }
-static u32 ptys2ethtool_adver_link(u32 eth_proto_cap) +static void ptys2ethtool_adver_link(unsigned long *advertising_modes, + u32 eth_proto_cap) { - int i; - u32 advertising_modes = 0; + int proto;
- for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (eth_proto_cap & MLX5E_PROT_MASK(i)) - advertising_modes |= ptys2ethtool_table[i].advertised; - } - return advertising_modes; + for_each_set_bit(proto, (unsigned long *)&eth_proto_cap, MLX5E_LINK_MODES_NUMBER) + bitmap_or(advertising_modes, advertising_modes, + ptys2ethtool_table[proto].advertised, + __ETHTOOL_LINK_MODE_MASK_NBITS); }
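Both helpers above switch from OR-ing u32 flag words to accumulating link-mode bitmaps with bitmap_or(), walking only the set bits of the PTYS capability word. The same shape in runnable user-space C, where a single 64-bit word stands in for the multi-word ethtool mask:

    #include <stdint.h>
    #include <stdio.h>

    #define N_PROTOS 32

    static uint64_t proto_advertised[N_PROTOS]; /* per-proto mode bitmap */

    static uint64_t collect_advertised(uint32_t eth_proto_cap)
    {
            uint64_t out = 0;
            int proto;

            for (proto = 0; proto < N_PROTOS; proto++)
                    if (eth_proto_cap & (1u << proto))      /* for_each_set_bit */
                            out |= proto_advertised[proto]; /* bitmap_or */
            return out;
    }

    int main(void)
    {
            proto_advertised[2] = 1ull << 40; /* made-up mode bits */
            proto_advertised[5] = 1ull << 41;
            printf("0x%llx\n", (unsigned long long)
                   collect_advertised((1u << 2) | (1u << 5)));
            return 0;
    }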
-static u32 ptys2ethtool_supported_port(u32 eth_proto_cap) +static void ptys2ethtool_supported_port(struct ethtool_link_ksettings *link_ksettings, + u32 eth_proto_cap) { if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR) | MLX5E_PROT_MASK(MLX5E_10GBASE_SR) @@@ -581,7 -600,7 +581,7 @@@ | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4) | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4) | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) { - return SUPPORTED_FIBRE; + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, FIBRE); }
if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4) @@@ -589,8 -608,9 +589,8 @@@ | MLX5E_PROT_MASK(MLX5E_10GBASE_KR) | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4) | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) { - return SUPPORTED_Backplane; + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Backplane); } - return 0; }
int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) @@@ -614,7 -634,7 +614,7 @@@
static void get_speed_duplex(struct net_device *netdev, u32 eth_proto_oper, - struct ethtool_cmd *cmd) + struct ethtool_link_ksettings *link_ksettings) { int i; u32 speed = SPEED_UNKNOWN; @@@ -631,32 -651,23 +631,32 @@@ } } out: - ethtool_cmd_speed_set(cmd, speed); - cmd->duplex = duplex; + link_ksettings->base.speed = speed; + link_ksettings->base.duplex = duplex; }
-static void get_supported(u32 eth_proto_cap, u32 *supported) +static void get_supported(u32 eth_proto_cap, + struct ethtool_link_ksettings *link_ksettings) { - *supported |= ptys2ethtool_supported_port(eth_proto_cap); - *supported |= ptys2ethtool_supported_link(eth_proto_cap); - *supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + unsigned long *supported = link_ksettings->link_modes.supported; + + ptys2ethtool_supported_port(link_ksettings, eth_proto_cap); + ptys2ethtool_supported_link(supported, eth_proto_cap); + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Asym_Pause); }
static void get_advertising(u32 eth_proto_cap, u8 tx_pause, - u8 rx_pause, u32 *advertising) + u8 rx_pause, + struct ethtool_link_ksettings *link_ksettings) { - *advertising |= ptys2ethtool_adver_link(eth_proto_cap); - *advertising |= tx_pause ? ADVERTISED_Pause : 0; - *advertising |= (tx_pause ^ rx_pause) ? ADVERTISED_Asym_Pause : 0; + unsigned long *advertising = link_ksettings->link_modes.advertising; + + ptys2ethtool_adver_link(advertising, eth_proto_cap); + if (tx_pause) + ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); + if (tx_pause ^ rx_pause) + ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause); }
static u8 get_connector_port(u32 eth_proto) @@@ -684,16 -695,13 +684,16 @@@ return PORT_OTHER; }
-static void get_lp_advertising(u32 eth_proto_lp, u32 *lp_advertising) +static void get_lp_advertising(u32 eth_proto_lp, + struct ethtool_link_ksettings *link_ksettings) { - *lp_advertising = ptys2ethtool_adver_link(eth_proto_lp); + unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; + + ptys2ethtool_adver_link(lp_advertising, eth_proto_lp); }
-static int mlx5e_get_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) +static int mlx5e_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *link_ksettings) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; @@@ -702,8 -710,6 +702,8 @@@ u32 eth_proto_admin; u32 eth_proto_lp; u32 eth_proto_oper; + u8 an_disable_admin; + u8 an_status; int err;
err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); @@@ -714,49 -720,35 +714,49 @@@ goto err_query_ptys; }
- eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); - eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); - eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); - eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); + eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); + eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); + eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); + eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); + an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); + an_status = MLX5_GET(ptys_reg, out, an_status);
- cmd->supported = 0; - cmd->advertising = 0; + ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); + ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
- get_supported(eth_proto_cap, &cmd->supported); - get_advertising(eth_proto_admin, 0, 0, &cmd->advertising); - get_speed_duplex(netdev, eth_proto_oper, cmd); + get_supported(eth_proto_cap, link_ksettings); + get_advertising(eth_proto_admin, 0, 0, link_ksettings); + get_speed_duplex(netdev, eth_proto_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
- cmd->port = get_connector_port(eth_proto_oper); - get_lp_advertising(eth_proto_lp, &cmd->lp_advertising); + link_ksettings->base.port = get_connector_port(eth_proto_oper); + get_lp_advertising(eth_proto_lp, link_ksettings); + + if (an_status == MLX5_AN_COMPLETE) + ethtool_link_ksettings_add_link_mode(link_ksettings, + lp_advertising, Autoneg);
- cmd->transceiver = XCVR_INTERNAL; + link_ksettings->base.autoneg = an_disable_admin ? AUTONEG_DISABLE : + AUTONEG_ENABLE; + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, + Autoneg); + if (!an_disable_admin) + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, Autoneg);
err_query_ptys: return err; }
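The get path now also reports autoneg: an_disable_admin decides base.autoneg, the Autoneg mode bit is supported and advertised unless administratively disabled, and it lands in lp_advertising only when the PTYS an_status says negotiation completed. A tiny sketch of that mapping (the status constant is a stand-in):

    #include <stdbool.h>
    #include <stdio.h>

    #define AN_COMPLETE 4 /* stand-in for MLX5_AN_COMPLETE */

    struct ks { bool autoneg; bool adv_an; bool lp_an; };

    static void fill_an(struct ks *k, bool an_disable_admin, int an_status)
    {
            k->autoneg = !an_disable_admin;          /* AUTONEG_ENABLE/DISABLE */
            k->adv_an  = !an_disable_admin;          /* advertise Autoneg bit */
            k->lp_an   = (an_status == AN_COMPLETE); /* peer negotiated */
    }

    int main(void)
    {
            struct ks k;

            fill_an(&k, false, AN_COMPLETE);
            printf("autoneg=%d adv=%d lp=%d\n", k.autoneg, k.adv_an, k.lp_an);
            return 0;
    }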
-static u32 mlx5e_ethtool2ptys_adver_link(u32 link_modes) +static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes) { u32 i, ptys_modes = 0;
for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (ptys2ethtool_table[i].advertised & link_modes) + if (bitmap_intersects(ptys2ethtool_table[i].advertised, + link_modes, + __ETHTOOL_LINK_MODE_MASK_NBITS)) ptys_modes |= MLX5E_PROT_MASK(i); }
@@@ -775,25 -767,21 +775,25 @@@ static u32 mlx5e_ethtool2ptys_speed_lin return speed_links; }
-static int mlx5e_set_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) +static int mlx5e_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *link_ksettings) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; + u32 eth_proto_cap, eth_proto_admin; + bool an_changes = false; + u8 an_disable_admin; + u8 an_disable_cap; + bool an_disable; u32 link_modes; + u8 an_status; u32 speed; - u32 eth_proto_cap, eth_proto_admin; - enum mlx5_port_status ps; int err;
- speed = ethtool_cmd_speed(cmd); + speed = link_ksettings->base.speed;
- link_modes = cmd->autoneg == AUTONEG_ENABLE ? - mlx5e_ethtool2ptys_adver_link(cmd->advertising) : + link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ? + mlx5e_ethtool2ptys_adver_link(link_ksettings->link_modes.advertising) : mlx5e_ethtool2ptys_speed_link(speed);
err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN); @@@ -818,18 -806,15 +818,18 @@@ goto out; }
- if (link_modes == eth_proto_admin) + mlx5_query_port_autoneg(mdev, MLX5_PTYS_EN, &an_status, + &an_disable_cap, &an_disable_admin); + + an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE; + an_changes = ((!an_disable && an_disable_admin) || + (an_disable && !an_disable_admin)); + + if (!an_changes && link_modes == eth_proto_admin) goto out;
- mlx5_query_port_admin_status(mdev, &ps); - if (ps == MLX5_PORT_UP) - mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN); - mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN); - if (ps == MLX5_PORT_UP) - mlx5_set_port_admin_status(mdev, MLX5_PORT_UP); + mlx5_set_port_ptys(mdev, an_disable, link_modes, MLX5_PTYS_EN); + mlx5_toggle_port_link(mdev);
out: return err; @@@ -1287,87 -1272,6 +1287,87 @@@ static int mlx5e_get_module_eeprom(stru return 0; }
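The set path above replaces the old down/program/up dance with change detection: it compares the requested link modes and autoneg state against what is already administratively programmed, returns early when nothing differs, and otherwise writes PTYS once and bounces the link with mlx5_toggle_port_link(). A reduced sketch of the decision (the PTYS helpers are stand-ins):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static int set_link(uint32_t new_modes, bool an_disable,
                        uint32_t cur_admin, bool cur_an_disable)
    {
            bool an_changes = an_disable != cur_an_disable;

            if (!an_changes && new_modes == cur_admin)
                    return 0; /* nothing to program, leave the link alone */

            /* here the driver calls mlx5_set_port_ptys() and
             * mlx5_toggle_port_link() */
            printf("program modes=0x%x an_disable=%d\n", new_modes, an_disable);
            return 0;
    }

    int main(void)
    {
            set_link(0x10, false, 0x10, false); /* no-op */
            set_link(0x30, true, 0x10, false);  /* reprogram + link bounce */
            return 0;
    }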
+typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable); + +static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + bool rx_mode_changed; + u8 rx_cq_period_mode; + int err = 0; + bool reset; + + rx_cq_period_mode = enable ? + MLX5_CQ_PERIOD_MODE_START_FROM_CQE : + MLX5_CQ_PERIOD_MODE_START_FROM_EQE; + rx_mode_changed = rx_cq_period_mode != priv->params.rx_cq_period_mode; + + if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE && + !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe)) + return -ENOTSUPP; + + if (!rx_mode_changed) + return 0; + + reset = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (reset) + mlx5e_close_locked(netdev); + + mlx5e_set_rx_cq_mode_params(&priv->params, rx_cq_period_mode); + + if (reset) + err = mlx5e_open_locked(netdev); + + return err; +} + +static int mlx5e_handle_pflag(struct net_device *netdev, + u32 wanted_flags, + enum mlx5e_priv_flag flag, + mlx5e_pflag_handler pflag_handler) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + bool enable = !!(wanted_flags & flag); + u32 changes = wanted_flags ^ priv->pflags; + int err; + + if (!(changes & flag)) + return 0; + + err = pflag_handler(netdev, enable); + if (err) { + netdev_err(netdev, "%s private flag 0x%x failed err %d\n", + enable ? "Enable" : "Disable", flag, err); + return err; + } + + MLX5E_SET_PRIV_FLAG(priv, flag, enable); + return 0; +} + +static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err; + + mutex_lock(&priv->state_lock); + + err = mlx5e_handle_pflag(netdev, pflags, + MLX5E_PFLAG_RX_CQE_BASED_MODER, + set_pflag_rx_cqe_based_moder); + + mutex_unlock(&priv->state_lock); + return err ? -EINVAL : 0; +} + +static u32 mlx5e_get_priv_flags(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + return priv->pflags; +} + const struct ethtool_ops mlx5e_ethtool_ops = { .get_drvinfo = mlx5e_get_drvinfo, .get_link = ethtool_op_get_link, @@@ -1380,8 -1284,8 +1380,8 @@@ .set_channels = mlx5e_set_channels, .get_coalesce = mlx5e_get_coalesce, .set_coalesce = mlx5e_set_coalesce, - .get_settings = mlx5e_get_settings, - .set_settings = mlx5e_set_settings, + .get_link_ksettings = mlx5e_get_link_ksettings, + .set_link_ksettings = mlx5e_set_link_ksettings, .get_rxfh_key_size = mlx5e_get_rxfh_key_size, .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size, .get_rxfh = mlx5e_get_rxfh, @@@ -1397,6 -1301,4 +1397,6 @@@ .set_wol = mlx5e_set_wol, .get_module_info = mlx5e_get_module_info, .get_module_eeprom = mlx5e_get_module_eeprom, + .get_priv_flags = mlx5e_get_priv_flags, + .set_priv_flags = mlx5e_set_priv_flags }; diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 02a0f17,cb6defd..a64ce5d --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@@ -40,9 -40,8 +40,9 @@@ #include "vxlan.h"
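The private-flags machinery above is deliberately generic: mlx5e_handle_pflag() tests whether the requested word actually changed the given bit, calls the per-flag handler, and records the new state only on success, so adding another flag is one table entry plus one handler. Presumably these are driven from userspace via ethtool --show-priv-flags / --set-priv-flags, with the flag strings coming from the mlx5e_priv_flags array defined elsewhere in the driver. A runnable sketch of the changed-bit pattern, with a single hypothetical flag:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PFLAG_RX_CQE_MODER 0x1 /* hypothetical example flag */

    static uint32_t pflags; /* current flag word, as in priv->pflags */

    static int set_cqe_moder(bool enable)
    {
            printf("cqe based moderation -> %d\n", enable);
            return 0;
    }

    static int handle_pflag(uint32_t wanted, uint32_t flag, int (*h)(bool))
    {
            bool enable = wanted & flag;
            int err;

            if (!((wanted ^ pflags) & flag))
                    return 0; /* bit unchanged, nothing to do */

            err = h(enable);
            if (err)
                    return err;

            pflags = enable ? (pflags | flag) : (pflags & ~flag);
            return 0;
    }

    int main(void)
    {
            return handle_pflag(PFLAG_RX_CQE_MODER, PFLAG_RX_CQE_MODER,
                                set_cqe_moder);
    }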
struct mlx5e_rq_param { - u32 rqc[MLX5_ST_SZ_DW(rqc)]; - struct mlx5_wq_param wq; + u32 rqc[MLX5_ST_SZ_DW(rqc)]; + struct mlx5_wq_param wq; + bool am_enabled; };
struct mlx5e_sq_param { @@@ -56,7 -55,6 +56,7 @@@ struct mlx5e_cq_param u32 cqc[MLX5_ST_SZ_DW(cqc)]; struct mlx5_wq_param wq; u16 eq_ix; + u8 cq_period_mode; };
struct mlx5e_channel_param { @@@ -107,11 -105,11 +107,11 @@@ static void mlx5e_update_sw_counters(st
s->rx_packets += rq_stats->packets; s->rx_bytes += rq_stats->bytes; - s->lro_packets += rq_stats->lro_packets; - s->lro_bytes += rq_stats->lro_bytes; + s->rx_lro_packets += rq_stats->lro_packets; + s->rx_lro_bytes += rq_stats->lro_bytes; s->rx_csum_none += rq_stats->csum_none; - s->rx_csum_sw += rq_stats->csum_sw; - s->rx_csum_inner += rq_stats->csum_inner; + s->rx_csum_complete += rq_stats->csum_complete; + s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; s->rx_wqe_err += rq_stats->wqe_err; s->rx_mpwqe_filler += rq_stats->mpwqe_filler; s->rx_mpwqe_frag += rq_stats->mpwqe_frag; @@@ -124,24 -122,23 +124,23 @@@
s->tx_packets += sq_stats->packets; s->tx_bytes += sq_stats->bytes; - s->tso_packets += sq_stats->tso_packets; - s->tso_bytes += sq_stats->tso_bytes; - s->tso_inner_packets += sq_stats->tso_inner_packets; - s->tso_inner_bytes += sq_stats->tso_inner_bytes; + s->tx_tso_packets += sq_stats->tso_packets; + s->tx_tso_bytes += sq_stats->tso_bytes; + s->tx_tso_inner_packets += sq_stats->tso_inner_packets; + s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes; s->tx_queue_stopped += sq_stats->stopped; s->tx_queue_wake += sq_stats->wake; s->tx_queue_dropped += sq_stats->dropped; - s->tx_csum_inner += sq_stats->csum_offload_inner; - tx_offload_none += sq_stats->csum_offload_none; + s->tx_csum_partial_inner += sq_stats->csum_partial_inner; + tx_offload_none += sq_stats->csum_none; } }
/* Update calculated offload counters */ - s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner; - s->rx_csum_good = s->rx_packets - s->rx_csum_none - - s->rx_csum_sw; + s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner; + s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
- s->link_down_events = MLX5_GET(ppcnt_reg, + s->link_down_events_phy = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters, counter_set.phys_layer_cntrs.link_down_events); } @@@ -246,7 -243,7 +245,7 @@@ static void mlx5e_async_event(struct ml { struct mlx5e_priv *priv = vpriv;
- if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state)) + if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state)) return;
switch (event) { @@@ -262,12 -259,12 +261,12 @@@
static void mlx5e_enable_async_events(struct mlx5e_priv *priv) { - set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); + set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state); }
static void mlx5e_disable_async_events(struct mlx5e_priv *priv) { - clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); + clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state); synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC)); }
@@@ -338,9 -335,6 +337,9 @@@ static int mlx5e_create_rq(struct mlx5e wqe->data.byte_count = cpu_to_be32(byte_count); }
+ INIT_WORK(&rq->am.work, mlx5e_rx_am_work); + rq->am.mode = priv->params.rx_cq_period_mode; + rq->wq_type = priv->params.rq_wq_type; rq->pdev = c->pdev; rq->netdev = c->netdev; @@@ -513,9 -507,6 +512,9 @@@ static int mlx5e_open_rq(struct mlx5e_c if (err) goto err_disable_rq;
+ if (param->am_enabled) + set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); + set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP; @@@ -544,8 -535,6 +543,8 @@@ static void mlx5e_close_rq(struct mlx5e /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */ napi_synchronize(&rq->channel->napi);
+ cancel_work_sync(&rq->am.work); + mlx5e_disable_rq(rq); mlx5e_destroy_rq(rq); } @@@ -590,7 -579,7 +589,7 @@@ static int mlx5e_create_sq(struct mlx5e void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq); int err;
- err = mlx5_alloc_map_uar(mdev, &sq->uar, true); + err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf)); if (err) return err;
@@@ -712,8 -701,7 +711,8 @@@ static int mlx5e_enable_sq(struct mlx5e return err; }
-static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state) +static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, + int next_state, bool update_rl, int rl_index) { struct mlx5e_channel *c = sq->channel; struct mlx5e_priv *priv = c->priv; @@@ -733,10 -721,6 +732,10 @@@
MLX5_SET(modify_sq_in, in, sq_state, curr_state); MLX5_SET(sqc, sqc, state, next_state); + if (update_rl && next_state == MLX5_SQC_STATE_RDY) { + MLX5_SET64(modify_sq_in, in, modify_bitmask, 1); + MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index); + }
err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
@@@ -752,8 -736,6 +751,8 @@@ static void mlx5e_disable_sq(struct mlx struct mlx5_core_dev *mdev = priv->mdev;
mlx5_core_destroy_sq(mdev, sq->sqn); + if (sq->rate_limit) + mlx5_rl_remove_rate(mdev, sq->rate_limit); }
static int mlx5e_open_sq(struct mlx5e_channel *c, @@@ -771,8 -753,7 +770,8 @@@ if (err) goto err_destroy_sq;
- err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY); + err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY, + false, 0); if (err) goto err_disable_sq;
@@@ -811,8 -792,7 +810,8 @@@ static void mlx5e_close_sq(struct mlx5e if (mlx5e_sq_has_room_for(sq, 1)) mlx5e_send_nop(sq, true);
- mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR); + mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR, + false, 0); }
while (sq->cc != sq->pc) /* wait till sq is empty */ @@@ -906,7 -886,6 +905,7 @@@ static int mlx5e_enable_cq(struct mlx5e
mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+ MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - @@@ -936,7 -915,8 +935,7 @@@ static void mlx5e_disable_cq(struct mlx static int mlx5e_open_cq(struct mlx5e_channel *c, struct mlx5e_cq_param *param, struct mlx5e_cq *cq, - u16 moderation_usecs, - u16 moderation_frames) + struct mlx5e_cq_moder moderation) { int err; struct mlx5e_priv *priv = c->priv; @@@ -952,8 -932,8 +951,8 @@@
if (MLX5_CAP_GEN(mdev, cq_moderation)) mlx5_core_modify_cq_moderation(mdev, &cq->mcq, - moderation_usecs, - moderation_frames); + moderation.usec, + moderation.pkts); return 0;
err_destroy_cq: @@@ -982,7 -962,8 +981,7 @@@ static int mlx5e_open_tx_cqs(struct mlx
for (tc = 0; tc < c->num_tc; tc++) { err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq, - priv->params.tx_cq_moderation_usec, - priv->params.tx_cq_moderation_pkts); + priv->params.tx_cq_moderation); if (err) goto err_close_tx_cqs; } @@@ -1042,91 -1023,14 +1041,91 @@@ static void mlx5e_build_channeltc_to_tx ix + i * priv->params.num_channels; }
+static int mlx5e_set_sq_maxrate(struct net_device *dev, + struct mlx5e_sq *sq, u32 rate) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + u16 rl_index = 0; + int err; + + if (rate == sq->rate_limit) + /* nothing to do */ + return 0; + + if (sq->rate_limit) + /* remove current rl index to free space to next ones */ + mlx5_rl_remove_rate(mdev, sq->rate_limit); + + sq->rate_limit = 0; + + if (rate) { + err = mlx5_rl_add_rate(mdev, rate, &rl_index); + if (err) { + netdev_err(dev, "Failed configuring rate %u: %d\n", + rate, err); + return err; + } + } + + err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, + MLX5_SQC_STATE_RDY, true, rl_index); + if (err) { + netdev_err(dev, "Failed configuring rate %u: %d\n", + rate, err); + /* remove the rate from the table */ + if (rate) + mlx5_rl_remove_rate(mdev, rate); + return err; + } + + sq->rate_limit = rate; + return 0; +} + +static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_sq *sq = priv->txq_to_sq_map[index]; + int err = 0; + + if (!mlx5_rl_is_supported(mdev)) { + netdev_err(dev, "Rate limiting is not supported on this device\n"); + return -EINVAL; + } + + /* rate is given in Mb/sec, HW config is in Kb/sec */ + rate = rate << 10; + + /* Check whether rate in valid range, 0 is always valid */ + if (rate && !mlx5_rl_is_in_range(mdev, rate)) { + netdev_err(dev, "TX rate %u, is not in range\n", rate); + return -ERANGE; + } + + mutex_lock(&priv->state_lock); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + err = mlx5e_set_sq_maxrate(dev, sq, rate); + if (!err) + priv->tx_rates[index] = rate; + mutex_unlock(&priv->state_lock); + + return err; +} + static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct mlx5e_channel_param *cparam, struct mlx5e_channel **cp) { + struct mlx5e_cq_moder icosq_cq_moder = {0, 0}; struct net_device *netdev = priv->netdev; + struct mlx5e_cq_moder rx_cq_profile; int cpu = mlx5e_get_cpu(priv, ix); struct mlx5e_channel *c; + struct mlx5e_sq *sq; int err; + int i;
c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); if (!c) @@@ -1140,16 -1044,11 +1139,16 @@@ c->mkey_be = cpu_to_be32(priv->mkey.key); c->num_tc = priv->params.num_tc;
+ if (priv->params.rx_am_enabled) + rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode); + else + rx_cq_profile = priv->params.rx_cq_moderation; + mlx5e_build_channeltc_to_txq_map(priv, ix);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
- err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, 0, 0); + err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder); if (err) goto err_napi_del;
@@@ -1158,7 -1057,8 +1157,7 @@@ goto err_close_icosq_cq;
err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq, - priv->params.rx_cq_moderation_usec, - priv->params.rx_cq_moderation_pkts); + rx_cq_profile); if (err) goto err_close_tx_cqs;
@@@ -1172,16 -1072,6 +1171,16 @@@ if (err) goto err_close_icosq;
+ for (i = 0; i < priv->params.num_tc; i++) { + u32 txq_ix = priv->channeltc_to_txq_map[ix][i]; + + if (priv->tx_rates[txq_ix]) { + sq = priv->txq_to_sq_map[txq_ix]; + mlx5e_set_sq_maxrate(priv->netdev, sq, + priv->tx_rates[txq_ix]); + } + } + err = mlx5e_open_rq(c, &cparam->rq, &c->rq); if (err) goto err_close_sqs; @@@ -1258,8 -1148,6 +1257,8 @@@ static void mlx5e_build_rq_param(struc
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); param->wq.linear = 1; + + param->am_enabled = priv->params.rx_am_enabled; }
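On the TX rate limiting introduced above: ndo_set_tx_maxrate hands the driver a per-queue cap in Mb/s (typically from the per-queue tx_maxrate sysfs attribute), which mlx5e_set_tx_maxrate shifts left by 10 — i.e. multiplies by 1024 as an approximation — into the Kb/s units of the firmware rate table. Rate-limit indices reference entries in a shared device table, so mlx5e_set_sq_maxrate releases the old rate before taking a new one and only then re-modifies the SQ. A reduced, runnable sketch of that bookkeeping (the rl_* helpers stand in for mlx5_rl_{add,remove}_rate):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t current_limit; /* stands in for sq->rate_limit */

    static int rl_add(uint32_t rate, uint16_t *index) { *index = 1; return 0; }
    static void rl_remove(uint32_t rate) { (void)rate; }

    static int set_sq_maxrate(uint32_t rate)
    {
            uint16_t rl_index = 0;
            int err = 0;

            if (rate == current_limit)
                    return 0;                 /* nothing to do */

            if (current_limit)
                    rl_remove(current_limit); /* free the old table entry */
            current_limit = 0;

            if (rate)
                    err = rl_add(rate, &rl_index);
            /* the real driver also re-modifies the SQ with rl_index here */
            if (!err)
                    current_limit = rate;
            return err;
    }

    int main(void)
    {
            printf("err=%d\n", set_sq_maxrate(1000u << 10)); /* 1000 Mb/s */
            return 0;
    }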
static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param) @@@ -1325,8 -1213,6 +1324,8 @@@ static void mlx5e_build_rx_cq_param(str }
mlx5e_build_common_cq_param(priv, param); + + param->cq_period_mode = priv->params.rx_cq_period_mode; }
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, @@@ -1337,8 -1223,6 +1336,8 @@@ MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
mlx5e_build_common_cq_param(priv, param); + + param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; }
static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv, @@@ -1350,8 -1234,6 +1349,8 @@@ MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
mlx5e_build_common_cq_param(priv, param); + + param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; }
static void mlx5e_build_icosq_param(struct mlx5e_priv *priv, @@@ -2637,31 -2519,25 +2636,31 @@@ static int mlx5e_get_vf_stats(struct ne }
static void mlx5e_add_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct mlx5e_priv *priv = netdev_priv(netdev);
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + if (!mlx5e_vxlan_allowed(priv->mdev)) return;
- mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1); + mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1); }
static void mlx5e_del_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct mlx5e_priv *priv = netdev_priv(netdev);
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + if (!mlx5e_vxlan_allowed(priv->mdev)) return;
- mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0); + mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0); }
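The two handlers above are part of the tree-wide move from vxlan-specific ndo_{add,del}_vxlan_port to the generic ndo_udp_tunnel_{add,del} hooks: the ndo now receives a struct udp_tunnel_info and must filter on ti->type itself, since GENEVE and other UDP tunnels arrive through the same entry point. A small sketch of that filter (the types and port handling are stand-ins; in the kernel the port is network byte order):

    #include <stdio.h>

    enum tun_type { TUN_VXLAN, TUN_GENEVE };

    struct udp_tun_info { enum tun_type type; unsigned short port; };

    static void add_tunnel_port(const struct udp_tun_info *ti)
    {
            if (ti->type != TUN_VXLAN)
                    return; /* this driver only offloads VXLAN */
            printf("offloading VXLAN port %u\n", ti->port);
    }

    int main(void)
    {
            struct udp_tun_info vxlan = { TUN_VXLAN, 4789 };
            struct udp_tun_info geneve = { TUN_GENEVE, 6081 };

            add_tunnel_port(&vxlan);  /* accepted */
            add_tunnel_port(&geneve); /* silently ignored */
            return 0;
    }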
static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv, @@@ -2728,7 -2604,6 +2727,7 @@@ static const struct net_device_ops mlx5 .ndo_set_features = mlx5e_set_features, .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, + .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx5e_rx_flow_steer, #endif @@@ -2748,9 -2623,8 +2747,9 @@@ static const struct net_device_ops mlx5 .ndo_set_features = mlx5e_set_features, .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, - .ndo_add_vxlan_port = mlx5e_add_vxlan_port, - .ndo_del_vxlan_port = mlx5e_del_vxlan_port, + .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, + .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, + .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, .ndo_features_check = mlx5e_features_check, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx5e_rx_flow_steer, @@@ -2879,20 -2753,6 +2878,20 @@@ static bool cqe_compress_heuristic(u32 (pci_bw < 40000) && (pci_bw < link_speed)); }
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) +{ + params->rx_cq_period_mode = cq_period_mode; + + params->rx_cq_moderation.pkts = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; + params->rx_cq_moderation.usec = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; + + if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) + params->rx_cq_moderation.usec = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; +} + static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, struct net_device *netdev, int num_channels) @@@ -2900,9 -2760,6 +2899,9 @@@ struct mlx5e_priv *priv = netdev_priv(netdev); u32 link_speed = 0; u32 pci_bw = 0; + u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? + MLX5_CQ_PERIOD_MODE_START_FROM_CQE : + MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; @@@ -2948,13 -2805,13 +2947,13 @@@
priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type, BIT(priv->params.log_rq_size)); - priv->params.rx_cq_moderation_usec = - MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; - priv->params.rx_cq_moderation_pkts = - MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; - priv->params.tx_cq_moderation_usec = + + priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation); + mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode); + + priv->params.tx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; - priv->params.tx_cq_moderation_pkts = + priv->params.tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); priv->params.num_tc = 1; @@@ -2969,10 -2826,6 +2968,10 @@@ priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ /* Initialize pflags */ + MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER, + priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE); + priv->mdev = mdev; priv->netdev = netdev; priv->params.num_channels = num_channels; @@@ -3274,7 -3127,7 +3273,7 @@@ static void *mlx5e_create_netdev(struc
if (mlx5e_vxlan_allowed(mdev)) { rtnl_lock(); - vxlan_get_rx_port(netdev); + udp_tunnel_get_rx_info(netdev); rtnl_unlock(); }
@@@ -3380,7 -3233,6 +3379,7 @@@ static struct mlx5_interface mlx5e_inte
void mlx5e_init(void) { + mlx5e_build_ptys2ethtool_map(); mlx5_register_interface(&mlx5e_interface); }
diff --combined drivers/net/ethernet/mellanox/mlx5/core/main.c index 08cae34,c65f4a1..1f3b6d6 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@@ -1144,13 -1144,6 +1144,13 @@@ static int mlx5_load_one(struct mlx5_co dev_err(&pdev->dev, "Failed to init flow steering\n"); goto err_fs; } + + err = mlx5_init_rl_table(dev); + if (err) { + dev_err(&pdev->dev, "Failed to init rate limiting\n"); + goto err_rl; + } + #ifdef CONFIG_MLX5_CORE_EN err = mlx5_eswitch_init(dev); if (err) { @@@ -1190,8 -1183,6 +1190,8 @@@ err_sriov mlx5_eswitch_cleanup(dev->priv.eswitch); #endif err_reg_dev: + mlx5_cleanup_rl_table(dev); +err_rl: mlx5_cleanup_fs(dev); err_fs: mlx5_cleanup_mkey_table(dev); @@@ -1262,7 -1253,6 +1262,7 @@@ static int mlx5_unload_one(struct mlx5_ mlx5_eswitch_cleanup(dev->priv.eswitch); #endif
+ mlx5_cleanup_rl_table(dev); mlx5_cleanup_fs(dev); mlx5_cleanup_mkey_table(dev); mlx5_cleanup_srq_table(dev); @@@ -1518,8 -1508,9 +1518,9 @@@ static const struct pci_device_id mlx5_ { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */ { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */ { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */ - { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5 */ + { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5, PCIe 3.0 */ { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */ + { PCI_VDEVICE(MELLANOX, 0x1019) }, /* ConnectX-5, PCIe 4.0 */ { 0, } };
diff --combined drivers/net/ethernet/mellanox/mlxsw/spectrum.c index d23948b,660429e..a453fff --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@@ -49,7 -49,6 +49,7 @@@ #include <linux/jiffies.h> #include <linux/bitops.h> #include <linux/list.h> +#include <linux/notifier.h> #include <linux/dcbnl.h> #include <net/switchdev.h> #include <generated/utsrelease.h> @@@ -409,7 -408,11 +409,11 @@@ static netdev_tx_t mlxsw_sp_port_xmit(s }
mlxsw_sp_txhdr_construct(skb, &tx_info); - len = skb->len; + /* TX header is consumed by HW on the way so we shouldn't count its + * bytes as being sent. + */ + len = skb->len - MLXSW_TXHDR_LEN; + /* Due to a race we might fail here because of a full queue. In that * unlikely case we simply drop the packet. */ @@@ -633,14 -636,14 +637,14 @@@ static int mlxsw_sp_port_vlan_mode_tran return 0; }
-static struct mlxsw_sp_vfid * +static struct mlxsw_sp_fid * mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid) { - struct mlxsw_sp_vfid *vfid; + struct mlxsw_sp_fid *f;
- list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) { - if (vfid->vid == vid) - return vfid; + list_for_each_entry(f, &mlxsw_sp->port_vfids.list, list) { + if (f->vid == vid) + return f; }
return NULL; @@@ -652,70 -655,75 +656,70 @@@ static u16 mlxsw_sp_avail_vfid_get(cons MLXSW_SP_VFID_PORT_MAX); }
-static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid) +static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create) { - u16 fid = mlxsw_sp_vfid_to_fid(vfid); char sfmr_pl[MLXSW_REG_SFMR_LEN];
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0); + mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); }
-static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid) -{ - u16 fid = mlxsw_sp_vfid_to_fid(vfid); - char sfmr_pl[MLXSW_REG_SFMR_LEN]; +static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); -} - -static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, - u16 vid) +static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, + u16 vid) { struct device *dev = mlxsw_sp->bus_info->dev; - struct mlxsw_sp_vfid *vfid; - u16 n_vfid; + struct mlxsw_sp_fid *f; + u16 vfid, fid; int err;
- n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp); - if (n_vfid == MLXSW_SP_VFID_PORT_MAX) { + vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp); + if (vfid == MLXSW_SP_VFID_PORT_MAX) { dev_err(dev, "No available vFIDs\n"); return ERR_PTR(-ERANGE); }
- err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid); + fid = mlxsw_sp_vfid_to_fid(vfid); + err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true); if (err) { - dev_err(dev, "Failed to create vFID=%d\n", n_vfid); + dev_err(dev, "Failed to create FID=%d\n", fid); return ERR_PTR(err); }
- vfid = kzalloc(sizeof(*vfid), GFP_KERNEL); - if (!vfid) + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (!f) goto err_allocate_vfid;
- vfid->vfid = n_vfid; - vfid->vid = vid; + f->leave = mlxsw_sp_vport_vfid_leave; + f->fid = fid; + f->vid = vid;
- list_add(&vfid->list, &mlxsw_sp->port_vfids.list); - set_bit(n_vfid, mlxsw_sp->port_vfids.mapped); + list_add(&f->list, &mlxsw_sp->port_vfids.list); + set_bit(vfid, mlxsw_sp->port_vfids.mapped);
- return vfid; + return f;
err_allocate_vfid: - __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid); + mlxsw_sp_vfid_op(mlxsw_sp, fid, false); return ERR_PTR(-ENOMEM); }
static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_vfid *vfid) + struct mlxsw_sp_fid *f) { - clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped); - list_del(&vfid->list); + u16 vfid = mlxsw_sp_fid_to_vfid(f->fid); + + clear_bit(vfid, mlxsw_sp->port_vfids.mapped); + list_del(&f->list);
- __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid); + mlxsw_sp_vfid_op(mlxsw_sp, f->fid, false);
- kfree(vfid); + kfree(f); }
static struct mlxsw_sp_port * -mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_vfid *vfid) +mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_vport;
@@@ -733,7 -741,8 +737,7 @@@ mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING; mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged; mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id; - mlxsw_sp_vport->vport.vfid = vfid; - mlxsw_sp_vport->vport.vid = vfid->vid; + mlxsw_sp_vport->vport.vid = vid;
list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
@@@ -746,72 -755,13 +750,72 @@@ static void mlxsw_sp_port_vport_destroy kfree(mlxsw_sp_vport); }
+static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, + bool valid) +{ + enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; + u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + + return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid, + vid); +} + +static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport) +{ + u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + struct mlxsw_sp_fid *f; + int err; + + f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, vid); + if (!f) { + f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, vid); + if (IS_ERR(f)) + return PTR_ERR(f); + } + + if (!f->ref_count) { + err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true); + if (err) + goto err_vport_flood_set; + } + + err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true); + if (err) + goto err_vport_fid_map; + + mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f); + f->ref_count++; + + return 0; + +err_vport_fid_map: + if (!f->ref_count) + mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false); +err_vport_flood_set: + if (!f->ref_count) + mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f); + return err; +} + +static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport) +{ + struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + + mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL); + + mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false); + + if (--f->ref_count == 0) { + mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false); + mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f); + } +} + int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_port *mlxsw_sp_vport; - struct mlxsw_sp_vfid *vfid; int err;
/* VLAN 0 is added to HW filter when device goes up, but it is @@@ -825,10 -775,31 +829,10 @@@ return 0; }
- vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid); - if (!vfid) { - vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid); - if (IS_ERR(vfid)) { - netdev_err(dev, "Failed to create vFID for VID=%d\n", - vid); - return PTR_ERR(vfid); - } - } - - mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid); + mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid); if (!mlxsw_sp_vport) { netdev_err(dev, "Failed to create vPort for VID=%d\n", vid); - err = -ENOMEM; - goto err_port_vport_create; - } - - if (!vfid->nr_vports) { - err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, - true, false); - if (err) { - netdev_err(dev, "Failed to setup flooding for vFID=%d\n", - vfid->vfid); - goto err_vport_flood_set; - } + return -ENOMEM; }
/* When adding the first VLAN interface on a bridged port we need to @@@ -843,10 -814,15 +847,10 @@@ } }
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - true, - mlxsw_sp_vfid_to_fid(vfid->vfid), - vid); + err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport); if (err) { - netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n", - vid, vfid->vfid); - goto err_port_vid_to_fid_set; + netdev_err(dev, "Failed to join vFID\n"); + goto err_vport_vfid_join; }
err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); @@@ -869,6 -845,8 +873,6 @@@ goto err_port_stp_state_set; }
- vfid->nr_vports++; - return 0;
err_port_stp_state_set: @@@ -876,12 -854,21 +880,12 @@@ err_port_add_vid: mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); err_port_vid_learning_set: - mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, - mlxsw_sp_vfid_to_fid(vfid->vfid), vid); -err_port_vid_to_fid_set: + mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport); +err_vport_vfid_join: if (list_is_singular(&mlxsw_sp_port->vports_list)) mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); err_port_vp_mode_trans: - if (!vfid->nr_vports) - mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, - false); -err_vport_flood_set: mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); -err_port_vport_create: - if (!vfid->nr_vports) - mlxsw_sp_vfid_destroy(mlxsw_sp, vfid); return err; }
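The add_vid rework above delegates vFID lifetime to the ref-counted join/leave helpers introduced earlier in this file: the first vPort to join a VID creates the vFID and enables flooding, later joiners only map themselves, and the last leave tears both down again; the f->leave callback lets common code release whichever FID type a vPort holds without knowing which it is. A runnable sketch of the first-join/last-leave pattern:

    #include <stdio.h>

    struct fid { int ref_count; int id; };

    static int fid_join(struct fid *f)
    {
            if (!f->ref_count)
                    printf("fid %d: create + set up flooding\n", f->id);
            printf("fid %d: map port\n", f->id);
            f->ref_count++;
            return 0;
    }

    static void fid_leave(struct fid *f)
    {
            printf("fid %d: unmap port\n", f->id);
            if (--f->ref_count == 0)
                    printf("fid %d: tear down flooding + destroy\n", f->id);
    }

    int main(void)
    {
            struct fid f = { 0, 5000 }; /* made-up FID number */

            fid_join(&f);
            fid_join(&f);  /* second user: map only */
            fid_leave(&f);
            fid_leave(&f); /* last leave frees the FID */
            return 0;
    }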
@@@ -890,7 -877,7 +894,7 @@@ int mlxsw_sp_port_kill_vid(struct net_d { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp_port *mlxsw_sp_vport; - struct mlxsw_sp_vfid *vfid; + struct mlxsw_sp_fid *f; int err;
/* VLAN 0 is removed from HW filter when device goes down, but @@@ -905,6 -892,8 +909,6 @@@ return 0; }
- vfid = mlxsw_sp_vport->vport.vfid; - err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, MLXSW_REG_SPMS_STATE_DISCARDING); if (err) { @@@ -925,12 -914,16 +929,12 @@@ return err; }
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - false, - mlxsw_sp_vfid_to_fid(vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n", - vid, vfid->vfid); - return err; - } + /* Drop FID reference. If this was the last reference the + * resources will be freed. + */ + f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + if (f && !WARN_ON(!f->leave)) + f->leave(mlxsw_sp_vport);
/* When removing the last VLAN interface on a bridged port we need to * transition all active 802.1Q bridge VLANs to use VID to FID @@@ -944,8 -937,13 +948,8 @@@ } }
- vfid->nr_vports--; mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
- /* Destroy the vFID if no vPorts are assigned to it anymore. */ - if (!vfid->nr_vports) - mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid); - return 0; }
@@@ -2405,7 -2403,6 +2409,7 @@@ static int mlxsw_sp_init(struct mlxsw_c
mlxsw_sp->core = mlxsw_core; mlxsw_sp->bus_info = mlxsw_bus_info; + INIT_LIST_HEAD(&mlxsw_sp->fids); INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list); INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list); INIT_LIST_HEAD(&mlxsw_sp->br_mids.list); @@@ -2482,7 -2479,6 +2486,7 @@@ static void mlxsw_sp_fini(struct mlxsw_ mlxsw_sp_traps_fini(mlxsw_sp); mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); mlxsw_sp_ports_remove(mlxsw_sp); + WARN_ON(!list_empty(&mlxsw_sp->fids)); }
static struct mlxsw_config_profile mlxsw_sp_config_profile = { @@@ -2544,37 -2540,16 +2548,37 @@@ static struct mlxsw_driver mlxsw_sp_dri .profile = &mlxsw_sp_config_profile, };
-static int -mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port) +static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port, + u16 fid) +{ + if (mlxsw_sp_fid_is_vfid(fid)) + return mlxsw_sp_port_vport_find_by_fid(lag_port, fid); + else + return test_bit(fid, lag_port->active_vlans); +} + +static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char sfdf_pl[MLXSW_REG_SFDF_LEN]; + u8 local_port = mlxsw_sp_port->local_port; + u16 lag_id = mlxsw_sp_port->lag_id; + int i, count = 0;
- mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT); - mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port); + if (!mlxsw_sp_port->lagged) + return true;
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); + for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) { + struct mlxsw_sp_port *lag_port; + + lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i); + if (!lag_port || lag_port->local_port == local_port) + continue; + if (mlxsw_sp_lag_port_fid_member(lag_port, fid)) + count++; + } + + return !count; }
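mlxsw_sp_port_fdb_should_flush() above prevents a leaving LAG member from wiping FDB records that other members of the same LAG still rely on: it counts the remaining LAG ports that are members of the FID and allows the flush only when none are left. The decision reduced to runnable form (membership is a plain array instead of the port/FID lookups):

    #include <stdbool.h>
    #include <stdio.h>

    #define LAG_PORTS 4

    static bool member[LAG_PORTS]; /* which LAG ports use the FID */

    static bool should_flush(int leaving_port)
    {
            int i, others = 0;

            for (i = 0; i < LAG_PORTS; i++)
                    if (i != leaving_port && member[i])
                            others++;
            return others == 0; /* flush only when nobody else uses it */
    }

    int main(void)
    {
            member[0] = member[2] = true;
            printf("%d %d\n", should_flush(0), should_flush(2)); /* 0 0 */
            member[2] = false;
            printf("%d\n", should_flush(0)); /* 1: last user may flush */
            return 0;
    }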
static int @@@ -2589,8 -2564,17 +2593,8 @@@ mlxsw_sp_port_fdb_flush_by_port_fid(con mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); -} - -static int -mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port) -{ - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char sfdf_pl[MLXSW_REG_SFDF_LEN]; - - mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG); - mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); + netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n", + mlxsw_sp_port->local_port, fid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); } @@@ -2606,51 -2590,71 +2610,51 @@@ mlxsw_sp_port_fdb_flush_by_lag_id_fid(c mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
+ netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n", + mlxsw_sp_port->lag_id, fid); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); }
-static int -__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port) +int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) { - int err, last_err = 0; - u16 vid; - - for (vid = 1; vid < VLAN_N_VID - 1; vid++) { - err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid); - if (err) - last_err = err; - } + if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid)) + return 0;
- return last_err; + if (mlxsw_sp_port->lagged) + return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, + fid); + else + return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid); }
-static int -__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port) +static bool mlxsw_sp_port_dev_check(const struct net_device *dev) { - int err, last_err = 0; - u16 vid; - - for (vid = 1; vid < VLAN_N_VID - 1; vid++) { - err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid); - if (err) - last_err = err; - } - - return last_err; + return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; }
-static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port) +static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) { - if (!list_empty(&mlxsw_sp_port->vports_list)) - if (mlxsw_sp_port->lagged) - return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port); - else - return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port); - else - if (mlxsw_sp_port->lagged) - return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port); - else - return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port); + return !mlxsw_sp->master_bridge.dev || + mlxsw_sp->master_bridge.dev == br_dev; }
-static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport) +static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) { - u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport); - u16 fid = mlxsw_sp_vfid_to_fid(vfid); - - if (mlxsw_sp_vport->lagged) - return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport, - fid); - else - return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid); + mlxsw_sp->master_bridge.dev = br_dev; + mlxsw_sp->master_bridge.ref_count++; }
-static bool mlxsw_sp_port_dev_check(const struct net_device *dev) +static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp) { - return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; + if (--mlxsw_sp->master_bridge.ref_count == 0) + mlxsw_sp->master_bridge.dev = NULL; }
-static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port) +static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *br_dev) { struct net_device *dev = mlxsw_sp_port->dev; int err; @@@ -2664,8 -2668,6 +2668,8 @@@ if (err) return err;
+ mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev); + mlxsw_sp_port->learning = 1; mlxsw_sp_port->learning_sync = 1; mlxsw_sp_port->uc_flood = 1; @@@ -2674,14 -2676,16 +2678,14 @@@ return 0; }
-static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, - bool flush_fdb) +static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port) { struct net_device *dev = mlxsw_sp_port->dev;
- if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port)) - netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); - mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
+ mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp); + mlxsw_sp_port->learning = 0; mlxsw_sp_port->learning_sync = 0; mlxsw_sp_port->uc_flood = 0; @@@ -2690,7 -2694,28 +2694,7 @@@ /* Add implicit VLAN interface in the device, so that untagged * packets will be classified to the default vFID. */ - return mlxsw_sp_port_add_vid(dev, 0, 1); -} - -static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp, - struct net_device *br_dev) -{ - return !mlxsw_sp->master_bridge.dev || - mlxsw_sp->master_bridge.dev == br_dev; -} - -static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp, - struct net_device *br_dev) -{ - mlxsw_sp->master_bridge.dev = br_dev; - mlxsw_sp->master_bridge.ref_count++; -} - -static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp, - struct net_device *br_dev) -{ - if (--mlxsw_sp->master_bridge.ref_count == 0) - mlxsw_sp->master_bridge.dev = NULL; + mlxsw_sp_port_add_vid(dev, 0, 1); }
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) @@@ -2851,33 -2876,65 +2855,33 @@@ err_col_port_add return err; }
-static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, - struct net_device *br_dev, - bool flush_fdb); - -static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, - struct net_device *lag_dev) +static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *lag_dev) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - struct mlxsw_sp_port *mlxsw_sp_vport; - struct mlxsw_sp_upper *lag; u16 lag_id = mlxsw_sp_port->lag_id; - int err; + struct mlxsw_sp_upper *lag;
if (!mlxsw_sp_port->lagged) - return 0; + return; lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); WARN_ON(lag->ref_count == 0);
- err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id); - if (err) - return err; - err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); - if (err) - return err; - - /* In case we leave a LAG device that has bridges built on top, - * then their teardown sequence is never issued and we need to - * invoke the necessary cleanup routines ourselves. - */ - list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, - vport.list) { - struct net_device *br_dev; - - if (!mlxsw_sp_vport->bridged) - continue; - - br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); - mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false); - } + mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id); + mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
if (mlxsw_sp_port->bridged) { mlxsw_sp_port_active_vlans_del(mlxsw_sp_port); - mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false); - mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL); + mlxsw_sp_port_bridge_leave(mlxsw_sp_port); }
- if (lag->ref_count == 1) { - if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port)) - netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); - err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); - if (err) - return err; - } + if (lag->ref_count == 1) + mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, mlxsw_sp_port->local_port); mlxsw_sp_port->lagged = 0; lag->ref_count--; - return 0; }
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, @@@ -2926,25 -2983,42 +2930,25 @@@ static int mlxsw_sp_port_vlan_link(stru u16 vid = vlan_dev_vlan_id(vlan_dev);
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); - if (!mlxsw_sp_vport) { - WARN_ON(!mlxsw_sp_vport); + if (WARN_ON(!mlxsw_sp_vport)) return -EINVAL; - }
mlxsw_sp_vport->dev = vlan_dev;
return 0; }
-static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port, - struct net_device *vlan_dev) +static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *vlan_dev) { struct mlxsw_sp_port *mlxsw_sp_vport; u16 vid = vlan_dev_vlan_id(vlan_dev);
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); - if (!mlxsw_sp_vport) { - WARN_ON(!mlxsw_sp_vport); - return -EINVAL; - } - - /* When removing a VLAN device while still bridged we should first - * remove it from the bridge, as we receive the bridge's notification - * when the vPort is already gone. - */ - if (mlxsw_sp_vport->bridged) { - struct net_device *br_dev; - - br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); - mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true); - } + if (WARN_ON(!mlxsw_sp_vport)) + return;
mlxsw_sp_vport->dev = mlxsw_sp_port->dev; - - return 0; }
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, @@@ -2954,7 -3028,7 +2958,7 @@@ struct mlxsw_sp_port *mlxsw_sp_port; struct net_device *upper_dev; struct mlxsw_sp *mlxsw_sp; - int err; + int err = 0;
mlxsw_sp_port = netdev_priv(dev); mlxsw_sp = mlxsw_sp_port->mlxsw_sp; @@@ -2963,56 -3037,73 +2967,56 @@@ switch (event) { case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; - if (!info->master || !info->linking) + if (!is_vlan_dev(upper_dev) && + !netif_is_lag_master(upper_dev) && + !netif_is_bridge_master(upper_dev)) + return -EINVAL; + if (!info->linking) break; /* HW limitation forbids to put ports to multiple bridges. */ if (netif_is_bridge_master(upper_dev) && !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev)) - return NOTIFY_BAD; + return -EINVAL; if (netif_is_lag_master(upper_dev) && !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, info->upper_info)) - return NOTIFY_BAD; + return -EINVAL; + if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) + return -EINVAL; + if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && + !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) + return -EINVAL; break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; if (is_vlan_dev(upper_dev)) { - if (info->linking) { + if (info->linking) err = mlxsw_sp_port_vlan_link(mlxsw_sp_port, upper_dev); - if (err) { - netdev_err(dev, "Failed to link VLAN device\n"); - return NOTIFY_BAD; - } - } else { - err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port, - upper_dev); - if (err) { - netdev_err(dev, "Failed to unlink VLAN device\n"); - return NOTIFY_BAD; - } - } + else + mlxsw_sp_port_vlan_unlink(mlxsw_sp_port, + upper_dev); } else if (netif_is_bridge_master(upper_dev)) { - if (info->linking) { - err = mlxsw_sp_port_bridge_join(mlxsw_sp_port); - if (err) { - netdev_err(dev, "Failed to join bridge\n"); - return NOTIFY_BAD; - } - mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev); - } else { - err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port, - true); - mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev); - if (err) { - netdev_err(dev, "Failed to leave bridge\n"); - return NOTIFY_BAD; - } - } + if (info->linking) + err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, + upper_dev); + else + mlxsw_sp_port_bridge_leave(mlxsw_sp_port); } else if (netif_is_lag_master(upper_dev)) { - if (info->linking) { + if (info->linking) err = mlxsw_sp_port_lag_join(mlxsw_sp_port, upper_dev); - if (err) { - netdev_err(dev, "Failed to join link aggregation\n"); - return NOTIFY_BAD; - } - } else { - err = mlxsw_sp_port_lag_leave(mlxsw_sp_port, - upper_dev); - if (err) { - netdev_err(dev, "Failed to leave link aggregation\n"); - return NOTIFY_BAD; - } - } + else + mlxsw_sp_port_lag_leave(mlxsw_sp_port, + upper_dev); + } else { + err = -EINVAL; + WARN_ON(1); } break; }
- return NOTIFY_DONE; + return err; }
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, @@@ -3036,7 -3127,7 +3040,7 @@@ break; }
- return NOTIFY_DONE; + return 0; }
static int mlxsw_sp_netdevice_port_event(struct net_device *dev, @@@ -3050,7 -3141,7 +3054,7 @@@ return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr); }
- return NOTIFY_DONE; + return 0; }
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, @@@ -3063,23 -3154,23 +3067,23 @@@ netdev_for_each_lower_dev(lag_dev, dev, iter) { if (mlxsw_sp_port_dev_check(dev)) { ret = mlxsw_sp_netdevice_port_event(dev, event, ptr); - if (ret == NOTIFY_BAD) + if (ret) return ret; } }
- return NOTIFY_DONE; + return 0; }
-static struct mlxsw_sp_vfid * +static struct mlxsw_sp_fid * mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp, const struct net_device *br_dev) { - struct mlxsw_sp_vfid *vfid; + struct mlxsw_sp_fid *f;
- list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) { - if (vfid->br_dev == br_dev) - return vfid; + list_for_each_entry(f, &mlxsw_sp->br_vfids.list, list) { + if (f->dev == br_dev) + return f; }
return NULL; @@@ -3101,127 -3192,180 +3105,127 @@@ static u16 mlxsw_sp_avail_br_vfid_get(c MLXSW_SP_VFID_BR_MAX); }
-static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp, - struct net_device *br_dev) +static void mlxsw_sp_vport_br_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport); + +static struct mlxsw_sp_fid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) { struct device *dev = mlxsw_sp->bus_info->dev; - struct mlxsw_sp_vfid *vfid; - u16 n_vfid; + struct mlxsw_sp_fid *f; + u16 vfid, fid; int err;
- n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp)); - if (n_vfid == MLXSW_SP_VFID_MAX) { + vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp)); + if (vfid == MLXSW_SP_VFID_MAX) { dev_err(dev, "No available vFIDs\n"); return ERR_PTR(-ERANGE); }
- err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid); + fid = mlxsw_sp_vfid_to_fid(vfid); + err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true); if (err) { - dev_err(dev, "Failed to create vFID=%d\n", n_vfid); + dev_err(dev, "Failed to create FID=%d\n", fid); return ERR_PTR(err); }
- vfid = kzalloc(sizeof(*vfid), GFP_KERNEL); - if (!vfid) + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (!f) goto err_allocate_vfid;
- vfid->vfid = n_vfid; - vfid->br_dev = br_dev; + f->leave = mlxsw_sp_vport_br_vfid_leave; + f->fid = fid; + f->dev = br_dev;
- list_add(&vfid->list, &mlxsw_sp->br_vfids.list); - set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped); + list_add(&f->list, &mlxsw_sp->br_vfids.list); + set_bit(mlxsw_sp_vfid_to_br_vfid(vfid), mlxsw_sp->br_vfids.mapped);
- return vfid; + return f;
err_allocate_vfid: - __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid); + mlxsw_sp_vfid_op(mlxsw_sp, fid, false); return ERR_PTR(-ENOMEM); }
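The rewritten mlxsw_sp_br_vfid_create() above is a clean instance of the kernel's unwind idiom: each failing step jumps to a label that undoes only the steps already completed, so the vFID operation is rolled back when the kzalloc() fails. A minimal sketch of the shape, with alloc_hw_fid()/free_hw_fid() as hypothetical stand-ins for mlxsw_sp_vfid_op(..., true/false):

    #include <linux/slab.h>
    #include <linux/err.h>

    /* Hypothetical HW helpers standing in for mlxsw_sp_vfid_op(). */
    int alloc_hw_fid(u16 fid);
    void free_hw_fid(u16 fid);

    struct my_fid { u16 fid; };

    static struct my_fid *my_fid_create(u16 fid)
    {
        struct my_fid *f;
        int err;

        err = alloc_hw_fid(fid);             /* step 1: reserve the HW resource */
        if (err)
            return ERR_PTR(err);

        f = kzalloc(sizeof(*f), GFP_KERNEL); /* step 2: host-side state */
        if (!f) {
            err = -ENOMEM;
            goto err_alloc;                  /* undo step 1 only */
        }
        f->fid = fid;
        return f;

    err_alloc:
        free_hw_fid(fid);
        return ERR_PTR(err);
    }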
static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_vfid *vfid) + struct mlxsw_sp_fid *f) { - u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid); + u16 vfid = mlxsw_sp_fid_to_vfid(f->fid); + u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid);
clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped); - list_del(&vfid->list); + list_del(&f->list);
- __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid); + mlxsw_sp_vfid_op(mlxsw_sp, f->fid, false);
- kfree(vfid); + kfree(f); }
-static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, - struct net_device *br_dev, - bool flush_fdb) +static int mlxsw_sp_vport_br_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *br_dev) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; - u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); - struct net_device *dev = mlxsw_sp_vport->dev; - struct mlxsw_sp_vfid *vfid, *new_vfid; + struct mlxsw_sp_fid *f; int err;
- vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev); - if (!vfid) { - WARN_ON(!vfid); - return -EINVAL; + f = mlxsw_sp_br_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev); + if (!f) { + f = mlxsw_sp_br_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev); + if (IS_ERR(f)) + return PTR_ERR(f); }
- /* We need a vFID to go back to after leaving the bridge's vFID. */ - new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid); - if (!new_vfid) { - new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid); - if (IS_ERR(new_vfid)) { - netdev_err(dev, "Failed to create vFID for VID=%d\n", - vid); - return PTR_ERR(new_vfid); - } - } + err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true); + if (err) + goto err_vport_flood_set;
- /* Invalidate existing {Port, VID} to vFID mapping and create a new - * one for the new vFID. - */ - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - false, - mlxsw_sp_vfid_to_fid(vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n", - vfid->vfid); - goto err_port_vid_to_fid_invalidate; - } + err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true); + if (err) + goto err_vport_fid_map;
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - true, - mlxsw_sp_vfid_to_fid(new_vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n", - new_vfid->vfid); - goto err_port_vid_to_fid_validate; - } + mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f); + f->ref_count++;
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); - if (err) { - netdev_err(dev, "Failed to disable learning\n"); - goto err_port_vid_learning_set; - } + netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);
- err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, - false); - if (err) { - netdev_err(dev, "Failed clear to clear flooding\n"); - goto err_vport_flood_set; - } + return 0;
- err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, - MLXSW_REG_SPMS_STATE_FORWARDING); - if (err) { - netdev_err(dev, "Failed to set STP state\n"); - goto err_port_stp_state_set; - } +err_vport_fid_map: + mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false); +err_vport_flood_set: + if (!f->ref_count) + mlxsw_sp_br_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f); + return err; +} + +static void mlxsw_sp_vport_br_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport) +{ + struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
- if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport)) - netdev_err(dev, "Failed to flush FDB\n"); + netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
- /* Switch between the vFIDs and destroy the old one if needed. */ - new_vfid->nr_vports++; - mlxsw_sp_vport->vport.vfid = new_vfid; - vfid->nr_vports--; - if (!vfid->nr_vports) - mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid); + mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);
- mlxsw_sp_vport->learning = 0; - mlxsw_sp_vport->learning_sync = 0; - mlxsw_sp_vport->uc_flood = 0; - mlxsw_sp_vport->bridged = 0; + mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
- return 0; + mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);
-err_port_stp_state_set: -err_vport_flood_set: -err_port_vid_learning_set: -err_port_vid_to_fid_validate: -err_port_vid_to_fid_invalidate: - /* Rollback vFID only if new. */ - if (!new_vfid->nr_vports) - mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid); - return err; + mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL); + if (--f->ref_count == 0) + mlxsw_sp_br_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f); }
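The join/leave pair above replaces the old nr_vports bookkeeping with a ref_count on the shared FID object: the first vPort to join a bridge creates the FID, each joiner takes a reference, and the last one to leave destroys it. A minimal sketch of that lifetime rule, with find_fid()/create_fid()/destroy_fid() as hypothetical helpers:

    #include <linux/err.h>
    #include <linux/netdevice.h>

    struct my_fid { int ref_count; };

    /* Hypothetical lookup/lifetime helpers. */
    struct my_fid *find_fid(struct net_device *br_dev);
    struct my_fid *create_fid(struct net_device *br_dev); /* ref_count = 0 */
    void destroy_fid(struct my_fid *f);

    static struct my_fid *my_fid_get(struct net_device *br_dev)
    {
        struct my_fid *f = find_fid(br_dev);

        if (!f) {
            f = create_fid(br_dev);    /* create on first use */
            if (IS_ERR(f))
                return f;
        }
        f->ref_count++;
        return f;
    }

    static void my_fid_put(struct my_fid *f)
    {
        if (--f->ref_count == 0)       /* destroy on last release */
            destroy_fid(f);
    }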
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport, struct net_device *br_dev) { - struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid; - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); struct net_device *dev = mlxsw_sp_vport->dev; - struct mlxsw_sp_vfid *vfid; int err;
- vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev); - if (!vfid) { - vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev); - if (IS_ERR(vfid)) { - netdev_err(dev, "Failed to create bridge vFID\n"); - return PTR_ERR(vfid); - } - } + mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
- err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false); + err = mlxsw_sp_vport_br_vfid_join(mlxsw_sp_vport, br_dev); if (err) { - netdev_err(dev, "Failed to setup flooding for vFID=%d\n", - vfid->vfid); - goto err_port_flood_set; + netdev_err(dev, "Failed to join vFID\n"); + goto err_vport_br_vfid_join; }
err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); @@@ -3230,6 -3374,38 +3234,6 @@@ goto err_port_vid_learning_set; }
- /* We need to invalidate existing {Port, VID} to vFID mapping and - * create a new one for the bridge's vFID. - */ - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - false, - mlxsw_sp_vfid_to_fid(old_vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n", - old_vfid->vfid); - goto err_port_vid_to_fid_invalidate; - } - - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - true, - mlxsw_sp_vfid_to_fid(vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n", - vfid->vfid); - goto err_port_vid_to_fid_validate; - } - - /* Switch between the vFIDs and destroy the old one if needed. */ - vfid->nr_vports++; - mlxsw_sp_vport->vport.vfid = vfid; - old_vfid->nr_vports--; - if (!old_vfid->nr_vports) - mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid); - mlxsw_sp_vport->learning = 1; mlxsw_sp_vport->learning_sync = 1; mlxsw_sp_vport->uc_flood = 1; @@@ -3237,32 -3413,20 +3241,32 @@@
return 0;
-err_port_vid_to_fid_validate: - mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, - mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid); -err_port_vid_to_fid_invalidate: - mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); err_port_vid_learning_set: - mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false); -err_port_flood_set: - if (!vfid->nr_vports) - mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid); + mlxsw_sp_vport_br_vfid_leave(mlxsw_sp_vport); +err_vport_br_vfid_join: + mlxsw_sp_vport_vfid_join(mlxsw_sp_vport); return err; }
+static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport) +{ + u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + + mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); + + mlxsw_sp_vport_br_vfid_leave(mlxsw_sp_vport); + + mlxsw_sp_vport_vfid_join(mlxsw_sp_vport); + + mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, + MLXSW_REG_SPMS_STATE_FORWARDING); + + mlxsw_sp_vport->learning = 0; + mlxsw_sp_vport->learning_sync = 0; + mlxsw_sp_vport->uc_flood = 0; + mlxsw_sp_vport->bridged = 0; +} + static bool mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port, const struct net_device *br_dev) @@@ -3271,9 -3435,7 +3275,9 @@@
list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, vport.list) { - if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev) + struct net_device *dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); + + if (dev && dev == br_dev) return false; }
@@@ -3288,39 -3450,56 +3292,39 @@@ static int mlxsw_sp_netdevice_vport_eve struct netdev_notifier_changeupper_info *info = ptr; struct mlxsw_sp_port *mlxsw_sp_vport; struct net_device *upper_dev; - int err; + int err = 0;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
switch (event) { case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; - if (!info->master || !info->linking) - break; if (!netif_is_bridge_master(upper_dev)) - return NOTIFY_BAD; + return -EINVAL; + if (!info->linking) + break; /* We can't have multiple VLAN interfaces configured on * the same port and being members in the same bridge. */ if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port, upper_dev)) - return NOTIFY_BAD; + return -EINVAL; break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; - if (!info->master) - break; if (info->linking) { - if (!mlxsw_sp_vport) { - WARN_ON(!mlxsw_sp_vport); - return NOTIFY_BAD; - } + if (WARN_ON(!mlxsw_sp_vport)) + return -EINVAL; err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport, upper_dev); - if (err) { - netdev_err(dev, "Failed to join bridge\n"); - return NOTIFY_BAD; - } } else { - /* We ignore bridge's unlinking notifications if vPort - * is gone, since we already left the bridge when the - * VLAN device was unlinked from the real device. - */ if (!mlxsw_sp_vport) - return NOTIFY_DONE; - err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, - upper_dev, true); - if (err) { - netdev_err(dev, "Failed to leave bridge\n"); - return NOTIFY_BAD; - } + return 0; + mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport); } }
- return NOTIFY_DONE; + return err; }
static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev, @@@ -3335,12 -3514,12 +3339,12 @@@ if (mlxsw_sp_port_dev_check(dev)) { ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr, vid); - if (ret == NOTIFY_BAD) + if (ret) return ret; } }
- return NOTIFY_DONE; + return 0; }
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, @@@ -3356,23 -3535,24 +3360,23 @@@ return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr, vid);
- return NOTIFY_DONE; + return 0; }
static int mlxsw_sp_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); + int err = 0;
if (mlxsw_sp_port_dev_check(dev)) - return mlxsw_sp_netdevice_port_event(dev, event, ptr); - - if (netif_is_lag_master(dev)) - return mlxsw_sp_netdevice_lag_event(dev, event, ptr); - - if (is_vlan_dev(dev)) - return mlxsw_sp_netdevice_vlan_event(dev, event, ptr); + err = mlxsw_sp_netdevice_port_event(dev, event, ptr); + else if (netif_is_lag_master(dev)) + err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); + else if (is_vlan_dev(dev)) + err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
- return NOTIFY_DONE; + return notifier_from_errno(err); }
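The conversion running through all of these handlers ends here: the per-device helpers now return 0 or a negative errno, and only the outermost notifier callback translates the result, so NOTIFY_BAD is produced in exactly one place. A minimal sketch of that boundary, assuming a hypothetical inner helper handle_event():

    #include <linux/notifier.h>

    int handle_event(unsigned long event, void *ptr); /* hypothetical */

    static int my_netdevice_event(struct notifier_block *nb,
                                  unsigned long event, void *ptr)
    {
        int err = handle_event(event, ptr);

        /* 0 -> NOTIFY_DONE; -Exxx -> NOTIFY_BAD with the errno encoded */
        return notifier_from_errno(err);
    }

The caller on the other side of the notifier chain can then recover the driver's errno with notifier_to_errno() and fail the operation with a meaningful code instead of a bare NOTIFY_BAD.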
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = { diff --combined drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 2195ed3,ba26bb3..c25a8ba --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@@ -1979,7 -1979,7 +1979,7 @@@ static int __nfp_net_set_config_and_ena if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) { memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports)); memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt)); - vxlan_get_rx_port(nn->netdev); + udp_tunnel_get_rx_info(nn->netdev); }
return err; @@@ -2015,7 -2015,7 +2015,7 @@@ static void nfp_net_open_stack(struct n
netif_tx_wake_all_queues(nn->netdev);
- enable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector); + enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); nfp_net_read_link_status(nn); }
@@@ -2044,7 -2044,7 +2044,7 @@@ static int nfp_net_netdev_open(struct n NFP_NET_IRQ_LSC_IDX, nn->lsc_handler); if (err) goto err_free_exn; - disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector); + disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings), GFP_KERNEL); @@@ -2133,7 -2133,7 +2133,7 @@@ static void nfp_net_close_stack(struct { unsigned int r;
- disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector); + disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); netif_carrier_off(nn->netdev); nn->link_up = false;
@@@ -2551,32 -2551,26 +2551,32 @@@ static int nfp_net_find_vxlan_idx(struc }
static void nfp_net_add_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct nfp_net *nn = netdev_priv(netdev); int idx;
- idx = nfp_net_find_vxlan_idx(nn, port); + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + idx = nfp_net_find_vxlan_idx(nn, ti->port); if (idx == -ENOSPC) return;
if (!nn->vxlan_usecnt[idx]++) - nfp_net_set_vxlan_port(nn, idx, port); + nfp_net_set_vxlan_port(nn, idx, ti->port); }
static void nfp_net_del_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct nfp_net *nn = netdev_priv(netdev); int idx;
- idx = nfp_net_find_vxlan_idx(nn, port); + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + idx = nfp_net_find_vxlan_idx(nn, ti->port); if (!nn->vxlan_usecnt[idx] || idx == -ENOSPC) return;
@@@ -2595,8 -2589,8 +2595,8 @@@ static const struct net_device_ops nfp_ .ndo_set_mac_address = eth_mac_addr, .ndo_set_features = nfp_net_set_features, .ndo_features_check = nfp_net_features_check, - .ndo_add_vxlan_port = nfp_net_add_vxlan_port, - .ndo_del_vxlan_port = nfp_net_del_vxlan_port, + .ndo_udp_tunnel_add = nfp_net_add_vxlan_port, + .ndo_udp_tunnel_del = nfp_net_del_vxlan_port, };
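This nfp hunk shows the pattern every driver in this merge follows: the protocol-specific ndo_add_vxlan_port/ndo_add_geneve_port callbacks collapse into a single ndo_udp_tunnel_add/ndo_udp_tunnel_del pair taking a struct udp_tunnel_info, and a driver that offloads only one tunnel type filters on ti->type. A minimal sketch, where my_priv and my_hw_add_port() are hypothetical:

    #include <linux/netdevice.h>
    #include <net/udp_tunnel.h>

    struct my_priv { int dummy; };
    void my_hw_add_port(struct my_priv *priv, __be16 port); /* hypothetical */

    static void my_udp_tunnel_add(struct net_device *netdev,
                                  struct udp_tunnel_info *ti)
    {
        struct my_priv *priv = netdev_priv(netdev);

        if (ti->type != UDP_TUNNEL_TYPE_VXLAN) /* VXLAN-only hardware */
            return;
        my_hw_add_port(priv, ti->port);        /* ti->port is big-endian */
    }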
/** diff --combined drivers/net/ethernet/qlogic/qed/qed_l2.c index d121a8b,aada4c7..a12c6ca --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@@ -72,6 -72,7 +72,7 @@@ int qed_sp_eth_vport_start(struct qed_h p_ramrod->mtu = cpu_to_le16(p_params->mtu); p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan; p_ramrod->drop_ttl0_en = p_params->drop_ttl0; + p_ramrod->untagged = p_params->only_untagged;
SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1); SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1); @@@ -247,10 -248,6 +248,6 @@@ qed_sp_update_accept_mode(struct qed_hw SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL, !!(accept_filter & QED_ACCEPT_NONE));
- SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL, - (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) && - !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED))); - SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL, !!(accept_filter & QED_ACCEPT_NONE));
@@@ -575,12 -572,9 +572,12 @@@ int qed_sp_eth_rxq_start_ramrod(struct p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
- rc = qed_spq_post(p_hwfn, p_ent, NULL); + p_ramrod->vf_rx_prod_index = params->vf_qid; + if (params->vf_qid) + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Queue is meant for VF rxq[%04x]\n", params->vf_qid);
- return rc; + return qed_spq_post(p_hwfn, p_ent, NULL); }
static int @@@ -618,7 -612,7 +615,7 @@@ qed_sp_eth_rx_queue_start(struct qed_hw
*pp_prod = (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM + - MSTORM_PRODS_OFFSET(abs_l2_queue); + MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue);
/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64), @@@ -762,9 -756,9 +759,9 @@@ int qed_sp_eth_txq_start_ramrod(struct struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; struct qed_hw_cid_data *p_tx_cid; - u8 abs_vport_id; + u16 pq_id, abs_tx_q_id = 0; int rc = -EINVAL; - u16 pq_id; + u8 abs_vport_id;
/* Store information for the stop */ p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id]; @@@ -775,10 -769,6 +772,10 @@@ if (rc) return rc;
+ rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id); + if (rc) + return rc; + /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = cid; @@@ -798,7 -788,6 +795,7 @@@ p_ramrod->sb_index = p_params->sb_idx; p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id); p_ramrod->pbl_size = cpu_to_le16(pbl_size); DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
@@@ -1493,51 -1482,51 +1490,51 @@@ static void __qed_get_vport_port_stats( offsetof(struct public_port, stats), sizeof(port_stats));
- p_stats->rx_64_byte_packets += port_stats.pmm.r64; - p_stats->rx_65_to_127_byte_packets += port_stats.pmm.r127; - p_stats->rx_128_to_255_byte_packets += port_stats.pmm.r255; - p_stats->rx_256_to_511_byte_packets += port_stats.pmm.r511; - p_stats->rx_512_to_1023_byte_packets += port_stats.pmm.r1023; - p_stats->rx_1024_to_1518_byte_packets += port_stats.pmm.r1518; - p_stats->rx_1519_to_1522_byte_packets += port_stats.pmm.r1522; - p_stats->rx_1519_to_2047_byte_packets += port_stats.pmm.r2047; - p_stats->rx_2048_to_4095_byte_packets += port_stats.pmm.r4095; - p_stats->rx_4096_to_9216_byte_packets += port_stats.pmm.r9216; - p_stats->rx_9217_to_16383_byte_packets += port_stats.pmm.r16383; - p_stats->rx_crc_errors += port_stats.pmm.rfcs; - p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf; - p_stats->rx_pause_frames += port_stats.pmm.rxpf; - p_stats->rx_pfc_frames += port_stats.pmm.rxpp; - p_stats->rx_align_errors += port_stats.pmm.raln; - p_stats->rx_carrier_errors += port_stats.pmm.rfcr; - p_stats->rx_oversize_packets += port_stats.pmm.rovr; - p_stats->rx_jabbers += port_stats.pmm.rjbr; - p_stats->rx_undersize_packets += port_stats.pmm.rund; - p_stats->rx_fragments += port_stats.pmm.rfrg; - p_stats->tx_64_byte_packets += port_stats.pmm.t64; - p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127; - p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255; - p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511; - p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023; - p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518; - p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047; - p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095; - p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216; - p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383; - p_stats->tx_pause_frames += port_stats.pmm.txpf; - p_stats->tx_pfc_frames += port_stats.pmm.txpp; - p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec; - p_stats->tx_total_collisions += port_stats.pmm.tncl; - p_stats->rx_mac_bytes += port_stats.pmm.rbyte; - p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca; - p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca; - p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca; - p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok; - p_stats->tx_mac_bytes += port_stats.pmm.tbyte; - p_stats->tx_mac_uc_packets += port_stats.pmm.txuca; - p_stats->tx_mac_mc_packets += port_stats.pmm.txmca; - p_stats->tx_mac_bc_packets += port_stats.pmm.txbca; - p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf; + p_stats->rx_64_byte_packets += port_stats.eth.r64; + p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127; + p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255; + p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511; + p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023; + p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518; + p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522; + p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047; + p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095; + p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216; + p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383; + p_stats->rx_crc_errors += port_stats.eth.rfcs; + p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf; + p_stats->rx_pause_frames += port_stats.eth.rxpf; + p_stats->rx_pfc_frames += port_stats.eth.rxpp; + p_stats->rx_align_errors += port_stats.eth.raln; + p_stats->rx_carrier_errors += port_stats.eth.rfcr;
+ p_stats->rx_oversize_packets += port_stats.eth.rovr; + p_stats->rx_jabbers += port_stats.eth.rjbr; + p_stats->rx_undersize_packets += port_stats.eth.rund; + p_stats->rx_fragments += port_stats.eth.rfrg; + p_stats->tx_64_byte_packets += port_stats.eth.t64; + p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127; + p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255; + p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511; + p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023; + p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518; + p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047; + p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095; + p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216; + p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383; + p_stats->tx_pause_frames += port_stats.eth.txpf; + p_stats->tx_pfc_frames += port_stats.eth.txpp; + p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec; + p_stats->tx_total_collisions += port_stats.eth.tncl; + p_stats->rx_mac_bytes += port_stats.eth.rbyte; + p_stats->rx_mac_uc_packets += port_stats.eth.rxuca; + p_stats->rx_mac_mc_packets += port_stats.eth.rxmca; + p_stats->rx_mac_bc_packets += port_stats.eth.rxbca; + p_stats->rx_mac_frames_ok += port_stats.eth.rxpok; + p_stats->tx_mac_bytes += port_stats.eth.tbyte; + p_stats->tx_mac_uc_packets += port_stats.eth.txuca; + p_stats->tx_mac_mc_packets += port_stats.eth.txmca; + p_stats->tx_mac_bc_packets += port_stats.eth.txbca; + p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf; for (j = 0; j < 8; j++) { p_stats->brb_truncates += port_stats.brb.brb_truncate[j]; p_stats->brb_discards += port_stats.brb.brb_discard[j]; @@@ -1756,7 -1745,8 +1753,8 @@@ static int qed_start_vport(struct qed_d start.vport_id, start.mtu); }
- qed_reset_vport_stats(cdev); + if (params->clear_stats) + qed_reset_vport_stats(cdev);
return 0; } @@@ -2166,18 -2156,11 +2164,18 @@@ static int qed_fp_cqe_completion(struc extern const struct qed_iov_hv_ops qed_iov_ops_pass; #endif
+#ifdef CONFIG_DCB +extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass; +#endif + static const struct qed_eth_ops qed_eth_ops_pass = { .common = &qed_common_ops_pass, #ifdef CONFIG_QED_SRIOV .iov = &qed_iov_ops_pass, #endif +#ifdef CONFIG_DCB + .dcb = &qed_dcbnl_ops_pass, +#endif .fill_dev_info = &qed_fill_eth_dev_info, .register_ops = &qed_register_eth_ops, .check_mac = &qed_check_mac, diff --combined drivers/net/ethernet/qlogic/qed/qed_main.c index e32ee57,c7e01b3..1f13abb --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@@ -207,8 -207,6 +207,8 @@@ int qed_fill_dev_info(struct qed_dev *c dev_info->pci_mem_start = cdev->pci_params.mem_start; dev_info->pci_mem_end = cdev->pci_params.mem_end; dev_info->pci_irq = cdev->pci_params.irq; + dev_info->rdma_supported = + (cdev->hwfns[0].hw_info.personality == QED_PCI_ETH_ROCE); dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
@@@ -834,8 -832,7 +834,8 @@@ static int qed_slowpath_start(struct qe goto err2; }
- data = cdev->firmware->data; + /* First Dword used to differentiate between various sources */ + data = cdev->firmware->data + sizeof(u32); }
memset(&tunn_info, 0, sizeof(tunn_info)); @@@ -903,8 -900,7 +903,8 @@@ static int qed_slowpath_stop(struct qed
if (IS_PF(cdev)) { qed_free_stream_mem(cdev); - qed_sriov_disable(cdev, true); + if (IS_QED_ETH_IF(cdev)) + qed_sriov_disable(cdev, true);
qed_nic_stop(cdev); qed_slowpath_irq_free(cdev); @@@ -995,7 -991,8 +995,7 @@@ static bool qed_can_link_change(struct return true; }
-static int qed_set_link(struct qed_dev *cdev, - struct qed_link_params *params) +static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) { struct qed_hwfn *hwfn; struct qed_mcp_link_params *link_params; @@@ -1035,7 -1032,7 +1035,7 @@@ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; if (params->adv_speeds & 0) link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G; + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; } if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) link_params->speed.forced_speed = params->forced_speed; @@@ -1056,19 -1053,19 +1056,19 @@@ if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) { switch (params->loopback_mode) { case QED_LINK_LOOPBACK_INT_PHY: - link_params->loopback_mode = PMM_LOOPBACK_INT_PHY; + link_params->loopback_mode = ETH_LOOPBACK_INT_PHY; break; case QED_LINK_LOOPBACK_EXT_PHY: - link_params->loopback_mode = PMM_LOOPBACK_EXT_PHY; + link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY; break; case QED_LINK_LOOPBACK_EXT: - link_params->loopback_mode = PMM_LOOPBACK_EXT; + link_params->loopback_mode = ETH_LOOPBACK_EXT; break; case QED_LINK_LOOPBACK_MAC: - link_params->loopback_mode = PMM_LOOPBACK_MAC; + link_params->loopback_mode = ETH_LOOPBACK_MAC; break; default: - link_params->loopback_mode = PMM_LOOPBACK_NONE; + link_params->loopback_mode = ETH_LOOPBACK_NONE; break; } } @@@ -1088,6 -1085,7 +1088,7 @@@ static int qed_get_port_type(u32 media_ case MEDIA_SFPP_10G_FIBER: case MEDIA_SFP_1G_FIBER: case MEDIA_XFP_FIBER: + case MEDIA_MODULE_FIBER: case MEDIA_KR: port_type = PORT_FIBRE; break; @@@ -1187,7 -1185,7 +1188,7 @@@ static void qed_fill_link(struct qed_hw NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) if_link->advertised_caps |= 0; if (params.speed.advertised_speeds & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G) + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) if_link->advertised_caps |= 0;
if (link_caps.speed_capabilities & @@@ -1204,7 -1202,7 +1205,7 @@@ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) if_link->supported_caps |= 0; if (link_caps.speed_capabilities & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G) + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) if_link->supported_caps |= 0;
if (link.link_up) @@@ -1303,38 -1301,6 +1304,38 @@@ static int qed_drain(struct qed_dev *cd return 0; }
+static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal) +{ + *rx_coal = cdev->rx_coalesce_usecs; + *tx_coal = cdev->tx_coalesce_usecs; +} + +static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, + u8 qid, u16 sb_id) +{ + struct qed_hwfn *hwfn; + struct qed_ptt *ptt; + int hwfn_index; + int status = 0; + + hwfn_index = qid % cdev->num_hwfns; + hwfn = &cdev->hwfns[hwfn_index]; + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal, + qid / cdev->num_hwfns, sb_id); + if (status) + goto out; + status = qed_set_txq_coalesce(hwfn, ptt, tx_coal, + qid / cdev->num_hwfns, sb_id); +out: + qed_ptt_release(hwfn, ptt); + + return status; +} + static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); @@@ -1381,7 -1347,5 +1382,7 @@@ const struct qed_common_ops qed_common_ .update_msglvl = &qed_init_dp, .chain_alloc = &qed_chain_alloc, .chain_free = &qed_chain_free, + .get_coalesce = &qed_get_coalesce, + .set_coalesce = &qed_set_coalesce, .set_led = &qed_set_led, }; diff --combined drivers/net/ethernet/qlogic/qed/qed_spq.c index ad9bf5c,b122f60..97ffeae --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@@ -213,19 -213,15 +213,15 @@@ static int qed_spq_hw_post(struct qed_h SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL, DQ_XCM_CORE_SPQ_PROD_CMD); db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD; - - /* validate producer is up to-date */ - rmb(); - db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
- /* do not reorder */ - barrier(); + /* make sure the SPQE is updated before the doorbell */ + wmb();
DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
/* make sure doorbell is rung */ - mmiowb(); + wmb();
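Both barriers in qed_spq_hw_post() become wmb(): the first orders the SPQ element write against the doorbell write, the second keeps later writes from passing the doorbell itself (mmiowb() only ordered MMIO between CPUs holding a common lock, which is not the guarantee needed here). A condensed sketch of the ordering, hedged, with ring[] and db_reg as hypothetical producer-ring and doorbell locations:

    #include <linux/io.h>

    /* ring[] lives in DMA-coherent memory; db_reg is an ioremap()ed register. */
    static void my_ring_doorbell(u32 elem, u32 prod,
                                 u32 *ring, void __iomem *db_reg)
    {
        ring[prod] = elem;    /* 1. publish the element in coherent memory */
        wmb();                /* 2. make it visible before the doorbell    */
        writel(prod, db_reg); /* 3. MMIO doorbell write                    */
        wmb();                /* 4. order the doorbell before later writes */
    }

Note that writel() already carries ordering on most architectures; the explicit wmb() mirrors the belt-and-braces approach the driver takes above.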
DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n", @@@ -343,7 -339,6 +339,7 @@@ struct qed_eq *qed_eq_alloc(struct qed_ if (qed_chain_alloc(p_hwfn->cdev, QED_CHAIN_USE_TO_PRODUCE, QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U16, num_elem, sizeof(union event_ring_element), &p_eq->chain)) { @@@ -417,10 -412,10 +413,10 @@@ int qed_eth_cqe_completion(struct qed_h ***************************************************************************/ void qed_spq_setup(struct qed_hwfn *p_hwfn) { - struct qed_spq *p_spq = p_hwfn->p_spq; - struct qed_spq_entry *p_virt = NULL; - dma_addr_t p_phys = 0; - unsigned int i = 0; + struct qed_spq *p_spq = p_hwfn->p_spq; + struct qed_spq_entry *p_virt = NULL; + dma_addr_t p_phys = 0; + u32 i, capacity;
INIT_LIST_HEAD(&p_spq->pending); INIT_LIST_HEAD(&p_spq->completion_pending); @@@ -432,8 -427,7 +428,8 @@@ p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod); p_virt = p_spq->p_virt;
- for (i = 0; i < p_spq->chain.capacity; i++) { + capacity = qed_chain_get_capacity(&p_spq->chain); + for (i = 0; i < capacity; i++) { DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
list_add_tail(&p_virt->list, &p_spq->free_pool); @@@ -461,10 -455,9 +457,10 @@@
int qed_spq_alloc(struct qed_hwfn *p_hwfn) { - struct qed_spq *p_spq = NULL; - dma_addr_t p_phys = 0; - struct qed_spq_entry *p_virt = NULL; + struct qed_spq_entry *p_virt = NULL; + struct qed_spq *p_spq = NULL; + dma_addr_t p_phys = 0; + u32 capacity;
/* SPQ struct */ p_spq = @@@ -478,7 -471,6 +474,7 @@@ if (qed_chain_alloc(p_hwfn->cdev, QED_CHAIN_USE_TO_PRODUCE, QED_CHAIN_MODE_SINGLE, + QED_CHAIN_CNT_TYPE_U16, 0, /* N/A when the mode is SINGLE */ sizeof(struct slow_path_element), &p_spq->chain)) { @@@ -487,11 -479,11 +483,11 @@@ }
/* allocate and fill the SPQ elements (incl. ramrod data list) */ + capacity = qed_chain_get_capacity(&p_spq->chain); p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, - p_spq->chain.capacity * + capacity * sizeof(struct qed_spq_entry), - &p_phys, - GFP_KERNEL); + &p_phys, GFP_KERNEL);
if (!p_virt) goto spq_allocate_fail; @@@ -511,18 -503,16 +507,18 @@@ spq_allocate_fail void qed_spq_free(struct qed_hwfn *p_hwfn) { struct qed_spq *p_spq = p_hwfn->p_spq; + u32 capacity;
if (!p_spq) return;
- if (p_spq->p_virt) + if (p_spq->p_virt) { + capacity = qed_chain_get_capacity(&p_spq->chain); dma_free_coherent(&p_hwfn->cdev->pdev->dev, - p_spq->chain.capacity * + capacity * sizeof(struct qed_spq_entry), - p_spq->p_virt, - p_spq->p_phys); + p_spq->p_virt, p_spq->p_phys); + }
qed_chain_free(p_hwfn->cdev, &p_spq->chain); ; @@@ -620,7 -610,9 +616,9 @@@ qed_spq_add_entry(struct qed_hwfn *p_hw
*p_en2 = *p_ent;
- kfree(p_ent); + /* EBLOCK responsible to free the allocated p_ent */ + if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK) + kfree(p_ent);
p_ent = p_en2; } @@@ -755,6 -747,15 +753,15 @@@ int qed_spq_post(struct qed_hwfn *p_hwf * Thus, after gaining the answer perform the cleanup here. */ rc = qed_spq_block(p_hwfn, p_ent, fw_return_code); + + if (p_ent->queue == &p_spq->unlimited_pending) { + /* This is an allocated p_ent which does not need to + * return to pool. + */ + kfree(p_ent); + return rc; + } + if (rc) goto spq_post_fail2;
@@@ -850,8 -851,12 +857,12 @@@ int qed_spq_completion(struct qed_hwfn found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data, fw_return_code);
- if (found->comp_mode != QED_SPQ_MODE_EBLOCK) - /* EBLOCK is responsible for freeing its own entry */ + if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) || + (found->queue == &p_spq->unlimited_pending)) + /* EBLOCK is responsible for returning its own entry into the + * free list, unless it originally added the entry into the + * unlimited pending list. + */ qed_spq_return_entry(p_hwfn, found);
/* Attempt to post pending requests */ @@@ -877,9 -882,9 +888,9 @@@ struct qed_consq *qed_consq_alloc(struc if (qed_chain_alloc(p_hwfn->cdev, QED_CHAIN_USE_TO_PRODUCE, QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U16, QED_CHAIN_PAGE_SIZE / 0x80, - 0x80, - &p_consq->chain)) { + 0x80, &p_consq->chain)) { DP_NOTICE(p_hwfn, "Failed to allocate consq chain"); goto consq_allocate_fail; } diff --combined drivers/net/ethernet/qlogic/qede/qede_main.c index 2972742,f8e11f9..19bc631 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@@ -24,7 -24,12 +24,7 @@@ #include <linux/netdev_features.h> #include <linux/udp.h> #include <linux/tcp.h> -#ifdef CONFIG_QEDE_VXLAN -#include <net/vxlan.h> -#endif -#ifdef CONFIG_QEDE_GENEVE -#include <net/geneve.h> -#endif +#include <net/udp_tunnel.h> #include <linux/ip.h> #include <net/ipv6.h> #include <net/tcp.h> @@@ -574,6 -579,8 +574,6 @@@ netdev_tx_t qede_start_xmit(struct sk_b
/* Fill the parsing flags & params according to the requested offload */ if (xmit_type & XMIT_L4_CSUM) { - u16 temp = 1 << ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT; - /* We don't re-calculate IP checksum as it is already done by * the upper stack */ @@@ -583,8 -590,14 +583,8 @@@ if (xmit_type & XMIT_ENC) { first_bd->data.bd_flags.bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; - } else { - /* In cases when OS doesn't indicate for inner offloads - * when packet is tunnelled, we need to override the HW - * tunnel configuration so that packets are treated as - * regular non tunnelled packets and no inner offloads - * are done by the hardware. - */ - first_bd->data.bitfields |= cpu_to_le16(temp); + first_bd->data.bitfields |= + 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; }
/* If the packet is IPv6 with extension header, indicate that @@@ -642,10 -655,6 +642,10 @@@ tx_data_bd = (struct eth_tx_bd *)third_bd; data_split = true; } + } else { + first_bd->data.bitfields |= + (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << + ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; }
/* Handle fragmented skb */ @@@ -2107,75 -2116,75 +2107,75 @@@ int qede_set_features(struct net_devic return 0; }
-#ifdef CONFIG_QEDE_VXLAN -static void qede_add_vxlan_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) +static void qede_udp_tunnel_add(struct net_device *dev, + struct udp_tunnel_info *ti) { struct qede_dev *edev = netdev_priv(dev); - u16 t_port = ntohs(port); + u16 t_port = ntohs(ti->port);
- if (edev->vxlan_dst_port) - return; + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (edev->vxlan_dst_port) + return;
- edev->vxlan_dst_port = t_port; + edev->vxlan_dst_port = t_port;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", t_port); + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", + t_port);
- set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); - schedule_delayed_work(&edev->sp_task, 0); -} + set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (edev->geneve_dst_port) + return;
-static void qede_del_vxlan_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) -{ - struct qede_dev *edev = netdev_priv(dev); - u16 t_port = ntohs(port); + edev->geneve_dst_port = t_port;
- if (t_port != edev->vxlan_dst_port) + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", + t_port); + set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); + break; + default: return; + }
- edev->vxlan_dst_port = 0; - - DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", t_port); - - set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); schedule_delayed_work(&edev->sp_task, 0); } -#endif
-#ifdef CONFIG_QEDE_GENEVE -static void qede_add_geneve_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) +static void qede_udp_tunnel_del(struct net_device *dev, + struct udp_tunnel_info *ti) { struct qede_dev *edev = netdev_priv(dev); - u16 t_port = ntohs(port); + u16 t_port = ntohs(ti->port);
- if (edev->geneve_dst_port) - return; + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (t_port != edev->vxlan_dst_port) + return;
- edev->geneve_dst_port = t_port; + edev->vxlan_dst_port = 0;
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", t_port); - set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); - schedule_delayed_work(&edev->sp_task, 0); -} + DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", + t_port);
-static void qede_del_geneve_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) -{ - struct qede_dev *edev = netdev_priv(dev); - u16 t_port = ntohs(port); + set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (t_port != edev->geneve_dst_port) + return;
- if (t_port != edev->geneve_dst_port) - return; + edev->geneve_dst_port = 0;
- edev->geneve_dst_port = 0; + DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", + t_port); + set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); + break; + default: + return; + }
- DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", t_port); - set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); schedule_delayed_work(&edev->sp_task, 0); } -#endif
static const struct net_device_ops qede_netdev_ops = { .ndo_open = qede_open, @@@ -2199,8 -2208,14 +2199,8 @@@ .ndo_get_vf_config = qede_get_vf_config, .ndo_set_vf_rate = qede_set_vf_rate, #endif -#ifdef CONFIG_QEDE_VXLAN - .ndo_add_vxlan_port = qede_add_vxlan_port, - .ndo_del_vxlan_port = qede_del_vxlan_port, -#endif -#ifdef CONFIG_QEDE_GENEVE - .ndo_add_geneve_port = qede_add_geneve_port, - .ndo_del_geneve_port = qede_del_geneve_port, -#endif + .ndo_udp_tunnel_add = qede_udp_tunnel_add, + .ndo_udp_tunnel_del = qede_udp_tunnel_del, };
/* ------------------------------------------------------------------------- @@@ -2490,10 -2505,6 +2490,10 @@@ static int __qede_probe(struct pci_dev
edev->ops->register_ops(cdev, &qede_ll_ops, edev);
+#ifdef CONFIG_DCB + qede_set_dcbnl_ops(edev->ndev); +#endif + INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); mutex_init(&edev->qede_lock);
@@@ -2812,7 -2823,6 +2812,7 @@@ static int qede_alloc_mem_rxq(struct qe rc = edev->ops->common->chain_alloc(edev->cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE, QED_CHAIN_MODE_NEXT_PTR, + QED_CHAIN_CNT_TYPE_U16, RX_RING_SIZE, sizeof(struct eth_rx_bd), &rxq->rx_bd_ring); @@@ -2824,7 -2834,6 +2824,7 @@@ rc = edev->ops->common->chain_alloc(edev->cdev, QED_CHAIN_USE_TO_CONSUME, QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U16, RX_RING_SIZE, sizeof(union eth_rx_cqe), &rxq->rx_comp_ring); @@@ -2876,9 -2885,9 +2876,9 @@@ static int qede_alloc_mem_txq(struct qe rc = edev->ops->common->chain_alloc(edev->cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE, QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U16, NUM_TX_BDS_MAX, - sizeof(*p_virt), - &txq->tx_pbl); + sizeof(*p_virt), &txq->tx_pbl); if (rc) goto err;
@@@ -3222,7 -3231,7 +3222,7 @@@ static int qede_stop_queues(struct qede return rc; }
- static int qede_start_queues(struct qede_dev *edev) + static int qede_start_queues(struct qede_dev *edev, bool clear_stats) { int rc, tc, i; int vlan_removal_en = 1; @@@ -3453,6 -3462,7 +3453,7 @@@ out
enum qede_load_mode { QEDE_LOAD_NORMAL, + QEDE_LOAD_RELOAD, };
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode) @@@ -3491,7 -3501,7 +3492,7 @@@ goto err3; DP_INFO(edev, "Setup IRQs succeeded\n");
- rc = qede_start_queues(edev); + rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD); if (rc) goto err4; DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n"); @@@ -3546,7 -3556,7 +3547,7 @@@ void qede_reload(struct qede_dev *edev if (func) func(edev, args);
- qede_load(edev, QEDE_LOAD_NORMAL); + qede_load(edev, QEDE_LOAD_RELOAD);
mutex_lock(&edev->qede_lock); qede_config_rx_mode(edev->ndev); @@@ -3568,8 -3578,12 +3569,8 @@@ static int qede_open(struct net_device if (rc) return rc;
-#ifdef CONFIG_QEDE_VXLAN - vxlan_get_rx_port(ndev); -#endif -#ifdef CONFIG_QEDE_GENEVE - geneve_get_rx_port(ndev); -#endif + udp_tunnel_get_rx_info(ndev); + return 0; }
diff --combined drivers/net/ethernet/ti/cpsw.c index 6d0c5a0,5319089..1a93a1f --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@@ -364,6 -364,7 +364,6 @@@ static inline void slave_write(struct c }
struct cpsw_priv { - spinlock_t lock; struct platform_device *pdev; struct net_device *ndev; struct napi_struct napi_rx; @@@ -1243,7 -1244,6 +1243,7 @@@ static void cpsw_slave_stop(struct cpsw slave->phy = NULL; cpsw_ale_control_set(priv->ale, slave_port, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); + soft_reset_slave(slave); }
static int cpsw_ndo_open(struct net_device *ndev) @@@ -1252,11 -1252,7 +1252,11 @@@ int i, ret; u32 reg;
- pm_runtime_get_sync(&priv->pdev->dev); + ret = pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&priv->pdev->dev); + return ret; + }
if (!cpsw_common_res_usage_state(priv)) cpsw_intr_disable(priv); @@@ -1282,7 -1278,6 +1282,7 @@@
if (!cpsw_common_res_usage_state(priv)) { struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0); + int buf_num;
/* setup tx dma to fixed prio and zero offset */ cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1); @@@ -1310,8 -1305,10 +1310,8 @@@ enable_irq(priv->irqs_table[0]); }
- if (WARN_ON(!priv->data.rx_descs)) - priv->data.rx_descs = 128; - - for (i = 0; i < priv->data.rx_descs; i++) { + buf_num = cpdma_chan_get_rx_buf_num(priv->dma); + for (i = 0; i < buf_num; i++) { struct sk_buff *skb;
ret = -ENOMEM; @@@ -1614,17 -1611,10 +1614,17 @@@ static int cpsw_ndo_set_mac_address(str struct sockaddr *addr = (struct sockaddr *)p; int flags = 0; u16 vid = 0; + int ret;
if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL;
+ ret = pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&priv->pdev->dev); + return ret; + } + if (priv->data.dual_emac) { vid = priv->slaves[priv->emac_port].port_vlan; flags = ALE_VLAN; @@@ -1639,8 -1629,6 +1639,8 @@@ memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); for_each_slave(priv, cpsw_set_slave_mac, priv);
+ pm_runtime_put(&priv->pdev->dev); + return 0; }
@@@ -1705,17 -1693,10 +1705,17 @@@ static int cpsw_ndo_vlan_rx_add_vid(str __be16 proto, u16 vid) { struct cpsw_priv *priv = netdev_priv(ndev); + int ret;
if (vid == priv->data.default_vlan) return 0;
+ ret = pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&priv->pdev->dev); + return ret; + } + if (priv->data.dual_emac) { /* In dual EMAC, reserved VLAN id should not be used for * creating VLAN interfaces as this can break the dual @@@ -1730,10 -1711,7 +1730,10 @@@ }
dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); - return cpsw_add_vlan_ale_entry(priv, vid); + ret = cpsw_add_vlan_ale_entry(priv, vid); + + pm_runtime_put(&priv->pdev->dev); + return ret; }
static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, @@@ -1745,12 -1723,6 +1745,12 @@@ if (vid == priv->data.default_vlan) return 0;
+ ret = pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&priv->pdev->dev); + return ret; + } + if (priv->data.dual_emac) { int i;
@@@ -1770,10 -1742,8 +1770,10 @@@ if (ret != 0) return ret;
- return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast, - 0, ALE_VLAN, vid); + ret = cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast, + 0, ALE_VLAN, vid); + pm_runtime_put(&priv->pdev->dev); + return ret; }
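All of the cpsw entry points touched above gain the same runtime-PM bracket: pm_runtime_get_sync() before touching registers, pm_runtime_put() when done, and pm_runtime_put_noidle() on the error path, because get_sync() raises the usage count even when the resume fails. The idiom in isolation, with do_hw_work() as a hypothetical register access:

    #include <linux/pm_runtime.h>

    int do_hw_work(struct device *dev); /* hypothetical */

    static int my_ndo_op(struct device *dev)
    {
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
            /* the usage count was raised even on failure: drop it */
            pm_runtime_put_noidle(dev);
            return ret;
        }

        ret = do_hw_work(dev);

        pm_runtime_put(dev);
        return ret;
    }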
static const struct net_device_ops cpsw_netdev_ops = { @@@ -1932,33 -1902,10 +1932,33 @@@ static int cpsw_set_pauseparam(struct n priv->tx_pause = pause->tx_pause ? true : false;
for_each_slave(priv, _cpsw_adjust_link, priv, &link); - return 0; }
+static int cpsw_ethtool_op_begin(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int ret; + + ret = pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + cpsw_err(priv, drv, "ethtool begin failed %d\n", ret); + pm_runtime_put_noidle(&priv->pdev->dev); + } + + return ret; +} + +static void cpsw_ethtool_op_complete(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int ret; + + ret = pm_runtime_put(&priv->pdev->dev); + if (ret < 0) + cpsw_err(priv, drv, "ethtool complete failed %d\n", ret); +} + static const struct ethtool_ops cpsw_ethtool_ops = { .get_drvinfo = cpsw_get_drvinfo, .get_msglevel = cpsw_get_msglevel, @@@ -1978,8 -1925,6 +1978,8 @@@ .set_wol = cpsw_set_wol, .get_regs_len = cpsw_get_regs_len, .get_regs = cpsw_get_regs, + .begin = cpsw_ethtool_op_begin, + .complete = cpsw_ethtool_op_complete, };
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, @@@ -2054,6 -1999,12 +2054,6 @@@ static int cpsw_probe_dt(struct cpsw_pl } data->bd_ram_size = prop;
- if (of_property_read_u32(node, "rx_descs", &prop)) { - dev_err(&pdev->dev, "Missing rx_descs property in the DT.\n"); - return -EINVAL; - } - data->rx_descs = prop; - if (of_property_read_u32(node, "mac_control", &prop)) { dev_err(&pdev->dev, "Missing mac_control property in the DT.\n"); return -EINVAL; @@@ -2071,7 -2022,7 +2071,7 @@@ if (ret) dev_warn(&pdev->dev, "Doesn't have any child node\n");
- for_each_child_of_node(node, slave_node) { + for_each_available_child_of_node(node, slave_node) { struct cpsw_slave_data *slave_data = data->slave_data + i; const void *mac_addr = NULL; int lenp; @@@ -2173,6 -2124,7 +2173,6 @@@ static int cpsw_probe_dual_emac(struct }
priv_sl2 = netdev_priv(ndev); - spin_lock_init(&priv_sl2->lock); priv_sl2->data = *data; priv_sl2->pdev = pdev; priv_sl2->ndev = ndev; @@@ -2291,6 -2243,7 +2291,6 @@@ static int cpsw_probe(struct platform_d
platform_set_drvdata(pdev, ndev); priv = netdev_priv(ndev); - spin_lock_init(&priv->lock); priv->pdev = pdev; priv->ndev = ndev; priv->dev = &ndev->dev; @@@ -2368,11 -2321,7 +2368,11 @@@ /* Need to enable clocks with runtime PM api to access module * registers */ - pm_runtime_get_sync(&pdev->dev); + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); + goto clean_runtime_disable_ret; + } priv->version = readl(&priv->regs->id_ver); pm_runtime_put_sync(&pdev->dev);
@@@ -2556,8 -2505,6 +2556,6 @@@ clean_ale_ret: cpsw_ale_destroy(priv->ale); clean_dma_ret: - cpdma_chan_destroy(priv->txch); - cpdma_chan_destroy(priv->rxch); cpdma_ctlr_destroy(priv->dma); clean_runtime_disable_ret: pm_runtime_disable(&pdev->dev); @@@ -2585,8 -2532,6 +2583,6 @@@ static int cpsw_remove(struct platform_ unregister_netdev(ndev);
cpsw_ale_destroy(priv->ale); - cpdma_chan_destroy(priv->txch); - cpdma_chan_destroy(priv->rxch); cpdma_ctlr_destroy(priv->dma); pm_runtime_disable(&pdev->dev); device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device); @@@ -2609,12 -2554,16 +2605,12 @@@ static int cpsw_suspend(struct device * for (i = 0; i < priv->data.slaves; i++) { if (netif_running(priv->slaves[i].ndev)) cpsw_ndo_stop(priv->slaves[i].ndev); - soft_reset_slave(priv->slaves + i); } } else { if (netif_running(ndev)) cpsw_ndo_stop(ndev); - for_each_slave(priv, soft_reset_slave); }
- pm_runtime_put_sync(&pdev->dev); - /* Select sleep pin state */ pinctrl_pm_select_sleep_state(&pdev->dev);
@@@ -2627,6 -2576,8 +2623,6 @@@ static int cpsw_resume(struct device *d struct net_device *ndev = platform_get_drvdata(pdev); struct cpsw_priv *priv = netdev_priv(ndev);
- pm_runtime_get_sync(&pdev->dev); - /* Select default pin state */ pinctrl_pm_select_default_state(&pdev->dev);
diff --combined drivers/net/geneve.c index aa61708,cc39cef..310e0b9c --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@@ -12,6 -12,7 +12,6 @@@
#include <linux/kernel.h> #include <linux/module.h> -#include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/hash.h> #include <net/dst_metadata.h> @@@ -396,6 -397,23 +396,6 @@@ static struct socket *geneve_create_soc return sock; }
-static void geneve_notify_add_rx_port(struct geneve_sock *gs) -{ - struct net_device *dev; - struct sock *sk = gs->sock->sk; - struct net *net = sock_net(sk); - sa_family_t sa_family = geneve_get_sk_family(gs); - __be16 port = inet_sk(sk)->inet_sport; - - rcu_read_lock(); - for_each_netdev_rcu(net, dev) { - if (dev->netdev_ops->ndo_add_geneve_port) - dev->netdev_ops->ndo_add_geneve_port(dev, sa_family, - port); - } - rcu_read_unlock(); -} - static int geneve_hlen(struct genevehdr *gh) { return sizeof(*gh) + gh->opt_len * 4; @@@ -515,7 -533,7 +515,7 @@@ static struct geneve_sock *geneve_socke INIT_HLIST_HEAD(&gs->vni_list[h]);
/* Initialize the geneve udp offloads structure */ - geneve_notify_add_rx_port(gs); + udp_tunnel_notify_add_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE);
/* Mark socket as an encapsulation socket */ memset(&tunnel_cfg, 0, sizeof(tunnel_cfg)); @@@ -530,13 -548,31 +530,13 @@@ return gs; }
-static void geneve_notify_del_rx_port(struct geneve_sock *gs) -{ - struct net_device *dev; - struct sock *sk = gs->sock->sk; - struct net *net = sock_net(sk); - sa_family_t sa_family = geneve_get_sk_family(gs); - __be16 port = inet_sk(sk)->inet_sport; - - rcu_read_lock(); - for_each_netdev_rcu(net, dev) { - if (dev->netdev_ops->ndo_del_geneve_port) - dev->netdev_ops->ndo_del_geneve_port(dev, sa_family, - port); - } - - rcu_read_unlock(); -} - static void __geneve_sock_release(struct geneve_sock *gs) { if (!gs || --gs->refcnt) return;
list_del(&gs->list); - geneve_notify_del_rx_port(gs); + udp_tunnel_notify_del_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE); udp_tunnel_sock_release(gs->sock); kfree_rcu(gs, rcu); } @@@ -922,8 -958,8 +922,8 @@@ tx_error dev->stats.collisions++; else if (err == -ENETUNREACH) dev->stats.tx_carrier_errors++; - else - dev->stats.tx_errors++; + + dev->stats.tx_errors++; return NETDEV_TX_OK; }
@@@ -1012,8 -1048,8 +1012,8 @@@ tx_error dev->stats.collisions++; else if (err == -ENETUNREACH) dev->stats.tx_carrier_errors++; - else - dev->stats.tx_errors++; + + dev->stats.tx_errors++; return NETDEV_TX_OK; } #endif @@@ -1129,20 -1165,29 +1129,20 @@@ static struct device_type geneve_type .name = "geneve", };
-/* Calls the ndo_add_geneve_port of the caller in order to +/* Calls the ndo_add_udp_enc_port of the caller in order to * supply the listening GENEVE udp ports. Callers are expected - * to implement the ndo_add_geneve_port. + * to implement the ndo_add_udp_enc_port. */ static void geneve_push_rx_ports(struct net_device *dev) { struct net *net = dev_net(dev); struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_sock *gs; - sa_family_t sa_family; - struct sock *sk; - __be16 port; - - if (!dev->netdev_ops->ndo_add_geneve_port) - return;
rcu_read_lock(); - list_for_each_entry_rcu(gs, &gn->sock_list, list) { - sk = gs->sock->sk; - sa_family = sk->sk_family; - port = inet_sk(sk)->inet_sport; - dev->netdev_ops->ndo_add_geneve_port(dev, sa_family, port); - } + list_for_each_entry_rcu(gs, &gn->sock_list, list) + udp_tunnel_push_rx_port(dev, gs->sock, + UDP_TUNNEL_TYPE_GENEVE); rcu_read_unlock(); }
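On the tunnel side the same consolidation removes geneve's open-coded for_each_netdev_rcu() walks: udp_tunnel_push_rx_port() delivers the (socket, type) pair to one device's ndo_udp_tunnel_add, while udp_tunnel_notify_add_rx_port()/udp_tunnel_notify_del_rx_port() broadcast to all devices. A sketch of the replay loop above, with my_sock as a hypothetical wrapper around the tunnel socket:

    #include <linux/rculist.h>
    #include <net/udp_tunnel.h>

    struct my_sock {
        struct list_head list;
        struct socket *sock;
    };

    /* Replay every live tunnel socket to one device, e.g. when the
     * device asks for the current port list on open.
     */
    static void my_push_rx_ports(struct net_device *dev,
                                 struct list_head *sock_list)
    {
        struct my_sock *s;

        rcu_read_lock();
        list_for_each_entry_rcu(s, sock_list, list)
            udp_tunnel_push_rx_port(dev, s->sock,
                                    UDP_TUNNEL_TYPE_GENEVE);
        rcu_read_unlock();
    }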
@@@ -1463,6 -1508,7 +1463,7 @@@ struct net_device *geneve_dev_create_fb { struct nlattr *tb[IFLA_MAX + 1]; struct net_device *dev; + LIST_HEAD(list_kill); int err;
memset(tb, 0, sizeof(tb)); @@@ -1474,8 -1520,10 +1475,10 @@@ err = geneve_configure(net, dev, &geneve_remote_unspec, 0, 0, 0, 0, htons(dst_port), true, GENEVE_F_UDP_ZERO_CSUM6_RX); - if (err) - goto err; + if (err) { + free_netdev(dev); + return ERR_PTR(err); + }
/* openvswitch users expect packet sizes to be unrestricted, * so set the largest MTU we can. @@@ -1484,10 -1532,15 +1487,15 @@@ if (err) goto err;
+ err = rtnl_configure_link(dev, NULL); + if (err < 0) + goto err; + return dev;
err: - free_netdev(dev); + geneve_dellink(dev, &list_kill); + unregister_netdevice_many(&list_kill); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(geneve_dev_create_fb); @@@ -1497,7 -1550,7 +1505,7 @@@ static int geneve_netdevice_event(struc { struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- if (event == NETDEV_OFFLOAD_PUSH_GENEVE) + if (event == NETDEV_UDP_TUNNEL_PUSH_INFO) geneve_push_rx_ports(dev);
return NOTIFY_DONE; diff --combined drivers/net/phy/fixed_phy.c index b376ada,9ec7f73..c649c10 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@@ -23,9 -23,9 +23,10 @@@ #include <linux/slab.h> #include <linux/of.h> #include <linux/gpio.h> +#include <linux/seqlock.h> + #include <linux/idr.h>
-#define MII_REGS_NUM 29 +#include "swphy.h"
struct fixed_mdio_bus { struct mii_bus *mii_bus; @@@ -34,8 -34,8 +35,8 @@@
struct fixed_phy { int addr; - u16 regs[MII_REGS_NUM]; struct phy_device *phydev; + seqcount_t seqcount; struct fixed_phy_status status; int (*link_update)(struct net_device *, struct fixed_phy_status *); struct list_head node; @@@ -47,10 -47,103 +48,10 @@@ static struct fixed_mdio_bus platform_f .phys = LIST_HEAD_INIT(platform_fmb.phys), };
-static int fixed_phy_update_regs(struct fixed_phy *fp) +static void fixed_phy_update(struct fixed_phy *fp) { - u16 bmsr = BMSR_ANEGCAPABLE; - u16 bmcr = 0; - u16 lpagb = 0; - u16 lpa = 0; - if (gpio_is_valid(fp->link_gpio)) fp->status.link = !!gpio_get_value_cansleep(fp->link_gpio); - - if (fp->status.duplex) { - switch (fp->status.speed) { - case 1000: - bmsr |= BMSR_ESTATEN; - break; - case 100: - bmsr |= BMSR_100FULL; - break; - case 10: - bmsr |= BMSR_10FULL; - break; - default: - break; - } - } else { - switch (fp->status.speed) { - case 1000: - bmsr |= BMSR_ESTATEN; - break; - case 100: - bmsr |= BMSR_100HALF; - break; - case 10: - bmsr |= BMSR_10HALF; - break; - default: - break; - } - } - - if (fp->status.link) { - bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; - - if (fp->status.duplex) { - bmcr |= BMCR_FULLDPLX; - - switch (fp->status.speed) { - case 1000: - bmcr |= BMCR_SPEED1000; - lpagb |= LPA_1000FULL; - break; - case 100: - bmcr |= BMCR_SPEED100; - lpa |= LPA_100FULL; - break; - case 10: - lpa |= LPA_10FULL; - break; - default: - pr_warn("fixed phy: unknown speed\n"); - return -EINVAL; - } - } else { - switch (fp->status.speed) { - case 1000: - bmcr |= BMCR_SPEED1000; - lpagb |= LPA_1000HALF; - break; - case 100: - bmcr |= BMCR_SPEED100; - lpa |= LPA_100HALF; - break; - case 10: - lpa |= LPA_10HALF; - break; - default: - pr_warn("fixed phy: unknown speed\n"); - return -EINVAL; - } - } - - if (fp->status.pause) - lpa |= LPA_PAUSE_CAP; - - if (fp->status.asym_pause) - lpa |= LPA_PAUSE_ASYM; - } - - fp->regs[MII_PHYSID1] = 0; - fp->regs[MII_PHYSID2] = 0; - - fp->regs[MII_BMSR] = bmsr; - fp->regs[MII_BMCR] = bmcr; - fp->regs[MII_LPA] = lpa; - fp->regs[MII_STAT1000] = lpagb; - - return 0; }
static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) @@@ -58,23 -151,29 +59,23 @@@ struct fixed_mdio_bus *fmb = bus->priv; struct fixed_phy *fp;
- if (reg_num >= MII_REGS_NUM) - return -1; - - /* We do not support emulating Clause 45 over Clause 22 register reads - * return an error instead of bogus data. - */ - switch (reg_num) { - case MII_MMD_CTRL: - case MII_MMD_DATA: - return -1; - default: - break; - } - list_for_each_entry(fp, &fmb->phys, node) { if (fp->addr == phy_addr) { - /* Issue callback if user registered it. */ - if (fp->link_update) { - fp->link_update(fp->phydev->attached_dev, - &fp->status); - fixed_phy_update_regs(fp); - } - return fp->regs[reg_num]; + struct fixed_phy_status state; + int s; + + do { + s = read_seqcount_begin(&fp->seqcount); + /* Issue callback if user registered it. */ + if (fp->link_update) { + fp->link_update(fp->phydev->attached_dev, + &fp->status); + fixed_phy_update(fp); + } + state = fp->status; + } while (read_seqcount_retry(&fp->seqcount, s)); + + return swphy_read_reg(reg_num, &state); } }
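For readers tracking the locking change: fixed_mdio_read() now snapshots fp->status under a seqcount instead of serving pre-computed MII registers, and swphy_read_reg() derives the register value from the snapshot. A minimal sketch of the same lockless read/retry pairing, using hypothetical demo_* names (not part of this diff):

#include <linux/seqlock.h>

struct demo_state {
	seqcount_t seq;		/* initialised with seqcount_init() */
	int link;
	int speed;
};

/* writers must be serialized externally (e.g. by RTNL or a caller lock) */
static void demo_update(struct demo_state *s, int link, int speed)
{
	write_seqcount_begin(&s->seq);	/* concurrent readers will retry */
	s->link = link;
	s->speed = speed;
	write_seqcount_end(&s->seq);
}

static void demo_snapshot(struct demo_state *s, int *link, int *speed)
{
	unsigned int start;

	do {
		start = read_seqcount_begin(&s->seq);
		*link = s->link;	/* copy may be torn by a writer... */
		*speed = s->speed;
	} while (read_seqcount_retry(&s->seq, start));	/* ...retry if so */
}

The write side in this diff is fixed_phy_update_state(), whose write_seqcount_begin/end pair appears in the next hunk.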
@@@ -126,7 -225,6 +127,7 @@@ int fixed_phy_update_state(struct phy_d
list_for_each_entry(fp, &fmb->phys, node) { if (fp->addr == phydev->mdio.addr) { + write_seqcount_begin(&fp->seqcount); #define _UPD(x) if (changed->x) \ fp->status.x = status->x _UPD(link); @@@ -135,8 -233,7 +136,8 @@@ _UPD(pause); _UPD(asym_pause); #undef _UPD - fixed_phy_update_regs(fp); + fixed_phy_update(fp); + write_seqcount_end(&fp->seqcount); return 0; } } @@@ -153,15 -250,11 +154,15 @@@ int fixed_phy_add(unsigned int irq, in struct fixed_mdio_bus *fmb = &platform_fmb; struct fixed_phy *fp;
+ ret = swphy_validate_state(status); + if (ret < 0) + return ret; + fp = kzalloc(sizeof(*fp), GFP_KERNEL); if (!fp) return -ENOMEM;
- memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM); + seqcount_init(&fp->seqcount);
if (irq != PHY_POLL) fmb->mii_bus->irq[phy_addr] = irq; @@@ -177,18 -270,25 +178,20 @@@ goto err_regs; }
- ret = fixed_phy_update_regs(fp); - if (ret) - goto err_gpio; + fixed_phy_update(fp);
list_add_tail(&fp->node, &fmb->phys);
return 0;
-err_gpio: - if (gpio_is_valid(fp->link_gpio)) - gpio_free(fp->link_gpio); err_regs: kfree(fp); return ret; } EXPORT_SYMBOL_GPL(fixed_phy_add);
+ static DEFINE_IDA(phy_fixed_ida); + static void fixed_phy_del(int phy_addr) { struct fixed_mdio_bus *fmb = &platform_fmb; @@@ -200,14 -300,12 +203,12 @@@ if (gpio_is_valid(fp->link_gpio)) gpio_free(fp->link_gpio); kfree(fp); + ida_simple_remove(&phy_fixed_ida, phy_addr); return; } } }
- static int phy_fixed_addr; - static DEFINE_SPINLOCK(phy_fixed_addr_lock); - struct phy_device *fixed_phy_register(unsigned int irq, struct fixed_phy_status *status, int link_gpio, @@@ -222,17 -320,15 +223,15 @@@ return ERR_PTR(-EPROBE_DEFER);
/* Get the next available PHY address, up to PHY_MAX_ADDR */ - spin_lock(&phy_fixed_addr_lock); - if (phy_fixed_addr == PHY_MAX_ADDR) { - spin_unlock(&phy_fixed_addr_lock); - return ERR_PTR(-ENOSPC); - } - phy_addr = phy_fixed_addr++; - spin_unlock(&phy_fixed_addr_lock); + phy_addr = ida_simple_get(&phy_fixed_ida, 0, PHY_MAX_ADDR, GFP_KERNEL); + if (phy_addr < 0) + return ERR_PTR(phy_addr);
ret = fixed_phy_add(irq, phy_addr, status, link_gpio); - if (ret < 0) + if (ret < 0) { + ida_simple_remove(&phy_fixed_ida, phy_addr); return ERR_PTR(ret); + }
phy = get_phy_device(fmb->mii_bus, phy_addr, false); if (IS_ERR(phy)) { @@@ -337,6 -433,7 +336,7 @@@ static void __exit fixed_mdio_bus_exit( list_del(&fp->node); kfree(fp); } + ida_destroy(&phy_fixed_ida); } module_exit(fixed_mdio_bus_exit);
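Worth noting about the allocator change in this file: the old phy_fixed_addr counter only ever grew, so addresses released by fixed_phy_del() were lost until reboot; the IDA hands back the smallest free slot. The allocate/free cycle in isolation, assuming the same [0, PHY_MAX_ADDR) bounds:

#include <linux/idr.h>
#include <linux/phy.h>	/* PHY_MAX_ADDR */

static DEFINE_IDA(demo_ida);

static int demo_get_addr(void)
{
	/* returns the smallest free ID in [0, PHY_MAX_ADDR), or -errno */
	return ida_simple_get(&demo_ida, 0, PHY_MAX_ADDR, GFP_KERNEL);
}

static void demo_put_addr(int addr)
{
	ida_simple_remove(&demo_ida, addr);	/* addr is reusable again */
}

Note how fixed_phy_register() above must hand the ID back with ida_simple_remove() on every failure path, mirroring demo_put_addr().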
diff --combined drivers/net/team/team.c index 0a1bb83,fdee772..f9eebea --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@@ -1203,8 -1203,10 +1203,10 @@@ static int team_port_add(struct team *t goto err_dev_open; }
+ netif_addr_lock_bh(dev); dev_uc_sync_multiple(port_dev, dev); dev_mc_sync_multiple(port_dev, dev); + netif_addr_unlock_bh(dev);
err = vlan_vids_add_by_dev(port_dev, dev); if (err) { @@@ -1574,6 -1576,23 +1576,6 @@@ static const struct team_option team_op }, };
-static struct lock_class_key team_netdev_xmit_lock_key; -static struct lock_class_key team_netdev_addr_lock_key; -static struct lock_class_key team_tx_busylock_key; - -static void team_set_lockdep_class_one(struct net_device *dev, - struct netdev_queue *txq, - void *unused) -{ - lockdep_set_class(&txq->_xmit_lock, &team_netdev_xmit_lock_key); -} - -static void team_set_lockdep_class(struct net_device *dev) -{ - lockdep_set_class(&dev->addr_list_lock, &team_netdev_addr_lock_key); - netdev_for_each_tx_queue(dev, team_set_lockdep_class_one, NULL); - dev->qdisc_tx_busylock = &team_tx_busylock_key; -}
static int team_init(struct net_device *dev) { @@@ -1609,7 -1628,7 +1611,7 @@@ goto err_options_register; netif_carrier_off(dev);
- team_set_lockdep_class(dev); + netdev_lockdep_set_classes(dev);
return 0;
diff --combined drivers/net/usb/r8152.c index 11178f9,4e257b8..24d36728 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@@ -31,7 -31,7 +31,7 @@@ #define NETNEXT_VERSION "08"
/* Information for net */ - #define NET_VERSION "3" + #define NET_VERSION "4"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION #define DRIVER_AUTHOR "Realtek linux nic maintainers nic_swsd@realtek.com" @@@ -116,6 -116,7 +116,7 @@@ #define USB_TX_DMA 0xd434 #define USB_TOLERANCE 0xd490 #define USB_LPM_CTRL 0xd41a + #define USB_BMU_RESET 0xd4b0 #define USB_UPS_CTRL 0xd800 #define USB_MISC_0 0xd81a #define USB_POWER_CUT 0xd80a @@@ -338,6 -339,10 +339,10 @@@ #define TEST_MODE_DISABLE 0x00000001 #define TX_SIZE_ADJUST1 0x00000100
+ /* USB_BMU_RESET */ + #define BMU_RESET_EP_IN 0x01 + #define BMU_RESET_EP_OUT 0x02 + /* USB_UPS_CTRL */ #define POWER_CUT 0x0100
@@@ -602,7 -607,7 +607,7 @@@ struct r8152 struct list_head rx_done, tx_free; struct sk_buff_head tx_queue, rx_queue; spinlock_t rx_lock, tx_lock; - struct delayed_work schedule; + struct delayed_work schedule, hw_phy_work; struct mii_if_info mii; struct mutex control; /* use for hw setting */ #ifdef CONFIG_PM_SLEEP @@@ -619,7 -624,6 +624,7 @@@ int (*eee_get)(struct r8152 *, struct ethtool_eee *); int (*eee_set)(struct r8152 *, struct ethtool_eee *); bool (*in_nway)(struct r8152 *); + void (*hw_phy_cfg)(struct r8152 *); } rtl_ops;
int intr_interval; @@@ -628,11 -632,8 +633,11 @@@ u32 tx_qlen; u32 coalesce; u16 ocp_base; + u16 speed; u8 *intr_buff; u8 version; + u8 duplex; + u8 autoneg; };
enum rtl_version { @@@ -1746,7 -1747,7 +1751,7 @@@ static int rx_bottom(struct r8152 *tp, pkt_len -= CRC_SIZE; rx_data += sizeof(struct rx_desc);
- skb = netdev_alloc_skb_ip_align(netdev, pkt_len); + skb = napi_alloc_skb(&tp->napi, pkt_len); if (!skb) { stats->rx_dropped++; goto find_next_rx; @@@ -2173,7 -2174,7 +2178,7 @@@ static void r8153_set_rx_early_timeout( static void r8153_set_rx_early_size(struct r8152 *tp) { u32 mtu = tp->netdev->mtu; - u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 4; + u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8;
ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data); } @@@ -2460,6 -2461,17 +2465,17 @@@ static void r8153_teredo_off(struct r81 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0); }
+ static void rtl_reset_bmu(struct r8152 *tp) + { + u32 ocp_data; + + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_BMU_RESET); + ocp_data &= ~(BMU_RESET_EP_IN | BMU_RESET_EP_OUT); + ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); + ocp_data |= BMU_RESET_EP_IN | BMU_RESET_EP_OUT; + ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data); + } + static void r8152_aldps_en(struct r8152 *tp, bool enable) { if (enable) { @@@ -2503,6 -2515,8 +2519,6 @@@ static void r8152b_exit_oob(struct r815
rxdy_gated_en(tp, true); r8153_teredo_off(tp); - r8152b_hw_phy_cfg(tp); - ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00);
@@@ -2680,7 -2694,10 +2696,8 @@@ static void r8153_first_init(struct r81 ocp_data &= ~RCR_ACPT_ALL; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
- r8153_hw_phy_cfg(tp); - rtl8152_nic_reset(tp); + rtl_reset_bmu(tp);
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; @@@ -2742,6 -2759,7 +2759,7 @@@ static void r8153_enter_oob(struct r815 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
rtl_disable(tp); + rtl_reset_bmu(tp);
for (i = 0; i < 1000; i++) { ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); @@@ -2803,6 -2821,7 +2821,7 @@@ static void rtl8153_disable(struct r815 { r8153_aldps_en(tp, false); rtl_disable(tp); + rtl_reset_bmu(tp); r8153_aldps_en(tp, true); usb_enable_lpm(tp->udev); } @@@ -2872,7 -2891,7 +2891,7 @@@ static int rtl8152_set_speed(struct r81 bmcr = BMCR_ANENABLE | BMCR_ANRESTART; }
- if (test_bit(PHY_RESET, &tp->flags)) + if (test_and_clear_bit(PHY_RESET, &tp->flags)) bmcr |= BMCR_RESET;
if (tp->mii.supports_gmii) @@@ -2881,7 -2900,7 +2900,7 @@@ r8152_mdio_write(tp, MII_ADVERTISE, anar); r8152_mdio_write(tp, MII_BMCR, bmcr);
- if (test_and_clear_bit(PHY_RESET, &tp->flags)) { + if (bmcr & BMCR_RESET) { int i;
for (i = 0; i < 50; i++) { @@@ -3040,27 -3059,6 +3059,27 @@@ out1 usb_autopm_put_interface(tp->intf); }
+static void rtl_hw_phy_work_func_t(struct work_struct *work) +{ + struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work); + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + if (usb_autopm_get_interface(tp->intf) < 0) + return; + + mutex_lock(&tp->control); + + tp->rtl_ops.hw_phy_cfg(tp); + + rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex); + + mutex_unlock(&tp->control); + + usb_autopm_put_interface(tp->intf); +} + #ifdef CONFIG_PM_SLEEP static int rtl_notifier(struct notifier_block *nb, unsigned long action, void *data) @@@ -3109,6 -3107,9 +3128,6 @@@ static int rtl8152_open(struct net_devi
tp->rtl_ops.up(tp);
- rtl8152_set_speed(tp, AUTONEG_ENABLE, - tp->mii.supports_gmii ? SPEED_1000 : SPEED_100, - DUPLEX_FULL); netif_carrier_off(netdev); netif_start_queue(netdev); set_bit(WORK_ENABLE, &tp->flags); @@@ -3400,15 -3401,11 +3419,11 @@@ static void r8153_init(struct r8152 *tp r8153_power_cut_en(tp, false); r8153_u1u2en(tp, true);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, - PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN | - U1U2_SPDWN_EN | L1_SPDWN_EN); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, - PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN | - TP100_SPDWN_EN | TP500_SPDWN_EN | TP1000_SPDWN_EN | - EEE_SPDWN_EN); + /* MAC clock speed down */ + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
r8153_enable_eee(tp); r8153_aldps_en(tp, true); @@@ -3536,7 -3533,6 +3551,7 @@@ static int rtl8152_resume(struct usb_in
if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) { tp->rtl_ops.init(tp); + queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); netif_device_attach(tp->netdev); }
@@@ -3551,6 -3547,10 +3566,6 @@@ napi_enable(&tp->napi); } else { tp->rtl_ops.up(tp); - rtl8152_set_speed(tp, AUTONEG_ENABLE, - tp->mii.supports_gmii ? - SPEED_1000 : SPEED_100, - DUPLEX_FULL); netif_carrier_off(tp->netdev); set_bit(WORK_ENABLE, &tp->flags); } @@@ -3680,11 -3680,6 +3695,11 @@@ static int rtl8152_set_settings(struct mutex_lock(&tp->control);
ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex); + if (!ret) { + tp->autoneg = cmd->autoneg; + tp->speed = cmd->speed; + tp->duplex = cmd->duplex; + }
mutex_unlock(&tp->control);
@@@ -4142,7 -4137,6 +4157,7 @@@ static int rtl_ops_init(struct r8152 *t ops->eee_get = r8152_get_eee; ops->eee_set = r8152_set_eee; ops->in_nway = rtl8152_in_nway; + ops->hw_phy_cfg = r8152b_hw_phy_cfg; break;
case RTL_VER_03: @@@ -4158,7 -4152,6 +4173,7 @@@ ops->eee_get = r8153_get_eee; ops->eee_set = r8153_set_eee; ops->in_nway = rtl8153_in_nway; + ops->hw_phy_cfg = r8153_hw_phy_cfg; break;
default: @@@ -4205,7 -4198,6 +4220,7 @@@ static int rtl8152_probe(struct usb_int
mutex_init(&tp->control); INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t); + INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t);
netdev->netdev_ops = &rtl8152_netdev_ops; netdev->watchdog_timeo = RTL8152_TX_TIMEOUT; @@@ -4245,14 -4237,9 +4260,14 @@@ break; }
+ tp->autoneg = AUTONEG_ENABLE; + tp->speed = tp->mii.supports_gmii ? SPEED_1000 : SPEED_100; + tp->duplex = DUPLEX_FULL; + intf->needs_remote_wakeup = 1;
tp->rtl_ops.init(tp); + queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); set_ethernet_addr(tp);
usb_set_intfdata(intf, tp); @@@ -4298,7 -4285,6 +4313,7 @@@ static void rtl8152_disconnect(struct u
netif_napi_del(&tp->napi); unregister_netdev(tp->netdev); + cancel_delayed_work_sync(&tp->hw_phy_work); tp->rtl_ops.unload(tp); free_netdev(tp->netdev); } diff --combined drivers/net/vxlan.c index abb9cd2,b3b9db6..ae7455d --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@@ -11,18 -11,32 +11,18 @@@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h> -#include <linux/types.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/slab.h> -#include <linux/skbuff.h> -#include <linux/rculist.h> -#include <linux/netdevice.h> -#include <linux/in.h> -#include <linux/ip.h> #include <linux/udp.h> #include <linux/igmp.h> -#include <linux/etherdevice.h> #include <linux/if_ether.h> -#include <linux/if_vlan.h> -#include <linux/hash.h> #include <linux/ethtool.h> #include <net/arp.h> #include <net/ndisc.h> #include <net/ip.h> -#include <net/ip_tunnels.h> #include <net/icmp.h> -#include <net/udp.h> -#include <net/udp_tunnel.h> #include <net/rtnetlink.h> -#include <net/route.h> -#include <net/dsfield.h> #include <net/inet_ecn.h> #include <net/net_namespace.h> #include <net/netns/generic.h> @@@ -30,9 -44,12 +30,9 @@@ #include <net/protocol.h>
#if IS_ENABLED(CONFIG_IPV6) -#include <net/ipv6.h> -#include <net/addrconf.h> #include <net/ip6_tunnel.h> #include <net/ip6_checksum.h> #endif -#include <net/dst_metadata.h>
#define VXLAN_VERSION "0.1"
@@@ -602,6 -619,42 +602,6 @@@ static int vxlan_gro_complete(struct so return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); }
-/* Notify netdevs that UDP port started listening */ -static void vxlan_notify_add_rx_port(struct vxlan_sock *vs) -{ - struct net_device *dev; - struct sock *sk = vs->sock->sk; - struct net *net = sock_net(sk); - sa_family_t sa_family = vxlan_get_sk_family(vs); - __be16 port = inet_sk(sk)->inet_sport; - - rcu_read_lock(); - for_each_netdev_rcu(net, dev) { - if (dev->netdev_ops->ndo_add_vxlan_port) - dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family, - port); - } - rcu_read_unlock(); -} - -/* Notify netdevs that UDP port is no more listening */ -static void vxlan_notify_del_rx_port(struct vxlan_sock *vs) -{ - struct net_device *dev; - struct sock *sk = vs->sock->sk; - struct net *net = sock_net(sk); - sa_family_t sa_family = vxlan_get_sk_family(vs); - __be16 port = inet_sk(sk)->inet_sport; - - rcu_read_lock(); - for_each_netdev_rcu(net, dev) { - if (dev->netdev_ops->ndo_del_vxlan_port) - dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family, - port); - } - rcu_read_unlock(); -} - /* Add new entry to forwarding table -- assumes lock held */ static int vxlan_fdb_create(struct vxlan_dev *vxlan, const u8 *mac, union vxlan_addr *ip, @@@ -997,10 -1050,7 +997,10 @@@ static bool __vxlan_sock_release_prep(s vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id); spin_lock(&vn->sock_lock); hlist_del_rcu(&vs->hlist); - vxlan_notify_del_rx_port(vs); + udp_tunnel_notify_del_rx_port(vs->sock, + (vs->flags & VXLAN_F_GPE) ? + UDP_TUNNEL_TYPE_VXLAN_GPE : + UDP_TUNNEL_TYPE_VXLAN); spin_unlock(&vn->sock_lock);
return true; @@@ -2475,24 -2525,30 +2475,24 @@@ static struct device_type vxlan_type = .name = "vxlan", };
-/* Calls the ndo_add_vxlan_port of the caller in order to +/* Calls the ndo_add_udp_enc_port of the caller in order to * supply the listening VXLAN udp ports. Callers are expected - * to implement the ndo_add_vxlan_port. + * to implement the ndo_add_udp_enc_port. */ static void vxlan_push_rx_ports(struct net_device *dev) { struct vxlan_sock *vs; struct net *net = dev_net(dev); struct vxlan_net *vn = net_generic(net, vxlan_net_id); - sa_family_t sa_family; - __be16 port; unsigned int i;
- if (!dev->netdev_ops->ndo_add_vxlan_port) - return; - spin_lock(&vn->sock_lock); for (i = 0; i < PORT_HASH_SIZE; ++i) { - hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) { - port = inet_sk(vs->sock->sk)->inet_sport; - sa_family = vxlan_get_sk_family(vs); - dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family, - port); - } + hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) + udp_tunnel_push_rx_port(dev, vs->sock, + (vs->flags & VXLAN_F_GPE) ? + UDP_TUNNEL_TYPE_VXLAN_GPE : + UDP_TUNNEL_TYPE_VXLAN); } spin_unlock(&vn->sock_lock); } @@@ -2694,10 -2750,7 +2694,10 @@@ static struct vxlan_sock *vxlan_socket_
spin_lock(&vn->sock_lock); hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); - vxlan_notify_add_rx_port(vs); + udp_tunnel_notify_add_rx_port(sock, + (vs->flags & VXLAN_F_GPE) ? + UDP_TUNNEL_TYPE_VXLAN_GPE : + UDP_TUNNEL_TYPE_VXLAN); spin_unlock(&vn->sock_lock);
/* Mark socket as an encapsulation socket. */ @@@ -2899,30 -2952,6 +2899,6 @@@ static int vxlan_dev_configure(struct n return 0; }
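An aside on this conversion: the (vs->flags & VXLAN_F_GPE) ternary now appears at all three notifier call sites in this file. A hypothetical helper (not in this diff) that would capture the choice once:

static unsigned short demo_vxlan_tunnel_type(const struct vxlan_sock *vs)
{
	/* GPE uses a different frame format, so NICs must tell it apart */
	return (vs->flags & VXLAN_F_GPE) ? UDP_TUNNEL_TYPE_VXLAN_GPE :
					   UDP_TUNNEL_TYPE_VXLAN;
}

/* e.g. udp_tunnel_notify_add_rx_port(sock, demo_vxlan_tunnel_type(vs)); */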
- struct net_device *vxlan_dev_create(struct net *net, const char *name, - u8 name_assign_type, struct vxlan_config *conf) - { - struct nlattr *tb[IFLA_MAX+1]; - struct net_device *dev; - int err; - - memset(&tb, 0, sizeof(tb)); - - dev = rtnl_create_link(net, name, name_assign_type, - &vxlan_link_ops, tb); - if (IS_ERR(dev)) - return dev; - - err = vxlan_dev_configure(net, dev, conf); - if (err < 0) { - free_netdev(dev); - return ERR_PTR(err); - } - - return dev; - } - EXPORT_SYMBOL_GPL(vxlan_dev_create); - static int vxlan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { @@@ -3215,6 -3244,40 +3191,40 @@@ static struct rtnl_link_ops vxlan_link_ .get_link_net = vxlan_get_link_net, };
+ struct net_device *vxlan_dev_create(struct net *net, const char *name, + u8 name_assign_type, + struct vxlan_config *conf) + { + struct nlattr *tb[IFLA_MAX + 1]; + struct net_device *dev; + int err; + + memset(&tb, 0, sizeof(tb)); + + dev = rtnl_create_link(net, name, name_assign_type, + &vxlan_link_ops, tb); + if (IS_ERR(dev)) + return dev; + + err = vxlan_dev_configure(net, dev, conf); + if (err < 0) { + free_netdev(dev); + return ERR_PTR(err); + } + + err = rtnl_configure_link(dev, NULL); + if (err < 0) { + LIST_HEAD(list_kill); + + vxlan_dellink(dev, &list_kill); + unregister_netdevice_many(&list_kill); + return ERR_PTR(err); + } + + return dev; + } + EXPORT_SYMBOL_GPL(vxlan_dev_create); + static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn, struct net_device *dev) { @@@ -3245,7 -3308,7 +3255,7 @@@ static int vxlan_netdevice_event(struc
if (event == NETDEV_UNREGISTER) vxlan_handle_lowerdev_unregister(vn, dev); - else if (event == NETDEV_OFFLOAD_PUSH_VXLAN) + else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO) vxlan_push_rx_ports(dev);
return NOTIFY_DONE; diff --combined drivers/net/wireless/ath/ath10k/core.c index c6291c2,a92a0ba..dfb3db0 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@@ -18,7 -18,6 +18,7 @@@ #include <linux/module.h> #include <linux/firmware.h> #include <linux/of.h> +#include <asm/byteorder.h>
#include "core.h" #include "mac.h" @@@ -56,7 -55,7 +56,7 @@@ static const struct ath10k_hw_params at .name = "qca988x hw2.0", .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, .uart_pin = 7, - .has_shifted_cc_wraparound = true, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, @@@ -70,25 -69,6 +70,25 @@@ }, }, { + .id = QCA9887_HW_1_0_VERSION, + .dev_id = QCA9887_1_0_DEVICE_ID, + .name = "qca9887 hw1.0", + .patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, + .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER, + .cal_data_len = 2116, + .fw = { + .dir = QCA9887_HW_1_0_FW_DIR, + .board = QCA9887_HW_1_0_BOARD_DATA_FILE, + .board_size = QCA9887_BOARD_DATA_SZ, + .board_ext_size = QCA9887_BOARD_EXT_DATA_SZ, + }, + }, + { .id = QCA6174_HW_2_1_VERSION, .dev_id = QCA6164_2_1_DEVICE_ID, .name = "qca6164 hw2.1", @@@ -168,7 -148,6 +168,7 @@@ .uart_pin = 7, .otp_exe_param = 0x00000700, .continuous_frag_desc = true, + .cck_rate_map_rev2 = true, .channel_counters_freq_hz = 150000, .max_probe_resp_desc_thres = 24, .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, @@@ -184,29 -163,6 +184,29 @@@ }, }, { + .id = QCA9984_HW_1_0_DEV_VERSION, + .dev_id = QCA9984_1_0_DEVICE_ID, + .name = "qca9984/qca9994 hw1.0", + .patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .otp_exe_param = 0x00000700, + .continuous_frag_desc = true, + .cck_rate_map_rev2 = true, + .channel_counters_freq_hz = 150000, + .max_probe_resp_desc_thres = 24, + .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, + .tx_chain_mask = 0xf, + .rx_chain_mask = 0xf, + .max_spatial_stream = 4, + .cal_data_len = 12064, + .fw = { + .dir = QCA9984_HW_1_0_FW_DIR, + .board = QCA9984_HW_1_0_BOARD_DATA_FILE, + .board_size = QCA99X0_BOARD_DATA_SZ, + .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ, + }, + }, + { .id = QCA9377_HW_1_0_DEV_VERSION, .dev_id = QCA9377_1_0_DEVICE_ID, .name = "qca9377 hw1.0", @@@ -246,10 -202,9 +246,10 @@@ .name = "qca4019 hw1.0", .patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR, .uart_pin = 7, - .has_shifted_cc_wraparound = true, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH, .otp_exe_param = 0x0010000, .continuous_frag_desc = true, + .cck_rate_map_rev2 = true, .channel_counters_freq_hz = 125000, .max_probe_resp_desc_thres = 24, .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, @@@ -281,7 -236,6 +281,7 @@@ static const char *const ath10k_core_fw [ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca", [ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp", [ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl", + [ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param", };
static unsigned int ath10k_core_get_fw_feature_str(char *buf, @@@ -577,35 -531,6 +577,35 @@@ out return ret; }
+static int ath10k_download_cal_eeprom(struct ath10k *ar) +{ + size_t data_len; + void *data = NULL; + int ret; + + ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &data_len); + if (ret) { + if (ret != -EOPNOTSUPP) + ath10k_warn(ar, "failed to read calibration data from EEPROM: %d\n", + ret); + goto out_free; + } + + ret = ath10k_download_board_data(ar, data, data_len); + if (ret) { + ath10k_warn(ar, "failed to download calibration data from EEPROM: %d\n", + ret); + goto out_free; + } + + ret = 0; + +out_free: + kfree(data); + + return ret; +} + static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) { u32 result, address; @@@ -1158,7 -1083,7 +1158,7 @@@ int ath10k_core_fetch_firmware_api_n(st }
ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "", - ar->running_fw->fw_file.fw_features, + fw_file->fw_features, sizeof(fw_file->fw_features)); break; case ATH10K_FW_IE_FW_IMAGE: @@@ -1368,17 -1293,7 +1368,17 @@@ static int ath10k_download_cal_data(str }
ath10k_dbg(ar, ATH10K_DBG_BOOT, - "boot did not find DT entry, try OTP next: %d\n", + "boot did not find DT entry, try target EEPROM next: %d\n", + ret); + + ret = ath10k_download_cal_eeprom(ar); + if (ret == 0) { + ar->cal_mode = ATH10K_CAL_MODE_EEPROM; + goto done; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot did not find target EEPROM entry, try OTP next: %d\n", ret);
ret = ath10k_download_and_run_otp(ar); @@@ -1818,16 -1733,6 +1818,16 @@@ int ath10k_core_start(struct ath10k *ar if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map)) val |= WMI_10_4_BSS_CHANNEL_INFO_64;
+ /* 10.4 firmware supports BT-Coex without reloading firmware + * via pdev param. To support Bluetooth coexistence pdev param, + * WMI_COEX_GPIO_SUPPORT of the extended resource config should + * always be enabled. + */ + if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) && + test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, + ar->running_fw->fw_file.fw_features)) + val |= WMI_10_4_COEX_GPIO_SUPPORT; + status = ath10k_mac_ext_resource_config(ar, val); if (status) { ath10k_err(ar,
switch (hw_rev) { case ATH10K_HW_QCA988X: + case ATH10K_HW_QCA9887: ar->regs = &qca988x_regs; ar->hw_values = &qca988x_values; break; @@@ -2167,7 -2071,6 +2167,7 @@@ ar->hw_values = &qca6174_values; break; case ATH10K_HW_QCA99X0: + case ATH10K_HW_QCA9984: ar->regs = &qca99x0_regs; ar->hw_values = &qca99x0_values; break; @@@ -2256,5 -2159,5 +2256,5 @@@ void ath10k_core_destroy(struct ath10k EXPORT_SYMBOL(ath10k_core_destroy);
MODULE_AUTHOR("Qualcomm Atheros"); -MODULE_DESCRIPTION("Core module for QCA988X PCIe devices."); +MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11ac wireless LAN cards."); MODULE_LICENSE("Dual BSD/GPL"); diff --combined drivers/net/wireless/ath/ath10k/htt_rx.c index 3b35c7a,813cdd2..80e6453 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@@ -748,7 -748,7 +748,7 @@@ ath10k_htt_rx_h_peer_channel(struct ath if (WARN_ON_ONCE(!arvif)) return NULL;
- if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) + if (WARN_ON_ONCE(ath10k_mac_vif_chan(arvif->vif, &def))) return NULL;
return def.chan; @@@ -939,8 -939,7 +939,8 @@@ static void ath10k_process_rx(struct at is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? "mcast" : "ucast", (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, - status->flag == 0 ? "legacy" : "", + (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) == 0 ? + "legacy" : "", status->flag & RX_FLAG_HT ? "ht" : "", status->flag & RX_FLAG_VHT ? "vht" : "", status->flag & RX_FLAG_40MHZ ? "40" : "", @@@ -1905,7 -1904,6 +1905,6 @@@ static void ath10k_htt_rx_in_ord_ind(st return; } } - ath10k_htt_rx_msdu_buff_replenish(htt); }
static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar, @@@ -2183,6 -2181,34 +2182,6 @@@ static void ath10k_htt_rx_tx_mode_switc ath10k_mac_tx_push_pending(ar); }
-static inline enum nl80211_band phy_mode_to_band(u32 phy_mode) -{ - enum nl80211_band band; - - switch (phy_mode) { - case MODE_11A: - case MODE_11NA_HT20: - case MODE_11NA_HT40: - case MODE_11AC_VHT20: - case MODE_11AC_VHT40: - case MODE_11AC_VHT80: - band = NL80211_BAND_5GHZ; - break; - case MODE_11G: - case MODE_11B: - case MODE_11GONLY: - case MODE_11NG_HT20: - case MODE_11NG_HT40: - case MODE_11AC_VHT20_2G: - case MODE_11AC_VHT40_2G: - case MODE_11AC_VHT80_2G: - default: - band = NL80211_BAND_2GHZ; - } - - return band; -} - void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) { bool release; @@@ -2264,6 -2290,7 +2263,6 @@@ bool ath10k_htt_t2h_msg_handler(struct ath10k_htt_tx_mgmt_dec_pending(htt); spin_unlock_bh(&htt->tx_lock); } - ath10k_mac_tx_push_pending(ar); break; } case HTT_T2H_MSG_TYPE_TX_COMPL_IND: @@@ -2414,6 -2441,8 +2413,6 @@@ static void ath10k_htt_txrx_compl_task( dev_kfree_skb_any(skb); }
- ath10k_mac_tx_push_pending(ar); - num_mpdus = atomic_read(&htt->num_mpdus_ready);
while (num_mpdus) { diff --combined drivers/net/wireless/ath/ath10k/mac.c index 3a170b1,4040f94..d4b7a16 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@@ -62,32 -62,6 +62,32 @@@ static struct ieee80211_rate ath10k_rat { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M }, };
+static struct ieee80211_rate ath10k_rates_rev2[] = { + { .bitrate = 10, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M }, + { .bitrate = 20, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M, + .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M, + .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M, + .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + + { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M }, + { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M }, + { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M }, + { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M }, + { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M }, + { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M }, + { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M }, + { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M }, +}; + #define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX) @@@ -96,9 -70,6 +96,9 @@@ #define ath10k_g_rates (ath10k_rates + 0) #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
+#define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0) +#define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2)) + static bool ath10k_mac_bitrate_is_cck(int bitrate) { switch (bitrate) { @@@ -708,10 -679,10 +708,10 @@@ static int ath10k_peer_create(struct at
peer = ath10k_peer_find(ar, vdev_id, addr); if (!peer) { + spin_unlock_bh(&ar->data_lock); ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n", addr, vdev_id); ath10k_wmi_peer_delete(ar, vdev_id, addr); - spin_unlock_bh(&ar->data_lock); return -ENOENT; }
@@@ -3810,9 -3781,6 +3810,9 @@@ void ath10k_mac_tx_push_pending(struct int ret; int max;
+ if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2)) + return; + spin_lock_bh(&ar->txqs_lock); rcu_read_lock();
@@@ -4083,7 -4051,9 +4083,7 @@@ static void ath10k_mac_op_wake_tx_queue list_add_tail(&artxq->list, &ar->txqs); spin_unlock_bh(&ar->txqs_lock);
- if (ath10k_mac_tx_can_push(hw, txq)) - tasklet_schedule(&ar->htt.txrx_compl_task); - + ath10k_mac_tx_push_pending(ar); ath10k_htt_tx_txq_update(hw, txq); }
@@@ -4497,19 -4467,6 +4497,19 @@@ static int ath10k_start(struct ieee8021 } }
+ param = ar->wmi.pdev_param->enable_btcoex; + if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) && + test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, + ar->running_fw->fw_file.fw_features)) { + ret = ath10k_wmi_pdev_set_param(ar, param, 0); + if (ret) { + ath10k_warn(ar, + "failed to set btcoex param: %d\n", ret); + goto err_core_stop; + } + clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); + } + ar->num_started_vdevs = 0; ath10k_regd_update(ar);
@@@ -7738,14 -7695,8 +7738,14 @@@ int ath10k_mac_register(struct ath10k * band = &ar->mac.sbands[NL80211_BAND_2GHZ]; band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels); band->channels = channels; - band->n_bitrates = ath10k_g_rates_size; - band->bitrates = ath10k_g_rates; + + if (ar->hw_params.cck_rate_map_rev2) { + band->n_bitrates = ath10k_g_rates_rev2_size; + band->bitrates = ath10k_g_rates_rev2; + } else { + band->n_bitrates = ath10k_g_rates_size; + band->bitrates = ath10k_g_rates; + }
ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band; } diff --combined include/linux/bpf.h index 9adfef6,0de4de6..8411032 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@@ -11,17 -11,14 +11,17 @@@ #include <linux/workqueue.h> #include <linux/file.h> #include <linux/percpu.h> +#include <linux/err.h>
+struct perf_event; struct bpf_map;
/* map is generic key/value storage optionally accessible by eBPF programs */ struct bpf_map_ops { /* funcs callable from userspace (via syscall) */ struct bpf_map *(*map_alloc)(union bpf_attr *attr); - void (*map_free)(struct bpf_map *); + void (*map_release)(struct bpf_map *map, struct file *map_file); + void (*map_free)(struct bpf_map *map); int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
/* funcs callable from userspace and from eBPF programs */ @@@ -30,9 -27,8 +30,9 @@@ int (*map_delete_elem)(struct bpf_map *map, void *key);
/* funcs called by prog_array and perf_event_array map */ - void *(*map_fd_get_ptr) (struct bpf_map *map, int fd); - void (*map_fd_put_ptr) (void *ptr); + void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, + int fd); + void (*map_fd_put_ptr)(void *ptr); };
struct bpf_map { @@@ -115,6 -111,31 +115,31 @@@ enum bpf_access_type BPF_WRITE = 2 };
+ /* types of values stored in eBPF registers */ + enum bpf_reg_type { + NOT_INIT = 0, /* nothing was written into register */ + UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */ + PTR_TO_CTX, /* reg points to bpf_context */ + CONST_PTR_TO_MAP, /* reg points to struct bpf_map */ + PTR_TO_MAP_VALUE, /* reg points to map element value */ + PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */ + FRAME_PTR, /* reg == frame_pointer */ + PTR_TO_STACK, /* reg == frame_pointer + imm */ + CONST_IMM, /* constant integer value */ + + /* PTR_TO_PACKET represents: + * skb->data + * skb->data + imm + * skb->data + (u16) var + * skb->data + (u16) var + imm + * if (range > 0) then [ptr, ptr + range - off) is safe to access + * if (id > 0) means that some 'var' was added + * if (off > 0) means that 'imm' was added + */ + PTR_TO_PACKET, + PTR_TO_PACKET_END, /* skb->data + headlen */ + }; + struct bpf_prog;
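The PTR_TO_PACKET / PTR_TO_PACKET_END types above are what allow the verifier to accept direct packet access from eligible (cls/act) programs: a load through PTR_TO_PACKET is only permitted after a comparison against PTR_TO_PACKET_END has bounded it. A sketch of the idiom from the program author's side (illustrative only; __constant_htons is assumed available to the program):

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>

/* Return the L4 protocol of an IPv4 frame, or 0 on any bounds failure. */
static int demo_parse(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;		/* PTR_TO_PACKET */
	void *data_end = (void *)(long)skb->data_end;	/* PTR_TO_PACKET_END */
	struct ethhdr *eth = data;
	struct iphdr *iph = data + sizeof(*eth);

	/* without this check the verifier rejects the loads below */
	if (data + sizeof(*eth) + sizeof(*iph) > data_end)
		return 0;

	if (eth->h_proto != __constant_htons(ETH_P_IP))
		return 0;

	return iph->protocol;
}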
struct bpf_verifier_ops { @@@ -124,7 -145,8 +149,8 @@@ /* return true if 'size' wide access at offset 'off' within bpf_context * with 'type' (read or write) is allowed */ - bool (*is_valid_access)(int off, int size, enum bpf_access_type type); + bool (*is_valid_access)(int off, int size, enum bpf_access_type type, + enum bpf_reg_type *reg_type);
u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg, int src_reg, int ctx_off, @@@ -167,19 -189,11 +193,19 @@@ struct bpf_array void __percpu *pptrs[0] __aligned(8); }; }; + #define MAX_TAIL_CALL_CNT 32
+struct bpf_event_entry { + struct perf_event *event; + struct file *perf_file; + struct file *map_file; + struct rcu_head rcu; +}; + u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5); u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); -void bpf_fd_array_map_clear(struct bpf_map *map); + bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
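The rcu_head in bpf_event_entry hints at the lifetime rule: the perf-event array publishes entries to lockless readers (see the READ_ONCE() callers in kernel/trace/bpf_trace.c later in this diff), so an entry may only be freed after a grace period. A sketch of that deferral, with a hypothetical free path:

static void demo_event_entry_free_rcu(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	/* dropping ee->perf_file / ee->map_file references would go here */
	kfree(ee);
}

/* writer side (sketch): swap the slot first, then defer the free:
 *
 *	old = xchg(&array->ptrs[i], new);
 *	if (old)
 *		call_rcu(&old->rcu, demo_event_entry_free_rcu);
 */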
const struct bpf_func_proto *bpf_get_trace_printk_proto(void); @@@ -217,13 -231,8 +243,13 @@@ int bpf_percpu_hash_update(struct bpf_m u64 flags); int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, u64 flags); + int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
+int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, + void *key, void *value, u64 map_flags); +void bpf_fd_array_map_clear(struct bpf_map *map); + /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and * forced to use 'long' read/writes to try to atomically copy long counters. * Best-effort only. No barriers here, since it _will_ race with concurrent @@@ -255,6 -264,10 +281,10 @@@ static inline struct bpf_prog *bpf_prog static inline void bpf_prog_put(struct bpf_prog *prog) { } + + static inline void bpf_prog_put_rcu(struct bpf_prog *prog) + { + } #endif /* CONFIG_BPF_SYSCALL */
/* verifier prototypes for helper functions called from eBPF programs */ diff --combined include/linux/mlx4/device.h index 4dbc145,d46a0e7..e6f6910 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@@ -466,6 -466,7 +466,7 @@@ enum enum { MLX4_INTERFACE_STATE_UP = 1 << 0, MLX4_INTERFACE_STATE_DELETION = 1 << 1, + MLX4_INTERFACE_STATE_SHUTDOWN = 1 << 2, };
#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ @@@ -535,7 -536,6 +536,7 @@@ struct mlx4_caps int max_rq_desc_sz; int max_qp_init_rdma; int max_qp_dest_rdma; + int max_tc_eth; u32 *qp0_qkey; u32 *qp0_proxy; u32 *qp1_proxy; @@@ -1495,7 -1495,6 +1496,7 @@@ int mlx4_mr_rereg_mem_write(struct mlx4
int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, u16 offset, u16 size, u8 *data); +int mlx4_max_tc(struct mlx4_dev *dev);
/* Returns true if running in low memory profile (kdump kernel) */ static inline bool mlx4_low_memory_profile(void) diff --combined include/linux/qed/qed_eth_if.h index 71d523b,6c876a6..4475a9d --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@@ -49,6 -49,7 +49,7 @@@ struct qed_start_vport_params bool drop_ttl0; u8 vport_id; u16 mtu; + bool clear_stats; };
struct qed_stop_rxq_params { @@@ -113,7 -114,6 +114,7 @@@ struct qed_queue_start_common_params u8 vport_id; u16 sb; u16 sb_idx; + u16 vf_qid; };
struct qed_tunn_params { @@@ -128,73 -128,11 +129,73 @@@ struct qed_eth_cb_ops void (*force_mac) (void *dev, u8 *mac); };
+#ifdef CONFIG_DCB +/* Prototype declaration of qed_eth_dcbnl_ops should match with the declaration + * of dcbnl_rtnl_ops structure. + */ +struct qed_eth_dcbnl_ops { + /* IEEE 802.1Qaz std */ + int (*ieee_getpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc); + int (*ieee_setpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc); + int (*ieee_getets)(struct qed_dev *cdev, struct ieee_ets *ets); + int (*ieee_setets)(struct qed_dev *cdev, struct ieee_ets *ets); + int (*ieee_peer_getets)(struct qed_dev *cdev, struct ieee_ets *ets); + int (*ieee_peer_getpfc)(struct qed_dev *cdev, struct ieee_pfc *pfc); + int (*ieee_getapp)(struct qed_dev *cdev, struct dcb_app *app); + int (*ieee_setapp)(struct qed_dev *cdev, struct dcb_app *app); + + /* CEE std */ + u8 (*getstate)(struct qed_dev *cdev); + u8 (*setstate)(struct qed_dev *cdev, u8 state); + void (*getpgtccfgtx)(struct qed_dev *cdev, int prio, u8 *prio_type, + u8 *pgid, u8 *bw_pct, u8 *up_map); + void (*getpgbwgcfgtx)(struct qed_dev *cdev, int pgid, u8 *bw_pct); + void (*getpgtccfgrx)(struct qed_dev *cdev, int prio, u8 *prio_type, + u8 *pgid, u8 *bw_pct, u8 *up_map); + void (*getpgbwgcfgrx)(struct qed_dev *cdev, int pgid, u8 *bw_pct); + void (*getpfccfg)(struct qed_dev *cdev, int prio, u8 *setting); + void (*setpfccfg)(struct qed_dev *cdev, int prio, u8 setting); + u8 (*getcap)(struct qed_dev *cdev, int capid, u8 *cap); + int (*getnumtcs)(struct qed_dev *cdev, int tcid, u8 *num); + u8 (*getpfcstate)(struct qed_dev *cdev); + int (*getapp)(struct qed_dev *cdev, u8 idtype, u16 id); + u8 (*getfeatcfg)(struct qed_dev *cdev, int featid, u8 *flags); + + /* DCBX configuration */ + u8 (*getdcbx)(struct qed_dev *cdev); + void (*setpgtccfgtx)(struct qed_dev *cdev, int prio, + u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map); + void (*setpgtccfgrx)(struct qed_dev *cdev, int prio, + u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map); + void (*setpgbwgcfgtx)(struct qed_dev *cdev, int pgid, u8 bw_pct); + void (*setpgbwgcfgrx)(struct qed_dev *cdev, int pgid, u8 bw_pct); + u8 (*setall)(struct qed_dev *cdev); + int (*setnumtcs)(struct qed_dev *cdev, int tcid, u8 num); + void (*setpfcstate)(struct qed_dev *cdev, u8 state); + int (*setapp)(struct qed_dev *cdev, u8 idtype, u16 idval, u8 up); + u8 (*setdcbx)(struct qed_dev *cdev, u8 state); + u8 (*setfeatcfg)(struct qed_dev *cdev, int featid, u8 flags); + + /* Peer apps */ + int (*peer_getappinfo)(struct qed_dev *cdev, + struct dcb_peer_app_info *info, + u16 *app_count); + int (*peer_getapptable)(struct qed_dev *cdev, struct dcb_app *table); + + /* CEE peer */ + int (*cee_peer_getpfc)(struct qed_dev *cdev, struct cee_pfc *pfc); + int (*cee_peer_getpg)(struct qed_dev *cdev, struct cee_pg *pg); +}; +#endif + struct qed_eth_ops { const struct qed_common_ops *common; #ifdef CONFIG_QED_SRIOV const struct qed_iov_hv_ops *iov; #endif +#ifdef CONFIG_DCB + const struct qed_eth_dcbnl_ops *dcb; +#endif
int (*fill_dev_info)(struct qed_dev *cdev, struct qed_dev_eth_info *info); diff --combined kernel/trace/bpf_trace.c index 037ea6e,26f603d..3de25fb --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@@ -192,22 -192,27 +192,26 @@@ static u64 bpf_perf_event_read(u64 r1, { struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; struct bpf_array *array = container_of(map, struct bpf_array, map); + struct bpf_event_entry *ee; struct perf_event *event; - struct file *file;
if (unlikely(index >= array->map.max_entries)) return -E2BIG;
- file = READ_ONCE(array->ptrs[index]); - if (unlikely(!file)) + ee = READ_ONCE(array->ptrs[index]); + if (unlikely(!ee)) return -ENOENT;
- event = file->private_data; - + event = ee->event; /* make sure event is local and doesn't have pmu::count */ if (event->oncpu != smp_processor_id() || event->pmu->count) return -EINVAL;
+ if (unlikely(event->attr.type != PERF_TYPE_HARDWARE && + event->attr.type != PERF_TYPE_RAW)) + return -EINVAL; + /* * we don't know if the function is run successfully by the * return value. It can be judged in other places, such as @@@ -232,8 -237,8 +236,8 @@@ static u64 bpf_perf_event_output(u64 r1 u64 index = flags & BPF_F_INDEX_MASK; void *data = (void *) (long) r4; struct perf_sample_data sample_data; + struct bpf_event_entry *ee; struct perf_event *event; - struct file *file; struct perf_raw_record raw = { .size = size, .data = data, @@@ -246,11 -251,12 +250,11 @@@ if (unlikely(index >= array->map.max_entries)) return -E2BIG;
- file = READ_ONCE(array->ptrs[index]); - if (unlikely(!file)) + ee = READ_ONCE(array->ptrs[index]); + if (unlikely(!ee)) return -ENOENT;
- event = file->private_data; - + event = ee->event; if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE || event->attr.config != PERF_COUNT_SW_BPF_OUTPUT)) return -EINVAL; @@@ -347,7 -353,8 +351,8 @@@ static const struct bpf_func_proto *kpr }
/* bpf+kprobe programs can access fields of 'struct pt_regs' */ - static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type) + static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type, + enum bpf_reg_type *reg_type) { /* check bounds */ if (off < 0 || off >= sizeof(struct pt_regs)) @@@ -425,7 -432,8 +430,8 @@@ static const struct bpf_func_proto *tp_ } }
- static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type) + static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type, + enum bpf_reg_type *reg_type) { if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE) return false; diff --combined net/batman-adv/routing.c index f75091c,6c2901a..396c013 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@@ -374,6 -374,7 +374,7 @@@ int batadv_recv_icmp_packet(struct sk_b if (skb_cow(skb, ETH_HLEN) < 0) goto out;
+ ethhdr = eth_hdr(skb); icmph = (struct batadv_icmp_header *)skb->data; icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmph; if (icmp_packet_rr->rr_cur >= BATADV_RR_LEN) @@@ -653,7 -654,7 +654,7 @@@ static int batadv_route_unicast_packet( len + ETH_HLEN);
ret = NET_RX_SUCCESS; - } else if (res == NET_XMIT_POLICED) { + } else if (res == -EINPROGRESS) { /* skb was buffered and consumed */ ret = NET_RX_SUCCESS; } diff --combined net/core/filter.c index df6860c,c4b330c..cb9fc16 --- a/net/core/filter.c +++ b/net/core/filter.c @@@ -748,17 -748,6 +748,17 @@@ static bool chk_code_allowed(u16 code_t return codes[code_to_probe]; }
+static bool bpf_check_basics_ok(const struct sock_filter *filter, + unsigned int flen) +{ + if (filter == NULL) + return false; + if (flen == 0 || flen > BPF_MAXINSNS) + return false; + + return true; +} + /** * bpf_check_classic - verify socket filter code * @filter: filter to verify @@@ -779,6 -768,9 +779,6 @@@ static int bpf_check_classic(const stru bool anc_found; int pc;
- if (flen == 0 || flen > BPF_MAXINSNS) - return -EINVAL; - /* Check the filter code now */ for (pc = 0; pc < flen; pc++) { const struct sock_filter *ftest = &filter[pc]; @@@ -1073,7 -1065,7 +1073,7 @@@ int bpf_prog_create(struct bpf_prog **p struct bpf_prog *fp;
/* Make sure new filter is there and in the right amounts. */ - if (fprog->filter == NULL) + if (!bpf_check_basics_ok(fprog->filter, fprog->len)) return -EINVAL;
fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); @@@ -1120,7 -1112,7 +1120,7 @@@ int bpf_prog_create_from_user(struct bp int err;
/* Make sure new filter is there and in the right amounts. */ - if (fprog->filter == NULL) + if (!bpf_check_basics_ok(fprog->filter, fprog->len)) return -EINVAL;
fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); @@@ -1215,6 -1207,7 +1215,6 @@@ stati struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk) { unsigned int fsize = bpf_classic_proglen(fprog); - unsigned int bpf_fsize = bpf_prog_size(fprog->len); struct bpf_prog *prog; int err;
@@@ -1222,10 -1215,10 +1222,10 @@@ return ERR_PTR(-EPERM);
/* Make sure new filter is there and in the right amounts. */ - if (fprog->filter == NULL) + if (!bpf_check_basics_ok(fprog->filter, fprog->len)) return ERR_PTR(-EINVAL);
- prog = bpf_prog_alloc(bpf_fsize, 0); + prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); if (!prog) return ERR_PTR(-ENOMEM);
@@@ -1610,36 -1603,9 +1610,36 @@@ static const struct bpf_func_proto bpf_ .arg5_type = ARG_ANYTHING, };
+static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) +{ + if (skb_at_tc_ingress(skb)) + skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len); + + return dev_forward_skb(dev, skb); +} + +static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) +{ + int ret; + + if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) { + net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); + kfree_skb(skb); + return -ENETDOWN; + } + + skb->dev = dev; + + __this_cpu_inc(xmit_recursion); + ret = dev_queue_xmit(skb); + __this_cpu_dec(xmit_recursion); + + return ret; +} + static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5) { - struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2; + struct sk_buff *skb = (struct sk_buff *) (long) r1; struct net_device *dev;
if (unlikely(flags & ~(BPF_F_INGRESS))) @@@ -1649,12 -1615,19 +1649,12 @@@ if (unlikely(!dev)) return -EINVAL;
- skb2 = skb_clone(skb, GFP_ATOMIC); - if (unlikely(!skb2)) + skb = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!skb)) return -ENOMEM;
- if (flags & BPF_F_INGRESS) { - if (skb_at_tc_ingress(skb2)) - skb_postpush_rcsum(skb2, skb_mac_header(skb2), - skb2->mac_len); - return dev_forward_skb(dev, skb2); - } - - skb2->dev = dev; - return dev_queue_xmit(skb2); + return flags & BPF_F_INGRESS ? + __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); }
static const struct bpf_func_proto bpf_clone_redirect_proto = { @@@ -1698,8 -1671,15 +1698,8 @@@ int skb_do_redirect(struct sk_buff *skb return -EINVAL; }
- if (ri->flags & BPF_F_INGRESS) { - if (skb_at_tc_ingress(skb)) - skb_postpush_rcsum(skb, skb_mac_header(skb), - skb->mac_len); - return dev_forward_skb(dev, skb); - } - - skb->dev = dev; - return dev_queue_xmit(skb); + return ri->flags & BPF_F_INGRESS ? + __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); }
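Both helpers above deduplicate the ingress/egress split that was previously copy-pasted into bpf_clone_redirect() and skb_do_redirect(), and the egress path gains a per-cpu depth counter so a program redirecting to its own device cannot recurse through dev_queue_xmit() unboundedly. The guard pattern in isolation (hypothetical names; the in-tree limit is XMIT_RECURSION_LIMIT):

#include <linux/netdevice.h>
#include <linux/percpu-defs.h>

static DEFINE_PER_CPU(int, demo_xmit_depth);
#define DEMO_XMIT_LIMIT 8	/* assumed value, for the sketch only */

static int demo_tx(struct net_device *dev, struct sk_buff *skb)
{
	int ret;

	if (unlikely(__this_cpu_read(demo_xmit_depth) > DEMO_XMIT_LIMIT)) {
		kfree_skb(skb);		/* break the redirect loop */
		return -ENETDOWN;
	}

	skb->dev = dev;

	__this_cpu_inc(demo_xmit_depth);
	ret = dev_queue_xmit(skb);	/* may re-enter via another redirect */
	__this_cpu_dec(demo_xmit_depth);

	return ret;
}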
static const struct bpf_func_proto bpf_redirect_proto = { @@@ -2105,7 -2085,8 +2105,8 @@@ static bool __is_valid_access(int off, }
static bool sk_filter_is_valid_access(int off, int size, - enum bpf_access_type type) + enum bpf_access_type type, + enum bpf_reg_type *reg_type) { switch (off) { case offsetof(struct __sk_buff, tc_classid): @@@ -2128,7 -2109,8 +2129,8 @@@ }
static bool tc_cls_act_is_valid_access(int off, int size, - enum bpf_access_type type) + enum bpf_access_type type, + enum bpf_reg_type *reg_type) { if (type == BPF_WRITE) { switch (off) { @@@ -2143,6 -2125,16 +2145,16 @@@ return false; } } + + switch (off) { + case offsetof(struct __sk_buff, data): + *reg_type = PTR_TO_PACKET; + break; + case offsetof(struct __sk_buff, data_end): + *reg_type = PTR_TO_PACKET_END; + break; + } + return __is_valid_access(off, size, type); }
diff --combined net/ipv4/gre_demux.c index c4c3e43,de1d119..b798862 --- a/net/ipv4/gre_demux.c +++ b/net/ipv4/gre_demux.c @@@ -62,26 -62,26 +62,26 @@@ EXPORT_SYMBOL_GPL(gre_del_protocol)
/* Fills in tpi and returns header length to be pulled. */ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, - bool *csum_err, __be16 proto) + bool *csum_err, __be16 proto, int nhs) { const struct gre_base_hdr *greh; __be32 *options; int hdr_len;
- if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr)))) + if (unlikely(!pskb_may_pull(skb, nhs + sizeof(struct gre_base_hdr)))) return -EINVAL;
- greh = (struct gre_base_hdr *)skb_transport_header(skb); + greh = (struct gre_base_hdr *)(skb->data + nhs); if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING))) return -EINVAL;
tpi->flags = gre_flags_to_tnl_flags(greh->flags); hdr_len = gre_calc_hlen(tpi->flags);
- if (!pskb_may_pull(skb, hdr_len)) + if (!pskb_may_pull(skb, nhs + hdr_len)) return -EINVAL;
- greh = (struct gre_base_hdr *)skb_transport_header(skb); + greh = (struct gre_base_hdr *)(skb->data + nhs); tpi->proto = greh->protocol;
options = (__be32 *)(greh + 1); @@@ -117,7 -117,6 +117,7 @@@ if ((*(u8 *)options & 0xF0) != 0x40) hdr_len += 4; } + tpi->hdr_len = hdr_len; return hdr_len; } EXPORT_SYMBOL(gre_parse_header); diff --combined net/ipv4/ip_gre.c index 8eec78f,1d000af..5b1481b --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@@ -49,12 -49,6 +49,6 @@@ #include <net/gre.h> #include <net/dst_metadata.h>
- #if IS_ENABLED(CONFIG_IPV6) - #include <net/ipv6.h> - #include <net/ip6_fib.h> - #include <net/ip6_route.h> - #endif - /* Problems & solutions -------------------- @@@ -144,7 -138,6 +138,7 @@@ static void ipgre_err(struct sk_buff *s const struct iphdr *iph; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; + unsigned int data_len = 0; struct ip_tunnel *t;
switch (type) { @@@ -170,7 -163,6 +164,7 @@@ case ICMP_TIME_EXCEEDED: if (code != ICMP_EXC_TTL) return; + data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */ break;
case ICMP_REDIRECT: @@@ -189,13 -181,6 +183,13 @@@ if (!t) return;
+#if IS_ENABLED(CONFIG_IPV6) + if (tpi->proto == htons(ETH_P_IPV6) && + !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len, + type, data_len)) + return; +#endif + if (t->parms.iph.daddr == 0 || ipv4_is_multicast(t->parms.iph.daddr)) return; @@@ -226,12 -211,14 +220,14 @@@ static void gre_err(struct sk_buff *skb * by themselves??? */
+ const struct iphdr *iph = (struct iphdr *)skb->data; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct tnl_ptk_info tpi; bool csum_err = false;
- if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP)) < 0) { + if (gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), + iph->ihl * 4) < 0) { if (!csum_err) /* ignore csum errors. */ return; } @@@ -347,7 -334,7 +343,7 @@@ static int gre_rcv(struct sk_buff *skb } #endif
- hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP)); + hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0); if (hdr_len < 0) goto drop;
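To make the new nhs argument concrete: it is the byte offset from skb->data to the GRE base header, which is why the two call sites in this file differ, 0 in gre_rcv() where skb->data already sits on GRE, and ihl * 4 in gre_err() where the ICMP error payload still begins with the outer IPv4 header. A self-contained restatement of the error-path call (no new API, just the diff's usage):

#include <linux/ip.h>
#include <net/gre.h>
#include <net/ip_tunnels.h>

static int demo_parse_from_icmp_err(struct sk_buff *skb,
				    struct tnl_ptk_info *tpi, bool *csum_err)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;

	/* skip the outer IPv4 header to reach the GRE base header */
	return gre_parse_header(skb, tpi, csum_err, htons(ETH_P_IP),
				iph->ihl * 4);
}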
@@@ -850,19 -837,17 +846,19 @@@ out return ipgre_tunnel_validate(tb, data); }
-static void ipgre_netlink_parms(struct net_device *dev, +static int ipgre_netlink_parms(struct net_device *dev, struct nlattr *data[], struct nlattr *tb[], struct ip_tunnel_parm *parms) { + struct ip_tunnel *t = netdev_priv(dev); + memset(parms, 0, sizeof(*parms));
parms->iph.protocol = IPPROTO_GRE;
if (!data) - return; + return 0;
if (data[IFLA_GRE_LINK]) parms->link = nla_get_u32(data[IFLA_GRE_LINK]); @@@ -891,26 -876,16 +887,26 @@@ if (data[IFLA_GRE_TOS]) parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);
- if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) + if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) { + if (t->ignore_df) + return -EINVAL; parms->iph.frag_off = htons(IP_DF); + }
if (data[IFLA_GRE_COLLECT_METADATA]) { - struct ip_tunnel *t = netdev_priv(dev); - t->collect_md = true; if (dev->type == ARPHRD_IPGRE) dev->type = ARPHRD_NONE; } + + if (data[IFLA_GRE_IGNORE_DF]) { + if (nla_get_u8(data[IFLA_GRE_IGNORE_DF]) + && (parms->iph.frag_off & htons(IP_DF))) + return -EINVAL; + t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]); + } + + return 0; }
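For clarity on the two -EINVAL paths added above: DF-based path MTU discovery (IFLA_GRE_PMTUDISC) and IFLA_GRE_IGNORE_DF are mutually exclusive, so whichever setting arrives while the other is active is rejected. The invariant as a predicate (hypothetical helper, not in the diff):

/* true when the requested DF configuration is coherent */
static bool demo_gre_df_ok(bool pmtudisc_sets_df, bool ignore_df)
{
	/* forcing DF on the outer header while ignoring DF is contradictory */
	return !(pmtudisc_sets_df && ignore_df);
}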
/* This function returns true when ENCAP attributes are present in the nl msg */ @@@ -981,19 -956,16 +977,19 @@@ static int ipgre_newlink(struct net *sr { struct ip_tunnel_parm p; struct ip_tunnel_encap ipencap; + int err;
if (ipgre_netlink_encap_parms(data, &ipencap)) { struct ip_tunnel *t = netdev_priv(dev); - int err = ip_tunnel_encap_setup(t, &ipencap); + err = ip_tunnel_encap_setup(t, &ipencap);
if (err < 0) return err; }
- ipgre_netlink_parms(dev, data, tb, &p); + err = ipgre_netlink_parms(dev, data, tb, &p); + if (err < 0) + return err; return ip_tunnel_newlink(dev, tb, &p); }
@@@ -1002,19 -974,16 +998,19 @@@ static int ipgre_changelink(struct net_ { struct ip_tunnel_parm p; struct ip_tunnel_encap ipencap; + int err;
if (ipgre_netlink_encap_parms(data, &ipencap)) { struct ip_tunnel *t = netdev_priv(dev); - int err = ip_tunnel_encap_setup(t, &ipencap); + err = ip_tunnel_encap_setup(t, &ipencap);
if (err < 0) return err; }
- ipgre_netlink_parms(dev, data, tb, &p); + err = ipgre_netlink_parms(dev, data, tb, &p); + if (err < 0) + return err; return ip_tunnel_changelink(dev, tb, &p); }
@@@ -1051,8 -1020,6 +1047,8 @@@ static size_t ipgre_get_size(const stru nla_total_size(2) + /* IFLA_GRE_COLLECT_METADATA */ nla_total_size(0) + + /* IFLA_GRE_IGNORE_DF */ + nla_total_size(1) + 0; }
@@@ -1086,9 -1053,6 +1082,9 @@@ static int ipgre_fill_info(struct sk_bu t->encap.flags)) goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df)) + goto nla_put_failure; + if (t->collect_md) { if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA)) goto nla_put_failure; @@@ -1116,7 -1080,6 +1112,7 @@@ static const struct nla_policy ipgre_po [IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 }, [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 }, [IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG }, + [IFLA_GRE_IGNORE_DF] = { .type = NLA_U8 }, };
static struct rtnl_link_ops ipgre_link_ops __read_mostly = { @@@ -1154,6 -1117,7 +1150,7 @@@ struct net_device *gretap_fb_dev_create { struct nlattr *tb[IFLA_MAX + 1]; struct net_device *dev; + LIST_HEAD(list_kill); struct ip_tunnel *t; int err;
@@@ -1169,8 -1133,10 +1166,10 @@@ t->collect_md = true;
err = ipgre_newlink(net, dev, tb, NULL); - if (err < 0) - goto out; + if (err < 0) { + free_netdev(dev); + return ERR_PTR(err); + }
/* openvswitch users expect packet sizes to be unrestricted, * so set the largest MTU we can. @@@ -1179,9 -1145,14 +1178,14 @@@ if (err) goto out;
+ err = rtnl_configure_link(dev, NULL); + if (err < 0) + goto out; + return dev; out: - free_netdev(dev); + ip_tunnel_dellink(dev, &list_kill); + unregister_netdevice_many(&list_kill); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(gretap_fb_dev_create); diff --combined net/ipv4/tcp_output.c index b1bcba0,e00e972..b26aa87 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@@ -911,12 -911,9 +911,12 @@@ static int tcp_transmit_skb(struct soc int err;
BUG_ON(!skb || !tcp_skb_pcount(skb)); + tp = tcp_sk(sk);
if (clone_it) { skb_mstamp_get(&skb->skb_mstamp); + TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq + - tp->snd_una;
if (unlikely(skb_cloned(skb))) skb = pskb_copy(skb, gfp_mask); @@@ -927,6 -924,7 +927,6 @@@ }
inet = inet_sk(sk); - tp = tcp_sk(sk); tcb = TCP_SKB_CB(skb); memset(&opts, 0, sizeof(opts));
@@@ -2753,7 -2751,7 +2753,7 @@@ void tcp_xmit_retransmit_queue(struct s struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; struct sk_buff *hole = NULL; - u32 last_lost; + u32 max_segs, last_lost; int mib_idx; int fwd_rexmitting = 0;
@@@ -2773,6 -2771,7 +2773,7 @@@ last_lost = tp->snd_una; }
+ max_segs = tcp_tso_autosize(sk, tcp_current_mss(sk)); tcp_for_write_queue_from(skb, sk) { __u8 sacked = TCP_SKB_CB(skb)->sacked; int segs; @@@ -2786,6 -2785,10 +2787,10 @@@ segs = tp->snd_cwnd - tcp_packets_in_flight(tp); if (segs <= 0) return; + /* In case tcp_shift_skb_data() has aggregated large skbs, + * we need to make sure we do not send overly large TSO packets + */ + segs = min_t(int, segs, max_segs);
if (fwd_rexmitting) { begin_fwd: diff --combined net/ipv6/icmp.c index fd11f58,a4fa840..bd59c34 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@@ -98,7 -98,7 +98,7 @@@ static void icmpv6_err(struct sk_buff *
if (!(type & ICMPV6_INFOMSG_MASK)) if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST) - ping_err(skb, offset, info); + ping_err(skb, offset, ntohl(info)); }
static int icmpv6_rcv(struct sk_buff *skb); @@@ -388,8 -388,7 +388,8 @@@ relookup_failed /* * Send an ICMP message in response to a packet in error */ -static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) +static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct in6_addr *force_saddr) { struct net *net = dev_net(skb->dev); struct inet6_dev *idev = NULL; @@@ -476,8 -475,6 +476,8 @@@ memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_ICMPV6; fl6.daddr = hdr->saddr; + if (force_saddr) + saddr = force_saddr; if (saddr) fl6.saddr = *saddr; fl6.flowi6_mark = mark; @@@ -505,14 -502,12 +505,14 @@@ else if (!fl6.flowi6_oif) fl6.flowi6_oif = np->ucast_oif;
+ ipc6.tclass = np->tclass; + fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); + dst = icmpv6_route_lookup(net, skb, sk, &fl6); if (IS_ERR(dst)) goto out;
ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); - ipc6.tclass = np->tclass; ipc6.dontfrag = np->dontfrag; ipc6.opt = NULL;
@@@ -554,75 -549,10 +554,75 @@@ out */ void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos) { - icmp6_send(skb, ICMPV6_PARAMPROB, code, pos); + icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL); kfree_skb(skb); }
+/* Generate icmpv6 with type/code ICMPV6_DEST_UNREACH/ICMPV6_ADDR_UNREACH + * if sufficient data bytes are available + * @nhs is the size of the tunnel header(s) : + * Either an IPv4 header for SIT encap + * an IPv4 header + GRE header for GRE encap + */ +int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, + unsigned int data_len) +{ + struct in6_addr temp_saddr; + struct rt6_info *rt; + struct sk_buff *skb2; + u32 info = 0; + + if (!pskb_may_pull(skb, nhs + sizeof(struct ipv6hdr) + 8)) + return 1; + + /* RFC 4884 (partial) support for ICMP extensions */ + if (data_len < 128 || (data_len & 7) || skb->len < data_len) + data_len = 0; + + skb2 = data_len ? skb_copy(skb, GFP_ATOMIC) : skb_clone(skb, GFP_ATOMIC); + + if (!skb2) + return 1; + + skb_dst_drop(skb2); + skb_pull(skb2, nhs); + skb_reset_network_header(skb2); + + rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0); + + if (rt && rt->dst.dev) + skb2->dev = rt->dst.dev; + + ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, &temp_saddr); + + if (data_len) { + /* RFC 4884 (partial) support : + * insert 0 padding at the end, before the extensions + */ + __skb_push(skb2, nhs); + skb_reset_network_header(skb2); + memmove(skb2->data, skb2->data + nhs, data_len - nhs); + memset(skb2->data + data_len - nhs, 0, nhs); + /* RFC 4884 4.5 : Length is measured in 64-bit words, + * and stored in reserved[0] + */ + info = (data_len/8) << 24; + } + if (type == ICMP_TIME_EXCEEDED) + icmp6_send(skb2, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, + info, &temp_saddr); + else + icmp6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, + info, &temp_saddr); + if (rt) + ip6_rt_put(rt); + + kfree_skb(skb2); + + return 0; +} +EXPORT_SYMBOL(ip6_err_gen_icmpv6_unreach); + static void icmpv6_echo_reply(struct sk_buff *skb) { struct net *net = dev_net(skb->dev); @@@ -655,7 -585,7 +655,7 @@@ fl6.daddr = ipv6_hdr(skb)->saddr; if (saddr) fl6.saddr = *saddr; - fl6.flowi6_oif = l3mdev_fib_oif(skb->dev); + fl6.flowi6_oif = skb->dev->ifindex; fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY; fl6.flowi6_mark = mark; security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); diff --combined net/ipv6/route.c index 08b77f4,520b788..4981755 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@@ -1042,8 -1042,8 +1042,8 @@@ static struct rt6_info *rt6_make_pcpu_r return pcpu_rt; }
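As a note on the RFC 4884 arithmetic in the new ip6_err_gen_icmpv6_unreach() above, this self-contained sketch (values illustrative) walks the length bookkeeping: the IPv4 ICMP header reports the original-datagram length in 32-bit words (section 4.1), and the ICMPv6 side re-encodes it in 64-bit words in the top byte of 'info' (section 4.5), after enforcing the 128-byte minimum and 8-byte alignment.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t v4_words = 36;                 /* as read from icmp_hdr->un.reserved[1] */
        unsigned int data_len = v4_words * 4;  /* RFC 4884 4.1: 32-bit words to bytes */

        /* same sanity rules as the function above: at least 128 bytes and
         * 8-byte aligned, otherwise assume no extension structure at all */
        if (data_len < 128 || (data_len & 7))
                data_len = 0;

        uint32_t info = (data_len / 8) << 24;  /* RFC 4884 4.5: 64-bit words */

        printf("data_len=%u bytes, info=0x%08x\n", data_len, info);
        return 0;
}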
-static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif, - struct flowi6 *fl6, int flags) +struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, + int oif, struct flowi6 *fl6, int flags) { struct fib6_node *fn, *saved_fn; struct rt6_info *rt; @@@ -1139,7 -1139,6 +1139,7 @@@ redo_rt6_select
} } +EXPORT_SYMBOL_GPL(ip6_pol_route);
static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table, struct flowi6 *fl6, int flags) @@@ -1783,7 -1782,7 +1783,7 @@@ static struct rt6_info *ip6_nh_lookup_t }; struct fib6_table *table; struct rt6_info *rt; - int flags = 0; + int flags = RT6_LOOKUP_F_IFACE;
table = fib6_get_table(net, cfg->fc_table); if (!table) @@@ -2201,7 -2200,7 +2201,7 @@@ static void rt6_do_redirect(struct dst_ * first-hop router for the specified ICMP Destination Address. */
- if (!ndisc_parse_options(msg->opt, optlen, &ndopts)) { + if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) { net_dbg_ratelimited("rt6_redirect: invalid ND options\n"); return; } @@@ -2236,12 -2235,12 +2236,12 @@@ * We have finally decided to accept it. */
- neigh_update(neigh, lladdr, NUD_STALE, + ndisc_update(skb->dev, neigh, lladdr, NUD_STALE, NEIGH_UPDATE_F_WEAK_OVERRIDE| NEIGH_UPDATE_F_OVERRIDE| (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER| - NEIGH_UPDATE_F_ISROUTER)) - ); + NEIGH_UPDATE_F_ISROUTER)), + NDISC_REDIRECT, &ndopts);
nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL); if (!nrt) @@@ -2586,6 -2585,23 +2586,6 @@@ struct rt6_info *addrconf_dst_alloc(str return rt; }
-int ip6_route_get_saddr(struct net *net, - struct rt6_info *rt, - const struct in6_addr *daddr, - unsigned int prefs, - struct in6_addr *saddr) -{ - struct inet6_dev *idev = - rt ? ip6_dst_idev((struct dst_entry *)rt) : NULL; - int err = 0; - if (rt && rt->rt6i_prefsrc.plen) - *saddr = rt->rt6i_prefsrc.addr; - else - err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL, - daddr, prefs, saddr); - return err; -} - /* remove deleted ip from prefsrc entries */ struct arg_dev_net_ip { struct net_device *dev; @@@ -3290,8 -3306,6 +3290,8 @@@ static int inet6_rtm_getroute(struct sk
err = -EINVAL; memset(&fl6, 0, sizeof(fl6)); + rtm = nlmsg_data(nlh); + fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
if (tb[RTA_SRC]) { if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr)) diff --combined net/ipv6/sit.c index cdd7146,0619ac7..917a5cd --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@@ -479,12 -479,47 +479,12 @@@ static void ipip6_tunnel_uninit(struct dev_put(dev); }
-/* Generate icmpv6 with type/code ICMPV6_DEST_UNREACH/ICMPV6_ADDR_UNREACH - * if sufficient data bytes are available - */ -static int ipip6_err_gen_icmpv6_unreach(struct sk_buff *skb) -{ - int ihl = ((const struct iphdr *)skb->data)->ihl*4; - struct rt6_info *rt; - struct sk_buff *skb2; - - if (!pskb_may_pull(skb, ihl + sizeof(struct ipv6hdr) + 8)) - return 1; - - skb2 = skb_clone(skb, GFP_ATOMIC); - - if (!skb2) - return 1; - - skb_dst_drop(skb2); - skb_pull(skb2, ihl); - skb_reset_network_header(skb2); - - rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr, NULL, 0, 0); - - if (rt && rt->dst.dev) - skb2->dev = rt->dst.dev; - - icmpv6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); - - if (rt) - ip6_rt_put(rt); - - kfree_skb(skb2); - - return 0; -} - static int ipip6_err(struct sk_buff *skb, u32 info) { const struct iphdr *iph = (const struct iphdr *)skb->data; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; + unsigned int data_len = 0; struct ip_tunnel *t; int err;
@@@ -509,7 -544,6 +509,7 @@@ case ICMP_TIME_EXCEEDED: if (code != ICMP_EXC_TTL) return 0; + data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */ break; case ICMP_REDIRECT: break; @@@ -526,22 -560,22 +526,22 @@@
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { ipv4_update_pmtu(skb, dev_net(skb->dev), info, - t->parms.link, 0, IPPROTO_IPV6, 0); + t->parms.link, 0, iph->protocol, 0); err = 0; goto out; } if (type == ICMP_REDIRECT) { ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0, - IPPROTO_IPV6, 0); + iph->protocol, 0); err = 0; goto out; }
- if (t->parms.iph.daddr == 0) + err = 0; + if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len)) goto out;
- err = 0; - if (!ipip6_err_gen_icmpv6_unreach(skb)) + if (t->parms.iph.daddr == 0) goto out;
if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) @@@ -791,6 -825,9 +791,6 @@@ static netdev_tx_t ipip6_tunnel_xmit(st u8 protocol = IPPROTO_IPV6; int t_hlen = tunnel->hlen + sizeof(struct iphdr);
- if (skb->protocol != htons(ETH_P_IPV6)) - goto tx_error; - if (tos == 1) tos = ipv6_get_dsfield(iph6);
diff --combined net/ipv6/udp.c index 4bb5c13,005dc82..0a71a312d --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@@ -115,11 -115,10 +115,10 @@@ static void udp_v6_rehash(struct sock * udp_lib_rehash(sk, new_hash); }
- static inline int compute_score(struct sock *sk, struct net *net, - unsigned short hnum, - const struct in6_addr *saddr, __be16 sport, - const struct in6_addr *daddr, __be16 dport, - int dif) + static int compute_score(struct sock *sk, struct net *net, + const struct in6_addr *saddr, __be16 sport, + const struct in6_addr *daddr, unsigned short hnum, + int dif) { int score; struct inet_sock *inet; @@@ -162,54 -161,11 +161,11 @@@ return score; }
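The consolidation above leaves a single scoring routine for both hash tables. A rough standalone sketch of the scoring idea (struct and field names invented): every field the socket has bound must match the packet exactly, and each bound field earns one point, so the most specific matching socket wins.

/* invented types, for illustration only */
struct bound { unsigned short dport; int ifindex; };
struct pkt   { unsigned short sport; int ifindex; };

static int lookup_score(const struct bound *b, const struct pkt *p)
{
        int score = 0;

        if (b->dport) {                 /* connected to a remote port? */
                if (b->dport != p->sport)
                        return -1;      /* bound but mismatched: disqualify */
                score++;
        }
        if (b->ifindex) {               /* bound to a device? */
                if (b->ifindex != p->ifindex)
                        return -1;
                score++;
        }
        return score;                   /* higher means more specific */
}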
- static inline int compute_score2(struct sock *sk, struct net *net, - const struct in6_addr *saddr, __be16 sport, - const struct in6_addr *daddr, - unsigned short hnum, int dif) - { - int score; - struct inet_sock *inet; - - if (!net_eq(sock_net(sk), net) || - udp_sk(sk)->udp_port_hash != hnum || - sk->sk_family != PF_INET6) - return -1; - - if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr)) - return -1; - - score = 0; - inet = inet_sk(sk); - - if (inet->inet_dport) { - if (inet->inet_dport != sport) - return -1; - score++; - } - - if (!ipv6_addr_any(&sk->sk_v6_daddr)) { - if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr)) - return -1; - score++; - } - - if (sk->sk_bound_dev_if) { - if (sk->sk_bound_dev_if != dif) - return -1; - score++; - } - - if (sk->sk_incoming_cpu == raw_smp_processor_id()) - score++; - - return score; - } - - /* called with read_rcu_lock() */ + /* called with rcu_read_lock() */ static struct sock *udp6_lib_lookup2(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, unsigned int hnum, int dif, - struct udp_hslot *hslot2, unsigned int slot2, + struct udp_hslot *hslot2, struct sk_buff *skb) { struct sock *sk, *result; @@@ -219,7 -175,7 +175,7 @@@ result = NULL; badness = -1; udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { - score = compute_score2(sk, net, saddr, sport, + score = compute_score(sk, net, saddr, sport, daddr, hnum, dif); if (score > badness) { reuseport = sk->sk_reuseport; @@@ -268,17 -224,22 +224,22 @@@ struct sock *__udp6_lib_lookup(struct n
result = udp6_lib_lookup2(net, saddr, sport, daddr, hnum, dif, - hslot2, slot2, skb); + hslot2, skb); if (!result) { + unsigned int old_slot2 = slot2; hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum); slot2 = hash2 & udptable->mask; + /* avoid searching the same slot again. */ + if (unlikely(slot2 == old_slot2)) + return result; + hslot2 = &udptable->hash2[slot2]; if (hslot->count < hslot2->count) goto begin;
result = udp6_lib_lookup2(net, saddr, sport, - &in6addr_any, hnum, dif, - hslot2, slot2, skb); + daddr, hnum, dif, + hslot2, skb); } return result; } @@@ -286,7 -247,7 +247,7 @@@ begin result = NULL; badness = -1; sk_for_each_rcu(sk, &hslot->head) { - score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif); + score = compute_score(sk, net, saddr, sport, daddr, hnum, dif); if (score > badness) { reuseport = sk->sk_reuseport; if (reuseport) { @@@ -1246,11 -1207,6 +1207,11 @@@ do_udp_sendmsg
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ if (ipc6.tclass < 0) + ipc6.tclass = np->tclass; + + fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); + dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); @@@ -1261,6 -1217,9 +1222,6 @@@ if (ipc6.hlimit < 0) ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
- if (ipc6.tclass < 0) - ipc6.tclass = np->tclass; - if (msg->msg_flags&MSG_CONFIRM) goto do_confirm; back_from_confirm: diff --combined net/openvswitch/conntrack.c index 52f3b9b,d843125..b4069a9 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@@ -818,23 -818,22 +818,33 @@@ static int ovs_ct_lookup(struct net *ne */ state = OVS_CS_F_TRACKED | OVS_CS_F_NEW | OVS_CS_F_RELATED; __ovs_ct_update_key(key, state, &info->zone, exp->master); - } else - return __ovs_ct_lookup(net, key, info, skb); + } else { + struct nf_conn *ct; + int err; + + err = __ovs_ct_lookup(net, key, info, skb); + if (err) + return err; + + ct = (struct nf_conn *)skb->nfct; + if (ct) + nf_ct_deliver_cached_events(ct); + }
return 0; }
+static bool labels_nonzero(const struct ovs_key_ct_labels *labels) +{ + size_t i; + + for (i = 0; i < sizeof(*labels); i++) + if (labels->ct_labels[i]) + return true; + + return false; +} + /* Lookup connection and confirm if unconfirmed. */ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key, const struct ovs_conntrack_info *info, @@@ -845,32 -844,24 +855,32 @@@ err = __ovs_ct_lookup(net, key, info, skb); if (err) return err; - /* This is a no-op if the connection has already been confirmed. */ + + /* Apply changes before confirming the connection so that the initial + * conntrack NEW netlink event carries the values given in the CT + * action. + */ + if (info->mark.mask) { + err = ovs_ct_set_mark(skb, key, info->mark.value, + info->mark.mask); + if (err) + return err; + } + if (labels_nonzero(&info->labels.mask)) { + err = ovs_ct_set_labels(skb, key, &info->labels.value, + &info->labels.mask); + if (err) + return err; + } + /* This will take care of sending queued events even if the connection + * is already confirmed. + */ if (nf_conntrack_confirm(skb) != NF_ACCEPT) return -EINVAL;
return 0; }
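The reordering in ovs_ct_commit() above is about event visibility: mark and labels are applied before nf_conntrack_confirm() so the first netlink event for a new connection already carries them. A standalone sketch of the same set-then-publish idea, including a masked update (types and names invented):

struct conn { unsigned int mark; int published; };

static void commit_conn(struct conn *c, unsigned int value, unsigned int mask)
{
        /* masked update: only the bits covered by mask change */
        if (mask)
                c->mark = (c->mark & ~mask) | (value & mask);

        /* publish last, so any observer sees the final values */
        c->published = 1;
}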
-static bool labels_nonzero(const struct ovs_key_ct_labels *labels) -{ - size_t i; - - for (i = 0; i < sizeof(*labels); i++) - if (labels->ct_labels[i]) - return true; - - return false; -} - /* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero * value if 'skb' is freed. */ @@@ -895,7 -886,19 +905,7 @@@ int ovs_ct_execute(struct net *net, str err = ovs_ct_commit(net, key, info, skb); else err = ovs_ct_lookup(net, key, info, skb); - if (err) - goto err;
- if (info->mark.mask) { - err = ovs_ct_set_mark(skb, key, info->mark.value, - info->mark.mask); - if (err) - goto err; - } - if (labels_nonzero(&info->labels.mask)) - err = ovs_ct_set_labels(skb, key, &info->labels.value, - &info->labels.mask); -err: skb_push(skb, nh_ofs); if (err) kfree_skb(skb); @@@ -1152,20 -1155,6 +1162,20 @@@ static int parse_ct(const struct nlatt } }
+#ifdef CONFIG_NF_CONNTRACK_MARK + if (!info->commit && info->mark.mask) { + OVS_NLERR(log, + "Setting conntrack mark requires 'commit' flag."); + return -EINVAL; + } +#endif +#ifdef CONFIG_NF_CONNTRACK_LABELS + if (!info->commit && labels_nonzero(&info->labels.mask)) { + OVS_NLERR(log, + "Setting conntrack labels requires 'commit' flag."); + return -EINVAL; + } +#endif if (rem > 0) { OVS_NLERR(log, "Conntrack attr has %d unknown bytes", rem); return -EINVAL; diff --combined net/rds/ib_cm.c index 3342876,7c2a65a..e48bb1b --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@@ -36,7 -36,6 +36,7 @@@ #include <linux/vmalloc.h> #include <linux/ratelimit.h>
+#include "rds_single_path.h" #include "rds.h" #include "ib.h"
@@@ -112,7 -111,7 +112,7 @@@ void rds_ib_cm_connect_complete(struct } }
- if (conn->c_version < RDS_PROTOCOL(3,1)) { + if (conn->c_version < RDS_PROTOCOL(3, 1)) { printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed," " no longer supported\n", &conn->c_faddr, @@@ -274,7 -273,7 +274,7 @@@ static void rds_ib_tasklet_fn_send(unsi if (rds_conn_up(conn) && (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags) || test_bit(0, &conn->c_map_queued))) - rds_send_xmit(ic->conn); + rds_send_xmit(&ic->conn->c_path[0]); }
static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq, diff --combined net/rds/loop.c index 268f07f,814173b..15f83db --- a/net/rds/loop.c +++ b/net/rds/loop.c @@@ -34,7 -34,6 +34,7 @@@ #include <linux/slab.h> #include <linux/in.h>
+#include "rds_single_path.h" #include "rds.h" #include "loop.h"
@@@ -96,8 -95,9 +96,9 @@@ out */ static void rds_loop_inc_free(struct rds_incoming *inc) { - struct rds_message *rm = container_of(inc, struct rds_message, m_inc); - rds_message_put(rm); + struct rds_message *rm = container_of(inc, struct rds_message, m_inc); + + rds_message_put(rm); }
/* we need to at least give the thread something to succeed */ diff --combined net/rds/tcp_connect.c index ba9ec67,f6e95d6..96c2c4d --- a/net/rds/tcp_connect.c +++ b/net/rds/tcp_connect.c @@@ -34,7 -34,6 +34,7 @@@ #include <linux/in.h> #include <net/tcp.h>
+#include "rds_single_path.h" #include "rds.h" #include "tcp.h"
@@@ -55,20 -54,19 +55,20 @@@ void rds_tcp_state_change(struct sock *
rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state);
- switch(sk->sk_state) { - /* ignore connecting sockets as they make progress */ - case TCP_SYN_SENT: - case TCP_SYN_RECV: - break; - case TCP_ESTABLISHED: - rds_connect_path_complete(&conn->c_path[0], - RDS_CONN_CONNECTING); - break; - case TCP_CLOSE_WAIT: - case TCP_CLOSE: - rds_conn_drop(conn); - default: - break; + switch (sk->sk_state) { + /* ignore connecting sockets as they make progress */ + case TCP_SYN_SENT: + case TCP_SYN_RECV: + break; + case TCP_ESTABLISHED: - rds_connect_path_complete(conn, RDS_CONN_CONNECTING); ++ rds_connect_path_complete(&conn->c_path[0], ++ RDS_CONN_CONNECTING); + break; + case TCP_CLOSE_WAIT: + case TCP_CLOSE: + rds_conn_drop(conn); + default: + break; } out: read_unlock_bh(&sk->sk_callback_lock); diff --combined net/rds/tcp_listen.c index 22d9bb1,245542c..f9cc945 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c @@@ -35,7 -35,6 +35,7 @@@ #include <linux/in.h> #include <net/tcp.h>
+#include "rds_single_path.h" #include "rds.h" #include "tcp.h"
@@@ -133,19 -132,17 +133,19 @@@ int rds_tcp_accept_one(struct socket *s * c_transport_data. */ if (ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr) || - !conn->c_outgoing) { + !conn->c_path[0].cp_outgoing) { goto rst_nsk; } else { rds_tcp_reset_callbacks(new_sock, conn); - conn->c_outgoing = 0; + conn->c_path[0].cp_outgoing = 0; /* rds_connect_path_complete() marks RDS_CONN_UP */ - rds_connect_path_complete(conn, RDS_CONN_RESETTING); + rds_connect_path_complete(&conn->c_path[0], - RDS_CONN_DISCONNECTING); ++ RDS_CONN_RESETTING); } } else { rds_tcp_set_callbacks(new_sock, conn); - rds_connect_path_complete(conn, RDS_CONN_CONNECTING); + rds_connect_path_complete(&conn->c_path[0], + RDS_CONN_CONNECTING); } new_sock = NULL; ret = 0; diff --combined net/rds/tcp_recv.c index 3f8fb38,6e6a711..4a87d9e --- a/net/rds/tcp_recv.c +++ b/net/rds/tcp_recv.c @@@ -34,7 -34,6 +34,7 @@@ #include <linux/slab.h> #include <net/tcp.h>
+#include "rds_single_path.h" #include "rds.h" #include "tcp.h"
@@@ -172,7 -171,7 +172,7 @@@ static int rds_tcp_data_recv(read_descr while (left) { if (!tinc) { tinc = kmem_cache_alloc(rds_tcp_incoming_slab, - arg->gfp); + arg->gfp); if (!tinc) { desc->error = -ENOMEM; goto out; diff --combined net/rds/tcp_send.c index 2b3414f,618be69..710f1aa --- a/net/rds/tcp_send.c +++ b/net/rds/tcp_send.c @@@ -34,7 -34,6 +34,7 @@@ #include <linux/in.h> #include <net/tcp.h>
+#include "rds_single_path.h" #include "rds.h" #include "tcp.h"
@@@ -67,19 -66,19 +67,19 @@@ void rds_tcp_xmit_complete(struct rds_c static int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len) { struct kvec vec = { - .iov_base = data, - .iov_len = len, + .iov_base = data, + .iov_len = len, + }; + struct msghdr msg = { + .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL, }; - struct msghdr msg = { - .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL, - };
return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len); }
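The reindented rds_tcp_sendmsg() shows the kvec/msghdr idiom clearly. For reference, a userspace analogue of the same pattern built on sendmsg(2); this is a sketch, not RDS code, and socket setup is omitted:

#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t send_one(int fd, void *data, size_t len)
{
        struct iovec vec = {
                .iov_base = data,
                .iov_len  = len,
        };
        struct msghdr msg = {
                .msg_iov    = &vec,
                .msg_iovlen = 1,
        };

        /* non-blocking, and no SIGPIPE if the peer has gone away */
        return sendmsg(fd, &msg, MSG_DONTWAIT | MSG_NOSIGNAL);
}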
/* the core send_sem serializes this with other xmit and shutdown */ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm, - unsigned int hdr_off, unsigned int sg, unsigned int off) + unsigned int hdr_off, unsigned int sg, unsigned int off) { struct rds_tcp_connection *tc = conn->c_transport_data; int done = 0; @@@ -197,7 -196,7 +197,7 @@@ void rds_tcp_write_space(struct sock *s tc->t_last_seen_una = rds_tcp_snd_una(tc); rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked);
- if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) + if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) queue_delayed_work(rds_wq, &conn->c_send_w, 0);
out: diff --combined net/sched/act_api.c index f8c61d2,c7a0b0d..47ec230 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@@ -224,8 -224,8 +224,8 @@@ int tcf_hash_search(struct tc_action_ne } EXPORT_SYMBOL(tcf_hash_search);
-int tcf_hash_check(struct tc_action_net *tn, u32 index, struct tc_action *a, - int bind) +bool tcf_hash_check(struct tc_action_net *tn, u32 index, struct tc_action *a, + int bind) { struct tcf_hashinfo *hinfo = tn->hinfo; struct tcf_common *p = NULL; @@@ -235,9 -235,9 +235,9 @@@ p->tcfc_refcnt++; a->priv = p; a->hinfo = hinfo; - return 1; + return true; } - return 0; + return false; } EXPORT_SYMBOL(tcf_hash_check);
@@@ -283,11 -283,10 +283,11 @@@ err2 p->tcfc_index = index ? index : tcf_hash_new_index(tn); p->tcfc_tm.install = jiffies; p->tcfc_tm.lastuse = jiffies; + p->tcfc_tm.firstuse = 0; if (est) { err = gen_new_estimator(&p->tcfc_bstats, p->cpu_bstats, &p->tcfc_rate_est, - &p->tcfc_lock, est); + &p->tcfc_lock, NULL, est); if (err) { free_percpu(p->cpu_qstats); goto err2; @@@ -504,8 -503,8 +504,8 @@@ nla_put_failure } EXPORT_SYMBOL(tcf_action_dump_1);
-int -tcf_action_dump(struct sk_buff *skb, struct list_head *actions, int bind, int ref) +int tcf_action_dump(struct sk_buff *skb, struct list_head *actions, + int bind, int ref) { struct tc_action *a; int err = -EINVAL; @@@ -671,7 -670,7 +671,7 @@@ int tcf_action_copy_stats(struct sk_buf if (err < 0) goto errout;
- if (gnet_stats_copy_basic(&d, p->cpu_bstats, &p->tcfc_bstats) < 0 || + if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfc_bstats) < 0 || gnet_stats_copy_rate_est(&d, &p->tcfc_bstats, &p->tcfc_rate_est) < 0 || gnet_stats_copy_queue(&d, p->cpu_qstats, @@@ -688,9 -687,9 +688,9 @@@ errout return -1; }
-static int -tca_get_fill(struct sk_buff *skb, struct list_head *actions, u32 portid, u32 seq, -u16 flags, int event, int bind, int ref) +static int tca_get_fill(struct sk_buff *skb, struct list_head *actions, + u32 portid, u32 seq, u16 flags, int event, int bind, + int ref) { struct tcamsg *t; struct nlmsghdr *nlh; @@@ -731,8 -730,7 +731,8 @@@ act_get_notify(struct net *net, u32 por skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) return -ENOBUFS; - if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event, 0, 0) <= 0) { + if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event, + 0, 0) <= 0) { kfree_skb(skb); return -EINVAL; } @@@ -840,8 -838,7 +840,8 @@@ static int tca_action_flush(struct net if (a.ops == NULL) /* some idiot trying to flush an unknown action */ goto err_out;
- nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0); + nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, + sizeof(*t), 0); if (!nlh) goto out_module_put; t = nlmsg_data(nlh); @@@ -1004,8 -1001,7 +1004,8 @@@ static int tc_ctl_action(struct sk_buf u32 portid = skb ? NETLINK_CB(skb).portid : 0; int ret = 0, ovr = 0;
- if ((n->nlmsg_type != RTM_GETACTION) && !netlink_capable(skb, CAP_NET_ADMIN)) + if ((n->nlmsg_type != RTM_GETACTION) && + !netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM;
ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL); @@@ -1122,7 -1118,7 +1122,7 @@@ tc_dump_action(struct sk_buff *skb, str nla_nest_end(skb, nest); ret = skb->len; } else - nla_nest_cancel(skb, nest); + nlmsg_trim(skb, b);
nlh->nlmsg_len = skb_tail_pointer(skb) - b; if (NETLINK_CB(cb->skb).portid && ret) diff --combined net/sched/act_ife.c index b7fa969,ea4a2fe..845ab51 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@@ -106,9 -106,9 +106,9 @@@ int ife_get_meta_u16(struct sk_buff *sk } EXPORT_SYMBOL_GPL(ife_get_meta_u16);
- int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval) + int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp) { - mi->metaval = kmemdup(metaval, sizeof(u32), GFP_KERNEL); + mi->metaval = kmemdup(metaval, sizeof(u32), gfp); if (!mi->metaval) return -ENOMEM;
@@@ -116,9 -116,9 +116,9 @@@ } EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);
- int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval) + int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp) { - mi->metaval = kmemdup(metaval, sizeof(u16), GFP_KERNEL); + mi->metaval = kmemdup(metaval, sizeof(u16), gfp); if (!mi->metaval) return -ENOMEM;
@@@ -240,10 -240,10 +240,10 @@@ static int ife_validate_metatype(struc }
/* called when adding new meta information - * under ife->tcf_lock + * under ife->tcf_lock for existing action */ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, - void *val, int len) + void *val, int len, bool exists) { struct tcf_meta_ops *ops = find_ife_oplist(metaid); int ret = 0; @@@ -251,11 -251,13 +251,13 @@@ if (!ops) { ret = -ENOENT; #ifdef CONFIG_MODULES - spin_unlock_bh(&ife->tcf_lock); + if (exists) + spin_unlock_bh(&ife->tcf_lock); rtnl_unlock(); request_module("ifemeta%u", metaid); rtnl_lock(); - spin_lock_bh(&ife->tcf_lock); + if (exists) + spin_lock_bh(&ife->tcf_lock); ops = find_ife_oplist(metaid); #endif } @@@ -272,10 -274,10 +274,10 @@@ }
/* called when adding new meta information - * under ife->tcf_lock + * under ife->tcf_lock for existing action */ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, - int len) + int len, bool atomic) { struct tcf_meta_info *mi = NULL; struct tcf_meta_ops *ops = find_ife_oplist(metaid); @@@ -284,7 -286,7 +286,7 @@@ if (!ops) return -ENOENT;
- mi = kzalloc(sizeof(*mi), GFP_KERNEL); + mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL); if (!mi) { /*put back what find_ife_oplist took */ module_put(ops->owner); @@@ -294,7 -296,7 +296,7 @@@ mi->metaid = metaid; mi->ops = ops; if (len > 0) { - ret = ops->alloc(mi, metaval); + ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL); if (ret != 0) { kfree(mi); module_put(ops->owner); @@@ -313,11 -315,13 +315,13 @@@ static int use_all_metadata(struct tcf_ int rc = 0; int installed = 0;
+ read_lock(&ife_mod_lock); list_for_each_entry(o, &ifeoplist, list) { - rc = add_metainfo(ife, o->metaid, NULL, 0); + rc = add_metainfo(ife, o->metaid, NULL, 0, true); if (rc == 0) installed += 1; } + read_unlock(&ife_mod_lock);
if (installed) return 0; @@@ -385,8 -389,9 +389,9 @@@ static void tcf_ife_cleanup(struct tc_a spin_unlock_bh(&ife->tcf_lock); }
- /* under ife->tcf_lock */ - static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb) + /* under ife->tcf_lock for existing action */ + static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, + bool exists) { int len = 0; int rc = 0; @@@ -398,11 -403,11 +403,11 @@@ val = nla_data(tb[i]); len = nla_len(tb[i]);
- rc = load_metaops_and_vet(ife, i, val, len); + rc = load_metaops_and_vet(ife, i, val, len, exists); if (rc != 0) return rc;
- rc = add_metainfo(ife, i, val, len); + rc = add_metainfo(ife, i, val, len, exists); if (rc) return rc; } @@@ -423,8 -428,7 +428,8 @@@ static int tcf_ife_init(struct net *net u16 ife_type = 0; u8 *daddr = NULL; u8 *saddr = NULL; - int ret = 0, exists = 0; + bool exists = false; + int ret = 0; int err;
err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy); @@@ -475,7 -479,8 +480,8 @@@ saddr = nla_data(tb[TCA_IFE_SMAC]); }
- spin_lock_bh(&ife->tcf_lock); + if (exists) + spin_lock_bh(&ife->tcf_lock); ife->tcf_action = parm->action;
if (parm->flags & IFE_ENCODE) { @@@ -505,11 -510,12 +511,12 @@@ metadata_parse_err if (ret == ACT_P_CREATED) _tcf_ife_cleanup(a, bind);
- spin_unlock_bh(&ife->tcf_lock); + if (exists) + spin_unlock_bh(&ife->tcf_lock); return err; }
- err = populate_metalist(ife, tb2); + err = populate_metalist(ife, tb2, exists); if (err) goto metadata_parse_err;
@@@ -524,12 -530,14 +531,14 @@@ if (ret == ACT_P_CREATED) _tcf_ife_cleanup(a, bind);
- spin_unlock_bh(&ife->tcf_lock); + if (exists) + spin_unlock_bh(&ife->tcf_lock); return err; } }
- spin_unlock_bh(&ife->tcf_lock); + if (exists) + spin_unlock_bh(&ife->tcf_lock);
if (ret == ACT_P_CREATED) tcf_hash_insert(tn, a); @@@ -554,7 -562,9 +563,7 @@@ static int tcf_ife_dump(struct sk_buff if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt)) goto nla_put_failure;
- t.install = jiffies_to_clock_t(jiffies - ife->tcf_tm.install); - t.lastuse = jiffies_to_clock_t(jiffies - ife->tcf_tm.lastuse); - t.expires = jiffies_to_clock_t(ife->tcf_tm.expires); + tcf_tm_dump(&t, &ife->tcf_tm); if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD)) goto nla_put_failure;
@@@ -622,7 -632,7 +631,7 @@@ static int tcf_ife_decode(struct sk_buf
spin_lock(&ife->tcf_lock); bstats_update(&ife->tcf_bstats, skb); - ife->tcf_tm.lastuse = jiffies; + tcf_lastuse_update(&ife->tcf_tm); spin_unlock(&ife->tcf_lock);
ifehdrln = ntohs(ifehdrln); @@@ -710,7 -720,7 +719,7 @@@ static int tcf_ife_encode(struct sk_buf
spin_lock(&ife->tcf_lock); bstats_update(&ife->tcf_bstats, skb); - ife->tcf_tm.lastuse = jiffies; + tcf_lastuse_update(&ife->tcf_tm);
if (!metalen) { /* no metadata to send */ /* abuse overlimits to count when we allow packet @@@ -801,7 -811,7 +810,7 @@@ static int tcf_ife_act(struct sk_buff * pr_info_ratelimited("unknown failure(policy neither de/encode\n"); spin_lock(&ife->tcf_lock); bstats_update(&ife->tcf_bstats, skb); - ife->tcf_tm.lastuse = jiffies; + tcf_lastuse_update(&ife->tcf_tm); ife->tcf_qstats.drops++; spin_unlock(&ife->tcf_lock);
diff --combined net/sched/act_ipt.c index 6148e32,d4bd19e..b8c5060 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@@ -34,8 -34,7 +34,8 @@@ static int ipt_net_id
static int xt_net_id;
-static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int hook) +static int ipt_init_target(struct xt_entry_target *t, char *table, + unsigned int hook) { struct xt_tgchk_param par; struct xt_target *target; @@@ -97,8 -96,7 +97,8 @@@ static int __tcf_ipt_init(struct tc_act struct tcf_ipt *ipt; struct xt_entry_target *td, *t; char *tname; - int ret = 0, err, exists = 0; + bool exists = false; + int ret = 0, err; u32 hook = 0; u32 index = 0;
@@@ -123,10 -121,13 +123,13 @@@ }
td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]); - if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) + if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) { + if (exists) + tcf_hash_release(a, bind); return -EINVAL; + }
- if (!tcf_hash_check(tn, index, a, bind)) { + if (!exists) { ret = tcf_hash_create(tn, index, est, a, sizeof(*ipt), bind, false); if (ret) @@@ -214,7 -215,7 +217,7 @@@ static int tcf_ipt(struct sk_buff *skb
spin_lock(&ipt->tcf_lock);
- ipt->tcf_tm.lastuse = jiffies; + tcf_lastuse_update(&ipt->tcf_tm); bstats_update(&ipt->tcf_bstats, skb);
/* yes, we have to worry about both in and out dev @@@ -244,7 -245,7 +247,7 @@@ default: net_notice_ratelimited("tc filter: Bogus netfilter code %d assume ACCEPT\n", ret); - result = TC_POLICE_OK; + result = TC_ACT_OK; break; } spin_unlock(&ipt->tcf_lock); @@@ -252,8 -253,7 +255,8 @@@
}
-static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) +static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, + int ref) { unsigned char *b = skb_tail_pointer(skb); struct tcf_ipt *ipt = a->priv; @@@ -280,11 -280,11 +283,11 @@@ nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) || nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname)) goto nla_put_failure; - tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install); - tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse); - tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires); + + tcf_tm_dump(&tm, &ipt->tcf_tm); if (nla_put_64bit(skb, TCA_IPT_TM, sizeof(tm), &tm, TCA_IPT_PAD)) goto nla_put_failure; + kfree(t); return skb->len;
diff --combined net/sched/sch_fifo.c index 6ea0db4,2e4bd2c..baeed6a --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c @@@ -19,35 -19,36 +19,39 @@@
/* 1 band FIFO pseudo-"scheduler" */
-static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit)) return qdisc_enqueue_tail(skb, sch);
- return qdisc_reshape_fail(skb, sch); + return qdisc_drop(skb, sch, to_free); }
-static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { if (likely(skb_queue_len(&sch->q) < sch->limit)) return qdisc_enqueue_tail(skb, sch);
- return qdisc_reshape_fail(skb, sch); + return qdisc_drop(skb, sch, to_free); }
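These fifo conversions are the simplest users of the new enqueue signature: instead of freeing a rejected skb inline, qdisc_drop() now chains it onto the caller-provided to_free list, and the caller frees the whole batch once the qdisc lock is released. A standalone model of that deferred-free convention (types invented):

#include <stdlib.h>

struct pkt { struct pkt *next; };

/* reject a packet: defer the free by pushing it onto the caller's list */
static void drop_pkt(struct pkt *p, struct pkt **to_free)
{
        p->next = *to_free;
        *to_free = p;
}

/* caller, after dropping its lock: free the batch in one pass */
static void free_batch(struct pkt *list)
{
        while (list) {
                struct pkt *next = list->next;

                free(list);
                list = next;
        }
}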
-static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { + unsigned int prev_backlog; + if (likely(skb_queue_len(&sch->q) < sch->limit)) return qdisc_enqueue_tail(skb, sch);
+ prev_backlog = sch->qstats.backlog; /* queue full, remove one skb to fulfill the limit */ - __qdisc_queue_drop_head(sch, &sch->q); + __qdisc_queue_drop_head(sch, &sch->q, to_free); qdisc_qstats_drop(sch); qdisc_enqueue_tail(skb, sch);
+ qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog); return NET_XMIT_CN; }
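The pfifo_head_drop change also fixes backlog accounting: dropping the head frees a different byte count than the new tail consumes, and the parent qdiscs must be told the difference, hence qdisc_tree_reduce_backlog() with prev_backlog minus the new backlog. A small standalone model of the arithmetic:

#include <stdio.h>

static unsigned int backlog;    /* bytes currently queued */

/* queue full: drop the head (head_len bytes), queue the new packet, and
 * return how many bytes the parents must subtract from their view;
 * negative if the new packet is bigger than the one dropped */
static int head_drop_enqueue(unsigned int head_len, unsigned int new_len)
{
        unsigned int prev_backlog = backlog;

        backlog = backlog - head_len + new_len;
        return (int)(prev_backlog - backlog);
}

int main(void)
{
        backlog = 3000;
        printf("freed: %d\n", head_drop_enqueue(1500, 100)); /* prints 1400 */
        return 0;
}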
@@@ -102,6 -103,7 +106,6 @@@ struct Qdisc_ops pfifo_qdisc_ops __read .enqueue = pfifo_enqueue, .dequeue = qdisc_dequeue_head, .peek = qdisc_peek_head, - .drop = qdisc_queue_drop, .init = fifo_init, .reset = qdisc_reset_queue, .change = fifo_init, @@@ -116,6 -118,7 +120,6 @@@ struct Qdisc_ops bfifo_qdisc_ops __read .enqueue = bfifo_enqueue, .dequeue = qdisc_dequeue_head, .peek = qdisc_peek_head, - .drop = qdisc_queue_drop, .init = fifo_init, .reset = qdisc_reset_queue, .change = fifo_init, @@@ -130,6 -133,7 +134,6 @@@ struct Qdisc_ops pfifo_head_drop_qdisc_ .enqueue = pfifo_tail_enqueue, .dequeue = qdisc_dequeue_head, .peek = qdisc_peek_head, - .drop = qdisc_queue_drop_head, .init = fifo_init, .reset = qdisc_reset_queue, .change = fifo_init, diff --combined net/sched/sch_htb.c index ba098f2,62f9d81..91982d9 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@@ -117,6 -117,7 +117,6 @@@ struct htb_class * Written often fields */ struct gnet_stats_basic_packed bstats; - struct gnet_stats_queue qstats; struct tc_htb_xstats xstats; /* our special stats */
/* token bucket parameters */ @@@ -139,8 -140,6 +139,8 @@@ enum htb_cmode cmode; /* current mode of the class */ struct rb_node pq_node; /* node for event queue */ struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */ + + unsigned int drops ____cacheline_aligned_in_smp; };
struct htb_level { @@@ -570,8 -569,7 +570,8 @@@ static inline void htb_deactivate(struc list_del_init(&cl->un.leaf.drop_list); }
-static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { int uninitialized_var(ret); struct htb_sched *q = qdisc_priv(sch); @@@ -583,20 -581,19 +583,20 @@@ __skb_queue_tail(&q->direct_queue, skb); q->direct_pkts++; } else { - return qdisc_drop(skb, sch); + return qdisc_drop(skb, sch, to_free); } #ifdef CONFIG_NET_CLS_ACT } else if (!cl) { if (ret & __NET_XMIT_BYPASS) qdisc_qstats_drop(sch); - kfree_skb(skb); + __qdisc_drop(skb, to_free); return ret; #endif - } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) { + } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q, + to_free)) != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) { qdisc_qstats_drop(sch); - cl->qstats.drops++; + cl->drops++; } return ret; } else { @@@ -892,6 -889,7 +892,6 @@@ static struct sk_buff *htb_dequeue(stru if (skb != NULL) { ok: qdisc_bstats_update(sch, skb); - qdisc_unthrottled(sch); qdisc_qstats_backlog_dec(sch, skb); sch->q.qlen--; return skb; @@@ -931,13 -929,38 +931,13 @@@ } qdisc_qstats_overlimit(sch); if (likely(next_event > q->now)) - qdisc_watchdog_schedule_ns(&q->watchdog, next_event, true); + qdisc_watchdog_schedule_ns(&q->watchdog, next_event); else schedule_work(&q->work); fin: return skb; }
-/* try to drop from each class (by prio) until one succeed */ -static unsigned int htb_drop(struct Qdisc *sch) -{ - struct htb_sched *q = qdisc_priv(sch); - int prio; - - for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) { - struct list_head *p; - list_for_each(p, q->drops + prio) { - struct htb_class *cl = list_entry(p, struct htb_class, - un.leaf.drop_list); - unsigned int len; - if (cl->un.leaf.q->ops->drop && - (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) { - sch->qstats.backlog -= len; - sch->q.qlen--; - if (!cl->un.leaf.q->q.qlen) - htb_deactivate(q, cl); - return len; - } - } - } - return 0; -} - /* reset all classes */ /* always called under BH & queue lock */ static void htb_reset(struct Qdisc *sch) @@@ -960,7 -983,7 +960,7 @@@ } } qdisc_watchdog_cancel(&q->watchdog); - __skb_queue_purge(&q->direct_queue); + __qdisc_reset_queue(&q->direct_queue); sch->q.qlen = 0; sch->qstats.backlog = 0; memset(q->hlevel, 0, sizeof(q->hlevel)); @@@ -984,7 -1007,9 +984,9 @@@ static void htb_work_func(struct work_s struct htb_sched *q = container_of(work, struct htb_sched, work); struct Qdisc *sch = q->watchdog.qdisc;
+ rcu_read_lock(); __netif_schedule(qdisc_root(sch)); + rcu_read_unlock(); }
static int htb_init(struct Qdisc *sch, struct nlattr *opt) @@@ -1111,22 -1136,16 +1113,22 @@@ static in htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d) { struct htb_class *cl = (struct htb_class *)arg; + struct gnet_stats_queue qs = { + .drops = cl->drops, + }; __u32 qlen = 0;
- if (!cl->level && cl->un.leaf.q) + if (!cl->level && cl->un.leaf.q) { qlen = cl->un.leaf.q->q.qlen; + qs.backlog = cl->un.leaf.q->qstats.backlog; + } cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens); cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
- if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 || + if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), + d, NULL, &cl->bstats) < 0 || gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 || - gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0) + gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0) return -1;
return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); @@@ -1239,7 -1258,7 +1241,7 @@@ static void htb_destroy(struct Qdisc *s htb_destroy_class(sch, cl); } qdisc_class_hash_destroy(&q->clhash); - __skb_queue_purge(&q->direct_queue); + __qdisc_reset_queue(&q->direct_queue); }
static int htb_delete(struct Qdisc *sch, unsigned long arg) @@@ -1378,8 -1397,7 +1380,8 @@@ static int htb_change_class(struct Qdis if (htb_rate_est || tca[TCA_RATE]) { err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est, - qdisc_root_sleeping_lock(sch), + NULL, + qdisc_root_sleeping_running(sch), tca[TCA_RATE] ? : &est.nla); if (err) { kfree(cl); @@@ -1441,10 -1459,11 +1443,10 @@@ parent->children++; } else { if (tca[TCA_RATE]) { - spinlock_t *lock = qdisc_root_sleeping_lock(sch); - err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est, - lock, + NULL, + qdisc_root_sleeping_running(sch), tca[TCA_RATE]); if (err) return err; @@@ -1582,6 -1601,7 +1584,6 @@@ static struct Qdisc_ops htb_qdisc_ops _ .enqueue = htb_enqueue, .dequeue = htb_dequeue, .peek = qdisc_peek_dequeued, - .drop = htb_drop, .init = htb_init, .reset = htb_reset, .destroy = htb_destroy, diff --combined net/sched/sch_netem.c index 6eac3d8,178f163..aaaf021 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@@ -368,7 -368,9 +368,7 @@@ static void tfifo_reset(struct Qdisc *s struct sk_buff *skb = netem_rb_to_skb(p);
rb_erase(p, &q->t_root); - skb->next = NULL; - skb->prev = NULL; - kfree_skb(skb); + rtnl_kfree_skbs(skb, skb); } }
@@@ -397,8 -399,7 +397,8 @@@ static void tfifo_enqueue(struct sk_buf * when we statistically choose to corrupt one, we instead segment it, returning * the first packet to be corrupted, and re-enqueue the remaining frames */ -static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch) +static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct sk_buff *segs; netdev_features_t features = netif_skb_features(skb); @@@ -406,7 -407,7 +406,7 @@@ segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
if (IS_ERR_OR_NULL(segs)) { - qdisc_reshape_fail(skb, sch); + qdisc_drop(skb, sch, to_free); return NULL; } consume_skb(skb); @@@ -419,8 -420,7 +419,8 @@@ * NET_XMIT_DROP: queue length didn't change. * NET_XMIT_SUCCESS: one skb was queued. */ -static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) +static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, + struct sk_buff **to_free) { struct netem_sched_data *q = qdisc_priv(sch); /* We don't fill cb now as skb_unshare() may invalidate it */ @@@ -445,7 -445,7 +445,7 @@@ } if (count == 0) { qdisc_qstats_drop(sch); - kfree_skb(skb); + __qdisc_drop(skb, to_free); return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; }
@@@ -465,7 -465,7 +465,7 @@@ u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
q->duplicate = 0; - rootq->enqueue(skb2, rootq); + rootq->enqueue(skb2, rootq, to_free); q->duplicate = dupsave; }
@@@ -477,7 -477,7 +477,7 @@@ */ if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) { if (skb_is_gso(skb)) { - segs = netem_segment(skb, sch); + segs = netem_segment(skb, sch, to_free); if (!segs) return NET_XMIT_DROP; } else { @@@ -487,14 -487,10 +487,14 @@@ skb = segs; segs = segs->next;
- if (!(skb = skb_unshare(skb, GFP_ATOMIC)) || - (skb->ip_summed == CHECKSUM_PARTIAL && - skb_checksum_help(skb))) { - rc = qdisc_drop(skb, sch); + skb = skb_unshare(skb, GFP_ATOMIC); + if (unlikely(!skb)) { + qdisc_qstats_drop(sch); + goto finish_segs; + } + if (skb->ip_summed == CHECKSUM_PARTIAL && + skb_checksum_help(skb)) { + qdisc_drop(skb, sch, to_free); goto finish_segs; }
@@@ -503,7 -499,7 +503,7 @@@ }
if (unlikely(skb_queue_len(&sch->q) >= sch->limit)) - return qdisc_reshape_fail(skb, sch); + return qdisc_drop(skb, sch, to_free);
qdisc_qstats_backlog_inc(sch, skb);
@@@ -563,7 -559,7 +563,7 @@@ finish_segs segs->next = NULL; qdisc_skb_cb(segs)->pkt_len = segs->len; last_len = segs->len; - rc = qdisc_enqueue(segs, sch); + rc = qdisc_enqueue(segs, sch, to_free); if (rc != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(rc)) qdisc_qstats_drop(sch); @@@ -580,17 -576,50 +580,17 @@@ return NET_XMIT_SUCCESS; }
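netem's GSO path above re-enqueues every segment individually and has to keep the drop statistics consistent while doing so. A standalone sketch of that per-segment loop (list type and enqueue callback invented):

struct seg { struct seg *next; unsigned int len; };

/* enqueue each segment on its own; returns the number dropped and
 * reports how many packets and bytes actually made it in */
static unsigned int enqueue_segs(struct seg *segs, int (*enq)(struct seg *),
                                 unsigned int *nb, unsigned int *bytes)
{
        unsigned int dropped = 0;

        *nb = 0;
        *bytes = 0;
        while (segs) {
                struct seg *next = segs->next;
                unsigned int len = segs->len;

                segs->next = NULL;      /* detach before handing it over */
                if (enq(segs) == 0) {
                        (*nb)++;
                        *bytes += len;
                } else {
                        dropped++;
                }
                segs = next;
        }
        return dropped;
}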
-static unsigned int netem_drop(struct Qdisc *sch) -{ - struct netem_sched_data *q = qdisc_priv(sch); - unsigned int len; - - len = qdisc_queue_drop(sch); - - if (!len) { - struct rb_node *p = rb_first(&q->t_root); - - if (p) { - struct sk_buff *skb = netem_rb_to_skb(p); - - rb_erase(p, &q->t_root); - sch->q.qlen--; - skb->next = NULL; - skb->prev = NULL; - qdisc_qstats_backlog_dec(sch, skb); - kfree_skb(skb); - } - } - if (!len && q->qdisc && q->qdisc->ops->drop) - len = q->qdisc->ops->drop(q->qdisc); - if (len) - qdisc_qstats_drop(sch); - - return len; -} - static struct sk_buff *netem_dequeue(struct Qdisc *sch) { struct netem_sched_data *q = qdisc_priv(sch); struct sk_buff *skb; struct rb_node *p;
- if (qdisc_is_throttled(sch)) - return NULL; - tfifo_dequeue: skb = __skb_dequeue(&sch->q); if (skb) { qdisc_qstats_backlog_dec(sch, skb); deliver: - qdisc_unthrottled(sch); qdisc_bstats_update(sch, skb); return skb; } @@@ -621,17 -650,14 +621,17 @@@ #endif
if (q->qdisc) { + unsigned int pkt_len = qdisc_pkt_len(skb); - int err = qdisc_enqueue(skb, q->qdisc); + struct sk_buff *to_free = NULL; + int err;
+ err = qdisc_enqueue(skb, q->qdisc, &to_free); + kfree_skb_list(to_free); - if (unlikely(err != NET_XMIT_SUCCESS)) { - if (net_xmit_drop_count(err)) { - qdisc_qstats_drop(sch); - qdisc_tree_reduce_backlog(sch, 1, - qdisc_pkt_len(skb)); - } + if (err != NET_XMIT_SUCCESS && + net_xmit_drop_count(err)) { + qdisc_qstats_drop(sch); + qdisc_tree_reduce_backlog(sch, 1, + pkt_len); } goto tfifo_dequeue; } @@@ -1117,6 -1143,7 +1117,6 @@@ static struct Qdisc_ops netem_qdisc_op .enqueue = netem_enqueue, .dequeue = netem_dequeue, .peek = qdisc_peek_dequeued, - .drop = netem_drop, .init = netem_init, .reset = netem_reset, .destroy = netem_destroy, diff --combined net/sched/sch_prio.c index f4d443a,a356450..8f57589 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@@ -67,7 -67,7 +67,7 @@@ prio_classify(struct sk_buff *skb, stru }
static int -prio_enqueue(struct sk_buff *skb, struct Qdisc *sch) +prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct Qdisc *qdisc; int ret; @@@ -83,7 -83,7 +83,7 @@@ } #endif
- ret = qdisc_enqueue(skb, qdisc); + ret = qdisc_enqueue(skb, qdisc, to_free); if (ret == NET_XMIT_SUCCESS) { qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; @@@ -127,6 -127,25 +127,6 @@@ static struct sk_buff *prio_dequeue(str
}
-static unsigned int prio_drop(struct Qdisc *sch) -{ - struct prio_sched_data *q = qdisc_priv(sch); - int prio; - unsigned int len; - struct Qdisc *qdisc; - - for (prio = q->bands-1; prio >= 0; prio--) { - qdisc = q->queues[prio]; - if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) { - sch->qstats.backlog -= len; - sch->q.qlen--; - return len; - } - } - return 0; -} - - static void prio_reset(struct Qdisc *sch) { @@@ -153,8 -172,9 +153,9 @@@ prio_destroy(struct Qdisc *sch static int prio_tune(struct Qdisc *sch, struct nlattr *opt) { struct prio_sched_data *q = qdisc_priv(sch); + struct Qdisc *queues[TCQ_PRIO_BANDS]; + int oldbands = q->bands, i; struct tc_prio_qopt *qopt; - int i;
if (nla_len(opt) < sizeof(*qopt)) return -EINVAL; @@@ -168,62 -188,42 +169,42 @@@ return -EINVAL; }
+ /* Before commit, make sure we can allocate all new qdiscs */ + for (i = oldbands; i < qopt->bands; i++) { + queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, + TC_H_MAKE(sch->handle, i + 1)); + if (!queues[i]) { + while (i > oldbands) + qdisc_destroy(queues[--i]); + return -ENOMEM; + } + } + sch_tree_lock(sch); q->bands = qopt->bands; memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
- for (i = q->bands; i < TCQ_PRIO_BANDS; i++) { + for (i = q->bands; i < oldbands; i++) { struct Qdisc *child = q->queues[i]; - q->queues[i] = &noop_qdisc; - if (child != &noop_qdisc) { - qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog); - qdisc_destroy(child); - } - } - sch_tree_unlock(sch);
- for (i = 0; i < q->bands; i++) { - if (q->queues[i] == &noop_qdisc) { - struct Qdisc *child, *old; - - child = qdisc_create_dflt(sch->dev_queue, - &pfifo_qdisc_ops, - TC_H_MAKE(sch->handle, i + 1)); - if (child) { - sch_tree_lock(sch); - old = q->queues[i]; - q->queues[i] = child; - - if (old != &noop_qdisc) { - qdisc_tree_reduce_backlog(old, - old->q.qlen, - old->qstats.backlog); - qdisc_destroy(old); - } - sch_tree_unlock(sch); - } - } + qdisc_tree_reduce_backlog(child, child->q.qlen, + child->qstats.backlog); + qdisc_destroy(child); } + + for (i = oldbands; i < q->bands; i++) + q->queues[i] = queues[i]; + + sch_tree_unlock(sch); return 0; }
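The rewritten prio_tune() above follows a classic two-phase shape: allocate every new band's qdisc before taking the tree lock, so the commit under the lock cannot fail, and unwind the partial allocations on error while leaving the old configuration untouched. A standalone sketch of the pattern:

#include <stdlib.h>

#define MAX_SLOTS 16

/* grow an array of resources from oldn to newn slots, all or nothing */
static int grow_slots(void *slots[MAX_SLOTS], int oldn, int newn)
{
        void *fresh[MAX_SLOTS];
        int i;

        if (newn > MAX_SLOTS)
                return -1;

        /* phase 1: allocate everything up front, roll back on failure */
        for (i = oldn; i < newn; i++) {
                fresh[i] = malloc(64);
                if (!fresh[i]) {
                        while (i > oldn)
                                free(fresh[--i]);
                        return -1;      /* old configuration still intact */
                }
        }

        /* phase 2: commit; in the kernel this part runs under
         * sch_tree_lock() and cannot fail */
        for (i = oldn; i < newn; i++)
                slots[i] = fresh[i];

        return 0;
}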
static int prio_init(struct Qdisc *sch, struct nlattr *opt) { - struct prio_sched_data *q = qdisc_priv(sch); - int i; - - for (i = 0; i < TCQ_PRIO_BANDS; i++) - q->queues[i] = &noop_qdisc; - - if (opt == NULL) { + if (!opt) return -EINVAL; - } else { - int err;
- if ((err = prio_tune(sch, opt)) != 0) - return err; - } - return 0; + return prio_tune(sch, opt); }
static int prio_dump(struct Qdisc *sch, struct sk_buff *skb) @@@ -304,8 -304,7 +285,8 @@@ static int prio_dump_class_stats(struc struct Qdisc *cl_q;
cl_q = q->queues[cl - 1]; - if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 || + if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), + d, NULL, &cl_q->bstats) < 0 || gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0) return -1;
@@@ -364,6 -363,7 +345,6 @@@ static struct Qdisc_ops prio_qdisc_ops .enqueue = prio_enqueue, .dequeue = prio_dequeue, .peek = prio_peek, - .drop = prio_drop, .init = prio_init, .reset = prio_reset, .destroy = prio_destroy, diff --combined net/tipc/bearer.c index 9a70e1d,bf8f05c..8584cc4 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@@ -1,7 -1,7 +1,7 @@@ /* * net/tipc/bearer.c: TIPC bearer code * - * Copyright (c) 1996-2006, 2013-2014, Ericsson AB + * Copyright (c) 1996-2006, 2013-2016, Ericsson AB * Copyright (c) 2004-2006, 2010-2013, Wind River Systems * All rights reserved. * @@@ -39,7 -39,6 +39,7 @@@ #include "bearer.h" #include "link.h" #include "discover.h" +#include "monitor.h" #include "bcast.h" #include "netlink.h"
@@@ -314,10 -313,6 +314,10 @@@ restart rcu_assign_pointer(tn->bearer_list[bearer_id], b); if (skb) tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr); + + if (tipc_mon_create(net, bearer_id)) + return -ENOMEM; + pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n", name, tipc_addr_string_fill(addr_string, disc_domain), priority); @@@ -353,7 -348,6 +353,7 @@@ static void bearer_disable(struct net * tipc_disc_delete(b->link_req); RCU_INIT_POINTER(tn->bearer_list[bearer_id], NULL); kfree_rcu(b, rcu); + tipc_mon_delete(net, bearer_id); }
int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, @@@ -411,7 -405,7 +411,7 @@@ int tipc_l2_send_msg(struct net *net, s return 0;
/* Send RESET message even if bearer is detached from device */ - tipc_ptr = rtnl_dereference(dev->tipc_ptr); + tipc_ptr = rcu_dereference_rtnl(dev->tipc_ptr); if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb)))) goto drop;
diff --combined net/tipc/link.c index 03f8bdf,67b6ab9..c1df33f --- a/net/tipc/link.c +++ b/net/tipc/link.c @@@ -42,7 -42,6 +42,7 @@@ #include "name_distr.h" #include "discover.h" #include "netlink.h" +#include "monitor.h"
#include <linux/pkt_sched.h>
@@@ -88,6 -87,7 +88,6 @@@ struct tipc_stats * @peer_bearer_id: bearer id used by link's peer endpoint * @bearer_id: local bearer id used by link * @tolerance: minimum link continuity loss needed to reset link [in ms] - * @keepalive_intv: link keepalive timer interval * @abort_limit: # of unacknowledged continuity probes needed to reset link * @state: current state of link FSM * @peer_caps: bitmap describing capabilities of peer node @@@ -96,7 -96,6 +96,7 @@@ * @pmsg: convenience pointer to "proto_msg" field * @priority: current link priority * @net_plane: current link network plane ('A' through 'H') + * @mon_state: cookie with information needed by link monitor * @backlog_limit: backlog queue congestion thresholds (indexed by importance) * @exp_msg_count: # of tunnelled messages expected during link changeover * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset @@@ -132,6 -131,7 +132,6 @@@ struct tipc_link u32 peer_bearer_id; u32 bearer_id; u32 tolerance; - unsigned long keepalive_intv; u32 abort_limit; u32 state; u16 peer_caps; @@@ -140,7 -140,6 +140,7 @@@ char if_name[TIPC_MAX_IF_NAME]; u32 priority; char net_plane; + struct tipc_mon_state mon_state; u16 rst_cnt;
/* Failover/synch */ @@@ -705,31 -704,25 +705,32 @@@ static void link_profile_stats(struct t */ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq) { - int mtyp, rc = 0; + int mtyp = 0; + int rc = 0; bool state = false; bool probe = false; bool setup = false; u16 bc_snt = l->bc_sndlink->snd_nxt - 1; u16 bc_acked = l->bc_rcvlink->acked; - - link_profile_stats(l); + struct tipc_mon_state *mstate = &l->mon_state;
switch (l->state) { case LINK_ESTABLISHED: case LINK_SYNCHING: - if (l->silent_intv_cnt > l->abort_limit) - return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); mtyp = STATE_MSG; + link_profile_stats(l); + tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id); + if (mstate->reset || (l->silent_intv_cnt > l->abort_limit)) + return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); state = bc_acked != bc_snt; - probe = l->silent_intv_cnt; - l->silent_intv_cnt++; + state |= l->bc_rcvlink->rcv_unacked; + state |= l->rcv_unacked; + state |= !skb_queue_empty(&l->transmq); + state |= !skb_queue_empty(&l->deferdq); + probe = mstate->probing; + probe |= l->silent_intv_cnt; + if (probe || mstate->monitoring) + l->silent_intv_cnt++; break; case LINK_RESET: setup = l->rst_cnt++ <= 4; @@@ -840,7 -833,6 +841,7 @@@ void tipc_link_reset(struct tipc_link * l->stats.recv_info = 0; l->stale_count = 0; l->bc_peer_is_up = false; + memset(&l->mon_state, 0, sizeof(l->mon_state)); tipc_link_reset_stats(l); }
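The reworked tipc_link_timeout() above ORs several "something is still outstanding" conditions into one flag and only advances the silent-interval counter when a probe or monitoring is actually in effect. A compact standalone sketch of that decision (fields renamed and simplified):

struct link_tick {
        int bc_unacked;         /* broadcast acks outstanding */
        int rcv_unacked;        /* unicast acks outstanding */
        int queued;             /* transmq or deferdq non-empty */
        int probing;            /* monitor requested a probe */
        int monitoring;         /* peer is covered by the monitor */
        int silent_intv_cnt;
        int abort_limit;
};

/* returns 1 if the link should be declared failed this tick */
static int timeout_tick(struct link_tick *l, int *send_state, int *probe)
{
        if (l->silent_intv_cnt > l->abort_limit)
                return 1;

        *send_state = l->bc_unacked || l->rcv_unacked || l->queued;
        *probe = l->probing || l->silent_intv_cnt;
        if (*probe || l->monitoring)
                l->silent_intv_cnt++;

        return 0;
}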
@@@ -1249,9 -1241,6 +1250,9 @@@ static void tipc_link_build_proto_msg(s struct tipc_msg *hdr; struct sk_buff_head *dfq = &l->deferdq; bool node_up = link_is_up(l->bc_rcvlink); + struct tipc_mon_state *mstate = &l->mon_state; + int dlen = 0; + void *data;
/* Don't send protocol message during reset or link failover */ if (tipc_link_is_blocked(l)) @@@ -1264,13 -1253,12 +1265,13 @@@ rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE, - TIPC_MAX_IF_NAME, l->addr, + tipc_max_domain_size, l->addr, tipc_own_addr(l->net), 0, 0, 0); if (!skb) return;
hdr = buf_msg(skb); + data = msg_data(hdr); msg_set_session(hdr, l->session); msg_set_bearer_id(hdr, l->bearer_id); msg_set_net_plane(hdr, l->net_plane); @@@ -1286,18 -1274,14 +1287,18 @@@
if (mtyp == STATE_MSG) { msg_set_seq_gap(hdr, rcvgap); - msg_set_size(hdr, INT_H_SIZE); msg_set_probe(hdr, probe); + tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id); + msg_set_size(hdr, INT_H_SIZE + dlen); + skb_trim(skb, INT_H_SIZE + dlen); l->stats.sent_states++; l->rcv_unacked = 0; } else { /* RESET_MSG or ACTIVATE_MSG */ msg_set_max_pkt(hdr, l->advertised_mtu); - strcpy(msg_data(hdr), l->if_name); + strcpy(data, l->if_name); + msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME); + skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME); } if (probe) l->stats.sent_probes++; @@@ -1390,9 -1374,7 +1391,9 @@@ static int tipc_link_proto_rcv(struct t u16 peers_tol = msg_link_tolerance(hdr); u16 peers_prio = msg_linkprio(hdr); u16 rcv_nxt = l->rcv_nxt; + u16 dlen = msg_data_sz(hdr); int mtyp = msg_type(hdr); + void *data; char *if_name; int rc = 0;
@@@ -1402,10 -1384,6 +1403,10 @@@ if (tipc_own_addr(l->net) > msg_prevnode(hdr)) l->net_plane = msg_net_plane(hdr);
+ skb_linearize(skb); + hdr = buf_msg(skb); + data = msg_data(hdr); + switch (mtyp) { case RESET_MSG:
@@@ -1416,6 -1394,8 +1417,6 @@@ /* fall thru' */
case ACTIVATE_MSG: - skb_linearize(skb); - hdr = buf_msg(skb);
/* Complete own link name with peer's interface name */ if_name = strrchr(l->name, ':') + 1; @@@ -1423,7 -1403,7 +1424,7 @@@ break; if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME) break; - strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME); + strncpy(if_name, data, TIPC_MAX_IF_NAME);
/* Update own tolerance if peer indicates a non-zero value */ if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) @@@ -1471,8 -1451,6 +1472,8 @@@ rc = TIPC_LINK_UP_EVT; break; } + tipc_mon_rcv(l->net, data, dlen, l->addr, + &l->mon_state, l->bearer_id);
/* Send NACK if peer has sent pkts we haven't received yet */ if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l)) diff --combined tools/virtio/ringtest/Makefile index 50e086c,6173ada..877a8a4 --- a/tools/virtio/ringtest/Makefile +++ b/tools/virtio/ringtest/Makefile @@@ -1,6 -1,6 +1,6 @@@ all:
- all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder ptr_ring -all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder noring ++all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder ptr_ring noring
CFLAGS += -Wall CFLAGS += -pthread -O2 -ggdb @@@ -8,7 -8,6 +8,7 @@@ LDFLAGS += -pthread -O2 -ggd
main.o: main.c main.h ring.o: ring.c main.h +ptr_ring.o: ptr_ring.c main.h ../../../include/linux/ptr_ring.h virtio_ring_0_9.o: virtio_ring_0_9.c main.h virtio_ring_poll.o: virtio_ring_poll.c virtio_ring_0_9.c main.h virtio_ring_inorder.o: virtio_ring_inorder.c virtio_ring_0_9.c main.h @@@ -16,13 -15,13 +16,15 @@@ ring: ring.o main. virtio_ring_0_9: virtio_ring_0_9.o main.o virtio_ring_poll: virtio_ring_poll.o main.o virtio_ring_inorder: virtio_ring_inorder.o main.o +ptr_ring: ptr_ring.o main.o + noring: noring.o main.o clean: -rm main.o -rm ring.o ring -rm virtio_ring_0_9.o virtio_ring_0_9 -rm virtio_ring_poll.o virtio_ring_poll -rm virtio_ring_inorder.o virtio_ring_inorder + -rm ptr_ring.o ptr_ring + -rm noring.o noring
.PHONY: all clean