The following commit has been merged into the master branch:

commit 0ddead90b223faae475f3296a50bf574b7f7c69a
Merge: f7aec129a356ad049edddcb7e77b04a474fcf41f a090bd4ff8387c409732a8e059fbf264ea0bdd56
Author: David S. Miller <davem@davemloft.net>
Date:   Thu Jun 15 11:31:37 2017 -0400
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
The conflicts were two cases of overlapping changes in batman-adv and the qed driver.
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --combined MAINTAINERS
index f4e682c67475,09b5ab6a8a5c..10f158ee95a3
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -155,7 -155,7 +155,7 @@@ S: Maintaine
F: drivers/scsi/53c700*
6LOWPAN GENERIC (BTLE/IEEE 802.15.4)
-M: Alexander Aring <aar@pengutronix.de>
+M: Alexander Aring <alex.aring@gmail.com>
M: Jukka Rissanen <jukka.rissanen@linux.intel.com>
L: linux-bluetooth@vger.kernel.org
L: linux-wpan@vger.kernel.org
@@@ -1172,7 -1172,7 +1172,7 @@@ N: clps711
ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE
M: Hartley Sweeten <hsweeten@visionengravers.com>
- M: Ryan Mallon <rmallon@gmail.com>
+ M: Alexander Sverdlin <alexander.sverdlin@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-ep93xx/
@@@ -1489,13 -1489,15 +1489,15 @@@ M: Gregory Clement <gregory.clement@fre
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
- F: arch/arm/mach-mvebu/
- F: drivers/rtc/rtc-armada38x.c
F: arch/arm/boot/dts/armada*
F: arch/arm/boot/dts/kirkwood*
+ F: arch/arm/configs/mvebu_*_defconfig
+ F: arch/arm/mach-mvebu/
F: arch/arm64/boot/dts/marvell/armada*
F: drivers/cpufreq/mvebu-cpufreq.c
- F: arch/arm/configs/mvebu_*_defconfig
+ F: drivers/irqchip/irq-armada-370-xp.c
+ F: drivers/irqchip/irq-mvebu-*
+ F: drivers/rtc/rtc-armada38x.c
ARM/Marvell Berlin SoC support
M: Jisheng Zhang <jszhang@marvell.com>
@@@ -1721,7 -1723,6 +1723,6 @@@ N: rockchi
ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
M: Kukjin Kim <kgene@kernel.org>
M: Krzysztof Kozlowski <krzk@kernel.org>
- R: Javier Martinez Canillas <javier@osg.samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/
@@@ -1829,7 -1830,6 +1830,6 @@@ F: drivers/edac/altera_edac
ARM/STI ARCHITECTURE
M: Patrice Chotard <patrice.chotard@st.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
- L: kernel@stlinux.com
W: http://www.stlinux.com
S: Maintained
F: arch/arm/mach-sti/
@@@ -5622,7 -5622,7 +5622,7 @@@ F: scripts/get_maintainer.p
GENWQE (IBM Generic Workqueue Card)
M: Frank Haverkamp <haver@linux.vnet.ibm.com>
- M: Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
+ M: Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
S: Supported
F: drivers/misc/genwqe/
@@@ -5667,7 -5667,6 +5667,6 @@@ F: tools/testing/selftests/gpio
GPIO SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
- M: Alexandre Courbot <gnurou@gmail.com>
L: linux-gpio@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
S: Maintained
@@@ -6427,7 -6426,7 +6426,7 @@@ F: Documentation/cdrom/ide-c
F: drivers/ide/ide-cd*
IEEE 802.15.4 SUBSYSTEM
-M: Alexander Aring <aar@pengutronix.de>
+M: Alexander Aring <alex.aring@gmail.com>
M: Stefan Schmidt <stefan@osg.samsung.com>
L: linux-wpan@vger.kernel.org
W: http://wpan.cakelab.org/
@@@ -6738,7 -6737,6 +6737,7 @@@ F: Documentation/networking/i40e.tx
F: Documentation/networking/i40evf.txt
F: drivers/net/ethernet/intel/
F: drivers/net/ethernet/intel/*/
+F: include/linux/avf/virtchnl.h
INTEL RDMA RNIC DRIVER
M: Faisal Latif <faisal.latif@intel.com>
@@@ -7708,7 -7706,7 +7707,7 @@@ F: drivers/platform/x86/hp_accel.
LIVE PATCHING
M: Josh Poimboeuf <jpoimboe@redhat.com>
- M: Jessica Yu <jeyu@redhat.com>
+ M: Jessica Yu <jeyu@kernel.org>
M: Jiri Kosina <jikos@kernel.org>
M: Miroslav Benes <mbenes@suse.cz>
R: Petr Mladek <pmladek@suse.com>
@@@ -7979,12 -7977,6 +7978,12 @@@ S: Maintaine
F: drivers/net/ethernet/marvell/mv643xx_eth.*
F: include/linux/mv643xx.h
+MARVELL MV88X3310 PHY DRIVER
+M: Russell King <rmk@armlinux.org.uk>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/phy/marvell10g.c
+
MARVELL MVNETA ETHERNET DRIVER
M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
L: netdev@vger.kernel.org
@@@ -8318,16 -8310,6 +8317,16 @@@ W: http://www.mellanox.co
Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/mellanox/mlx5/core/en_*
+MELLANOX ETHERNET INNOVA DRIVER
+M: Ilan Tayari <ilant@mellanox.com>
+R: Boris Pismenny <borisp@mellanox.com>
+L: netdev@vger.kernel.org
+S: Supported
+W: http://www.mellanox.com
+Q: http://patchwork.ozlabs.org/project/netdev/list/
+F: drivers/net/ethernet/mellanox/mlx5/core/fpga/*
+F: include/linux/mlx5/mlx5_ifc_fpga.h
+
MELLANOX ETHERNET SWITCH DRIVERS
M: Jiri Pirko <jiri@mellanox.com>
M: Ido Schimmel <idosch@mellanox.com>
@@@ -8337,14 -8319,6 +8336,14 @@@ W: http://www.mellanox.co
Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/mellanox/mlxsw/
+MELLANOX FIRMWARE FLASH LIBRARY (mlxfw)
+M: Yotam Gigi <yotamg@mellanox.com>
+L: netdev@vger.kernel.org
+S: Supported
+W: http://www.mellanox.com
+Q: http://patchwork.ozlabs.org/project/netdev/list/
+F: drivers/net/ethernet/mellanox/mlxfw/
+
MELLANOX MLXCPLD I2C AND MUX DRIVER
M: Vadim Pasternak <vadimp@mellanox.com>
M: Michael Shych <michaelsh@mellanox.com>
@@@ -8486,16 -8460,6 +8485,16 @@@ F: drivers/media/platform/atmel/atmel-i
F: drivers/media/platform/atmel/atmel-isc-regs.h
F: devicetree/bindings/media/atmel-isc.txt
+MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
+M: Woojung Huh <Woojung.Huh@microchip.com>
+M: Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: net/dsa/tag_ksz.c
+F: drivers/net/dsa/microchip/*
+F: include/linux/platform_data/microchip-ksz.h
+F: Documentation/devicetree/bindings/net/dsa/ksz.txt
+
MICROCHIP USB251XB DRIVER
M: Richard Leitner <richard.leitner@skidata.com>
L: linux-usb@vger.kernel.org
@@@ -8623,7 -8587,7 +8622,7 @@@ S: Maintaine
F: drivers/media/dvb-frontends/mn88473*
MODULE SUPPORT
- M: Jessica Yu <jeyu@redhat.com>
+ M: Jessica Yu <jeyu@kernel.org>
M: Rusty Russell <rusty@rustcorp.com.au>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
S: Maintained
@@@ -10634,14 -10598,6 +10633,14 @@@ L: qemu-devel@nongnu.or
S: Maintained
F: drivers/firmware/qemu_fw_cfg.c
+QUANTENNA QTNFMAC WIRELESS DRIVER
+M: Igor Mitsyanko <imitsyanko@quantenna.com>
+M: Avinash Patil <avinashp@quantenna.com>
+M: Sergey Matyukevich <smatyukevich@quantenna.com>
+L: linux-wireless@vger.kernel.org
+S: Maintained
+F: drivers/net/wireless/quantenna
+
RADOS BLOCK DEVICE (RBD)
M: Ilya Dryomov <idryomov@gmail.com>
M: Sage Weil <sage@redhat.com>
@@@ -11311,7 -11267,6 +11310,6 @@@ F: drivers/media/rc/serial_ir.
STI CEC DRIVER
M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
- L: kernel@stlinux.com
S: Maintained
F: drivers/staging/media/st-cec/
F: Documentation/devicetree/bindings/media/stih-cec.txt
@@@ -11821,6 -11776,7 +11819,7 @@@ T: git git://git.kernel.org/pub/scm/lin
S: Supported
F: arch/arm/mach-davinci/
F: drivers/i2c/busses/i2c-davinci.c
+ F: arch/arm/boot/dts/da850*
TI DAVINCI SERIES MEDIA DRIVER
M: "Lad, Prabhakar" <prabhakar.csengg@gmail.com>
@@@ -13904,7 -13860,7 +13903,7 @@@ S: Odd fixe
F: drivers/net/wireless/wl3501*
WOLFSON MICROELECTRONICS DRIVERS
- L: patches@opensource.wolfsonmicro.com
+ L: patches@opensource.cirrus.com
T: git https://github.com/CirrusLogic/linux-drivers.git
W: https://github.com/CirrusLogic/linux-drivers/wiki
S: Supported

diff --combined arch/arm64/configs/defconfig
index d673c7096b90,97c123e09e45..d789858c4f1b
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@@ -68,6 -68,7 +68,7 @@@ CONFIG_PCIE_QCOM=
CONFIG_PCIE_ARMADA_8K=y
CONFIG_PCI_AARDVARK=y
CONFIG_PCIE_RCAR=y
+ CONFIG_PCIE_ROCKCHIP=m
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCI_XGENE=y
CONFIG_ARM64_VA_BITS_48=y
@@@ -190,7 -191,6 +191,7 @@@ CONFIG_RAVB=
CONFIG_SMC91X=y
CONFIG_SMSC911X=y
CONFIG_STMMAC_ETH=m
+CONFIG_DWMAC_SUN8I=m
CONFIG_MDIO_BUS_MUX_MMIOREG=y
CONFIG_MESON_GXL_PHY=m
CONFIG_MICREL_PHY=y
@@@ -209,6 -209,8 +210,8 @@@ CONFIG_BRCMFMAC=
CONFIG_WL18XX=m
CONFIG_WLCORE_SDIO=m
CONFIG_INPUT_EVDEV=y
+ CONFIG_KEYBOARD_ADC=m
+ CONFIG_KEYBOARD_CROS_EC=y
CONFIG_KEYBOARD_GPIO=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_PM8941_PWRKEY=y
@@@ -264,6 -266,7 +267,7 @@@ CONFIG_SPI_MESON_SPIFC=
CONFIG_SPI_ORION=y
CONFIG_SPI_PL022=y
CONFIG_SPI_QUP=y
+ CONFIG_SPI_ROCKCHIP=y
CONFIG_SPI_S3C64XX=y
CONFIG_SPI_SPIDEV=m
CONFIG_SPMI=y
@@@ -293,6 -296,7 +297,7 @@@ CONFIG_THERMAL_GOV_POWER_ALLOCATOR=
CONFIG_CPU_THERMAL=y
CONFIG_THERMAL_EMULATION=y
CONFIG_EXYNOS_THERMAL=y
+ CONFIG_ROCKCHIP_THERMAL=m
CONFIG_WATCHDOG=y
CONFIG_S3C2410_WATCHDOG=y
CONFIG_MESON_GXBB_WATCHDOG=m
@@@ -301,12 -305,14 +306,14 @@@ CONFIG_RENESAS_WDT=
CONFIG_BCM2835_WDT=y
CONFIG_MFD_CROS_EC=y
CONFIG_MFD_CROS_EC_I2C=y
+ CONFIG_MFD_CROS_EC_SPI=y
CONFIG_MFD_EXYNOS_LPASS=m
CONFIG_MFD_HI655X_PMIC=y
CONFIG_MFD_MAX77620=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_MFD_RK808=y
CONFIG_MFD_SEC_CORE=y
+ CONFIG_REGULATOR_FAN53555=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_GPIO=y
CONFIG_REGULATOR_HI655X=y
@@@ -474,8 -480,10 +481,10 @@@ CONFIG_ARCH_TEGRA_186_SOC=
CONFIG_EXTCON_USB_GPIO=y
CONFIG_IIO=y
CONFIG_EXYNOS_ADC=y
+ CONFIG_ROCKCHIP_SARADC=m
CONFIG_PWM=y
CONFIG_PWM_BCM2835=m
+ CONFIG_PWM_CROS_EC=m
CONFIG_PWM_MESON=m
CONFIG_PWM_ROCKCHIP=y
CONFIG_PWM_SAMSUNG=y
@@@ -485,6 -493,7 +494,7 @@@ CONFIG_PHY_HI6220_USB=
CONFIG_PHY_SUN4I_USB=y
CONFIG_PHY_ROCKCHIP_INNO_USB2=y
CONFIG_PHY_ROCKCHIP_EMMC=y
+ CONFIG_PHY_ROCKCHIP_PCIE=m
CONFIG_PHY_XGENE=y
CONFIG_PHY_TEGRA_XUSB=y
CONFIG_ARM_SCPI_PROTOCOL=y

diff --combined arch/arm64/net/bpf_jit_comp.c
index 73de2c71cfb0,c870d6f01ac2..2f0505b5c240
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@@ -36,6 -36,7 +36,7 @@@ int bpf_jit_enable __read_mostly
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
+ #define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
@@@ -57,6 -58,7 +58,7 @@@
	/* temporary registers for internal BPF JIT */
	[TMP_REG_1] = A64_R(10),
	[TMP_REG_2] = A64_R(11),
+	[TMP_REG_3] = A64_R(12),
	/* tail_call_cnt */
	[TCALL_CNT] = A64_R(26),
	/* temporary register for blinding constants */
@@@ -69,7 -71,6 +71,7 @@@ struct jit_ctx
	int epilogue_offset;
	int *offset;
	u32 *image;
+	u32 stack_size;
};
static inline void emit(const u32 insn, struct jit_ctx *ctx)
@@@ -146,11 -147,16 +148,11 @@@ static inline int epilogue_offset(cons
/* Stack must be multiples of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
-#define _STACK_SIZE \
-	(MAX_BPF_STACK \
-	+ 4 /* extra for skb_copy_bits buffer */)
-
-#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
-
#define PROLOGUE_OFFSET 8
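The per-program stack size that replaces the fixed STACK_SIZE here is still rounded with STACK_ALIGN(), since AAPCS64 requires the stack pointer to stay 16-byte aligned. A minimal standalone sketch (not part of the patch; the depth values are made up) of what that rounding produces:

#include <stdio.h>

#define STACK_ALIGN(sz) (((sz) + 15) & ~15)

int main(void)
{
	/* illustrative stack_depth values, not from a real BPF program */
	unsigned int depths[] = { 0, 1, 20, 512 };
	unsigned int i;

	for (i = 0; i < sizeof(depths) / sizeof(depths[0]); i++) {
		/* + 4 mirrors the skb_copy_bits scratch area added below */
		unsigned int sz = depths[i] + 4;

		printf("stack_depth %3u -> stack_size %3u\n",
		       depths[i], STACK_ALIGN(sz));
	}
	return 0;	/* prints 16, 16, 32, 528 */
}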
static int build_prologue(struct jit_ctx *ctx)
{
+	const struct bpf_prog *prog = ctx->prog;
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
@@@ -172,9 -178,9 +174,9 @@@
	 * |     |
	 * | ... | BPF prog stack
	 * |     |
-	 * +-----+ <= (BPF_FP - MAX_BPF_STACK)
+	 * +-----+ <= (BPF_FP - prog->aux->stack_depth)
	 * |RSVD | JIT scratchpad
-	 * current A64_SP => +-----+ <= (BPF_FP - STACK_SIZE)
+	 * current A64_SP => +-----+ <= (BPF_FP - ctx->stack_size)
	 * |     |
	 * | ... | Function call stack
	 * |     |
@@@ -198,12 -204,8 +200,12 @@@
	/* Initialize tail_call_cnt */
	emit(A64_MOVZ(1, tcc, 0, 0), ctx);
+	/* 4 byte extra for skb_copy_bits buffer */
+	ctx->stack_size = prog->aux->stack_depth + 4;
+	ctx->stack_size = STACK_ALIGN(ctx->stack_size);
+
	/* Set up function call stack */
-	emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
+	emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
	cur_offset = ctx->idx - idx0;
	if (cur_offset != PROLOGUE_OFFSET) {
@@@ -288,7 -290,7 +290,7 @@@ static void build_epilogue(struct jit_c
	const u8 fp = bpf2a64[BPF_REG_FP];
	/* We're done with BPF stack */
-	emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
+	emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
	/* Restore fs (x25) and x26 */
	emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
@@@ -319,6 -321,7 +321,7 @@@ static int build_insn(const struct bpf_
	const u8 src = bpf2a64[insn->src_reg];
	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
+	const u8 tmp3 = bpf2a64[TMP_REG_3];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
@@@ -586,7 -589,7 +589,7 @@@ emit_cond_jmp
		break;
	}
	/* tail call */
-	case BPF_JMP | BPF_CALL | BPF_X:
+	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(ctx))
			return -EFAULT;
		break;
@@@ -689,10 -692,10 +692,10 @@@
		emit(A64_PRFM(tmp, PST, L1, STRM), ctx);
		emit(A64_LDXR(isdw, tmp2, tmp), ctx);
		emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
-		emit(A64_STXR(isdw, tmp2, tmp, tmp2), ctx);
+		emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);
		jmp_offset = -3;
		check_imm19(jmp_offset);
-		emit(A64_CBNZ(0, tmp2, jmp_offset), ctx);
+		emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
		break;
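The STXR change above fixes a real bug in the BPF_XADD exclusive-access loop: on AArch64, STXR must write its success flag to a register distinct from the data being stored, otherwise behavior is constrained-unpredictable, and the old code used tmp2 for both. A hand-written sketch (not actual JIT output; register names follow the bpf2a64[] map) of the fixed retry loop:

retry:
	ldxr	tmp2, [tmp]		// load-exclusive the current value
	add	tmp2, tmp2, src		// tmp2 = old value + src
	stxr	tmp3, tmp2, [tmp]	// store-exclusive; tmp3 = 0 on success
	cbnz	tmp3, retry		// another CPU intervened: retry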
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
@@@ -732,7 -735,7 +735,7 @@@
			return -EINVAL;
		}
		emit_a64_mov_i64(r3, size, ctx);
-		emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx);
+		emit(A64_SUB_I(1, r4, fp, ctx->stack_size), ctx);
		emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
		emit(A64_BLR(r5), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
@@@ -900,7 -903,6 +903,7 @@@ struct bpf_prog *bpf_int_jit_compile(st
	bpf_jit_binary_lock_ro(header);
	prog->bpf_func = (void *)ctx.image;
	prog->jited = 1;
+	prog->jited_len = image_size;
out_off: kfree(ctx.offset); diff --combined drivers/net/bonding/bond_3ad.c index 165a8009c640,e5386ab706ec..5427032aa05e --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@@ -90,10 -90,13 +90,13 @@@ enum ad_link_speed_type AD_LINK_SPEED_100MBPS, AD_LINK_SPEED_1000MBPS, AD_LINK_SPEED_2500MBPS, + AD_LINK_SPEED_5000MBPS, AD_LINK_SPEED_10000MBPS, + AD_LINK_SPEED_14000MBPS, AD_LINK_SPEED_20000MBPS, AD_LINK_SPEED_25000MBPS, AD_LINK_SPEED_40000MBPS, + AD_LINK_SPEED_50000MBPS, AD_LINK_SPEED_56000MBPS, AD_LINK_SPEED_100000MBPS, }; @@@ -259,10 -262,13 +262,13 @@@ static inline int __check_agg_selection * %AD_LINK_SPEED_100MBPS, * %AD_LINK_SPEED_1000MBPS, * %AD_LINK_SPEED_2500MBPS, + * %AD_LINK_SPEED_5000MBPS, * %AD_LINK_SPEED_10000MBPS + * %AD_LINK_SPEED_14000MBPS, * %AD_LINK_SPEED_20000MBPS * %AD_LINK_SPEED_25000MBPS * %AD_LINK_SPEED_40000MBPS + * %AD_LINK_SPEED_50000MBPS * %AD_LINK_SPEED_56000MBPS * %AD_LINK_SPEED_100000MBPS */ @@@ -296,10 -302,18 +302,18 @@@ static u16 __get_link_speed(struct por speed = AD_LINK_SPEED_2500MBPS; break;
+ case SPEED_5000: + speed = AD_LINK_SPEED_5000MBPS; + break; + case SPEED_10000: speed = AD_LINK_SPEED_10000MBPS; break;
+ case SPEED_14000: + speed = AD_LINK_SPEED_14000MBPS; + break; + case SPEED_20000: speed = AD_LINK_SPEED_20000MBPS; break; @@@ -312,6 -326,10 +326,10 @@@ speed = AD_LINK_SPEED_40000MBPS; break;
+ case SPEED_50000: + speed = AD_LINK_SPEED_50000MBPS; + break; + case SPEED_56000: speed = AD_LINK_SPEED_56000MBPS; break; @@@ -322,11 -340,6 +340,11 @@@
default: /* unknown speed value from ethtool. shouldn't happen */ + if (slave->speed != SPEED_UNKNOWN) + pr_warn_once("%s: unknown ethtool speed (%d) for port %d (set it to 0)\n", + slave->bond->dev->name, + slave->speed, + port->actor_port_number); speed = 0; break; } @@@ -712,9 -725,15 +730,15 @@@ static u32 __get_agg_bandwidth(struct a case AD_LINK_SPEED_2500MBPS: bandwidth = nports * 2500; break; + case AD_LINK_SPEED_5000MBPS: + bandwidth = nports * 5000; + break; case AD_LINK_SPEED_10000MBPS: bandwidth = nports * 10000; break; + case AD_LINK_SPEED_14000MBPS: + bandwidth = nports * 14000; + break; case AD_LINK_SPEED_20000MBPS: bandwidth = nports * 20000; break; @@@ -724,6 -743,9 +748,9 @@@ case AD_LINK_SPEED_40000MBPS: bandwidth = nports * 40000; break; + case AD_LINK_SPEED_50000MBPS: + bandwidth = nports * 50000; + break; case AD_LINK_SPEED_56000MBPS: bandwidth = nports * 56000; break; diff --combined drivers/net/bonding/bond_main.c index 7d9474352c36,8ab6bdbe1682..2865f31c6076 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@@ -3488,8 -3488,7 +3488,8 @@@ static int bond_do_ioctl(struct net_dev case BOND_CHANGE_ACTIVE_OLD: case SIOCBONDCHANGEACTIVE: bond_opt_initstr(&newval, slave_dev->name); - res = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval); + res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE, + &newval); break; default: res = -EOPNOTSUPP; @@@ -4175,6 -4174,12 +4175,6 @@@ static const struct net_device_ops bond .ndo_add_slave = bond_enslave, .ndo_del_slave = bond_release, .ndo_fix_features = bond_fix_features, - .ndo_bridge_setlink = switchdev_port_bridge_setlink, - .ndo_bridge_getlink = switchdev_port_bridge_getlink, - .ndo_bridge_dellink = switchdev_port_bridge_dellink, - .ndo_fdb_add = switchdev_port_fdb_add, - .ndo_fdb_del = switchdev_port_fdb_del, - .ndo_fdb_dump = switchdev_port_fdb_dump, .ndo_features_check = passthru_features_check, };
@@@ -4187,7 -4192,6 +4187,6 @@@ static void bond_destructor(struct net_
	struct bonding *bond = netdev_priv(bond_dev);
	if (bond->wq)
		destroy_workqueue(bond->wq);
-	free_netdev(bond_dev);
}
void bond_setup(struct net_device *bond_dev)
@@@ -4207,7 -4211,8 +4206,8 @@@
	bond_dev->netdev_ops = &bond_netdev_ops;
	bond_dev->ethtool_ops = &bond_ethtool_ops;
-	bond_dev->destructor = bond_destructor;
+	bond_dev->needs_free_netdev = true;
+	bond_dev->priv_destructor = bond_destructor;
SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
@@@ -4731,7 -4736,7 +4731,7 @@@ int bond_create(struct net *net, const
	rtnl_unlock();
	if (res < 0)
-		bond_destructor(bond_dev);
+		free_netdev(bond_dev);
	return res;
}
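These bonding hunks follow the 4.12-era netdev teardown rework: instead of installing a ->destructor that both releases private state and calls free_netdev(), a driver now sets needs_free_netdev so the core frees the device, plus an optional priv_destructor for its own cleanup; on an error path before registration succeeds (as in bond_create() above) the driver calls free_netdev() directly. A minimal sketch of the convention, using a hypothetical foo driver:

#include <linux/netdevice.h>
#include <linux/workqueue.h>

/* hypothetical private state, for illustration only */
struct foo_priv {
	struct workqueue_struct *wq;
};

static void foo_destructor(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* release private resources only ... */
	if (priv->wq)
		destroy_workqueue(priv->wq);
	/* ... the core calls free_netdev() itself afterwards */
}

static void foo_setup(struct net_device *dev)
{
	dev->needs_free_netdev = true;		/* core frees the netdev */
	dev->priv_destructor = foo_destructor;	/* driver frees its extras */
}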
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index ef734675885e,f619c4cac51f..67fe3d826566 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@@ -3883,15 -3883,26 +3883,26 @@@ netdev_tx_t bnx2x_start_xmit(struct sk_ /* when transmitting in a vf, start bd must hold the ethertype * for fw to enforce it */ + u16 vlan_tci = 0; #ifndef BNX2X_STOP_ON_ERROR - if (IS_VF(bp)) + if (IS_VF(bp)) { #endif - tx_start_bd->vlan_or_ethertype = - cpu_to_le16(ntohs(eth->h_proto)); + /* Still need to consider inband vlan for enforced */ + if (__vlan_get_tag(skb, &vlan_tci)) { + tx_start_bd->vlan_or_ethertype = + cpu_to_le16(ntohs(eth->h_proto)); + } else { + tx_start_bd->bd_flags.as_bitfield |= + (X_ETH_INBAND_VLAN << + ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); + tx_start_bd->vlan_or_ethertype = + cpu_to_le16(vlan_tci); + } #ifndef BNX2X_STOP_ON_ERROR - else + } else { /* used by FW for packet accounting */ tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); + } #endif }
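The bnx2x hunk above keys off __vlan_get_tag(): zero means the frame carries an in-band 802.1Q tag (returned through vlan_tci), non-zero means it does not, in which case the start BD falls back to carrying the ethertype. A small sketch of that contract (example_tx_vlan() is hypothetical, not driver code):

#include <linux/if_vlan.h>
#include <linux/printk.h>

static void example_tx_vlan(struct sk_buff *skb)
{
	u16 vlan_tci;

	if (__vlan_get_tag(skb, &vlan_tci) == 0)
		pr_info("in-band VLAN id %u\n", vlan_tci & VLAN_VID_MASK);
	else
		pr_info("no in-band VLAN tag in the Ethernet header\n");
}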
@@@ -4273,8 -4284,8 +4284,8 @@@ int bnx2x_setup_tc(struct net_device *d return 0; }
-int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto, - struct tc_to_netdev *tc) +int __bnx2x_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, + __be16 proto, struct tc_to_netdev *tc) { if (tc->type != TC_SETUP_MQPRIO) return -EINVAL; diff --combined drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 01c9710fc62e,ea1bfcf1870a..2c6de769f4e6 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@@ -891,7 -891,7 +891,7 @@@ static u16 cxgb_select_queue(struct net * The skb's priority is determined via the VLAN Tag Priority Code * Point field. */ - if (cxgb4_dcb_enabled(dev)) { + if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) { u16 vlan_tci; int err;
@@@ -1093,12 -1093,10 +1093,12 @@@ int cxgb4_alloc_stid(struct tid_info *t * This is equivalent to 4 TIDs. With CLIP enabled it * needs 2 TIDs. */ - if (family == PF_INET) - t->stids_in_use++; - else + if (family == PF_INET6) { t->stids_in_use += 2; + t->v6_stids_in_use += 2; + } else { + t->stids_in_use++; + } } spin_unlock_bh(&t->stid_lock); return stid; @@@ -1152,16 -1150,13 +1152,16 @@@ void cxgb4_free_stid(struct tid_info *t bitmap_release_region(t->stid_bmap, stid, 1); t->stid_tab[stid].data = NULL; if (stid < t->nstids) { - if (family == PF_INET) - t->stids_in_use--; - else + if (family == PF_INET6) { t->stids_in_use -= 2; + t->v6_stids_in_use -= 2; + } else { + t->stids_in_use--; + } } else { t->sftids_in_use--; } + spin_unlock_bh(&t->stid_lock); } EXPORT_SYMBOL(cxgb4_free_stid); @@@ -1237,8 -1232,7 +1237,8 @@@ static void process_tid_release_list(st * Release a TID and inform HW. If we are unable to allocate the release * message we defer to a work queue. */ -void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid) +void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid, + unsigned short family) { struct sk_buff *skb; struct adapter *adap = container_of(t, struct adapter, tids); @@@ -1247,18 -1241,10 +1247,18 @@@
if (t->tid_tab[tid]) { t->tid_tab[tid] = NULL; - if (t->hash_base && (tid >= t->hash_base)) - atomic_dec(&t->hash_tids_in_use); - else - atomic_dec(&t->tids_in_use); + atomic_dec(&t->conns_in_use); + if (t->hash_base && (tid >= t->hash_base)) { + if (family == AF_INET6) + atomic_sub(2, &t->hash_tids_in_use); + else + atomic_dec(&t->hash_tids_in_use); + } else { + if (family == AF_INET6) + atomic_sub(2, &t->tids_in_use); + else + atomic_dec(&t->tids_in_use); + } }
skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC); @@@ -1306,12 -1292,10 +1306,12 @@@ static int tid_init(struct tid_info *t spin_lock_init(&t->ftid_lock);
t->stids_in_use = 0; + t->v6_stids_in_use = 0; t->sftids_in_use = 0; t->afree = NULL; t->atids_in_use = 0; atomic_set(&t->tids_in_use, 0); + atomic_set(&t->conns_in_use, 0); atomic_set(&t->hash_tids_in_use, 0);
/* Setup the free list for atid_tab and clear the stid bitmap. */ @@@ -2265,13 -2249,6 +2265,13 @@@ static int cxgb_open(struct net_device return err; }
+ /* It's possible that the basic port information could have + * changed since we first read it. + */ + err = t4_update_port_info(pi); + if (err < 0) + return err; + err = link_start(dev); if (!err) netif_tx_start_all_queues(dev); @@@ -2583,8 -2560,6 +2583,8 @@@ static int cxgb_get_vf_config(struct ne if (vf >= adap->num_vfs) return -EINVAL; ivi->vf = vf; + ivi->max_tx_rate = adap->vfinfo[vf].tx_rate; + ivi->min_tx_rate = 0; ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr); return 0; } @@@ -2601,109 -2576,6 +2601,109 @@@ static int cxgb_get_phys_port_id(struc return 0; }
+static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, + int max_tx_rate) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adap = pi->adapter; + struct fw_port_cmd port_cmd, port_rpl; + u32 link_status, speed = 0; + u32 fw_pfvf, fw_class; + int class_id = vf; + int link_ok, ret; + u16 pktsize; + + if (vf >= adap->num_vfs) + return -EINVAL; + + if (min_tx_rate) { + dev_err(adap->pdev_dev, + "Min tx rate (%d) (> 0) for VF %d is Invalid.\n", + min_tx_rate, vf); + return -EINVAL; + } + /* Retrieve link details for VF port */ + memset(&port_cmd, 0, sizeof(port_cmd)); + port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_READ_F | + FW_PORT_CMD_PORTID_V(pi->port_id)); + port_cmd.action_to_len16 = + cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | + FW_LEN16(port_cmd)); + ret = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd), + &port_rpl); + if (ret != FW_SUCCESS) { + dev_err(adap->pdev_dev, + "Failed to get link status for VF %d\n", vf); + return -EINVAL; + } + link_status = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype); + link_ok = (link_status & FW_PORT_CMD_LSTATUS_F) != 0; + if (!link_ok) { + dev_err(adap->pdev_dev, "Link down for VF %d\n", vf); + return -EINVAL; + } + /* Determine link speed */ + if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) + speed = 100; + else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) + speed = 1000; + else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) + speed = 10000; + else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G)) + speed = 25000; + else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) + speed = 40000; + else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G)) + speed = 100000; + + if (max_tx_rate > speed) { + dev_err(adap->pdev_dev, + "Max tx rate %d for VF %d can't be > link-speed %u", + max_tx_rate, vf, speed); + return -EINVAL; + } + pktsize = be16_to_cpu(port_rpl.u.info.mtu); + /* subtract ethhdr size and 4 bytes crc since, f/w appends it */ + pktsize = pktsize - sizeof(struct ethhdr) - 4; + /* subtract ipv4 hdr size, tcp hdr size to get typical IPv4 MSS size */ + pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr); + /* configure Traffic Class for rate-limiting */ + ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET, + SCHED_CLASS_LEVEL_CL_RL, + SCHED_CLASS_MODE_CLASS, + SCHED_CLASS_RATEUNIT_BITS, + SCHED_CLASS_RATEMODE_ABS, + pi->port_id, class_id, 0, + max_tx_rate * 1000, 0, pktsize); + if (ret) { + dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n", + ret); + return -EINVAL; + } + dev_info(adap->pdev_dev, + "Class %d with MSS %u configured with rate %u\n", + class_id, pktsize, max_tx_rate); + + /* bind VF to configured Traffic Class */ + fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH)); + fw_class = class_id; + ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf, + &fw_class); + if (ret) { + dev_err(adap->pdev_dev, + "Err %d in binding VF %d to Traffic Class %d\n", + ret, vf, class_id); + return -EINVAL; + } + dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n", + adap->pf, vf, class_id); + adap->vfinfo[vf].tx_rate = max_tx_rate; + return 0; +} + #endif
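The pktsize computation in cxgb_set_vf_rate() above turns the port MTU into a typical IPv4 MSS for the scheduler class. A worked example in standalone C (a 1500-byte MTU is assumed, not taken from the driver):

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500;	/* assumed port MTU */
	unsigned int pktsize = mtu;

	pktsize -= 14 + 4;	/* Ethernet header + 4-byte CRC the fw appends */
	pktsize -= 20 + 20;	/* IPv4 header + TCP header */

	printf("MSS handed to t4_sched_params: %u\n", pktsize);	/* 1442 */
	return 0;
}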
static int cxgb_set_mac_addr(struct net_device *dev, void *p) @@@ -2823,15 -2695,12 +2823,15 @@@ static int cxgb_set_tx_maxrate(struct n return err; }
-static int cxgb_setup_tc(struct net_device *dev, u32 handle, __be16 proto, - struct tc_to_netdev *tc) +static int cxgb_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, + __be16 proto, struct tc_to_netdev *tc) { struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev);
+ if (chain_index) + return -EOPNOTSUPP; + if (!(adap->flags & FULL_INIT_DONE)) { dev_err(adap->pdev_dev, "Failed to setup tc on port %d. Link Down?\n", @@@ -2855,16 -2724,6 +2855,16 @@@ return -EOPNOTSUPP; }
+static netdev_features_t cxgb_fix_features(struct net_device *dev, + netdev_features_t features) +{ + /* Disable GRO, if RX_CSUM is disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_GRO; + + return features; +} + static const struct net_device_ops cxgb4_netdev_ops = { .ndo_open = cxgb_open, .ndo_stop = cxgb_close, @@@ -2886,7 -2745,6 +2886,7 @@@ #endif /* CONFIG_CHELSIO_T4_FCOE */ .ndo_set_tx_maxrate = cxgb_set_tx_maxrate, .ndo_setup_tc = cxgb_setup_tc, + .ndo_fix_features = cxgb_fix_features, };
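The new .ndo_fix_features hook is invoked by the core (via netdev_update_features()) whenever the feature set is recomputed, and may mask out combinations the device cannot honor; here GRO is dropped when RX checksum offload is off, since GRO must not merge packets whose checksums were never verified by hardware. The same shape for a hypothetical driver:

#include <linux/netdevice.h>

static netdev_features_t foo_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	/* GRO depends on hardware-verified checksums */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_GRO;

	return features;
}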
#ifdef CONFIG_PCI_IOV @@@ -2894,7 -2752,6 +2894,7 @@@ static const struct net_device_ops cxgb .ndo_open = dummy_open, .ndo_set_vf_mac = cxgb_set_vf_mac, .ndo_get_vf_config = cxgb_get_vf_config, + .ndo_set_vf_rate = cxgb_set_vf_rate, .ndo_get_phys_port_id = cxgb_get_phys_port_id, }; #endif @@@ -4159,7 -4016,10 +4159,7 @@@ static void cfg_queues(struct adapter *
/* Reduce memory usage in kdump environment, disable all offload. */ - if (is_kdump_kernel()) { - adap->params.offload = 0; - adap->params.crypto = 0; - } else if (is_uld(adap) && t4_uld_mem_alloc(adap)) { + if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) { adap->params.offload = 0; adap->params.crypto = 0; } @@@ -4180,7 -4040,7 +4180,7 @@@ struct port_info *pi = adap2pinfo(adap, i);
pi->first_qset = qidx; - pi->nqsets = 8; + pi->nqsets = is_kdump_kernel() ? 1 : 8; qidx += pi->nqsets; } #else /* !CONFIG_CHELSIO_T4_DCB */ @@@ -4193,9 -4053,6 +4193,9 @@@ if (q10g > netif_get_num_default_rss_queues()) q10g = netif_get_num_default_rss_queues();
+ if (is_kdump_kernel()) + q10g = 1; + for_each_port(adap, i) { struct port_info *pi = adap2pinfo(adap, i);
@@@ -4668,7 -4525,7 +4668,7 @@@ static void dummy_setup(struct net_devi /* Initialize the device structure. */ dev->netdev_ops = &cxgb4_mgmt_netdev_ops; dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; }
static int config_mgmt_dev(struct pci_dev *pdev) @@@ -5101,8 -4958,6 +5101,8 @@@ static int init_one(struct pci_dev *pde netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets); netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
+ netif_carrier_off(adapter->port[i]); + err = register_netdev(adapter->port[i]); if (err) break; @@@ -5169,15 -5024,13 +5169,15 @@@ sriov &v, &port_vec); if (err < 0) { dev_err(adapter->pdev_dev, "Could not fetch port params\n"); - goto free_adapter; + goto free_mbox_log; }
adapter->params.nports = hweight32(port_vec); pci_set_drvdata(pdev, adapter); return 0;
+free_mbox_log: + kfree(adapter->mbox_log); free_adapter: kfree(adapter); free_pci_region: @@@ -5277,7 -5130,6 +5277,7 @@@ static void remove_one(struct pci_dev * unregister_netdev(adapter->port[0]); iounmap(adapter->regs); kfree(adapter->vfinfo); + kfree(adapter->mbox_log); kfree(adapter); pci_disable_sriov(pdev); pci_release_regions(pdev); @@@ -5324,7 -5176,6 +5324,7 @@@ static void shutdown_one(struct pci_de unregister_netdev(adapter->port[0]); iounmap(adapter->regs); kfree(adapter->vfinfo); + kfree(adapter->mbox_log); kfree(adapter); pci_disable_sriov(pdev); pci_release_regions(pdev); diff --combined drivers/net/ethernet/ibm/ibmvnic.c index 59ea7a5ae776,c0fbeb387db4..78fdd4f0e341 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@@ -163,16 -163,6 +163,16 @@@ static long h_reg_sub_crq(unsigned lon return rc; }
+static void reset_long_term_buff(struct ibmvnic_adapter *adapter, + struct ibmvnic_long_term_buff *ltb) +{ + memset(ltb->buff, 0, ltb->size); + + init_completion(&adapter->fw_done); + send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); + wait_for_completion(&adapter->fw_done); +} + static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, struct ibmvnic_long_term_buff *ltb, int size) { @@@ -210,15 -200,6 +210,15 @@@ static void free_long_term_buff(struct dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); }
+static void deactivate_rx_pools(struct ibmvnic_adapter *adapter) +{ + int i; + + for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + i++) + adapter->rx_pool[i].active = 0; +} + static void replenish_rx_pool(struct ibmvnic_adapter *adapter, struct ibmvnic_rx_pool *pool) { @@@ -236,9 -217,6 +236,9 @@@ int index; int i;
+ if (!pool->active) + return; + handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + be32_to_cpu(adapter->login_rsp_buf-> off_rxadd_subcrqs)); @@@ -309,15 -287,6 +309,15 @@@ failure dev_kfree_skb_any(skb); adapter->replenish_add_buff_failure++; atomic_add(buffers_added, &pool->available); + + if (lpar_rc == H_CLOSED) { + /* Disable buffer pool replenishment and report carrier off if + * queue is closed. Firmware guarantees that a signal will + * be sent to the driver, triggering a reset. + */ + deactivate_rx_pools(adapter); + netif_carrier_off(adapter->netdev); + } }
static void replenish_pools(struct ibmvnic_adapter *adapter) @@@ -362,32 -331,6 +362,32 @@@ static int init_stats_token(struct ibmv return 0; }
+static int reset_rx_pools(struct ibmvnic_adapter *adapter) +{ + struct ibmvnic_rx_pool *rx_pool; + int rx_scrqs; + int i, j; + + rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + for (i = 0; i < rx_scrqs; i++) { + rx_pool = &adapter->rx_pool[i]; + + reset_long_term_buff(adapter, &rx_pool->long_term_buff); + + for (j = 0; j < rx_pool->size; j++) + rx_pool->free_map[j] = j; + + memset(rx_pool->rx_buff, 0, + rx_pool->size * sizeof(struct ibmvnic_rx_buff)); + + atomic_set(&rx_pool->available, 0); + rx_pool->next_alloc = 0; + rx_pool->next_free = 0; + } + + return 0; +} + static void release_rx_pools(struct ibmvnic_adapter *adapter) { struct ibmvnic_rx_pool *rx_pool; @@@ -489,32 -432,6 +489,32 @@@ static int init_rx_pools(struct net_dev return 0; }
+static int reset_tx_pools(struct ibmvnic_adapter *adapter) +{ + struct ibmvnic_tx_pool *tx_pool; + int tx_scrqs; + int i, j; + + tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); + for (i = 0; i < tx_scrqs; i++) { + tx_pool = &adapter->tx_pool[i]; + + reset_long_term_buff(adapter, &tx_pool->long_term_buff); + + memset(tx_pool->tx_buff, 0, + adapter->req_tx_entries_per_subcrq * + sizeof(struct ibmvnic_tx_buff)); + + for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++) + tx_pool->free_map[j] = j; + + tx_pool->consumer_index = 0; + tx_pool->producer_index = 0; + } + + return 0; +} + static void release_tx_pools(struct ibmvnic_adapter *adapter) { struct ibmvnic_tx_pool *tx_pool; @@@ -601,32 -518,6 +601,32 @@@ static void release_error_buffers(struc spin_unlock_irqrestore(&adapter->error_list_lock, flags); }
+static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter) +{ + int i; + + if (adapter->napi_enabled) + return; + + for (i = 0; i < adapter->req_rx_queues; i++) + napi_enable(&adapter->napi[i]); + + adapter->napi_enabled = true; +} + +static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter) +{ + int i; + + if (!adapter->napi_enabled) + return; + + for (i = 0; i < adapter->req_rx_queues; i++) + napi_disable(&adapter->napi[i]); + + adapter->napi_enabled = false; +} + static int ibmvnic_login(struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); @@@ -783,7 -674,9 +783,7 @@@ static int __ibmvnic_open(struct net_de
adapter->state = VNIC_OPENING; replenish_pools(adapter); - - for (i = 0; i < adapter->req_rx_queues; i++) - napi_enable(&adapter->napi[i]); + ibmvnic_napi_enable(adapter);
/* We're ready to receive frames, enable the sub-crq interrupts and * set the logical link state to up @@@ -886,7 -779,13 +886,7 @@@ static int __ibmvnic_close(struct net_d
adapter->state = VNIC_CLOSING; netif_tx_stop_all_queues(netdev); - - if (adapter->napi) { - for (i = 0; i < adapter->req_rx_queues; i++) - napi_disable(&adapter->napi[i]); - } - - clean_tx_pools(adapter); + ibmvnic_napi_disable(adapter);
if (adapter->tx_scrq) { for (i = 0; i < adapter->req_tx_queues; i++) @@@ -915,7 -814,6 +915,7 @@@ } }
+ clean_tx_pools(adapter); adapter->state = VNIC_CLOSED; return rc; } @@@ -1194,14 -1092,8 +1194,14 @@@ static int ibmvnic_xmit(struct sk_buff dev_kfree_skb_any(skb); tx_buff->skb = NULL;
- if (lpar_rc == H_CLOSED) - netif_stop_subqueue(netdev, queue_num); + if (lpar_rc == H_CLOSED) { + /* Disable TX and report carrier off if queue is closed. + * Firmware guarantees that a signal will be sent to the + * driver, triggering a reset or some other action. + */ + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + }
tx_send_failed++; tx_dropped++; @@@ -1314,39 -1206,37 +1314,39 @@@ static int do_reset(struct ibmvnic_adap if (rc) return rc;
- /* remove the closed state so when we call open it appears - * we are coming from the probed state. - */ - adapter->state = VNIC_PROBED; + if (adapter->reset_reason != VNIC_RESET_NON_FATAL) { + /* remove the closed state so when we call open it appears + * we are coming from the probed state. + */ + adapter->state = VNIC_PROBED;
- release_resources(adapter); - release_sub_crqs(adapter); - release_crq_queue(adapter); + rc = ibmvnic_init(adapter); + if (rc) + return 0;
- rc = ibmvnic_init(adapter); - if (rc) - return 0; + /* If the adapter was in PROBE state prior to the reset, + * exit here. + */ + if (reset_state == VNIC_PROBED) + return 0;
- /* If the adapter was in PROBE state prior to the reset, exit here. */ - if (reset_state == VNIC_PROBED) - return 0; + rc = ibmvnic_login(netdev); + if (rc) { + adapter->state = VNIC_PROBED; + return 0; + }
- rc = ibmvnic_login(netdev); - if (rc) { - adapter->state = VNIC_PROBED; - return 0; - } + rc = reset_tx_pools(adapter); + if (rc) + return rc;
- rtnl_lock(); - rc = init_resources(adapter); - rtnl_unlock(); - if (rc) - return rc; + rc = reset_rx_pools(adapter); + if (rc) + return rc;
- if (reset_state == VNIC_CLOSED) - return 0; + if (reset_state == VNIC_CLOSED) + return 0; + }
rc = __ibmvnic_open(netdev); if (rc) { @@@ -1364,9 -1254,6 +1364,9 @@@ for (i = 0; i < adapter->req_rx_queues; i++) napi_schedule(&adapter->napi[i]);
+ if (adapter->reset_reason != VNIC_RESET_FAILOVER) + netdev_notify_peers(netdev); + return 0; }
@@@ -1426,7 -1313,6 +1426,7 @@@ static void __ibmvnic_reset(struct work
if (rc) { free_all_rwi(adapter); + mutex_unlock(&adapter->reset_lock); return; }
@@@ -1497,10 -1383,6 +1497,10 @@@ static int ibmvnic_poll(struct napi_str struct ibmvnic_adapter *adapter = netdev_priv(netdev); int scrq_num = (int)(napi - adapter->napi); int frames_processed = 0; + + if (adapter->resetting) + return 0; + restart_poll: while (frames_processed < budget) { struct sk_buff *skb; @@@ -1559,9 -1441,7 +1559,9 @@@ netdev->stats.rx_bytes += length; frames_processed++; } - replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]); + + if (adapter->state != VNIC_CLOSING) + replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
if (frames_processed < budget) { enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); @@@ -1588,6 -1468,11 +1588,11 @@@ static void ibmvnic_netpoll_controller( } #endif
+ static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) + { + return -EOPNOTSUPP; + } + static const struct net_device_ops ibmvnic_netdev_ops = { .ndo_open = ibmvnic_open, .ndo_stop = ibmvnic_close, @@@ -1599,6 -1484,7 +1604,7 @@@ #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ibmvnic_netpoll_controller, #endif + .ndo_change_mtu = ibmvnic_change_mtu, };
/* ethtool functions */ @@@ -1728,45 -1614,6 +1734,45 @@@ static const struct ethtool_ops ibmvnic
/* Routines for managing CRQs/sCRQs */
+static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter, + struct ibmvnic_sub_crq_queue *scrq) +{ + int rc; + + if (scrq->irq) { + free_irq(scrq->irq, scrq); + irq_dispose_mapping(scrq->irq); + scrq->irq = 0; + } + + memset(scrq->msgs, 0, 2 * PAGE_SIZE); + scrq->cur = 0; + + rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token, + 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq); + return rc; +} + +static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter) +{ + int i, rc; + + for (i = 0; i < adapter->req_tx_queues; i++) { + rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]); + if (rc) + return rc; + } + + for (i = 0; i < adapter->req_rx_queues; i++) { + rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]); + if (rc) + return rc; + } + + rc = init_sub_crq_irqs(adapter); + return rc; +} + static void release_sub_crq_queue(struct ibmvnic_adapter *adapter, struct ibmvnic_sub_crq_queue *scrq) { @@@ -2901,8 -2748,6 +2907,8 @@@ static void handle_error_indication(uni
if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR) ibmvnic_reset(adapter, VNIC_RESET_FATAL); + else + ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL); }
static void handle_change_mac_rsp(union ibmvnic_crq *crq, @@@ -3308,8 -3153,6 +3314,8 @@@ static void ibmvnic_handle_crq(union ib switch (gen_crq->cmd) { case IBMVNIC_CRQ_INIT: dev_info(dev, "Partner initialized\n"); + adapter->from_passive_init = true; + complete(&adapter->init_done); break; case IBMVNIC_CRQ_INIT_COMPLETE: dev_info(dev, "Partner initialization complete\n"); @@@ -3618,38 -3461,21 +3624,38 @@@ static int ibmvnic_init(struct ibmvnic_ unsigned long timeout = msecs_to_jiffies(30000); int rc;
- rc = init_crq_queue(adapter); + if (adapter->resetting) { + rc = ibmvnic_reset_crq(adapter); + if (!rc) + rc = vio_enable_interrupts(adapter->vdev); + } else { + rc = init_crq_queue(adapter); + } + if (rc) { dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc); return rc; }
+ adapter->from_passive_init = false; + init_completion(&adapter->init_done); ibmvnic_send_crq_init(adapter); if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { dev_err(dev, "Initialization sequence timed out\n"); - release_crq_queue(adapter); return -1; }
- rc = init_sub_crqs(adapter); + if (adapter->from_passive_init) { + adapter->state = VNIC_OPEN; + adapter->from_passive_init = false; + return -1; + } + + if (adapter->resetting) + rc = reset_sub_crq_queues(adapter); + else + rc = init_sub_crqs(adapter); if (rc) { dev_err(dev, "Initialization of sub crqs failed\n"); release_crq_queue(adapter); @@@ -3658,8 -3484,6 +3664,8 @@@ return rc; }
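ibmvnic_init() above now also completes init_done from the CRQ handler when a passive "Partner initialized" message arrives, using the standard completion API. A stripped-down sketch of that handshake (hypothetical waiter/notifier pair; the 30-second timeout mirrors the driver):

#include <linux/completion.h>
#include <linux/jiffies.h>

static struct completion init_done;

static int example_wait_for_partner(void)
{
	unsigned long timeout = msecs_to_jiffies(30000);

	init_completion(&init_done);
	/* ... kick off the asynchronous init request here ... */
	if (!wait_for_completion_timeout(&init_done, timeout))
		return -1;	/* partner never answered */
	return 0;
}

/* called from the interrupt/CRQ path when the partner responds */
static void example_partner_ready(void)
{
	complete(&init_done);
}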
+static struct device_attribute dev_attr_failover; + static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) { struct ibmvnic_adapter *adapter; @@@ -3716,16 -3540,9 +3722,16 @@@
netdev->mtu = adapter->req_mtu - ETH_HLEN;
+ rc = device_create_file(&dev->dev, &dev_attr_failover); + if (rc) { + free_netdev(netdev); + return rc; + } + rc = register_netdev(netdev); if (rc) { dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); + device_remove_file(&dev->dev, &dev_attr_failover); free_netdev(netdev); return rc; } @@@ -3751,49 -3568,12 +3757,49 @@@ static int ibmvnic_remove(struct vio_de adapter->state = VNIC_REMOVED;
mutex_unlock(&adapter->reset_lock); + device_remove_file(&dev->dev, &dev_attr_failover); free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL);
return 0; }
+static ssize_t failover_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct net_device *netdev = dev_get_drvdata(dev); + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; + __be64 session_token; + long rc; + + if (!sysfs_streq(buf, "1")) + return -EINVAL; + + rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address, + H_GET_SESSION_TOKEN, 0, 0, 0); + if (rc) { + netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n", + rc); + return -EINVAL; + } + + session_token = (__be64)retbuf[0]; + netdev_dbg(netdev, "Initiating client failover, session id %llx\n", + be64_to_cpu(session_token)); + rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, + H_SESSION_ERR_DETECTED, session_token, 0, 0); + if (rc) { + netdev_err(netdev, "Client initiated failover failed, rc %ld\n", + rc); + return -EINVAL; + } + + return count; +} + +static DEVICE_ATTR(failover, 0200, NULL, failover_store); + static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev) { struct net_device *netdev = dev_get_drvdata(&vdev->dev); diff --combined drivers/net/ethernet/intel/i40e/i40e.h index 60dc9b2c19ff,44d9610f7a15..395ca94faf80 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@@ -57,7 -57,7 +57,7 @@@ #include "i40e_type.h" #include "i40e_prototype.h" #include "i40e_client.h" -#include "i40e_virtchnl.h" +#include <linux/avf/virtchnl.h> #include "i40e_virtchnl_pf.h" #include "i40e_txrx.h" #include "i40e_dcb.h" @@@ -399,6 -399,7 +399,7 @@@ struct i40e_pf #define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1) #define I40E_FLAG_MSI_ENABLED BIT_ULL(2) #define I40E_FLAG_MSIX_ENABLED BIT_ULL(3) + #define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT_ULL(4) #define I40E_FLAG_RSS_ENABLED BIT_ULL(6) #define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7) #define I40E_FLAG_IWARP_ENABLED BIT_ULL(10) @@@ -502,12 -503,10 +503,12 @@@ struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_caps; struct sk_buff *ptp_tx_skb; + unsigned long ptp_tx_start; struct hwtstamp_config tstamp_config; struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */ u64 ptp_base_adj; u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; u32 rx_hwtstamp_cleared; u32 latch_event_flags; spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. 
*/ @@@ -957,8 -956,7 +958,8 @@@ bool i40e_dcb_need_reconfig(struct i40e struct i40e_dcbx_config *old_cfg, struct i40e_dcbx_config *new_cfg); #endif /* CONFIG_I40E_DCB */ -void i40e_ptp_rx_hang(struct i40e_vsi *vsi); +void i40e_ptp_rx_hang(struct i40e_pf *pf); +void i40e_ptp_tx_hang(struct i40e_pf *pf); void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf); void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index); void i40e_ptp_set_increment(struct i40e_pf *pf); diff --combined drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 35a246f05520,894c8e57ba00..3d58762efbc0 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@@ -147,7 -147,6 +147,7 @@@ static const struct i40e_stats i40e_gst I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests), I40E_PF_STAT("arq_overflows", arq_overflows), I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), + I40E_PF_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt), I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match), I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match), @@@ -225,7 -224,7 +225,7 @@@ static const struct i40e_priv_flags i40 I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0), I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0), I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0), - I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_CAPABLE, 0), + I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0), I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0), };
@@@ -4093,7 -4092,7 +4093,7 @@@ flags_complete
/* Only allow ATR evict on hardware that is capable of handling it */ if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) - pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE; + pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_ENABLED;
if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) { u16 sw_flags = 0, valid_flags = 0; diff --combined drivers/net/ethernet/intel/i40e/i40e_main.c index abab7fb7a3fc,a7a4b28b4144..98fb644a580e --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@@ -5509,8 -5509,7 +5509,8 @@@ exit return ret; }
-static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, +static int __i40e_setup_tc(struct net_device *netdev, u32 handle, + u32 chain_index, __be16 proto, struct tc_to_netdev *tc) { if (tc->type != TC_SETUP_MQPRIO) @@@ -6373,8 -6372,7 +6373,8 @@@ static void i40e_watchdog_subtask(struc i40e_update_veb_stats(pf->veb[i]); }
- i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]); + i40e_ptp_rx_hang(pf); + i40e_ptp_tx_hang(pf); }
/** @@@ -8823,11 -8821,12 +8823,12 @@@ static int i40e_sw_init(struct i40e_pf (pf->hw.aq.api_min_ver > 4))) { /* Supported in FW API version higher than 1.4 */ pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; - pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; - } else { - pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; }
+ /* Enable HW ATR eviction if possible */ + if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) + pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED; + pf->eeprom_version = 0xDEAD; pf->lan_veb = I40E_NO_VEB; pf->lan_vsi = I40E_NO_VSI; diff --combined drivers/net/ethernet/intel/i40e/i40e_txrx.c index ddf885084c77,77115c25d96f..af554f3cda19 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@@ -2341,7 -2341,7 +2341,7 @@@ static void i40e_atr(struct i40e_ring * /* Due to lack of space, no more new filters can be programmed */ if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)) return; - if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) { + if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) { /* HW ATR eviction will take care of removing filters on FIN * and RST packets. */ @@@ -2403,7 -2403,7 +2403,7 @@@ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
- if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) + if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); @@@ -2629,10 -2629,8 +2629,10 @@@ static int i40e_tsyn(struct i40e_ring * if (pf->ptp_tx && !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + pf->ptp_tx_start = jiffies; pf->ptp_tx_skb = skb_get(skb); } else { + pf->tx_hwtstamp_skipped++; return 0; }
@@@ -2935,12 -2933,10 +2935,12 @@@ bool __i40e_chk_linearize(struct sk_buf * @hdr_len: size of the packet header * @td_cmd: the command field in the descriptor * @td_offset: offset for checksum or crc + * + * Returns 0 on success, -1 on failure to DMA **/ -static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, - struct i40e_tx_buffer *first, u32 tx_flags, - const u8 hdr_len, u32 td_cmd, u32 td_offset) +static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, + struct i40e_tx_buffer *first, u32 tx_flags, + const u8 hdr_len, u32 td_cmd, u32 td_offset) { unsigned int data_len = skb->data_len; unsigned int size = skb_headlen(skb); @@@ -3098,7 -3094,7 +3098,7 @@@ do_rs mmiowb(); }
- return; + return 0;
dma_error: dev_info(tx_ring->dev, "TX DMA map failed\n"); @@@ -3115,8 -3111,6 +3115,8 @@@ }
tx_ring->next_to_use = i; + + return -1; }
/** @@@ -3217,9 -3211,8 +3217,9 @@@ static netdev_tx_t i40e_xmit_frame_ring */ i40e_atr(tx_ring, skb, tx_flags);
- i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, - td_cmd, td_offset); + if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, + td_cmd, td_offset)) + goto cleanup_tx_tstamp;
return NETDEV_TX_OK;
@@@ -3227,15 -3220,6 +3227,15 @@@ out_drop i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring); dev_kfree_skb_any(first->skb); first->skb = NULL; +cleanup_tx_tstamp: + if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) { + struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev); + + dev_kfree_skb_any(pf->ptp_tx_skb); + pf->ptp_tx_skb = NULL; + clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); + } + return NETDEV_TX_OK; }
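Only one Tx frame at a time can own the PTP timestamp slot (guarded by __I40E_PTP_TX_IN_PROGRESS), so the new cleanup_tx_tstamp label above releases the slot when DMA mapping fails instead of leaving it wedged until the hang detector fires. A generic sketch of the reserve/release pairing (the single-slot resource is hypothetical; the bit helpers are the real kernel API):

#include <linux/bitops.h>

#define SLOT_BUSY	0
static unsigned long slot_state;

static bool slot_reserve(void)
{
	/* old bit was clear: we own the slot now */
	return !test_and_set_bit_lock(SLOT_BUSY, &slot_state);
}

static void slot_release(void)
{
	/* pairs with the acquire above; also run on every error unwind */
	clear_bit_unlock(SLOT_BUSY, &slot_state);
}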
diff --combined drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 6bee254d34ee,0fb38ca78900..ecbe40ea8ffe --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@@ -39,7 -39,7 +39,7 @@@ * send a message to all VFs on a given PF **/ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, - enum i40e_virtchnl_ops v_opcode, + enum virtchnl_ops v_opcode, i40e_status v_retval, u8 *msg, u16 msglen) { @@@ -70,14 -70,14 +70,14 @@@ **/ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) { - struct i40e_virtchnl_pf_event pfe; + struct virtchnl_pf_event pfe; struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; struct i40e_link_status *ls = &pf->hw.phy.link_info; int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
- pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; - pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; + pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; + pfe.severity = PF_EVENT_SEVERITY_INFO; if (vf->link_forced) { pfe.event_data.link_event.link_status = vf->link_up; pfe.event_data.link_event.link_speed = @@@ -85,10 -85,9 +85,10 @@@ } else { pfe.event_data.link_event.link_status = ls->link_info & I40E_AQ_LINK_UP; - pfe.event_data.link_event.link_speed = ls->link_speed; + pfe.event_data.link_event.link_speed = + (enum virtchnl_link_speed)ls->link_speed; } - i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, + i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, sizeof(pfe), NULL); }
@@@ -114,12 -113,12 +114,12 @@@ void i40e_vc_notify_link_state(struct i **/ void i40e_vc_notify_reset(struct i40e_pf *pf) { - struct i40e_virtchnl_pf_event pfe; + struct virtchnl_pf_event pfe;
- pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; - pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; - i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, 0, - (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event)); + pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; + pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; + i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0, + (u8 *)&pfe, sizeof(struct virtchnl_pf_event)); }
/** @@@ -130,7 -129,7 +130,7 @@@ **/ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) { - struct i40e_virtchnl_pf_event pfe; + struct virtchnl_pf_event pfe; int abs_vf_id;
/* validate the request */ @@@ -144,11 -143,11 +144,11 @@@
abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
- pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; - pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; - i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, + pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING; + pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM; + i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, - sizeof(struct i40e_virtchnl_pf_event), NULL); + sizeof(struct virtchnl_pf_event), NULL); } /***********************misc routines*****************************/
@@@ -251,7 -250,7 +251,7 @@@ static u16 i40e_vc_get_pf_queue_id(stru * configure irq link list from the map **/ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, - struct i40e_virtchnl_vector_map *vecmap) + struct virtchnl_vector_map *vecmap) { unsigned long linklistmap = 0, tempmap; struct i40e_pf *pf = vf->pf; @@@ -339,7 -338,7 +339,7 @@@ /* if the vf is running in polling mode and using interrupt zero, * need to disable auto-mask on enabling zero interrupt for VFs. */ - if ((vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) && + if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) && (vector_id == 0)) { reg = rd32(hw, I40E_GLINT_CTL); if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) { @@@ -360,7 -359,7 +360,7 @@@ irq_list_done static void i40e_release_iwarp_qvlist(struct i40e_vf *vf) { struct i40e_pf *pf = vf->pf; - struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info; + struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info; u32 msix_vf; u32 i;
@@@ -369,7 -368,7 +369,7 @@@
msix_vf = pf->hw.func_caps.num_msix_vectors_vf; for (i = 0; i < qvlist_info->num_vectors; i++) { - struct i40e_virtchnl_iwarp_qv_info *qv_info; + struct virtchnl_iwarp_qv_info *qv_info; u32 next_q_index, next_q_type; struct i40e_hw *hw = &pf->hw; u32 v_idx, reg_idx, reg; @@@ -410,17 -409,17 +410,17 @@@ * Return 0 on success or < 0 on error **/ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, - struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info) + struct virtchnl_iwarp_qvlist_info *qvlist_info) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; - struct i40e_virtchnl_iwarp_qv_info *qv_info; + struct virtchnl_iwarp_qv_info *qv_info; u32 v_idx, i, reg_idx, reg; u32 next_q_idx, next_q_type; u32 msix_vf, size;
- size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) + - (sizeof(struct i40e_virtchnl_iwarp_qv_info) * + size = sizeof(struct virtchnl_iwarp_qvlist_info) + + (sizeof(struct virtchnl_iwarp_qv_info) * (qvlist_info->num_vectors - 1)); vf->qvlist_info = kzalloc(size, GFP_KERNEL); vf->qvlist_info->num_vectors = qvlist_info->num_vectors; @@@ -493,7 -492,7 +493,7 @@@ err **/ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id, u16 vsi_queue_id, - struct i40e_virtchnl_txq_info *info) + struct virtchnl_txq_info *info) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; @@@ -570,7 -569,7 +570,7 @@@ error_context **/ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, u16 vsi_queue_id, - struct i40e_virtchnl_rxq_info *info) + struct virtchnl_rxq_info *info) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; @@@ -1018,7 -1017,7 +1018,7 @@@ static void i40e_cleanup_reset_vf(struc * after VF has been fully initialized, because the VF driver may * request resources immediately after setting this flag. */ - wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); + wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE); }
/** @@@ -1462,7 -1461,7 +1462,7 @@@ static int i40e_vc_send_msg_to_vf(struc * send resp msg to VF **/ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf, - enum i40e_virtchnl_ops opcode, + enum virtchnl_ops opcode, i40e_status retval) { return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0); @@@ -1476,17 -1475,18 +1476,17 @@@ **/ static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg) { - struct i40e_virtchnl_version_info info = { - I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR + struct virtchnl_version_info info = { + VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR };
- vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg; + vf->vf_ver = *(struct virtchnl_version_info *)msg; /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */ - if (VF_IS_V10(vf)) - info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; - return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION, + if (VF_IS_V10(&vf->vf_ver)) + info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; + return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS, (u8 *)&info, - sizeof(struct - i40e_virtchnl_version_info)); + sizeof(struct virtchnl_version_info)); }
/** @@@ -1499,7 -1499,7 +1499,7 @@@ **/ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) { - struct i40e_virtchnl_vf_resource *vfres = NULL; + struct virtchnl_vf_resource *vfres = NULL; struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; struct i40e_vsi *vsi; @@@ -1512,8 -1512,8 +1512,8 @@@ goto err; }
- len = (sizeof(struct i40e_virtchnl_vf_resource) + - sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis); + len = (sizeof(struct virtchnl_vf_resource) + + sizeof(struct virtchnl_vsi_resource) * num_vsis);
vfres = kzalloc(len, GFP_KERNEL); if (!vfres) { @@@ -1521,48 -1521,50 +1521,48 @@@ len = 0; goto err; } - if (VF_IS_V11(vf)) + if (VF_IS_V11(&vf->vf_ver)) vf->driver_caps = *(u32 *)msg; else - vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 | - I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | - I40E_VIRTCHNL_VF_OFFLOAD_VLAN; + vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 | + VIRTCHNL_VF_OFFLOAD_RSS_REG | + VIRTCHNL_VF_OFFLOAD_VLAN;
- vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2; + vfres->vf_offload_flags = VIRTCHNL_VF_OFFLOAD_L2; vsi = pf->vsi[vf->lan_vsi_idx]; if (!vsi->info.pvid) - vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN; + vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
if (i40e_vf_client_capable(pf, vf->vf_id) && - (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) { - vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP; + (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) { + vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_IWARP; set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states); }
- if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) { - vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF; + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { + vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; } else { if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) && - (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)) - vfres->vf_offload_flags |= - I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ; + (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)) + vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; else - vfres->vf_offload_flags |= - I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG; + vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; }
if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) { - if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) vfres->vf_offload_flags |= - I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; + VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; }
- if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP) - vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_ENCAP; + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP) + vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
if ((pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) && - (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) - vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; + (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)) + vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
- if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) { + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) { if (pf->flags & I40E_FLAG_MFP_ENABLED) { dev_err(&pf->pdev->dev, "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n", @@@ -1570,13 -1572,13 +1570,13 @@@ ret = I40E_ERR_PARAM; goto err; } - vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING; + vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; }
if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) { - if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) vfres->vf_offload_flags |= - I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; + VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; }
vfres->num_vsis = num_vsis; @@@ -1587,7 -1589,7 +1587,7 @@@
if (vf->lan_vsi_idx) { vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; - vfres->vsi_res[0].vsi_type = I40E_VSI_SRIOV; + vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs; /* VFs only use TC 0 */ vfres->vsi_res[0].qset_handle @@@ -1599,7 -1601,7 +1599,7 @@@
err: /* send the response back to the VF */ - ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES, + ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, aq_ret, (u8 *)vfres, len);
kfree(vfres); @@@ -1653,8 -1655,8 +1653,8 @@@ static inline int i40e_getnum_vf_vsi_vl static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_promisc_info *info = - (struct i40e_virtchnl_promisc_info *)msg; + struct virtchnl_promisc_info *info = + (struct virtchnl_promisc_info *)msg; struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; struct i40e_mac_filter *f; @@@ -1681,7 -1683,7 +1681,7 @@@ goto error_param; } /* Multicast promiscuous handling*/ - if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC) + if (info->flags & FLAG_VF_MULTICAST_PROMISC) allmulti = true;
if (vf->port_vlan_id) { @@@ -1732,7 -1734,7 +1732,7 @@@ clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); }
- if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC) + if (info->flags & FLAG_VF_UNICAST_PROMISC) alluni = true; if (vf->port_vlan_id) { aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid, @@@ -1786,7 -1788,7 +1786,7 @@@ error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, - I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, + VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, aq_ret); }
@@@ -1801,9 -1803,9 +1801,9 @@@ **/ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_vsi_queue_config_info *qci = - (struct i40e_virtchnl_vsi_queue_config_info *)msg; - struct i40e_virtchnl_queue_pair_info *qpi; + struct virtchnl_vsi_queue_config_info *qci = + (struct virtchnl_vsi_queue_config_info *)msg; + struct virtchnl_queue_pair_info *qpi; struct i40e_pf *pf = vf->pf; u16 vsi_id, vsi_queue_id; i40e_status aq_ret = 0; @@@ -1843,7 -1845,7 +1843,7 @@@
error_param: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, aq_ret); }
@@@ -1858,9 -1860,9 +1858,9 @@@ **/ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_irq_map_info *irqmap_info = - (struct i40e_virtchnl_irq_map_info *)msg; - struct i40e_virtchnl_vector_map *map; + struct virtchnl_irq_map_info *irqmap_info = + (struct virtchnl_irq_map_info *)msg; + struct virtchnl_vector_map *map; u16 vsi_id, vsi_queue_id, vector_id; i40e_status aq_ret = 0; unsigned long tempmap; @@@ -1906,7 -1908,7 +1906,7 @@@ } error_param: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, aq_ret); }
@@@ -1920,8 -1922,8 +1920,8 @@@ **/ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_queue_select *vqs = - (struct i40e_virtchnl_queue_select *)msg; + struct virtchnl_queue_select *vqs = + (struct virtchnl_queue_select *)msg; struct i40e_pf *pf = vf->pf; u16 vsi_id = vqs->vsi_id; i40e_status aq_ret = 0; @@@ -1945,7 -1947,7 +1945,7 @@@ aq_ret = I40E_ERR_TIMEOUT; error_param: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES, + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, aq_ret); }
@@@ -1960,8 -1962,8 +1960,8 @@@ **/ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_queue_select *vqs = - (struct i40e_virtchnl_queue_select *)msg; + struct virtchnl_queue_select *vqs = + (struct virtchnl_queue_select *)msg; struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0;
@@@ -1984,7 -1986,7 +1984,7 @@@
error_param: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES, + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, aq_ret); }
@@@ -1998,8 -2000,8 +1998,8 @@@ **/ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_queue_select *vqs = - (struct i40e_virtchnl_queue_select *)msg; + struct virtchnl_queue_select *vqs = + (struct virtchnl_queue_select *)msg; struct i40e_pf *pf = vf->pf; struct i40e_eth_stats stats; i40e_status aq_ret = 0; @@@ -2027,7 -2029,7 +2027,7 @@@
error_param: /* send the response back to the VF */ - return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret, + return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret, (u8 *)&stats, sizeof(stats)); }
@@@ -2086,8 -2088,8 +2086,8 @@@ static inline int i40e_check_vf_permiss **/ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_ether_addr_list *al = - (struct i40e_virtchnl_ether_addr_list *)msg; + struct virtchnl_ether_addr_list *al = + (struct virtchnl_ether_addr_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; u16 vsi_id = al->vsi_id; @@@ -2141,7 -2143,7 +2141,7 @@@
error_param: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR, ret); }
@@@ -2155,8 -2157,8 +2155,8 @@@ **/ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_ether_addr_list *al = - (struct i40e_virtchnl_ether_addr_list *)msg; + struct virtchnl_ether_addr_list *al = + (struct virtchnl_ether_addr_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; u16 vsi_id = al->vsi_id; @@@ -2201,7 -2203,7 +2201,7 @@@
error_param: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret); }
@@@ -2215,8 -2217,8 +2215,8 @@@ **/ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_vlan_filter_list *vfl = - (struct i40e_virtchnl_vlan_filter_list *)msg; + struct virtchnl_vlan_filter_list *vfl = + (struct virtchnl_vlan_filter_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; u16 vsi_id = vfl->vsi_id; @@@ -2275,7 -2277,7 +2275,7 @@@
error_param: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret); + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret); }
/** @@@ -2288,8 -2290,8 +2288,8 @@@ **/ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_vlan_filter_list *vfl = - (struct i40e_virtchnl_vlan_filter_list *)msg; + struct virtchnl_vlan_filter_list *vfl = + (struct virtchnl_vlan_filter_list *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; u16 vsi_id = vfl->vsi_id; @@@ -2333,7 -2335,7 +2333,7 @@@
error_param: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret); + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret); }
/** @@@ -2361,7 -2363,7 +2361,7 @@@ static int i40e_vc_iwarp_msg(struct i40
error_param: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_IWARP, + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP, aq_ret); }
@@@ -2377,8 -2379,8 +2377,8 @@@ static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen, bool config) { - struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info = - (struct i40e_virtchnl_iwarp_qvlist_info *)msg; + struct virtchnl_iwarp_qvlist_info *qvlist_info = + (struct virtchnl_iwarp_qvlist_info *)msg; i40e_status aq_ret = 0;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || @@@ -2397,8 -2399,8 +2397,8 @@@ error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, - config ? I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP : - I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, + config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP : + VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP, aq_ret); }
@@@ -2412,8 -2414,8 +2412,8 @@@ **/ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_rss_key *vrk = - (struct i40e_virtchnl_rss_key *)msg; + struct virtchnl_rss_key *vrk = + (struct virtchnl_rss_key *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; u16 vsi_id = vrk->vsi_id; @@@ -2430,7 -2432,7 +2430,7 @@@ aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0); err: /* send the response to the VF */ - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY, + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, aq_ret); }
@@@ -2444,8 -2446,8 +2444,8 @@@ **/ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_rss_lut *vrl = - (struct i40e_virtchnl_rss_lut *)msg; + struct virtchnl_rss_lut *vrl = + (struct virtchnl_rss_lut *)msg; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi = NULL; u16 vsi_id = vrl->vsi_id; @@@ -2462,7 -2464,7 +2462,7 @@@ aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE); /* send the response to the VF */ err: - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT, + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, aq_ret); }
@@@ -2476,7 -2478,7 +2476,7 @@@ **/ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_rss_hena *vrh = NULL; + struct virtchnl_rss_hena *vrh = NULL; struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; int len = 0; @@@ -2485,7 -2487,7 +2485,7 @@@ aq_ret = I40E_ERR_PARAM; goto err; } - len = sizeof(struct i40e_virtchnl_rss_hena); + len = sizeof(struct virtchnl_rss_hena);
vrh = kzalloc(len, GFP_KERNEL); if (!vrh) { @@@ -2496,7 -2498,7 +2496,7 @@@ vrh->hena = i40e_pf_get_default_rss_hena(pf); err: /* send the response back to the VF */ - aq_ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS, + aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS, aq_ret, (u8 *)vrh, len); kfree(vrh); return aq_ret; @@@ -2512,8 -2514,8 +2512,8 @@@ **/ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) { - struct i40e_virtchnl_rss_hena *vrh = - (struct i40e_virtchnl_rss_hena *)msg; + struct virtchnl_rss_hena *vrh = + (struct virtchnl_rss_hena *)msg; struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; i40e_status aq_ret = 0; @@@ -2528,7 -2530,170 +2528,7 @@@
/* send the response to the VF */ err: - return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_SET_RSS_HENA, - aq_ret); -} - -/** - * i40e_vc_validate_vf_msg - * @vf: pointer to the VF info - * @msg: pointer to the msg buffer - * @msglen: msg length - * @msghndl: msg handle - * - * validate msg - **/ -static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, - u32 v_retval, u8 *msg, u16 msglen) -{ - bool err_msg_format = false; - int valid_len = 0; - - /* Check if VF is disabled. */ - if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states)) - return I40E_ERR_PARAM; - - /* Validate message length. */ - switch (v_opcode) { - case I40E_VIRTCHNL_OP_VERSION: - valid_len = sizeof(struct i40e_virtchnl_version_info); - break; - case I40E_VIRTCHNL_OP_RESET_VF: - break; - case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: - if (VF_IS_V11(vf)) - valid_len = sizeof(u32); - break; - case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE: - valid_len = sizeof(struct i40e_virtchnl_txq_info); - break; - case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE: - valid_len = sizeof(struct i40e_virtchnl_rxq_info); - break; - case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: - valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info); - if (msglen >= valid_len) { - struct i40e_virtchnl_vsi_queue_config_info *vqc = - (struct i40e_virtchnl_vsi_queue_config_info *)msg; - valid_len += (vqc->num_queue_pairs * - sizeof(struct - i40e_virtchnl_queue_pair_info)); - if (vqc->num_queue_pairs == 0) - err_msg_format = true; - } - break; - case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: - valid_len = sizeof(struct i40e_virtchnl_irq_map_info); - if (msglen >= valid_len) { - struct i40e_virtchnl_irq_map_info *vimi = - (struct i40e_virtchnl_irq_map_info *)msg; - valid_len += (vimi->num_vectors * - sizeof(struct i40e_virtchnl_vector_map)); - if (vimi->num_vectors == 0) - err_msg_format = true; - } - break; - case I40E_VIRTCHNL_OP_ENABLE_QUEUES: - case I40E_VIRTCHNL_OP_DISABLE_QUEUES: - valid_len = sizeof(struct i40e_virtchnl_queue_select); - break; - case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: - case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: - valid_len = sizeof(struct i40e_virtchnl_ether_addr_list); - if (msglen >= valid_len) { - struct i40e_virtchnl_ether_addr_list *veal = - (struct i40e_virtchnl_ether_addr_list *)msg; - valid_len += veal->num_elements * - sizeof(struct i40e_virtchnl_ether_addr); - if (veal->num_elements == 0) - err_msg_format = true; - } - break; - case I40E_VIRTCHNL_OP_ADD_VLAN: - case I40E_VIRTCHNL_OP_DEL_VLAN: - valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list); - if (msglen >= valid_len) { - struct i40e_virtchnl_vlan_filter_list *vfl = - (struct i40e_virtchnl_vlan_filter_list *)msg; - valid_len += vfl->num_elements * sizeof(u16); - if (vfl->num_elements == 0) - err_msg_format = true; - } - break; - case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: - valid_len = sizeof(struct i40e_virtchnl_promisc_info); - break; - case I40E_VIRTCHNL_OP_GET_STATS: - valid_len = sizeof(struct i40e_virtchnl_queue_select); - break; - case I40E_VIRTCHNL_OP_IWARP: - /* These messages are opaque to us and will be validated in - * the RDMA client code. We just need to check for nonzero - * length. The firmware will enforce max length restrictions. 
- */ - if (msglen) - valid_len = msglen; - else - err_msg_format = true; - break; - case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: - valid_len = 0; - break; - case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: - valid_len = sizeof(struct i40e_virtchnl_iwarp_qvlist_info); - if (msglen >= valid_len) { - struct i40e_virtchnl_iwarp_qvlist_info *qv = - (struct i40e_virtchnl_iwarp_qvlist_info *)msg; - if (qv->num_vectors == 0) { - err_msg_format = true; - break; - } - valid_len += ((qv->num_vectors - 1) * - sizeof(struct i40e_virtchnl_iwarp_qv_info)); - } - break; - case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY: - valid_len = sizeof(struct i40e_virtchnl_rss_key); - if (msglen >= valid_len) { - struct i40e_virtchnl_rss_key *vrk = - (struct i40e_virtchnl_rss_key *)msg; - if (vrk->key_len != I40E_HKEY_ARRAY_SIZE) { - err_msg_format = true; - break; - } - valid_len += vrk->key_len - 1; - } - break; - case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT: - valid_len = sizeof(struct i40e_virtchnl_rss_lut); - if (msglen >= valid_len) { - struct i40e_virtchnl_rss_lut *vrl = - (struct i40e_virtchnl_rss_lut *)msg; - if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) { - err_msg_format = true; - break; - } - valid_len += vrl->lut_entries - 1; - } - break; - case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: - break; - case I40E_VIRTCHNL_OP_SET_RSS_HENA: - valid_len = sizeof(struct i40e_virtchnl_rss_hena); - break; - /* These are always errors coming from the VF. */ - case I40E_VIRTCHNL_OP_EVENT: - case I40E_VIRTCHNL_OP_UNKNOWN: - default: - return -EPERM; - } - /* few more checks */ - if ((valid_len != msglen) || (err_msg_format)) { - i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM); - return -EINVAL; - } else { - return 0; - } + return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret); }
/** @@@ -2554,104 -2719,80 +2554,104 @@@ int i40e_vc_process_vf_msg(struct i40e_ if (local_vf_id >= pf->num_alloc_vfs) return -EINVAL; vf = &(pf->vf[local_vf_id]); + + /* Check if VF is disabled. */ + if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states)) + return I40E_ERR_PARAM; + /* perform basic checks on the msg */ - ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen); + ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen); + + /* perform additional checks specific to this driver */ + if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) { + struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg; + + if (vrk->key_len != I40E_HKEY_ARRAY_SIZE) + ret = -EINVAL; + } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) { + struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg; + + if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) + ret = -EINVAL; + }
if (ret) { + i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM); dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", local_vf_id, v_opcode, msglen); - return ret; + switch (ret) { + case VIRTCHNL_ERR_PARAM: + return -EPERM; + default: + return -EINVAL; + } }
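The hunk above replaces the driver-private validator (removed further up) with the shared virtchnl_vc_validate_vf_msg(), then layers the two i40e-only length constraints on top. A sketch of that layering; every name below is hypothetical except the general shape:

        #include <errno.h>

        struct rss_key { unsigned short key_len; };  /* hypothetical */
        #define HW_KEY_LEN 52                        /* hypothetical HW limit */
        enum { OP_CONFIG_RSS_KEY = 23 };             /* hypothetical opcode */

        /* stand-in for the shared, device-agnostic validator */
        static int generic_validate(int opcode, const void *msg, int len)
        {
                return len > 0 ? 0 : -EINVAL;
        }

        static int validate_msg(int opcode, const void *msg, int len)
        {
                int ret = generic_validate(opcode, msg, len);

                if (ret)
                        return ret;
                /* constraints only this device knows about */
                if (opcode == OP_CONFIG_RSS_KEY &&
                    ((const struct rss_key *)msg)->key_len != HW_KEY_LEN)
                        return -EINVAL;
                return 0;
        }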
switch (v_opcode) { - case I40E_VIRTCHNL_OP_VERSION: + case VIRTCHNL_OP_VERSION: ret = i40e_vc_get_version_msg(vf, msg); break; - case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: + case VIRTCHNL_OP_GET_VF_RESOURCES: ret = i40e_vc_get_vf_resources_msg(vf, msg); break; - case I40E_VIRTCHNL_OP_RESET_VF: + case VIRTCHNL_OP_RESET_VF: i40e_vc_reset_vf_msg(vf); ret = 0; break; - case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: + case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE: ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES: + case VIRTCHNL_OP_CONFIG_VSI_QUEUES: ret = i40e_vc_config_queues_msg(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: + case VIRTCHNL_OP_CONFIG_IRQ_MAP: ret = i40e_vc_config_irq_map_msg(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_ENABLE_QUEUES: + case VIRTCHNL_OP_ENABLE_QUEUES: ret = i40e_vc_enable_queues_msg(vf, msg, msglen); i40e_vc_notify_vf_link_state(vf); break; - case I40E_VIRTCHNL_OP_DISABLE_QUEUES: + case VIRTCHNL_OP_DISABLE_QUEUES: ret = i40e_vc_disable_queues_msg(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS: + case VIRTCHNL_OP_ADD_ETH_ADDR: ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS: + case VIRTCHNL_OP_DEL_ETH_ADDR: ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_ADD_VLAN: + case VIRTCHNL_OP_ADD_VLAN: ret = i40e_vc_add_vlan_msg(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_DEL_VLAN: + case VIRTCHNL_OP_DEL_VLAN: ret = i40e_vc_remove_vlan_msg(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_GET_STATS: + case VIRTCHNL_OP_GET_STATS: ret = i40e_vc_get_stats_msg(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_IWARP: + case VIRTCHNL_OP_IWARP: ret = i40e_vc_iwarp_msg(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: + case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP: ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true); break; - case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: + case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false); break; - case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY: + case VIRTCHNL_OP_CONFIG_RSS_KEY: ret = i40e_vc_config_rss_key(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT: + case VIRTCHNL_OP_CONFIG_RSS_LUT: ret = i40e_vc_config_rss_lut(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: + case VIRTCHNL_OP_GET_RSS_HENA_CAPS: ret = i40e_vc_get_rss_hena(vf, msg, msglen); break; - case I40E_VIRTCHNL_OP_SET_RSS_HENA: + case VIRTCHNL_OP_SET_RSS_HENA: ret = i40e_vc_set_rss_hena(vf, msg, msglen); break;
- case I40E_VIRTCHNL_OP_UNKNOWN: + case VIRTCHNL_OP_UNKNOWN: default: dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", v_opcode, local_vf_id); @@@ -2876,10 -3017,12 +2876,12 @@@ int i40e_ndo_set_vf_port_vlan(struct ne VLAN_VID_MASK)); }
+ spin_unlock_bh(&vsi->mac_filter_hash_lock); if (vlan_id || qos) ret = i40e_vsi_add_pvid(vsi, vlanprio); else i40e_vsi_remove_pvid(vsi); + spin_lock_bh(&vsi->mac_filter_hash_lock);
if (vlan_id) { dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", @@@ -3077,7 -3220,7 +3079,7 @@@ int i40e_ndo_set_vf_link_state(struct n { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; - struct i40e_virtchnl_pf_event pfe; + struct virtchnl_pf_event pfe; struct i40e_hw *hw = &pf->hw; struct i40e_vf *vf; int abs_vf_id; @@@ -3093,8 -3236,8 +3095,8 @@@ vf = &pf->vf[vf_id]; abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
- pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; - pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; + pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; + pfe.severity = PF_EVENT_SEVERITY_INFO;
switch (link) { case IFLA_VF_LINK_STATE_AUTO: @@@ -3102,7 -3245,6 +3104,7 @@@ pfe.event_data.link_event.link_status = pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; pfe.event_data.link_event.link_speed = + (enum virtchnl_link_speed) pf->hw.phy.link_info.link_speed; break; case IFLA_VF_LINK_STATE_ENABLE: @@@ -3122,7 -3264,7 +3124,7 @@@ goto error_out; } /* Notify the VF of its new link state */ - i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, + i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, sizeof(pfe), NULL);
error_out: diff --combined drivers/net/ethernet/marvell/mvpp2.c index fe1458450e44,33c901622ed5..ca4b55c60682 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@@ -345,15 -345,9 +345,15 @@@ /* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0, * relative to port->base. */ +#define MVPP22_XLG_CTRL0_REG 0x100 +#define MVPP22_XLG_CTRL0_PORT_EN BIT(0) +#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1) +#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14) + #define MVPP22_XLG_CTRL3_REG 0x11c #define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13) #define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13) +#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13)
/* SMI registers. PPv2.2 only, relative to priv->iface_base. */ #define MVPP22_SMI_MISC_CFG_REG 0x1204 @@@ -3725,7 -3719,7 +3725,7 @@@ static void mvpp2_bm_bufs_get_addrs(str dma_addr_t *dma_addr, phys_addr_t *phys_addr) { - int cpu = smp_processor_id(); + int cpu = get_cpu();
*dma_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); @@@ -3746,6 -3740,8 +3746,8 @@@ if (sizeof(phys_addr_t) == 8) *phys_addr |= (u64)phys_addr_highbits << 32; } + + put_cpu(); }
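The mvpp2 hunks in this file swap smp_processor_id() for a get_cpu()/put_cpu() pair: the indirect per-CPU register sequence must not migrate between CPUs mid-access, and get_cpu() disables preemption until the matching put_cpu(). The shape of the pattern, sketched with the register accessors left as placeholders:

        static void percpu_reg_sequence(void)
        {
                int cpu = get_cpu();    /* disables preemption */

                /* ...mvpp2_percpu_read()/mvpp2_percpu_write() keyed on cpu... */

                put_cpu();              /* re-enables preemption */
        }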
/* Free all buffers from the pool */ @@@ -3926,18 -3922,12 +3928,12 @@@ static inline u32 mvpp2_bm_cookie_pool_ return bm; }
- /* Get pool number from a BM cookie */ - static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie) - { - return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF; - } - /* Release buffer to BM */ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, dma_addr_t buf_dma_addr, phys_addr_t buf_phys_addr) { - int cpu = smp_processor_id(); + int cpu = get_cpu();
if (port->priv->hw_version == MVPP22) { u32 val = 0; @@@ -3964,15 -3954,15 +3960,15 @@@ MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); mvpp2_percpu_write(port->priv, cpu, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); + + put_cpu(); }
/* Refill BM pool */ - static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm, + static void mvpp2_pool_refill(struct mvpp2_port *port, int pool, dma_addr_t dma_addr, phys_addr_t phys_addr) { - int pool = mvpp2_bm_cookie_pool_get(bm); - mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); }
@@@ -4192,19 -4182,11 +4188,17 @@@ static void mvpp22_port_mii_set(struct { u32 val;
- return; - /* Only GOP port 0 has an XLG MAC */ if (port->gop_id == 0) { val = readl(port->base + MVPP22_XLG_CTRL3_REG); val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; - val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; + + if (port->phy_interface == PHY_INTERFACE_MODE_XAUI || + port->phy_interface == PHY_INTERFACE_MODE_10GKR) + val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G; + else + val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; + writel(val, port->base + MVPP22_XLG_CTRL3_REG); }
@@@ -4254,40 -4236,19 +4248,40 @@@ static void mvpp2_port_enable(struct mv { u32 val;
- val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); - val |= MVPP2_GMAC_PORT_EN_MASK; - val |= MVPP2_GMAC_MIB_CNTR_EN_MASK; - writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); + /* Only GOP port 0 has an XLG MAC */ + if (port->gop_id == 0 && + (port->phy_interface == PHY_INTERFACE_MODE_XAUI || + port->phy_interface == PHY_INTERFACE_MODE_10GKR)) { + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + val |= MVPP22_XLG_CTRL0_PORT_EN | + MVPP22_XLG_CTRL0_MAC_RESET_DIS; + val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS; + writel(val, port->base + MVPP22_XLG_CTRL0_REG); + } else { + val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + val |= MVPP2_GMAC_PORT_EN_MASK; + val |= MVPP2_GMAC_MIB_CNTR_EN_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); + } }
static void mvpp2_port_disable(struct mvpp2_port *port) { u32 val;
- val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); - val &= ~(MVPP2_GMAC_PORT_EN_MASK); - writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); + /* Only GOP port 0 has an XLG MAC */ + if (port->gop_id == 0 && + (port->phy_interface == PHY_INTERFACE_MODE_XAUI || + port->phy_interface == PHY_INTERFACE_MODE_10GKR)) { + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + val &= ~(MVPP22_XLG_CTRL0_PORT_EN | + MVPP22_XLG_CTRL0_MAC_RESET_DIS); + writel(val, port->base + MVPP22_XLG_CTRL0_REG); + } else { + val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + val &= ~(MVPP2_GMAC_PORT_EN_MASK); + writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); + } }
/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */ @@@ -4548,21 -4509,6 +4542,6 @@@ static void mvpp2_rxq_offset_set(struc mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); }
- /* Obtain BM cookie information from descriptor */ - static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port, - struct mvpp2_rx_desc *rx_desc) - { - int cpu = smp_processor_id(); - int pool; - - pool = (mvpp2_rxdesc_status_get(port, rx_desc) & - MVPP2_RXD_BM_POOL_ID_MASK) >> - MVPP2_RXD_BM_POOL_ID_OFFS; - - return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) | - ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS); - } - /* Tx descriptors helper methods */
/* Get pointer to next Tx descriptor to be processed (send) by HW */ @@@ -4790,7 -4736,7 +4769,7 @@@ static void mvpp2_txp_max_tx_size_set(s static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { - int cpu = smp_processor_id(); + int cpu = get_cpu();
if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; @@@ -4798,6 -4744,8 +4777,8 @@@ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, rxq->pkts_coal); + + put_cpu(); }
static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) @@@ -4978,7 -4926,7 +4959,7 @@@ static int mvpp2_rxq_init(struct mvpp2_ mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
/* Set Rx descriptors queue starting address - indirect access */ - cpu = smp_processor_id(); + cpu = get_cpu(); mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); if (port->priv->hw_version == MVPP21) rxq_dma = rxq->descs_dma; @@@ -4987,6 -4935,7 +4968,7 @@@ mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); + put_cpu();
/* Set Offset */ mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); @@@ -5013,9 -4962,13 +4995,13 @@@ static void mvpp2_rxq_drop_pkts(struct
for (i = 0; i < rx_received; i++) { struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); - u32 bm = mvpp2_bm_cookie_build(port, rx_desc); + u32 status = mvpp2_rxdesc_status_get(port, rx_desc); + int pool; + + pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >> + MVPP2_RXD_BM_POOL_ID_OFFS;
- mvpp2_pool_refill(port, bm, + mvpp2_pool_refill(port, pool, mvpp2_rxdesc_dma_addr_get(port, rx_desc), mvpp2_rxdesc_cookie_get(port, rx_desc)); } @@@ -5045,10 -4998,11 +5031,11 @@@ static void mvpp2_rxq_deinit(struct mvp * free descriptor number */ mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); - cpu = smp_processor_id(); + cpu = get_cpu(); mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); + put_cpu(); }
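With the BM-cookie helpers gone, the pool id is read straight out of the rx descriptor status word with the usual mask-then-shift idiom. A standalone restatement with illustrative field values; the real MVPP2 offsets live in the driver header:

        #define BM_POOL_ID_OFFS 16
        #define BM_POOL_ID_MASK (0xfU << BM_POOL_ID_OFFS)   /* illustrative */

        static inline unsigned int pool_from_status(unsigned int status)
        {
                return (status & BM_POOL_ID_MASK) >> BM_POOL_ID_OFFS;
        }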
/* Create and initialize a Tx queue */ @@@ -5071,7 -5025,7 +5058,7 @@@ static int mvpp2_txq_init(struct mvpp2_ txq->last_desc = txq->size - 1;
/* Set Tx descriptors queue starting address - indirect access */ - cpu = smp_processor_id(); + cpu = get_cpu(); mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma); @@@ -5096,6 -5050,7 +5083,7 @@@ mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); + put_cpu();
/* WRR / EJP configuration - indirect access */ tx_port_num = mvpp2_egress_port(port); @@@ -5166,10 -5121,11 +5154,11 @@@ static void mvpp2_txq_deinit(struct mvp mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
/* Set Tx descriptors queue starting address and size */ - cpu = smp_processor_id(); + cpu = get_cpu(); mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); + put_cpu(); }
/* Cleanup Tx ports */ @@@ -5179,7 -5135,7 +5168,7 @@@ static void mvpp2_txq_clean(struct mvpp int delay, pending, cpu; u32 val;
- cpu = smp_processor_id(); + cpu = get_cpu(); mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); val |= MVPP2_TXQ_DRAIN_EN_MASK; @@@ -5206,6 -5162,7 +5195,7 @@@
val &= ~MVPP2_TXQ_DRAIN_EN_MASK; mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); + put_cpu();
for_each_present_cpu(cpu) { txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); @@@ -5453,7 -5410,7 +5443,7 @@@ static void mvpp2_rx_csum(struct mvpp2_
/* Reuse skb if possible, or allocate a new skb and add it to BM pool */ static int mvpp2_rx_refill(struct mvpp2_port *port, - struct mvpp2_bm_pool *bm_pool, u32 bm) + struct mvpp2_bm_pool *bm_pool, int pool) { dma_addr_t dma_addr; phys_addr_t phys_addr; @@@ -5465,7 -5422,7 +5455,7 @@@ if (!buf) return -ENOMEM;
- mvpp2_pool_refill(port, bm, dma_addr, phys_addr); + mvpp2_pool_refill(port, pool, dma_addr, phys_addr);
return 0; } @@@ -5523,7 -5480,7 +5513,7 @@@ static int mvpp2_rx(struct mvpp2_port * unsigned int frag_size; dma_addr_t dma_addr; phys_addr_t phys_addr; - u32 bm, rx_status; + u32 rx_status; int pool, rx_bytes, err; void *data;
@@@ -5535,8 -5492,8 +5525,8 @@@ phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); data = (void *)phys_to_virt(phys_addr);
- bm = mvpp2_bm_cookie_build(port, rx_desc); - pool = mvpp2_bm_cookie_pool_get(bm); + pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >> + MVPP2_RXD_BM_POOL_ID_OFFS; bm_pool = &port->priv->bm_pools[pool];
/* In case of an error, release the requested buffer pointer @@@ -5549,7 -5506,7 +5539,7 @@@ err_drop_frame dev->stats.rx_errors++; mvpp2_rx_error(port, rx_desc); /* Return the buffer to the pool */ - mvpp2_pool_refill(port, bm, dma_addr, phys_addr); + mvpp2_pool_refill(port, pool, dma_addr, phys_addr); continue; }
@@@ -5564,7 -5521,7 +5554,7 @@@ goto err_drop_frame; }
- err = mvpp2_rx_refill(port, bm_pool, bm); + err = mvpp2_rx_refill(port, bm_pool, pool); if (err) { netdev_err(port->dev, "failed to refill BM pools\n"); goto err_drop_frame; diff --combined drivers/net/ethernet/mellanox/mlx5/core/en.h index f4b95dbd1c7f,944fc1742464..a0516b0a5273 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@@ -458,13 -458,15 +458,15 @@@ struct mlx5e_mpw_info
struct mlx5e_rx_am_stats { int ppms; /* packets per msec */ + int bpms; /* bytes per msec */ int epms; /* events per msec */ };
struct mlx5e_rx_am_sample { - ktime_t time; - unsigned int pkt_ctr; - u16 event_ctr; + ktime_t time; + u32 pkt_ctr; + u32 byte_ctr; + u16 event_ctr; };
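One reading of the widened counters: making pkt_ctr a u32 (and adding byte_ctr for the new bpms statistic) keeps sample deltas cheap, since unsigned subtraction is defined modulo 2^32 and an end-minus-start delta stays correct across a counter wrap. A small illustration of that C property, not driver code:

        #include <stdint.h>

        static uint32_t ctr_delta(uint32_t start, uint32_t end)
        {
                return end - start;     /* start=0xfffffffe, end=5 -> 7 */
        }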
struct mlx5e_rx_am { /* Adaptive Moderation */ @@@ -623,8 -625,6 +625,8 @@@ struct mlx5e_tc_table
struct rhashtable_params ht_params; struct rhashtable ht; + + DECLARE_HASHTABLE(mod_hdr_tbl, 8); };
struct mlx5e_vlan_table { diff --combined drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 6c636c21d24f,8f5125ccd8d4..6380c2db355a --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@@ -376,9 -376,11 +376,9 @@@ static void del_rule(struct fs_node *no int err; bool update_fte = false;
- match_value = mlx5_vzalloc(match_len); - if (!match_value) { - mlx5_core_warn(dev, "failed to allocate inbox\n"); + match_value = kvzalloc(match_len, GFP_KERNEL); + if (!match_value) return; - }
fs_get_obj(rule, node); fs_get_obj(fte, rule->node.parent); @@@ -860,7 -862,7 +860,7 @@@ struct mlx5_flow_table *mlx5_create_vpo ft_attr.level = level; ft_attr.prio = prio;
- return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, 0); + return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport); }
struct mlx5_flow_table* @@@ -1155,7 -1157,7 +1155,7 @@@ static struct mlx5_flow_group *create_a if (!ft->autogroup.active) return ERR_PTR(-ENOENT);
- in = mlx5_vzalloc(inlen); + in = kvzalloc(inlen, GFP_KERNEL); if (!in) return ERR_PTR(-ENOMEM);
@@@ -1775,7 -1777,7 +1775,7 @@@ static struct mlx5_flow_root_namespace struct mlx5_flow_namespace *ns;
/* Create the root namespace */ - root_ns = mlx5_vzalloc(sizeof(*root_ns)); + root_ns = kvzalloc(sizeof(*root_ns), GFP_KERNEL); if (!root_ns) return NULL;
diff --combined drivers/net/ethernet/mellanox/mlx5/core/health.c index 80b23333de7a,f27f84ffbc85..c6679b21884e --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@@ -185,7 -185,6 +185,7 @@@ static void health_care(struct work_str struct mlx5_core_health *health; struct mlx5_core_dev *dev; struct mlx5_priv *priv; + unsigned long flags;
health = container_of(work, struct mlx5_core_health, work); priv = container_of(health, struct mlx5_priv, health); @@@ -193,13 -192,13 +193,13 @@@ mlx5_core_warn(dev, "handling bad device here\n"); mlx5_handle_bad_state(dev);
- spin_lock(&health->wq_lock); + spin_lock_irqsave(&health->wq_lock, flags); if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) schedule_delayed_work(&health->recover_work, recover_delay); else dev_err(&dev->pdev->dev, "new health works are not permitted at this stage\n"); - spin_unlock(&health->wq_lock); + spin_unlock_irqrestore(&health->wq_lock, flags); }
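health_care() now takes wq_lock with the irqsave variants because the same lock may be taken from contexts that run with interrupts disabled; a plain spin_lock() there risks deadlocking against an interrupt handler contending on the same CPU. The canonical shape, sketched with a placeholder lock:

        unsigned long flags;

        spin_lock_irqsave(&lock, flags);        /* save + disable local IRQs */
        /* ...touch state shared with IRQ context... */
        spin_unlock_irqrestore(&lock, flags);   /* restore previous IRQ state */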
static const char *hsynd_str(u8 synd) @@@ -270,30 -269,14 +270,28 @@@ static unsigned long get_next_poll_jiff return next; }
+void mlx5_trigger_health_work(struct mlx5_core_dev *dev) +{ + struct mlx5_core_health *health = &dev->priv.health; + unsigned long flags; + + spin_lock_irqsave(&health->wq_lock, flags); + if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) + queue_work(health->wq, &health->work); + else + dev_err(&dev->pdev->dev, + "new health works are not permitted at this stage\n"); + spin_unlock_irqrestore(&health->wq_lock, flags); +} + static void poll_health(unsigned long data) { struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data; struct mlx5_core_health *health = &dev->priv.health; u32 count;
- if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { - mod_timer(&health->timer, get_next_poll_jiffies()); - return; - } + if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) + goto out;
count = ioread32be(health->health_counter); if (count == health->prev) @@@ -305,15 -288,22 +303,16 @@@ if (health->miss_counter == MAX_MISSES) { dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n"); print_health_info(dev); - } else { - mod_timer(&health->timer, get_next_poll_jiffies()); }
if (in_fatal(dev) && !health->sick) { health->sick = true; print_health_info(dev); - spin_lock(&health->wq_lock); - if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) - queue_work(health->wq, &health->work); - else - dev_err(&dev->pdev->dev, - "new health works are not permitted at this stage\n"); - spin_unlock(&health->wq_lock); + mlx5_trigger_health_work(dev); } + + out: + mod_timer(&health->timer, get_next_poll_jiffies()); }
void mlx5_start_health_poll(struct mlx5_core_dev *dev) @@@ -342,11 -332,10 +341,11 @@@ void mlx5_stop_health_poll(struct mlx5_ void mlx5_drain_health_wq(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; + unsigned long flags;
- spin_lock(&health->wq_lock); + spin_lock_irqsave(&health->wq_lock, flags); set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); - spin_unlock(&health->wq_lock); + spin_unlock_irqrestore(&health->wq_lock, flags); cancel_delayed_work_sync(&health->recover_work); cancel_work_sync(&health->work); } diff --combined drivers/net/ethernet/mellanox/mlx5/core/main.c index 9274d93d3183,4f577a5abf88..dc890944c4ea --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@@ -56,7 -56,6 +56,7 @@@ #ifdef CONFIG_MLX5_CORE_EN #include "eswitch.h" #endif +#include "fpga/core.h"
MODULE_AUTHOR("Eli Cohen eli@mellanox.com"); MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver"); @@@ -538,8 -537,10 +538,10 @@@ static int handle_hca_cap(struct mlx5_c /* disable cmdif checksum */ MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
- /* If the HCA supports 4K UARs use it */ - if (MLX5_CAP_GEN_MAX(dev, uar_4k)) + /* Enable 4K UAR only when HCA supports it and page size is bigger + * than 4K. + */ + if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096) MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);
MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); @@@ -1106,16 -1107,10 +1108,16 @@@ static int mlx5_load_one(struct mlx5_co goto err_disable_msix; }
+ err = mlx5_fpga_device_init(dev); + if (err) { + dev_err(&pdev->dev, "fpga device init failed %d\n", err); + goto err_put_uars; + } + err = mlx5_start_eqs(dev); if (err) { dev_err(&pdev->dev, "Failed to start pages and async EQs\n"); - goto err_put_uars; + goto err_fpga_init; }
err = alloc_comp_eqs(dev); @@@ -1146,12 -1141,6 +1148,12 @@@ goto err_sriov; }
+ err = mlx5_fpga_device_start(dev); + if (err) { + dev_err(&pdev->dev, "fpga device start failed %d\n", err); + goto err_reg_dev; + } + if (mlx5_device_registered(dev)) { mlx5_attach_device(dev); } else { @@@ -1187,9 -1176,6 +1189,9 @@@ err_affinity_hints err_stop_eqs: mlx5_stop_eqs(dev);
+err_fpga_init: + mlx5_fpga_device_cleanup(dev); + err_put_uars: mlx5_put_uars_page(dev, priv->uar);
@@@ -1254,7 -1240,6 +1256,7 @@@ static int mlx5_unload_one(struct mlx5_ mlx5_irq_clear_affinity_hints(dev); free_comp_eqs(dev); mlx5_stop_eqs(dev); + mlx5_fpga_device_cleanup(dev); mlx5_put_uars_page(dev, priv->uar); mlx5_disable_msix(dev); if (cleanup) @@@ -1529,8 -1514,6 +1531,8 @@@ static const struct pci_device_id mlx5_ { PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 Ex VF */ { PCI_VDEVICE(MELLANOX, 0x101b) }, /* ConnectX-6 */ { PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF}, /* ConnectX-6 VF */ + { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ + { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ { 0, } };
diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f446f368dd20,d16d11bfc046..6a1cb59728fe --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@@ -235,17 -235,6 +235,17 @@@ static void stmmac_clk_csr_set(struct s else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M)) priv->clk_csr = STMMAC_CSR_250_300M; } + + if (priv->plat->has_sun8i) { + if (clk_rate > 160000000) + priv->clk_csr = 0x03; + else if (clk_rate > 80000000) + priv->clk_csr = 0x02; + else if (clk_rate > 40000000) + priv->clk_csr = 0x01; + else + priv->clk_csr = 0; + } }
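The new sun8i branch in stmmac_clk_csr_set() maps the clock rate onto a CSR divider code with three thresholds. Restated as a standalone helper with the same logic as the hunk above, rates in Hz:

        static unsigned int sun8i_clk_csr(unsigned long clk_rate)
        {
                if (clk_rate > 160000000)
                        return 0x03;
                if (clk_rate > 80000000)
                        return 0x02;
                if (clk_rate > 40000000)
                        return 0x01;
                return 0;       /* e.g. a 25 MHz clock selects code 0 */
        }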
static void print_pkt(unsigned char *buf, int len) @@@ -445,14 -434,14 +445,14 @@@ static void stmmac_get_tx_hwtstamp(stru return;
/* check tx tstamp status */ - if (!priv->hw->desc->get_tx_timestamp_status(p)) { + if (priv->hw->desc->get_tx_timestamp_status(p)) { /* get the valid tstamp */ ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamp.hwtstamp = ns_to_ktime(ns);
- netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns); + netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); /* pass tstamp to stack */ skb_tstamp_tx(skb, &shhwtstamp); } @@@ -479,19 -468,19 +479,19 @@@ static void stmmac_get_rx_hwtstamp(stru return;
/* Check if timestamp is available */ - if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { + if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { /* For GMAC4, the valid timestamp is from CTX next desc. */ if (priv->plat->has_gmac4) ns = priv->hw->desc->get_timestamp(np, priv->adv_ts); else ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
- netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns); + netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); shhwtstamp = skb_hwtstamps(skb); memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamp->hwtstamp = ns_to_ktime(ns); } else { - netdev_err(priv->dev, "cannot get RX hw timestamp\n"); + netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); } }
@@@ -557,7 -546,10 +557,10 @@@ static int stmmac_hwtstamp_ioctl(struc /* PTP v1, UDP, any kind of event packet */ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; /* take time stamp for all event messages */ - snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + if (priv->plat->has_gmac4) + snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; + else + snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; @@@ -589,7 -581,10 +592,10 @@@ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; ptp_v2 = PTP_TCR_TSVER2ENA; /* take time stamp for all event messages */ - snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + if (priv->plat->has_gmac4) + snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; + else + snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; @@@ -623,7 -618,10 +629,10 @@@ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; ptp_v2 = PTP_TCR_TSVER2ENA; /* take time stamp for all event messages */ - snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + if (priv->plat->has_gmac4) + snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; + else + snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; @@@ -655,7 -653,6 +664,7 @@@ ptp_over_ethernet = PTP_TCR_TSIPENA; break;
+ case HWTSTAMP_FILTER_NTP_ALL: case HWTSTAMP_FILTER_ALL: /* time stamp any incoming packet */ config.rx_filter = HWTSTAMP_FILTER_ALL; @@@ -786,7 -783,7 +795,7 @@@ static void stmmac_adjust_link(struct n struct stmmac_priv *priv = netdev_priv(dev); struct phy_device *phydev = dev->phydev; unsigned long flags; - int new_state = 0; + bool new_state = false;
if (!phydev) return; @@@ -799,8 -796,8 +808,8 @@@ /* Now we make sure that we can be in full duplex mode. * If not, we operate in half-duplex mode. */ if (phydev->duplex != priv->oldduplex) { - new_state = 1; - if (!(phydev->duplex)) + new_state = true; + if (!phydev->duplex) ctrl &= ~priv->hw->link.duplex; else ctrl |= priv->hw->link.duplex; @@@ -811,17 -808,30 +820,17 @@@ stmmac_mac_flow_ctrl(priv, phydev->duplex);
if (phydev->speed != priv->speed) { - new_state = 1; + new_state = true; + ctrl &= ~priv->hw->link.speed_mask; switch (phydev->speed) { - case 1000: - if (priv->plat->has_gmac || - priv->plat->has_gmac4) - ctrl &= ~priv->hw->link.port; + case SPEED_1000: + ctrl |= priv->hw->link.speed1000; break; - case 100: - if (priv->plat->has_gmac || - priv->plat->has_gmac4) { - ctrl |= priv->hw->link.port; - ctrl |= priv->hw->link.speed; - } else { - ctrl &= ~priv->hw->link.port; - } + case SPEED_100: + ctrl |= priv->hw->link.speed100; break; - case 10: - if (priv->plat->has_gmac || - priv->plat->has_gmac4) { - ctrl |= priv->hw->link.port; - ctrl &= ~(priv->hw->link.speed); - } else { - ctrl &= ~priv->hw->link.port; - } + case SPEED_10: + ctrl |= priv->hw->link.speed10; break; default: netif_warn(priv, link, priv->dev, @@@ -837,12 -847,12 +846,12 @@@ writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
if (!priv->oldlink) { - new_state = 1; - priv->oldlink = 1; + new_state = true; + priv->oldlink = true; } } else if (priv->oldlink) { - new_state = 1; - priv->oldlink = 0; + new_state = true; + priv->oldlink = false; priv->speed = SPEED_UNKNOWN; priv->oldduplex = DUPLEX_UNKNOWN; } @@@ -905,7 -915,7 +914,7 @@@ static int stmmac_init_phy(struct net_d char bus_id[MII_BUS_ID_SIZE]; int interface = priv->plat->interface; int max_speed = priv->plat->max_speed; - priv->oldlink = 0; + priv->oldlink = false; priv->speed = SPEED_UNKNOWN; priv->oldduplex = DUPLEX_UNKNOWN;
@@@ -2878,7 -2888,8 +2887,7 @@@ static netdev_tx_t stmmac_tso_xmit(stru priv->xstats.tx_set_ic_bit++; }
- if (!priv->hwts_tx_en) - skb_tx_timestamp(skb); + skb_tx_timestamp(skb);
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)) { @@@ -3082,7 -3093,8 +3091,7 @@@ static netdev_tx_t stmmac_xmit(struct s priv->xstats.tx_set_ic_bit++; }
- if (!priv->hwts_tx_en) - skb_tx_timestamp(skb); + skb_tx_timestamp(skb);
/* Ready to fill the first descriptor and set the OWN bit w/o any * problems because all the descriptors are actually ready to be @@@ -3945,9 -3957,7 +3954,9 @@@ static int stmmac_hw_init(struct stmmac struct mac_device_info *mac;
/* Identify the MAC HW device */ - if (priv->plat->has_gmac) { + if (priv->plat->setup) { + mac = priv->plat->setup(priv); + } else if (priv->plat->has_gmac) { priv->dev->priv_flags |= IFF_UNICAST_FLT; mac = dwmac1000_setup(priv->ioaddr, priv->plat->multicast_filter_bins, @@@ -3967,10 -3977,6 +3976,10 @@@
priv->hw = mac;
+ /* dwmac-sun8i only work in chain mode */ + if (priv->plat->has_sun8i) + chain_mode = 1; + /* To use the chained or ring mode */ if (priv->synopsys_id >= DWMAC_CORE_4_00) { priv->hw->mode = &dwmac4_ring_mode_ops; @@@ -4296,7 -4302,7 +4305,7 @@@ int stmmac_suspend(struct device *dev } spin_unlock_irqrestore(&priv->lock, flags);
- priv->oldlink = 0; + priv->oldlink = false; priv->speed = SPEED_UNKNOWN; priv->oldduplex = DUPLEX_UNKNOWN; return 0; diff --combined drivers/net/geneve.c index ff626dbde23f,199459bd6961..7bcf1b52020e --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@@ -212,7 -212,6 +212,7 @@@ static void geneve_rx(struct geneve_de struct genevehdr *gnvh = geneve_hdr(skb); struct metadata_dst *tun_dst = NULL; struct pcpu_sw_netstats *stats; + unsigned int len; int err = 0; void *oiph;
@@@ -226,10 -225,8 +226,10 @@@ tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags, vni_to_tunnel_id(gnvh->vni), gnvh->opt_len * 4); - if (!tun_dst) + if (!tun_dst) { + geneve->dev->stats.rx_dropped++; goto drop; + } /* Update tunnel dst according to Geneve options. */ ip_tunnel_info_opts_set(&tun_dst->u.tun_info, gnvh->options, gnvh->opt_len * 4); @@@ -237,11 -234,8 +237,11 @@@ /* Drop packets w/ critical options, * since we don't support any... */ - if (gnvh->critical) + if (gnvh->critical) { + geneve->dev->stats.rx_frame_errors++; + geneve->dev->stats.rx_errors++; goto drop; + } }
skb_reset_mac_header(skb); @@@ -252,10 -246,8 +252,10 @@@ skb_dst_set(skb, &tun_dst->dst);
/* Ignore packet loops (and multicast echo) */ - if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) + if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) { + geneve->dev->stats.rx_errors++; goto drop; + }
oiph = skb_network_header(skb); skb_reset_network_header(skb); @@@ -287,15 -279,13 +287,15 @@@ } }
- stats = this_cpu_ptr(geneve->dev->tstats); - u64_stats_update_begin(&stats->syncp); - stats->rx_packets++; - stats->rx_bytes += skb->len; - u64_stats_update_end(&stats->syncp); - - gro_cells_receive(&geneve->gro_cells, skb); + len = skb->len; + err = gro_cells_receive(&geneve->gro_cells, skb); + if (likely(err == NET_RX_SUCCESS)) { + stats = this_cpu_ptr(geneve->dev->tstats); + u64_stats_update_begin(&stats->syncp); + stats->rx_packets++; + stats->rx_bytes += len; + u64_stats_update_end(&stats->syncp); + } return; drop: /* Consume bad packet */ @@@ -344,7 -334,7 +344,7 @@@ static int geneve_udp_encap_recv(struc struct geneve_sock *gs; int opts_len;
- /* Need Geneve and inner Ethernet header to be present */ + /* Need UDP and Geneve header to be present */ if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN))) goto drop;
@@@ -367,10 -357,8 +367,10 @@@ opts_len = geneveh->opt_len * 4; if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, htons(ETH_P_TEB), - !net_eq(geneve->net, dev_net(geneve->dev)))) + !net_eq(geneve->net, dev_net(geneve->dev)))) { + geneve->dev->stats.rx_dropped++; goto drop; + }
geneve_rx(geneve, gs, skb); return 0; @@@ -1019,7 -1007,7 +1019,7 @@@ static void geneve_setup(struct net_dev
dev->netdev_ops = &geneve_netdev_ops; dev->ethtool_ops = &geneve_ethtool_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true;
SET_NETDEV_DEVTYPE(dev, &geneve_type);
diff --combined drivers/net/hyperv/hyperv_net.h index f82d54e0208c,6066f1bcaf2d..b30a3c2f772b --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@@ -171,6 -171,8 +171,8 @@@ struct rndis_device spinlock_t request_lock; struct list_head req_list;
+ struct work_struct mcast_work; + u8 hw_mac_adr[ETH_ALEN]; u8 rss_key[NETVSC_HASH_KEYLEN]; u16 ind_table[ITAB_NUM]; @@@ -201,6 -203,7 +203,7 @@@ int rndis_filter_open(struct netvsc_dev int rndis_filter_close(struct netvsc_device *nvdev); int rndis_filter_device_add(struct hv_device *dev, struct netvsc_device_info *info); + void rndis_filter_update(struct netvsc_device *nvdev); void rndis_filter_device_remove(struct hv_device *dev, struct netvsc_device *nvdev); int rndis_filter_set_rss_param(struct rndis_device *rdev, @@@ -211,7 -214,6 +214,6 @@@ int rndis_filter_receive(struct net_dev struct vmbus_channel *channel, void *data, u32 buflen);
- int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter); int rndis_filter_set_device_mac(struct net_device *ndev, char *mac);
void netvsc_switch_datapath(struct net_device *nv_dev, bool vf); @@@ -696,7 -698,6 +698,6 @@@ struct net_device_context /* list protection */ spinlock_t lock;
- struct work_struct work; u32 msg_enable; /* debug level */
u32 tx_checksum_mask; @@@ -763,7 -764,8 +764,7 @@@ struct netvsc_device
refcount_t sc_offered;
- /* Holds rndis device info */ - void *extension; + struct rndis_device *extension;
int ring_size;
diff --combined drivers/net/hyperv/netvsc_drv.c index 436a3ad55cfd,82d6c022ca85..b65a97ecb78e --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@@ -37,8 -37,6 +37,8 @@@ #include <net/route.h> #include <net/sock.h> #include <net/pkt_sched.h> +#include <net/checksum.h> +#include <net/ip6_checksum.h>
#include "hyperv_net.h"
@@@ -58,37 -56,12 +58,12 @@@ static int debug = -1 module_param(debug, int, S_IRUGO); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
- static void do_set_multicast(struct work_struct *w) - { - struct net_device_context *ndevctx = - container_of(w, struct net_device_context, work); - struct hv_device *device_obj = ndevctx->device_ctx; - struct net_device *ndev = hv_get_drvdata(device_obj); - struct netvsc_device *nvdev = rcu_dereference(ndevctx->nvdev); - struct rndis_device *rdev; - - if (!nvdev) - return; - - rdev = nvdev->extension; - if (rdev == NULL) - return; - - if (ndev->flags & IFF_PROMISC) - rndis_filter_set_packet_filter(rdev, - NDIS_PACKET_TYPE_PROMISCUOUS); - else - rndis_filter_set_packet_filter(rdev, - NDIS_PACKET_TYPE_BROADCAST | - NDIS_PACKET_TYPE_ALL_MULTICAST | - NDIS_PACKET_TYPE_DIRECTED); - } - static void netvsc_set_multicast_list(struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); + struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
- schedule_work(&net_device_ctx->work); + rndis_filter_update(nvdev); }
static int netvsc_open(struct net_device *net) @@@ -120,13 -93,11 +95,11 @@@ static int netvsc_close(struct net_devi struct net_device_context *net_device_ctx = netdev_priv(net); struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); int ret; - u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20; + u32 aread, i, msec = 10, retry = 0, retry_max = 20; struct vmbus_channel *chn;
netif_tx_disable(net);
- /* Make sure netvsc_set_multicast_list doesn't re-enable filter! */ - cancel_work_sync(&net_device_ctx->work); ret = rndis_filter_close(nvdev); if (ret != 0) { netdev_err(net, "unable to close device (ret %d).\n", ret); @@@ -141,11 -112,15 +114,11 @@@ if (!chn) continue;
- hv_get_ringbuffer_availbytes(&chn->inbound, &aread, - &awrite); - + aread = hv_get_bytes_to_read(&chn->inbound); if (aread) break;
- hv_get_ringbuffer_availbytes(&chn->outbound, &aread, - &awrite); - + aread = hv_get_bytes_to_read(&chn->outbound); if (aread) break; } @@@ -341,14 -316,34 +314,14 @@@ static u32 init_page_array(void *hdr, u return slots_used; }
-static int count_skb_frag_slots(struct sk_buff *skb) -{ - int i, frags = skb_shinfo(skb)->nr_frags; - int pages = 0; - - for (i = 0; i < frags; i++) { - skb_frag_t *frag = skb_shinfo(skb)->frags + i; - unsigned long size = skb_frag_size(frag); - unsigned long offset = frag->page_offset; - - /* Skip unused frames from start of page */ - offset &= ~PAGE_MASK; - pages += PFN_UP(offset + size); - } - return pages; -} - -static int netvsc_get_slots(struct sk_buff *skb) +/* Estimate number of page buffers needed to transmit + * Need at most 2 for RNDIS header plus skb body and fragments. + */ +static unsigned int netvsc_get_slots(const struct sk_buff *skb) { - char *data = skb->data; - unsigned int offset = offset_in_page(data); - unsigned int len = skb_headlen(skb); - int slots; - int frag_slots; - - slots = DIV_ROUND_UP(offset + len, PAGE_SIZE); - frag_slots = count_skb_frag_slots(skb); - return slots + frag_slots; + return PFN_UP(offset_in_page(skb->data) + skb_headlen(skb)) + + skb_shinfo(skb)->nr_frags + + 2; }
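The rewritten estimator trades the exact per-fragment page walk for a cheap bound. A worked example of the new formula, assuming 4 KiB pages, linear data starting at page offset 0xf00 with skb_headlen() of 0x300, and three paged fragments:

	/* PFN_UP(0xf00 + 0x300) = PFN_UP(0x1200) = 2   (linear part)   */
	/* + 3                                          (nr_frags)      */
	/* + 2                                          (RNDIS header)  */
	/* = 7 page buffers; each fragment is assumed to fit one slot   */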
static u32 net_checksum_info(struct sk_buff *skb) @@@ -386,18 -381,21 +359,18 @@@ static int netvsc_start_xmit(struct sk_ struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; struct hv_page_buffer *pb = page_buf;
- /* We will atmost need two pages to describe the rndis - * header. We can only transmit MAX_PAGE_BUFFER_COUNT number + /* We can only transmit MAX_PAGE_BUFFER_COUNT number * of pages in a single packet. If skb is scattered around * more pages we try linearizing it. */ - - num_data_pgs = netvsc_get_slots(skb) + 2; - + num_data_pgs = netvsc_get_slots(skb); if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) { ++net_device_ctx->eth_stats.tx_scattered;
if (skb_linearize(skb)) goto no_memory;
- num_data_pgs = netvsc_get_slots(skb) + 2; + num_data_pgs = netvsc_get_slots(skb); if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) { ++net_device_ctx->eth_stats.tx_too_big; goto drop; @@@ -1003,7 -1001,7 +976,7 @@@ static const struct static int netvsc_get_sset_count(struct net_device *dev, int string_set) { struct net_device_context *ndc = netdev_priv(dev); - struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev); + struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
if (!nvdev) return -ENODEV; @@@ -1133,11 -1131,22 +1106,22 @@@ netvsc_get_rxnfc(struct net_device *dev }
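The get_sset_count hunk above switches from rcu_dereference() to rtnl_dereference(): ethtool handlers run with the RTNL held, so the lock itself protects the pointer and no RCU read section is needed. A side-by-side sketch of the two accessors:

	rcu_read_lock();
	nvdev = rcu_dereference(ndc->nvdev);	/* valid only inside the RCU section */
	rcu_read_unlock();

	ASSERT_RTNL();
	nvdev = rtnl_dereference(ndc->nvdev);	/* valid for as long as RTNL is held */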
#ifdef CONFIG_NET_POLL_CONTROLLER - static void netvsc_poll_controller(struct net_device *net) + static void netvsc_poll_controller(struct net_device *dev) { - /* As netvsc_start_xmit() works synchronous we don't have to - * trigger anything here. - */ + struct net_device_context *ndc = netdev_priv(dev); + struct netvsc_device *ndev; + int i; + + rcu_read_lock(); + ndev = rcu_dereference(ndc->nvdev); + if (ndev) { + for (i = 0; i < ndev->num_chn; i++) { + struct netvsc_channel *nvchan = &ndev->chan_table[i]; + + napi_schedule(&nvchan->napi); + } + } + rcu_read_unlock(); } #endif
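Instead of a no-op, the poll controller now kicks NAPI on every channel so pending completions are processed even when interrupts are unavailable (e.g. netconsole). Loosely, netpoll reaches it like this (a sketch, not the exact core code):

	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_poll_controller)
		ops->ndo_poll_controller(dev);	/* -> netvsc_poll_controller() */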
@@@ -1527,7 -1536,6 +1511,6 @@@ static int netvsc_probe(struct hv_devic hv_set_drvdata(dev, net);
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); - INIT_WORK(&net_device_ctx->work, do_set_multicast);
spin_lock_init(&net_device_ctx->lock); INIT_LIST_HEAD(&net_device_ctx->reconfig_events); @@@ -1597,7 -1605,6 +1580,6 @@@ static int netvsc_remove(struct hv_devi netif_device_detach(net);
cancel_delayed_work_sync(&ndev_ctx->dwork); - cancel_work_sync(&ndev_ctx->work);
/* * Call to the vsc driver to let it know that the device is being diff --combined drivers/net/ipvlan/ipvlan_main.c index e4141d62b5c3,7c7680c8f0e3..dc888dd344eb --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@@ -632,7 -632,7 +632,7 @@@ void ipvlan_link_setup(struct net_devic dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE; dev->netdev_ops = &ipvlan_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->header_ops = &ipvlan_header_ops; dev->ethtool_ops = &ipvlan_ethtool_ops; } @@@ -824,33 -824,6 +824,33 @@@ static int ipvlan_addr6_event(struct no return NOTIFY_OK; }
+static int ipvlan_addr6_validator_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct in6_validator_info *i6vi = (struct in6_validator_info *)ptr; + struct net_device *dev = (struct net_device *)i6vi->i6vi_dev->dev; + struct ipvl_dev *ipvlan = netdev_priv(dev); + + /* FIXME IPv6 autoconf calls us from bh without RTNL */ + if (in_softirq()) + return NOTIFY_DONE; + + if (!netif_is_ipvlan(dev)) + return NOTIFY_DONE; + + if (!ipvlan || !ipvlan->port) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_UP: + if (ipvlan_addr_busy(ipvlan->port, &i6vi->i6vi_addr, true)) + return notifier_from_errno(-EADDRINUSE); + break; + } + + return NOTIFY_OK; +} + static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) { if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) { @@@ -898,37 -871,10 +898,37 @@@ static int ipvlan_addr4_event(struct no return NOTIFY_OK; }
+static int ipvlan_addr4_validator_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct in_validator_info *ivi = (struct in_validator_info *)ptr; + struct net_device *dev = (struct net_device *)ivi->ivi_dev->dev; + struct ipvl_dev *ipvlan = netdev_priv(dev); + + if (!netif_is_ipvlan(dev)) + return NOTIFY_DONE; + + if (!ipvlan || !ipvlan->port) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_UP: + if (ipvlan_addr_busy(ipvlan->port, &ivi->ivi_addr, false)) + return notifier_from_errno(-EADDRINUSE); + break; + } + + return NOTIFY_OK; +} + static struct notifier_block ipvlan_addr4_notifier_block __read_mostly = { .notifier_call = ipvlan_addr4_event, };
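Both validators reject a duplicate address by encoding -EADDRINUSE into the notifier return value. A minimal sketch of the consuming side, assuming the usual notifier helpers; the real validator chain lives in the inet core, so the chain name here is illustrative:

	ret = blocking_notifier_call_chain(&chain, NETDEV_UP, &ivi);
	ret = notifier_to_errno(ret);	/* recovers -EADDRINUSE */
	if (ret)
		return ret;		/* the address is never installed */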
+static struct notifier_block ipvlan_addr4_vtor_notifier_block __read_mostly = { + .notifier_call = ipvlan_addr4_validator_event, +}; + static struct notifier_block ipvlan_notifier_block __read_mostly = { .notifier_call = ipvlan_device_event, }; @@@ -937,10 -883,6 +937,10 @@@ static struct notifier_block ipvlan_add .notifier_call = ipvlan_addr6_event, };
+static struct notifier_block ipvlan_addr6_vtor_notifier_block __read_mostly = { + .notifier_call = ipvlan_addr6_validator_event, +}; + static void ipvlan_ns_exit(struct net *net) { struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid); @@@ -965,10 -907,7 +965,10 @@@ static int __init ipvlan_init_module(vo ipvlan_init_secret(); register_netdevice_notifier(&ipvlan_notifier_block); register_inet6addr_notifier(&ipvlan_addr6_notifier_block); + register_inet6addr_validator_notifier( + &ipvlan_addr6_vtor_notifier_block); register_inetaddr_notifier(&ipvlan_addr4_notifier_block); + register_inetaddr_validator_notifier(&ipvlan_addr4_vtor_notifier_block);
err = register_pernet_subsys(&ipvlan_net_ops); if (err < 0) @@@ -983,11 -922,7 +983,11 @@@ return 0; error: unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block); + unregister_inetaddr_validator_notifier( + &ipvlan_addr4_vtor_notifier_block); unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block); + unregister_inet6addr_validator_notifier( + &ipvlan_addr6_vtor_notifier_block); unregister_netdevice_notifier(&ipvlan_notifier_block); return err; } @@@ -998,11 -933,7 +998,11 @@@ static void __exit ipvlan_cleanup_modul unregister_pernet_subsys(&ipvlan_net_ops); unregister_netdevice_notifier(&ipvlan_notifier_block); unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block); + unregister_inetaddr_validator_notifier( + &ipvlan_addr4_vtor_notifier_block); unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block); + unregister_inet6addr_validator_notifier( + &ipvlan_addr6_vtor_notifier_block); }
module_init(ipvlan_init_module); diff --combined drivers/net/macsec.c index b79513b8322f,79411675f0e6..2067dcc71535 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@@ -588,6 -588,8 +588,6 @@@ static void count_tx(struct net_device stats->tx_packets++; stats->tx_bytes += len; u64_stats_update_end(&stats->syncp); - } else { - dev->stats.tx_dropped++; } }
@@@ -740,12 -742,7 +740,12 @@@ static struct sk_buff *macsec_encrypt(s macsec_fill_iv(iv, secy->sci, pn);
sg_init_table(sg, ret); - skb_to_sgvec(skb, sg, 0, skb->len); + ret = skb_to_sgvec(skb, sg, 0, skb->len); + if (unlikely(ret < 0)) { + macsec_txsa_put(tx_sa); + kfree_skb(skb); + return ERR_PTR(ret); + }
if (tx_sc->encrypt) { int len = skb->len - macsec_hdr_len(sci_present) - @@@ -886,7 -883,7 +886,7 @@@ static void macsec_decrypt_done(struct struct macsec_dev *macsec = macsec_priv(dev); struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa; struct macsec_rx_sc *rx_sc = rx_sa->sc; - int len, ret; + int len; u32 pn;
aead_request_free(macsec_skb_cb(skb)->req); @@@ -907,8 -904,11 +907,8 @@@ macsec_reset_skb(skb, macsec->secy.netdev);
len = skb->len; - ret = gro_cells_receive(&macsec->gro_cells, skb); - if (ret == NET_RX_SUCCESS) + if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS) count_rx(dev, len); - else - macsec->secy.netdev->stats.rx_dropped++;
rcu_read_unlock_bh();
@@@ -952,11 -952,7 +952,11 @@@ static struct sk_buff *macsec_decrypt(s macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));
sg_init_table(sg, ret); - skb_to_sgvec(skb, sg, 0, skb->len); + ret = skb_to_sgvec(skb, sg, 0, skb->len); + if (unlikely(ret < 0)) { + kfree_skb(skb); + return ERR_PTR(ret); + }
if (hdr->tci_an & MACSEC_TCI_E) { /* confidentiality: ethernet + macsec header @@@ -1041,6 -1037,7 +1041,6 @@@ static void handle_not_macsec(struct sk */ list_for_each_entry_rcu(macsec, &rxd->secys, secys) { struct sk_buff *nskb; - int ret; struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) { @@@ -1057,10 -1054,13 +1057,10 @@@
nskb->dev = macsec->secy.netdev;
- ret = netif_rx(nskb); - if (ret == NET_RX_SUCCESS) { + if (netif_rx(nskb) == NET_RX_SUCCESS) { u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsUntagged++; u64_stats_update_end(&secy_stats->syncp); - } else { - macsec->secy.netdev->stats.rx_dropped++; } }
@@@ -2996,7 -2996,6 +2996,6 @@@ static void macsec_free_netdev(struct n free_percpu(macsec->secy.tx_sc.stats);
dev_put(real_dev); - free_netdev(dev); }
static void macsec_setup(struct net_device *dev) @@@ -3006,7 -3005,8 +3005,8 @@@ dev->max_mtu = ETH_MAX_MTU; dev->priv_flags |= IFF_NO_QUEUE; dev->netdev_ops = &macsec_netdev_ops; - dev->destructor = macsec_free_netdev; + dev->needs_free_netdev = true; + dev->priv_destructor = macsec_free_netdev; SET_NETDEV_DEVTYPE(dev, &macsec_type);
eth_zero_addr(dev->broadcast); diff --combined drivers/net/macvlan.c index ade1213e8a87,67bf7ebae5c6..8ca274c6df3d --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@@ -703,8 -703,10 +703,8 @@@ static int macvlan_set_mac_address(stru if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL;
- if (vlan->mode == MACVLAN_MODE_PASSTHRU) { - dev_set_mac_address(vlan->lowerdev, addr); - return 0; - } + if (vlan->mode == MACVLAN_MODE_PASSTHRU) + return dev_set_mac_address(vlan->lowerdev, addr);
return macvlan_sync_address(dev, addr->sa_data); } @@@ -1090,7 -1092,7 +1090,7 @@@ void macvlan_common_setup(struct net_de netif_keep_dst(dev); dev->priv_flags |= IFF_UNICAST_FLT; dev->netdev_ops = &macvlan_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->header_ops = &macvlan_hard_header_ops; dev->ethtool_ops = &macvlan_ethtool_ops; } diff --combined drivers/net/phy/Kconfig index 65af31f24f01,3ab6c58d4be6..2dda72004a7d --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@@ -127,6 -127,7 +127,7 @@@ config MDIO_THUNDE tristate "ThunderX SOCs MDIO buses" depends on 64BIT depends on PCI + depends on !(MDIO_DEVICE=y && PHYLIB=m) select MDIO_CAVIUM help This driver supports the MDIO interfaces found on Cavium @@@ -234,11 -235,6 +235,11 @@@ config CICADA_PH ---help--- Currently supports the cis8204
+config CORTINA_PHY + tristate "Cortina EDC CDR 10G Ethernet PHY" + ---help--- + Currently supports the CS4340 phy. + config DAVICOM_PHY tristate "Davicom PHYs" ---help--- @@@ -292,11 -288,6 +293,11 @@@ config MARVELL_PH ---help--- Currently has a driver for the 88E1011S
+config MARVELL_10G_PHY + tristate "Marvell Alaska 10Gbit PHYs" + ---help--- + Support for the Marvell Alaska MV88X3310 and compatible PHYs. + config MESON_GXL_PHY tristate "Amlogic Meson GXL Internal PHY" depends on ARCH_MESON || COMPILE_TEST diff --combined drivers/net/phy/phy.c index edcdf0d872ed,eebb0e1c70ff..a9dc366b9b97 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@@ -54,6 -54,8 +54,8 @@@ static const char *phy_speed_to_str(in return "5Gbps"; case SPEED_10000: return "10Gbps"; + case SPEED_14000: + return "14Gbps"; case SPEED_20000: return "20Gbps"; case SPEED_25000: @@@ -149,25 -151,6 +151,25 @@@ static int phy_config_interrupt(struct return 0; }
+/** + * phy_restart_aneg - restart auto-negotiation + * @phydev: target phy_device struct + * + * Restart the autonegotiation on @phydev. Returns >= 0 on success or + * negative errno on error. + */ +int phy_restart_aneg(struct phy_device *phydev) +{ + int ret; + + if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0))) + ret = genphy_c45_restart_aneg(phydev); + else + ret = genphy_restart_aneg(phydev); + + return ret; +} +EXPORT_SYMBOL_GPL(phy_restart_aneg);
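A hedged usage sketch for the new wrapper; the point is that callers no longer need to know whether the PHY speaks Clause 22 or Clause 45:

	err = phy_restart_aneg(phydev);
	if (err < 0)
		phydev_err(phydev, "failed to restart aneg: %d\n", err);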
/** * phy_aneg_done - return auto-negotiation status @@@ -182,12 -165,6 +184,12 @@@ int phy_aneg_done(struct phy_device *ph if (phydev->drv && phydev->drv->aneg_done) return phydev->drv->aneg_done(phydev);
+ /* Avoid genphy_aneg_done() if the Clause 45 PHY does not + * implement Clause 22 registers + */ + if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0))) + return -EINVAL; + return genphy_aneg_done(phydev); } EXPORT_SYMBOL(phy_aneg_done); @@@ -509,8 -486,32 +511,8 @@@ int phy_ethtool_ksettings_set(struct ph } EXPORT_SYMBOL(phy_ethtool_ksettings_set);
-int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd) -{ - cmd->supported = phydev->supported; - - cmd->advertising = phydev->advertising; - cmd->lp_advertising = phydev->lp_advertising; - - ethtool_cmd_speed_set(cmd, phydev->speed); - cmd->duplex = phydev->duplex; - if (phydev->interface == PHY_INTERFACE_MODE_MOCA) - cmd->port = PORT_BNC; - else - cmd->port = PORT_MII; - cmd->phy_address = phydev->mdio.addr; - cmd->transceiver = phy_is_internal(phydev) ? - XCVR_INTERNAL : XCVR_EXTERNAL; - cmd->autoneg = phydev->autoneg; - cmd->eth_tp_mdix_ctrl = phydev->mdix_ctrl; - cmd->eth_tp_mdix = phydev->mdix; - - return 0; -} -EXPORT_SYMBOL(phy_ethtool_gset); - -int phy_ethtool_ksettings_get(struct phy_device *phydev, - struct ethtool_link_ksettings *cmd) +void phy_ethtool_ksettings_get(struct phy_device *phydev, + struct ethtool_link_ksettings *cmd) { ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, phydev->supported); @@@ -532,6 -533,8 +534,6 @@@ cmd->base.autoneg = phydev->autoneg; cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl; cmd->base.eth_tp_mdix = phydev->mdix; - - return 0; } EXPORT_SYMBOL(phy_ethtool_ksettings_get);
@@@ -1414,7 -1417,7 +1416,7 @@@ int phy_ethtool_set_eee(struct phy_devi /* Restart autonegotiation so the new modes get sent to the * link partner. */ - ret = genphy_restart_aneg(phydev); + ret = phy_restart_aneg(phydev); if (ret < 0) return ret; } @@@ -1447,9 -1450,7 +1449,9 @@@ int phy_ethtool_get_link_ksettings(stru if (!phydev) return -ENODEV;
- return phy_ethtool_ksettings_get(phydev, cmd); + phy_ethtool_ksettings_get(phydev, cmd); + + return 0; } EXPORT_SYMBOL(phy_ethtool_get_link_ksettings);
@@@ -1475,6 -1476,6 +1477,6 @@@ int phy_ethtool_nway_reset(struct net_d if (!phydev->drv) return -EIO;
- return genphy_restart_aneg(phydev); + return phy_restart_aneg(phydev); } EXPORT_SYMBOL(phy_ethtool_nway_reset); diff --combined drivers/net/team/team.c index a3ec1892a286,fba8c136aa7c..629a412dc690 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@@ -1643,7 -1643,6 +1643,6 @@@ static void team_destructor(struct net_ struct team *team = netdev_priv(dev);
free_percpu(team->pcpu_stats); - free_netdev(dev); }
static int team_open(struct net_device *dev) @@@ -2005,6 -2004,12 +2004,6 @@@ static const struct net_device_ops team .ndo_del_slave = team_del_slave, .ndo_fix_features = team_fix_features, .ndo_change_carrier = team_change_carrier, - .ndo_bridge_setlink = switchdev_port_bridge_setlink, - .ndo_bridge_getlink = switchdev_port_bridge_getlink, - .ndo_bridge_dellink = switchdev_port_bridge_dellink, - .ndo_fdb_add = switchdev_port_fdb_add, - .ndo_fdb_del = switchdev_port_fdb_del, - .ndo_fdb_dump = switchdev_port_fdb_dump, .ndo_features_check = passthru_features_check, };
@@@ -2073,7 -2078,8 +2072,8 @@@ static void team_setup(struct net_devic
dev->netdev_ops = &team_netdev_ops; dev->ethtool_ops = &team_ethtool_ops; - dev->destructor = team_destructor; + dev->needs_free_netdev = true; + dev->priv_destructor = team_destructor; dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->priv_flags |= IFF_NO_QUEUE; dev->priv_flags |= IFF_TEAM; diff --combined drivers/net/tun.c index fe660e524af9,9ee7d4275640..ae49f4b99b67 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@@ -465,7 -465,7 +465,7 @@@ static u16 tun_select_queue(struct net_ rcu_read_lock(); numqueues = ACCESS_ONCE(tun->numqueues);
- txq = skb_get_hash(skb); + txq = __skb_get_hash_symmetric(skb); if (txq) { e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq); if (e) { @@@ -867,7 -867,7 +867,7 @@@ static netdev_tx_t tun_net_xmit(struct */ __u32 rxhash;
- rxhash = skb_get_hash(skb); + rxhash = __skb_get_hash_symmetric(skb); if (rxhash) { struct tun_flow_entry *e; e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], @@@ -1334,7 -1334,7 +1334,7 @@@ static ssize_t tun_get_user(struct tun_ skb_reset_network_header(skb); skb_probe_transport_header(skb, 0);
- rxhash = skb_get_hash(skb); + rxhash = __skb_get_hash_symmetric(skb); #ifndef CONFIG_4KSTACKS tun_rx_batched(tun, tfile, skb, more); #else @@@ -1510,8 -1510,9 +1510,8 @@@ out
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, struct iov_iter *to, - int noblock) + int noblock, struct sk_buff *skb) { - struct sk_buff *skb; ssize_t ret; int err;
@@@ -1520,12 -1521,10 +1520,12 @@@ if (!iov_iter_count(to)) return 0;
- /* Read frames from ring */ - skb = tun_ring_recv(tfile, noblock, &err); - if (!skb) - return err; + if (!skb) { + /* Read frames from ring */ + skb = tun_ring_recv(tfile, noblock, &err); + if (!skb) + return err; + }
ret = tun_put_user(tun, tfile, skb, to); if (unlikely(ret < 0)) @@@ -1545,7 -1544,7 +1545,7 @@@ static ssize_t tun_chr_read_iter(struc
if (!tun) return -EBADFD; - ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK); + ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL); ret = min_t(ssize_t, ret, len); if (ret > 0) iocb->ki_pos = ret; @@@ -1561,7 -1560,6 +1561,6 @@@ static void tun_free_netdev(struct net_ free_percpu(tun->pcpu_stats); tun_flow_uninit(tun); security_tun_dev_free_security(tun->security); - free_netdev(dev); }
static void tun_setup(struct net_device *dev) @@@ -1572,7 -1570,8 +1571,8 @@@ tun->group = INVALID_GID;
dev->ethtool_ops = &tun_ethtool_ops; - dev->destructor = tun_free_netdev; + dev->needs_free_netdev = true; + dev->priv_destructor = tun_free_netdev; /* We prefer our own queue length */ dev->tx_queue_len = TUN_READQ_SIZE; } @@@ -1647,8 -1646,7 +1647,8 @@@ static int tun_recvmsg(struct socket *s SOL_PACKET, TUN_TX_TIMESTAMP); goto out; } - ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT); + ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, + m->msg_control); if (ret > (ssize_t)total_len) { m->msg_flags |= MSG_TRUNC; ret = flags & MSG_TRUNC ? ret : total_len; @@@ -2628,19 -2626,6 +2628,19 @@@ struct socket *tun_get_socket(struct fi } EXPORT_SYMBOL_GPL(tun_get_socket);
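tun_do_read() now accepts a caller-supplied skb, and tun_recvmsg() forwards msg_control as that skb. Together with tun_get_skb_array(), added just below, this lets a consumer such as vhost-net pop packets from the ring itself; a hedged sketch of that usage:

	struct skb_array *ring = tun_get_skb_array(file);
	struct sk_buff *skb = IS_ERR(ring) ? NULL : skb_array_consume(ring);
	struct msghdr msg = { .msg_control = skb };	/* NULL falls back to tun_ring_recv() */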
+struct skb_array *tun_get_skb_array(struct file *file) +{ + struct tun_file *tfile; + + if (file->f_op != &tun_fops) + return ERR_PTR(-EINVAL); + tfile = file->private_data; + if (!tfile) + return ERR_PTR(-EBADFD); + return &tfile->tx_array; +} +EXPORT_SYMBOL_GPL(tun_get_skb_array); + module_init(tun_init); module_exit(tun_cleanup); MODULE_DESCRIPTION(DRV_DESCRIPTION); diff --combined drivers/net/usb/r8152.c index 5a02053181d1,1a419a45e2a2..8589303b4bf1 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@@ -394,7 -394,6 +394,7 @@@
/* OCP_PHY_STATUS */ #define PHY_STAT_MASK 0x0007 +#define PHY_STAT_EXT_INIT 2 #define PHY_STAT_LAN_ON 3 #define PHY_STAT_PWRDN 5
@@@ -842,6 -841,12 +842,6 @@@ int pla_ocp_write(struct r8152 *tp, u1 }
static inline -int usb_ocp_read(struct r8152 *tp, u16 index, u16 size, void *data) -{ - return generic_ocp_read(tp, index, size, data, MCU_TYPE_USB); -} - -static inline int usb_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data) { return generic_ocp_write(tp, index, byteen, size, data, MCU_TYPE_USB); @@@ -1813,10 -1818,6 +1813,10 @@@ static int rx_bottom(struct r8152 *tp, unsigned int pkt_len; struct sk_buff *skb;
+ /* limit the number of skbs for rx_queue */ + if (unlikely(skb_queue_len(&tp->rx_queue) >= 1000)) + break; + pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK; if (pkt_len < ETH_ZLEN) break; @@@ -1938,8 -1939,7 +1938,8 @@@ static int r8152_poll(struct napi_struc bottom_half(tp);
if (work_done < budget) { - napi_complete(napi); + if (!napi_complete_done(napi, work_done)) + goto out; if (!list_empty(&tp->rx_done)) napi_schedule(napi); else if (!skb_queue_empty(&tp->tx_queue) && @@@ -1947,7 -1947,6 +1947,7 @@@ napi_schedule(napi); }
+out: return work_done; }
@@@ -2269,6 -2268,7 +2269,6 @@@ static int rtl8153_enable(struct r8152 if (test_bit(RTL8152_UNPLUG, &tp->flags)) return -ENODEV;
- usb_disable_lpm(tp->udev); set_tx_qlen(tp); rtl_set_eee_plus(tp); r8153_set_rx_early_timeout(tp); @@@ -2434,29 -2434,6 +2434,29 @@@ static void __rtl_set_wol(struct r8152 device_set_wakeup_enable(&tp->udev->dev, false); }
+static void r8153_mac_clk_spd(struct r8152 *tp, bool enable) +{ + /* MAC clock speed down */ + if (enable) { + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, + ALDPS_SPDWN_RATIO); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, + EEE_SPDWN_RATIO); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, + PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN | + U1U2_SPDWN_EN | L1_SPDWN_EN); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, + PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN | + TP100_SPDWN_EN | TP500_SPDWN_EN | EEE_SPDWN_EN | + TP1000_SPDWN_EN); + } else { + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0); + } +} + static void r8153_u1u2en(struct r8152 *tp, bool enable) { u8 u1u2[8]; @@@ -2474,35 -2451,13 +2474,35 @@@ static void r8153_u2p3en(struct r8152 * u32 ocp_data;
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL); - if (enable && tp->version != RTL_VER_03 && tp->version != RTL_VER_04) + if (enable) ocp_data |= U2P3_ENABLE; else ocp_data &= ~U2P3_ENABLE; ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data); }
+static u16 r8153_phy_status(struct r8152 *tp, u16 desired) +{ + u16 data; + int i; + + for (i = 0; i < 500; i++) { + data = ocp_reg_read(tp, OCP_PHY_STATUS); + data &= PHY_STAT_MASK; + if (desired) { + if (data == desired) + break; + } else if (data == PHY_STAT_LAN_ON || data == PHY_STAT_PWRDN || + data == PHY_STAT_EXT_INIT) { + break; + } + + msleep(20); + } + + return data; +} + static void r8153_power_cut_en(struct r8152 *tp, bool enable) { u32 ocp_data; @@@ -2557,26 -2512,13 +2557,26 @@@ static void rtl_runtime_suspend_enable(
static void rtl8153_runtime_enable(struct r8152 *tp, bool enable) { - rtl_runtime_suspend_enable(tp, enable); - if (enable) { r8153_u1u2en(tp, false); r8153_u2p3en(tp, false); + r8153_mac_clk_spd(tp, true); + rtl_runtime_suspend_enable(tp, true); } else { - r8153_u2p3en(tp, true); + rtl_runtime_suspend_enable(tp, false); + r8153_mac_clk_spd(tp, false); + + switch (tp->version) { + case RTL_VER_03: + case RTL_VER_04: + break; + case RTL_VER_05: + case RTL_VER_06: + default: + r8153_u2p3en(tp, true); + break; + } + r8153_u1u2en(tp, true); } } @@@ -2842,15 -2784,9 +2842,15 @@@ static void r8153_aldps_en(struct r815 data |= EN_ALDPS; ocp_reg_write(tp, OCP_POWER_CFG, data); } else { + int i; + data &= ~EN_ALDPS; ocp_reg_write(tp, OCP_POWER_CFG, data); - msleep(20); + for (i = 0; i < 20; i++) { + usleep_range(1000, 2000); + if (ocp_read_word(tp, MCU_TYPE_PLA, 0xe000) & 0x0100) + break; + } } }
@@@ -2921,17 -2857,6 +2921,17 @@@ static void r8153_hw_phy_cfg(struct r81 r8153_aldps_en(tp, true); r8152b_enable_fc(tp);
+ switch (tp->version) { + case RTL_VER_03: + case RTL_VER_04: + break; + case RTL_VER_05: + case RTL_VER_06: + default: + r8153_u2p3en(tp, true); + break; + } + set_bit(PHY_RESET, &tp->flags); }
@@@ -2940,7 -2865,6 +2940,7 @@@ static void r8153_first_init(struct r81 u32 ocp_data; int i;
+ r8153_mac_clk_spd(tp, false); rxdy_gated_en(tp, true); r8153_teredo_off(tp);
@@@ -2995,6 -2919,11 +2995,6 @@@ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_NORMAL); /* TX share fifo free credit full threshold */ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TXFIFO_CTRL, TXFIFO_THR_NORMAL2); - - /* rx aggregation */ - ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); - ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); - ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data); }
static void r8153_enter_oob(struct r8152 *tp) @@@ -3002,8 -2931,6 +3002,8 @@@ u32 ocp_data; int i;
+ r8153_mac_clk_spd(tp, true); + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); ocp_data &= ~NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); @@@ -3059,6 -2986,7 +3059,6 @@@ static void rtl8153_disable(struct r815 rtl_disable(tp); rtl_reset_bmu(tp); r8153_aldps_en(tp, true); - usb_enable_lpm(tp->udev); }
static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) @@@ -3177,23 -3105,12 +3177,23 @@@ static void rtl8153_up(struct r8152 *tp return;
r8153_u1u2en(tp, false); + r8153_u2p3en(tp, false); r8153_aldps_en(tp, false); r8153_first_init(tp); r8153_aldps_en(tp, true); - r8153_u2p3en(tp, true); + + switch (tp->version) { + case RTL_VER_03: + case RTL_VER_04: + break; + case RTL_VER_05: + case RTL_VER_06: + default: + r8153_u2p3en(tp, true); + break; + } + r8153_u1u2en(tp, true); - usb_enable_lpm(tp->udev); }
static void rtl8153_down(struct r8152 *tp) @@@ -3509,7 -3426,12 +3509,7 @@@ static void r8153_init(struct r8152 *tp msleep(20); }
- for (i = 0; i < 500; i++) { - ocp_data = ocp_reg_read(tp, OCP_PHY_STATUS) & PHY_STAT_MASK; - if (ocp_data == PHY_STAT_LAN_ON || ocp_data == PHY_STAT_PWRDN) - break; - msleep(20); - } + data = r8153_phy_status(tp, 0);
if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 || tp->version == RTL_VER_05) @@@ -3521,8 -3443,14 +3521,8 @@@ r8152_mdio_write(tp, MII_BMCR, data); }
- for (i = 0; i < 500; i++) { - ocp_data = ocp_reg_read(tp, OCP_PHY_STATUS) & PHY_STAT_MASK; - if (ocp_data == PHY_STAT_LAN_ON) - break; - msleep(20); - } + data = r8153_phy_status(tp, PHY_STAT_LAN_ON);
- usb_disable_lpm(tp->udev); r8153_u2p3en(tp, false);
if (tp->version == RTL_VER_04) { @@@ -3582,28 -3510,15 +3582,28 @@@
r8153_power_cut_en(tp, false); r8153_u1u2en(tp, true); + r8153_mac_clk_spd(tp, false); + usb_enable_lpm(tp->udev);
- /* MAC clock speed down */ - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0); - ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0); + /* rx aggregation */ + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); + ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); + ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
rtl_tally_reset(tp); - r8153_u2p3en(tp, true); + + switch (tp->udev->speed) { + case USB_SPEED_SUPER: + case USB_SPEED_SUPER_PLUS: + tp->coalesce = COALESCE_SUPER; + break; + case USB_SPEED_HIGH: + tp->coalesce = COALESCE_HIGH; + break; + default: + tp->coalesce = COALESCE_SLOW; + break; + } }
static int rtl8152_pre_reset(struct usb_interface *intf) @@@ -3788,8 -3703,11 +3788,8 @@@ static int rtl8152_resume(struct usb_in
mutex_lock(&tp->control);
- if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) { - tp->rtl_ops.init(tp); - queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); + if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) netif_device_attach(netdev); - }
if (netif_running(netdev) && netdev->flags & IFF_UP) { if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { @@@ -3835,10 -3753,6 +3835,10 @@@ static int rtl8152_reset_resume(struct struct r8152 *tp = usb_get_intfdata(intf);
clear_bit(SELECTIVE_SUSPEND, &tp->flags); + mutex_lock(&tp->control); + tp->rtl_ops.init(tp); + queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); + mutex_unlock(&tp->control); return rtl8152_resume(intf); }
@@@ -3927,7 -3841,7 +3927,7 @@@ int rtl8152_get_link_ksettings(struct n
mutex_lock(&tp->control);
- ret = mii_ethtool_get_link_ksettings(&tp->mii, cmd); + mii_ethtool_get_link_ksettings(&tp->mii, cmd);
mutex_unlock(&tp->control);
@@@ -4454,6 -4368,8 +4454,8 @@@ static u8 rtl_get_version(struct usb_in break; }
+ dev_dbg(&intf->dev, "Detected version 0x%04x\n", version); + return version; }
@@@ -4550,6 -4466,19 +4552,6 @@@ static int rtl8152_probe(struct usb_int tp->mii.reg_num_mask = 0x1f; tp->mii.phy_id = R8152_PHY_ID;
- switch (udev->speed) { - case USB_SPEED_SUPER: - case USB_SPEED_SUPER_PLUS: - tp->coalesce = COALESCE_SUPER; - break; - case USB_SPEED_HIGH: - tp->coalesce = COALESCE_HIGH; - break; - default: - tp->coalesce = COALESCE_SLOW; - break; - } - tp->autoneg = AUTONEG_ENABLE; tp->speed = tp->mii.supports_gmii ? SPEED_1000 : SPEED_100; tp->duplex = DUPLEX_FULL; diff --combined drivers/net/vxlan.c index e045c34ffbeb,5fa798a5c9a6..25b70cad055c --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@@ -970,7 -970,7 +970,7 @@@ static bool vxlan_snoop(struct net_devi return false;
/* Don't migrate static entries, drop packets */ - if (f->state & NUD_NOARP) + if (f->state & (NUD_PERMANENT | NUD_NOARP)) return true;
if (net_ratelimit()) @@@ -1077,10 -1077,10 +1077,10 @@@ static void vxlan_sock_release(struct v #if IS_ENABLED(CONFIG_IPV6) struct vxlan_sock *sock6 = rtnl_dereference(vxlan->vn6_sock);
- rcu_assign_pointer(vxlan->vn6_sock, NULL); + RCU_INIT_POINTER(vxlan->vn6_sock, NULL); #endif
- rcu_assign_pointer(vxlan->vn4_sock, NULL); + RCU_INIT_POINTER(vxlan->vn4_sock, NULL); synchronize_net();
vxlan_vs_del_dev(vxlan); @@@ -2611,7 -2611,7 +2611,7 @@@ static void vxlan_setup(struct net_devi eth_hw_addr_random(dev); ether_setup(dev);
- dev->destructor = free_netdev; + dev->needs_free_netdev = true; SET_NETDEV_DEVTYPE(dev, &vxlan_type);
dev->features |= NETIF_F_LLTX; diff --combined drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index a2bf11fc8ecc,617199c0e5a0..b8f042146fc9 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@@ -4674,6 -4674,9 +4674,6 @@@ static int brcmf_cfg80211_stop_ap(struc err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0); if (err < 0) brcmf_err("setting AP mode failed %d\n", err); - err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0); - if (err < 0) - brcmf_err("setting INFRA mode failed %d\n", err); if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) brcmf_fil_iovar_int_set(ifp, "mbss", 0); brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY, @@@ -5222,7 -5225,6 +5222,6 @@@ void brcmf_cfg80211_free_netdev(struct
if (vif) brcmf_free_vif(vif); - free_netdev(ndev); }
static bool brcmf_is_linkup(const struct brcmf_event_msg *e) @@@ -6375,6 -6377,16 +6374,6 @@@ err return -ENOMEM; }
-static void brcmf_wiphy_pno_params(struct wiphy *wiphy) -{ - /* scheduled scan settings */ - wiphy->max_sched_scan_reqs = 1; - wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT; - wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT; - wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX; - wiphy->max_sched_scan_plan_interval = BRCMF_PNO_SCHED_SCAN_MAX_PERIOD; -} - #ifdef CONFIG_PM static const struct wiphy_wowlan_support brcmf_wowlan_support = { .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT, @@@ -6421,7 -6433,6 +6420,7 @@@ static int brcmf_setup_wiphy(struct wip const struct ieee80211_iface_combination *combo; struct ieee80211_supported_band *band; u16 max_interfaces = 0; + bool gscan; __le32 bandlist[3]; u32 n_bands; int err, i; @@@ -6471,10 -6482,9 +6470,10 @@@ wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM; wiphy->mgmt_stypes = brcmf_txrx_stypes; wiphy->max_remain_on_channel_duration = 5000; - if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO)) - brcmf_wiphy_pno_params(wiphy); - + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO)) { + gscan = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_GSCAN); + brcmf_pno_wiphy_params(wiphy, gscan); + } /* vendor commands/events support */ wiphy->vendor_commands = brcmf_vendor_cmds; wiphy->n_vendor_commands = BRCMF_VNDR_CMDS_LAST - 1; diff --combined drivers/net/wireless/marvell/mwifiex/main.c index 2c42191293c3,39b6b5e3f6e0..f2600b827e81 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@@ -44,10 -44,6 +44,10 @@@ bool mfg_mode module_param(mfg_mode, bool, 0); MODULE_PARM_DESC(mfg_mode, "manufacturing mode enable:1, disable:0");
+bool aggr_ctrl; +module_param(aggr_ctrl, bool, 0000); +MODULE_PARM_DESC(aggr_ctrl, "usb tx aggreataon enable:1, disable:0"); + /* * This function registers the device and performs all the necessary * initializations. @@@ -1284,7 -1280,7 +1284,7 @@@ void mwifiex_init_priv_params(struct mw struct net_device *dev) { dev->netdev_ops = &mwifiex_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; /* Initialize private structure */ priv->current_key_index = 0; priv->media_connected = false; diff --combined drivers/net/wireless/quantenna/qtnfmac/core.c index c5ac252464f4,000000000000..f053532c0e87 mode 100644,000000..100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@@ -1,618 -1,0 +1,618 @@@ +/* + * Copyright (c) 2015-2016 Quantenna Communications, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/if_ether.h> + +#include "core.h" +#include "bus.h" +#include "trans.h" +#include "commands.h" +#include "cfg80211.h" +#include "event.h" +#include "util.h" + +#define QTNF_DMP_MAX_LEN 48 +#define QTNF_PRIMARY_VIF_IDX 0 + +struct qtnf_frame_meta_info { + u8 magic_s; + u8 ifidx; + u8 macid; + u8 magic_e; +} __packed; + +struct qtnf_wmac *qtnf_core_get_mac(const struct qtnf_bus *bus, u8 macid) +{ + struct qtnf_wmac *mac = NULL; + + if (unlikely(macid >= QTNF_MAX_MAC)) { + pr_err("invalid MAC index %u\n", macid); + return NULL; + } + + mac = bus->mac[macid]; + + if (unlikely(!mac)) { + pr_err("MAC%u: not initialized\n", macid); + return NULL; + } + + return mac; +} + +/* Netdev handler for open. + */ +static int qtnf_netdev_open(struct net_device *ndev) +{ + netif_carrier_off(ndev); + qtnf_netdev_updown(ndev, 1); + return 0; +} + +/* Netdev handler for close. + */ +static int qtnf_netdev_close(struct net_device *ndev) +{ + netif_carrier_off(ndev); + qtnf_virtual_intf_cleanup(ndev); + qtnf_netdev_updown(ndev, 0); + return 0; +} + +/* Netdev handler for data transmission. + */ +static int +qtnf_netdev_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct qtnf_vif *vif; + struct qtnf_wmac *mac; + + vif = qtnf_netdev_get_priv(ndev); + + if (unlikely(skb->dev != ndev)) { + pr_err_ratelimited("invalid skb->dev"); + dev_kfree_skb_any(skb); + return 0; + } + + if (unlikely(vif->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED)) { + pr_err_ratelimited("%s: VIF not initialized\n", ndev->name); + dev_kfree_skb_any(skb); + return 0; + } + + mac = vif->mac; + if (unlikely(!mac)) { + pr_err_ratelimited("%s: NULL mac pointer", ndev->name); + dev_kfree_skb_any(skb); + return 0; + } + + if (!skb->len || (skb->len > ETH_FRAME_LEN)) { + pr_err_ratelimited("%s: invalid skb len %d\n", ndev->name, + skb->len); + dev_kfree_skb_any(skb); + ndev->stats.tx_dropped++; + return 0; + } + + /* tx path is enabled: reset vif timeout */ + vif->cons_tx_timeout_cnt = 0; + + return qtnf_bus_data_tx(mac->bus, skb); +} + +/* Netdev handler for getting stats. 
+ */ +static struct net_device_stats *qtnf_netdev_get_stats(struct net_device *dev) +{ + return &dev->stats; +} + +/* Netdev handler for transmission timeout. + */ +static void qtnf_netdev_tx_timeout(struct net_device *ndev) +{ + struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev); + struct qtnf_wmac *mac; + struct qtnf_bus *bus; + + if (unlikely(!vif || !vif->mac || !vif->mac->bus)) + return; + + mac = vif->mac; + bus = mac->bus; + + pr_warn("VIF%u.%u: Tx timeout- %lu\n", mac->macid, vif->vifid, jiffies); + + qtnf_bus_data_tx_timeout(bus, ndev); + ndev->stats.tx_errors++; + + if (++vif->cons_tx_timeout_cnt > QTNF_TX_TIMEOUT_TRSHLD) { + pr_err("Tx timeout threshold exceeded !\n"); + pr_err("schedule interface %s reset !\n", netdev_name(ndev)); + queue_work(bus->workqueue, &vif->reset_work); + } +} + +/* Network device ops handlers */ +const struct net_device_ops qtnf_netdev_ops = { + .ndo_open = qtnf_netdev_open, + .ndo_stop = qtnf_netdev_close, + .ndo_start_xmit = qtnf_netdev_hard_start_xmit, + .ndo_tx_timeout = qtnf_netdev_tx_timeout, + .ndo_get_stats = qtnf_netdev_get_stats, +}; + +static int qtnf_mac_init_single_band(struct wiphy *wiphy, + struct qtnf_wmac *mac, + enum nl80211_band band) +{ + int ret; + + wiphy->bands[band] = kzalloc(sizeof(*wiphy->bands[band]), GFP_KERNEL); + if (!wiphy->bands[band]) + return -ENOMEM; + + wiphy->bands[band]->band = band; + + ret = qtnf_cmd_get_mac_chan_info(mac, wiphy->bands[band]); + if (ret) { + pr_err("MAC%u: band %u: failed to get chans info: %d\n", + mac->macid, band, ret); + return ret; + } + + qtnf_band_init_rates(wiphy->bands[band]); + qtnf_band_setup_htvht_caps(&mac->macinfo, wiphy->bands[band]); + + return 0; +} + +static int qtnf_mac_init_bands(struct qtnf_wmac *mac) +{ + struct wiphy *wiphy = priv_to_wiphy(mac); + int ret = 0; + + if (mac->macinfo.bands_cap & QLINK_BAND_2GHZ) { + ret = qtnf_mac_init_single_band(wiphy, mac, NL80211_BAND_2GHZ); + if (ret) + goto out; + } + + if (mac->macinfo.bands_cap & QLINK_BAND_5GHZ) { + ret = qtnf_mac_init_single_band(wiphy, mac, NL80211_BAND_5GHZ); + if (ret) + goto out; + } + + if (mac->macinfo.bands_cap & QLINK_BAND_60GHZ) + ret = qtnf_mac_init_single_band(wiphy, mac, NL80211_BAND_60GHZ); + +out: + return ret; +} + +struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac) +{ + struct qtnf_vif *vif; + int i; + + for (i = 0; i < QTNF_MAX_INTF; i++) { + vif = &mac->iflist[i]; + if (vif->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED) + return vif; + } + + return NULL; +} + +struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac) +{ + struct qtnf_vif *vif; + + vif = &mac->iflist[QTNF_PRIMARY_VIF_IDX]; + + if (vif->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED) + return NULL; + + return vif; +} + +static void qtnf_vif_reset_handler(struct work_struct *work) +{ + struct qtnf_vif *vif = container_of(work, struct qtnf_vif, reset_work); + + rtnl_lock(); + + if (vif->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED) { + rtnl_unlock(); + return; + } + + /* stop tx completely */ + netif_tx_stop_all_queues(vif->netdev); + if (netif_carrier_ok(vif->netdev)) + netif_carrier_off(vif->netdev); + + qtnf_cfg80211_vif_reset(vif); + + rtnl_unlock(); +} + +static void qtnf_mac_init_primary_intf(struct qtnf_wmac *mac) +{ + struct qtnf_vif *vif = &mac->iflist[QTNF_PRIMARY_VIF_IDX]; + + vif->wdev.iftype = NL80211_IFTYPE_AP; + vif->bss_priority = QTNF_DEF_BSS_PRIORITY; + vif->wdev.wiphy = priv_to_wiphy(mac); + INIT_WORK(&vif->reset_work, qtnf_vif_reset_handler); + vif->cons_tx_timeout_cnt = 0; +} + +static struct qtnf_wmac 
*qtnf_core_mac_alloc(struct qtnf_bus *bus, + unsigned int macid) +{ + struct wiphy *wiphy; + struct qtnf_wmac *mac; + unsigned int i; + + wiphy = qtnf_wiphy_allocate(bus); + if (!wiphy) + return ERR_PTR(-ENOMEM); + + mac = wiphy_priv(wiphy); + + mac->macid = macid; + mac->bus = bus; + + for (i = 0; i < QTNF_MAX_INTF; i++) { + memset(&mac->iflist[i], 0, sizeof(struct qtnf_vif)); + mac->iflist[i].wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; + mac->iflist[i].mac = mac; + mac->iflist[i].vifid = i; + qtnf_sta_list_init(&mac->iflist[i].sta_list); + } + + qtnf_mac_init_primary_intf(mac); + bus->mac[macid] = mac; + + return mac; +} + +int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif, + const char *name, unsigned char name_assign_type, + enum nl80211_iftype iftype) +{ + struct wiphy *wiphy = priv_to_wiphy(mac); + struct net_device *dev; + void *qdev_vif; + int ret; + + dev = alloc_netdev_mqs(sizeof(struct qtnf_vif *), name, + name_assign_type, ether_setup, 1, 1); + if (!dev) { + memset(&vif->wdev, 0, sizeof(vif->wdev)); + vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; + return -ENOMEM; + } + + vif->netdev = dev; + + dev->netdev_ops = &qtnf_netdev_ops; - dev->destructor = free_netdev; ++ dev->needs_free_netdev = true; + dev_net_set(dev, wiphy_net(wiphy)); + dev->ieee80211_ptr = &vif->wdev; + dev->ieee80211_ptr->iftype = iftype; + ether_addr_copy(dev->dev_addr, vif->mac_addr); + SET_NETDEV_DEV(dev, wiphy_dev(wiphy)); + dev->flags |= IFF_BROADCAST | IFF_MULTICAST; + dev->watchdog_timeo = QTNF_DEF_WDOG_TIMEOUT; + dev->tx_queue_len = 100; + + qdev_vif = netdev_priv(dev); + *((void **)qdev_vif) = vif; + + SET_NETDEV_DEV(dev, mac->bus->dev); + + ret = register_netdevice(dev); + if (ret) { + free_netdev(dev); + vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; + } + + return ret; +} + +static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid) +{ + struct qtnf_wmac *mac; + struct wiphy *wiphy; + struct qtnf_vif *vif; + unsigned int i; + enum nl80211_band band; + + mac = bus->mac[macid]; + + if (!mac) + return; + + wiphy = priv_to_wiphy(mac); + + for (i = 0; i < QTNF_MAX_INTF; i++) { + vif = &mac->iflist[i]; + rtnl_lock(); + if (vif->netdev && + vif->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED) { + qtnf_virtual_intf_cleanup(vif->netdev); + qtnf_del_virtual_intf(wiphy, &vif->wdev); + } + rtnl_unlock(); + qtnf_sta_list_free(&vif->sta_list); + } + + if (mac->wiphy_registered) + wiphy_unregister(wiphy); + + for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; ++band) { + if (!wiphy->bands[band]) + continue; + + kfree(wiphy->bands[band]->channels); + wiphy->bands[band]->n_channels = 0; + + kfree(wiphy->bands[band]); + wiphy->bands[band] = NULL; + } + + kfree(mac->macinfo.limits); + kfree(wiphy->iface_combinations); + wiphy_free(wiphy); + bus->mac[macid] = NULL; +} + +static int qtnf_core_mac_attach(struct qtnf_bus *bus, unsigned int macid) +{ + struct qtnf_wmac *mac; + struct qtnf_vif *vif; + int ret; + + if (!(bus->hw_info.mac_bitmap & BIT(macid))) { + pr_info("MAC%u is not active in FW\n", macid); + return 0; + } + + mac = qtnf_core_mac_alloc(bus, macid); + if (IS_ERR(mac)) { + pr_err("MAC%u allocation failed\n", macid); + return PTR_ERR(mac); + } + + ret = qtnf_cmd_get_mac_info(mac); + if (ret) { + pr_err("MAC%u: failed to get info\n", macid); + goto error; + } + + vif = qtnf_mac_get_base_vif(mac); + if (!vif) { + pr_err("MAC%u: primary VIF is not ready\n", macid); + ret = -EFAULT; + goto error; + } + + ret = qtnf_cmd_send_add_intf(vif, NL80211_IFTYPE_AP, vif->mac_addr); + if 
(ret) { + pr_err("MAC%u: failed to add VIF\n", macid); + goto error; + } + + ret = qtnf_cmd_send_get_phy_params(mac); + if (ret) { + pr_err("MAC%u: failed to get PHY settings\n", macid); + goto error; + } + + ret = qtnf_mac_init_bands(mac); + if (ret) { + pr_err("MAC%u: failed to init bands\n", macid); + goto error; + } + + ret = qtnf_wiphy_register(&bus->hw_info, mac); + if (ret) { + pr_err("MAC%u: wiphy registration failed\n", macid); + goto error; + } + + mac->wiphy_registered = 1; + + rtnl_lock(); + + ret = qtnf_core_net_attach(mac, vif, "wlan%d", NET_NAME_ENUM, + NL80211_IFTYPE_AP); + rtnl_unlock(); + + if (ret) { + pr_err("MAC%u: failed to attach netdev\n", macid); + vif->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; + vif->netdev = NULL; + goto error; + } + + pr_debug("MAC%u initialized\n", macid); + + return 0; + +error: + qtnf_core_mac_detach(bus, macid); + return ret; +} + +int qtnf_core_attach(struct qtnf_bus *bus) +{ + unsigned int i; + int ret; + + qtnf_trans_init(bus); + + bus->fw_state = QTNF_FW_STATE_BOOT_DONE; + qtnf_bus_data_rx_start(bus); + + bus->workqueue = alloc_ordered_workqueue("QTNF_BUS", 0); + if (!bus->workqueue) { + pr_err("failed to alloc main workqueue\n"); + ret = -ENOMEM; + goto error; + } + + INIT_WORK(&bus->event_work, qtnf_event_work_handler); + + ret = qtnf_cmd_send_init_fw(bus); + if (ret) { + pr_err("failed to init FW: %d\n", ret); + goto error; + } + + bus->fw_state = QTNF_FW_STATE_ACTIVE; + + ret = qtnf_cmd_get_hw_info(bus); + if (ret) { + pr_err("failed to get HW info: %d\n", ret); + goto error; + } + + if (bus->hw_info.ql_proto_ver != QLINK_PROTO_VER) { + pr_err("qlink version mismatch %u != %u\n", + QLINK_PROTO_VER, bus->hw_info.ql_proto_ver); + ret = -EPROTONOSUPPORT; + goto error; + } + + if (bus->hw_info.num_mac > QTNF_MAX_MAC) { + pr_err("no support for number of MACs=%u\n", + bus->hw_info.num_mac); + ret = -ERANGE; + goto error; + } + + for (i = 0; i < bus->hw_info.num_mac; i++) { + ret = qtnf_core_mac_attach(bus, i); + + if (ret) { + pr_err("MAC%u: attach failed: %d\n", i, ret); + goto error; + } + } + + return 0; + +error: + qtnf_core_detach(bus); + + return ret; +} +EXPORT_SYMBOL_GPL(qtnf_core_attach); + +void qtnf_core_detach(struct qtnf_bus *bus) +{ + unsigned int macid; + + qtnf_bus_data_rx_stop(bus); + + for (macid = 0; macid < QTNF_MAX_MAC; macid++) + qtnf_core_mac_detach(bus, macid); + + if (bus->fw_state == QTNF_FW_STATE_ACTIVE) + qtnf_cmd_send_deinit_fw(bus); + + bus->fw_state = QTNF_FW_STATE_DEAD; + + if (bus->workqueue) { + flush_workqueue(bus->workqueue); + destroy_workqueue(bus->workqueue); + } + + qtnf_trans_free(bus); +} +EXPORT_SYMBOL_GPL(qtnf_core_detach); + +static inline int qtnf_is_frame_meta_magic_valid(struct qtnf_frame_meta_info *m) +{ + return m->magic_s == 0xAB && m->magic_e == 0xBA; +} + +struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb) +{ + struct qtnf_frame_meta_info *meta; + struct net_device *ndev = NULL; + struct qtnf_wmac *mac; + struct qtnf_vif *vif; + + meta = (struct qtnf_frame_meta_info *) + (skb_tail_pointer(skb) - sizeof(*meta)); + + if (unlikely(!qtnf_is_frame_meta_magic_valid(meta))) { + pr_err_ratelimited("invalid magic 0x%x:0x%x\n", + meta->magic_s, meta->magic_e); + goto out; + } + + if (unlikely(meta->macid >= QTNF_MAX_MAC)) { + pr_err_ratelimited("invalid mac(%u)\n", meta->macid); + goto out; + } + + if (unlikely(meta->ifidx >= QTNF_MAX_INTF)) { + pr_err_ratelimited("invalid vif(%u)\n", meta->ifidx); + goto out; + } + + mac = bus->mac[meta->macid]; + + if 
(unlikely(!mac)) { + pr_err_ratelimited("mac(%d) does not exist\n", meta->macid); + goto out; + } + + vif = &mac->iflist[meta->ifidx]; + + if (unlikely(vif->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED)) { + pr_err_ratelimited("vif(%u) does not exists\n", meta->ifidx); + goto out; + } + + ndev = vif->netdev; + + if (unlikely(!ndev)) { + pr_err_ratelimited("netdev for wlan%u.%u does not exists\n", + meta->macid, meta->ifidx); + goto out; + } + + __skb_trim(skb, skb->len - sizeof(*meta)); + +out: + return ndev; +} +EXPORT_SYMBOL_GPL(qtnf_classify_skb); + +MODULE_AUTHOR("Quantenna Communications"); +MODULE_DESCRIPTION("Quantenna 802.11 wireless LAN FullMAC driver."); +MODULE_LICENSE("GPL"); diff --combined drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 9a92b5150218,0aae094ab91c..397094b8bad6 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@@ -806,7 -806,7 +806,7 @@@ static void do_act_establish(struct cxg
cxgbi_sock_get(csk); csk->tid = tid; - cxgb4_insert_tid(lldi->tids, csk, tid); + cxgb4_insert_tid(lldi->tids, csk, tid, csk->csk_family); cxgbi_sock_set_flag(csk, CTPF_HAS_TID);
free_atid(csk); @@@ -956,8 -956,7 +956,8 @@@ static void do_act_open_rpl(struct cxgb if (status && status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && status != CPL_ERR_ARP_MISS) - cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl)); + cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl), + csk->csk_family);
cxgbi_sock_get(csk); spin_lock_bh(&csk->lock); @@@ -1591,13 -1590,11 +1591,12 @@@ static void release_offload_resources(s free_atid(csk); else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) { lldi = cxgbi_cdev_priv(csk->cdev); - cxgb4_remove_tid(lldi->tids, 0, csk->tid); + cxgb4_remove_tid(lldi->tids, 0, csk->tid, + csk->csk_family); cxgbi_sock_clear_flag(csk, CTPF_HAS_TID); cxgbi_sock_put(csk); } csk->dst = NULL; - csk->cdev = NULL; }
static int init_act_open(struct cxgbi_sock *csk) diff --combined include/linux/netdevice.h index 524c7776ce96,4ed952c17fc7..ad98a83f1332 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@@ -914,8 -914,7 +914,7 @@@ struct xfrmdev_ops * * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); * Called when a user wants to change the Maximum Transfer Unit - * of a device. If not defined, any request to change MTU will - * will return an error. + * of a device. * * void (*ndo_tx_timeout)(struct net_device *dev); * Callback used when the transmitter has not made any progress @@@ -972,7 -971,7 +971,7 @@@ * with PF and querying it may introduce a theoretical security risk. * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); - * int (*ndo_setup_tc)(struct net_device *dev, u32 handle, + * int (*ndo_setup_tc)(struct net_device *dev, u32 handle, u32 chain_index, * __be16 protocol, struct tc_to_netdev *tc); * Called to setup any 'tc' scheduler, classifier or action on @dev. * This is always called from the stack with the rtnl lock held and netif @@@ -1222,7 -1221,7 +1221,7 @@@ struct net_device_ops struct net_device *dev, int vf, bool setting); int (*ndo_setup_tc)(struct net_device *dev, - u32 handle, + u32 handle, u32 chain_index, __be16 protocol, struct tc_to_netdev *tc); #if IS_ENABLED(CONFIG_FCOE) @@@ -1596,8 -1595,8 +1595,8 @@@ enum netdev_priv_flags * @rtnl_link_state: This enum represents the phases of creating * a new link * - * @destructor: Called from unregister, - * can be used to call free_netdev + * @needs_free_netdev: Should unregister perform free_netdev? + * @priv_destructor: Called from unregister * @npinfo: XXX: need comments on this one * @nd_net: Network namespace this network device is inside * @@@ -1824,7 -1823,7 +1823,7 @@@ struct net_device #ifdef CONFIG_NET_SCHED DECLARE_HASHTABLE (qdisc_hash, 4); #endif - unsigned long tx_queue_len; + unsigned int tx_queue_len; spinlock_t tx_global_lock; int watchdog_timeo;
@@@ -1858,7 -1857,8 +1857,8 @@@ RTNL_LINK_INITIALIZING, } rtnl_link_state:16;
- void (*destructor)(struct net_device *dev); + bool needs_free_netdev; + void (*priv_destructor)(struct net_device *dev);
#ifdef CONFIG_NETPOLL struct netpoll_info __rcu *npinfo; @@@ -2456,7 -2456,6 +2456,7 @@@ static inline int dev_recursion_level(v struct net_device *dev_get_by_index(struct net *net, int ifindex); struct net_device *__dev_get_by_index(struct net *net, int ifindex); struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); +struct net_device *dev_get_by_napi_id(unsigned int napi_id); int netdev_get_name(struct net *net, char *name, int ifindex); int dev_restart(struct net_device *dev); int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb); @@@ -2574,7 -2573,9 +2574,7 @@@ static inline void skb_gro_incr_csum_un if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \ __ret = __skb_gro_checksum_validate_complete(skb, \ compute_pseudo(skb, proto)); \ - if (__ret) \ - __skb_mark_checksum_bad(skb); \ - else \ + if (!__ret) \ skb_gro_incr_csum_unnecessary(skb); \ __ret; \ }) @@@ -3930,10 -3931,6 +3930,10 @@@ void netdev_rss_key_fill(void *buffer,
int dev_get_nest_level(struct net_device *dev); int skb_checksum_help(struct sk_buff *skb); +int skb_crc32c_csum_help(struct sk_buff *skb); +int skb_csum_hwoffload_help(struct sk_buff *skb, + const netdev_features_t features); + struct sk_buff *__skb_gso_segment(struct sk_buff *skb, netdev_features_t features, bool tx_path); struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, @@@ -4264,6 -4261,11 +4264,11 @@@ static inline const char *netdev_name(c return dev->name; }
+ static inline bool netdev_unregistering(const struct net_device *dev) + { + return dev->reg_state == NETREG_UNREGISTERING; + } + static inline const char *netdev_reg_state(const struct net_device *dev) { switch (dev->reg_state) { diff --combined kernel/events/core.c index 51e40e4876c0,6c4e523dc1e2..9afab55c68c2 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@@ -3636,10 -3636,10 +3636,10 @@@ static inline u64 perf_event_count(stru * will not be local and we cannot read them atomically * - must not have a pmu::count method */ -u64 perf_event_read_local(struct perf_event *event) +int perf_event_read_local(struct perf_event *event, u64 *value) { unsigned long flags; - u64 val; + int ret = 0;
/* * Disabling interrupts avoids all counter scheduling (context @@@ -3647,37 -3647,25 +3647,37 @@@ */ local_irq_save(flags);
- /* If this is a per-task event, it must be for current */ - WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) && - event->hw.target != current); - - /* If this is a per-CPU event, it must be for this CPU */ - WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) && - event->cpu != smp_processor_id()); - /* * It must not be an event with inherit set, we cannot read * all child counters from atomic context. */ - WARN_ON_ONCE(event->attr.inherit); + if (event->attr.inherit) { + ret = -EOPNOTSUPP; + goto out; + }
/* * It must not have a pmu::count method, those are not * NMI safe. */ - WARN_ON_ONCE(event->pmu->count); + if (event->pmu->count) { + ret = -EOPNOTSUPP; + goto out; + } + + /* If this is a per-task event, it must be for current */ + if ((event->attach_state & PERF_ATTACH_TASK) && + event->hw.target != current) { + ret = -EINVAL; + goto out; + } + + /* If this is a per-CPU event, it must be for this CPU */ + if (!(event->attach_state & PERF_ATTACH_TASK) && + event->cpu != smp_processor_id()) { + ret = -EINVAL; + goto out; + }
/* * If the event is currently on this CPU, its either a per-task event, @@@ -3687,11 -3675,10 +3687,11 @@@ if (event->oncpu == smp_processor_id()) event->pmu->read(event);
- val = local64_read(&event->count); + *value = local64_read(&event->count); +out: local_irq_restore(flags);
- return val; + return ret; }
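With the int return, in-kernel callers can propagate failure instead of tripping WARN_ON_ONCE in atomic context. A minimal caller sketch:

	u64 value;
	int err = perf_event_read_local(event, &value);

	if (err)
		return err;	/* -EOPNOTSUPP or -EINVAL, matching the checks above */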
static int perf_event_read(struct perf_event *event, bool group) @@@ -7329,6 -7316,21 +7329,21 @@@ int perf_event_account_interrupt(struc return __perf_event_account_interrupt(event, 1); }
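The next hunk adds sample_is_allowed() so that, even with skid, an exclude_kernel event can never leak kernel register state to userspace. A hedged userspace sketch of such an event:

	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.sample_period	= 100000,
		.exclude_kernel	= 1,	/* samples that skid into the kernel are now dropped */
	};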
+ static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs) + { + /* + * Due to interrupt latency (AKA "skid"), we may enter the + * kernel before taking an overflow, even if the PMU is only + * counting user events. + * To avoid leaking information to userspace, we must always + * reject kernel samples when exclude_kernel is set. + */ + if (event->attr.exclude_kernel && !user_mode(regs)) + return false; + + return true; + } + /* * Generic event overflow handling, sampling. */ @@@ -7350,6 -7352,12 +7365,12 @@@ static int __perf_event_overflow(struc ret = __perf_event_account_interrupt(event, throttle);
/* + * For security, drop the skid kernel samples if necessary. + */ + if (!sample_is_allowed(event, regs)) + return ret; + + /* * XXX event_limit might not quite work as expected on inherited * events */ @@@ -8050,8 -8058,12 +8071,8 @@@ static int perf_event_set_bpf_prog(stru bool is_kprobe, is_tracepoint; struct bpf_prog *prog;
- if (event->attr.type == PERF_TYPE_HARDWARE || - event->attr.type == PERF_TYPE_SOFTWARE) - return perf_event_set_bpf_handler(event, prog_fd); - if (event->attr.type != PERF_TYPE_TRACEPOINT) - return -EINVAL; + return perf_event_set_bpf_handler(event, prog_fd);
if (event->tp_event->prog) return -EEXIST; diff --combined net/8021q/vlan_dev.c index 56d4b6977d03,abc5f400fc71..c1742322f7d2 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@@ -797,6 -797,12 +797,6 @@@ static const struct net_device_ops vlan .ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup, #endif .ndo_fix_features = vlan_dev_fix_features, - .ndo_fdb_add = switchdev_port_fdb_add, - .ndo_fdb_del = switchdev_port_fdb_del, - .ndo_fdb_dump = switchdev_port_fdb_dump, - .ndo_bridge_setlink = switchdev_port_bridge_setlink, - .ndo_bridge_getlink = switchdev_port_bridge_getlink, - .ndo_bridge_dellink = switchdev_port_bridge_dellink, .ndo_get_lock_subclass = vlan_dev_get_lock_subclass, .ndo_get_iflink = vlan_dev_get_iflink, }; @@@ -807,7 -813,6 +807,6 @@@ static void vlan_dev_free(struct net_de
free_percpu(vlan->vlan_pcpu_stats); vlan->vlan_pcpu_stats = NULL; - free_netdev(dev); }
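
vlan_dev_free() above no longer calls free_netdev(); the setup hunk below moves to the new split where the driver frees only its private state and the core frees the netdev itself. A minimal sketch of the pattern for a hypothetical driver (the foo_* names and foo_priv layout are assumptions):

    static void foo_dev_free(struct net_device *dev)
    {
        struct foo_priv *p = netdev_priv(dev);  /* hypothetical priv */

        free_percpu(p->stats);  /* private resources only... */
        /* ...the core calls free_netdev() because of the flag below */
    }

    static void foo_setup(struct net_device *dev)
    {
        dev->needs_free_netdev = true;
        dev->priv_destructor = foo_dev_free;
    }
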
void vlan_setup(struct net_device *dev) @@@ -820,7 -825,8 +819,8 @@@ netif_keep_dst(dev);
dev->netdev_ops = &vlan_netdev_ops; - dev->destructor = vlan_dev_free; + dev->needs_free_netdev = true; + dev->priv_destructor = vlan_dev_free; dev->ethtool_ops = &vlan_ethtool_ops;
dev->min_mtu = 0; diff --combined net/batman-adv/distributed-arp-table.c index 362cae2ef82a,000ca2f113ab..6930d6b50f99 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@@ -601,7 -601,7 +601,7 @@@ batadv_dat_select_candidates(struct bat BATADV_DAT_ADDR_MAX);
batadv_dbg(BATADV_DBG_DAT, bat_priv, - "dat_select_candidates(): IP=%pI4 hash(IP)=%u\n", &ip_dst, + "%s(): IP=%pI4 hash(IP)=%u\n", __func__, &ip_dst, ip_key);
for (select = 0; select < BATADV_DAT_CANDIDATES_NUM; select++) @@@ -1064,8 -1064,9 +1064,9 @@@ bool batadv_dat_snoop_outgoing_arp_requ
skb_new->protocol = eth_type_trans(skb_new, soft_iface);
- soft_iface->stats.rx_packets++; - soft_iface->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size; + batadv_inc_counter(bat_priv, BATADV_CNT_RX); + batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, + skb->len + ETH_HLEN + hdr_size);
netif_rx(skb_new); batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n"); diff --combined net/batman-adv/routing.c index 1338b9221613,ae9f4d37d34f..f10e3ff26f9d --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@@ -985,9 -985,9 +985,9 @@@ int batadv_recv_unicast_packet(struct s batadv_orig_node_put(orig_node_gw); if (is_gw) { batadv_dbg(BATADV_DBG_BLA, bat_priv, - "recv_unicast_packet(): Dropped unicast pkt received from another backbone gw %pM.\n", - orig_addr_gw); + "%s(): Dropped unicast pkt received from another backbone gw %pM.\n", + __func__, orig_addr_gw); - return NET_RX_DROP; + goto free_skb; } }
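
Both batman-adv hunks above replace hard-coded function names in debug strings with __func__, so the message stays correct if the function is ever renamed. The idiom in generic form (pr_debug() stands in for batadv_dbg()):

    /* A minimal sketch of the __func__ logging idiom. */
    static void foo_handle(u32 key)
    {
        pr_debug("%s(): key=%u\n", __func__, key);
    }
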
diff --combined net/caif/caif_socket.c index 4674d17e7c08,21f18ea2fce4..7506b853a84d --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@@ -754,6 -754,10 +754,10 @@@ static int caif_connect(struct socket *
lock_sock(sk);
+ err = -EINVAL; + if (addr_len < offsetofend(struct sockaddr, sa_family)) + goto out; + err = -EAFNOSUPPORT; if (uaddr->sa_family != AF_CAIF) goto out; @@@ -1099,7 -1103,7 +1103,7 @@@ static int caif_create(struct net *net }
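
caif_connect() above now rejects addresses too short to contain sa_family before reading it. A hedged sketch of the same validation in a hypothetical connect handler (AF_FOO is an assumption):

    static int foo_connect(struct socket *sock, struct sockaddr *uaddr,
                           int addr_len, int flags)
    {
        /* offsetofend() = first byte past sa_family, i.e. the minimum
         * length at which reading uaddr->sa_family is safe */
        if (addr_len < offsetofend(struct sockaddr, sa_family))
            return -EINVAL;
        if (uaddr->sa_family != AF_FOO) /* hypothetical family */
            return -EAFNOSUPPORT;
        return 0;   /* real handler continues from here */
    }
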
-static struct net_proto_family caif_family_ops = { +static const struct net_proto_family caif_family_ops = { .family = PF_CAIF, .create = caif_create, .owner = THIS_MODULE, diff --combined net/core/dev.c index 8f72f4a9c6ac,6d60149287a1..8658074ecad6 --- a/net/core/dev.c +++ b/net/core/dev.c @@@ -105,7 -105,6 +105,7 @@@ #include <net/dst.h> #include <net/dst_metadata.h> #include <net/pkt_sched.h> +#include <net/pkt_cls.h> #include <net/checksum.h> #include <net/xfrm.h> #include <linux/highmem.h> @@@ -143,7 -142,6 +143,7 @@@ #include <linux/hrtimer.h> #include <linux/netfilter_ingress.h> #include <linux/crash_dump.h> +#include <linux/sctp.h>
#include "net-sysfs.h"
@@@ -163,7 -161,6 +163,7 @@@ static int netif_rx_internal(struct sk_ static int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev, struct netdev_notifier_info *info); +static struct napi_struct *napi_by_id(unsigned int napi_id);
/* * The @dev_base_head list is protected by @dev_base_lock and the rtnl @@@ -868,31 -865,6 +868,31 @@@ struct net_device *dev_get_by_index(str EXPORT_SYMBOL(dev_get_by_index);
/** + * dev_get_by_napi_id - find a device by napi_id + * @napi_id: ID of the NAPI struct + * + * Search for an interface by NAPI ID. Returns %NULL if the device + * is not found or a pointer to the device. The device has not had + * its reference counter increased so the caller must be careful + * about locking. The caller must hold RCU lock. + */ + +struct net_device *dev_get_by_napi_id(unsigned int napi_id) +{ + struct napi_struct *napi; + + WARN_ON_ONCE(!rcu_read_lock_held()); + + if (napi_id < MIN_NAPI_ID) + return NULL; + + napi = napi_by_id(napi_id); + + return napi ? napi->dev : NULL; +} +EXPORT_SYMBOL(dev_get_by_napi_id); + +/** * netdev_get_name - get a netdevice name, knowing its ifindex. * @net: network namespace * @name: a pointer to the buffer where the name will be stored. @@@ -1281,8 -1253,9 +1281,9 @@@ int dev_set_alias(struct net_device *de if (!new_ifalias) return -ENOMEM; dev->ifalias = new_ifalias; + memcpy(dev->ifalias, alias, len); + dev->ifalias[len] = 0;
- strlcpy(dev->ifalias, alias, len+1); return len; }
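
dev_get_by_napi_id(), added further above, neither takes a reference nor locks anything itself, so callers must hold the RCU read lock and must not touch the device past the critical section. A minimal usage sketch:

    static void foo_log_napi_dev(unsigned int napi_id)
    {
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_napi_id(napi_id);  /* no refcount taken */
        if (dev)
            netdev_dbg(dev, "napi_id %u maps here\n", napi_id);
        rcu_read_unlock();  /* dev must not be used after this */
    }
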
@@@ -2639,47 -2612,6 +2640,47 @@@ out } EXPORT_SYMBOL(skb_checksum_help);
+int skb_crc32c_csum_help(struct sk_buff *skb) +{ + __le32 crc32c_csum; + int ret = 0, offset, start; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + goto out; + + if (unlikely(skb_is_gso(skb))) + goto out; + + /* Before computing a checksum, we should make sure no frag could + * be modified by an external entity : checksum could be wrong. + */ + if (unlikely(skb_has_shared_frag(skb))) { + ret = __skb_linearize(skb); + if (ret) + goto out; + } + start = skb_checksum_start_offset(skb); + offset = start + offsetof(struct sctphdr, checksum); + if (WARN_ON_ONCE(offset >= skb_headlen(skb))) { + ret = -EINVAL; + goto out; + } + if (skb_cloned(skb) && + !skb_clone_writable(skb, offset + sizeof(__le32))) { + ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (ret) + goto out; + } + crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start, + skb->len - start, ~(__u32)0, + crc32c_csum_stub)); + *(__le32 *)(skb->data + offset) = crc32c_csum; + skb->ip_summed = CHECKSUM_NONE; + skb->csum_not_inet = 0; +out: + return ret; +} + __be16 skb_network_protocol(struct sk_buff *skb, int *depth) { __be16 type = skb->protocol; @@@ -3022,17 -2954,6 +3023,17 @@@ static struct sk_buff *validate_xmit_vl return skb; }
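
skb_crc32c_csum_help() above is the software fallback for SCTP's CRC32c; the next hunk adds skb_csum_hwoffload_help(), which dispatches between it and skb_checksum_help() based on device features. A hedged sketch of a driver-side path using the dispatcher (foo_resolve_csum is hypothetical):

    /* A minimal sketch, assuming a driver that resolves pending
     * checksums in software when the device lacks the feature. */
    static int foo_resolve_csum(struct sk_buff *skb,
                                netdev_features_t features)
    {
        if (skb->ip_summed != CHECKSUM_PARTIAL)
            return 0;   /* nothing pending */

        /* CRC32c (csum_not_inet) -> skb_crc32c_csum_help(),
         * otherwise -> skb_checksum_help(), unless offloaded */
        return skb_csum_hwoffload_help(skb, features);
    }
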
+int skb_csum_hwoffload_help(struct sk_buff *skb, + const netdev_features_t features) +{ + if (unlikely(skb->csum_not_inet)) + return !!(features & NETIF_F_SCTP_CRC) ? 0 : + skb_crc32c_csum_help(skb); + + return !!(features & NETIF_F_CSUM_MASK) ? 0 : skb_checksum_help(skb); +} +EXPORT_SYMBOL(skb_csum_hwoffload_help); + static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev) { netdev_features_t features; @@@ -3071,7 -2992,8 +3072,7 @@@ else skb_set_transport_header(skb, skb_checksum_start_offset(skb)); - if (!(features & NETIF_F_CSUM_MASK) && - skb_checksum_help(skb)) + if (skb_csum_hwoffload_help(skb, features)) goto out_kfree_skb; } } @@@ -3257,7 -3179,7 +3258,7 @@@ sch_handle_egress(struct sk_buff *skb, /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */ qdisc_bstats_cpu_update(cl->q, skb);
- switch (tc_classify(skb, cl, &cl_res, false)) { + switch (tcf_classify(skb, cl, &cl_res, false)) { case TC_ACT_OK: case TC_ACT_RECLASSIFY: skb->tc_index = TC_H_MIN(cl_res.classid); @@@ -3269,7 -3191,6 +3270,7 @@@ return NULL; case TC_ACT_STOLEN: case TC_ACT_QUEUED: + case TC_ACT_TRAP: *ret = NET_XMIT_SUCCESS; consume_skb(skb); return NULL; @@@ -4028,7 -3949,7 +4029,7 @@@ sch_handle_ingress(struct sk_buff *skb skb->tc_at_ingress = 1; qdisc_bstats_cpu_update(cl->q, skb);
- switch (tc_classify(skb, cl, &cl_res, false)) { + switch (tcf_classify(skb, cl, &cl_res, false)) { case TC_ACT_OK: case TC_ACT_RECLASSIFY: skb->tc_index = TC_H_MIN(cl_res.classid); @@@ -4039,7 -3960,6 +4040,7 @@@ return NULL; case TC_ACT_STOLEN: case TC_ACT_QUEUED: + case TC_ACT_TRAP: consume_skb(skb); return NULL; case TC_ACT_REDIRECT: @@@ -4717,6 -4637,9 +4718,6 @@@ static enum gro_result dev_gro_receive( if (netif_elide_gro(skb->dev)) goto normal;
- if (skb->csum_bad) - goto normal; - gro_list_prepare(napi, skb);
rcu_read_lock(); @@@ -5026,6 -4949,19 +5027,19 @@@ __sum16 __skb_gro_checksum_complete(str } EXPORT_SYMBOL(__skb_gro_checksum_complete);
+ static void net_rps_send_ipi(struct softnet_data *remsd) + { + #ifdef CONFIG_RPS + while (remsd) { + struct softnet_data *next = remsd->rps_ipi_next; + + if (cpu_online(remsd->cpu)) + smp_call_function_single_async(remsd->cpu, &remsd->csd); + remsd = next; + } + #endif + } + /* * net_rps_action_and_irq_enable sends any pending IPI's for rps. * Note: called with local irq disabled, but exits with local irq enabled. @@@ -5041,14 -4977,7 +5055,7 @@@ static void net_rps_action_and_irq_enab local_irq_enable();
/* Send pending IPI's to kick RPS processing on remote cpus. */ - while (remsd) { - struct softnet_data *next = remsd->rps_ipi_next; - - if (cpu_online(remsd->cpu)) - smp_call_function_single_async(remsd->cpu, - &remsd->csd); - remsd = next; - } + net_rps_send_ipi(remsd); } else #endif local_irq_enable(); @@@ -7086,7 -7015,7 +7093,7 @@@ static void rollback_registered_many(st
if (!dev->rtnl_link_ops || dev->rtnl_link_state == RTNL_LINK_INITIALIZED) - skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, + skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0, GFP_KERNEL);
/* @@@ -7579,6 -7508,8 +7586,8 @@@ out err_uninit: if (dev->netdev_ops->ndo_uninit) dev->netdev_ops->ndo_uninit(dev); + if (dev->priv_destructor) + dev->priv_destructor(dev); goto out; } EXPORT_SYMBOL(register_netdevice); @@@ -7786,8 -7717,10 +7795,10 @@@ void netdev_run_todo(void WARN_ON(rcu_access_pointer(dev->ip6_ptr)); WARN_ON(dev->dn_ptr);
- if (dev->destructor) - dev->destructor(dev); + if (dev->priv_destructor) + dev->priv_destructor(dev); + if (dev->needs_free_netdev) + free_netdev(dev);
/* Report a network device has been unregistered */ rtnl_lock(); @@@ -8270,7 -8203,7 +8281,7 @@@ static int dev_cpu_dead(unsigned int ol struct sk_buff **list_skb; struct sk_buff *skb; unsigned int cpu; - struct softnet_data *sd, *oldsd; + struct softnet_data *sd, *oldsd, *remsd = NULL;
local_irq_disable(); cpu = smp_processor_id(); @@@ -8311,6 -8244,13 +8322,13 @@@ raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_enable();
+ #ifdef CONFIG_RPS + remsd = oldsd->rps_ipi_list; + oldsd->rps_ipi_list = NULL; + #endif + /* send out pending IPI's on offline CPU */ + net_rps_send_ipi(remsd); + /* Process offline CPU's input_pkt_queue */ while ((skb = __skb_dequeue(&oldsd->process_queue))) { netif_rx_ni(skb); diff --combined net/core/rtnetlink.c index 7084f1db2446,5e61456f6bc7..2769ad9834d1 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@@ -941,7 -941,6 +941,7 @@@ static noinline size_t if_nlmsg_size(co + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */ + nla_total_size(IFNAMSIZ) /* IFLA_PHYS_PORT_NAME */ + rtnl_xdp_size() /* IFLA_XDP */ + + nla_total_size(4) /* IFLA_EVENT */ + nla_total_size(1); /* IFLA_PROTO_DOWN */
} @@@ -1125,6 -1124,8 +1125,8 @@@ static noinline_for_stack int rtnl_fill struct ifla_vf_mac vf_mac; struct ifla_vf_info ivi;
+ memset(&ivi, 0, sizeof(ivi)); + /* Not all SR-IOV capable drivers support the * spoofcheck and "RSS query enable" query. Preset to * -1 so the user space tool can detect that the driver @@@ -1133,7 -1134,6 +1135,6 @@@ ivi.spoofchk = -1; ivi.rss_query_en = -1; ivi.trusted = -1; - memset(ivi.mac, 0, sizeof(ivi.mac)); /* The default value for VF link state is "auto" * IFLA_VF_LINK_STATE_AUTO which equals zero */ @@@ -1283,40 -1283,9 +1284,40 @@@ err_cancel return err; }
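
The memset() above replaces zeroing of just ivi.mac: not every SR-IOV driver fills every field of struct ifla_vf_info, so copying a partially initialized struct into a netlink message could leak kernel stack to userspace. The general pattern, sketched with a hypothetical helper:

    /* A minimal sketch: zero everything first, then preset the optional
     * fields, then let the driver fill whatever it supports. */
    static void foo_prep_vf_info(struct ifla_vf_info *ivi)
    {
        memset(ivi, 0, sizeof(*ivi));   /* no uninitialized bytes leak */
        ivi->spoofchk = -1;             /* -1 = "driver did not report" */
        ivi->rss_query_en = -1;
        ivi->trusted = -1;
    }
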
+static u32 rtnl_get_event(unsigned long event) +{ + u32 rtnl_event_type = IFLA_EVENT_NONE; + + switch (event) { + case NETDEV_REBOOT: + rtnl_event_type = IFLA_EVENT_REBOOT; + break; + case NETDEV_FEAT_CHANGE: + rtnl_event_type = IFLA_EVENT_FEATURES; + break; + case NETDEV_BONDING_FAILOVER: + rtnl_event_type = IFLA_EVENT_BONDING_FAILOVER; + break; + case NETDEV_NOTIFY_PEERS: + rtnl_event_type = IFLA_EVENT_NOTIFY_PEERS; + break; + case NETDEV_RESEND_IGMP: + rtnl_event_type = IFLA_EVENT_IGMP_RESEND; + break; + case NETDEV_CHANGEINFODATA: + rtnl_event_type = IFLA_EVENT_BONDING_OPTIONS; + break; + default: + break; + } + + return rtnl_event_type; +} + static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, int type, u32 pid, u32 seq, u32 change, - unsigned int flags, u32 ext_filter_mask) + unsigned int flags, u32 ext_filter_mask, + u32 event) { struct ifinfomsg *ifm; struct nlmsghdr *nlh; @@@ -1365,11 -1334,6 +1366,11 @@@ nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down)) goto nla_put_failure;
+ if (event != IFLA_EVENT_NONE) { + if (nla_put_u32(skb, IFLA_EVENT, event)) + goto nla_put_failure; + } + if (rtnl_fill_link_ifmap(skb, dev)) goto nla_put_failure;
@@@ -1504,7 -1468,6 +1505,7 @@@ static const struct nla_policy ifla_pol [IFLA_LINK_NETNSID] = { .type = NLA_S32 }, [IFLA_PROTO_DOWN] = { .type = NLA_U8 }, [IFLA_XDP] = { .type = NLA_NESTED }, + [IFLA_EVENT] = { .type = NLA_U32 }, };
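
A new netlink attribute such as IFLA_EVENT touches three places that must stay consistent: the size estimate in if_nlmsg_size() (nla_total_size(4) above), the nla_put_u32() in the fill function, and the policy entry just added. The trio, sketched with hypothetical foo_* names:

    /* A minimal sketch of the three places a new u32 attribute touches. */
    static const struct nla_policy foo_policy[IFLA_MAX + 1] = {
        [IFLA_EVENT] = { .type = NLA_U32 }, /* validated on input */
    };

    static size_t foo_size(void)
    {
        return nla_total_size(4);           /* accounted on output */
    }

    static int foo_fill(struct sk_buff *skb, u32 event)
    {
        /* must never exceed what foo_size() accounted for */
        return nla_put_u32(skb, IFLA_EVENT, event) ? -EMSGSIZE : 0;
    }
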
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { @@@ -1664,7 -1627,7 +1665,7 @@@ static int rtnl_dump_ifinfo(struct sk_b NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 0, flags, - ext_filter_mask); + ext_filter_mask, 0);
if (err < 0) { if (likely(skb->len)) @@@ -2086,8 -2049,8 +2087,8 @@@ static int do_setlink(const struct sk_b }
if (tb[IFLA_TXQLEN]) { - unsigned long value = nla_get_u32(tb[IFLA_TXQLEN]); - unsigned long orig_len = dev->tx_queue_len; + unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]); + unsigned int orig_len = dev->tx_queue_len;
if (dev->tx_queue_len ^ value) { dev->tx_queue_len = value; @@@ -2774,7 -2737,7 +2775,7 @@@ static int rtnl_getlink(struct sk_buff return -ENOBUFS;
err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).portid, - nlh->nlmsg_seq, 0, 0, ext_filter_mask); + nlh->nlmsg_seq, 0, 0, ext_filter_mask, 0); if (err < 0) { /* -EMSGSIZE implies BUG in if_nlmsg_size */ WARN_ON(err == -EMSGSIZE); @@@ -2846,8 -2809,7 +2847,8 @@@ static int rtnl_dump_all(struct sk_buf }
struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, - unsigned int change, gfp_t flags) + unsigned int change, + u32 event, gfp_t flags) { struct net *net = dev_net(dev); struct sk_buff *skb; @@@ -2858,7 -2820,7 +2859,7 @@@ if (skb == NULL) goto errout;
- err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0); + err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0, event); if (err < 0) { /* -EMSGSIZE implies BUG in if_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); @@@ -2879,25 -2841,18 +2880,25 @@@ void rtmsg_ifinfo_send(struct sk_buff * rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, flags); }
-void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, - gfp_t flags) +static void rtmsg_ifinfo_event(int type, struct net_device *dev, + unsigned int change, u32 event, + gfp_t flags) { struct sk_buff *skb;
if (dev->reg_state != NETREG_REGISTERED) return;
- skb = rtmsg_ifinfo_build_skb(type, dev, change, flags); + skb = rtmsg_ifinfo_build_skb(type, dev, change, event, flags); if (skb) rtmsg_ifinfo_send(skb, dev, flags); } + +void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change, + gfp_t flags) +{ + rtmsg_ifinfo_event(type, dev, change, rtnl_get_event(0), flags); +} EXPORT_SYMBOL(rtmsg_ifinfo);
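
rtmsg_ifinfo() now delegates to rtmsg_ifinfo_event() with rtnl_get_event(0), which falls through the switch above to IFLA_EVENT_NONE, so existing callers emit no IFLA_EVENT attribute; only the whitelisted NETDEV_* notifier events map to a value. A sketch of the mapping behaviour (rtnl_get_event() itself is static to rtnetlink.c, so this only compiles in that file):

    static void foo_show_mapping(void)
    {
        u32 a = rtnl_get_event(NETDEV_NOTIFY_PEERS);   /* IFLA_EVENT_NOTIFY_PEERS */
        u32 b = rtnl_get_event(NETDEV_CHANGEINFODATA); /* IFLA_EVENT_BONDING_OPTIONS */
        u32 c = rtnl_get_event(0);                     /* IFLA_EVENT_NONE */
    }
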
static int nlmsg_populate_fdb_fill(struct sk_buff *skb, @@@ -4214,8 -4169,7 +4215,8 @@@ static int rtnetlink_event(struct notif case NETDEV_NOTIFY_PEERS: case NETDEV_RESEND_IGMP: case NETDEV_CHANGEINFODATA: - rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL); + rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event), + GFP_KERNEL); break; default: break; diff --combined net/ipv4/icmp.c index 5610971bf859,9144fa7df2ad..c2be26b98b5f --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@@ -489,7 -489,7 +489,7 @@@ static struct rtable *icmp_route_lookup fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev);
security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); - rt = __ip_route_output_key_hash(net, fl4, skb_in); + rt = ip_route_output_key_hash(net, fl4, skb_in); if (IS_ERR(rt)) return rt;
@@@ -657,8 -657,12 +657,12 @@@ void icmp_send(struct sk_buff *skb_in, /* Needed by both icmp_global_allow and icmp_xmit_lock */ local_bh_disable();
- /* Check global sysctl_icmp_msgs_per_sec ratelimit */
- if (!icmpv4_global_allow(net, type, code))
+ /* Check the global sysctl_icmp_msgs_per_sec ratelimit, unless the
+ * incoming dev is loopback. If the outgoing dev later changes away
+ * from loopback, the peer ratelimit still works (in icmpv4_xrlim_allow).
+ */
+ if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) &&
+ !icmpv4_global_allow(net, type, code))
 goto out_bh_enable;

sk = icmp_xmit_lock(net); diff --combined net/ipv4/ipmr.c index 9374b99c7c17,8ae425cad818..a1199895b8a6 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@@ -101,8 -101,8 +101,8 @@@ static struct mr_table *ipmr_new_table( static void ipmr_free_table(struct mr_table *mrt);
static void ip_mr_forward(struct net *net, struct mr_table *mrt, - struct sk_buff *skb, struct mfc_cache *cache, - int local); + struct net_device *dev, struct sk_buff *skb, + struct mfc_cache *cache, int local); static int ipmr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, vifi_t vifi, int assert); static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, @@@ -501,7 -501,7 +501,7 @@@ static void reg_vif_setup(struct net_de dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8; dev->flags = IFF_NOARP; dev->netdev_ops = ®_vif_netdev_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; dev->features |= NETIF_F_NETNS_LOCAL; }
@@@ -988,7 -988,7 +988,7 @@@ static void ipmr_cache_resolve(struct n
rtnl_unicast(skb, net, NETLINK_CB(skb).portid); } else { - ip_mr_forward(net, mrt, skb, c, 0); + ip_mr_forward(net, mrt, skb->dev, skb, c, 0); } } } @@@ -1073,7 -1073,7 +1073,7 @@@ static int ipmr_cache_report(struct mr_
/* Queue a packet for resolution. It gets locked cache entry! */ static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, - struct sk_buff *skb) + struct sk_buff *skb, struct net_device *dev) { const struct iphdr *iph = ip_hdr(skb); struct mfc_cache *c; @@@ -1130,6 -1130,10 +1130,10 @@@ kfree_skb(skb); err = -ENOBUFS; } else { + if (dev) { + skb->dev = dev; + skb->skb_iif = dev->ifindex; + } skb_queue_tail(&c->mfc_un.unres.unresolved, skb); err = 0; } @@@ -1828,10 -1832,10 +1832,10 @@@ static int ipmr_find_vif(struct mr_tabl
/* "local" means that we should preserve one skb (for local delivery) */ static void ip_mr_forward(struct net *net, struct mr_table *mrt, - struct sk_buff *skb, struct mfc_cache *cache, - int local) + struct net_device *dev, struct sk_buff *skb, + struct mfc_cache *cache, int local) { - int true_vifi = ipmr_find_vif(mrt, skb->dev); + int true_vifi = ipmr_find_vif(mrt, dev); int psend = -1; int vif, ct;
@@@ -1853,13 -1857,7 +1857,7 @@@ }
/* Wrong interface: drop packet and (maybe) send PIM assert. */ - if (mrt->vif_table[vif].dev != skb->dev) { - struct net_device *mdev; - - mdev = l3mdev_master_dev_rcu(mrt->vif_table[vif].dev); - if (mdev == skb->dev) - goto forward; - + if (mrt->vif_table[vif].dev != dev) { if (rt_is_output_route(skb_rtable(skb))) { /* It is our own packet, looped back. * Very complicated situation... @@@ -2053,7 -2051,7 +2051,7 @@@ int ip_mr_input(struct sk_buff *skb read_lock(&mrt_lock); vif = ipmr_find_vif(mrt, dev); if (vif >= 0) { - int err2 = ipmr_cache_unresolved(mrt, vif, skb); + int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev); read_unlock(&mrt_lock);
return err2; @@@ -2064,7 -2062,7 +2062,7 @@@ }
read_lock(&mrt_lock); - ip_mr_forward(net, mrt, skb, cache, local); + ip_mr_forward(net, mrt, dev, skb, cache, local); read_unlock(&mrt_lock);
if (local) @@@ -2238,7 -2236,7 +2236,7 @@@ int ipmr_get_route(struct net *net, str iph->saddr = saddr; iph->daddr = daddr; iph->version = 0; - err = ipmr_cache_unresolved(mrt, vif, skb2); + err = ipmr_cache_unresolved(mrt, vif, skb2, dev); read_unlock(&mrt_lock); rcu_read_unlock(); return err; @@@ -2528,129 -2526,6 +2526,129 @@@ static int ipmr_rtm_route(struct sk_buf return ipmr_mfc_delete(tbl, &mfcc, parent); }
+static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb) +{ + u32 queue_len = atomic_read(&mrt->cache_resolve_queue_len); + + if (nla_put_u32(skb, IPMRA_TABLE_ID, mrt->id) || + nla_put_u32(skb, IPMRA_TABLE_CACHE_RES_QUEUE_LEN, queue_len) || + nla_put_s32(skb, IPMRA_TABLE_MROUTE_REG_VIF_NUM, + mrt->mroute_reg_vif_num) || + nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_ASSERT, + mrt->mroute_do_assert) || + nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim)) + return false; + + return true; +} + +static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb) +{ + struct nlattr *vif_nest; + struct vif_device *vif; + + /* if the VIF doesn't exist just continue */ + if (!VIF_EXISTS(mrt, vifid)) + return true; + + vif = &mrt->vif_table[vifid]; + vif_nest = nla_nest_start(skb, IPMRA_VIF); + if (!vif_nest) + return false; + if (nla_put_u32(skb, IPMRA_VIFA_IFINDEX, vif->dev->ifindex) || + nla_put_u32(skb, IPMRA_VIFA_VIF_ID, vifid) || + nla_put_u16(skb, IPMRA_VIFA_FLAGS, vif->flags) || + nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_IN, vif->bytes_in, + IPMRA_VIFA_PAD) || + nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_OUT, vif->bytes_out, + IPMRA_VIFA_PAD) || + nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_IN, vif->pkt_in, + IPMRA_VIFA_PAD) || + nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_OUT, vif->pkt_out, + IPMRA_VIFA_PAD) || + nla_put_be32(skb, IPMRA_VIFA_LOCAL_ADDR, vif->local) || + nla_put_be32(skb, IPMRA_VIFA_REMOTE_ADDR, vif->remote)) { + nla_nest_cancel(skb, vif_nest); + return false; + } + nla_nest_end(skb, vif_nest); + + return true; +} + +static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct net *net = sock_net(skb->sk); + struct nlmsghdr *nlh = NULL; + unsigned int t = 0, s_t; + unsigned int e = 0, s_e; + struct mr_table *mrt; + + s_t = cb->args[0]; + s_e = cb->args[1]; + + ipmr_for_each_table(mrt, net) { + struct nlattr *vifs, *af; + struct ifinfomsg *hdr; + u32 i; + + if (t < s_t) + goto skip_table; + nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, RTM_NEWLINK, + sizeof(*hdr), NLM_F_MULTI); + if (!nlh) + break; + + hdr = nlmsg_data(nlh); + memset(hdr, 0, sizeof(*hdr)); + hdr->ifi_family = RTNL_FAMILY_IPMR; + + af = nla_nest_start(skb, IFLA_AF_SPEC); + if (!af) { + nlmsg_cancel(skb, nlh); + goto out; + } + + if (!ipmr_fill_table(mrt, skb)) { + nlmsg_cancel(skb, nlh); + goto out; + } + + vifs = nla_nest_start(skb, IPMRA_TABLE_VIFS); + if (!vifs) { + nla_nest_end(skb, af); + nlmsg_end(skb, nlh); + goto out; + } + for (i = 0; i < mrt->maxvif; i++) { + if (e < s_e) + goto skip_entry; + if (!ipmr_fill_vif(mrt, i, skb)) { + nla_nest_end(skb, vifs); + nla_nest_end(skb, af); + nlmsg_end(skb, nlh); + goto out; + } +skip_entry: + e++; + } + s_e = 0; + e = 0; + nla_nest_end(skb, vifs); + nla_nest_end(skb, af); + nlmsg_end(skb, nlh); +skip_table: + t++; + } + +out: + cb->args[1] = e; + cb->args[0] = t; + + return skb->len; +} + #ifdef CONFIG_PROC_FS /* The /proc interfaces to multicast routing : * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif @@@ -2993,9 -2868,6 +2991,9 @@@ int __init ip_mr_init(void ipmr_rtm_route, NULL, NULL); rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE, ipmr_rtm_route, NULL, NULL); + + rtnl_register(RTNL_FAMILY_IPMR, RTM_GETLINK, + NULL, ipmr_rtm_dumplink, NULL); return 0;
#ifdef CONFIG_IP_PIMSM_V2 diff --combined net/ipv6/route.c index 9d9b5bbea153,7cebd954d5bb..18fe6e2b88d5 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@@ -938,15 -938,14 +938,15 @@@ EXPORT_SYMBOL(rt6_lookup) */
static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info, - struct mx6_config *mxc) + struct mx6_config *mxc, + struct netlink_ext_ack *extack) { int err; struct fib6_table *table;
table = rt->rt6i_table; write_lock_bh(&table->tb6_lock); - err = fib6_add(&table->tb6_root, rt, info, mxc); + err = fib6_add(&table->tb6_root, rt, info, mxc, extack); write_unlock_bh(&table->tb6_lock);
return err; @@@ -957,7 -956,7 +957,7 @@@ int ip6_ins_rt(struct rt6_info *rt struct nl_info info = { .nl_net = dev_net(rt->dst.dev), }; struct mx6_config mxc = { .mx = NULL, };
- return __ip6_ins_rt(rt, &info, &mxc); + return __ip6_ins_rt(rt, &info, &mxc, NULL); }
static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort, @@@ -1845,8 -1844,7 +1845,8 @@@ static struct rt6_info *ip6_nh_lookup_t return rt; }
-static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg) +static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg, + struct netlink_ext_ack *extack) { struct net *net = cfg->fc_nlinfo.nl_net; struct rt6_info *rt = NULL; @@@ -1857,25 -1855,14 +1857,25 @@@ int err = -EINVAL;
/* RTF_PCPU is an internal flag; can not be set by userspace */ - if (cfg->fc_flags & RTF_PCPU) + if (cfg->fc_flags & RTF_PCPU) { + NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU"); goto out; + }
- if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128) + if (cfg->fc_dst_len > 128) { + NL_SET_ERR_MSG(extack, "Invalid prefix length"); + goto out; + } + if (cfg->fc_src_len > 128) { + NL_SET_ERR_MSG(extack, "Invalid source address length"); goto out; + } #ifndef CONFIG_IPV6_SUBTREES - if (cfg->fc_src_len) + if (cfg->fc_src_len) { + NL_SET_ERR_MSG(extack, + "Specifying source address requires IPV6_SUBTREES to be enabled"); goto out; + } #endif if (cfg->fc_ifindex) { err = -ENODEV; @@@ -1939,7 -1926,7 +1939,7 @@@
err = lwtunnel_build_state(cfg->fc_encap_type, cfg->fc_encap, AF_INET6, cfg, - &lwtstate); + &lwtstate, extack); if (err) goto out; rt->dst.lwtstate = lwtstate_get(lwtstate); @@@ -2026,10 -2013,9 +2026,10 @@@ err = -EINVAL; if (ipv6_chk_addr_and_flags(net, gw_addr, gwa_type & IPV6_ADDR_LINKLOCAL ? - dev : NULL, 0, 0)) + dev : NULL, 0, 0)) { + NL_SET_ERR_MSG(extack, "Invalid gateway address"); goto out; - + } rt->rt6i_gateway = *gw_addr;
if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) { @@@ -2045,11 -2031,8 +2045,11 @@@ addressing */ if (!(gwa_type & (IPV6_ADDR_UNICAST | - IPV6_ADDR_MAPPED))) + IPV6_ADDR_MAPPED))) { + NL_SET_ERR_MSG(extack, + "Invalid gateway address"); goto out; + }
if (cfg->fc_table) { grt = ip6_nh_lookup_table(net, cfg, gw_addr); @@@ -2089,14 -2072,8 +2089,14 @@@ goto out; } err = -EINVAL; - if (!dev || (dev->flags & IFF_LOOPBACK)) + if (!dev) { + NL_SET_ERR_MSG(extack, "Egress device not specified"); + goto out; + } else if (dev->flags & IFF_LOOPBACK) { + NL_SET_ERR_MSG(extack, + "Egress device can not be loopback device for this route"); goto out; + } }
err = -ENODEV; @@@ -2105,7 -2082,6 +2105,7 @@@
if (!ipv6_addr_any(&cfg->fc_prefsrc)) { if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) { + NL_SET_ERR_MSG(extack, "Invalid source address"); err = -EINVAL; goto out; } @@@ -2135,14 -2111,13 +2135,14 @@@ out return ERR_PTR(err); }
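
The ip6_route_info_create() hunks above attach NL_SET_ERR_MSG() strings wherever a bare -EINVAL used to be returned, so userspace (e.g. iproute2) can print why a route was rejected instead of only the errno. The pattern in isolation, with a hypothetical validator:

    static int foo_validate(struct fib6_config *cfg,
                            struct netlink_ext_ack *extack)
    {
        if (cfg->fc_dst_len > 128) {
            /* carried back to userspace alongside the errno */
            NL_SET_ERR_MSG(extack, "Invalid prefix length");
            return -EINVAL;
        }
        return 0;
    }
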
-int ip6_route_add(struct fib6_config *cfg) +int ip6_route_add(struct fib6_config *cfg, + struct netlink_ext_ack *extack) { struct mx6_config mxc = { .mx = NULL, }; struct rt6_info *rt; int err;
- rt = ip6_route_info_create(cfg); + rt = ip6_route_info_create(cfg, extack); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; @@@ -2153,7 -2128,7 +2153,7 @@@ if (err) goto out;
- err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc); + err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc, extack);
kfree(mxc.mx);
@@@ -2247,8 -2222,7 +2247,8 @@@ out_put return err; }
-static int ip6_route_del(struct fib6_config *cfg) +static int ip6_route_del(struct fib6_config *cfg, + struct netlink_ext_ack *extack) { struct fib6_table *table; struct fib6_node *fn; @@@ -2256,10 -2230,8 +2256,10 @@@ int err = -ESRCH;
table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table); - if (!table) + if (!table) { + NL_SET_ERR_MSG(extack, "FIB table does not exist"); return err; + }
read_lock_bh(&table->tb6_lock);
@@@ -2511,7 -2483,7 +2511,7 @@@ static struct rt6_info *rt6_add_route_i if (!prefixlen) cfg.fc_flags |= RTF_DEFAULT;
- ip6_route_add(&cfg); + ip6_route_add(&cfg, NULL);
return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev); } @@@ -2557,7 -2529,7 +2557,7 @@@ struct rt6_info *rt6_add_dflt_router(co
cfg.fc_gateway = *gwaddr;
- if (!ip6_route_add(&cfg)) { + if (!ip6_route_add(&cfg, NULL)) { struct fib6_table *table;
table = fib6_get_table(dev_net(dev), cfg.fc_table); @@@ -2650,10 -2622,10 +2650,10 @@@ int ipv6_route_ioctl(struct net *net, u rtnl_lock(); switch (cmd) { case SIOCADDRT: - err = ip6_route_add(&cfg); + err = ip6_route_add(&cfg, NULL); break; case SIOCDELRT: - err = ip6_route_del(&cfg); + err = ip6_route_del(&cfg, NULL); break; default: err = -EINVAL; @@@ -2832,6 -2804,7 +2832,7 @@@ static int fib6_ifdown(struct rt6_info if ((rt->dst.dev == dev || !dev) && rt != adn->net->ipv6.ip6_null_entry && (rt->rt6i_nsiblings == 0 || + (dev && netdev_unregistering(dev)) || !rt->rt6i_idev->cnf.ignore_routes_with_linkdown)) return -1;
@@@ -2931,8 -2904,7 +2932,8 @@@ static const struct nla_policy rtm_ipv6 };
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, - struct fib6_config *cfg) + struct fib6_config *cfg, + struct netlink_ext_ack *extack) { struct rtmsg *rtm; struct nlattr *tb[RTA_MAX+1]; @@@ -3016,7 -2988,7 +3017,7 @@@ cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
err = lwtunnel_valid_encap_type_attr(cfg->fc_mp, - cfg->fc_mp_len); + cfg->fc_mp_len, extack); if (err < 0) goto errout; } @@@ -3035,7 -3007,7 +3036,7 @@@ if (tb[RTA_ENCAP_TYPE]) { cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
- err = lwtunnel_valid_encap_type(cfg->fc_encap_type); + err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack); if (err < 0) goto errout; } @@@ -3126,8 -3098,7 +3127,8 @@@ static void ip6_route_mpath_notify(stru inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags); }
-static int ip6_route_multipath_add(struct fib6_config *cfg) +static int ip6_route_multipath_add(struct fib6_config *cfg, + struct netlink_ext_ack *extack) { struct rt6_info *rt_notif = NULL, *rt_last = NULL; struct nl_info *info = &cfg->fc_nlinfo; @@@ -3175,7 -3146,7 +3176,7 @@@ r_cfg.fc_encap_type = nla_get_u16(nla); }
- rt = ip6_route_info_create(&r_cfg); + rt = ip6_route_info_create(&r_cfg, extack); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; @@@ -3200,7 -3171,7 +3201,7 @@@ err_nh = NULL; list_for_each_entry(nh, &rt6_nh_list, next) { rt_last = nh->rt6_info; - err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc); + err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc, extack); /* save reference to first route for notification */ if (!rt_notif && !err) rt_notif = nh->rt6_info; @@@ -3242,7 -3213,7 +3243,7 @@@ add_errout list_for_each_entry(nh, &rt6_nh_list, next) { if (err_nh == nh) break; - ip6_route_del(&nh->r_cfg); + ip6_route_del(&nh->r_cfg, extack); }
cleanup: @@@ -3257,8 -3228,7 +3258,8 @@@ return err; }
-static int ip6_route_multipath_del(struct fib6_config *cfg) +static int ip6_route_multipath_del(struct fib6_config *cfg, + struct netlink_ext_ack *extack) { struct fib6_config r_cfg; struct rtnexthop *rtnh; @@@ -3285,7 -3255,7 +3286,7 @@@ r_cfg.fc_flags |= RTF_GATEWAY; } } - err = ip6_route_del(&r_cfg); + err = ip6_route_del(&r_cfg, extack); if (err) last_err = err;
@@@ -3301,15 -3271,15 +3302,15 @@@ static int inet6_rtm_delroute(struct sk struct fib6_config cfg; int err;
- err = rtm_to_fib6_config(skb, nlh, &cfg); + err = rtm_to_fib6_config(skb, nlh, &cfg, extack); if (err < 0) return err;
if (cfg.fc_mp) - return ip6_route_multipath_del(&cfg); + return ip6_route_multipath_del(&cfg, extack); else { cfg.fc_delete_all_nh = 1; - return ip6_route_del(&cfg); + return ip6_route_del(&cfg, extack); } }
@@@ -3319,14 -3289,14 +3320,14 @@@ static int inet6_rtm_newroute(struct sk struct fib6_config cfg; int err;
- err = rtm_to_fib6_config(skb, nlh, &cfg); + err = rtm_to_fib6_config(skb, nlh, &cfg, extack); if (err < 0) return err;
if (cfg.fc_mp) - return ip6_route_multipath_add(&cfg); + return ip6_route_multipath_add(&cfg, extack); else - return ip6_route_add(&cfg); + return ip6_route_add(&cfg, extack); }
static size_t rt6_nlmsg_size(struct rt6_info *rt) @@@ -3607,13 -3577,11 +3608,13 @@@ static int inet6_rtm_getroute(struct sk { struct net *net = sock_net(in_skb->sk); struct nlattr *tb[RTA_MAX+1]; + int err, iif = 0, oif = 0; + struct dst_entry *dst; struct rt6_info *rt; struct sk_buff *skb; struct rtmsg *rtm; struct flowi6 fl6; - int err, iif = 0, oif = 0; + bool fibmatch;
err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy, extack); @@@ -3624,7 -3592,6 +3625,7 @@@ memset(&fl6, 0, sizeof(fl6)); rtm = nlmsg_data(nlh); fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0); + fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
if (tb[RTA_SRC]) { if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr)) @@@ -3670,23 -3637,12 +3671,23 @@@ if (!ipv6_addr_any(&fl6.saddr)) flags |= RT6_LOOKUP_F_HAS_SADDR;
- rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6, - flags); + if (!fibmatch) + dst = ip6_route_input_lookup(net, dev, &fl6, flags); } else { fl6.flowi6_oif = oif;
- rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6); + if (!fibmatch) + dst = ip6_route_output(net, NULL, &fl6); + } + + if (fibmatch) + dst = ip6_route_lookup(net, &fl6, 0); + + rt = container_of(dst, struct rt6_info, dst); + if (rt->dst.error) { + err = rt->dst.error; + ip6_rt_put(rt); + goto errout; }
if (rt == net->ipv6.ip6_null_entry) { @@@ -3703,14 -3659,10 +3704,14 @@@ }
skb_dst_set(skb, &rt->dst); - - err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif, - RTM_NEWROUTE, NETLINK_CB(in_skb).portid, - nlh->nlmsg_seq, 0); + if (fibmatch) + err = rt6_fill_node(net, skb, rt, NULL, NULL, iif, + RTM_NEWROUTE, NETLINK_CB(in_skb).portid, + nlh->nlmsg_seq, 0); + else + err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif, + RTM_NEWROUTE, NETLINK_CB(in_skb).portid, + nlh->nlmsg_seq, 0); if (err < 0) { kfree_skb(skb); goto errout; diff --combined net/mac80211/cfg.c index 6980a936a437,4a388fe8c2d1..f9eb2486d550 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@@ -902,6 -902,8 +902,8 @@@ static int ieee80211_start_ap(struct wi default: return -EINVAL; } + sdata->u.ap.req_smps = sdata->smps_mode; + sdata->needed_rx_chains = sdata->local->rx_chains;
sdata->vif.bss_conf.beacon_int = params->beacon_interval; @@@ -1874,7 -1876,6 +1876,7 @@@ static int copy_mesh_setup(struct ieee8 ifmsh->user_mpm = setup->user_mpm; ifmsh->mesh_auth_id = setup->auth_id; ifmsh->security = IEEE80211_MESH_SEC_NONE; + ifmsh->userspace_handles_dfs = setup->userspace_handles_dfs; if (setup->is_authenticated) ifmsh->security |= IEEE80211_MESH_SEC_AUTHED; if (setup->is_secure) diff --combined net/mac80211/ieee80211_i.h index 392fbab73c04,5e002f62c235..2197c62a0a6e --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@@ -643,8 -643,6 +643,8 @@@ struct ieee80211_if_mesh unsigned long wrkq_flags; unsigned long mbss_changed;
+ bool userspace_handles_dfs; + u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN]; size_t mesh_id_len; /* Active Path Selection Protocol Identifier */ @@@ -1031,6 -1029,17 +1031,6 @@@ ieee80211_vif_get_shift(struct ieee8021 return shift; }
-struct ieee80211_rx_agg { - u8 addr[ETH_ALEN]; - u16 tid; -}; - -enum sdata_queue_type { - IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0, - IEEE80211_SDATA_QUEUE_RX_AGG_START = 3, - IEEE80211_SDATA_QUEUE_RX_AGG_STOP = 4, -}; - enum { IEEE80211_RX_MSG = 1, IEEE80211_TX_STATUS_MSG = 2, @@@ -1423,7 -1432,6 +1423,7 @@@ struct ieee80211_csa_ie u8 count; u8 ttl; u16 pre_value; + u16 reason_code; };
/* Parsed Information Elements */ @@@ -1523,7 -1531,7 +1523,7 @@@ ieee80211_have_rx_timestamp(struct ieee return true; /* can't handle non-legacy preamble yet */ if (status->flag & RX_FLAG_MACTIME_PLCP_START && - status->encoding != RX_ENC_LEGACY) + status->encoding == RX_ENC_LEGACY) return true; return false; } @@@ -2049,8 -2057,6 +2049,8 @@@ u8 *ieee80211_ie_build_ht_cap(u8 *pos, u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, const struct cfg80211_chan_def *chandef, u16 prot_mode, bool rifs_mode); +void ieee80211_ie_build_wide_bw_cs(u8 *pos, + const struct cfg80211_chan_def *chandef); u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, u32 cap); u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, diff --combined net/mac80211/iface.c index 6ac0a0198d19,f5f50150ba1c..9228ac73c429 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@@ -1213,7 -1213,6 +1213,6 @@@ static const struct net_device_ops ieee static void ieee80211_if_free(struct net_device *dev) { free_percpu(dev->tstats); - free_netdev(dev); }
static void ieee80211_if_setup(struct net_device *dev) @@@ -1221,7 -1220,8 +1220,8 @@@ ether_setup(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; - dev->destructor = ieee80211_if_free; + dev->needs_free_netdev = true; + dev->priv_destructor = ieee80211_if_free; }
static void ieee80211_if_setup_no_queue(struct net_device *dev) @@@ -1237,6 -1237,7 +1237,6 @@@ static void ieee80211_iface_work(struc struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct sta_info *sta; - struct ieee80211_rx_agg *rx_agg;
if (!ieee80211_sdata_running(sdata)) return; @@@ -1251,8 -1252,28 +1251,8 @@@ while ((skb = skb_dequeue(&sdata->skb_queue))) { struct ieee80211_mgmt *mgmt = (void *)skb->data;
- if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) { - rx_agg = (void *)&skb->cb; - mutex_lock(&local->sta_mtx); - sta = sta_info_get_bss(sdata, rx_agg->addr); - if (sta) - __ieee80211_start_rx_ba_session(sta, - 0, 0, 0, 1, rx_agg->tid, - IEEE80211_MAX_AMPDU_BUF, - false, true); - mutex_unlock(&local->sta_mtx); - } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_STOP) { - rx_agg = (void *)&skb->cb; - mutex_lock(&local->sta_mtx); - sta = sta_info_get_bss(sdata, rx_agg->addr); - if (sta) - __ieee80211_stop_rx_ba_session(sta, - rx_agg->tid, - WLAN_BACK_RECIPIENT, 0, - false); - mutex_unlock(&local->sta_mtx); - } else if (ieee80211_is_action(mgmt->frame_control) && - mgmt->u.action.category == WLAN_CATEGORY_BACK) { + if (ieee80211_is_action(mgmt->frame_control) && + mgmt->u.action.category == WLAN_CATEGORY_BACK) { int len = skb->len;
mutex_lock(&local->sta_mtx); @@@ -1795,6 -1816,7 +1795,7 @@@ int ieee80211_if_add(struct ieee80211_l ret = dev_alloc_name(ndev, ndev->name); if (ret < 0) { ieee80211_if_free(ndev); + free_netdev(ndev); return ret; }
@@@ -1884,7 -1906,7 +1885,7 @@@
ret = register_netdevice(ndev); if (ret) { - ieee80211_if_free(ndev); + free_netdev(ndev); return ret; } } diff --combined net/mac80211/mlme.c index 1ae9be090309,cc8e6ea1b27e..1929bce8e518 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@@ -601,7 -601,7 +601,7 @@@ static void ieee80211_send_assoc(struc struct ieee80211_supported_band *sband; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_channel *chan; - u32 rate_flags, rates = 0; + u32 rates = 0;
sdata_assert_lock(sdata);
@@@ -612,7 -612,6 +612,6 @@@ return; } chan = chanctx_conf->def.chan; - rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def); rcu_read_unlock(); sband = local->hw.wiphy->bands[chan->band]; shift = ieee80211_vif_get_shift(&sdata->vif); @@@ -636,9 -635,6 +635,6 @@@ */ rates_len = 0; for (i = 0; i < sband->n_bitrates; i++) { - if ((rate_flags & sband->bitrates[i].flags) - != rate_flags) - continue; rates |= BIT(i); rates_len++; } @@@ -1126,6 -1122,7 +1122,6 @@@ ieee80211_sta_process_chanswitch(struc return;
current_band = cbss->channel->band; - memset(&csa_ie, 0, sizeof(csa_ie)); res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band, ifmgd->flags, ifmgd->associated->bssid, &csa_ie); @@@ -2817,7 -2814,7 +2813,7 @@@ static void ieee80211_get_rates(struct u32 *rates, u32 *basic_rates, bool *have_higher_than_11mbit, int *min_rate, int *min_rate_index, - int shift, u32 rate_flags) + int shift) { int i, j;
@@@ -2845,8 -2842,6 +2841,6 @@@ int brate;
br = &sband->bitrates[j]; - if ((rate_flags & br->flags) != rate_flags) - continue;
brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5); if (brate == rate) { @@@ -4397,40 -4392,32 +4391,32 @@@ static int ieee80211_prep_connection(st return -ENOMEM; }
- if (new_sta || override) { - err = ieee80211_prep_channel(sdata, cbss); - if (err) { - if (new_sta) - sta_info_free(local, new_sta); - return -EINVAL; - } - } - + /* + * Set up the information for the new channel before setting the + * new channel. We can't - completely race-free - change the basic + * rates bitmap and the channel (sband) that it refers to, but if + * we set it up before we at least avoid calling into the driver's + * bss_info_changed() method with invalid information (since we do + * call that from changing the channel - only for IDLE and perhaps + * some others, but ...). + * + * So to avoid that, just set up all the new information before the + * channel, but tell the driver to apply it only afterwards, since + * it might need the new channel for that. + */ if (new_sta) { u32 rates = 0, basic_rates = 0; bool have_higher_than_11mbit; int min_rate = INT_MAX, min_rate_index = -1; - struct ieee80211_chanctx_conf *chanctx_conf; const struct cfg80211_bss_ies *ies; int shift = ieee80211_vif_get_shift(&sdata->vif); - u32 rate_flags; - - rcu_read_lock(); - chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); - if (WARN_ON(!chanctx_conf)) { - rcu_read_unlock(); - sta_info_free(local, new_sta); - return -EINVAL; - } - rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def); - rcu_read_unlock();
ieee80211_get_rates(sband, bss->supp_rates, bss->supp_rates_len, &rates, &basic_rates, &have_higher_than_11mbit, &min_rate, &min_rate_index, - shift, rate_flags); + shift);
/* * This used to be a workaround for basic rates missing @@@ -4488,8 -4475,22 +4474,22 @@@ sdata->vif.bss_conf.sync_dtim_count = 0; } rcu_read_unlock(); + }
- /* tell driver about BSSID, basic rates and timing */ + if (new_sta || override) { + err = ieee80211_prep_channel(sdata, cbss); + if (err) { + if (new_sta) + sta_info_free(local, new_sta); + return -EINVAL; + } + } + + if (new_sta) { + /* + * tell driver about BSSID, basic rates and timing + * this was set up above, before setting the channel + */ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES | BSS_CHANGED_BEACON_INT); diff --combined net/mac80211/rx.c index 8c7d932fd09b,3674fe3d67dc..004a2283c5d9 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@@ -237,6 -237,7 +237,6 @@@ static void ieee80211_handle_mu_mimo_mo if (!skb) return;
- skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; skb_queue_tail(&sdata->skb_queue, skb); ieee80211_queue_work(&sdata->local->hw, &sdata->work); } @@@ -1216,6 -1217,7 +1216,6 @@@ static void ieee80211_rx_reorder_ampdu( /* if this mpdu is fragmented - terminate rx aggregation session */ sc = le16_to_cpu(hdr->seq_ctrl); if (sc & IEEE80211_SCTL_FRAG) { - skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; skb_queue_tail(&rx->sdata->skb_queue, skb); ieee80211_queue_work(&local->hw, &rx->sdata->work); return; @@@ -1611,12 -1613,16 +1611,16 @@@ ieee80211_rx_h_sta_process(struct ieee8 */ if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) && !ieee80211_has_morefrags(hdr->frame_control) && + !ieee80211_is_back_req(hdr->frame_control) && !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && (rx->sdata->vif.type == NL80211_IFTYPE_AP || rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && - /* PM bit is only checked in frames where it isn't reserved, + /* + * PM bit is only checked in frames where it isn't reserved, * in AP mode it's reserved in non-bufferable management frames * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field) + * BAR frames should be ignored as specified in + * IEEE 802.11-2012 10.2.1.2. */ (!ieee80211_is_mgmt(hdr->frame_control) || ieee80211_is_bufferable_mmpdu(hdr->frame_control))) { @@@ -3098,6 -3104,7 +3102,6 @@@ ieee80211_rx_h_action(struct ieee80211_ return RX_QUEUED;
queue: - rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; skb_queue_tail(&sdata->skb_queue, rx->skb); ieee80211_queue_work(&local->hw, &sdata->work); if (rx->sta) @@@ -3243,6 -3250,7 +3247,6 @@@ ieee80211_rx_h_mgmt(struct ieee80211_rx }
/* queue up frame and kick off work to process it */ - rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; skb_queue_tail(&sdata->skb_queue, rx->skb); ieee80211_queue_work(&rx->local->hw, &sdata->work); if (rx->sta) diff --combined net/sctp/socket.c index 32d5495e793c,30aa0a529215..039a93175adf --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@@ -103,7 -103,7 +103,7 @@@ static int sctp_autobind(struct sock *s static void sctp_sock_migrate(struct sock *, struct sock *, struct sctp_association *, sctp_socket_type_t);
-static int sctp_memory_pressure; +static unsigned long sctp_memory_pressure; static atomic_long_t sctp_memory_allocated; struct percpu_counter sctp_sockets_allocated;
@@@ -1494,7 -1494,7 +1494,7 @@@ static void sctp_close(struct sock *sk
pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
- lock_sock(sk); + lock_sock_nested(sk, SINGLE_DEPTH_NESTING); sk->sk_shutdown = SHUTDOWN_MASK; sk->sk_state = SCTP_SS_CLOSING;
@@@ -1544,7 -1544,7 +1544,7 @@@ * held and that should be grabbed before socket lock. */ spin_lock_bh(&net->sctp.addr_wq_lock); - bh_lock_sock(sk); + bh_lock_sock_nested(sk);
/* Hold the sock, since sk_common_release() will put sock_put() * and we have just a little more cleanup. @@@ -1920,7 -1920,7 +1920,7 @@@ static int sctp_sendmsg(struct sock *sk }
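
sctp_close() above switches to the _nested lock variants so lockdep accepts taking a second socket lock of the same class while one is already held. The annotation in general form (listener/child and foo_graft are hypothetical):

    static void foo_graft(struct sock *listener, struct sock *child)
    {
        lock_sock(listener);
        /* same lock class, second level: tell lockdep it is intended */
        lock_sock_nested(child, SINGLE_DEPTH_NESTING);

        /* ... migrate state from listener to child ... */

        release_sock(child);
        release_sock(listener);
    }
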
/* Check for invalid stream. */ - if (sinfo->sinfo_stream >= asoc->stream->outcnt) { + if (sinfo->sinfo_stream >= asoc->stream.outcnt) { err = -EINVAL; goto out_free; } @@@ -4497,8 -4497,8 +4497,8 @@@ int sctp_get_sctp_info(struct sock *sk info->sctpi_rwnd = asoc->a_rwnd; info->sctpi_unackdata = asoc->unack_data; info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); - info->sctpi_instrms = asoc->stream->incnt; - info->sctpi_outstrms = asoc->stream->outcnt; + info->sctpi_instrms = asoc->stream.incnt; + info->sctpi_outstrms = asoc->stream.outcnt; list_for_each(pos, &asoc->base.inqueue.in_chunk_list) info->sctpi_inqueue++; list_for_each(pos, &asoc->outqueue.out_chunk_list) @@@ -4622,13 -4622,13 +4622,13 @@@ int sctp_for_each_endpoint(int (*cb)(st
for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize; hash++, head++) { - read_lock(&head->lock); + read_lock_bh(&head->lock); sctp_for_each_hentry(epb, &head->chain) { err = cb(sctp_ep(epb), p); if (err) break; } - read_unlock(&head->lock); + read_unlock_bh(&head->lock); }
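
The change above moves sctp_for_each_endpoint() to the _bh lock variants: the endpoint hash lock is also taken from softirq context, so a process-context reader must disable bottom halves or a softirq on the same CPU could deadlock on the held lock. The rule, sketched:

    /* A minimal sketch, assuming 'lock' is also taken in softirq
     * context: plain read_lock() here could deadlock the local CPU. */
    static void foo_walk(rwlock_t *lock)
    {
        read_lock_bh(lock); /* BHs off while the lock is held */
        /* ... walk the protected list ... */
        read_unlock_bh(lock);
    }
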
return err; @@@ -4727,8 -4727,8 +4727,8 @@@ static int sctp_getsockopt_sctp_status( status.sstat_unackdata = asoc->unack_data;
status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); - status.sstat_instrms = asoc->stream->incnt; - status.sstat_outstrms = asoc->stream->outcnt; + status.sstat_instrms = asoc->stream.incnt; + status.sstat_outstrms = asoc->stream.outcnt; status.sstat_fragmentation_point = asoc->frag_point; status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, @@@ -6600,10 -6600,10 +6600,10 @@@ static int sctp_getsockopt_pr_streamsta goto out;
asoc = sctp_id2assoc(sk, params.sprstat_assoc_id); - if (!asoc || params.sprstat_sid >= asoc->stream->outcnt) + if (!asoc || params.sprstat_sid >= asoc->stream.outcnt) goto out;
- streamout = &asoc->stream->out[params.sprstat_sid]; + streamout = &asoc->stream.out[params.sprstat_sid]; if (policy == SCTP_PR_SCTP_NONE) { params.sprstat_abandoned_unsent = 0; params.sprstat_abandoned_sent = 0;