[linux-next] LinuxNextTracking branch, master, updated. next-20180108

batman at open-mesh.org
Tue Jan 9 00:15:51 CET 2018


The following commit has been merged in the master branch:
commit 1995abc11f840a8158d996eb7a03a23dff4b50b9
Merge: be8a7f7c3cf5666c4d642ac0249067d5ffbc8062 d0adb51edb73c94a595bfa9d9bd8b35977e74fbf
Author: Stephen Rothwell <sfr at canb.auug.org.au>
Date:   Mon Jan 8 11:05:41 2018 +1100

    Merge remote-tracking branch 'net-next/master'

diff --combined MAINTAINERS
index 6809623840de,753799d24cd9..f8b0299bfe08
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -321,7 -321,7 +321,7 @@@ F:	drivers/acpi/apei
  
  ACPI COMPONENT ARCHITECTURE (ACPICA)
  M:	Robert Moore <robert.moore at intel.com>
 -M:	Lv Zheng <lv.zheng at intel.com>
 +M:	Erik Schmauss <erik.schmauss at intel.com>
  M:	"Rafael J. Wysocki" <rafael.j.wysocki at intel.com>
  L:	linux-acpi at vger.kernel.org
  L:	devel at acpica.org
@@@ -1255,12 -1255,6 +1255,12 @@@ L:	linux-arm-kernel at lists.infradead.or
  S:	Supported
  F:	drivers/net/ethernet/cavium/thunder/
  
 +ARM/CIRRUS LOGIC BK3 MACHINE SUPPORT
 +M:	Lukasz Majewski <lukma at denx.de>
 +L:	linux-arm-kernel at lists.infradead.org (moderated for non-subscribers)
 +S:	Maintained
 +F:	arch/arm/mach-ep93xx/ts72xx.c
 +
  ARM/CIRRUS LOGIC CLPS711X ARM ARCHITECTURE
  M:	Alexander Shiyan <shc_work at mail.ru>
  L:	linux-arm-kernel at lists.infradead.org (moderated for non-subscribers)
@@@ -1589,7 -1583,6 +1589,7 @@@ F:	arch/arm/boot/dts/kirkwood
  F:	arch/arm/configs/mvebu_*_defconfig
  F:	arch/arm/mach-mvebu/
  F:	arch/arm64/boot/dts/marvell/armada*
 +F:	drivers/cpufreq/armada-37xx-cpufreq.c
  F:	drivers/cpufreq/mvebu-cpufreq.c
  F:	drivers/irqchip/irq-armada-370-xp.c
  F:	drivers/irqchip/irq-mvebu-*
@@@ -1642,38 -1635,14 +1642,38 @@@ ARM/NEC MOBILEPRO 900/c MACHINE SUPPOR
  M:	Michael Petchkovsky <mkpetch at internode.on.net>
  S:	Maintained
  
 -ARM/NOMADIK ARCHITECTURE
 -M:	Alessandro Rubini <rubini at unipv.it>
 +ARM/NOMADIK/U300/Ux500 ARCHITECTURES
  M:	Linus Walleij <linus.walleij at linaro.org>
  L:	linux-arm-kernel at lists.infradead.org (moderated for non-subscribers)
  S:	Maintained
  F:	arch/arm/mach-nomadik/
 -F:	drivers/pinctrl/nomadik/
 +F:	arch/arm/mach-u300/
 +F:	arch/arm/mach-ux500/
 +F:	arch/arm/boot/dts/ste-*
 +F:	drivers/clk/clk-nomadik.c
 +F:	drivers/clk/clk-u300.c
 +F:	drivers/clocksource/clksrc-dbx500-prcmu.c
 +F:	drivers/clocksource/timer-u300.c
 +F:	drivers/dma/coh901318*
 +F:	drivers/dma/ste_dma40*
 +F:	drivers/hwspinlock/u8500_hsem.c
  F:	drivers/i2c/busses/i2c-nomadik.c
 +F:	drivers/i2c/busses/i2c-stu300.c
 +F:	drivers/mfd/ab3100*
 +F:	drivers/mfd/ab8500*
 +F:	drivers/mfd/abx500*
 +F:	drivers/mfd/dbx500*
 +F:	drivers/mfd/db8500*
 +F:	drivers/pinctrl/nomadik/
 +F:	drivers/pinctrl/pinctrl-coh901*
 +F:	drivers/pinctrl/pinctrl-u300.c
 +F:	drivers/rtc/rtc-ab3100.c
 +F:	drivers/rtc/rtc-ab8500.c
 +F:	drivers/rtc/rtc-coh901331.c
 +F:	drivers/rtc/rtc-pl031.c
 +F:	drivers/watchdog/coh901327_wdt.c
 +F:	Documentation/devicetree/bindings/arm/ste-*
 +F:	Documentation/devicetree/bindings/arm/ux500/
  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-nomadik.git
  
  ARM/NUVOTON W90X900 ARM ARCHITECTURE
@@@ -1987,10 -1956,9 +1987,10 @@@ N:	stm3
  F:	drivers/clocksource/armv7m_systick.c
  
  ARM/TANGO ARCHITECTURE
 -M:	Marc Gonzalez <marc_gonzalez at sigmadesigns.com>
 +M:	Marc Gonzalez <marc.w.gonzalez at free.fr>
 +M:	Mans Rullgard <mans at mansr.com>
  L:	linux-arm-kernel at lists.infradead.org
 -S:	Maintained
 +S:	Odd Fixes
  N:	tango
  
  ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT
@@@ -2054,6 -2022,21 +2054,6 @@@ M:	Dmitry Eremin-Solenikov <dbaryshkov@
  M:	Dirk Opfer <dirk at opfer-online.de>
  S:	Maintained
  
 -ARM/U300 MACHINE SUPPORT
 -M:	Linus Walleij <linus.walleij at linaro.org>
 -L:	linux-arm-kernel at lists.infradead.org (moderated for non-subscribers)
 -S:	Supported
 -F:	arch/arm/mach-u300/
 -F:	drivers/clocksource/timer-u300.c
 -F:	drivers/i2c/busses/i2c-stu300.c
 -F:	drivers/rtc/rtc-coh901331.c
 -F:	drivers/watchdog/coh901327_wdt.c
 -F:	drivers/dma/coh901318*
 -F:	drivers/mfd/ab3100*
 -F:	drivers/rtc/rtc-ab3100.c
 -F:	drivers/rtc/rtc-coh901331.c
 -T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
 -
  ARM/UNIPHIER ARCHITECTURE
  M:	Masahiro Yamada <yamada.masahiro at socionext.com>
  L:	linux-arm-kernel at lists.infradead.org (moderated for non-subscribers)
@@@ -2075,6 -2058,24 +2075,6 @@@ F:	drivers/reset/reset-uniphier.
  F:	drivers/tty/serial/8250/8250_uniphier.c
  N:	uniphier
  
 -ARM/Ux500 ARM ARCHITECTURE
 -M:	Linus Walleij <linus.walleij at linaro.org>
 -L:	linux-arm-kernel at lists.infradead.org (moderated for non-subscribers)
 -S:	Maintained
 -F:	arch/arm/mach-ux500/
 -F:	drivers/clocksource/clksrc-dbx500-prcmu.c
 -F:	drivers/dma/ste_dma40*
 -F:	drivers/hwspinlock/u8500_hsem.c
 -F:	drivers/mfd/abx500*
 -F:	drivers/mfd/ab8500*
 -F:	drivers/mfd/dbx500*
 -F:	drivers/mfd/db8500*
 -F:	drivers/pinctrl/nomadik/pinctrl-ab*
 -F:	drivers/pinctrl/nomadik/pinctrl-nomadik*
 -F:	drivers/rtc/rtc-ab8500.c
 -F:	drivers/rtc/rtc-pl031.c
 -T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
 -
  ARM/Ux500 CLOCK FRAMEWORK SUPPORT
  M:	Ulf Hansson <ulf.hansson at linaro.org>
  L:	linux-arm-kernel at lists.infradead.org (moderated for non-subscribers)
@@@ -2201,6 -2202,14 +2201,6 @@@ L:	linux-leds at vger.kernel.or
  S:	Maintained
  F:	drivers/leds/leds-as3645a.c
  
 -AS3645A LED FLASH CONTROLLER DRIVER
 -M:	Laurent Pinchart <laurent.pinchart at ideasonboard.com>
 -L:	linux-media at vger.kernel.org
 -T:	git git://linuxtv.org/media_tree.git
 -S:	Maintained
 -F:	drivers/media/i2c/as3645a.c
 -F:	include/media/i2c/as3645a.h
 -
  ASAHI KASEI AK8974 DRIVER
  M:	Linus Walleij <linus.walleij at linaro.org>
  L:	linux-iio at vger.kernel.org
@@@ -2253,9 -2262,7 +2253,9 @@@ F:	include/linux/async_tx.
  AT24 EEPROM DRIVER
  M:	Bartosz Golaszewski <brgl at bgdev.pl>
  L:	linux-i2c at vger.kernel.org
 +T:	git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
  S:	Maintained
 +F:	Documentation/devicetree/bindings/eeprom/at24.txt
  F:	drivers/misc/eeprom/at24.c
  F:	include/linux/platform_data/at24.h
  
@@@ -2494,8 -2501,6 +2494,8 @@@ L:	linux-arm-kernel at lists.infradead.or
  S:	Maintained
  F:	Documentation/devicetree/bindings/arm/axentia.txt
  F:	arch/arm/boot/dts/at91-linea.dtsi
 +F:	arch/arm/boot/dts/at91-natte.dtsi
 +F:	arch/arm/boot/dts/at91-nattis-2-natte-2.dts
  F:	arch/arm/boot/dts/at91-tse850-3.dts
  
  AXENTIA ASOC DRIVERS
@@@ -2559,6 -2564,7 +2559,7 @@@ S:	Maintaine
  F:	Documentation/ABI/testing/sysfs-class-net-batman-adv
  F:	Documentation/ABI/testing/sysfs-class-net-mesh
  F:	Documentation/networking/batman-adv.rst
+ F:	include/uapi/linux/batadv_packet.h
  F:	include/uapi/linux/batman_adv.h
  F:	net/batman-adv/
  
@@@ -2616,22 -2622,24 +2617,22 @@@ F:	fs/bfs
  F:	include/uapi/linux/bfs_fs.h
  
  BLACKFIN ARCHITECTURE
 -M:	Steven Miao <realmz6 at gmail.com>
  L:	adi-buildroot-devel at lists.sourceforge.net (moderated for non-subscribers)
  T:	git git://git.code.sf.net/p/adi-linux/code
  W:	http://blackfin.uclinux.org
 -S:	Supported
 +S:	Orphan
  F:	arch/blackfin/
  
  BLACKFIN EMAC DRIVER
  L:	adi-buildroot-devel at lists.sourceforge.net (moderated for non-subscribers)
  W:	http://blackfin.uclinux.org
 -S:	Supported
 +S:	Orphan
  F:	drivers/net/ethernet/adi/
  
  BLACKFIN MEDIA DRIVER
 -M:	Scott Jiang <scott.jiang.linux at gmail.com>
  L:	adi-buildroot-devel at lists.sourceforge.net (moderated for non-subscribers)
  W:	http://blackfin.uclinux.org/
 -S:	Supported
 +S:	Orphan
  F:	drivers/media/platform/blackfin/
  F:	drivers/media/i2c/adv7183*
  F:	drivers/media/i2c/vs6624*
@@@ -2639,25 -2647,25 +2640,25 @@@
  BLACKFIN RTC DRIVER
  L:	adi-buildroot-devel at lists.sourceforge.net (moderated for non-subscribers)
  W:	http://blackfin.uclinux.org
 -S:	Supported
 +S:	Orphan
  F:	drivers/rtc/rtc-bfin.c
  
  BLACKFIN SDH DRIVER
  L:	adi-buildroot-devel at lists.sourceforge.net (moderated for non-subscribers)
  W:	http://blackfin.uclinux.org
 -S:	Supported
 +S:	Orphan
  F:	drivers/mmc/host/bfin_sdh.c
  
  BLACKFIN SERIAL DRIVER
  L:	adi-buildroot-devel at lists.sourceforge.net (moderated for non-subscribers)
  W:	http://blackfin.uclinux.org
 -S:	Supported
 +S:	Orphan
  F:	drivers/tty/serial/bfin_uart.c
  
  BLACKFIN WATCHDOG DRIVER
  L:	adi-buildroot-devel at lists.sourceforge.net (moderated for non-subscribers)
  W:	http://blackfin.uclinux.org
 -S:	Supported
 +S:	Orphan
  F:	drivers/watchdog/bfin_wdt.c
  
  BLINKM RGB LED DRIVER
@@@ -2682,7 -2690,6 +2683,6 @@@ F:	drivers/mtd/devices/block2mtd.
  
  BLUETOOTH DRIVERS
  M:	Marcel Holtmann <marcel at holtmann.org>
- M:	Gustavo Padovan <gustavo at padovan.org>
  M:	Johan Hedberg <johan.hedberg at gmail.com>
  L:	linux-bluetooth at vger.kernel.org
  W:	http://www.bluez.org/
@@@ -2693,7 -2700,6 +2693,6 @@@ F:	drivers/bluetooth
  
  BLUETOOTH SUBSYSTEM
  M:	Marcel Holtmann <marcel at holtmann.org>
- M:	Gustavo Padovan <gustavo at padovan.org>
  M:	Johan Hedberg <johan.hedberg at gmail.com>
  L:	linux-bluetooth at vger.kernel.org
  W:	http://www.bluez.org/
@@@ -2718,12 -2724,16 +2717,16 @@@ M:	Alexei Starovoitov <ast at kernel.org
  M:	Daniel Borkmann <daniel at iogearbox.net>
  L:	netdev at vger.kernel.org
  L:	linux-kernel at vger.kernel.org
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git
  S:	Supported
  F:	arch/x86/net/bpf_jit*
  F:	Documentation/networking/filter.txt
  F:	Documentation/bpf/
  F:	include/linux/bpf*
  F:	include/linux/filter.h
+ F:	include/trace/events/bpf.h
+ F:	include/trace/events/xdp.h
  F:	include/uapi/linux/bpf*
  F:	include/uapi/linux/filter.h
  F:	kernel/bpf/
@@@ -2827,8 -2837,6 +2830,8 @@@ S:	Maintaine
  F:	arch/arm/mach-bcm/*brcmstb*
  F:	arch/arm/boot/dts/bcm7*.dts*
  F:	drivers/bus/brcmstb_gisb.c
 +F:	arch/arm/mm/cache-b15-rac.c
 +F:	arch/arm/include/asm/hardware/cache-b15-rac.h
  N:	brcmstb
  
  BROADCOM BMIPS CPUFREQ DRIVER
@@@ -5146,15 -5154,15 +5149,15 @@@ F:	sound/usb/misc/ua101.
  EFI TEST DRIVER
  L:	linux-efi at vger.kernel.org
  M:	Ivan Hu <ivan.hu at canonical.com>
 -M:	Matt Fleming <matt at codeblueprint.co.uk>
 +M:	Ard Biesheuvel <ard.biesheuvel at linaro.org>
  S:	Maintained
  F:	drivers/firmware/efi/test/
  
  EFI VARIABLE FILESYSTEM
  M:	Matthew Garrett <matthew.garrett at nebula.com>
  M:	Jeremy Kerr <jk at ozlabs.org>
 -M:	Matt Fleming <matt at codeblueprint.co.uk>
 -T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
 +M:	Ard Biesheuvel <ard.biesheuvel at linaro.org>
 +T:	git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
  L:	linux-efi at vger.kernel.org
  S:	Maintained
  F:	fs/efivarfs/
@@@ -5315,6 -5323,7 +5318,6 @@@ S:	Supporte
  F:	security/integrity/evm/
  
  EXTENSIBLE FIRMWARE INTERFACE (EFI)
 -M:	Matt Fleming <matt at codeblueprint.co.uk>
  M:	Ard Biesheuvel <ard.biesheuvel at linaro.org>
  L:	linux-efi at vger.kernel.org
  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
@@@ -7070,14 -7079,6 +7073,14 @@@ R:	Dan Williams <dan.j.williams at intel.c
  S:	Odd fixes
  F:	drivers/dma/iop-adma.c
  
 +INTEL IPU3 CSI-2 CIO2 DRIVER
 +M:	Yong Zhi <yong.zhi at intel.com>
 +M:	Sakari Ailus <sakari.ailus at linux.intel.com>
 +L:	linux-media at vger.kernel.org
 +S:	Maintained
 +F:	drivers/media/pci/intel/ipu3/
 +F:	Documentation/media/uapi/v4l/pixfmt-srggb10-ipu3.rst
 +
  INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT
  M:	Krzysztof Halasa <khalasa at piap.pl>
  S:	Maintained
@@@ -8683,15 -8684,6 +8686,15 @@@ T:	git git://linuxtv.org/media_tree.gi
  S:	Maintained
  F:	drivers/media/dvb-frontends/stv6111*
  
 +MEDIA DRIVERS FOR NVIDIA TEGRA - VDE
 +M:	Dmitry Osipenko <digetx at gmail.com>
 +L:	linux-media at vger.kernel.org
 +L:	linux-tegra at vger.kernel.org
 +T:	git git://linuxtv.org/media_tree.git
 +S:	Maintained
 +F:	Documentation/devicetree/bindings/media/nvidia,tegra-vde.txt
 +F:	drivers/staging/media/tegra-vde/
 +
  MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
  M:	Mauro Carvalho Chehab <mchehab at s-opensource.com>
  M:	Mauro Carvalho Chehab <mchehab at kernel.org>
@@@ -8735,6 -8727,13 +8738,13 @@@ L:	netdev at vger.kernel.or
  S:	Maintained
  F:	drivers/net/ethernet/mediatek/
  
+ MEDIATEK SWITCH DRIVER
+ M:	Sean Wang <sean.wang at mediatek.com>
+ L:	netdev at vger.kernel.org
+ S:	Maintained
+ F:	drivers/net/dsa/mt7530.*
+ F:	net/dsa/tag_mtk.c
+ 
  MEDIATEK JPEG DRIVER
  M:	Rick Chang <rick.chang at mediatek.com>
  M:	Bin Liu <bin.liu at mediatek.com>
@@@ -9107,7 -9106,6 +9117,7 @@@ S:	Supporte
  F:	Documentation/devicetree/bindings/mips/
  F:	Documentation/mips/
  F:	arch/mips/
 +F:	drivers/platform/mips/
  
  MIPS BOSTON DEVELOPMENT BOARD
  M:	Paul Burton <paul.burton at mips.com>
@@@ -9135,25 -9133,6 +9145,25 @@@ F:	arch/mips/include/asm/mach-loongson3
  F:	drivers/*/*loongson1*
  F:	drivers/*/*/*loongson1*
  
 +MIPS/LOONGSON2 ARCHITECTURE
 +M:	Jiaxun Yang <jiaxun.yang at flygoat.com>
 +L:	linux-mips at linux-mips.org
 +S:	Maintained
 +F:	arch/mips/loongson64/*{2e/2f}*
 +F:	arch/mips/include/asm/mach-loongson64/
 +F:	drivers/*/*loongson2*
 +F:	drivers/*/*/*loongson2*
 +
 +MIPS/LOONGSON3 ARCHITECTURE
 +M:	Huacai Chen <chenhc at lemote.com>
 +L:	linux-mips at linux-mips.org
 +S:	Maintained
 +F:	arch/mips/loongson64/
 +F:	arch/mips/include/asm/mach-loongson64/
 +F:	drivers/platform/mips/cpu_hwmon.c
 +F:	drivers/*/*loongson3*
 +F:	drivers/*/*/*loongson3*
 +
  MIPS RINT INSTRUCTION EMULATION
  M:	Aleksandar Markovic <aleksandar.markovic at mips.com>
  L:	linux-mips at linux-mips.org
@@@ -9631,6 -9610,11 +9641,11 @@@ NETWORKING [WIRELESS
  L:	linux-wireless at vger.kernel.org
  Q:	http://patchwork.kernel.org/project/linux-wireless/list/
  
+ NETDEVSIM
+ M:	Jakub Kicinski <jakub.kicinski at netronome.com>
+ S:	Maintained
+ F:	drivers/net/netdevsim/*
+ 
  NETXEN (1/10) GbE SUPPORT
  M:	Manish Chopra <manish.chopra at cavium.com>
  M:	Rahul Verma <rahul.verma at cavium.com>
@@@ -10085,14 -10069,6 +10100,14 @@@ S:	Maintaine
  F:	drivers/media/i2c/ov7670.c
  F:	Documentation/devicetree/bindings/media/i2c/ov7670.txt
  
 +OMNIVISION OV7740 SENSOR DRIVER
 +M:	Wenyou Yang <wenyou.yang at microchip.com>
 +L:	linux-media at vger.kernel.org
 +T:	git git://linuxtv.org/media_tree.git
 +S:	Maintained
 +F:	drivers/media/i2c/ov7740.c
 +F:	Documentation/devicetree/bindings/media/i2c/ov7740.txt
 +
  ONENAND FLASH DRIVER
  M:	Kyungmin Park <kyungmin.park at samsung.com>
  L:	linux-mtd at lists.infradead.org
@@@ -10176,7 -10152,7 +10191,7 @@@ F:	drivers/irqchip/irq-ompic.
  F:	drivers/irqchip/irq-or1k-*
  
  OPENVSWITCH
 -M:	Pravin Shelar <pshelar at nicira.com>
 +M:	Pravin B Shelar <pshelar at ovn.org>
  L:	netdev at vger.kernel.org
  L:	dev at openvswitch.org
  W:	http://openvswitch.org
@@@ -10591,12 -10567,8 +10606,12 @@@ T:	git git://git.kernel.org/pub/scm/lin
  S:	Supported
  F:	Documentation/devicetree/bindings/pci/
  F:	Documentation/PCI/
 +F:	drivers/acpi/pci*
  F:	drivers/pci/
 +F:	include/asm-generic/pci*
  F:	include/linux/pci*
 +F:	include/uapi/linux/pci*
 +F:	lib/pci*
  F:	arch/x86/pci/
  F:	arch/x86/kernel/quirks.c
  
@@@ -10935,7 -10907,6 +10950,7 @@@ F:	include/linux/pm.
  F:	include/linux/pm_*
  F:	include/linux/powercap.h
  F:	drivers/powercap/
 +F:	kernel/configs/nopm.config
  
  POWER STATE COORDINATION INTERFACE (PSCI)
  M:	Mark Rutland <mark.rutland at arm.com>
@@@ -12383,14 -12354,6 +12398,14 @@@ T:	git git://linuxtv.org/anttip/media_t
  S:	Maintained
  F:	drivers/media/tuners/si2157*
  
 +SI2165 MEDIA DRIVER
 +M:	Matthias Schwarzott <zzam at gentoo.org>
 +L:	linux-media at vger.kernel.org
 +W:	https://linuxtv.org
 +Q:	http://patchwork.linuxtv.org/project/linux-media/list/
 +S:	Maintained
 +F:	drivers/media/dvb-frontends/si2165*
 +
  SI2168 MEDIA DRIVER
  M:	Antti Palosaari <crope at iki.fi>
  L:	linux-media at vger.kernel.org
@@@ -12921,6 -12884,12 +12936,6 @@@ S:	Odd Fixe
  F:	Documentation/devicetree/bindings/staging/iio/
  F:	drivers/staging/iio/
  
 -STAGING - LIRC (LINUX INFRARED REMOTE CONTROL) DRIVERS
 -M:	Jarod Wilson <jarod at wilsonet.com>
 -W:	http://www.lirc.org/
 -S:	Odd Fixes
 -F:	drivers/staging/media/lirc/
 -
  STAGING - LUSTRE PARALLEL FILESYSTEM
  M:	Oleg Drokin <oleg.drokin at intel.com>
  M:	Andreas Dilger <andreas.dilger at intel.com>
@@@ -13302,15 -13271,6 +13317,15 @@@ T:	git git://linuxtv.org/anttip/media_t
  S:	Maintained
  F:	drivers/media/tuners/tda18218*
  
 +TDA18250 MEDIA DRIVER
 +M:	Olli Salonen <olli.salonen at iki.fi>
 +L:	linux-media at vger.kernel.org
 +W:	https://linuxtv.org
 +Q:	http://patchwork.linuxtv.org/project/linux-media/list/
 +T:	git git://linuxtv.org/media_tree.git
 +S:	Maintained
 +F:	drivers/media/tuners/tda18250*
 +
  TDA18271 MEDIA DRIVER
  M:	Michael Krufky <mkrufky at linuxtv.org>
  L:	linux-media at vger.kernel.org
@@@ -13548,7 -13508,6 +13563,7 @@@ M:	Mika Westerberg <mika.westerberg at lin
  M:	Yehezkel Bernat <yehezkel.bernat at intel.com>
  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git
  S:	Maintained
 +F:	Documentation/admin-guide/thunderbolt.rst
  F:	drivers/thunderbolt/
  F:	include/linux/thunderbolt.h
  
diff --combined arch/arm/boot/dts/imx25.dtsi
index c43cf704b768,fcaff1c66bcb..9445f8e1473c
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@@ -122,7 -122,7 +122,7 @@@
  			};
  
  			can1: can at 43f88000 {
- 				compatible = "fsl,imx25-flexcan", "fsl,p1010-flexcan";
+ 				compatible = "fsl,imx25-flexcan";
  				reg = <0x43f88000 0x4000>;
  				interrupts = <43>;
  				clocks = <&clks 75>, <&clks 75>;
@@@ -131,7 -131,7 +131,7 @@@
  			};
  
  			can2: can at 43f8c000 {
- 				compatible = "fsl,imx25-flexcan", "fsl,p1010-flexcan";
+ 				compatible = "fsl,imx25-flexcan";
  				reg = <0x43f8c000 0x4000>;
  				interrupts = <44>;
  				clocks = <&clks 76>, <&clks 76>;
@@@ -628,13 -628,11 +628,13 @@@
  		usbphy0: usb-phy at 0 {
  			reg = <0>;
  			compatible = "usb-nop-xceiv";
 +			#phy-cells = <0>;
  		};
  
  		usbphy1: usb-phy at 1 {
  			reg = <1>;
  			compatible = "usb-nop-xceiv";
 +			#phy-cells = <0>;
  		};
  	};
  };
diff --combined arch/arm/boot/dts/imx35.dtsi
index f049c692c6b0,1f0e2203b576..e08c0c193767
--- a/arch/arm/boot/dts/imx35.dtsi
+++ b/arch/arm/boot/dts/imx35.dtsi
@@@ -303,7 -303,7 +303,7 @@@
  			};
  
  			can1: can at 53fe4000 {
- 				compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
+ 				compatible = "fsl,imx35-flexcan";
  				reg = <0x53fe4000 0x1000>;
  				clocks = <&clks 33>, <&clks 33>;
  				clock-names = "ipg", "per";
@@@ -312,7 -312,7 +312,7 @@@
  			};
  
  			can2: can at 53fe8000 {
- 				compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
+ 				compatible = "fsl,imx35-flexcan";
  				reg = <0x53fe8000 0x1000>;
  				clocks = <&clks 34>, <&clks 34>;
  				clock-names = "ipg", "per";
@@@ -402,13 -402,11 +402,13 @@@
  		usbphy0: usb-phy at 0 {
  			reg = <0>;
  			compatible = "usb-nop-xceiv";
 +			#phy-cells = <0>;
  		};
  
  		usbphy1: usb-phy at 1 {
  			reg = <1>;
  			compatible = "usb-nop-xceiv";
 +			#phy-cells = <0>;
  		};
  	};
  };
diff --combined arch/arm/boot/dts/imx53.dtsi
index 38b31a37339b,85071ff8c639..1040251f2951
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@@ -116,28 -116,6 +116,28 @@@
  		};
  	};
  
 +	pmu {
 +		compatible = "arm,cortex-a8-pmu";
 +		interrupt-parent = <&tzic>;
 +		interrupts = <77>;
 +	};
 +
 +	usbphy0: usbphy-0 {
 +		compatible = "usb-nop-xceiv";
 +		clocks = <&clks IMX5_CLK_USB_PHY1_GATE>;
 +		clock-names = "main_clk";
 +		#phy-cells = <0>;
 +		status = "okay";
 +	};
 +
 +	usbphy1: usbphy-1 {
 +		compatible = "usb-nop-xceiv";
 +		clocks = <&clks IMX5_CLK_USB_PHY2_GATE>;
 +		clock-names = "main_clk";
 +		#phy-cells = <0>;
 +		status = "okay";
 +	};
 +
  	soc {
  		#address-cells = <1>;
  		#size-cells = <1>;
@@@ -321,6 -299,20 +321,6 @@@
  				reg = <0x53f00000 0x60>;
  			};
  
 -			usbphy0: usbphy-0 {
 -				compatible = "usb-nop-xceiv";
 -				clocks = <&clks IMX5_CLK_USB_PHY1_GATE>;
 -				clock-names = "main_clk";
 -				status = "okay";
 -			};
 -
 -			usbphy1: usbphy-1 {
 -				compatible = "usb-nop-xceiv";
 -				clocks = <&clks IMX5_CLK_USB_PHY2_GATE>;
 -				clock-names = "main_clk";
 -				status = "okay";
 -			};
 -
  			usbotg: usb at 53f80000 {
  				compatible = "fsl,imx53-usb", "fsl,imx27-usb";
  				reg = <0x53f80000 0x0200>;
@@@ -441,13 -433,6 +441,13 @@@
  				clock-names = "ipg", "per";
  			};
  
 +			srtc: rtc at 53fa4000 {
 +				compatible = "fsl,imx53-rtc";
 +				reg = <0x53fa4000 0x4000>;
 +				interrupts = <24>;
 +				clocks = <&clks IMX5_CLK_SRTC_GATE>;
 +			};
 +
  			iomuxc: iomuxc at 53fa8000 {
  				compatible = "fsl,imx53-iomuxc";
  				reg = <0x53fa8000 0x4000>;
@@@ -551,7 -536,7 +551,7 @@@
  			};
  
  			can1: can at 53fc8000 {
- 				compatible = "fsl,imx53-flexcan", "fsl,p1010-flexcan";
+ 				compatible = "fsl,imx53-flexcan";
  				reg = <0x53fc8000 0x4000>;
  				interrupts = <82>;
  				clocks = <&clks IMX5_CLK_CAN1_IPG_GATE>,
@@@ -561,7 -546,7 +561,7 @@@
  			};
  
  			can2: can at 53fcc000 {
- 				compatible = "fsl,imx53-flexcan", "fsl,p1010-flexcan";
+ 				compatible = "fsl,imx53-flexcan";
  				reg = <0x53fcc000 0x4000>;
  				interrupts = <83>;
  				clocks = <&clks IMX5_CLK_CAN2_IPG_GATE>,
@@@ -828,5 -813,10 +828,5 @@@
  			reg = <0xf8000000 0x20000>;
  			clocks = <&clks IMX5_CLK_OCRAM>;
  		};
 -
 -		pmu {
 -			compatible = "arm,cortex-a8-pmu";
 -			interrupts = <77>;
 -		};
  	};
  };
diff --combined arch/arm/boot/dts/ls1021a-qds.dts
index bf15dc27ca53,4f211e3c903a..499f41a2c6f0
--- a/arch/arm/boot/dts/ls1021a-qds.dts
+++ b/arch/arm/boot/dts/ls1021a-qds.dts
@@@ -215,7 -215,7 +215,7 @@@
  				reg = <0x2a>;
  				VDDA-supply = <&reg_3p3v>;
  				VDDIO-supply = <&reg_3p3v>;
 -				clocks = <&sys_mclk 1>;
 +				clocks = <&sys_mclk>;
  			};
  		};
  	};
@@@ -239,11 -239,6 +239,11 @@@
  		device-width = <1>;
  	};
  
 +	nand at 2,0 {
 +		compatible = "fsl,ifc-nand";
 +		reg = <0x2 0x0 0x10000>;
 +	};
 +
  	fpga: board-control at 3,0 {
  		#address-cells = <1>;
  		#size-cells = <1>;
@@@ -336,3 -331,19 +336,19 @@@
  &uart1 {
  	status = "okay";
  };
+ 
+ &can0 {
+ 	status = "okay";
+ };
+ 
+ &can1 {
+ 	status = "okay";
+ };
+ 
+ &can2 {
+ 	status = "disabled";
+ };
+ 
+ &can3 {
+ 	status = "disabled";
+ };
diff --combined arch/arm/boot/dts/ls1021a-twr.dts
index b186c370ad54,7202d9c504be..f0c949d74833
--- a/arch/arm/boot/dts/ls1021a-twr.dts
+++ b/arch/arm/boot/dts/ls1021a-twr.dts
@@@ -187,7 -187,7 +187,7 @@@
  		reg = <0x0a>;
  		VDDA-supply = <&reg_3p3v>;
  		VDDIO-supply = <&reg_3p3v>;
 -		clocks = <&sys_mclk 1>;
 +		clocks = <&sys_mclk>;
  	};
  };
  
@@@ -228,10 -228,6 +228,10 @@@
  	};
  };
  
 +&esdhc {
 +        status = "okay";
 +};
 +
  &sai1 {
  	status = "okay";
  };
@@@ -247,3 -243,19 +247,19 @@@
  &uart1 {
  	status = "okay";
  };
+ 
+ &can0 {
+ 	status = "okay";
+ };
+ 
+ &can1 {
+ 	status = "okay";
+ };
+ 
+ &can2 {
+ 	status = "disabled";
+ };
+ 
+ &can3 {
+ 	status = "disabled";
+ };
diff --combined arch/arm/boot/dts/ls1021a.dtsi
index c5edfa9a68a6,7789031898b0..c31dad98f989
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@@ -106,14 -106,6 +106,14 @@@
  		compatible = "arm,cortex-a7-pmu";
  		interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>,
  			     <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
 +		interrupt-affinity = <&cpu0>, <&cpu1>;
 +	};
 +
 +	reboot {
 +		compatible = "syscon-reboot";
 +		regmap = <&dcfg>;
 +		offset = <0xb0>;
 +		mask = <0x02>;
  	};
  
  	soc {
@@@ -162,22 -154,8 +162,22 @@@
  			big-endian;
  		};
  
 +		qspi: quadspi at 1550000 {
 +			compatible = "fsl,ls1021a-qspi";
 +			#address-cells = <1>;
 +			#size-cells = <0>;
 +			reg = <0x0 0x1550000 0x0 0x10000>,
 +			      <0x0 0x40000000 0x0 0x40000000>;
 +			reg-names = "QuadSPI", "QuadSPI-memory";
 +			interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
 +			clock-names = "qspi_en", "qspi";
 +			clocks = <&clockgen 4 1>, <&clockgen 4 1>;
 +			big-endian;
 +			status = "disabled";
 +		};
 +
  		esdhc: esdhc at 1560000 {
 -			compatible = "fsl,esdhc";
 +			compatible = "fsl,ls1021a-esdhc", "fsl,esdhc";
  			reg = <0x0 0x1560000 0x0 0x10000>;
  			interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
  			clock-frequency = <0>;
@@@ -597,7 -575,7 +597,7 @@@
  			fsl,tclk-period = <5>;
  			fsl,tmr-prsc    = <2>;
  			fsl,tmr-add     = <0xaaaaaaab>;
 -			fsl,tmr-fiper1  = <999999990>;
 +			fsl,tmr-fiper1  = <999999995>;
  			fsl,tmr-fiper2  = <99990>;
  			fsl,max-adj     = <499999999>;
  		};
@@@ -690,7 -668,7 +690,7 @@@
  			};
  		};
  
 -		usb at 8600000 {
 +		usb2: usb at 8600000 {
  			compatible = "fsl-usb2-dr-v2.5", "fsl-usb2-dr";
  			reg = <0x0 0x8600000 0x0 0x1000>;
  			interrupts = <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
@@@ -698,7 -676,7 +698,7 @@@
  			phy_type = "ulpi";
  		};
  
 -		usb3 at 3100000 {
 +		usb3: usb3 at 3100000 {
  			compatible = "snps,dwc3";
  			reg = <0x0 0x3100000 0x0 0x10000>;
  			interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
@@@ -752,5 -730,41 +752,41 @@@
  					<0000 0 0 3 &gic GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
  					<0000 0 0 4 &gic GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
  		};
+ 
+ 		can0: can at 2a70000 {
+ 			compatible = "fsl,ls1021ar2-flexcan";
+ 			reg = <0x0 0x2a70000 0x0 0x1000>;
+ 			interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&clockgen 4 1>, <&clockgen 4 1>;
+ 			clock-names = "ipg", "per";
+ 			big-endian;
+ 		};
+ 
+ 		can1: can at 2a80000 {
+ 			compatible = "fsl,ls1021ar2-flexcan";
+ 			reg = <0x0 0x2a80000 0x0 0x1000>;
+ 			interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&clockgen 4 1>, <&clockgen 4 1>;
+ 			clock-names = "ipg", "per";
+ 			big-endian;
+ 		};
+ 
+ 		can2: can at 2a90000 {
+ 			compatible = "fsl,ls1021ar2-flexcan";
+ 			reg = <0x0 0x2a90000 0x0 0x1000>;
+ 			interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&clockgen 4 1>, <&clockgen 4 1>;
+ 			clock-names = "ipg", "per";
+ 			big-endian;
+ 		};
+ 
+ 		can3: can at 2aa0000 {
+ 			compatible = "fsl,ls1021ar2-flexcan";
+ 			reg = <0x0 0x2aa0000 0x0 0x1000>;
+ 			interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&clockgen 4 1>, <&clockgen 4 1>;
+ 			clock-names = "ipg", "per";
+ 			big-endian;
+ 		};
  	};
  };
diff --combined drivers/net/can/flexcan.c
index 760d2c07e3a2,3cd371c94e83..634c51e6b8ae
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@@ -190,6 -190,7 +190,7 @@@
   *   MX53  FlexCAN2  03.00.00.00    yes        no        no       no        no
   *   MX6s  FlexCAN3  10.00.12.00    yes       yes        no       no       yes
   *   VF610 FlexCAN3  ?               no       yes        no      yes       yes?
+  * LS1021A FlexCAN2  03.00.04.00     no       yes        no       no       yes
   *
   * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
   */
@@@ -279,6 -280,10 +280,10 @@@ struct flexcan_priv 
  	struct clk *clk_per;
  	const struct flexcan_devtype_data *devtype_data;
  	struct regulator *reg_xceiver;
+ 
+ 	/* Read and Write APIs */
+ 	u32 (*read)(void __iomem *addr);
+ 	void (*write)(u32 val, void __iomem *addr);
  };
  
  static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
@@@ -301,6 -306,12 +306,12 @@@ static const struct flexcan_devtype_dat
  		FLEXCAN_QUIRK_BROKEN_PERR_STATE,
  };
  
+ static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
+ 	.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ 		FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ 		FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+ };
+ 
  static const struct can_bittiming_const flexcan_bittiming_const = {
  	.name = DRV_NAME,
  	.tseg1_min = 4,
@@@ -313,39 -324,45 +324,45 @@@
  	.brp_inc = 1,
  };
  
- /* Abstract off the read/write for arm versus ppc. This
-  * assumes that PPC uses big-endian registers and everything
-  * else uses little-endian registers, independent of CPU
-  * endianness.
+ /* FlexCAN module is essentially modelled as a little-endian IP in most
+  * SoCs, i.e the registers as well as the message buffer areas are
+  * implemented in a little-endian fashion.
+  *
+  * However there are some SoCs (e.g. LS1021A) which implement the FlexCAN
+  * module in a big-endian fashion (i.e the registers as well as the
+  * message buffer areas are implemented in a big-endian way).
+  *
+  * In addition, the FlexCAN module can be found on SoCs having ARM or
+  * PPC cores. So, we need to abstract off the register read/write
+  * functions, ensuring that these cater to all the combinations of module
+  * endianness and underlying CPU endianness.
   */
- #if defined(CONFIG_PPC)
- static inline u32 flexcan_read(void __iomem *addr)
+ static inline u32 flexcan_read_be(void __iomem *addr)
  {
- 	return in_be32(addr);
+ 	return ioread32be(addr);
  }
  
- static inline void flexcan_write(u32 val, void __iomem *addr)
+ static inline void flexcan_write_be(u32 val, void __iomem *addr)
  {
- 	out_be32(addr, val);
+ 	iowrite32be(val, addr);
  }
- #else
- static inline u32 flexcan_read(void __iomem *addr)
+ 
+ static inline u32 flexcan_read_le(void __iomem *addr)
  {
- 	return readl(addr);
+ 	return ioread32(addr);
  }
  
- static inline void flexcan_write(u32 val, void __iomem *addr)
+ static inline void flexcan_write_le(u32 val, void __iomem *addr)
  {
- 	writel(val, addr);
+ 	iowrite32(val, addr);
  }
- #endif
  
  static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
  {
  	struct flexcan_regs __iomem *regs = priv->regs;
  	u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK);
  
- 	flexcan_write(reg_ctrl, &regs->ctrl);
+ 	priv->write(reg_ctrl, &regs->ctrl);
  }
  
  static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
@@@ -353,7 -370,7 +370,7 @@@
  	struct flexcan_regs __iomem *regs = priv->regs;
  	u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK);
  
- 	flexcan_write(reg_ctrl, &regs->ctrl);
+ 	priv->write(reg_ctrl, &regs->ctrl);
  }
  
  static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
@@@ -378,14 -395,14 +395,14 @@@ static int flexcan_chip_enable(struct f
  	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
  	u32 reg;
  
- 	reg = flexcan_read(&regs->mcr);
+ 	reg = priv->read(&regs->mcr);
  	reg &= ~FLEXCAN_MCR_MDIS;
- 	flexcan_write(reg, &regs->mcr);
+ 	priv->write(reg, &regs->mcr);
  
- 	while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ 	while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
  		udelay(10);
  
- 	if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
+ 	if (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
  		return -ETIMEDOUT;
  
  	return 0;
@@@ -397,14 -414,14 +414,14 @@@ static int flexcan_chip_disable(struct 
  	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
  	u32 reg;
  
- 	reg = flexcan_read(&regs->mcr);
+ 	reg = priv->read(&regs->mcr);
  	reg |= FLEXCAN_MCR_MDIS;
- 	flexcan_write(reg, &regs->mcr);
+ 	priv->write(reg, &regs->mcr);
  
- 	while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ 	while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
  		udelay(10);
  
- 	if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ 	if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
  		return -ETIMEDOUT;
  
  	return 0;
@@@ -416,14 -433,14 +433,14 @@@ static int flexcan_chip_freeze(struct f
  	unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate;
  	u32 reg;
  
- 	reg = flexcan_read(&regs->mcr);
+ 	reg = priv->read(&regs->mcr);
  	reg |= FLEXCAN_MCR_HALT;
- 	flexcan_write(reg, &regs->mcr);
+ 	priv->write(reg, &regs->mcr);
  
- 	while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ 	while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
  		udelay(100);
  
- 	if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ 	if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
  		return -ETIMEDOUT;
  
  	return 0;
@@@ -435,14 -452,14 +452,14 @@@ static int flexcan_chip_unfreeze(struc
  	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
  	u32 reg;
  
- 	reg = flexcan_read(&regs->mcr);
+ 	reg = priv->read(&regs->mcr);
  	reg &= ~FLEXCAN_MCR_HALT;
- 	flexcan_write(reg, &regs->mcr);
+ 	priv->write(reg, &regs->mcr);
  
- 	while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ 	while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
  		udelay(10);
  
- 	if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
+ 	if (priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
  		return -ETIMEDOUT;
  
  	return 0;
@@@ -453,11 -470,11 +470,11 @@@ static int flexcan_chip_softreset(struc
  	struct flexcan_regs __iomem *regs = priv->regs;
  	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
  
- 	flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
- 	while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
+ 	priv->write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
+ 	while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
  		udelay(10);
  
- 	if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
+ 	if (priv->read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
  		return -ETIMEDOUT;
  
  	return 0;
@@@ -468,7 -485,7 +485,7 @@@ static int __flexcan_get_berr_counter(c
  {
  	const struct flexcan_priv *priv = netdev_priv(dev);
  	struct flexcan_regs __iomem *regs = priv->regs;
- 	u32 reg = flexcan_read(&regs->ecr);
+ 	u32 reg = priv->read(&regs->ecr);
  
  	bec->txerr = (reg >> 0) & 0xff;
  	bec->rxerr = (reg >> 8) & 0xff;
@@@ -524,24 -541,24 +541,24 @@@ static int flexcan_start_xmit(struct sk
  
  	if (cf->can_dlc > 0) {
  		data = be32_to_cpup((__be32 *)&cf->data[0]);
- 		flexcan_write(data, &priv->tx_mb->data[0]);
+ 		priv->write(data, &priv->tx_mb->data[0]);
  	}
 -	if (cf->can_dlc > 3) {
 +	if (cf->can_dlc > 4) {
  		data = be32_to_cpup((__be32 *)&cf->data[4]);
- 		flexcan_write(data, &priv->tx_mb->data[1]);
+ 		priv->write(data, &priv->tx_mb->data[1]);
  	}
  
  	can_put_echo_skb(skb, dev, 0);
  
- 	flexcan_write(can_id, &priv->tx_mb->can_id);
- 	flexcan_write(ctrl, &priv->tx_mb->can_ctrl);
+ 	priv->write(can_id, &priv->tx_mb->can_id);
+ 	priv->write(ctrl, &priv->tx_mb->can_ctrl);
  
  	/* Errata ERR005829 step8:
  	 * Write twice INACTIVE(0x8) code to first MB.
  	 */
- 	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+ 	priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
  		      &priv->tx_mb_reserved->can_ctrl);
- 	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+ 	priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
  		      &priv->tx_mb_reserved->can_ctrl);
  
  	return NETDEV_TX_OK;
@@@ -660,7 -677,7 +677,7 @@@ static unsigned int flexcan_mailbox_rea
  		u32 code;
  
  		do {
- 			reg_ctrl = flexcan_read(&mb->can_ctrl);
+ 			reg_ctrl = priv->read(&mb->can_ctrl);
  		} while (reg_ctrl & FLEXCAN_MB_CODE_RX_BUSY_BIT);
  
  		/* is this MB empty? */
@@@ -675,17 -692,17 +692,17 @@@
  			offload->dev->stats.rx_errors++;
  		}
  	} else {
- 		reg_iflag1 = flexcan_read(&regs->iflag1);
+ 		reg_iflag1 = priv->read(&regs->iflag1);
  		if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE))
  			return 0;
  
- 		reg_ctrl = flexcan_read(&mb->can_ctrl);
+ 		reg_ctrl = priv->read(&mb->can_ctrl);
  	}
  
  	/* increase timstamp to full 32 bit */
  	*timestamp = reg_ctrl << 16;
  
- 	reg_id = flexcan_read(&mb->can_id);
+ 	reg_id = priv->read(&mb->can_id);
  	if (reg_ctrl & FLEXCAN_MB_CNT_IDE)
  		cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
  	else
@@@ -695,19 -712,19 +712,19 @@@
  		cf->can_id |= CAN_RTR_FLAG;
  	cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf);
  
- 	*(__be32 *)(cf->data + 0) = cpu_to_be32(flexcan_read(&mb->data[0]));
- 	*(__be32 *)(cf->data + 4) = cpu_to_be32(flexcan_read(&mb->data[1]));
+ 	*(__be32 *)(cf->data + 0) = cpu_to_be32(priv->read(&mb->data[0]));
+ 	*(__be32 *)(cf->data + 4) = cpu_to_be32(priv->read(&mb->data[1]));
  
  	/* mark as read */
  	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
  		/* Clear IRQ */
  		if (n < 32)
- 			flexcan_write(BIT(n), &regs->iflag1);
+ 			priv->write(BIT(n), &regs->iflag1);
  		else
- 			flexcan_write(BIT(n - 32), &regs->iflag2);
+ 			priv->write(BIT(n - 32), &regs->iflag2);
  	} else {
- 		flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
- 		flexcan_read(&regs->timer);
+ 		priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
+ 		priv->read(&regs->timer);
  	}
  
  	return 1;
@@@ -719,8 -736,8 +736,8 @@@ static inline u64 flexcan_read_reg_ifla
  	struct flexcan_regs __iomem *regs = priv->regs;
  	u32 iflag1, iflag2;
  
- 	iflag2 = flexcan_read(&regs->iflag2) & priv->reg_imask2_default;
- 	iflag1 = flexcan_read(&regs->iflag1) & priv->reg_imask1_default &
+ 	iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default;
+ 	iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default &
  		~FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
  
  	return (u64)iflag2 << 32 | iflag1;
@@@ -736,7 -753,7 +753,7 @@@ static irqreturn_t flexcan_irq(int irq
  	u32 reg_iflag1, reg_esr;
  	enum can_state last_state = priv->can.state;
  
- 	reg_iflag1 = flexcan_read(&regs->iflag1);
+ 	reg_iflag1 = priv->read(&regs->iflag1);
  
  	/* reception interrupt */
  	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
@@@ -759,7 -776,8 +776,8 @@@
  		/* FIFO overflow interrupt */
  		if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) {
  			handled = IRQ_HANDLED;
- 			flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1);
+ 			priv->write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW,
+ 				    &regs->iflag1);
  			dev->stats.rx_over_errors++;
  			dev->stats.rx_errors++;
  		}
@@@ -773,18 -791,18 +791,18 @@@
  		can_led_event(dev, CAN_LED_EVENT_TX);
  
  		/* after sending a RTR frame MB is in RX mode */
- 		flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
- 			      &priv->tx_mb->can_ctrl);
- 		flexcan_write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1);
+ 		priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
+ 			    &priv->tx_mb->can_ctrl);
+ 		priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1);
  		netif_wake_queue(dev);
  	}
  
- 	reg_esr = flexcan_read(&regs->esr);
+ 	reg_esr = priv->read(&regs->esr);
  
  	/* ACK all bus error and state change IRQ sources */
  	if (reg_esr & FLEXCAN_ESR_ALL_INT) {
  		handled = IRQ_HANDLED;
- 		flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
+ 		priv->write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
  	}
  
  	/* state change interrupt or broken error state quirk fix is enabled */
@@@ -846,7 -864,7 +864,7 @@@ static void flexcan_set_bittiming(struc
  	struct flexcan_regs __iomem *regs = priv->regs;
  	u32 reg;
  
- 	reg = flexcan_read(&regs->ctrl);
+ 	reg = priv->read(&regs->ctrl);
  	reg &= ~(FLEXCAN_CTRL_PRESDIV(0xff) |
  		 FLEXCAN_CTRL_RJW(0x3) |
  		 FLEXCAN_CTRL_PSEG1(0x7) |
@@@ -870,11 -888,11 +888,11 @@@
  		reg |= FLEXCAN_CTRL_SMP;
  
  	netdev_dbg(dev, "writing ctrl=0x%08x\n", reg);
- 	flexcan_write(reg, &regs->ctrl);
+ 	priv->write(reg, &regs->ctrl);
  
  	/* print chip status */
  	netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__,
- 		   flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
+ 		   priv->read(&regs->mcr), priv->read(&regs->ctrl));
  }
  
  /* flexcan_chip_start
@@@ -913,7 -931,7 +931,7 @@@ static int flexcan_chip_start(struct ne
  	 * choose format C
  	 * set max mailbox number
  	 */
- 	reg_mcr = flexcan_read(&regs->mcr);
+ 	reg_mcr = priv->read(&regs->mcr);
  	reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
  	reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV |
  		FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ |
@@@ -927,7 -945,7 +945,7 @@@
  			FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
  	}
  	netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
- 	flexcan_write(reg_mcr, &regs->mcr);
+ 	priv->write(reg_mcr, &regs->mcr);
  
  	/* CTRL
  	 *
@@@ -940,7 -958,7 +958,7 @@@
  	 * enable bus off interrupt
  	 * (== FLEXCAN_CTRL_ERR_STATE)
  	 */
- 	reg_ctrl = flexcan_read(&regs->ctrl);
+ 	reg_ctrl = priv->read(&regs->ctrl);
  	reg_ctrl &= ~FLEXCAN_CTRL_TSYN;
  	reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF |
  		FLEXCAN_CTRL_ERR_STATE;
@@@ -960,45 -978,45 +978,45 @@@
  	/* leave interrupts disabled for now */
  	reg_ctrl &= ~FLEXCAN_CTRL_ERR_ALL;
  	netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
- 	flexcan_write(reg_ctrl, &regs->ctrl);
+ 	priv->write(reg_ctrl, &regs->ctrl);
  
  	if ((priv->devtype_data->quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) {
- 		reg_ctrl2 = flexcan_read(&regs->ctrl2);
+ 		reg_ctrl2 = priv->read(&regs->ctrl2);
  		reg_ctrl2 |= FLEXCAN_CTRL2_EACEN | FLEXCAN_CTRL2_RRS;
- 		flexcan_write(reg_ctrl2, &regs->ctrl2);
+ 		priv->write(reg_ctrl2, &regs->ctrl2);
  	}
  
  	/* clear and invalidate all mailboxes first */
  	for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) {
- 		flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE,
- 			      &regs->mb[i].can_ctrl);
+ 		priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
+ 			    &regs->mb[i].can_ctrl);
  	}
  
  	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
  		for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++)
- 			flexcan_write(FLEXCAN_MB_CODE_RX_EMPTY,
- 				      &regs->mb[i].can_ctrl);
+ 			priv->write(FLEXCAN_MB_CODE_RX_EMPTY,
+ 				    &regs->mb[i].can_ctrl);
  	}
  
  	/* Errata ERR005829: mark first TX mailbox as INACTIVE */
- 	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
- 		      &priv->tx_mb_reserved->can_ctrl);
+ 	priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
+ 		    &priv->tx_mb_reserved->can_ctrl);
  
  	/* mark TX mailbox as INACTIVE */
- 	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
- 		      &priv->tx_mb->can_ctrl);
+ 	priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
+ 		    &priv->tx_mb->can_ctrl);
  
  	/* acceptance mask/acceptance code (accept everything) */
- 	flexcan_write(0x0, &regs->rxgmask);
- 	flexcan_write(0x0, &regs->rx14mask);
- 	flexcan_write(0x0, &regs->rx15mask);
+ 	priv->write(0x0, &regs->rxgmask);
+ 	priv->write(0x0, &regs->rx14mask);
+ 	priv->write(0x0, &regs->rx15mask);
  
  	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_RXFG)
- 		flexcan_write(0x0, &regs->rxfgmask);
+ 		priv->write(0x0, &regs->rxfgmask);
  
  	/* clear acceptance filters */
  	for (i = 0; i < ARRAY_SIZE(regs->mb); i++)
- 		flexcan_write(0, &regs->rximr[i]);
+ 		priv->write(0, &regs->rximr[i]);
  
  	/* On Vybrid, disable memory error detection interrupts
  	 * and freeze mode.
@@@ -1011,16 -1029,16 +1029,16 @@@
  		 * and Correction of Memory Errors" to write to
  		 * MECR register
  		 */
- 		reg_ctrl2 = flexcan_read(&regs->ctrl2);
+ 		reg_ctrl2 = priv->read(&regs->ctrl2);
  		reg_ctrl2 |= FLEXCAN_CTRL2_ECRWRE;
- 		flexcan_write(reg_ctrl2, &regs->ctrl2);
+ 		priv->write(reg_ctrl2, &regs->ctrl2);
  
- 		reg_mecr = flexcan_read(&regs->mecr);
+ 		reg_mecr = priv->read(&regs->mecr);
  		reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
- 		flexcan_write(reg_mecr, &regs->mecr);
+ 		priv->write(reg_mecr, &regs->mecr);
  		reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
  			      FLEXCAN_MECR_FANCEI_MSK);
- 		flexcan_write(reg_mecr, &regs->mecr);
+ 		priv->write(reg_mecr, &regs->mecr);
  	}
  
  	err = flexcan_transceiver_enable(priv);
@@@ -1036,14 -1054,14 +1054,14 @@@
  
  	/* enable interrupts atomically */
  	disable_irq(dev->irq);
- 	flexcan_write(priv->reg_ctrl_default, &regs->ctrl);
- 	flexcan_write(priv->reg_imask1_default, &regs->imask1);
- 	flexcan_write(priv->reg_imask2_default, &regs->imask2);
+ 	priv->write(priv->reg_ctrl_default, &regs->ctrl);
+ 	priv->write(priv->reg_imask1_default, &regs->imask1);
+ 	priv->write(priv->reg_imask2_default, &regs->imask2);
  	enable_irq(dev->irq);
  
  	/* print chip status */
  	netdev_dbg(dev, "%s: reading mcr=0x%08x ctrl=0x%08x\n", __func__,
- 		   flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
+ 		   priv->read(&regs->mcr), priv->read(&regs->ctrl));
  
  	return 0;
  
@@@ -1068,10 -1086,10 +1086,10 @@@ static void flexcan_chip_stop(struct ne
  	flexcan_chip_disable(priv);
  
  	/* Disable all interrupts */
- 	flexcan_write(0, &regs->imask2);
- 	flexcan_write(0, &regs->imask1);
- 	flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
- 		      &regs->ctrl);
+ 	priv->write(0, &regs->imask2);
+ 	priv->write(0, &regs->imask1);
+ 	priv->write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
+ 		    &regs->ctrl);
  
  	flexcan_transceiver_disable(priv);
  	priv->can.state = CAN_STATE_STOPPED;
@@@ -1186,26 -1204,26 +1204,26 @@@ static int register_flexcandev(struct n
  	err = flexcan_chip_disable(priv);
  	if (err)
  		goto out_disable_per;
- 	reg = flexcan_read(&regs->ctrl);
+ 	reg = priv->read(&regs->ctrl);
  	reg |= FLEXCAN_CTRL_CLK_SRC;
- 	flexcan_write(reg, &regs->ctrl);
+ 	priv->write(reg, &regs->ctrl);
  
  	err = flexcan_chip_enable(priv);
  	if (err)
  		goto out_chip_disable;
  
  	/* set freeze, halt and activate FIFO, restrict register access */
- 	reg = flexcan_read(&regs->mcr);
+ 	reg = priv->read(&regs->mcr);
  	reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT |
  		FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV;
- 	flexcan_write(reg, &regs->mcr);
+ 	priv->write(reg, &regs->mcr);
  
  	/* Currently we only support newer versions of this core
  	 * featuring a RX hardware FIFO (although this driver doesn't
  	 * make use of it on some cores). Older cores, found on some
  	 * Coldfire derivates are not tested.
  	 */
- 	reg = flexcan_read(&regs->mcr);
+ 	reg = priv->read(&regs->mcr);
  	if (!(reg & FLEXCAN_MCR_FEN)) {
  		netdev_err(dev, "Could not enable RX FIFO, unsupported core\n");
  		err = -ENODEV;
@@@ -1233,8 -1251,12 +1251,12 @@@ static void unregister_flexcandev(struc
  static const struct of_device_id flexcan_of_match[] = {
  	{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
  	{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
+ 	{ .compatible = "fsl,imx53-flexcan", .data = &fsl_p1010_devtype_data, },
+ 	{ .compatible = "fsl,imx35-flexcan", .data = &fsl_p1010_devtype_data, },
+ 	{ .compatible = "fsl,imx25-flexcan", .data = &fsl_p1010_devtype_data, },
  	{ .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
  	{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
+ 	{ .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
  	{ /* sentinel */ },
  };
  MODULE_DEVICE_TABLE(of, flexcan_of_match);
@@@ -1314,6 -1336,21 +1336,21 @@@ static int flexcan_probe(struct platfor
  	dev->flags |= IFF_ECHO;
  
  	priv = netdev_priv(dev);
+ 
+ 	if (of_property_read_bool(pdev->dev.of_node, "big-endian")) {
+ 		priv->read = flexcan_read_be;
+ 		priv->write = flexcan_write_be;
+ 	} else {
+ 		if (of_device_is_compatible(pdev->dev.of_node,
+ 					    "fsl,p1010-flexcan")) {
+ 			priv->read = flexcan_read_be;
+ 			priv->write = flexcan_write_be;
+ 		} else {
+ 			priv->read = flexcan_read_le;
+ 			priv->write = flexcan_write_le;
+ 		}
+ 	}
+ 
  	priv->can.clock.freq = clock_freq;
  	priv->can.bittiming_const = &flexcan_bittiming_const;
  	priv->can.do_set_mode = flexcan_set_mode;
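
[Editor's aside, not part of the patch: the flexcan hunks above replace the old compile-time "#if defined(CONFIG_PPC)" register accessors with per-device read/write function pointers chosen in flexcan_probe() from the "big-endian" device-tree property (or the fsl,p1010-flexcan compatible). The following standalone C program is only a minimal sketch of that dispatch pattern; the names fake_priv, reg_read_le/reg_read_be and the assumption of a little-endian host are hypothetical and do not come from the driver.]

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct fake_priv {
	uint32_t (*read)(const volatile uint32_t *addr);
	void (*write)(uint32_t val, volatile uint32_t *addr);
};

static uint32_t bswap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
	       ((v << 8) & 0x00ff0000u) | (v << 24);
}

/* little-endian register image on a little-endian host: access as-is */
static uint32_t reg_read_le(const volatile uint32_t *addr) { return *addr; }
static void reg_write_le(uint32_t val, volatile uint32_t *addr) { *addr = val; }

/* big-endian register image on a little-endian host: swap on every access */
static uint32_t reg_read_be(const volatile uint32_t *addr) { return bswap32(*addr); }
static void reg_write_be(uint32_t val, volatile uint32_t *addr) { *addr = bswap32(val); }

int main(void)
{
	volatile uint32_t fake_reg = 0;
	struct fake_priv priv;
	int dt_says_big_endian = 1;	/* stand-in for the "big-endian" DT property */

	/* probe-time selection, mirroring the logic added to flexcan_probe() above */
	if (dt_says_big_endian) {
		priv.read = reg_read_be;
		priv.write = reg_write_be;
	} else {
		priv.read = reg_read_le;
		priv.write = reg_write_le;
	}

	priv.write(0x12345678u, &fake_reg);
	printf("raw=0x%08" PRIx32 " decoded=0x%08" PRIx32 "\n",
	       fake_reg, priv.read(&fake_reg));
	return 0;
}

[On a little-endian host with the big-endian accessors selected, this prints raw=0x78563412 and decoded=0x12345678, i.e. the value round-trips correctly through a byte-swapped register image, which is the behaviour the LS1021A's big-endian FlexCAN block needs.]
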
diff --combined drivers/net/can/vxcan.c
index b4c4a2c76437,5d1753cfacea..ed6828821fbd
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@@ -194,7 -194,7 +194,7 @@@ static int vxcan_newlink(struct net *ne
  		tbp = peer_tb;
  	}
  
 -	if (tbp[IFLA_IFNAME]) {
 +	if (ifmp && tbp[IFLA_IFNAME]) {
  		nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
  		name_assign_type = NET_NAME_USER;
  	} else {
@@@ -227,10 -227,8 +227,8 @@@
  	netif_carrier_off(peer);
  
  	err = rtnl_configure_link(peer, ifmp);
- 	if (err < 0) {
- 		unregister_netdevice(peer);
- 		return err;
- 	}
+ 	if (err < 0)
+ 		goto unregister_network_device;
  
  	/* register first device */
  	if (tb[IFLA_IFNAME])
@@@ -239,10 -237,8 +237,8 @@@
  		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
  
  	err = register_netdevice(dev);
- 	if (err < 0) {
- 		unregister_netdevice(peer);
- 		return err;
- 	}
+ 	if (err < 0)
+ 		goto unregister_network_device;
  
  	netif_carrier_off(dev);
  
@@@ -254,6 -250,10 +250,10 @@@
  	rcu_assign_pointer(priv->peer, dev);
  
  	return 0;
+ 
+ unregister_network_device:
+ 	unregister_netdevice(peer);
+ 	return err;
  }
  
  static void vxcan_dellink(struct net_device *dev, struct list_head *head)
diff --combined drivers/net/dsa/b53/b53_common.c
index 4498ab897d94,561b05089cb6..db830a1141d9
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@@ -1029,8 -1029,7 +1029,7 @@@ int b53_vlan_filtering(struct dsa_switc
  EXPORT_SYMBOL(b53_vlan_filtering);
  
  int b53_vlan_prepare(struct dsa_switch *ds, int port,
- 		     const struct switchdev_obj_port_vlan *vlan,
- 		     struct switchdev_trans *trans)
+ 		     const struct switchdev_obj_port_vlan *vlan)
  {
  	struct b53_device *dev = ds->priv;
  
@@@ -1047,8 -1046,7 +1046,7 @@@
  EXPORT_SYMBOL(b53_vlan_prepare);
  
  void b53_vlan_add(struct dsa_switch *ds, int port,
- 		  const struct switchdev_obj_port_vlan *vlan,
- 		  struct switchdev_trans *trans)
+ 		  const struct switchdev_obj_port_vlan *vlan)
  {
  	struct b53_device *dev = ds->priv;
  	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
@@@ -1495,18 -1493,14 +1493,17 @@@ static bool b53_can_enable_brcm_tags(st
  	return false;
  }
  
- static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds,
- 						  int port)
+ enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port)
  {
  	struct b53_device *dev = ds->priv;
  
 -	/* Older models support a different tag format that we do not
 -	 * support in net/dsa/tag_brcm.c yet.
 +	/* Older models (5325, 5365) support a different tag format that we do
 +	 * not support in net/dsa/tag_brcm.c yet. 539x and 531x5 require managed
 +	 * mode to be turned on which means we need to specifically manage ARL
 +	 * misses on multicast addresses (TBD).
  	 */
 -	if (is5325(dev) || is5365(dev) || !b53_can_enable_brcm_tags(ds, port))
 +	if (is5325(dev) || is5365(dev) || is539x(dev) || is531x5(dev) ||
 +	    !b53_can_enable_brcm_tags(ds, port))
  		return DSA_TAG_PROTO_NONE;
  
  	/* Broadcom BCM58xx chips have a flow accelerator on Port 8
@@@ -1517,6 -1511,7 +1514,7 @@@
  
  	return DSA_TAG_PROTO_BRCM;
  }
+ EXPORT_SYMBOL(b53_get_tag_protocol);
  
  int b53_mirror_add(struct dsa_switch *ds, int port,
  		   struct dsa_mall_mirror_tc_entry *mirror, bool ingress)
diff --combined drivers/net/ethernet/amazon/ena/ena_netdev.c
index fbe21a817bd8,a6f283232cb7..6975150d144e
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@@ -75,9 -75,6 +75,9 @@@ static struct workqueue_struct *ena_wq
  MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
  
  static int ena_rss_init_default(struct ena_adapter *adapter);
 +static void check_for_admin_com_state(struct ena_adapter *adapter);
 +static void ena_destroy_device(struct ena_adapter *adapter);
 +static int ena_restore_device(struct ena_adapter *adapter);
  
  static void ena_tx_timeout(struct net_device *dev)
  {
@@@ -161,6 -158,8 +161,8 @@@ static void ena_init_io_rings_common(st
  	ring->per_napi_packets = 0;
  	ring->per_napi_bytes = 0;
  	ring->cpu = 0;
+ 	ring->first_interrupt = false;
+ 	ring->no_interrupt_event_cnt = 0;
  	u64_stats_init(&ring->syncp);
  }
  
@@@ -1277,6 -1276,9 +1279,9 @@@ static irqreturn_t ena_intr_msix_io(in
  {
  	struct ena_napi *ena_napi = data;
  
+ 	ena_napi->tx_ring->first_interrupt = true;
+ 	ena_napi->rx_ring->first_interrupt = true;
+ 
  	napi_schedule_irqoff(&ena_napi->napi);
  
  	return IRQ_HANDLED;
@@@ -1568,7 -1570,7 +1573,7 @@@ static int ena_rss_configure(struct ena
  
  static int ena_up_complete(struct ena_adapter *adapter)
  {
 -	int rc, i;
 +	int rc;
  
  	rc = ena_rss_configure(adapter);
  	if (rc)
@@@ -1587,6 -1589,17 +1592,6 @@@
  
  	ena_napi_enable_all(adapter);
  
 -	/* Enable completion queues interrupt */
 -	for (i = 0; i < adapter->num_queues; i++)
 -		ena_unmask_interrupt(&adapter->tx_ring[i],
 -				     &adapter->rx_ring[i]);
 -
 -	/* schedule napi in case we had pending packets
 -	 * from the last time we disable napi
 -	 */
 -	for (i = 0; i < adapter->num_queues; i++)
 -		napi_schedule(&adapter->ena_napi[i].napi);
 -
  	return 0;
  }
  
@@@ -1723,7 -1736,7 +1728,7 @@@ create_err
  
  static int ena_up(struct ena_adapter *adapter)
  {
 -	int rc;
 +	int rc, i;
  
  	netdev_dbg(adapter->netdev, "%s\n", __func__);
  
@@@ -1766,17 -1779,6 +1771,17 @@@
  
  	set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
  
 +	/* Enable completion queues interrupt */
 +	for (i = 0; i < adapter->num_queues; i++)
 +		ena_unmask_interrupt(&adapter->tx_ring[i],
 +				     &adapter->rx_ring[i]);
 +
 +	/* schedule napi in case we had pending packets
 +	 * from the last time we disable napi
 +	 */
 +	for (i = 0; i < adapter->num_queues; i++)
 +		napi_schedule(&adapter->ena_napi[i].napi);
 +
  	return rc;
  
  err_up:
@@@ -1887,17 -1889,6 +1892,17 @@@ static int ena_close(struct net_device 
  	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
  		ena_down(adapter);
  
 +	/* Check for device status and issue reset if needed*/
 +	check_for_admin_com_state(adapter);
 +	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
 +		netif_err(adapter, ifdown, adapter->netdev,
 +			  "Destroy failure, restarting device\n");
 +		ena_dump_stats_to_dmesg(adapter);
 +		/* rtnl lock already obtained in dev_ioctl() layer */
 +		ena_destroy_device(adapter);
 +		ena_restore_device(adapter);
 +	}
 +
  	return 0;
  }
  
@@@ -2558,12 -2549,11 +2563,12 @@@ static void ena_destroy_device(struct e
  
  	ena_com_set_admin_running_state(ena_dev, false);
  
 -	ena_close(netdev);
 +	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
 +		ena_down(adapter);
  
  	/* Before releasing the ENA resources, a device reset is required.
  	 * (to prevent the device from accessing them).
 -	 * In case the reset flag is set and the device is up, ena_close
 +	 * In case the reset flag is set and the device is up, ena_down()
  	 * already perform the reset, so it can be skipped.
  	 */
  	if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
@@@ -2663,8 -2653,32 +2668,32 @@@ static void ena_fw_reset_device(struct 
  	rtnl_unlock();
  }
  
- static int check_missing_comp_in_queue(struct ena_adapter *adapter,
- 				       struct ena_ring *tx_ring)
+ static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
+ 					struct ena_ring *rx_ring)
+ {
+ 	if (likely(rx_ring->first_interrupt))
+ 		return 0;
+ 
+ 	if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
+ 		return 0;
+ 
+ 	rx_ring->no_interrupt_event_cnt++;
+ 
+ 	if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
+ 		netif_err(adapter, rx_err, adapter->netdev,
+ 			  "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
+ 			  rx_ring->qid);
+ 		adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
+ 		smp_mb__before_atomic();
+ 		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ 		return -EIO;
+ 	}
+ 
+ 	return 0;
+ }
+ 
+ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
+ 					  struct ena_ring *tx_ring)
  {
  	struct ena_tx_buffer *tx_buf;
  	unsigned long last_jiffies;
@@@ -2674,8 -2688,27 +2703,27 @@@
  	for (i = 0; i < tx_ring->ring_size; i++) {
  		tx_buf = &tx_ring->tx_buffer_info[i];
  		last_jiffies = tx_buf->last_jiffies;
- 		if (unlikely(last_jiffies &&
- 			     time_is_before_jiffies(last_jiffies + adapter->missing_tx_completion_to))) {
+ 
+ 		if (last_jiffies == 0)
+ 			/* no pending Tx at this location */
+ 			continue;
+ 
+ 		if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
+ 			     2 * adapter->missing_tx_completion_to))) {
+ 			/* If after graceful period interrupt is still not
+ 			 * received, we schedule a reset
+ 			 */
+ 			netif_err(adapter, tx_err, adapter->netdev,
+ 				  "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
+ 				  tx_ring->qid);
+ 			adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
+ 			smp_mb__before_atomic();
+ 			set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ 			return -EIO;
+ 		}
+ 
+ 		if (unlikely(time_is_before_jiffies(last_jiffies +
+ 				adapter->missing_tx_completion_to))) {
  			if (!tx_buf->print_once)
  				netif_notice(adapter, tx_err, adapter->netdev,
  					     "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
@@@ -2704,9 -2737,10 +2752,10 @@@
  	return rc;
  }
  
- static void check_for_missing_tx_completions(struct ena_adapter *adapter)
+ static void check_for_missing_completions(struct ena_adapter *adapter)
  {
  	struct ena_ring *tx_ring;
+ 	struct ena_ring *rx_ring;
  	int i, budget, rc;
  
  	/* Make sure the driver doesn't turn the device in other process */
@@@ -2725,8 -2759,13 +2774,13 @@@
  
  	for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
  		tx_ring = &adapter->tx_ring[i];
+ 		rx_ring = &adapter->rx_ring[i];
+ 
+ 		rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
+ 		if (unlikely(rc))
+ 			return;
  
- 		rc = check_missing_comp_in_queue(adapter, tx_ring);
+ 		rc = check_for_rx_interrupt_queue(adapter, rx_ring);
  		if (unlikely(rc))
  			return;
  
@@@ -2885,7 -2924,7 +2939,7 @@@ static void ena_timer_service(struct ti
  
  	check_for_admin_com_state(adapter);
  
- 	check_for_missing_tx_completions(adapter);
+ 	check_for_missing_completions(adapter);
  
  	check_for_empty_rx_ring(adapter);
  
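Note on the ena hunks above: check_for_rx_interrupt_queue() catches a wedged MSI-X vector by counting watchdog passes in which the Rx completion queue has entries but the queue has never seen an interrupt; once the counter reaches ENA_MAX_NO_INTERRUPT_ITERATIONS the reset flag is raised. A minimal userspace sketch of that bookkeeping (threshold and names are placeholders, not the driver's):

#include <stdio.h>
#include <stdbool.h>

#define MAX_NO_INTERRUPT_ITERATIONS 3	/* hypothetical threshold */

struct ring {
	bool first_interrupt;		/* set once the IRQ handler has run */
	int no_interrupt_event_cnt;
	int qid;
};

/* Returns true when a device reset should be scheduled. */
static bool check_rx_interrupt(struct ring *rx, bool cq_empty)
{
	if (rx->first_interrupt)
		return false;		/* an interrupt was seen: all good */
	if (cq_empty)
		return false;		/* nothing pending, nothing missed */
	if (++rx->no_interrupt_event_cnt == MAX_NO_INTERRUPT_ITERATIONS) {
		printf("queue %d: completions pending but no IRQ, reset\n",
		       rx->qid);
		return true;
	}
	return false;
}

int main(void)
{
	struct ring rx = { .qid = 0 };
	int i;

	for (i = 0; i < 4; i++)		/* four watchdog passes */
		if (check_rx_interrupt(&rx, false))
			break;
	return 0;
}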
diff --combined drivers/net/ethernet/broadcom/bcmsysport.c
index 9d7a834c5f62,f15a8fc6dfc9..c2969b260aed
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@@ -1156,7 -1156,7 +1156,7 @@@ static struct sk_buff *bcm_sysport_inse
  	memset(tsb, 0, sizeof(*tsb));
  
  	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 -		ip_ver = htons(skb->protocol);
 +		ip_ver = ntohs(skb->protocol);
  		switch (ip_ver) {
  		case ETH_P_IP:
  			ip_proto = ip_hdr(skb)->protocol;
@@@ -1216,18 -1216,6 +1216,6 @@@ static netdev_tx_t bcm_sysport_xmit(str
  		goto out;
  	}
  
- 	/* The Ethernet switch we are interfaced with needs packets to be at
- 	 * least 64 bytes (including FCS) otherwise they will be discarded when
- 	 * they enter the switch port logic. When Broadcom tags are enabled, we
- 	 * need to make sure that packets are at least 68 bytes
- 	 * (including FCS and tag) because the length verification is done after
- 	 * the Broadcom tag is stripped off the ingress packet.
- 	 */
- 	if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
- 		ret = NETDEV_TX_OK;
- 		goto out;
- 	}
- 
  	/* Insert TSB and checksum infos */
  	if (priv->tsb_en) {
  		skb = bcm_sysport_insert_tsb(skb, dev);
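The bcmsysport change above replaces htons(skb->protocol) with ntohs(). Numerically the two 16-bit swaps give the same result, so the old code happened to work; ntohs() is simply the conversion that matches the direction (the __be16 field is turned into a host-order value before comparing against ETH_P_IP/ETH_P_IPV6). A standalone illustration:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define ETH_P_IP	0x0800	/* host-order EtherType constants */
#define ETH_P_IPV6	0x86DD

int main(void)
{
	/* skb->protocol is stored in network byte order (__be16). */
	uint16_t protocol_be = htons(ETH_P_IP);

	/* Convert back to host order before comparing with the constants. */
	switch (ntohs(protocol_be)) {
	case ETH_P_IP:
		printf("IPv4 frame\n");
		break;
	case ETH_P_IPV6:
		printf("IPv6 frame\n");
		break;
	}
	return 0;
}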
diff --combined drivers/net/ethernet/broadcom/tg3.c
index 86ff8b49ee57,a77ee2f8fb8d..2bd77d9990f2
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@@ -3227,7 -3227,7 +3227,7 @@@ static int tg3_nvram_read_using_eeprom(
  	return 0;
  }
  
- #define NVRAM_CMD_TIMEOUT 5000
+ #define NVRAM_CMD_TIMEOUT 10000
  
  static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
  {
@@@ -3744,7 -3744,7 +3744,7 @@@ static int tg3_load_firmware_cpu(struc
  	}
  
  	do {
 -		u32 *fw_data = (u32 *)(fw_hdr + 1);
 +		__be32 *fw_data = (__be32 *)(fw_hdr + 1);
  		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
  			write_op(tp, cpu_scratch_base +
  				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
@@@ -14789,7 -14789,7 +14789,7 @@@ static void tg3_get_5717_nvram_info(str
  
  static void tg3_get_5720_nvram_info(struct tg3 *tp)
  {
- 	u32 nvcfg1, nvmpinstrp;
+ 	u32 nvcfg1, nvmpinstrp, nv_status;
  
  	nvcfg1 = tr32(NVRAM_CFG1);
  	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
@@@ -14801,6 -14801,23 +14801,23 @@@
  		}
  
  		switch (nvmpinstrp) {
+ 		case FLASH_5762_MX25L_100:
+ 		case FLASH_5762_MX25L_200:
+ 		case FLASH_5762_MX25L_400:
+ 		case FLASH_5762_MX25L_800:
+ 		case FLASH_5762_MX25L_160_320:
+ 			tp->nvram_pagesize = 4096;
+ 			tp->nvram_jedecnum = JEDEC_MACRONIX;
+ 			tg3_flag_set(tp, NVRAM_BUFFERED);
+ 			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+ 			tg3_flag_set(tp, FLASH);
+ 			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
+ 			tp->nvram_size =
+ 				(1 << (nv_status >> AUTOSENSE_DEVID &
+ 						AUTOSENSE_DEVID_MASK)
+ 					<< AUTOSENSE_SIZE_IN_MB);
+ 			return;
+ 
  		case FLASH_5762_EEPROM_HD:
  			nvmpinstrp = FLASH_5720_EEPROM_HD;
  			break;
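The Macronix flash cases added above compute the NVRAM size from the autosense status register: a small device-ID field selects a power of two, which is then scaled up to bytes. Since >> binds tighter than &, the expression masks after shifting. A worked example with placeholder field definitions (the real AUTOSENSE_* values live in tg3.h and may differ):

#include <stdio.h>
#include <stdint.h>

#define AUTOSENSE_DEVID		24	/* assumed bit position of the ID field */
#define AUTOSENSE_DEVID_MASK	0x7	/* assumed field width */
#define AUTOSENSE_SIZE_IN_MB	20	/* 1 MB == 1 << 20 bytes */

int main(void)
{
	uint32_t nv_status = 0x2u << AUTOSENSE_DEVID;	/* chip reports ID 2 */
	uint32_t devid = (nv_status >> AUTOSENSE_DEVID) & AUTOSENSE_DEVID_MASK;
	uint32_t nvram_size = (1u << devid) << AUTOSENSE_SIZE_IN_MB;

	printf("devid=%u -> nvram_size=%u bytes (%u MB)\n",
	       devid, nvram_size, nvram_size >> 20);
	return 0;
}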
diff --combined drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index d8424ed16c33,69d0b64e6986..1ff71825868c
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@@ -77,7 -77,8 +77,8 @@@ enum 
  	MEM_EDC1,
  	MEM_MC,
  	MEM_MC0 = MEM_MC,
- 	MEM_MC1
+ 	MEM_MC1,
+ 	MEM_HMA,
  };
  
  enum {
@@@ -344,6 -345,7 +345,6 @@@ struct adapter_params 
  
  	unsigned int sf_size;             /* serial flash size in bytes */
  	unsigned int sf_nsec;             /* # of flash sectors */
 -	unsigned int sf_fw_start;         /* start of FW image in flash */
  
  	unsigned int fw_vers;		  /* firmware version */
  	unsigned int bs_vers;		  /* bootstrap version */
@@@ -1422,6 -1424,21 +1423,21 @@@ static inline void init_rspq(struct ada
  	q->size = size;
  }
  
+ /**
+  *     t4_is_inserted_mod_type - check if a Firmware Module Type is plugged in
+  *     @fw_mod_type: the Firmware Module Type
+  *
+  *     Return whether the Firmware Module Type represents a real Transceiver
+  *     Module/Cable Module Type which has been inserted.
+  */
+ static inline bool t4_is_inserted_mod_type(unsigned int fw_mod_type)
+ {
+ 	return (fw_mod_type != FW_PORT_MOD_TYPE_NONE &&
+ 		fw_mod_type != FW_PORT_MOD_TYPE_NOTSUPPORTED &&
+ 		fw_mod_type != FW_PORT_MOD_TYPE_UNKNOWN &&
+ 		fw_mod_type != FW_PORT_MOD_TYPE_ERROR);
+ }
+ 
  void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
  		       unsigned int data_reg, const u32 *vals,
  		       unsigned int nregs, unsigned int start_idx);
@@@ -1511,6 -1528,7 +1527,7 @@@ int t4_init_portinfo(struct port_info *
  		     int port, int pf, int vf, u8 mac[]);
  int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
  void t4_fatal_err(struct adapter *adapter);
+ unsigned int t4_chip_rss_size(struct adapter *adapter);
  int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
  			int start, int n, const u16 *rspq, unsigned int nrspq);
  int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
@@@ -1652,7 -1670,7 +1669,7 @@@ int t4_ctrl_eq_free(struct adapter *ada
  		    unsigned int vf, unsigned int eqid);
  int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
  		    unsigned int vf, unsigned int eqid);
- int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
+ int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type);
  void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
  int t4_update_port_info(struct port_info *pi);
  int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
@@@ -1695,6 -1713,9 +1712,9 @@@ void t4_uld_mem_free(struct adapter *ad
  int t4_uld_mem_alloc(struct adapter *adap);
  void t4_uld_clean_up(struct adapter *adap);
  void t4_register_netevent_notifier(void);
+ int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
+ 	      unsigned int devid, unsigned int offset,
+ 	      unsigned int len, u8 *buf);
  void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
  void free_tx_desc(struct adapter *adap, struct sge_txq *q,
  		  unsigned int n, bool unmap);
diff --combined drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 375ef86a84da,f2a60e01d563..0e9f64a46ac5
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@@ -524,11 -524,14 +524,14 @@@ int t4_memory_rw(struct adapter *adap, 
  	 * MEM_EDC1 = 1
  	 * MEM_MC   = 2 -- MEM_MC for chips with only 1 memory controller
  	 * MEM_MC1  = 3 -- for chips with 2 memory controllers (e.g. T5)
+ 	 * MEM_HMA  = 4
  	 */
  	edc_size  = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
- 	if (mtype != MEM_MC1)
+ 	if (mtype == MEM_HMA) {
+ 		memoffset = 2 * (edc_size * 1024 * 1024);
+ 	} else if (mtype != MEM_MC1) {
  		memoffset = (mtype * (edc_size * 1024 * 1024));
- 	else {
+ 	} else {
  		mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
  						      MA_EXT_MEMORY0_BAR_A));
  		memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
@@@ -2844,6 -2847,8 +2847,6 @@@ enum 
  	SF_RD_DATA_FAST = 0xb,        /* read flash */
  	SF_RD_ID        = 0x9f,       /* read ID */
  	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
 -
 -	FW_MAX_SIZE = 16 * SF_SEC_SIZE,
  };
  
  /**
@@@ -3556,9 -3561,8 +3559,9 @@@ int t4_load_fw(struct adapter *adap, co
  	const __be32 *p = (const __be32 *)fw_data;
  	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
  	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
 -	unsigned int fw_img_start = adap->params.sf_fw_start;
 -	unsigned int fw_start_sec = fw_img_start / sf_sec_size;
 +	unsigned int fw_start_sec = FLASH_FW_START_SEC;
 +	unsigned int fw_size = FLASH_FW_MAX_SIZE;
 +	unsigned int fw_start = FLASH_FW_START;
  
  	if (!size) {
  		dev_err(adap->pdev_dev, "FW image has no data\n");
@@@ -3574,9 -3578,9 +3577,9 @@@
  			"FW image size differs from size in FW header\n");
  		return -EINVAL;
  	}
 -	if (size > FW_MAX_SIZE) {
 +	if (size > fw_size) {
  		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
 -			FW_MAX_SIZE);
 +			fw_size);
  		return -EFBIG;
  	}
  	if (!t4_fw_matches_chip(adap, hdr))
@@@ -3603,11 -3607,11 +3606,11 @@@
  	 */
  	memcpy(first_page, fw_data, SF_PAGE_SIZE);
  	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
 -	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
 +	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
  	if (ret)
  		goto out;
  
 -	addr = fw_img_start;
 +	addr = fw_start;
  	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
  		addr += SF_PAGE_SIZE;
  		fw_data += SF_PAGE_SIZE;
@@@ -3617,7 -3621,7 +3620,7 @@@
  	}
  
  	ret = t4_write_flash(adap,
 -			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
 +			     fw_start + offsetof(struct fw_hdr, fw_ver),
  			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
  out:
  	if (ret)
@@@ -4923,6 -4927,14 +4926,14 @@@ void t4_intr_disable(struct adapter *ad
  	t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
  }
  
+ unsigned int t4_chip_rss_size(struct adapter *adap)
+ {
+ 	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ 		return RSS_NENTRIES;
+ 	else
+ 		return T6_RSS_NENTRIES;
+ }
+ 
  /**
   *	t4_config_rss_range - configure a portion of the RSS mapping table
   *	@adapter: the adapter
@@@ -5061,10 -5073,11 +5072,11 @@@ static int rd_rss_row(struct adapter *a
   */
  int t4_read_rss(struct adapter *adapter, u16 *map)
  {
+ 	int i, ret, nentries;
  	u32 val;
- 	int i, ret;
  
- 	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
+ 	nentries = t4_chip_rss_size(adapter);
+ 	for (i = 0; i < nentries / 2; ++i) {
  		ret = rd_rss_row(adapter, i, &val);
  		if (ret)
  			return ret;
@@@ -6071,6 -6084,7 +6083,7 @@@ const char *t4_get_port_type_descriptio
  		"CR2_QSFP",
  		"SFP28",
  		"KR_SFP28",
+ 		"KR_XLAUI"
  	};
  
  	if (port_type < ARRAY_SIZE(port_type_description))
@@@ -6526,18 -6540,21 +6539,21 @@@ void t4_sge_decode_idma_state(struct ad
   *      t4_sge_ctxt_flush - flush the SGE context cache
   *      @adap: the adapter
   *      @mbox: mailbox to use for the FW command
+  *      @ctxt_type: Egress or Ingress
   *
   *      Issues a FW command through the given mailbox to flush the
   *      SGE context cache.
   */
- int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
+ int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
  {
  	int ret;
  	u32 ldst_addrspace;
  	struct fw_ldst_cmd c;
  
  	memset(&c, 0, sizeof(c));
- 	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
+ 	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(ctxt_type == CTXT_EGRESS ?
+ 						 FW_LDST_ADDRSPC_SGE_EGRC :
+ 						 FW_LDST_ADDRSPC_SGE_INGC);
  	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
  					FW_CMD_REQUEST_F | FW_CMD_READ_F |
  					ldst_addrspace);
@@@ -8491,22 -8508,6 +8507,6 @@@ found
  	return 0;
  }
  
- static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
- {
- 	u16 val;
- 	u32 pcie_cap;
- 
- 	pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
- 	if (pcie_cap) {
- 		pci_read_config_word(adapter->pdev,
- 				     pcie_cap + PCI_EXP_DEVCTL2, &val);
- 		val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;
- 		val |= range;
- 		pci_write_config_word(adapter->pdev,
- 				      pcie_cap + PCI_EXP_DEVCTL2, val);
- 	}
- }
- 
  /**
   *	t4_prep_adapter - prepare SW and HW for operation
   *	@adapter: the adapter
@@@ -8592,8 -8593,9 +8592,9 @@@ int t4_prep_adapter(struct adapter *ada
  	adapter->params.portvec = 1;
  	adapter->params.vpd.cclk = 50000;
  
- 	/* Set pci completion timeout value to 4 seconds. */
- 	set_pcie_completion_timeout(adapter, 0xd);
+ 	/* Set PCIe completion timeout to 4 seconds. */
+ 	pcie_capability_clear_and_set_word(adapter->pdev, PCI_EXP_DEVCTL2,
+ 					   PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
  	return 0;
  }
  
@@@ -9736,3 -9738,59 +9737,59 @@@ int t4_sched_params(struct adapter *ada
  	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
  			       NULL, 1);
  }
+ 
+ /**
+  *	t4_i2c_rd - read I2C data from adapter
+  *	@adap: the adapter
+  *	@mbox: mailbox to use for the FW command
+  *	@port: Port number if per-port device; <0 if not
+  *	@devid: per-port device ID or absolute device ID
+  *	@offset: byte offset into device I2C space
+  *	@len: byte length of I2C space data
+  *	@buf: buffer in which to return I2C data
+  *
+  *	Reads the I2C data from the indicated device and location.
+  */
+ int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
+ 	      unsigned int devid, unsigned int offset,
+ 	      unsigned int len, u8 *buf)
+ {
+ 	struct fw_ldst_cmd ldst_cmd, ldst_rpl;
+ 	unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
+ 	int ret = 0;
+ 
+ 	if (len > I2C_PAGE_SIZE)
+ 		return -EINVAL;
+ 
+ 	/* Don't allow reads that span multiple pages */
+ 	if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
+ 		return -EINVAL;
+ 
+ 	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+ 	ldst_cmd.op_to_addrspace =
+ 		cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ 			    FW_CMD_REQUEST_F |
+ 			    FW_CMD_READ_F |
+ 			    FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_I2C));
+ 	ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
+ 	ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
+ 	ldst_cmd.u.i2c.did = devid;
+ 
+ 	while (len > 0) {
+ 		unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;
+ 
+ 		ldst_cmd.u.i2c.boffset = offset;
+ 		ldst_cmd.u.i2c.blen = i2c_len;
+ 
+ 		ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
+ 				 &ldst_rpl);
+ 		if (ret)
+ 			break;
+ 
+ 		memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
+ 		offset += i2c_len;
+ 		buf += i2c_len;
+ 		len -= i2c_len;
+ 	}
+ 
+ 	return ret;
+ }
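t4_i2c_rd() above walks the requested range in chunks no larger than the mailbox payload, refusing reads that would cross an I2C page boundary. A self-contained sketch of the same loop, with a fake EEPROM standing in for the firmware mailbox (page and chunk sizes are illustrative):

#include <stdio.h>
#include <stdint.h>

#define I2C_PAGE_SIZE	256	/* assumed page limit */
#define CHUNK_MAX	48	/* stands in for sizeof(ldst_cmd.u.i2c.data) */

/* Mock of the mailbox round trip: returns 'n' bytes starting at 'offset'. */
static int fw_i2c_read(unsigned int offset, unsigned int n, uint8_t *out)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		out[i] = (uint8_t)(offset + i);
	return 0;
}

static int i2c_rd(unsigned int offset, unsigned int len, uint8_t *buf)
{
	if (len > I2C_PAGE_SIZE)
		return -1;
	/* reject reads that would span the page boundary */
	if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
		return -1;

	while (len > 0) {
		unsigned int chunk = len < CHUNK_MAX ? len : CHUNK_MAX;
		int ret = fw_i2c_read(offset, chunk, buf);

		if (ret)
			return ret;
		offset += chunk;	/* advance exactly like the driver loop */
		buf += chunk;
		len -= chunk;
	}
	return 0;
}

int main(void)
{
	uint8_t eeprom[128];

	if (i2c_rd(0, sizeof(eeprom), eeprom) == 0)
		printf("read %zu bytes, last byte 0x%02x\n",
		       sizeof(eeprom), eeprom[sizeof(eeprom) - 1]);
	return 0;
}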
diff --combined drivers/net/ethernet/freescale/fec_main.c
index a74300a4459c,e17d10b8b041..90aa69a08922
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@@ -1868,6 -1868,8 +1868,8 @@@ static int fec_enet_clk_enable(struct n
  		ret = clk_prepare_enable(fep->clk_ref);
  		if (ret)
  			goto failed_clk_ref;
+ 
+ 		phy_reset_after_clk_enable(ndev->phydev);
  	} else {
  		clk_disable_unprepare(fep->clk_ahb);
  		clk_disable_unprepare(fep->clk_enet_out);
@@@ -2840,6 -2842,7 +2842,7 @@@ fec_enet_open(struct net_device *ndev
  {
  	struct fec_enet_private *fep = netdev_priv(ndev);
  	int ret;
+ 	bool reset_again;
  
  	ret = pm_runtime_get_sync(&fep->pdev->dev);
  	if (ret < 0)
@@@ -2850,6 -2853,17 +2853,17 @@@
  	if (ret)
  		goto clk_enable;
  
+ 	/* During the first fec_enet_open call the PHY isn't probed yet, so
+ 	 * the phy_reset_after_clk_enable() call within fec_enet_clk_enable()
+ 	 * fails. As we need this reset to be sure the PHY works correctly,
+ 	 * note whether we have to reset again later, once the PHY has been
+ 	 * probed.
+ 	 */
+ 	if (ndev->phydev && ndev->phydev->drv)
+ 		reset_again = false;
+ 	else
+ 		reset_again = true;
+ 
  	/* I should reset the ring buffers here, but I don't yet know
  	 * a simple way to do that.
  	 */
@@@ -2866,6 -2880,12 +2880,12 @@@
  	if (ret)
  		goto err_enet_mii_probe;
  
+ 	/* Call phy_reset_after_clk_enable() again if the earlier attempt in
+ 	 * fec_enet_clk_enable() failed because the PHY wasn't probed yet.
+ 	 */
+ 	if (reset_again)
+ 		phy_reset_after_clk_enable(ndev->phydev);
+ 
  	if (fep->quirks & FEC_QUIRK_ERR006687)
  		imx6q_cpuidle_fec_irqs_used();
  
@@@ -3469,10 -3489,6 +3489,10 @@@ fec_probe(struct platform_device *pdev
  			goto failed_regulator;
  		}
  	} else {
 +		if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
 +			ret = -EPROBE_DEFER;
 +			goto failed_regulator;
 +		}
  		fep->reg_phy = NULL;
  	}
  
@@@ -3556,9 -3572,8 +3576,9 @@@ failed_clk_ipg
  failed_clk:
  	if (of_phy_is_fixed_link(np))
  		of_phy_deregister_fixed_link(np);
 -failed_phy:
  	of_node_put(phy_node);
 +failed_phy:
 +	dev_id--;
  failed_ioremap:
  	free_netdev(ndev);
  
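The fec hunks above add a two-phase PHY reset: the first open runs before the PHY is probed, so the reset attempted inside fec_enet_clk_enable() cannot take effect and is repeated after the MII probe succeeds. A toy model of that control flow (every name here is a stand-in, not kernel API):

#include <stdio.h>
#include <stdbool.h>

static bool phy_probed;		/* becomes true once the MII probe has run */

static void phy_reset_after_clk_enable(void)
{
	if (!phy_probed) {
		printf("PHY not probed yet, reset skipped\n");
		return;
	}
	printf("PHY reset done\n");
}

static void enet_open(void)
{
	/* remember whether the reset below can actually work */
	bool reset_again = !phy_probed;

	phy_reset_after_clk_enable();	/* may be a no-op on the first open */
	phy_probed = true;		/* stands in for a successful MII probe */

	if (reset_again)
		phy_reset_after_clk_enable();	/* redo now that the PHY exists */
}

int main(void)
{
	enet_open();	/* first open: deferred reset path */
	enet_open();	/* later opens: reset works immediately */
	return 0;
}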
diff --combined drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index c3837ca7a705,d373df7b11bd..aa96cc6a0fa6
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@@ -1571,14 -1571,11 +1571,11 @@@ mlxsw_sp_port_add_cls_matchall_mirror(s
  				      const struct tc_action *a,
  				      bool ingress)
  {
- 	struct net *net = dev_net(mlxsw_sp_port->dev);
  	enum mlxsw_sp_span_type span_type;
  	struct mlxsw_sp_port *to_port;
  	struct net_device *to_dev;
- 	int ifindex;
  
- 	ifindex = tcf_mirred_ifindex(a);
- 	to_dev = __dev_get_by_index(net, ifindex);
+ 	to_dev = tcf_mirred_dev(a);
  	if (!to_dev) {
  		netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
  		return -EINVAL;
@@@ -1838,6 -1835,54 +1835,54 @@@ static int mlxsw_sp_setup_tc(struct net
  	}
  }
  
+ 
+ static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
+ {
+ 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ 
+ 	if (!enable && (mlxsw_sp_port->acl_rule_count ||
+ 			!list_empty(&mlxsw_sp_port->mall_tc_list))) {
+ 		netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
+ 		return -EINVAL;
+ 	}
+ 	return 0;
+ }
+ 
+ typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
+ 
+ static int mlxsw_sp_handle_feature(struct net_device *dev,
+ 				   netdev_features_t wanted_features,
+ 				   netdev_features_t feature,
+ 				   mlxsw_sp_feature_handler feature_handler)
+ {
+ 	netdev_features_t changes = wanted_features ^ dev->features;
+ 	bool enable = !!(wanted_features & feature);
+ 	int err;
+ 
+ 	if (!(changes & feature))
+ 		return 0;
+ 
+ 	err = feature_handler(dev, enable);
+ 	if (err) {
+ 		netdev_err(dev, "%s feature %pNF failed, err %d\n",
+ 			   enable ? "Enable" : "Disable", &feature, err);
+ 		return err;
+ 	}
+ 
+ 	if (enable)
+ 		dev->features |= feature;
+ 	else
+ 		dev->features &= ~feature;
+ 
+ 	return 0;
+ }
+ 
+ static int mlxsw_sp_set_features(struct net_device *dev,
+ 				 netdev_features_t features)
+ {
+ 	return mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
+ 				       mlxsw_sp_feature_hw_tc);
+ }
+ 
  static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
  	.ndo_open		= mlxsw_sp_port_open,
  	.ndo_stop		= mlxsw_sp_port_stop,
@@@ -1852,6 -1897,7 +1897,7 @@@
  	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
  	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
  	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
+ 	.ndo_set_features	= mlxsw_sp_set_features,
  };
  
  static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
@@@ -4376,10 -4422,7 +4422,10 @@@ static int mlxsw_sp_netdevice_port_uppe
  		}
  		if (!info->linking)
  			break;
 -		if (netdev_has_any_upper_dev(upper_dev)) {
 +		if (netdev_has_any_upper_dev(upper_dev) &&
 +		    (!netif_is_bridge_master(upper_dev) ||
 +		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
 +							  upper_dev))) {
  			NL_SET_ERR_MSG(extack,
  				       "spectrum: Enslaving a port to a device that already has an upper device is not supported");
  			return -EINVAL;
@@@ -4507,7 -4550,6 +4553,7 @@@ static int mlxsw_sp_netdevice_port_vlan
  					      u16 vid)
  {
  	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 +	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
  	struct netdev_notifier_changeupper_info *info = ptr;
  	struct netlink_ext_ack *extack;
  	struct net_device *upper_dev;
@@@ -4524,10 -4566,7 +4570,10 @@@
  		}
  		if (!info->linking)
  			break;
 -		if (netdev_has_any_upper_dev(upper_dev)) {
 +		if (netdev_has_any_upper_dev(upper_dev) &&
 +		    (!netif_is_bridge_master(upper_dev) ||
 +		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
 +							  upper_dev))) {
  			NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
  			return -EINVAL;
  		}
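mlxsw_sp_handle_feature() above is a small dispatcher: XOR the wanted feature mask against the device's current one, and only when the bit of interest actually changed call the per-feature handler and commit the bit. The same pattern in plain C (the feature bit value and the handler body are made up):

#include <stdio.h>
#include <stdint.h>

#define NETIF_F_HW_TC	(1ull << 0)	/* placeholder bit, not the real value */

typedef int (*feature_handler)(int enable);

static uint64_t dev_features;

static int hw_tc_handler(int enable)
{
	/* the real driver refuses to disable while tc filters are offloaded */
	printf("%s HW TC offload\n", enable ? "enable" : "disable");
	return 0;
}

static int handle_feature(uint64_t wanted, uint64_t feature, feature_handler h)
{
	uint64_t changed = wanted ^ dev_features;	/* bits that differ */
	int enable = !!(wanted & feature);
	int err;

	if (!(changed & feature))
		return 0;		/* nothing to do for this feature */

	err = h(enable);
	if (err)
		return err;

	if (enable)
		dev_features |= feature;
	else
		dev_features &= ~feature;
	return 0;
}

int main(void)
{
	handle_feature(NETIF_F_HW_TC, NETIF_F_HW_TC, hw_tc_handler);	/* on */
	handle_feature(0, NETIF_F_HW_TC, hw_tc_handler);		/* off */
	return 0;
}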
diff --combined drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 05ce1befd9b3,a0adcd886589..346b8b688b6f
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@@ -270,6 -270,7 +270,7 @@@ struct mlxsw_sp_port 
  	struct mlxsw_sp_port_sample *sample;
  	struct list_head vlans_list;
  	struct mlxsw_sp_qdisc root_qdisc;
+ 	unsigned acl_rule_count;
  };
  
  static inline bool
@@@ -365,8 -366,6 +366,8 @@@ int mlxsw_sp_port_bridge_join(struct ml
  void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
  				struct net_device *brport_dev,
  				struct net_device *br_dev);
 +bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
 +					 const struct net_device *br_dev);
  
  /* spectrum.c */
  int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
diff --combined drivers/net/ethernet/renesas/sh_eth.c
index f21c1db91c3f,d47bbbb22e7c..ccace13c6481
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@@ -3225,37 -3225,18 +3225,37 @@@ static int sh_eth_drv_probe(struct plat
  	/* ioremap the TSU registers */
  	if (mdp->cd->tsu) {
  		struct resource *rtsu;
 +
  		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 -		mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
 -		if (IS_ERR(mdp->tsu_addr)) {
 -			ret = PTR_ERR(mdp->tsu_addr);
 +		if (!rtsu) {
 +			dev_err(&pdev->dev, "no TSU resource\n");
 +			ret = -ENODEV;
 +			goto out_release;
 +		}
 +		/* Only the first of the two ports sharing this TSU may request
 +		 * the region, otherwise the second probe would fail...
 +		 */
 +		if (devno % 2 == 0 &&
 +		    !devm_request_mem_region(&pdev->dev, rtsu->start,
 +					     resource_size(rtsu),
 +					     dev_name(&pdev->dev))) {
 +			dev_err(&pdev->dev, "can't request TSU resource.\n");
 +			ret = -EBUSY;
 +			goto out_release;
 +		}
 +		mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
 +					     resource_size(rtsu));
 +		if (!mdp->tsu_addr) {
 +			dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
 +			ret = -ENOMEM;
  			goto out_release;
  		}
  		mdp->port = devno % 2;
  		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
  	}
  
 -	/* initialize first or needed device */
 -	if (!devno || pd->needs_init) {
 +	/* Need to init only the first port of the two sharing a TSU */
 +	if (devno % 2 == 0) {
  		if (mdp->cd->chip_reset)
  			mdp->cd->chip_reset(ndev);
  
@@@ -3301,8 -3282,7 +3301,7 @@@ out_napi_del
  
  out_release:
  	/* net_dev free */
- 	if (ndev)
- 		free_netdev(ndev);
+ 	free_netdev(ndev);
  
  	pm_runtime_put(&pdev->dev);
  	pm_runtime_disable(&pdev->dev);
diff --combined drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 5bbcaf8298f6,2fd8456999f6..b419229d7457
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@@ -241,13 -241,12 +241,13 @@@ static int dwmac4_rx_check_timestamp(vo
  	u32 own, ctxt;
  	int ret = 1;
  
 -	own = p->des3 & RDES3_OWN;
 -	ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR)
 +	own = le32_to_cpu(p->des3) & RDES3_OWN;
 +	ctxt = ((le32_to_cpu(p->des3) & RDES3_CONTEXT_DESCRIPTOR)
  		>> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
  
  	if (likely(!own && ctxt)) {
 -		if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
 +		if ((p->des0 == cpu_to_le32(0xffffffff)) &&
 +		    (p->des1 == cpu_to_le32(0xffffffff)))
  			/* Corrupted value */
  			ret = -EINVAL;
  		else
@@@ -266,7 -265,7 +266,7 @@@ static int dwmac4_wrback_get_rx_timesta
  	int ret = -EINVAL;
  
  	/* Get the status from normal w/b descriptor */
 -	if (likely(p->des3 & TDES3_RS1V)) {
 +	if (likely(p->des3 & cpu_to_le32(TDES3_RS1V))) {
  		if (likely(le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE)) {
  			int i = 0;
  
@@@ -407,7 -406,7 +407,7 @@@ static void dwmac4_display_ring(void *h
  	pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
  
  	for (i = 0; i < size; i++) {
- 		pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ 		pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
  			i, (unsigned int)virt_to_phys(p),
  			le32_to_cpu(p->des0), le32_to_cpu(p->des1),
  			le32_to_cpu(p->des2), le32_to_cpu(p->des3));
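The dwmac4 descriptor fixes above wrap every desX access in le32_to_cpu()/cpu_to_le32(), because the DMA descriptors are little-endian regardless of host endianness. A userspace analogue using glibc's endian.h helpers (bit positions are placeholders, and <endian.h> is glibc/musl specific; BSDs use <sys/endian.h>):

#include <stdio.h>
#include <stdint.h>
#include <endian.h>	/* htole32()/le32toh(): userspace cpu_to_le32()/le32_to_cpu() */

#define RDES3_OWN			(1u << 31)	/* assumed bit layout */
#define RDES3_CONTEXT_DESCRIPTOR	(1u << 30)

int main(void)
{
	/* A descriptor word as it would sit in DMA memory (little endian). */
	uint32_t des3 = htole32(RDES3_CONTEXT_DESCRIPTOR);

	/* Convert to host order before testing host-order flag constants. */
	uint32_t own  = le32toh(des3) & RDES3_OWN;
	uint32_t ctxt = le32toh(des3) & RDES3_CONTEXT_DESCRIPTOR;

	/* Or convert the constant once and compare raw words, as the
	 * timestamp-corruption check does with cpu_to_le32(0xffffffff). */
	uint32_t des0 = htole32(0xffffffff);
	int corrupted = (des0 == htole32(0xffffffff));

	printf("own=%d ctxt=%d corrupted-marker=%d\n", !!own, !!ctxt, corrupted);
	return 0;
}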
diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 323464576fc0,d9c98fd810bb..cf0e16d1a068
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@@ -364,15 -364,9 +364,15 @@@ static void stmmac_eee_ctrl_timer(struc
  bool stmmac_eee_init(struct stmmac_priv *priv)
  {
  	struct net_device *ndev = priv->dev;
 +	int interface = priv->plat->interface;
  	unsigned long flags;
  	bool ret = false;
  
 +	if ((interface != PHY_INTERFACE_MODE_MII) &&
 +	    (interface != PHY_INTERFACE_MODE_GMII) &&
 +	    !phy_interface_mode_is_rgmii(interface))
 +		goto out;
 +
  	/* Using PCS we cannot dial with the phy registers at this stage
  	 * so we do not support extra feature like EEE.
  	 */
@@@ -2003,22 -1997,60 +2003,60 @@@ static void stmmac_set_dma_operation_mo
  static void stmmac_dma_interrupt(struct stmmac_priv *priv)
  {
  	u32 tx_channel_count = priv->plat->tx_queues_to_use;
- 	int status;
+ 	u32 rx_channel_count = priv->plat->rx_queues_to_use;
+ 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
+ 				tx_channel_count : rx_channel_count;
  	u32 chan;
+ 	bool poll_scheduled = false;
+ 	int status[channels_to_check];
+ 
+ 	/* Each DMA channel can be used for rx and tx simultaneously, yet
+ 	 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
+ 	 * stmmac_channel struct.
+ 	 * Because of this, stmmac_poll currently checks (and possibly wakes)
+ 	 * all tx queues rather than just a single tx queue.
+ 	 */
+ 	for (chan = 0; chan < channels_to_check; chan++)
+ 		status[chan] = priv->hw->dma->dma_interrupt(priv->ioaddr,
+ 							    &priv->xstats,
+ 							    chan);
  
- 	for (chan = 0; chan < tx_channel_count; chan++) {
- 		struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+ 	for (chan = 0; chan < rx_channel_count; chan++) {
+ 		if (likely(status[chan] & handle_rx)) {
+ 			struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
  
- 		status = priv->hw->dma->dma_interrupt(priv->ioaddr,
- 						      &priv->xstats, chan);
- 		if (likely((status & handle_rx)) || (status & handle_tx)) {
  			if (likely(napi_schedule_prep(&rx_q->napi))) {
  				stmmac_disable_dma_irq(priv, chan);
  				__napi_schedule(&rx_q->napi);
+ 				poll_scheduled = true;
  			}
  		}
+ 	}
  
- 		if (unlikely(status & tx_hard_error_bump_tc)) {
+ 	/* If we scheduled poll, we already know that tx queues will be checked.
+ 	 * If we didn't, see whether any DMA channel used by tx has a completed
+ 	 * transmission and, if so, call stmmac_poll() once.
+ 	 */
+ 	if (!poll_scheduled) {
+ 		for (chan = 0; chan < tx_channel_count; chan++) {
+ 			if (status[chan] & handle_tx) {
+ 				/* It doesn't matter what rx queue we choose
+ 				 * here. We use 0 since it always exists.
+ 				 */
+ 				struct stmmac_rx_queue *rx_q =
+ 					&priv->rx_queue[0];
+ 
+ 				if (likely(napi_schedule_prep(&rx_q->napi))) {
+ 					stmmac_disable_dma_irq(priv, chan);
+ 					__napi_schedule(&rx_q->napi);
+ 				}
+ 				break;
+ 			}
+ 		}
+ 	}
+ 
+ 	for (chan = 0; chan < tx_channel_count; chan++) {
+ 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
  			/* Try to bump up the dma threshold on this failure */
  			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
  			    (tc <= 256)) {
@@@ -2035,7 -2067,7 +2073,7 @@@
  								    chan);
  				priv->xstats.threshold = tc;
  			}
- 		} else if (unlikely(status == tx_hard_error)) {
+ 		} else if (unlikely(status[chan] == tx_hard_error)) {
  			stmmac_tx_err(priv, chan);
  		}
  	}
@@@ -2539,7 -2571,7 +2577,7 @@@ static int stmmac_hw_setup(struct net_d
  	}
  
  	if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
 -		priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
 +		priv->hw->mac->pcs_ctrl_ane(priv->ioaddr, 1, priv->hw->ps, 0);
  
  	/* set TX and RX rings length */
  	stmmac_set_rings_length(priv);
@@@ -3404,9 -3436,8 +3442,8 @@@ static int stmmac_rx(struct stmmac_pri
  			if (netif_msg_rx_status(priv)) {
  				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
  					   p, entry, des);
- 				if (frame_len > ETH_FRAME_LEN)
- 					netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
- 						   frame_len, status);
+ 				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
+ 					   frame_len, status);
  			}
  
  			/* The zero-copy is always used for all the sizes
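The stmmac_dma_interrupt() rework earlier in this file first snapshots the status of every DMA channel, schedules NAPI for each Rx channel with work, and only when nothing was scheduled kicks the poll routine once (through Rx queue 0) for a pending Tx-only completion. The control flow, reduced to plain C (the status bits are placeholders):

#include <stdio.h>
#include <stdbool.h>

#define HANDLE_RX	0x1	/* placeholder status bits */
#define HANDLE_TX	0x2

int main(void)
{
	int status[4] = { 0, HANDLE_TX, HANDLE_RX | HANDLE_TX, 0 };
	int rx_channels = 3, tx_channels = 4;
	bool poll_scheduled = false;
	int chan;

	/* 1) Rx channels with work schedule their own NAPI context. */
	for (chan = 0; chan < rx_channels; chan++) {
		if (status[chan] & HANDLE_RX) {
			printf("schedule napi for rx queue %d\n", chan);
			poll_scheduled = true;
		}
	}

	/* 2) Otherwise one Tx completion is enough to kick the poll routine
	 *    once, since it walks every Tx queue anyway. */
	if (!poll_scheduled) {
		for (chan = 0; chan < tx_channels; chan++) {
			if (status[chan] & HANDLE_TX) {
				printf("schedule napi (rx queue 0) for tx work\n");
				break;
			}
		}
	}
	return 0;
}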
diff --combined drivers/net/geneve.c
index 0a48b3073d3d,667c44f68dbc..195e0d0add8d
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@@ -825,13 -825,6 +825,13 @@@ static int geneve_xmit_skb(struct sk_bu
  	if (IS_ERR(rt))
  		return PTR_ERR(rt);
  
 +	if (skb_dst(skb)) {
 +		int mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr) -
 +			  GENEVE_BASE_HLEN - info->options_len - 14;
 +
 +		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
 +	}
 +
  	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
  	if (geneve->collect_md) {
  		tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
@@@ -871,13 -864,6 +871,13 @@@ static int geneve6_xmit_skb(struct sk_b
  	if (IS_ERR(dst))
  		return PTR_ERR(dst);
  
 +	if (skb_dst(skb)) {
 +		int mtu = dst_mtu(dst) - sizeof(struct ipv6hdr) -
 +			  GENEVE_BASE_HLEN - info->options_len - 14;
 +
 +		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
 +	}
 +
  	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
  	if (geneve->collect_md) {
  		prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
@@@ -1652,19 -1638,16 +1652,16 @@@ static __net_init int geneve_init_net(s
  	return 0;
  }
  
- static void __net_exit geneve_exit_net(struct net *net)
+ static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
  {
  	struct geneve_net *gn = net_generic(net, geneve_net_id);
  	struct geneve_dev *geneve, *next;
  	struct net_device *dev, *aux;
- 	LIST_HEAD(list);
- 
- 	rtnl_lock();
  
  	/* gather any geneve devices that were moved into this ns */
  	for_each_netdev_safe(net, dev, aux)
  		if (dev->rtnl_link_ops == &geneve_link_ops)
- 			unregister_netdevice_queue(dev, &list);
+ 			unregister_netdevice_queue(dev, head);
  
  	/* now gather any other geneve devices that were created in this ns */
  	list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
@@@ -1672,18 -1655,29 +1669,29 @@@
  		 * to the list by the previous loop.
  		 */
  		if (!net_eq(dev_net(geneve->dev), net))
- 			unregister_netdevice_queue(geneve->dev, &list);
+ 			unregister_netdevice_queue(geneve->dev, head);
  	}
  
+ 	WARN_ON_ONCE(!list_empty(&gn->sock_list));
+ }
+ 
+ static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
+ {
+ 	struct net *net;
+ 	LIST_HEAD(list);
+ 
+ 	rtnl_lock();
+ 	list_for_each_entry(net, net_list, exit_list)
+ 		geneve_destroy_tunnels(net, &list);
+ 
  	/* unregister the devices gathered above */
  	unregister_netdevice_many(&list);
  	rtnl_unlock();
- 	WARN_ON_ONCE(!list_empty(&gn->sock_list));
  }
  
  static struct pernet_operations geneve_net_ops = {
  	.init = geneve_init_net,
- 	.exit = geneve_exit_net,
+ 	.exit_batch = geneve_exit_batch_net,
  	.id   = &geneve_net_id,
  	.size = sizeof(struct geneve_net),
  };
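The geneve Tx hunks above clamp the inner path MTU to the underlay MTU minus the encapsulation overhead: outer IP header, UDP plus Geneve base header, any Geneve options, and the inner Ethernet header (the literal 14). A back-of-the-envelope version; GENEVE_BASE_HLEN is assumed to be 16 bytes here, which is not taken from this patch:

#include <stdio.h>

int main(void)
{
	int outer_mtu   = 1500;	/* dst_mtu() of the underlay route */
	int ip_hdr      = 20;	/* sizeof(struct iphdr) */
	int geneve_base = 16;	/* assumed: UDP (8) + Geneve base header (8) */
	int options_len = 0;	/* info->options_len */
	int eth_hdr     = 14;	/* the literal 14: inner Ethernet header */

	int inner_mtu = outer_mtu - ip_hdr - geneve_base - options_len - eth_hdr;

	printf("PMTU reported to the inner dst: %d bytes\n", inner_mtu);
	return 0;
}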
diff --combined drivers/net/phy/phylink.c
index 150cd95a6e1e,d1f9466f2fbf..4e8c459bf062
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@@ -36,7 -36,11 +36,11 @@@ enum 
  	PHYLINK_DISABLE_LINK,
  };
  
+ /**
+  * struct phylink - internal data type for phylink
+  */
  struct phylink {
+ 	/* private: */
  	struct net_device *netdev;
  	const struct phylink_mac_ops *ops;
  
@@@ -50,6 -54,8 +54,8 @@@
  	/* The link configuration settings */
  	struct phylink_link_state link_config;
  	struct gpio_desc *link_gpio;
+ 	void (*get_fixed_state)(struct net_device *dev,
+ 				struct phylink_link_state *s);
  
  	struct mutex state_mutex;
  	struct phylink_link_state phy_state;
@@@ -87,6 -93,13 +93,13 @@@ static inline bool linkmode_empty(cons
  	return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS);
  }
  
+ /**
+  * phylink_set_port_modes() - set the port type modes in the ethtool mask
+  * @mask: ethtool link mode mask
+  *
+  * Sets all the port type modes in the ethtool mask.  MAC drivers should
+  * use this in their 'validate' callback.
+  */
  void phylink_set_port_modes(unsigned long *mask)
  {
  	phylink_set(mask, TP);
@@@ -117,8 -130,7 +130,7 @@@ static const char *phylink_an_mode_str(
  	static const char *modestr[] = {
  		[MLO_AN_PHY] = "phy",
  		[MLO_AN_FIXED] = "fixed",
- 		[MLO_AN_SGMII] = "SGMII",
- 		[MLO_AN_8023Z] = "802.3z",
+ 		[MLO_AN_INBAND] = "inband",
  	};
  
  	return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown";
@@@ -132,59 -144,64 +144,64 @@@ static int phylink_validate(struct phyl
  	return phylink_is_empty_linkmode(supported) ? -EINVAL : 0;
  }
  
- static int phylink_parse_fixedlink(struct phylink *pl, struct device_node *np)
+ static int phylink_parse_fixedlink(struct phylink *pl,
+ 				   struct fwnode_handle *fwnode)
  {
- 	struct device_node *fixed_node;
+ 	struct fwnode_handle *fixed_node;
  	const struct phy_setting *s;
  	struct gpio_desc *desc;
- 	const __be32 *fixed_prop;
  	u32 speed;
- 	int ret, len;
+ 	int ret;
  
- 	fixed_node = of_get_child_by_name(np, "fixed-link");
+ 	fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link");
  	if (fixed_node) {
- 		ret = of_property_read_u32(fixed_node, "speed", &speed);
+ 		ret = fwnode_property_read_u32(fixed_node, "speed", &speed);
  
  		pl->link_config.speed = speed;
  		pl->link_config.duplex = DUPLEX_HALF;
  
- 		if (of_property_read_bool(fixed_node, "full-duplex"))
+ 		if (fwnode_property_read_bool(fixed_node, "full-duplex"))
  			pl->link_config.duplex = DUPLEX_FULL;
  
  		/* We treat the "pause" and "asym-pause" terminology as
  		 * defining the link partner's ability. */
- 		if (of_property_read_bool(fixed_node, "pause"))
+ 		if (fwnode_property_read_bool(fixed_node, "pause"))
  			pl->link_config.pause |= MLO_PAUSE_SYM;
- 		if (of_property_read_bool(fixed_node, "asym-pause"))
+ 		if (fwnode_property_read_bool(fixed_node, "asym-pause"))
  			pl->link_config.pause |= MLO_PAUSE_ASYM;
  
  		if (ret == 0) {
- 			desc = fwnode_get_named_gpiod(&fixed_node->fwnode,
- 						      "link-gpios", 0,
- 						      GPIOD_IN, "?");
+ 			desc = fwnode_get_named_gpiod(fixed_node, "link-gpios",
+ 						      0, GPIOD_IN, "?");
  
  			if (!IS_ERR(desc))
  				pl->link_gpio = desc;
  			else if (desc == ERR_PTR(-EPROBE_DEFER))
  				ret = -EPROBE_DEFER;
  		}
- 		of_node_put(fixed_node);
+ 		fwnode_handle_put(fixed_node);
  
  		if (ret)
  			return ret;
  	} else {
- 		fixed_prop = of_get_property(np, "fixed-link", &len);
- 		if (!fixed_prop) {
+ 		u32 prop[5];
+ 
+ 		ret = fwnode_property_read_u32_array(fwnode, "fixed-link",
+ 						     NULL, 0);
+ 		if (ret != ARRAY_SIZE(prop)) {
  			netdev_err(pl->netdev, "broken fixed-link?\n");
  			return -EINVAL;
  		}
- 		if (len == 5 * sizeof(*fixed_prop)) {
- 			pl->link_config.duplex = be32_to_cpu(fixed_prop[1]) ?
+ 
+ 		ret = fwnode_property_read_u32_array(fwnode, "fixed-link",
+ 						     prop, ARRAY_SIZE(prop));
+ 		if (!ret) {
+ 			pl->link_config.duplex = prop[1] ?
  						DUPLEX_FULL : DUPLEX_HALF;
- 			pl->link_config.speed = be32_to_cpu(fixed_prop[2]);
- 			if (be32_to_cpu(fixed_prop[3]))
+ 			pl->link_config.speed = prop[2];
+ 			if (prop[3])
  				pl->link_config.pause |= MLO_PAUSE_SYM;
- 			if (be32_to_cpu(fixed_prop[4]))
+ 			if (prop[4])
  				pl->link_config.pause |= MLO_PAUSE_ASYM;
  		}
  	}
@@@ -220,17 -237,17 +237,17 @@@
  	return 0;
  }
  
- static int phylink_parse_mode(struct phylink *pl, struct device_node *np)
+ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
  {
- 	struct device_node *dn;
+ 	struct fwnode_handle *dn;
  	const char *managed;
  
- 	dn = of_get_child_by_name(np, "fixed-link");
- 	if (dn || of_find_property(np, "fixed-link", NULL))
+ 	dn = fwnode_get_named_child_node(fwnode, "fixed-link");
+ 	if (dn || fwnode_property_present(fwnode, "fixed-link"))
  		pl->link_an_mode = MLO_AN_FIXED;
- 	of_node_put(dn);
+ 	fwnode_handle_put(dn);
  
- 	if (of_property_read_string(np, "managed", &managed) == 0 &&
+ 	if (fwnode_property_read_string(fwnode, "managed", &managed) == 0 &&
  	    strcmp(managed, "in-band-status") == 0) {
  		if (pl->link_an_mode == MLO_AN_FIXED) {
  			netdev_err(pl->netdev,
@@@ -244,6 -261,7 +261,7 @@@
  		phylink_set(pl->supported, Asym_Pause);
  		phylink_set(pl->supported, Pause);
  		pl->link_config.an_enabled = true;
+ 		pl->link_an_mode = MLO_AN_INBAND;
  
  		switch (pl->link_config.interface) {
  		case PHY_INTERFACE_MODE_SGMII:
@@@ -253,17 -271,14 +271,14 @@@
  			phylink_set(pl->supported, 100baseT_Full);
  			phylink_set(pl->supported, 1000baseT_Half);
  			phylink_set(pl->supported, 1000baseT_Full);
- 			pl->link_an_mode = MLO_AN_SGMII;
  			break;
  
  		case PHY_INTERFACE_MODE_1000BASEX:
  			phylink_set(pl->supported, 1000baseX_Full);
- 			pl->link_an_mode = MLO_AN_8023Z;
  			break;
  
  		case PHY_INTERFACE_MODE_2500BASEX:
  			phylink_set(pl->supported, 2500baseX_Full);
- 			pl->link_an_mode = MLO_AN_8023Z;
  			break;
  
  		case PHY_INTERFACE_MODE_10GKR:
@@@ -280,7 -295,6 +295,6 @@@
  			phylink_set(pl->supported, 10000baseLR_Full);
  			phylink_set(pl->supported, 10000baseLRM_Full);
  			phylink_set(pl->supported, 10000baseER_Full);
- 			pl->link_an_mode = MLO_AN_SGMII;
  			break;
  
  		default:
@@@ -320,8 -334,7 +334,7 @@@ static void phylink_mac_config(struct p
  static void phylink_mac_an_restart(struct phylink *pl)
  {
  	if (pl->link_config.an_enabled &&
- 	    (pl->link_config.interface == PHY_INTERFACE_MODE_1000BASEX ||
- 	     pl->link_config.interface == PHY_INTERFACE_MODE_2500BASEX))
+ 	    phy_interface_mode_is_8023z(pl->link_config.interface))
  		pl->ops->mac_an_restart(pl->netdev);
  }
  
@@@ -339,12 -352,14 +352,14 @@@ static int phylink_get_mac_state(struc
  }
  
  /* The fixed state is... fixed except for the link state,
-  * which may be determined by a GPIO.
+  * which may be determined by a GPIO or a callback.
   */
  static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_state *state)
  {
  	*state = pl->link_config;
- 	if (pl->link_gpio)
+ 	if (pl->get_fixed_state)
+ 		pl->get_fixed_state(pl->netdev, state);
+ 	else if (pl->link_gpio)
  		state->link = !!gpiod_get_value(pl->link_gpio);
  }
  
@@@ -423,7 -438,7 +438,7 @@@ static void phylink_resolve(struct work
  			phylink_mac_config(pl, &link_state);
  			break;
  
- 		case MLO_AN_SGMII:
+ 		case MLO_AN_INBAND:
  			phylink_get_mac_state(pl, &link_state);
  			if (pl->phydev) {
  				bool changed = false;
@@@ -449,10 -464,6 +464,6 @@@
  				}
  			}
  			break;
- 
- 		case MLO_AN_8023Z:
- 			phylink_get_mac_state(pl, &link_state);
- 			break;
  		}
  	}
  
@@@ -489,15 -500,27 +500,27 @@@ static void phylink_run_resolve(struct 
  
  static const struct sfp_upstream_ops sfp_phylink_ops;
  
- static int phylink_register_sfp(struct phylink *pl, struct device_node *np)
+ static int phylink_register_sfp(struct phylink *pl,
+ 				struct fwnode_handle *fwnode)
  {
- 	struct device_node *sfp_np;
+ 	struct fwnode_reference_args ref;
+ 	int ret;
  
- 	sfp_np = of_parse_phandle(np, "sfp", 0);
- 	if (!sfp_np)
+ 	if (!fwnode)
  		return 0;
  
- 	pl->sfp_bus = sfp_register_upstream(sfp_np, pl->netdev, pl,
+ 	ret = fwnode_property_get_reference_args(fwnode, "sfp", NULL,
+ 						 0, 0, &ref);
+ 	if (ret < 0) {
+ 		if (ret == -ENOENT)
+ 			return 0;
+ 
+ 		netdev_err(pl->netdev, "unable to parse \"sfp\" node: %d\n",
+ 			   ret);
+ 		return ret;
+ 	}
+ 
+ 	pl->sfp_bus = sfp_register_upstream(ref.fwnode, pl->netdev, pl,
  					    &sfp_phylink_ops);
  	if (!pl->sfp_bus)
  		return -ENOMEM;
@@@ -505,7 -528,22 +528,22 @@@
  	return 0;
  }
  
- struct phylink *phylink_create(struct net_device *ndev, struct device_node *np,
+ /**
+  * phylink_create() - create a phylink instance
+  * @ndev: a pointer to the &struct net_device
+  * @fwnode: a pointer to a &struct fwnode_handle describing the network
+  *	interface
+  * @iface: the desired link mode defined by &typedef phy_interface_t
+  * @ops: a pointer to a &struct phylink_mac_ops for the MAC.
+  *
+  * Create a new phylink instance and parse the link parameters found in @fwnode.
+  * This will parse in-band modes, fixed-link or SFP configuration.
+  *
+  * Returns a pointer to a &struct phylink, or an error-pointer value. Users
+  * must use IS_ERR() to check for errors from this function.
+  */
+ struct phylink *phylink_create(struct net_device *ndev,
+ 			       struct fwnode_handle *fwnode,
  			       phy_interface_t iface,
  			       const struct phylink_mac_ops *ops)
  {
@@@ -521,7 -559,10 +559,10 @@@
  	pl->netdev = ndev;
  	pl->phy_state.interface = iface;
  	pl->link_interface = iface;
- 	pl->link_port = PORT_MII;
+ 	if (iface == PHY_INTERFACE_MODE_MOCA)
+ 		pl->link_port = PORT_BNC;
+ 	else
+ 		pl->link_port = PORT_MII;
  	pl->link_config.interface = iface;
  	pl->link_config.pause = MLO_PAUSE_AN;
  	pl->link_config.speed = SPEED_UNKNOWN;
@@@ -534,21 -575,21 +575,21 @@@
  	linkmode_copy(pl->link_config.advertising, pl->supported);
  	phylink_validate(pl, pl->supported, &pl->link_config);
  
- 	ret = phylink_parse_mode(pl, np);
+ 	ret = phylink_parse_mode(pl, fwnode);
  	if (ret < 0) {
  		kfree(pl);
  		return ERR_PTR(ret);
  	}
  
  	if (pl->link_an_mode == MLO_AN_FIXED) {
- 		ret = phylink_parse_fixedlink(pl, np);
+ 		ret = phylink_parse_fixedlink(pl, fwnode);
  		if (ret < 0) {
  			kfree(pl);
  			return ERR_PTR(ret);
  		}
  	}
  
- 	ret = phylink_register_sfp(pl, np);
+ 	ret = phylink_register_sfp(pl, fwnode);
  	if (ret < 0) {
  		kfree(pl);
  		return ERR_PTR(ret);
@@@ -558,6 -599,13 +599,13 @@@
  }
  EXPORT_SYMBOL_GPL(phylink_create);
  
+ /**
+  * phylink_destroy() - cleanup and destroy the phylink instance
+  * @pl: a pointer to a &struct phylink returned from phylink_create()
+  *
+  * Destroy a phylink instance. Any PHY that has been attached must have been
+  * cleaned up via phylink_disconnect_phy() prior to calling this function.
+  */
  void phylink_destroy(struct phylink *pl)
  {
  	if (pl->sfp_bus)
@@@ -654,10 -702,39 +702,39 @@@ static int phylink_bringup_phy(struct p
  	return 0;
  }
  
+ /**
+  * phylink_connect_phy() - connect a PHY to the phylink instance
+  * @pl: a pointer to a &struct phylink returned from phylink_create()
+  * @phy: a pointer to a &struct phy_device.
+  *
+  * Connect @phy to the phylink instance specified by @pl by calling
+  * phy_attach_direct(). Configure the @phy according to the MAC driver's
+  * capabilities, start the PHYLIB state machine and enable any interrupts
+  * that the PHY supports.
+  *
+  * This updates the phylink's ethtool supported and advertising link mode
+  * masks.
+  *
+  * Returns 0 on success or a negative errno.
+  */
  int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
  {
  	int ret;
  
+ 	if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
+ 		    (pl->link_an_mode == MLO_AN_INBAND &&
+ 		     phy_interface_mode_is_8023z(pl->link_interface))))
+ 		return -EINVAL;
+ 
+ 	if (pl->phydev)
+ 		return -EBUSY;
+ 
+ 	/* Use PHY device/driver interface */
+ 	if (pl->link_interface == PHY_INTERFACE_MODE_NA) {
+ 		pl->link_interface = phy->interface;
+ 		pl->link_config.interface = pl->link_interface;
+ 	}
+ 
  	ret = phy_attach_direct(pl->netdev, phy, 0, pl->link_interface);
  	if (ret)
  		return ret;
@@@ -670,14 -747,29 +747,29 @@@
  }
  EXPORT_SYMBOL_GPL(phylink_connect_phy);
  
- int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn)
+ /**
+  * phylink_of_phy_connect() - connect the PHY specified in the DT node.
+  * @pl: a pointer to a &struct phylink returned from phylink_create()
+  * @dn: a pointer to a &struct device_node.
+  * @flags: PHY-specific flags to communicate to the PHY device driver
+  *
+  * Connect the phy specified in the device node @dn to the phylink instance
+  * specified by @pl. Actions specified in phylink_connect_phy() will be
+  * performed.
+  *
+  * Returns 0 on success or a negative errno.
+  */
+ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn,
+ 			   u32 flags)
  {
  	struct device_node *phy_node;
  	struct phy_device *phy_dev;
  	int ret;
  
- 	/* Fixed links are handled without needing a PHY */
- 	if (pl->link_an_mode == MLO_AN_FIXED)
+ 	/* Fixed links and 802.3z are handled without needing a PHY */
+ 	if (pl->link_an_mode == MLO_AN_FIXED ||
+ 	    (pl->link_an_mode == MLO_AN_INBAND &&
+ 	     phy_interface_mode_is_8023z(pl->link_interface)))
  		return 0;
  
  	phy_node = of_parse_phandle(dn, "phy-handle", 0);
@@@ -687,14 -779,13 +779,13 @@@
  		phy_node = of_parse_phandle(dn, "phy-device", 0);
  
  	if (!phy_node) {
- 		if (pl->link_an_mode == MLO_AN_PHY) {
- 			netdev_err(pl->netdev, "unable to find PHY node\n");
+ 		if (pl->link_an_mode == MLO_AN_PHY)
  			return -ENODEV;
- 		}
  		return 0;
  	}
  
- 	phy_dev = of_phy_attach(pl->netdev, phy_node, 0, pl->link_interface);
+ 	phy_dev = of_phy_attach(pl->netdev, phy_node, flags,
+ 				pl->link_interface);
  	/* We're done with the phy_node handle */
  	of_node_put(phy_node);
  
@@@ -709,11 -800,18 +800,18 @@@
  }
  EXPORT_SYMBOL_GPL(phylink_of_phy_connect);
  
+ /**
+  * phylink_disconnect_phy() - disconnect any PHY attached to the phylink
+  *   instance.
+  * @pl: a pointer to a &struct phylink returned from phylink_create()
+  *
+  * Disconnect any current PHY from the phylink instance described by @pl.
+  */
  void phylink_disconnect_phy(struct phylink *pl)
  {
  	struct phy_device *phy;
  
- 	WARN_ON(!lockdep_rtnl_is_held());
+ 	ASSERT_RTNL();
  
  	phy = pl->phydev;
  	if (phy) {
@@@ -730,6 -828,40 +828,40 @@@
  }
  EXPORT_SYMBOL_GPL(phylink_disconnect_phy);
  
+ /**
+  * phylink_fixed_state_cb() - allow setting a fixed link callback
+  * @pl: a pointer to a &struct phylink returned from phylink_create()
+  * @cb: callback to execute to determine the fixed link state.
+  *
+  * The MAC driver should call this function when the state of its link
+  * can be determined through, e.g., an out-of-band MMIO register.
+  */
+ int phylink_fixed_state_cb(struct phylink *pl,
+ 			   void (*cb)(struct net_device *dev,
+ 				      struct phylink_link_state *state))
+ {
+ 	/* It does not make sense to let the link be overridden unless we use
+ 	 * MLO_AN_FIXED.
+ 	 */
+ 	if (pl->link_an_mode != MLO_AN_FIXED)
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&pl->state_mutex);
+ 	pl->get_fixed_state = cb;
+ 	mutex_unlock(&pl->state_mutex);
+ 
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(phylink_fixed_state_cb);
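phylink_fixed_state_cb() above lets a MAC driver replace the GPIO-based fixed-link polling with its own callback; phylink_get_fixed_state() earlier in this diff prefers the callback when one is registered. A plain-C mock of that hook (none of these names are kernel API, and the struct is a cut-down stand-in for struct phylink_link_state):

#include <stdio.h>
#include <stdbool.h>

struct link_state { bool link; int speed; int duplex; };

typedef void (*fixed_state_cb)(struct link_state *state);

static fixed_state_cb get_fixed_state;	/* mirrors pl->get_fixed_state */
static bool gpio_link = true;		/* stands in for gpiod_get_value() */

static void mac_fixed_state(struct link_state *state)
{
	/* e.g. derived from an out-of-band MMIO status register */
	state->link = true;
}

static void resolve_fixed_state(struct link_state *state)
{
	/* mirrors phylink_get_fixed_state(): the callback wins over the GPIO */
	if (get_fixed_state)
		get_fixed_state(state);
	else
		state->link = gpio_link;
}

int main(void)
{
	struct link_state s = { .speed = 1000, .duplex = 1 };

	resolve_fixed_state(&s);		/* GPIO path */
	get_fixed_state = mac_fixed_state;	/* register the callback */
	resolve_fixed_state(&s);		/* callback path */
	printf("link %s\n", s.link ? "up" : "down");
	return 0;
}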
+ 
+ /**
+  * phylink_mac_change() - notify phylink of a change in MAC state
+  * @pl: a pointer to a &struct phylink returned from phylink_create()
+  * @up: indicates whether the link is currently up.
+  *
+  * The MAC driver should call this function when the state of its link
+  * changes (e.g. link failure, new negotiation results, etc.)
+  */
  void phylink_mac_change(struct phylink *pl, bool up)
  {
  	if (!up)
@@@ -739,9 -871,17 +871,17 @@@
  }
  EXPORT_SYMBOL_GPL(phylink_mac_change);
  
+ /**
+  * phylink_start() - start a phylink instance
+  * @pl: a pointer to a &struct phylink returned from phylink_create()
+  *
+  * Start the phylink instance specified by @pl, configuring the MAC for the
+  * desired link mode(s) and negotiation style. This should be called from the
+  * network device driver's &struct net_device_ops ndo_open() method.
+  */
  void phylink_start(struct phylink *pl)
  {
- 	WARN_ON(!lockdep_rtnl_is_held());
+ 	ASSERT_RTNL();
  
  	netdev_info(pl->netdev, "configuring for %s/%s link mode\n",
  		    phylink_an_mode_str(pl->link_an_mode),
@@@ -754,6 -894,12 +894,12 @@@
  	phylink_resolve_flow(pl, &pl->link_config);
  	phylink_mac_config(pl, &pl->link_config);
  
+ 	/* Restart autonegotiation if using 802.3z to ensure that the link
+ 	 * parameters are properly negotiated.  This is necessary for DSA
+ 	 * switches using 802.3z negotiation to ensure they see our modes.
+ 	 */
+ 	phylink_mac_an_restart(pl);
+ 
  	clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
  	phylink_run_resolve(pl);
  
@@@ -764,9 -910,18 +910,18 @@@
  }
  EXPORT_SYMBOL_GPL(phylink_start);
  
+ /**
+  * phylink_stop() - stop a phylink instance
+  * @pl: a pointer to a &struct phylink returned from phylink_create()
+  *
+  * Stop the phylink instance specified by @pl. This should be called from the
+  * network device driver's &struct net_device_ops ndo_stop() method.  The
+  * network device's carrier state should not be changed prior to calling this
+  * function.
+  */
  void phylink_stop(struct phylink *pl)
  {
- 	WARN_ON(!lockdep_rtnl_is_held());
+ 	ASSERT_RTNL();
  
  	if (pl->phydev)
  		phy_stop(pl->phydev);
@@@ -779,9 -934,18 +934,18 @@@
  }
  EXPORT_SYMBOL_GPL(phylink_stop);
  
+ /**
+  * phylink_ethtool_get_wol() - get the wake on lan parameters for the PHY
+  * @pl: a pointer to a &struct phylink returned from phylink_create()
+  * @wol: a pointer to &struct ethtool_wolinfo to hold the read parameters
+  *
+  * Read the wake on lan parameters from the PHY attached to the phylink
+  * instance specified by @pl. If no PHY is currently attached, report no
+  * support for wake on lan.
+  */
  void phylink_ethtool_get_wol(struct phylink *pl, struct ethtool_wolinfo *wol)
  {
- 	WARN_ON(!lockdep_rtnl_is_held());
+ 	ASSERT_RTNL();
  
  	wol->supported = 0;
  	wol->wolopts = 0;
@@@ -791,11 -955,22 +955,22 @@@
  }
  EXPORT_SYMBOL_GPL(phylink_ethtool_get_wol);
  
+ /**
+  * phylink_ethtool_set_wol() - set wake on lan parameters
+  * @pl: a pointer to a &struct phylink returned from phylink_create()
+  * @wol: a pointer to &struct ethtool_wolinfo for the desired parameters
+  *
+  * Set the wake on lan parameters for the PHY attached to the phylink
+  * instance specified by @pl. If no PHY is attached, returns %EOPNOTSUPP
+  * error.
+  *
+  * Returns zero on success or negative errno code.
+  */
  int phylink_ethtool_set_wol(struct phylink *pl, struct ethtool_wolinfo *wol)
  {
  	int ret = -EOPNOTSUPP;
  
- 	WARN_ON(!lockdep_rtnl_is_held());
+ 	ASSERT_RTNL();
  
  	if (pl->phydev)
  		ret = phy_ethtool_set_wol(pl->phydev, wol);
@@@ -826,12 -1001,21 +1001,21 @@@ static void phylink_get_ksettings(cons
  				AUTONEG_DISABLE;
  }
  
+ /**
+  * phylink_ethtool_ksettings_get() - get the current link settings
+  * @pl: a pointer to a &struct phylink returned from phylink_create()
+  * @kset: a pointer to a &struct ethtool_link_ksettings to hold link settings
+  *
+  * Read the current link settings for the phylink instance specified by @pl.
+  * This will be the link settings read from the MAC, PHY or fixed link
+  * settings depending on the current negotiation mode.
+  */
  int phylink_ethtool_ksettings_get(struct phylink *pl,
  				  struct ethtool_link_ksettings *kset)
  {
  	struct phylink_link_state link_state;
  
- 	WARN_ON(!lockdep_rtnl_is_held());
+ 	ASSERT_RTNL();
  
  	if (pl->phydev) {
  		phy_ethtool_ksettings_get(pl->phydev, kset);
@@@ -851,14 -1035,13 +1035,13 @@@
  		phylink_get_ksettings(&link_state, kset);
  		break;
  
- 	case MLO_AN_SGMII:
+ 	case MLO_AN_INBAND:
  		/* If there is a phy attached, then use the reported
  		 * settings from the phy with no modification.
  		 */
  		if (pl->phydev)
  			break;
  
- 	case MLO_AN_8023Z:
  		phylink_get_mac_state(pl, &link_state);
  
  		/* The MAC is reporting the link results from its own PCS
@@@ -873,6 -1056,11 +1056,11 @@@
  }
  EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_get);
  
+ /**
+  * phylink_ethtool_ksettings_set() - set the link settings
+  * @pl: a pointer to a &struct phylink returned from phylink_create()
+  * @kset: a pointer to a &struct ethtool_link_ksettings for the desired modes
+  */
  int phylink_ethtool_ksettings_set(struct phylink *pl,
  				  const struct ethtool_link_ksettings *kset)
  {
@@@ -880,7 -1068,7 +1068,7 @@@
  	struct phylink_link_state config;
  	int ret;
  
- 	WARN_ON(!lockdep_rtnl_is_held());
+ 	ASSERT_RTNL();
  
  	if (kset->base.autoneg != AUTONEG_DISABLE &&
  	    kset->base.autoneg != AUTONEG_ENABLE)
@@@ -967,11 -1155,22 +1155,22 @@@
  }
  EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_set);
  
+ /**
+  * phylink_ethtool_nway_reset() - restart negotiation
+  * @pl: a pointer to a &struct phylink returned from phylink_create()
+  *
+  * Restart negotiation for the phylink instance specified by @pl. This will
+  * cause any attached phy to restart negotiation with the link partner, and
+  * if the MAC is in a BaseX mode, the MAC will also be requested to restart
+  * negotiation.
+  *
+  * Returns zero on success, or negative error code.
+  */
  int phylink_ethtool_nway_reset(struct phylink *pl)
  {
  	int ret = 0;
  
- 	WARN_ON(!lockdep_rtnl_is_held());
+ 	ASSERT_RTNL();
  
  	if (pl->phydev)
  		ret = phy_restart_aneg(pl->phydev);
@@@ -981,10 -1180,15 +1180,15 @@@
  }
  EXPORT_SYMBOL_GPL(phylink_ethtool_nway_reset);
  
+ /**
+  * phylink_ethtool_get_pauseparam() - get the current pause parameters
+  * @pl: a pointer to a &struct phylink returned from phylink_create()
+  * @pause: a pointer to a &struct ethtool_pauseparam
+  */
  void phylink_ethtool_get_pauseparam(struct phylink *pl,
  				    struct ethtool_pauseparam *pause)
  {
- 	WARN_ON(!lockdep_rtnl_is_held());
+ 	ASSERT_RTNL();
  
  	pause->autoneg = !!(pl->link_config.pause & MLO_PAUSE_AN);
  	pause->rx_pause = !!(pl->link_config.pause & MLO_PAUSE_RX);
@@@ -992,12 -1196,17 +1196,17 @@@
  }
  EXPORT_SYMBOL_GPL(phylink_ethtool_get_pauseparam);
  
+ /**
+  * phylink_ethtool_set_pauseparam() - set the current pause parameters
+  * @pl: a pointer to a &struct phylink returned from phylink_create()
+  * @pause: a pointer to a &struct ethtool_pauseparam
+  */
  int phylink_ethtool_set_pauseparam(struct phylink *pl,
  				   struct ethtool_pauseparam *pause)
  {
  	struct phylink_link_state *config = &pl->link_config;
  
- 	WARN_ON(!lockdep_rtnl_is_held());
+ 	ASSERT_RTNL();
  
  	if (!phylink_test(pl->supported, Pause) &&
  	    !phylink_test(pl->supported, Asym_Pause))
@@@ -1030,8 -1239,7 +1239,7 @@@
  			phylink_mac_config(pl, config);
  			break;
  
- 		case MLO_AN_SGMII:
- 		case MLO_AN_8023Z:
+ 		case MLO_AN_INBAND:
  			phylink_mac_config(pl, config);
  			phylink_mac_an_restart(pl);
  			break;
@@@ -1070,24 -1278,21 +1278,21 @@@ int phylink_ethtool_get_module_eeprom(s
  }
  EXPORT_SYMBOL_GPL(phylink_ethtool_get_module_eeprom);
  
- int phylink_init_eee(struct phylink *pl, bool clk_stop_enable)
- {
- 	int ret = -EPROTONOSUPPORT;
- 
- 	WARN_ON(!lockdep_rtnl_is_held());
- 
- 	if (pl->phydev)
- 		ret = phy_init_eee(pl->phydev, clk_stop_enable);
- 
- 	return ret;
- }
- EXPORT_SYMBOL_GPL(phylink_init_eee);
- 
+ /**
+  * phylink_get_eee_err() - read the Energy Efficient Ethernet error
+  *   counter
+  * @pl: a pointer to a &struct phylink returned from phylink_create().
+  *
+  * Read the Energy Efficient Ethernet error counter from the PHY associated
+  * with the phylink instance specified by @pl.
+  *
+  * Returns positive error counter value, or negative error code.
+  */
  int phylink_get_eee_err(struct phylink *pl)
  {
  	int ret = 0;
  
- 	WARN_ON(!lockdep_rtnl_is_held());
+ 	ASSERT_RTNL();
  
  	if (pl->phydev)
  		ret = phy_get_eee_err(pl->phydev);
@@@ -1096,11 -1301,16 +1301,16 @@@
  }
  EXPORT_SYMBOL_GPL(phylink_get_eee_err);
  
+ /**
+  * phylink_ethtool_get_eee() - read the energy efficient ethernet parameters
+  * @pl: a pointer to a &struct phylink returned from phylink_create()
+  * @eee: a pointer to a &struct ethtool_eee for the read parameters
+  */
  int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_eee *eee)
  {
  	int ret = -EOPNOTSUPP;
  
- 	WARN_ON(!lockdep_rtnl_is_held());
+ 	ASSERT_RTNL();
  
  	if (pl->phydev)
  		ret = phy_ethtool_get_eee(pl->phydev, eee);
@@@ -1109,11 -1319,16 +1319,16 @@@
  }
  EXPORT_SYMBOL_GPL(phylink_ethtool_get_eee);
  
+ /**
+  * phylink_ethtool_set_eee() - set the energy efficient ethernet parameters
+  * @pl: a pointer to a &struct phylink returned from phylink_create()
+  * @eee: a pointer to a &struct ethtool_eee for the desired parameters
+  */
  int phylink_ethtool_set_eee(struct phylink *pl, struct ethtool_eee *eee)
  {
  	int ret = -EOPNOTSUPP;
  
- 	WARN_ON(!lockdep_rtnl_is_held());
+ 	ASSERT_RTNL();
  
  	if (pl->phydev)
  		ret = phy_ethtool_set_eee(pl->phydev, eee);
@@@ -1248,9 -1463,7 +1463,7 @@@ static int phylink_mii_read(struct phyl
  	case MLO_AN_PHY:
  		return -EOPNOTSUPP;
  
- 	case MLO_AN_SGMII:
- 		/* No phy, fall through to 8023z method */
- 	case MLO_AN_8023Z:
+ 	case MLO_AN_INBAND:
  		if (phy_id == 0) {
  			val = phylink_get_mac_state(pl, &state);
  			if (val < 0)
@@@ -1275,24 -1488,40 +1488,40 @@@ static int phylink_mii_write(struct phy
  	case MLO_AN_PHY:
  		return -EOPNOTSUPP;
  
- 	case MLO_AN_SGMII:
- 		/* No phy, fall through to 8023z method */
- 	case MLO_AN_8023Z:
+ 	case MLO_AN_INBAND:
  		break;
  	}
  
  	return 0;
  }
  
+ /**
+  * phylink_mii_ioctl() - generic mii ioctl interface
+  * @pl: a pointer to a &struct phylink returned from phylink_create()
+  * @ifr: a pointer to a &struct ifreq for socket ioctls
+  * @cmd: ioctl cmd to execute
+  *
+  * Perform the specified MII ioctl on the PHY attached to the phylink instance
+  * specified by @pl. If no PHY is attached, emulate the presence of the PHY.
+  *
+  * Returns: zero on success or negative error code.
+  *
+  * %SIOCGMIIPHY:
+  *  read register from the current PHY.
+  * %SIOCGMIIREG:
+  *  read register from the specified PHY.
+  * %SIOCSMIIREG:
+  *  set a register on the specified PHY.
+  */
  int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
  {
  	struct mii_ioctl_data *mii = if_mii(ifr);
  	int  ret;
  
- 	WARN_ON(!lockdep_rtnl_is_held());
+ 	ASSERT_RTNL();
  
  	if (pl->phydev) {
- 		/* PHYs only exist for MLO_AN_PHY and MLO_AN_SGMII */
+ 		/* PHYs only exist for MLO_AN_PHY and SGMII */
  		switch (cmd) {
  		case SIOCGMIIPHY:
  			mii->phy_id = pl->phydev->mdio.addr;
@@@ -1349,7 -1578,7 +1578,7 @@@ static int phylink_sfp_module_insert(vo
  	__ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
  	struct phylink_link_state config;
  	phy_interface_t iface;
- 	int mode, ret = 0;
+ 	int ret = 0;
  	bool changed;
  	u8 port;
  
@@@ -1357,14 -1586,13 +1586,13 @@@
  	port = sfp_parse_port(pl->sfp_bus, id, support);
  	iface = sfp_parse_interface(pl->sfp_bus, id);
  
- 	WARN_ON(!lockdep_rtnl_is_held());
+ 	ASSERT_RTNL();
  
  	switch (iface) {
  	case PHY_INTERFACE_MODE_SGMII:
- 		mode = MLO_AN_SGMII;
- 		break;
  	case PHY_INTERFACE_MODE_1000BASEX:
- 		mode = MLO_AN_8023Z;
+ 	case PHY_INTERFACE_MODE_2500BASEX:
+ 	case PHY_INTERFACE_MODE_10GKR:
  		break;
  	default:
  		return -EINVAL;
@@@ -1382,16 -1610,18 +1610,18 @@@
  	ret = phylink_validate(pl, support, &config);
  	if (ret) {
  		netdev_err(pl->netdev, "validation of %s/%s with support %*pb failed: %d\n",
- 			   phylink_an_mode_str(mode), phy_modes(config.interface),
+ 			   phylink_an_mode_str(MLO_AN_INBAND),
+ 			   phy_modes(config.interface),
  			   __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
  		return ret;
  	}
  
  	netdev_dbg(pl->netdev, "requesting link mode %s/%s with support %*pb\n",
- 		   phylink_an_mode_str(mode), phy_modes(config.interface),
+ 		   phylink_an_mode_str(MLO_AN_INBAND),
+ 		   phy_modes(config.interface),
  		   __ETHTOOL_LINK_MODE_MASK_NBITS, support);
  
- 	if (mode == MLO_AN_8023Z && pl->phydev)
+ 	if (phy_interface_mode_is_8023z(iface) && pl->phydev)
  		return -EINVAL;
  
  	changed = !bitmap_equal(pl->supported, support,
@@@ -1401,15 -1631,15 +1631,15 @@@
  		linkmode_copy(pl->link_config.advertising, config.advertising);
  	}
  
- 	if (pl->link_an_mode != mode ||
+ 	if (pl->link_an_mode != MLO_AN_INBAND ||
  	    pl->link_config.interface != config.interface) {
  		pl->link_config.interface = config.interface;
- 		pl->link_an_mode = mode;
+ 		pl->link_an_mode = MLO_AN_INBAND;
  
  		changed = true;
  
  		netdev_info(pl->netdev, "switched to %s/%s link mode\n",
- 			    phylink_an_mode_str(mode),
+ 			    phylink_an_mode_str(MLO_AN_INBAND),
  			    phy_modes(config.interface));
  	}
  
@@@ -1426,18 -1656,19 +1656,18 @@@ static void phylink_sfp_link_down(void 
  {
  	struct phylink *pl = upstream;
  
- 	WARN_ON(!lockdep_rtnl_is_held());
+ 	ASSERT_RTNL();
  
  	set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
 +	queue_work(system_power_efficient_wq, &pl->resolve);
  	flush_work(&pl->resolve);
 -
 -	netif_carrier_off(pl->netdev);
  }
  
  static void phylink_sfp_link_up(void *upstream)
  {
  	struct phylink *pl = upstream;
  
- 	WARN_ON(!lockdep_rtnl_is_held());
+ 	ASSERT_RTNL();
  
  	clear_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
  	phylink_run_resolve(pl);
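
The recurring change in the phylink hunks above is twofold: the open-coded
WARN_ON(!lockdep_rtnl_is_held()) checks become ASSERT_RTNL(), and the two
in-band negotiation modes MLO_AN_SGMII/MLO_AN_8023Z collapse into
MLO_AN_INBAND. Either way, these ethtool-style helpers still require the
caller to hold the RTNL. A minimal sketch of what that means for a caller,
using made-up names (my_priv, my_get_pauseparam); the ethtool core already
holds the RTNL when it invokes ethtool_ops handlers, so in-tree drivers
normally do not need the explicit locking shown here:

#include <linux/rtnetlink.h>
#include <linux/ethtool.h>
#include <linux/phylink.h>

struct my_priv {
	struct phylink *pl;
};

/* Satisfy the ASSERT_RTNL() inside phylink_ethtool_get_pauseparam()
 * when calling it from outside an ethtool_ops handler.
 */
static void my_get_pauseparam(struct my_priv *priv,
			      struct ethtool_pauseparam *pause)
{
	rtnl_lock();
	phylink_ethtool_get_pauseparam(priv->pl, pause);
	rtnl_unlock();
}
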
diff --combined drivers/net/phy/sfp-bus.c
index ab64a142b832,3ecc378e0716..bdc4bb3c8288
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@@ -8,10 -8,14 +8,14 @@@
  
  #include "sfp.h"
  
+ /**
+  * struct sfp_bus - internal representation of an SFP bus
+  */
  struct sfp_bus {
+ 	/* private: */
  	struct kref kref;
  	struct list_head node;
- 	struct device_node *device_node;
+ 	struct fwnode_handle *fwnode;
  
  	const struct sfp_socket_ops *socket_ops;
  	struct device *sfp_dev;
@@@ -26,6 -30,20 +30,20 @@@
  	bool started;
  };
  
+ /**
+  * sfp_parse_port() - Parse the EEPROM base ID, setting the port type
+  * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+  * @id: a pointer to the module's &struct sfp_eeprom_id
+  * @support: optional pointer to an array of unsigned long for the
+  *   ethtool support mask
+  *
+  * Parse the EEPROM identification given in @id, and return one of
+  * %PORT_TP, %PORT_DA, %PORT_FIBRE or %PORT_OTHER. If @support is
+  * non-%NULL, also set the ethtool %ETHTOOL_LINK_MODE_xxx_BIT corresponding to
+  * the connector type.
+  *
+  * If the port type is not known, returns %PORT_OTHER.
+  */
  int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
  		   unsigned long *support)
  {
@@@ -39,21 -57,19 +57,19 @@@
  	case SFP_CONNECTOR_MT_RJ:
  	case SFP_CONNECTOR_MU:
  	case SFP_CONNECTOR_OPTICAL_PIGTAIL:
- 		if (support)
- 			phylink_set(support, FIBRE);
  		port = PORT_FIBRE;
  		break;
  
  	case SFP_CONNECTOR_RJ45:
- 		if (support)
- 			phylink_set(support, TP);
  		port = PORT_TP;
  		break;
  
+ 	case SFP_CONNECTOR_COPPER_PIGTAIL:
+ 		port = PORT_DA;
+ 		break;
+ 
  	case SFP_CONNECTOR_UNSPEC:
  		if (id->base.e1000_base_t) {
- 			if (support)
- 				phylink_set(support, TP);
  			port = PORT_TP;
  			break;
  		}
@@@ -62,7 -78,6 +78,6 @@@
  	case SFP_CONNECTOR_MPO_1X12:
  	case SFP_CONNECTOR_MPO_2X16:
  	case SFP_CONNECTOR_HSSDC_II:
- 	case SFP_CONNECTOR_COPPER_PIGTAIL:
  	case SFP_CONNECTOR_NOSEPARATE:
  	case SFP_CONNECTOR_MXC_2X16:
  		port = PORT_OTHER;
@@@ -74,10 -89,40 +89,40 @@@
  		break;
  	}
  
+ 	if (support) {
+ 		switch (port) {
+ 		case PORT_FIBRE:
+ 			phylink_set(support, FIBRE);
+ 			break;
+ 
+ 		case PORT_TP:
+ 			phylink_set(support, TP);
+ 			break;
+ 		}
+ 	}
+ 
  	return port;
  }
  EXPORT_SYMBOL_GPL(sfp_parse_port);
  
+ /**
+  * sfp_parse_interface() - Parse the phy_interface_t
+  * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+  * @id: a pointer to the module's &struct sfp_eeprom_id
+  *
+  * Derive the phy_interface_t mode for the information found in the
+  * module's identifying EEPROM. There is no standard or defined way
+  * to derive this information, so we use some heuristics.
+  *
+  * If the encoding is 64b66b, then the module must be >= 10G, so
+  * return %PHY_INTERFACE_MODE_10GKR.
+  *
+  * If it's 8b10b, then it's 1G or slower. If it's definitely a fibre
+  * module, return %PHY_INTERFACE_MODE_1000BASEX mode, otherwise return
+  * %PHY_INTERFACE_MODE_SGMII mode.
+  *
+  * If the encoding is not known, return %PHY_INTERFACE_MODE_NA.
+  */
  phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
  				    const struct sfp_eeprom_id *id)
  {
@@@ -107,6 -152,11 +152,11 @@@
  		break;
  
  	default:
+ 		if (id->base.e1000_base_cx) {
+ 			iface = PHY_INTERFACE_MODE_1000BASEX;
+ 			break;
+ 		}
+ 
  		iface = PHY_INTERFACE_MODE_NA;
  		dev_err(bus->sfp_dev,
  			"SFP module encoding does not support 8b10b nor 64b66b\n");
@@@ -117,13 -167,38 +167,38 @@@
  }
  EXPORT_SYMBOL_GPL(sfp_parse_interface);
  
+ /**
+  * sfp_parse_support() - Parse the eeprom id for supported link modes
+  * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+  * @id: a pointer to the module's &struct sfp_eeprom_id
+  * @support: pointer to an array of unsigned long for the ethtool support mask
+  *
+  * Parse the EEPROM identification information and derive the supported
+  * ethtool link modes for the module.
+  */
  void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
  		       unsigned long *support)
  {
+ 	unsigned int br_min, br_nom, br_max;
+ 
  	phylink_set(support, Autoneg);
  	phylink_set(support, Pause);
  	phylink_set(support, Asym_Pause);
  
+ 	/* Decode the bitrate information to MBd */
+ 	br_min = br_nom = br_max = 0;
+ 	if (id->base.br_nominal) {
+ 		if (id->base.br_nominal != 255) {
+ 			br_nom = id->base.br_nominal * 100;
+ 			br_min = br_nom + id->base.br_nominal * id->ext.br_min;
+ 			br_max = br_nom + id->base.br_nominal * id->ext.br_max;
+ 		} else if (id->ext.br_max) {
+ 			br_nom = 250 * id->ext.br_max;
+ 			br_max = br_nom + br_nom * id->ext.br_min / 100;
+ 			br_min = br_nom - br_nom * id->ext.br_min / 100;
+ 		}
+ 	}
+ 
  	/* Set ethtool support from the compliance fields. */
  	if (id->base.e10g_base_sr)
  		phylink_set(support, 10000baseSR_Full);
@@@ -142,6 -217,34 +217,34 @@@
  		phylink_set(support, 1000baseT_Full);
  	}
  
+ 	/* 1000Base-PX or 1000Base-BX10 */
+ 	if ((id->base.e_base_px || id->base.e_base_bx10) &&
+ 	    br_min <= 1300 && br_max >= 1200)
+ 		phylink_set(support, 1000baseX_Full);
+ 
+ 	/* For active or passive cables, select the link modes
+ 	 * based on the bit rates and the cable compliance bytes.
+ 	 */
+ 	if ((id->base.sfp_ct_passive || id->base.sfp_ct_active) && br_nom) {
+ 		/* This may look odd, but some manufacturers use 12000MBd */
+ 		if (br_min <= 12000 && br_max >= 10300)
+ 			phylink_set(support, 10000baseCR_Full);
+ 		if (br_min <= 3200 && br_max >= 3100)
+ 			phylink_set(support, 2500baseX_Full);
+ 		if (br_min <= 1300 && br_max >= 1200)
+ 			phylink_set(support, 1000baseX_Full);
+ 	}
+ 	if (id->base.sfp_ct_passive) {
+ 		if (id->base.passive.sff8431_app_e)
+ 			phylink_set(support, 10000baseCR_Full);
+ 	}
+ 	if (id->base.sfp_ct_active) {
+ 		if (id->base.active.sff8431_app_e ||
+ 		    id->base.active.sff8431_lim) {
+ 			phylink_set(support, 10000baseCR_Full);
+ 		}
+ 	}
+ 
  	switch (id->base.extended_cc) {
  	case 0x00: /* Unspecified */
  		break;
@@@ -175,35 -278,6 +278,6 @@@
  		if (id->base.br_nominal >= 12)
  			phylink_set(support, 1000baseX_Full);
  	}
- 
- 	switch (id->base.connector) {
- 	case SFP_CONNECTOR_SC:
- 	case SFP_CONNECTOR_FIBERJACK:
- 	case SFP_CONNECTOR_LC:
- 	case SFP_CONNECTOR_MT_RJ:
- 	case SFP_CONNECTOR_MU:
- 	case SFP_CONNECTOR_OPTICAL_PIGTAIL:
- 		break;
- 
- 	case SFP_CONNECTOR_UNSPEC:
- 		if (id->base.e1000_base_t)
- 			break;
- 
- 	case SFP_CONNECTOR_SG: /* guess */
- 	case SFP_CONNECTOR_MPO_1X12:
- 	case SFP_CONNECTOR_MPO_2X16:
- 	case SFP_CONNECTOR_HSSDC_II:
- 	case SFP_CONNECTOR_COPPER_PIGTAIL:
- 	case SFP_CONNECTOR_NOSEPARATE:
- 	case SFP_CONNECTOR_MXC_2X16:
- 	default:
- 		/* a guess at the supported link modes */
- 		dev_warn(bus->sfp_dev,
- 			 "Guessing link modes, please report...\n");
- 		phylink_set(support, 1000baseT_Half);
- 		phylink_set(support, 1000baseT_Full);
- 		break;
- 	}
  }
  EXPORT_SYMBOL_GPL(sfp_parse_support);
  
@@@ -215,7 -289,7 +289,7 @@@ static const struct sfp_upstream_ops *s
  	return bus->registered ? bus->upstream_ops : NULL;
  }
  
- static struct sfp_bus *sfp_bus_get(struct device_node *np)
+ static struct sfp_bus *sfp_bus_get(struct fwnode_handle *fwnode)
  {
  	struct sfp_bus *sfp, *new, *found = NULL;
  
@@@ -224,7 -298,7 +298,7 @@@
  	mutex_lock(&sfp_mutex);
  
  	list_for_each_entry(sfp, &sfp_buses, node) {
- 		if (sfp->device_node == np) {
+ 		if (sfp->fwnode == fwnode) {
  			kref_get(&sfp->kref);
  			found = sfp;
  			break;
@@@ -233,7 -307,7 +307,7 @@@
  
  	if (!found && new) {
  		kref_init(&new->kref);
- 		new->device_node = np;
+ 		new->fwnode = fwnode;
  		list_add(&new->node, &sfp_buses);
  		found = new;
  		new = NULL;
@@@ -246,7 -320,7 +320,7 @@@
  	return found;
  }
  
- static void sfp_bus_release(struct kref *kref) __releases(sfp_mutex)
+ static void sfp_bus_release(struct kref *kref)
  {
  	struct sfp_bus *bus = container_of(kref, struct sfp_bus, kref);
  
@@@ -293,6 -367,16 +367,16 @@@ static void sfp_unregister_bus(struct s
  	bus->registered = false;
  }
  
+ /**
+  * sfp_get_module_info() - Get the ethtool_modinfo for an SFP module
+  * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+  * @modinfo: a &struct ethtool_modinfo
+  *
+  * Fill in the type and eeprom_len parameters in @modinfo for a module on
+  * the sfp bus specified by @bus.
+  *
+  * Returns 0 on success or a negative errno number.
+  */
  int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo)
  {
  	if (!bus->registered)
@@@ -301,6 -385,17 +385,17 @@@
  }
  EXPORT_SYMBOL_GPL(sfp_get_module_info);
  
+ /**
+  * sfp_get_module_eeprom() - Read the SFP module EEPROM
+  * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+  * @ee: a &struct ethtool_eeprom
+  * @data: buffer to contain the EEPROM data (must be at least @ee->len bytes)
+  *
+  * Read the EEPROM as specified by the supplied @ee. See the documentation
+  * for &struct ethtool_eeprom for the region to be read.
+  *
+  * Returns 0 on success or a negative errno number.
+  */
  int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
  			  u8 *data)
  {
@@@ -310,6 -405,15 +405,15 @@@
  }
  EXPORT_SYMBOL_GPL(sfp_get_module_eeprom);
  
+ /**
+  * sfp_upstream_start() - Inform the SFP that the network device is up
+  * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+  *
+  * Inform the SFP socket that the network device is now up, so that the
+  * module can be enabled by allowing TX_DISABLE to be deasserted. This
+  * should be called from the network device driver's &struct net_device_ops
+  * ndo_open() method.
+  */
  void sfp_upstream_start(struct sfp_bus *bus)
  {
  	if (bus->registered)
@@@ -318,6 -422,15 +422,15 @@@
  }
  EXPORT_SYMBOL_GPL(sfp_upstream_start);
  
+ /**
+  * sfp_upstream_stop() - Inform the SFP that the network device is down
+  * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+  *
+  * Inform the SFP socket that the network device is now down, so that the
+  * module can be disabled by asserting TX_DISABLE, disabling the laser
+  * in optical modules. This should be called from the network device
+  * driver's &struct net_device_ops ndo_stop() method.
+  */
  void sfp_upstream_stop(struct sfp_bus *bus)
  {
  	if (bus->registered)
@@@ -326,11 -439,24 +439,24 @@@
  }
  EXPORT_SYMBOL_GPL(sfp_upstream_stop);
  
- struct sfp_bus *sfp_register_upstream(struct device_node *np,
+ /**
+  * sfp_register_upstream() - Register the neighbouring device
+  * @fwnode: firmware node for the SFP bus
+  * @ndev: network device associated with the interface
+  * @upstream: the upstream private data
+  * @ops: the upstream's &struct sfp_upstream_ops
+  *
+  * Register the upstream device (eg, PHY) with the SFP bus. MAC drivers
+  * should use phylink, which will call this function for them. Returns
+  * a pointer to the allocated &struct sfp_bus.
+  *
+  * On error, returns %NULL.
+  */
+ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
  				      struct net_device *ndev, void *upstream,
  				      const struct sfp_upstream_ops *ops)
  {
- 	struct sfp_bus *bus = sfp_bus_get(np);
+ 	struct sfp_bus *bus = sfp_bus_get(fwnode);
  	int ret = 0;
  
  	if (bus) {
@@@ -353,11 -479,17 +479,18 @@@
  }
  EXPORT_SYMBOL_GPL(sfp_register_upstream);
  
+ /**
+  * sfp_unregister_upstream() - Unregister sfp bus
+  * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+  *
+  * Unregister a previously registered upstream connection for the SFP
+  * module. @bus is returned from sfp_register_upstream().
+  */
  void sfp_unregister_upstream(struct sfp_bus *bus)
  {
  	rtnl_lock();
 -	sfp_unregister_bus(bus);
 +	if (bus->sfp)
 +		sfp_unregister_bus(bus);
  	bus->upstream = NULL;
  	bus->netdev = NULL;
  	rtnl_unlock();
@@@ -434,7 -566,7 +567,7 @@@ EXPORT_SYMBOL_GPL(sfp_module_remove)
  struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
  				    const struct sfp_socket_ops *ops)
  {
- 	struct sfp_bus *bus = sfp_bus_get(dev->of_node);
+ 	struct sfp_bus *bus = sfp_bus_get(dev->fwnode);
  	int ret = 0;
  
  	if (bus) {
@@@ -460,8 -592,7 +593,8 @@@ EXPORT_SYMBOL_GPL(sfp_register_socket)
  void sfp_unregister_socket(struct sfp_bus *bus)
  {
  	rtnl_lock();
 -	sfp_unregister_bus(bus);
 +	if (bus->netdev)
 +		sfp_unregister_bus(bus);
  	bus->sfp_dev = NULL;
  	bus->sfp = NULL;
  	bus->socket_ops = NULL;
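
The new bitrate handling in sfp_parse_support() follows the SFF-8472
convention: br_nominal is given in units of 100 MBd, and the escape value
255 means the rate is described by the extended fields instead, with
ext.br_max in units of 250 MBd and ext.br_min acting as a percentage
tolerance. The sketch below mirrors that arithmetic as a free-standing
function so the resulting MBd ranges are easy to check by hand; the struct
and field names (eeprom_rate, ext_br_max, ext_br_min) are stand-ins for
illustration, not the kernel's struct sfp_eeprom_id:

#include <stdio.h>

struct eeprom_rate {
	unsigned char br_nominal;	/* nominal rate, units of 100 MBd; 255 = use ext fields */
	unsigned char ext_br_max;	/* upper range/rate field (see SFF-8472) */
	unsigned char ext_br_min;	/* lower range field (see SFF-8472) */
};

/* Mirrors the decoding in the hunk above. */
static void decode_rate(const struct eeprom_rate *id,
			unsigned int *br_min, unsigned int *br_nom,
			unsigned int *br_max)
{
	*br_min = *br_nom = *br_max = 0;
	if (!id->br_nominal)
		return;

	if (id->br_nominal != 255) {
		*br_nom = id->br_nominal * 100;
		*br_min = *br_nom + id->br_nominal * id->ext_br_min;
		*br_max = *br_nom + id->br_nominal * id->ext_br_max;
	} else if (id->ext_br_max) {
		*br_nom = 250 * id->ext_br_max;
		*br_max = *br_nom + *br_nom * id->ext_br_min / 100;
		*br_min = *br_nom - *br_nom * id->ext_br_min / 100;
	}
}

int main(void)
{
	/* A 10G passive DAC typically reports br_nominal = 103 (10300 MBd),
	 * which satisfies the "br_min <= 12000 && br_max >= 10300" test
	 * that selects 10000baseCR_Full above.
	 */
	struct eeprom_rate id = { .br_nominal = 103 };
	unsigned int lo, nom, hi;

	decode_rate(&id, &lo, &nom, &hi);
	printf("%u..%u MBd (nominal %u)\n", lo, hi, nom);
	return 0;
}
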
diff --combined drivers/net/tun.c
index 2ffe5dba7e09,e367d6310353..164fef1d1cf3
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@@ -195,6 -195,11 +195,11 @@@ struct tun_flow_entry 
  
  #define TUN_NUM_FLOW_ENTRIES 1024
  
+ struct tun_steering_prog {
+ 	struct rcu_head rcu;
+ 	struct bpf_prog *prog;
+ };
+ 
  /* Since the sockets were moved to tun_file, to preserve the behavior of a
   * persistent device, the socket filter, sndbuf and vnet header size are
   * restored when the file is attached to a persistent device.
@@@ -232,6 -237,7 +237,7 @@@ struct tun_struct 
  	u32 rx_batched;
  	struct tun_pcpu_stats __percpu *pcpu_stats;
  	struct bpf_prog __rcu *xdp_prog;
+ 	struct tun_steering_prog __rcu *steering_prog;
  };
  
  static int tun_napi_receive(struct napi_struct *napi, int budget)
@@@ -537,15 -543,12 +543,12 @@@ static inline void tun_flow_save_rps_rx
   * different rxq no. here. If we could not get rxhash, then we would
   * hope the rxq no. may help here.
   */
- static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
- 			    void *accel_priv, select_queue_fallback_t fallback)
+ static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
  {
- 	struct tun_struct *tun = netdev_priv(dev);
  	struct tun_flow_entry *e;
  	u32 txq = 0;
  	u32 numqueues = 0;
  
- 	rcu_read_lock();
  	numqueues = READ_ONCE(tun->numqueues);
  
  	txq = __skb_get_hash_symmetric(skb);
@@@ -563,10 -566,37 +566,37 @@@
  			txq -= numqueues;
  	}
  
- 	rcu_read_unlock();
  	return txq;
  }
  
+ static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
+ {
+ 	struct tun_steering_prog *prog;
+ 	u16 ret = 0;
+ 
+ 	prog = rcu_dereference(tun->steering_prog);
+ 	if (prog)
+ 		ret = bpf_prog_run_clear_cb(prog->prog, skb);
+ 
+ 	return ret % tun->numqueues;
+ }
+ 
+ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
+ 			    void *accel_priv, select_queue_fallback_t fallback)
+ {
+ 	struct tun_struct *tun = netdev_priv(dev);
+ 	u16 ret;
+ 
+ 	rcu_read_lock();
+ 	if (rcu_dereference(tun->steering_prog))
+ 		ret = tun_ebpf_select_queue(tun, skb);
+ 	else
+ 		ret = tun_automq_select_queue(tun, skb);
+ 	rcu_read_unlock();
+ 
+ 	return ret;
+ }
+ 
  static inline bool tun_not_capable(struct tun_struct *tun)
  {
  	const struct cred *cred = current_cred();
@@@ -673,7 -703,6 +703,6 @@@ static void tun_detach(struct tun_file 
  static void tun_detach_all(struct net_device *dev)
  {
  	struct tun_struct *tun = netdev_priv(dev);
- 	struct bpf_prog *xdp_prog = rtnl_dereference(tun->xdp_prog);
  	struct tun_file *tfile, *tmp;
  	int i, n = tun->numqueues;
  
@@@ -708,9 -737,6 +737,6 @@@
  	}
  	BUG_ON(tun->numdisabled != 0);
  
- 	if (xdp_prog)
- 		bpf_prog_put(xdp_prog);
- 
  	if (tun->flags & IFF_PERSIST)
  		module_put(THIS_MODULE);
  }
@@@ -937,23 -963,10 +963,10 @@@ static int tun_net_close(struct net_dev
  }
  
  /* Net device start xmit */
- static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
+ static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
  {
- 	struct tun_struct *tun = netdev_priv(dev);
- 	int txq = skb->queue_mapping;
- 	struct tun_file *tfile;
- 	u32 numqueues = 0;
- 
- 	rcu_read_lock();
- 	tfile = rcu_dereference(tun->tfiles[txq]);
- 	numqueues = READ_ONCE(tun->numqueues);
- 
- 	/* Drop packet if interface is not attached */
- 	if (txq >= numqueues)
- 		goto drop;
- 
  #ifdef CONFIG_RPS
- 	if (numqueues == 1 && static_key_false(&rps_needed)) {
+ 	if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
  		/* Select queue was not called for the skbuff, so we extract the
  		 * RPS hash and save it into the flow_table here.
  		 */
@@@ -969,6 -982,24 +982,24 @@@
  		}
  	}
  #endif
+ }
+ 
+ /* Net device start xmit */
+ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ 	struct tun_struct *tun = netdev_priv(dev);
+ 	int txq = skb->queue_mapping;
+ 	struct tun_file *tfile;
+ 
+ 	rcu_read_lock();
+ 	tfile = rcu_dereference(tun->tfiles[txq]);
+ 
+ 	/* Drop packet if interface is not attached */
+ 	if (txq >= tun->numqueues)
+ 		goto drop;
+ 
+ 	if (!rcu_dereference(tun->steering_prog))
+ 		tun_automq_xmit(tun, skb);
  
  	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
  
@@@ -1248,12 -1279,12 +1279,12 @@@ static void tun_net_init(struct net_dev
  /* Character device part */
  
  /* Poll */
 -static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
 +static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
  {
  	struct tun_file *tfile = file->private_data;
  	struct tun_struct *tun = tun_get(tfile);
  	struct sock *sk;
 -	unsigned int mask = 0;
 +	__poll_t mask = 0;
  
  	if (!tun)
  		return POLLERR;
@@@ -1551,7 -1582,7 +1582,7 @@@ static ssize_t tun_get_user(struct tun_
  	int copylen;
  	bool zerocopy = false;
  	int err;
- 	u32 rxhash;
+ 	u32 rxhash = 0;
  	int skb_xdp = 1;
  	bool frags = tun_napi_frags_enabled(tun);
  
@@@ -1739,7 -1770,10 +1770,10 @@@
  		rcu_read_unlock();
  	}
  
- 	rxhash = __skb_get_hash_symmetric(skb);
+ 	rcu_read_lock();
+ 	if (!rcu_dereference(tun->steering_prog))
+ 		rxhash = __skb_get_hash_symmetric(skb);
+ 	rcu_read_unlock();
  
  	if (frags) {
  		/* Exercise flow dissector code path. */
@@@ -1783,7 -1817,9 +1817,9 @@@
  	u64_stats_update_end(&stats->syncp);
  	put_cpu_ptr(stats);
  
- 	tun_flow_update(tun, rxhash, tfile);
+ 	if (rxhash)
+ 		tun_flow_update(tun, rxhash, tfile);
+ 
  	return total_len;
  }
  
@@@ -1991,6 -2027,39 +2027,39 @@@ static ssize_t tun_chr_read_iter(struc
  	return ret;
  }
  
+ static void tun_steering_prog_free(struct rcu_head *rcu)
+ {
+ 	struct tun_steering_prog *prog = container_of(rcu,
+ 					 struct tun_steering_prog, rcu);
+ 
+ 	bpf_prog_destroy(prog->prog);
+ 	kfree(prog);
+ }
+ 
+ static int __tun_set_steering_ebpf(struct tun_struct *tun,
+ 				   struct bpf_prog *prog)
+ {
+ 	struct tun_steering_prog *old, *new = NULL;
+ 
+ 	if (prog) {
+ 		new = kmalloc(sizeof(*new), GFP_KERNEL);
+ 		if (!new)
+ 			return -ENOMEM;
+ 		new->prog = prog;
+ 	}
+ 
+ 	spin_lock_bh(&tun->lock);
+ 	old = rcu_dereference_protected(tun->steering_prog,
+ 					lockdep_is_held(&tun->lock));
+ 	rcu_assign_pointer(tun->steering_prog, new);
+ 	spin_unlock_bh(&tun->lock);
+ 
+ 	if (old)
+ 		call_rcu(&old->rcu, tun_steering_prog_free);
+ 
+ 	return 0;
+ }
+ 
  static void tun_free_netdev(struct net_device *dev)
  {
  	struct tun_struct *tun = netdev_priv(dev);
@@@ -1999,6 -2068,7 +2068,7 @@@
  	free_percpu(tun->pcpu_stats);
  	tun_flow_uninit(tun);
  	security_tun_dev_free_security(tun->security);
+ 	__tun_set_steering_ebpf(tun, NULL);
  }
  
  static void tun_setup(struct net_device *dev)
@@@ -2287,6 -2357,7 +2357,7 @@@ static int tun_set_iff(struct net *net
  		tun->filter_attached = false;
  		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
  		tun->rx_batched = 0;
+ 		RCU_INIT_POINTER(tun->steering_prog, NULL);
  
  		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
  		if (!tun->pcpu_stats) {
@@@ -2479,6 -2550,25 +2550,25 @@@ unlock
  	return ret;
  }
  
+ static int tun_set_steering_ebpf(struct tun_struct *tun, void __user *data)
+ {
+ 	struct bpf_prog *prog;
+ 	int fd;
+ 
+ 	if (copy_from_user(&fd, data, sizeof(fd)))
+ 		return -EFAULT;
+ 
+ 	if (fd == -1) {
+ 		prog = NULL;
+ 	} else {
+ 		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
+ 		if (IS_ERR(prog))
+ 			return PTR_ERR(prog);
+ 	}
+ 
+ 	return __tun_set_steering_ebpf(tun, prog);
+ }
+ 
  static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
  			    unsigned long arg, int ifreq_len)
  {
@@@ -2755,6 -2845,10 +2845,10 @@@
  		ret = 0;
  		break;
  
+ 	case TUNSETSTEERINGEBPF:
+ 		ret = tun_set_steering_ebpf(tun, argp);
+ 		break;
+ 
  	default:
  		ret = -EINVAL;
  		break;
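
The tun hunks add TUNSETSTEERINGEBPF, which attaches a
BPF_PROG_TYPE_SOCKET_FILTER program whose return value, taken modulo the
number of queues, selects the transmit queue; passing an fd of -1 detaches
the current program again (see tun_set_steering_ebpf() above). A hedged
userspace sketch of the attach side, assuming the steering program has
already been loaded via bpf(BPF_PROG_LOAD, ...) and that TUNSETSTEERINGEBPF
is provided by the matching linux/if_tun.h UAPI update; creating the
multiqueue tap device and loading the program are elided:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/if_tun.h>	/* TUNSETSTEERINGEBPF */

/* Attach (or, with bpf_fd == -1, detach) a steering program on a tap fd. */
static int set_steering(int tap_fd, int bpf_fd)
{
	/* The kernel copies a single int from the pointer argument. */
	if (ioctl(tap_fd, TUNSETSTEERINGEBPF, &bpf_fd) < 0) {
		perror("TUNSETSTEERINGEBPF");
		return -1;
	}
	return 0;
}
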
diff --combined drivers/net/usb/qmi_wwan.c
index 728819feab44,cfaa07f230e5..ae0580b577b8
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@@ -826,7 -826,7 +826,7 @@@ err
  
  static const struct driver_info	qmi_wwan_info = {
  	.description	= "WWAN/QMI device",
- 	.flags		= FLAG_WWAN,
+ 	.flags		= FLAG_WWAN | FLAG_SEND_ZLP,
  	.bind		= qmi_wwan_bind,
  	.unbind		= qmi_wwan_unbind,
  	.manage_power	= qmi_wwan_manage_power,
@@@ -835,7 -835,7 +835,7 @@@
  
  static const struct driver_info	qmi_wwan_info_quirk_dtr = {
  	.description	= "WWAN/QMI device",
- 	.flags		= FLAG_WWAN,
+ 	.flags		= FLAG_WWAN | FLAG_SEND_ZLP,
  	.bind		= qmi_wwan_bind,
  	.unbind		= qmi_wwan_unbind,
  	.manage_power	= qmi_wwan_manage_power,
@@@ -1100,7 -1100,6 +1100,7 @@@ static const struct usb_device_id produ
  	{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
  	{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
  	{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
 +	{QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)},	/* YUGA CLM920-NC5 */
  	{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
  	{QMI_FIXED_INTF(0x12d1, 0x140c, 1)},	/* Huawei E173 */
  	{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},	/* Huawei E1820 */
diff --combined drivers/net/wireless/ath/wcn36xx/main.c
index 987f1252a3cf,5bed323f1100..ab5be6d2c691
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@@ -384,18 -384,6 +384,18 @@@ static int wcn36xx_config(struct ieee80
  		}
  	}
  
 +	if (changed & IEEE80211_CONF_CHANGE_PS) {
 +		list_for_each_entry(tmp, &wcn->vif_list, list) {
 +			vif = wcn36xx_priv_to_vif(tmp);
 +			if (hw->conf.flags & IEEE80211_CONF_PS) {
 +				if (vif->bss_conf.ps) /* ps allowed ? */
 +					wcn36xx_pmc_enter_bmps_state(wcn, vif);
 +			} else {
 +				wcn36xx_pmc_exit_bmps_state(wcn, vif);
 +			}
 +		}
 +	}
 +
  	mutex_unlock(&wcn->conf_mutex);
  
  	return 0;
@@@ -641,7 -629,6 +641,6 @@@ static int wcn36xx_hw_scan(struct ieee8
  			   struct ieee80211_scan_request *hw_req)
  {
  	struct wcn36xx *wcn = hw->priv;
- 
  	mutex_lock(&wcn->scan_lock);
  	if (wcn->scan_req) {
  		mutex_unlock(&wcn->scan_lock);
@@@ -650,11 -637,16 +649,16 @@@
  
  	wcn->scan_aborted = false;
  	wcn->scan_req = &hw_req->req;
+ 
  	mutex_unlock(&wcn->scan_lock);
  
- 	schedule_work(&wcn->scan_work);
+ 	if (!get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
+ 		/* legacy manual/sw scan */
+ 		schedule_work(&wcn->scan_work);
+ 		return 0;
+ 	}
  
- 	return 0;
+ 	return wcn36xx_smd_start_hw_scan(wcn, vif, &hw_req->req);
  }
  
  static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
@@@ -662,6 -654,12 +666,12 @@@
  {
  	struct wcn36xx *wcn = hw->priv;
  
+ 	if (!wcn36xx_smd_stop_hw_scan(wcn)) {
+ 		struct cfg80211_scan_info scan_info = { .aborted = true };
+ 
+ 		ieee80211_scan_completed(wcn->hw, &scan_info);
+ 	}
+ 
  	mutex_lock(&wcn->scan_lock);
  	wcn->scan_aborted = true;
  	mutex_unlock(&wcn->scan_lock);
@@@ -759,6 -757,17 +769,6 @@@ static void wcn36xx_bss_info_changed(st
  		vif_priv->dtim_period = bss_conf->dtim_period;
  	}
  
 -	if (changed & BSS_CHANGED_PS) {
 -		wcn36xx_dbg(WCN36XX_DBG_MAC,
 -			    "mac bss PS set %d\n",
 -			    bss_conf->ps);
 -		if (bss_conf->ps) {
 -			wcn36xx_pmc_enter_bmps_state(wcn, vif);
 -		} else {
 -			wcn36xx_pmc_exit_bmps_state(wcn, vif);
 -		}
 -	}
 -
  	if (changed & BSS_CHANGED_BSSID) {
  		wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n",
  			    bss_conf->bssid);
diff --combined fs/btrfs/disk-io.c
index e5a4faf9e304,5da18ebc9222..bf31663de6b7
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@@ -30,6 -30,7 +30,7 @@@
  #include <linux/ratelimit.h>
  #include <linux/uuid.h>
  #include <linux/semaphore.h>
+ #include <linux/bpf.h>
  #include <asm/unaligned.h>
  #include "ctree.h"
  #include "disk-io.h"
@@@ -220,7 -221,7 +221,7 @@@ void btrfs_set_buffer_lockdep_class(u6
   * extents on the btree inode are pretty simple, there's one extent
   * that covers the entire device
   */
 -static struct extent_map *btree_get_extent(struct btrfs_inode *inode,
 +struct extent_map *btree_get_extent(struct btrfs_inode *inode,
  		struct page *page, size_t pg_offset, u64 start, u64 len,
  		int create)
  {
@@@ -285,7 -286,7 +286,7 @@@ static int csum_tree_block(struct btrfs
  			   int verify)
  {
  	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
 -	char *result = NULL;
 +	char result[BTRFS_CSUM_SIZE];
  	unsigned long len;
  	unsigned long cur_len;
  	unsigned long offset = BTRFS_CSUM_SIZE;
@@@ -294,6 -295,7 +295,6 @@@
  	unsigned long map_len;
  	int err;
  	u32 crc = ~(u32)0;
 -	unsigned long inline_result;
  
  	len = buf->len - offset;
  	while (len > 0) {
@@@ -307,7 -309,13 +308,7 @@@
  		len -= cur_len;
  		offset += cur_len;
  	}
 -	if (csum_size > sizeof(inline_result)) {
 -		result = kzalloc(csum_size, GFP_NOFS);
 -		if (!result)
 -			return -ENOMEM;
 -	} else {
 -		result = (char *)&inline_result;
 -	}
 +	memset(result, 0, BTRFS_CSUM_SIZE);
  
  	btrfs_csum_final(crc, result);
  
@@@ -322,12 -330,15 +323,12 @@@
  				"%s checksum verify failed on %llu wanted %X found %X level %d",
  				fs_info->sb->s_id, buf->start,
  				val, found, btrfs_header_level(buf));
 -			if (result != (char *)&inline_result)
 -				kfree(result);
  			return -EUCLEAN;
  		}
  	} else {
  		write_extent_buffer(buf, result, 0, csum_size);
  	}
 -	if (result != (char *)&inline_result)
 -		kfree(result);
 +
  	return 0;
  }
  
@@@ -381,7 -392,7 +382,7 @@@ static int verify_parent_transid(struc
  		clear_extent_buffer_uptodate(eb);
  out:
  	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
 -			     &cached_state, GFP_NOFS);
 +			     &cached_state);
  	if (need_lock)
  		btrfs_tree_read_unlock_blocking(eb);
  	return ret;
@@@ -445,7 -456,7 +446,7 @@@ static int btree_read_extent_buffer_pag
  	io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
  	while (1) {
  		ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
 -					       btree_get_extent, mirror_num);
 +					       mirror_num);
  		if (!ret) {
  			if (!verify_parent_transid(io_tree, eb,
  						   parent_transid, 0))
@@@ -855,8 -866,6 +856,8 @@@ static blk_status_t btree_submit_bio_ho
  	int async = check_async_write(BTRFS_I(inode));
  	blk_status_t ret;
  
 +	bio_associate_blkcg(bio, blkcg_root_css);
 +
  	if (bio_op(bio) != REQ_OP_WRITE) {
  		/*
  		 * called for a read, do the setup so that checksum validation
@@@ -1004,7 -1013,7 +1005,7 @@@ void readahead_tree_block(struct btrfs_
  	if (IS_ERR(buf))
  		return;
  	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
 -				 buf, WAIT_NONE, btree_get_extent, 0);
 +				 buf, WAIT_NONE, 0);
  	free_extent_buffer(buf);
  }
  
@@@ -1023,7 -1032,7 +1024,7 @@@ int reada_tree_block_flagged(struct btr
  	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
  
  	ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
 -				       btree_get_extent, mirror_num);
 +				       mirror_num);
  	if (ret) {
  		free_extent_buffer(buf);
  		return ret;
@@@ -1160,7 -1169,6 +1161,7 @@@ static void __setup_root(struct btrfs_r
  	spin_lock_init(&root->accounting_lock);
  	spin_lock_init(&root->log_extents_lock[0]);
  	spin_lock_init(&root->log_extents_lock[1]);
 +	spin_lock_init(&root->qgroup_meta_rsv_lock);
  	mutex_init(&root->objectid_mutex);
  	mutex_init(&root->log_mutex);
  	mutex_init(&root->ordered_extent_mutex);
@@@ -1177,6 -1185,7 +1178,6 @@@
  	atomic_set(&root->orphan_inodes, 0);
  	refcount_set(&root->refs, 1);
  	atomic_set(&root->will_be_snapshotted, 0);
 -	atomic64_set(&root->qgroup_meta_rsv, 0);
  	root->log_transid = 0;
  	root->log_transid_committed = -1;
  	root->last_log_commit = 0;
@@@ -1235,7 -1244,7 +1236,7 @@@ struct btrfs_root *btrfs_create_tree(st
  	struct btrfs_root *root;
  	struct btrfs_key key;
  	int ret = 0;
 -	uuid_le uuid;
 +	uuid_le uuid = NULL_UUID_LE;
  
  	root = btrfs_alloc_root(fs_info, GFP_KERNEL);
  	if (!root)
@@@ -1276,8 -1285,7 +1277,8 @@@
  	btrfs_set_root_used(&root->root_item, leaf->len);
  	btrfs_set_root_last_snapshot(&root->root_item, 0);
  	btrfs_set_root_dirid(&root->root_item, 0);
 -	uuid_le_gen(&uuid);
 +	if (is_fstree(objectid))
 +		uuid_le_gen(&uuid);
  	memcpy(root->root_item.uuid, uuid.b, BTRFS_UUID_SIZE);
  	root->root_item.drop_level = 0;
  
@@@ -2868,7 -2876,7 +2869,7 @@@ retry_root_backup
  		goto fail_sysfs;
  	}
  
 -	if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info)) {
 +	if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) {
  		btrfs_warn(fs_info,
  		"writeable mount is not allowed due to too many missing devices");
  		goto fail_sysfs;
@@@ -3116,6 -3124,7 +3117,7 @@@ recovery_tree_root
  		goto fail_block_groups;
  	goto retry_root_backup;
  }
+ BPF_ALLOW_ERROR_INJECTION(open_ctree);
  
  static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
  {
@@@ -3343,8 -3352,6 +3345,8 @@@ static void write_dev_flush(struct btrf
  		return;
  
  	bio_reset(bio);
 +	bio_associate_blkcg(bio, blkcg_root_css);
 +
  	bio->bi_end_io = btrfs_end_empty_barrier;
  	bio_set_dev(bio, device->bdev);
  	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
@@@ -3352,7 -3359,7 +3354,7 @@@
  	bio->bi_private = &device->flush_wait;
  
  	btrfsic_submit_bio(bio);
 -	device->flush_bio_sent = 1;
 +	set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
  }
  
  /*
@@@ -3362,10 -3369,10 +3364,10 @@@ static blk_status_t wait_dev_flush(stru
  {
  	struct bio *bio = device->flush_bio;
  
 -	if (!device->flush_bio_sent)
 +	if (!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state))
  		return BLK_STS_OK;
  
 -	device->flush_bio_sent = 0;
 +	clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state);
  	wait_for_completion_io(&device->flush_wait);
  
  	return bio->bi_status;
@@@ -3373,7 -3380,7 +3375,7 @@@
  
  static int check_barrier_error(struct btrfs_fs_info *fs_info)
  {
 -	if (!btrfs_check_rw_degradable(fs_info))
 +	if (!btrfs_check_rw_degradable(fs_info, NULL))
  		return -EIO;
  	return 0;
  }
@@@ -3389,16 -3396,14 +3391,16 @@@ static int barrier_all_devices(struct b
  	int errors_wait = 0;
  	blk_status_t ret;
  
 +	lockdep_assert_held(&info->fs_devices->device_list_mutex);
  	/* send down all the barriers */
  	head = &info->fs_devices->devices;
 -	list_for_each_entry_rcu(dev, head, dev_list) {
 -		if (dev->missing)
 +	list_for_each_entry(dev, head, dev_list) {
 +		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
  			continue;
  		if (!dev->bdev)
  			continue;
 -		if (!dev->in_fs_metadata || !dev->writeable)
 +		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
 +		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
  			continue;
  
  		write_dev_flush(dev);
@@@ -3406,15 -3411,14 +3408,15 @@@
  	}
  
  	/* wait for all the barriers */
 -	list_for_each_entry_rcu(dev, head, dev_list) {
 -		if (dev->missing)
 +	list_for_each_entry(dev, head, dev_list) {
 +		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
  			continue;
  		if (!dev->bdev) {
  			errors_wait++;
  			continue;
  		}
 -		if (!dev->in_fs_metadata || !dev->writeable)
 +		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
 +		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
  			continue;
  
  		ret = wait_dev_flush(dev);
@@@ -3506,13 -3510,12 +3508,13 @@@ int write_all_supers(struct btrfs_fs_in
  		}
  	}
  
 -	list_for_each_entry_rcu(dev, head, dev_list) {
 +	list_for_each_entry(dev, head, dev_list) {
  		if (!dev->bdev) {
  			total_errors++;
  			continue;
  		}
 -		if (!dev->in_fs_metadata || !dev->writeable)
 +		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
 +		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
  			continue;
  
  		btrfs_set_stack_device_generation(dev_item, 0);
@@@ -3548,11 -3551,10 +3550,11 @@@
  	}
  
  	total_errors = 0;
 -	list_for_each_entry_rcu(dev, head, dev_list) {
 +	list_for_each_entry(dev, head, dev_list) {
  		if (!dev->bdev)
  			continue;
 -		if (!dev->in_fs_metadata || !dev->writeable)
 +		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
 +		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
  			continue;
  
  		ret = wait_dev_supers(dev, max_mirrors);
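
Several of the btrfs hunks above replace per-device booleans (missing,
writeable, in_fs_metadata, flush_bio_sent) with BTRFS_DEV_STATE_* bits
queried through test_bit()/set_bit()/clear_bit() on a single dev_state
word. A small sketch of that pattern with stand-in names (my_device,
MY_DEV_STATE_*), only to show the shape of the atomic flag checks the new
code relies on:

#include <linux/bitops.h>
#include <linux/types.h>

#define MY_DEV_STATE_WRITEABLE	0
#define MY_DEV_STATE_MISSING	1

struct my_device {
	unsigned long dev_state;
};

/* Check two flags atomically, in the style of the barrier loops above. */
static bool my_device_usable(struct my_device *dev)
{
	return !test_bit(MY_DEV_STATE_MISSING, &dev->dev_state) &&
	       test_bit(MY_DEV_STATE_WRITEABLE, &dev->dev_state);
}
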
diff --combined fs/btrfs/free-space-cache.c
index 9e8c1f046e02,fb1382893bfc..9088b0b0d10f
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@@ -22,6 -22,7 +22,7 @@@
  #include <linux/slab.h>
  #include <linux/math64.h>
  #include <linux/ratelimit.h>
+ #include <linux/bpf.h>
  #include "ctree.h"
  #include "free-space-cache.h"
  #include "transaction.h"
@@@ -332,6 -333,7 +333,7 @@@ static int io_ctl_init(struct btrfs_io_
  
  	return 0;
  }
+ BPF_ALLOW_ERROR_INJECTION(io_ctl_init);
  
  static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
  {
@@@ -993,7 -995,8 +995,7 @@@ update_cache_item(struct btrfs_trans_ha
  	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
  	if (ret < 0) {
  		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
 -				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
 -				 GFP_NOFS);
 +				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL);
  		goto fail;
  	}
  	leaf = path->nodes[0];
@@@ -1007,7 -1010,7 +1009,7 @@@
  			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
  					 inode->i_size - 1,
  					 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
 -					 NULL, GFP_NOFS);
 +					 NULL);
  			btrfs_release_path(path);
  			goto fail;
  		}
@@@ -1104,7 -1107,8 +1106,7 @@@ static int flush_dirty_cache(struct ino
  	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
  	if (ret)
  		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
 -				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
 -				 GFP_NOFS);
 +				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL);
  
  	return ret;
  }
@@@ -1125,7 -1129,8 +1127,7 @@@ cleanup_write_cache_enospc(struct inod
  {
  	io_ctl_drop_pages(io_ctl);
  	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
 -			     i_size_read(inode) - 1, cached_state,
 -			     GFP_NOFS);
 +			     i_size_read(inode) - 1, cached_state);
  }
  
  static int __btrfs_wait_cache_io(struct btrfs_root *root,
@@@ -1319,7 -1324,7 +1321,7 @@@ static int __btrfs_write_out_cache(stru
  	io_ctl_drop_pages(io_ctl);
  
  	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
 -			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);
 +			     i_size_read(inode) - 1, &cached_state);
  
  	/*
  	 * at this point the pages are under IO and we're happy,
@@@ -3545,7 -3550,7 +3547,7 @@@ int btrfs_write_out_ino_cache(struct bt
  	if (ret) {
  		if (release_metadata)
  			btrfs_delalloc_release_metadata(BTRFS_I(inode),
 -					inode->i_size);
 +					inode->i_size, true);
  #ifdef DEBUG
  		btrfs_err(fs_info,
  			  "failed to write free ino cache for root %llu",
diff --combined include/linux/bpf.h
index b63a592ad29d,da54ef644fcd..0dcd1d7c9825
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@@ -200,6 -200,9 +200,9 @@@ struct bpf_prog_aux 
  	u32 max_ctx_offset;
  	u32 stack_depth;
  	u32 id;
+ 	u32 func_cnt;
+ 	struct bpf_prog **func;
+ 	void *jit_data; /* JIT specific data. arch dependent */
  	struct latch_tree_node ksym_tnode;
  	struct list_head ksym_lnode;
  	const struct bpf_prog_ops *ops;
@@@ -285,6 -288,9 +288,9 @@@ int bpf_prog_array_copy_to_user(struct 
  
  void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
  				struct bpf_prog *old_prog);
+ int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
+ 			     __u32 __user *prog_ids, u32 request_cnt,
+ 			     __u32 __user *prog_cnt);
  int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
  			struct bpf_prog *exclude_prog,
  			struct bpf_prog *include_prog,
@@@ -399,6 -405,7 +405,7 @@@ static inline void bpf_long_memcpy(voi
  
  /* verify correctness of eBPF program */
  int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
+ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
  
  /* Map specifics */
  struct net_device  *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
@@@ -419,8 -426,6 +426,8 @@@ static inline int bpf_map_attr_numa_nod
  		attr->numa_node : NUMA_NO_NODE;
  }
  
 +struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
 +
  #else /* !CONFIG_BPF_SYSCALL */
  static inline struct bpf_prog *bpf_prog_get(u32 ufd)
  {
@@@ -508,12 -513,6 +515,12 @@@ static inline int cpu_map_enqueue(struc
  {
  	return 0;
  }
 +
 +static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
 +				enum bpf_prog_type type)
 +{
 +	return ERR_PTR(-EOPNOTSUPP);
 +}
  #endif /* CONFIG_BPF_SYSCALL */
  
  static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
@@@ -522,8 -521,6 +529,8 @@@
  	return bpf_prog_get_type_dev(ufd, type, false);
  }
  
 +bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
 +
  int bpf_prog_offload_compile(struct bpf_prog *prog);
  void bpf_prog_offload_destroy(struct bpf_prog *prog);
  
@@@ -586,4 -583,15 +593,15 @@@ extern const struct bpf_func_proto bpf_
  void bpf_user_rnd_init_once(void);
  u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
  
+ #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+ #ifdef CONFIG_BPF_KPROBE_OVERRIDE
+ #define BPF_ALLOW_ERROR_INJECTION(fname)				\
+ static unsigned long __used						\
+ 	__attribute__((__section__("_kprobe_error_inject_list")))	\
+ 	_eil_addr_##fname = (unsigned long)fname;
+ #else
+ #define BPF_ALLOW_ERROR_INJECTION(fname)
+ #endif
+ #endif
+ 
  #endif /* _LINUX_BPF_H */
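
The BPF_ALLOW_ERROR_INJECTION() macro added above records the annotated
function's address in the _kprobe_error_inject_list section whenever
CONFIG_BPF_KPROBE_OVERRIDE is enabled; that is how open_ctree() and
io_ctl_init() are opted in earlier in this diff. A minimal sketch of
annotating some other error-returning function; my_prepare_write is a
made-up name, not a real kernel symbol:

#include <linux/bpf.h>

static int my_prepare_write(void)
{
	/* Real work elided; returns 0 on success or a negative errno. */
	return 0;
}
/* Allows a kprobe-attached BPF program using bpf_override_return() to
 * force an error return from this function when error injection is
 * enabled in the kernel config.
 */
BPF_ALLOW_ERROR_INJECTION(my_prepare_write);
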
diff --combined include/linux/module.h
index e6249795f9e2,548fa09fa806..0fd65481c045
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@@ -475,6 -475,11 +475,11 @@@ struct module 
  	ctor_fn_t *ctors;
  	unsigned int num_ctors;
  #endif
+ 
+ #ifdef CONFIG_BPF_KPROBE_OVERRIDE
+ 	unsigned int num_kprobe_ei_funcs;
+ 	unsigned long *kprobe_ei_funcs;
+ #endif
  } ____cacheline_aligned __randomize_layout;
  #ifndef MODULE_ARCH_INIT
  #define MODULE_ARCH_INIT {}
@@@ -606,9 -611,6 +611,9 @@@ int ref_module(struct module *a, struc
  	__mod ? __mod->name : "kernel";		\
  })
  
 +/* Dereference module function descriptor */
 +void *dereference_module_function_descriptor(struct module *mod, void *ptr);
 +
  /* For kallsyms to ask for address resolution.  namebuf should be at
   * least KSYM_NAME_LEN long: a pointer to namebuf is returned if
   * found, otherwise NULL. */
@@@ -763,13 -765,6 +768,13 @@@ static inline bool is_module_sig_enforc
  	return false;
  }
  
 +/* Dereference module function descriptor */
 +static inline
 +void *dereference_module_function_descriptor(struct module *mod, void *ptr)
 +{
 +	return ptr;
 +}
 +
  #endif /* CONFIG_MODULES */
  
  #ifdef CONFIG_SYSFS
diff --combined include/linux/pci.h
index 95807535d175,0314e0716c30..66cca1c6f742
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@@ -48,17 -48,17 +48,17 @@@
   * In the interest of not exposing interfaces to user-space unnecessarily,
   * the following kernel-only defines are being added here.
   */
 -#define PCI_DEVID(bus, devfn)  ((((u16)(bus)) << 8) | (devfn))
 +#define PCI_DEVID(bus, devfn)	((((u16)(bus)) << 8) | (devfn))
  /* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
  #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
  
  /* pci_slot represents a physical slot */
  struct pci_slot {
 -	struct pci_bus *bus;		/* The bus this slot is on */
 -	struct list_head list;		/* node in list of slots on this bus */
 -	struct hotplug_slot *hotplug;	/* Hotplug info (migrate over time) */
 -	unsigned char number;		/* PCI_SLOT(pci_dev->devfn) */
 -	struct kobject kobj;
 +	struct pci_bus		*bus;		/* Bus this slot is on */
 +	struct list_head	list;		/* Node in list of slots */
 +	struct hotplug_slot	*hotplug;	/* Hotplug info (move here) */
 +	unsigned char		number;		/* PCI_SLOT(pci_dev->devfn) */
 +	struct kobject		kobj;
  };
  
  static inline const char *pci_slot_name(const struct pci_slot *slot)
@@@ -72,7 -72,9 +72,7 @@@ enum pci_mmap_state 
  	pci_mmap_mem
  };
  
 -/*
 - *  For PCI devices, the region numbers are assigned this way:
 - */
 +/* For PCI devices, the region numbers are assigned this way: */
  enum {
  	/* #0-5: standard PCI resources */
  	PCI_STD_RESOURCES,
@@@ -81,23 -83,23 +81,23 @@@
  	/* #6: expansion ROM resource */
  	PCI_ROM_RESOURCE,
  
 -	/* device specific resources */
 +	/* Device-specific resources */
  #ifdef CONFIG_PCI_IOV
  	PCI_IOV_RESOURCES,
  	PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
  #endif
  
 -	/* resources assigned to buses behind the bridge */
 +	/* Resources assigned to buses behind the bridge */
  #define PCI_BRIDGE_RESOURCE_NUM 4
  
  	PCI_BRIDGE_RESOURCES,
  	PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
  				  PCI_BRIDGE_RESOURCE_NUM - 1,
  
 -	/* total resources associated with a PCI device */
 +	/* Total resources associated with a PCI device */
  	PCI_NUM_RESOURCES,
  
 -	/* preserve this for compatibility */
 +	/* Preserve this for compatibility */
  	DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
  };
  
@@@ -150,10 -152,9 +150,10 @@@ static inline const char *pci_power_nam
  #define PCI_PM_D3COLD_WAIT	100
  #define PCI_PM_BUS_WAIT		50
  
 -/** The pci_channel state describes connectivity between the CPU and
 - *  the pci device.  If some PCI bus between here and the pci device
 - *  has crashed or locked up, this info is reflected here.
 +/**
 + * The pci_channel state describes connectivity between the CPU and
 + * the PCI device.  If some PCI bus between here and the PCI device
 + * has crashed or locked up, this info is reflected here.
   */
  typedef unsigned int __bitwise pci_channel_state_t;
  
@@@ -183,7 -184,9 +183,7 @@@ enum pcie_reset_state 
  
  typedef unsigned short __bitwise pci_dev_flags_t;
  enum pci_dev_flags {
 -	/* INTX_DISABLE in PCI_COMMAND register disables MSI
 -	 * generation too.
 -	 */
 +	/* INTX_DISABLE in PCI_COMMAND register disables MSI too */
  	PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
  	/* Device configuration is irrevocably lost if disabled into D3 */
  	PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
@@@ -199,7 -202,7 +199,7 @@@
  	PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
  	/* Get VPD from function 0 VPD */
  	PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
 -	/* a non-root bridge where translation occurs, stop alias search here */
 +	/* A non-root bridge where translation occurs, stop alias search here */
  	PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
  	/* Do not use FLR even if device advertises PCI_AF_CAP */
  	PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
@@@ -219,17 -222,17 +219,17 @@@ enum pci_bus_flags 
  	PCI_BUS_FLAGS_NO_AERSID	= (__force pci_bus_flags_t) 4,
  };
  
 -/* These values come from the PCI Express Spec */
 +/* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
  enum pcie_link_width {
  	PCIE_LNK_WIDTH_RESRV	= 0x00,
  	PCIE_LNK_X1		= 0x01,
  	PCIE_LNK_X2		= 0x02,
  	PCIE_LNK_X4		= 0x04,
  	PCIE_LNK_X8		= 0x08,
 -	PCIE_LNK_X12		= 0x0C,
 +	PCIE_LNK_X12		= 0x0c,
  	PCIE_LNK_X16		= 0x10,
  	PCIE_LNK_X32		= 0x20,
 -	PCIE_LNK_WIDTH_UNKNOWN  = 0xFF,
 +	PCIE_LNK_WIDTH_UNKNOWN	= 0xff,
  };
  
  /* Based on the PCI Hotplug Spec, but some values are made up by us */
@@@ -260,15 -263,15 +260,15 @@@ enum pci_bus_speed 
  };
  
  struct pci_cap_saved_data {
 -	u16 cap_nr;
 -	bool cap_extended;
 -	unsigned int size;
 -	u32 data[0];
 +	u16		cap_nr;
 +	bool		cap_extended;
 +	unsigned int	size;
 +	u32		data[0];
  };
  
  struct pci_cap_saved_state {
 -	struct hlist_node next;
 -	struct pci_cap_saved_data cap;
 +	struct hlist_node		next;
 +	struct pci_cap_saved_data	cap;
  };
  
  struct irq_affinity;
@@@ -277,17 -280,19 +277,17 @@@ struct pci_vpd
  struct pci_sriov;
  struct pci_ats;
  
 -/*
 - * The pci_dev structure is used to describe PCI devices.
 - */
 +/* The pci_dev structure describes PCI devices */
  struct pci_dev {
 -	struct list_head bus_list;	/* node in per-bus list */
 -	struct pci_bus	*bus;		/* bus this device is on */
 -	struct pci_bus	*subordinate;	/* bus this device bridges to */
 +	struct list_head bus_list;	/* Node in per-bus list */
 +	struct pci_bus	*bus;		/* Bus this device is on */
 +	struct pci_bus	*subordinate;	/* Bus this device bridges to */
  
 -	void		*sysdata;	/* hook for sys-specific extension */
 -	struct proc_dir_entry *procent;	/* device entry in /proc/bus/pci */
 +	void		*sysdata;	/* Hook for sys-specific extension */
 +	struct proc_dir_entry *procent;	/* Device entry in /proc/bus/pci */
  	struct pci_slot	*slot;		/* Physical slot this device is in */
  
 -	unsigned int	devfn;		/* encoded device & function index */
 +	unsigned int	devfn;		/* Encoded device & function index */
  	unsigned short	vendor;
  	unsigned short	device;
  	unsigned short	subsystem_vendor;
@@@ -302,12 -307,12 +302,12 @@@
  	u8		msi_cap;	/* MSI capability offset */
  	u8		msix_cap;	/* MSI-X capability offset */
  	u8		pcie_mpss:3;	/* PCIe Max Payload Size Supported */
 -	u8		rom_base_reg;	/* which config register controls the ROM */
 -	u8		pin;		/* which interrupt pin this device uses */
 -	u16		pcie_flags_reg;	/* cached PCIe Capabilities Register */
 -	unsigned long	*dma_alias_mask;/* mask of enabled devfn aliases */
 +	u8		rom_base_reg;	/* Config register controlling ROM */
 +	u8		pin;		/* Interrupt pin this device uses */
 +	u16		pcie_flags_reg;	/* Cached PCIe Capabilities Register */
 +	unsigned long	*dma_alias_mask;/* Mask of enabled devfn aliases */
  
 -	struct pci_driver *driver;	/* which driver has allocated this device */
 +	struct pci_driver *driver;	/* Driver bound to this device */
  	u64		dma_mask;	/* Mask of the bits of bus address this
  					   device implements.  Normally this is
  					   0xffffffff.  You only need to change
@@@ -316,9 -321,9 +316,9 @@@
  
  	struct device_dma_parameters dma_parms;
  
 -	pci_power_t     current_state;  /* Current operating state. In ACPI-speak,
 -					   this is D0-D3, D0 being fully functional,
 -					   and D3 being off. */
 +	pci_power_t	current_state;	/* Current operating state. In ACPI,
 +					   this is D0-D3, D0 being fully
 +					   functional, and D3 being off. */
  	u8		pm_cap;		/* PM capability offset */
  	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
  					   can be generated */
@@@ -329,10 -334,10 +329,10 @@@
  	unsigned int	no_d3cold:1;	/* D3cold is forbidden */
  	unsigned int	bridge_d3:1;	/* Allow D3 for bridge */
  	unsigned int	d3cold_allowed:1;	/* D3cold is allowed by user */
 -	unsigned int	mmio_always_on:1;	/* disallow turning off io/mem
 -						   decoding during bar sizing */
 +	unsigned int	mmio_always_on:1;	/* Disallow turning off io/mem
 +						   decoding during BAR sizing */
  	unsigned int	wakeup_prepared:1;
 -	unsigned int	runtime_d3cold:1;	/* whether go through runtime
 +	unsigned int	runtime_d3cold:1;	/* Whether go through runtime
  						   D3cold, not set for devices
  						   powered on/off by the
  						   corresponding bridge */
@@@ -345,14 -350,12 +345,14 @@@
  
  #ifdef CONFIG_PCIEASPM
  	struct pcie_link_state	*link_state;	/* ASPM link state */
 +	unsigned int	ltr_path:1;	/* Latency Tolerance Reporting
 +					   supported from root to here */
  #endif
  
 -	pci_channel_state_t error_state;	/* current connectivity state */
 -	struct	device	dev;		/* Generic device interface */
 +	pci_channel_state_t error_state;	/* Current connectivity state */
 +	struct device	dev;			/* Generic device interface */
  
 -	int		cfg_size;	/* Size of configuration space */
 +	int		cfg_size;		/* Size of config space */
  
  	/*
  	 * Instead of touching interrupt line and base address registers
@@@ -361,47 -364,47 +361,47 @@@
  	unsigned int	irq;
  	struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
  
 -	bool match_driver;		/* Skip attaching driver */
 -	/* These fields are used by common fixups */
 -	unsigned int	transparent:1;	/* Subtractive decode PCI bridge */
 -	unsigned int	multifunction:1;/* Part of multi-function device */
 -	/* keep track of device state */
 +	bool		match_driver;		/* Skip attaching driver */
 +
 +	unsigned int	transparent:1;		/* Subtractive decode bridge */
 +	unsigned int	multifunction:1;	/* Multi-function device */
 +
  	unsigned int	is_added:1;
 -	unsigned int	is_busmaster:1; /* device is busmaster */
 -	unsigned int	no_msi:1;	/* device may not use msi */
 -	unsigned int	no_64bit_msi:1; /* device may only use 32-bit MSIs */
 -	unsigned int	block_cfg_access:1;	/* config space access is blocked */
 -	unsigned int	broken_parity_status:1;	/* Device generates false positive parity */
 -	unsigned int	irq_reroute_variant:2;	/* device needs IRQ rerouting variant */
 +	unsigned int	is_busmaster:1;		/* Is busmaster */
 +	unsigned int	no_msi:1;		/* May not use MSI */
 +	unsigned int	no_64bit_msi:1; 	/* May only use 32-bit MSIs */
 +	unsigned int	block_cfg_access:1;	/* Config space access blocked */
 +	unsigned int	broken_parity_status:1;	/* Generates false positive parity */
 +	unsigned int	irq_reroute_variant:2;	/* Needs IRQ rerouting variant */
  	unsigned int	msi_enabled:1;
  	unsigned int	msix_enabled:1;
 -	unsigned int	ari_enabled:1;	/* ARI forwarding */
 -	unsigned int	ats_enabled:1;	/* Address Translation Service */
 +	unsigned int	ari_enabled:1;		/* ARI forwarding */
 +	unsigned int	ats_enabled:1;		/* Address Translation Svc */
  	unsigned int	pasid_enabled:1;	/* Process Address Space ID */
  	unsigned int	pri_enabled:1;		/* Page Request Interface */
  	unsigned int	is_managed:1;
 -	unsigned int    needs_freset:1; /* Dev requires fundamental reset */
 +	unsigned int	needs_freset:1;		/* Requires fundamental reset */
  	unsigned int	state_saved:1;
  	unsigned int	is_physfn:1;
  	unsigned int	is_virtfn:1;
  	unsigned int	reset_fn:1;
 -	unsigned int    is_hotplug_bridge:1;
 -	unsigned int	is_thunderbolt:1; /* Thunderbolt controller */
 -	unsigned int    __aer_firmware_first_valid:1;
 +	unsigned int	is_hotplug_bridge:1;
 +	unsigned int	is_thunderbolt:1;	/* Thunderbolt controller */
 +	unsigned int	__aer_firmware_first_valid:1;
  	unsigned int	__aer_firmware_first:1;
 -	unsigned int	broken_intx_masking:1; /* INTx masking can't be used */
 -	unsigned int	io_window_1k:1;	/* Intel P2P bridge 1K I/O windows */
 +	unsigned int	broken_intx_masking:1;	/* INTx masking can't be used */
 +	unsigned int	io_window_1k:1;		/* Intel bridge 1K I/O windows */
  	unsigned int	irq_managed:1;
  	unsigned int	has_secondary_link:1;
 -	unsigned int	non_compliant_bars:1;	/* broken BARs; ignore them */
 -	unsigned int	is_probed:1;		/* device probing in progress */
 +	unsigned int	non_compliant_bars:1;	/* Broken BARs; ignore them */
 +	unsigned int	is_probed:1;		/* Device probing in progress */
  	pci_dev_flags_t dev_flags;
  	atomic_t	enable_cnt;	/* pci_enable_device has been called */
  
 -	u32		saved_config_space[16]; /* config space saved at suspend time */
 +	u32		saved_config_space[16]; /* Config space saved at suspend time */
  	struct hlist_head saved_cap_space;
 -	struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
 -	int rom_attr_enabled;		/* has display of the rom attribute been enabled? */
 +	struct bin_attribute *rom_attr;		/* Attribute descriptor for sysfs ROM entry */
 +	int		rom_attr_enabled;	/* Display of ROM attribute enabled? */
  	struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
  	struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
  
@@@ -416,12 -419,12 +416,12 @@@
  	struct pci_vpd *vpd;
  #ifdef CONFIG_PCI_ATS
  	union {
 -		struct pci_sriov *sriov;	/* SR-IOV capability related */
 -		struct pci_dev *physfn;	/* the PF this VF is associated with */
 +		struct pci_sriov	*sriov;		/* PF: SR-IOV info */
 +		struct pci_dev		*physfn;	/* VF: related PF */
  	};
  	u16		ats_cap;	/* ATS Capability offset */
  	u8		ats_stu;	/* ATS Smallest Translation Unit */
 -	atomic_t	ats_ref_cnt;	/* number of VFs with ATS enabled */
 +	atomic_t	ats_ref_cnt;	/* Number of VFs with ATS enabled */
  #endif
  #ifdef CONFIG_PCI_PRI
  	u32		pri_reqs_alloc; /* Number of PRI requests allocated */
@@@ -429,11 -432,11 +429,11 @@@
  #ifdef CONFIG_PCI_PASID
  	u16		pasid_features;
  #endif
 -	phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
 -	size_t romlen; /* Length of ROM if it's not from the BAR */
 -	char *driver_override; /* Driver name to force a match */
 +	phys_addr_t	rom;		/* Physical address if not from BAR */
 +	size_t		romlen;		/* Length if not from BAR */
 +	char		*driver_override; /* Driver name to force a match */
  
 -	unsigned long priv_flags; /* Private flags for the pci driver */
 +	unsigned long	priv_flags;	/* Private flags for the PCI driver */
  };
  
  static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
@@@ -456,26 -459,26 +456,26 @@@ static inline int pci_channel_offline(s
  }
  
  struct pci_host_bridge {
 -	struct device dev;
 -	struct pci_bus *bus;		/* root bus */
 -	struct pci_ops *ops;
 -	void *sysdata;
 -	int busnr;
 +	struct device	dev;
 +	struct pci_bus	*bus;		/* Root bus */
 +	struct pci_ops	*ops;
 +	void		*sysdata;
 +	int		busnr;
  	struct list_head windows;	/* resource_entry */
 -	u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* platform IRQ swizzler */
 +	u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
  	int (*map_irq)(const struct pci_dev *, u8, u8);
  	void (*release_fn)(struct pci_host_bridge *);
 -	void *release_data;
 +	void		*release_data;
  	struct msi_controller *msi;
 -	unsigned int ignore_reset_delay:1;	/* for entire hierarchy */
 -	unsigned int no_ext_tags:1;		/* no Extended Tags */
 +	unsigned int	ignore_reset_delay:1;	/* For entire hierarchy */
 +	unsigned int	no_ext_tags:1;		/* No Extended Tags */
  	/* Resource alignment requirements */
  	resource_size_t (*align_resource)(struct pci_dev *dev,
  			const struct resource *res,
  			resource_size_t start,
  			resource_size_t size,
  			resource_size_t align);
 -	unsigned long private[0] ____cacheline_aligned;
 +	unsigned long	private[0] ____cacheline_aligned;
  };
  
  #define	to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
@@@ -497,8 -500,8 +497,8 @@@ void pci_free_host_bridge(struct pci_ho
  struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
  
  void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
 -		     void (*release_fn)(struct pci_host_bridge *),
 -		     void *release_data);
 +				 void (*release_fn)(struct pci_host_bridge *),
 +				 void *release_data);
  
  int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
  
@@@ -518,32 -521,32 +518,32 @@@
  #define PCI_SUBTRACTIVE_DECODE	0x1
  
  struct pci_bus_resource {
 -	struct list_head list;
 -	struct resource *res;
 -	unsigned int flags;
 +	struct list_head	list;
 +	struct resource		*res;
 +	unsigned int		flags;
  };
  
  #define PCI_REGION_FLAG_MASK	0x0fU	/* These bits of resource flags tell us the PCI region flags */
  
  struct pci_bus {
 -	struct list_head node;		/* node in list of buses */
 -	struct pci_bus	*parent;	/* parent bus this bridge is on */
 -	struct list_head children;	/* list of child buses */
 -	struct list_head devices;	/* list of devices on this bus */
 -	struct pci_dev	*self;		/* bridge device as seen by parent */
 -	struct list_head slots;		/* list of slots on this bus;
 +	struct list_head node;		/* Node in list of buses */
 +	struct pci_bus	*parent;	/* Parent bus this bridge is on */
 +	struct list_head children;	/* List of child buses */
 +	struct list_head devices;	/* List of devices on this bus */
 +	struct pci_dev	*self;		/* Bridge device as seen by parent */
 +	struct list_head slots;		/* List of slots on this bus;
  					   protected by pci_slot_mutex */
  	struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
 -	struct list_head resources;	/* address space routed to this bus */
 -	struct resource busn_res;	/* bus numbers routed to this bus */
 +	struct list_head resources;	/* Address space routed to this bus */
 +	struct resource busn_res;	/* Bus numbers routed to this bus */
  
 -	struct pci_ops	*ops;		/* configuration access functions */
 +	struct pci_ops	*ops;		/* Configuration access functions */
  	struct msi_controller *msi;	/* MSI controller */
 -	void		*sysdata;	/* hook for sys-specific extension */
 -	struct proc_dir_entry *procdir;	/* directory entry in /proc/bus/pci */
 +	void		*sysdata;	/* Hook for sys-specific extension */
 +	struct proc_dir_entry *procdir;	/* Directory entry in /proc/bus/pci */
  
 -	unsigned char	number;		/* bus number */
 -	unsigned char	primary;	/* number of primary bridge */
 +	unsigned char	number;		/* Bus number */
 +	unsigned char	primary;	/* Number of primary bridge */
  	unsigned char	max_bus_speed;	/* enum pci_bus_speed */
  	unsigned char	cur_bus_speed;	/* enum pci_bus_speed */
  #ifdef CONFIG_PCI_DOMAINS_GENERIC
@@@ -552,12 -555,12 +552,12 @@@
  
  	char		name[48];
  
 -	unsigned short  bridge_ctl;	/* manage NO_ISA/FBB/et al behaviors */
 -	pci_bus_flags_t bus_flags;	/* inherited by child buses */
 +	unsigned short	bridge_ctl;	/* Manage NO_ISA/FBB/et al behaviors */
 +	pci_bus_flags_t bus_flags;	/* Inherited by child buses */
  	struct device		*bridge;
  	struct device		dev;
 -	struct bin_attribute	*legacy_io; /* legacy I/O for this bus */
 -	struct bin_attribute	*legacy_mem; /* legacy mem */
 +	struct bin_attribute	*legacy_io;	/* Legacy I/O for this bus */
 +	struct bin_attribute	*legacy_mem;	/* Legacy mem */
  	unsigned int		is_added:1;
  };
  
@@@ -614,7 -617,9 +614,7 @@@ static inline bool pci_dev_msi_enabled(
  static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
  #endif
  
 -/*
 - * Error values that may be returned by PCI functions.
 - */
 +/* Error values that may be returned by PCI functions */
  #define PCIBIOS_SUCCESSFUL		0x00
  #define PCIBIOS_FUNC_NOT_SUPPORTED	0x81
  #define PCIBIOS_BAD_VENDOR_ID		0x83
@@@ -623,7 -628,9 +623,7 @@@
  #define PCIBIOS_SET_FAILED		0x88
  #define PCIBIOS_BUFFER_TOO_SMALL	0x89
  
 -/*
 - * Translate above to generic errno for passing back through non-PCI code.
 - */
 +/* Translate above to generic errno for passing back through non-PCI code */
  static inline int pcibios_err_to_errno(int err)
  {
  	if (err <= PCIBIOS_SUCCESSFUL)
@@@ -673,13 -680,13 +673,13 @@@ typedef u32 pci_bus_addr_t
  #endif
  
  struct pci_bus_region {
 -	pci_bus_addr_t start;
 -	pci_bus_addr_t end;
 +	pci_bus_addr_t	start;
 +	pci_bus_addr_t	end;
  };
  
  struct pci_dynids {
 -	spinlock_t lock;            /* protects list, index */
 -	struct list_head list;      /* for IDs added at runtime */
 +	spinlock_t		lock;	/* Protects list, index */
 +	struct list_head	list;	/* For IDs added at runtime */
  };
  
  
@@@ -693,13 -700,13 +693,13 @@@
  typedef unsigned int __bitwise pci_ers_result_t;
  
  enum pci_ers_result {
 -	/* no result/none/not supported in device driver */
 +	/* No result/none/not supported in device driver */
  	PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
  
  	/* Device driver can recover without slot reset */
  	PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
  
 -	/* Device driver wants slot to be reset. */
 +	/* Device driver wants slot to be reset */
  	PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
  
  	/* Device has completely failed, is unrecoverable */
@@@ -735,27 -742,27 +735,27 @@@ struct pci_error_handlers 
  
  struct module;
  struct pci_driver {
 -	struct list_head node;
 -	const char *name;
 -	const struct pci_device_id *id_table;	/* must be non-NULL for probe to be called */
 -	int  (*probe)  (struct pci_dev *dev, const struct pci_device_id *id);	/* New device inserted */
 -	void (*remove) (struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug capable driver) */
 -	int  (*suspend) (struct pci_dev *dev, pm_message_t state);	/* Device suspended */
 -	int  (*suspend_late) (struct pci_dev *dev, pm_message_t state);
 -	int  (*resume_early) (struct pci_dev *dev);
 -	int  (*resume) (struct pci_dev *dev);	                /* Device woken up */
 +	struct list_head	node;
 +	const char		*name;
 +	const struct pci_device_id *id_table;	/* Must be non-NULL for probe to be called */
 +	int  (*probe)(struct pci_dev *dev, const struct pci_device_id *id);	/* New device inserted */
 +	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug capable driver) */
 +	int  (*suspend)(struct pci_dev *dev, pm_message_t state);	/* Device suspended */
 +	int  (*suspend_late)(struct pci_dev *dev, pm_message_t state);
 +	int  (*resume_early)(struct pci_dev *dev);
 +	int  (*resume) (struct pci_dev *dev);	/* Device woken up */
  	void (*shutdown) (struct pci_dev *dev);
 -	int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* PF pdev */
 +	int  (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* On PF */
  	const struct pci_error_handlers *err_handler;
  	const struct attribute_group **groups;
  	struct device_driver	driver;
 -	struct pci_dynids dynids;
 +	struct pci_dynids	dynids;
  };
  
  #define	to_pci_driver(drv) container_of(drv, struct pci_driver, driver)
  
  /**
 - * PCI_DEVICE - macro used to describe a specific pci device
 + * PCI_DEVICE - macro used to describe a specific PCI device
   * @vend: the 16 bit PCI Vendor ID
   * @dev: the 16 bit PCI Device ID
   *
@@@ -768,7 -775,7 +768,7 @@@
  	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
  
  /**
 - * PCI_DEVICE_SUB - macro used to describe a specific pci device with subsystem
 + * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
   * @vend: the 16 bit PCI Vendor ID
   * @dev: the 16 bit PCI Device ID
   * @subvend: the 16 bit PCI Subvendor ID
@@@ -782,7 -789,7 +782,7 @@@
  	.subvendor = (subvend), .subdevice = (subdev)
  
  /**
 - * PCI_DEVICE_CLASS - macro used to describe a specific pci device class
 + * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
   * @dev_class: the class, subclass, prog-if triple for this device
   * @dev_class_mask: the class mask for this device
   *
@@@ -796,7 -803,7 +796,7 @@@
  	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
  
  /**
 - * PCI_VDEVICE - macro used to describe a specific pci device in short form
 + * PCI_VDEVICE - macro used to describe a specific PCI device in short form
   * @vend: the vendor name
   * @dev: the 16 bit PCI Device ID
   *
@@@ -805,21 -812,22 +805,21 @@@
   * to PCI_ANY_ID. The macro allows the next field to follow as the device
   * private data.
   */
 -
  #define PCI_VDEVICE(vend, dev) \
  	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
  	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
  
  enum {
 -	PCI_REASSIGN_ALL_RSRC	= 0x00000001,	/* ignore firmware setup */
 -	PCI_REASSIGN_ALL_BUS	= 0x00000002,	/* reassign all bus numbers */
 -	PCI_PROBE_ONLY		= 0x00000004,	/* use existing setup */
 -	PCI_CAN_SKIP_ISA_ALIGN	= 0x00000008,	/* don't do ISA alignment */
 -	PCI_ENABLE_PROC_DOMAINS	= 0x00000010,	/* enable domains in /proc */
 +	PCI_REASSIGN_ALL_RSRC	= 0x00000001,	/* Ignore firmware setup */
 +	PCI_REASSIGN_ALL_BUS	= 0x00000002,	/* Reassign all bus numbers */
 +	PCI_PROBE_ONLY		= 0x00000004,	/* Use existing setup */
 +	PCI_CAN_SKIP_ISA_ALIGN	= 0x00000008,	/* Don't do ISA alignment */
 +	PCI_ENABLE_PROC_DOMAINS	= 0x00000010,	/* Enable domains in /proc */
  	PCI_COMPAT_DOMAIN_0	= 0x00000020,	/* ... except domain 0 */
 -	PCI_SCAN_ALL_PCIE_DEVS	= 0x00000040,	/* scan all, not just dev 0 */
 +	PCI_SCAN_ALL_PCIE_DEVS	= 0x00000040,	/* Scan all, not just dev 0 */
  };
  
 -/* these external functions are only available when PCI support is enabled */
 +/* These external functions are only available when PCI support is enabled */
  #ifdef CONFIG_PCI
  
  extern unsigned int pci_flags;
@@@ -832,11 -840,11 +832,11 @@@ static inline int pci_has_flag(int flag
  void pcie_bus_configure_settings(struct pci_bus *bus);
  
  enum pcie_bus_config_types {
 -	PCIE_BUS_TUNE_OFF,	/* don't touch MPS at all */
 -	PCIE_BUS_DEFAULT,	/* ensure MPS matches upstream bridge */
 -	PCIE_BUS_SAFE,		/* use largest MPS boot-time devices support */
 -	PCIE_BUS_PERFORMANCE,	/* use MPS and MRRS for best performance */
 -	PCIE_BUS_PEER2PEER,	/* set MPS = 128 for all devices */
 +	PCIE_BUS_TUNE_OFF,	/* Don't touch MPS at all */
 +	PCIE_BUS_DEFAULT,	/* Ensure MPS matches upstream bridge */
 +	PCIE_BUS_SAFE,		/* Use largest MPS boot-time devices support */
 +	PCIE_BUS_PERFORMANCE,	/* Use MPS and MRRS for best performance */
 +	PCIE_BUS_PEER2PEER,	/* Set MPS = 128 for all devices */
  };
  
  extern enum pcie_bus_config_types pcie_bus_config;
@@@ -845,7 -853,7 +845,7 @@@ extern struct bus_type pci_bus_type
  
  /* Do NOT directly access these two variables, unless you are arch-specific PCI
   * code, or PCI core code. */
 -extern struct list_head pci_root_buses;	/* list of all known PCI buses */
 +extern struct list_head pci_root_buses;	/* List of all known PCI buses */
  /* Some device drivers need know if PCI is initiated */
  int no_pci_devices(void);
  
@@@ -883,8 -891,8 +883,8 @@@ int pci_bus_insert_busn_res(struct pci_
  int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
  void pci_bus_release_busn_res(struct pci_bus *b);
  struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
 -					     struct pci_ops *ops, void *sysdata,
 -					     struct list_head *resources);
 +				  struct pci_ops *ops, void *sysdata,
 +				  struct list_head *resources);
  int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
  struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
  				int busnr);
@@@ -941,10 -949,10 +941,10 @@@ int pci_find_next_ht_capability(struct 
  struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
  
  struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
 -				struct pci_dev *from);
 +			       struct pci_dev *from);
  struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
 -				unsigned int ss_vendor, unsigned int ss_device,
 -				struct pci_dev *from);
 +			       unsigned int ss_vendor, unsigned int ss_device,
 +			       struct pci_dev *from);
  struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
  struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
  					    unsigned int devfn);
@@@ -1020,7 -1028,7 +1020,7 @@@ static inline int pcie_capability_clear
  	return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
  }
  
 -/* user-space driven config access */
 +/* User-space driven config access */
  int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
  int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
  int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
@@@ -1064,6 -1072,7 +1064,7 @@@ int pci_set_pcie_reset_state(struct pci
  int pci_set_cacheline_size(struct pci_dev *dev);
  #define HAVE_PCI_SET_MWI
  int __must_check pci_set_mwi(struct pci_dev *dev);
+ int __must_check pcim_set_mwi(struct pci_dev *dev);
  int pci_try_set_mwi(struct pci_dev *dev);
  void pci_clear_mwi(struct pci_dev *dev);
  void pci_intx(struct pci_dev *dev, int enable);
@@@ -1162,7 -1171,7 +1163,7 @@@ unsigned int pci_rescan_bus(struct pci_
  void pci_lock_rescan_remove(void);
  void pci_unlock_rescan_remove(void);
  
 -/* Vital product data routines */
 +/* Vital Product Data routines */
  ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
  ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
  int pci_set_vpd_size(struct pci_dev *dev, size_t len);
@@@ -1247,7 -1256,9 +1248,7 @@@ static inline pci_bus_addr_t pci_bus_ad
  int __must_check __pci_register_driver(struct pci_driver *, struct module *,
  				       const char *mod_name);
  
 -/*
 - * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded
 - */
 +/* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */
  #define pci_register_driver(driver)		\
  	__pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
  
@@@ -1262,7 -1273,8 +1263,7 @@@ void pci_unregister_driver(struct pci_d
   * use this macro once, and calling it replaces module_init() and module_exit()
   */
  #define module_pci_driver(__pci_driver) \
 -	module_driver(__pci_driver, pci_register_driver, \
 -		       pci_unregister_driver)
 +	module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
  
  /**
   * builtin_pci_driver() - Helper macro for registering a PCI driver
@@@ -1301,10 -1313,10 +1302,10 @@@ resource_size_t pcibios_iov_resource_al
  int pci_set_vga_state(struct pci_dev *pdev, bool decode,
  		      unsigned int command_bits, u32 flags);
  
 -#define PCI_IRQ_LEGACY		(1 << 0) /* allow legacy interrupts */
 -#define PCI_IRQ_MSI		(1 << 1) /* allow MSI interrupts */
 -#define PCI_IRQ_MSIX		(1 << 2) /* allow MSI-X interrupts */
 -#define PCI_IRQ_AFFINITY	(1 << 3) /* auto-assign affinity */
 +#define PCI_IRQ_LEGACY		(1 << 0) /* Allow legacy interrupts */
 +#define PCI_IRQ_MSI		(1 << 1) /* Allow MSI interrupts */
 +#define PCI_IRQ_MSIX		(1 << 2) /* Allow MSI-X interrupts */
 +#define PCI_IRQ_AFFINITY	(1 << 3) /* Auto-assign affinity */
  #define PCI_IRQ_ALL_TYPES \
  	(PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX)
  
@@@ -1323,8 -1335,8 +1324,8 @@@
  #define	pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
  
  struct msix_entry {
 -	u32	vector;	/* kernel uses to write allocated vector */
 -	u16	entry;	/* driver uses to specify entry, OS writes */
 +	u32	vector;	/* Kernel uses to write allocated vector */
 +	u16	entry;	/* Driver uses to specify entry, OS writes */
  };
  
  #ifdef CONFIG_PCI_MSI
@@@ -1364,10 -1376,10 +1365,10 @@@ static inline int pci_msi_enabled(void
  static inline int pci_enable_msi(struct pci_dev *dev)
  { return -ENOSYS; }
  static inline int pci_enable_msix_range(struct pci_dev *dev,
 -		      struct msix_entry *entries, int minvec, int maxvec)
 +			struct msix_entry *entries, int minvec, int maxvec)
  { return -ENOSYS; }
  static inline int pci_enable_msix_exact(struct pci_dev *dev,
 -		      struct msix_entry *entries, int nvec)
 +			struct msix_entry *entries, int nvec)
  { return -ENOSYS; }
  
  static inline int
@@@ -1532,9 -1544,9 +1533,9 @@@ static inline int acpi_pci_bus_find_dom
  int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
  #endif
  
 -/* some architectures require additional setup to direct VGA traffic */
 +/* Some architectures require additional setup to direct VGA traffic */
  typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
 -		      unsigned int command_bits, u32 flags);
 +				    unsigned int command_bits, u32 flags);
  void pci_register_set_vga_state(arch_set_vga_state_t func);
  
  static inline int
@@@ -1573,9 -1585,10 +1574,9 @@@ static inline void pci_clear_flags(int 
  static inline int pci_has_flag(int flag) { return 0; }
  
  /*
 - *  If the system does not have PCI, clearly these return errors.  Define
 - *  these as simple inline functions to avoid hair in drivers.
 + * If the system does not have PCI, clearly these return errors.  Define
 + * these as simple inline functions to avoid hair in drivers.
   */
 -
  #define _PCI_NOP(o, s, t) \
  	static inline int pci_##o##_config_##s(struct pci_dev *dev, \
  						int where, t val) \
@@@ -1714,10 -1727,8 +1715,10 @@@ int pci_iobar_pfn(struct pci_dev *pdev
  #define pci_root_bus_fwnode(bus)	NULL
  #endif
  
 -/* these helpers provide future and backwards compatibility
 - * for accessing popular PCI BAR info */
 +/*
 + * These helpers provide future and backwards compatibility
 + * for accessing popular PCI BAR info
 + */
  #define pci_resource_start(dev, bar)	((dev)->resource[(bar)].start)
  #define pci_resource_end(dev, bar)	((dev)->resource[(bar)].end)
  #define pci_resource_flags(dev, bar)	((dev)->resource[(bar)].flags)
@@@ -1729,8 -1740,7 +1730,8 @@@
  	 (pci_resource_end((dev), (bar)) -		\
  	  pci_resource_start((dev), (bar)) + 1))
  
 -/* Similar to the helpers above, these manipulate per-pci_dev
 +/*
 + * Similar to the helpers above, these manipulate per-pci_dev
   * driver-specific data.  They are really just a wrapper around
   * the generic device structure functions of these calls.
   */
@@@ -1744,14 -1754,16 +1745,14 @@@ static inline void pci_set_drvdata(stru
  	dev_set_drvdata(&pdev->dev, data);
  }
  
 -/* If you want to know what to call your pci_dev, ask this function.
 - * Again, it's a wrapper around the generic device.
 - */
  static inline const char *pci_name(const struct pci_dev *pdev)
  {
  	return dev_name(&pdev->dev);
  }
  
  
 -/* Some archs don't want to expose struct resource to userland as-is
 +/*
 + * Some archs don't want to expose struct resource to userland as-is
   * in sysfs and /proc
   */
  #ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER
@@@ -1770,16 -1782,16 +1771,16 @@@ static inline void pci_resource_to_user
  
  
  /*
 - *  The world is not perfect and supplies us with broken PCI devices.
 - *  For at least a part of these bugs we need a work-around, so both
 - *  generic (drivers/pci/quirks.c) and per-architecture code can define
 - *  fixup hooks to be called for particular buggy devices.
 + * The world is not perfect and supplies us with broken PCI devices.
 + * For at least a part of these bugs we need a work-around, so both
 + * generic (drivers/pci/quirks.c) and per-architecture code can define
 + * fixup hooks to be called for particular buggy devices.
   */
  
  struct pci_fixup {
 -	u16 vendor;		/* You can use PCI_ANY_ID here of course */
 -	u16 device;		/* You can use PCI_ANY_ID here of course */
 -	u32 class;		/* You can use PCI_ANY_ID here too */
 +	u16 vendor;			/* Or PCI_ANY_ID */
 +	u16 device;			/* Or PCI_ANY_ID */
 +	u32 class;			/* Or PCI_ANY_ID */
  	unsigned int class_shift;	/* should be 0, 8, 16 */
  	void (*hook)(struct pci_dev *dev);
  };
@@@ -1821,19 -1833,23 +1822,19 @@@ enum pci_fixup_pass 
  #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class,		\
  					 class_shift, hook)		\
  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
 -		resume##hook, vendor, device, class,	\
 -		class_shift, hook)
 +		resume##hook, vendor, device, class, class_shift, hook)
  #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class,	\
  					 class_shift, hook)		\
  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
 -		resume_early##hook, vendor, device,	\
 -		class, class_shift, hook)
 +		resume_early##hook, vendor, device, class, class_shift, hook)
  #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class,		\
  					 class_shift, hook)		\
  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
 -		suspend##hook, vendor, device, class,	\
 -		class_shift, hook)
 +		suspend##hook, vendor, device, class, class_shift, hook)
  #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class,	\
  					 class_shift, hook)		\
  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late,		\
 -		suspend_late##hook, vendor, device,	\
 -		class, class_shift, hook)
 +		suspend_late##hook, vendor, device, class, class_shift, hook)
  
  #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook)			\
  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
@@@ -1849,16 -1865,20 +1850,16 @@@
  		hook, vendor, device, PCI_ANY_ID, 0, hook)
  #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook)			\
  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
 -		resume##hook, vendor, device,		\
 -		PCI_ANY_ID, 0, hook)
 +		resume##hook, vendor, device, PCI_ANY_ID, 0, hook)
  #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook)		\
  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
 -		resume_early##hook, vendor, device,	\
 -		PCI_ANY_ID, 0, hook)
 +		resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook)
  #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook)			\
  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
 -		suspend##hook, vendor, device,		\
 -		PCI_ANY_ID, 0, hook)
 +		suspend##hook, vendor, device, PCI_ANY_ID, 0, hook)
  #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook)		\
  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late,		\
 -		suspend_late##hook, vendor, device,	\
 -		PCI_ANY_ID, 0, hook)
 +		suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook)
  
  #ifdef CONFIG_PCI_QUIRKS
  void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
@@@ -1945,7 -1965,6 +1946,7 @@@ int pci_vfs_assigned(struct pci_dev *de
  int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
  int pci_sriov_get_totalvfs(struct pci_dev *dev);
  resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
 +void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
  #else
  static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
  {
@@@ -1973,7 -1992,6 +1974,7 @@@ static inline int pci_sriov_get_totalvf
  { return 0; }
  static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
  { return 0; }
 +static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
  #endif
  
  #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
@@@ -2095,7 -2113,7 +2096,7 @@@ static inline u16 pci_vpd_lrdt_size(con
   */
  static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt)
  {
 -    return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
 +	return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
  }
  
  /**
@@@ -2180,7 -2198,7 +2181,7 @@@ static inline struct device_node *pci_b
  	return bus ? bus->dev.of_node : NULL;
  }
  
 -#else /* CONFIG_OF */
 +#else	/* CONFIG_OF */
  static inline void pci_set_of_node(struct pci_dev *dev) { }
  static inline void pci_release_of_node(struct pci_dev *dev) { }
  static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
@@@ -2189,7 -2207,7 +2190,7 @@@ static inline struct device_node 
  pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
  static inline struct irq_domain *
  pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
 -#endif  /* CONFIG_OF */
 +#endif	/* CONFIG_OF */
  
  #ifdef CONFIG_ACPI
  struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
@@@ -2214,7 -2232,7 +2215,7 @@@ int pci_for_each_dma_alias(struct pci_d
  			   int (*fn)(struct pci_dev *pdev,
  				     u16 alias, void *data), void *data);
  
 -/* helper functions for operation of device flag */
 +/* Helper functions for operation of device flag */
  static inline void pci_set_dev_assigned(struct pci_dev *pdev)
  {
  	pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
@@@ -2261,7 -2279,7 +2262,7 @@@ static inline bool pci_is_thunderbolt_a
  	return false;
  }
  
 -/* provide the legacy pci_dma_* API */
 +/* Provide the legacy pci_dma_* API */
  #include <linux/pci-dma-compat.h>
  
  #endif /* LINUX_PCI_H */
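
For reference, the interfaces touched above (struct pci_driver, the
PCI_DEVICE() id macro, module_pci_driver()) combine roughly as in the
minimal driver sketch below; the demo_* names and the 0x1234/0x5678 IDs
are placeholders, not part of this patch.

/* Minimal PCI driver sketch; all identifiers here are hypothetical. */
#include <linux/module.h>
#include <linux/pci.h>

#define DEMO_VENDOR_ID	0x1234
#define DEMO_DEVICE_ID	0x5678

static const struct pci_device_id demo_ids[] = {
	{ PCI_DEVICE(DEMO_VENDOR_ID, DEMO_DEVICE_ID) },
	{ 0, }				/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, demo_ids);

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pci_enable_device(pdev);	/* bumps the enable_cnt seen above */
	if (ret)
		return ret;

	dev_info(&pdev->dev, "BAR0 %pR\n", &pdev->resource[0]);
	return 0;
}

static void demo_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver demo_driver = {
	.name		= "demo_pci",
	.id_table	= demo_ids,	/* must be non-NULL for probe */
	.probe		= demo_probe,
	.remove		= demo_remove,
};
module_pci_driver(demo_driver);		/* wraps pci_register_driver() */

MODULE_LICENSE("GPL");
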
diff --combined include/linux/skbuff.h
index a87e43d16f44,b8e0da6c27d6..ac89a93b7c83
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@@ -1211,6 -1211,11 +1211,11 @@@ static inline bool skb_flow_dissect_flo
  				  data, proto, nhoff, hlen, flags);
  }
  
+ void
+ skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
+ 			     struct flow_dissector *flow_dissector,
+ 			     void *target_container);
+ 
  static inline __u32 skb_get_hash(struct sk_buff *skb)
  {
  	if (!skb->l4_hash && !skb->sw_hash)
@@@ -3241,7 -3246,7 +3246,7 @@@ struct sk_buff *__skb_recv_datagram(str
  				    int *peeked, int *off, int *err);
  struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
  				  int *err);
 -unsigned int datagram_poll(struct file *file, struct socket *sock,
 +__poll_t datagram_poll(struct file *file, struct socket *sock,
  			   struct poll_table_struct *wait);
  int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
  			   struct iov_iter *to, int size);
diff --combined include/net/inet_connection_sock.h
index ec72cdb5bc39,8e1bf9ae4a5e..6692d67e9245
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@@ -77,6 -77,7 +77,7 @@@ struct inet_connection_sock_af_ops 
   * @icsk_af_ops		   Operations which are AF_INET{4,6} specific
   * @icsk_ulp_ops	   Pluggable ULP control hook
   * @icsk_ulp_data	   ULP private data
+  * @icsk_listen_portaddr_node	hash to the portaddr listener hashtable
   * @icsk_ca_state:	   Congestion control state
   * @icsk_retransmits:	   Number of unrecovered [RTO] timeouts
   * @icsk_pending:	   Scheduled timer event
@@@ -101,6 -102,7 +102,7 @@@ struct inet_connection_sock 
  	const struct inet_connection_sock_af_ops *icsk_af_ops;
  	const struct tcp_ulp_ops  *icsk_ulp_ops;
  	void			  *icsk_ulp_data;
+ 	struct hlist_node         icsk_listen_portaddr_node;
  	unsigned int		  (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
  	__u8			  icsk_ca_state:6,
  				  icsk_ca_setsockopt:1,
@@@ -305,7 -307,7 +307,7 @@@ void inet_csk_prepare_forced_close(stru
  /*
   * LISTEN is a special case for poll..
   */
 -static inline unsigned int inet_csk_listen_poll(const struct sock *sk)
 +static inline __poll_t inet_csk_listen_poll(const struct sock *sk)
  {
  	return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ?
  			(POLLIN | POLLRDNORM) : 0;
diff --combined include/net/sctp/sctp.h
index 608d123ef25f,20c0c1be2ca7..f7ae6b0a21d0
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@@ -107,7 -107,7 +107,7 @@@ int sctp_backlog_rcv(struct sock *sk, s
  int sctp_inet_listen(struct socket *sock, int backlog);
  void sctp_write_space(struct sock *sk);
  void sctp_data_ready(struct sock *sk);
 -unsigned int sctp_poll(struct file *file, struct socket *sock,
 +__poll_t sctp_poll(struct file *file, struct socket *sock,
  		poll_table *wait);
  void sctp_sock_rfree(struct sk_buff *skb);
  void sctp_copy_sock(struct sock *newsk, struct sock *sk,
@@@ -116,7 -116,7 +116,7 @@@ extern struct percpu_counter sctp_socke
  int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
  struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
  
- int sctp_transport_walk_start(struct rhashtable_iter *iter);
+ void sctp_transport_walk_start(struct rhashtable_iter *iter);
  void sctp_transport_walk_stop(struct rhashtable_iter *iter);
  struct sctp_transport *sctp_transport_get_next(struct net *net,
  			struct rhashtable_iter *iter);
@@@ -444,13 -444,13 +444,13 @@@ static inline int sctp_frag_point(cons
  	int frag = pmtu;
  
  	frag -= sp->pf->af->net_header_len;
- 	frag -= sizeof(struct sctphdr) + sizeof(struct sctp_data_chunk);
+ 	frag -= sizeof(struct sctphdr) + sctp_datachk_len(&asoc->stream);
  
  	if (asoc->user_frag)
  		frag = min_t(int, frag, asoc->user_frag);
  
  	frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN -
- 					    sizeof(struct sctp_data_chunk)));
+ 					    sctp_datachk_len(&asoc->stream)));
  
  	return frag;
  }
diff --combined include/net/sock.h
index 4fd74e0d1bbb,66fd3951e6f3..57750cdd67d8
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@@ -1262,6 -1262,7 +1262,7 @@@ proto_memory_pressure(struct proto *pro
  /* Called with local bh disabled */
  void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
  int sock_prot_inuse_get(struct net *net, struct proto *proto);
+ int sock_inuse_get(struct net *net);
  #else
  static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
  		int inc)
@@@ -1583,7 -1584,7 +1584,7 @@@ int sock_no_connect(struct socket *, st
  int sock_no_socketpair(struct socket *, struct socket *);
  int sock_no_accept(struct socket *, struct socket *, int, bool);
  int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
 -unsigned int sock_no_poll(struct file *, struct socket *,
 +__poll_t sock_no_poll(struct file *, struct socket *,
  			  struct poll_table_struct *);
  int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
  int sock_no_listen(struct socket *, int);
@@@ -2337,31 -2338,6 +2338,6 @@@ static inline bool sk_listener(const st
  	return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
  }
  
- /**
-  * sk_state_load - read sk->sk_state for lockless contexts
-  * @sk: socket pointer
-  *
-  * Paired with sk_state_store(). Used in places we do not hold socket lock :
-  * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
-  */
- static inline int sk_state_load(const struct sock *sk)
- {
- 	return smp_load_acquire(&sk->sk_state);
- }
- 
- /**
-  * sk_state_store - update sk->sk_state
-  * @sk: socket pointer
-  * @newstate: new state
-  *
-  * Paired with sk_state_load(). Should be used in contexts where
-  * state change might impact lockless readers.
-  */
- static inline void sk_state_store(struct sock *sk, int newstate)
- {
- 	smp_store_release(&sk->sk_state, newstate);
- }
- 
  void sock_enable_timestamp(struct sock *sk, int flag);
  int sock_get_timestamp(struct sock *, struct timeval __user *);
  int sock_get_timestampns(struct sock *, struct timespec __user *);
@@@ -2412,4 -2388,15 +2388,15 @@@ static inline int sk_get_rmem0(const st
  	return *proto->sysctl_rmem;
  }
  
+ /* Default TCP Small queue budget is ~1 ms of data (1sec >> 10)
+  * Some wifi drivers need to tweak it to get more chunks.
+  * They can use this helper from their ndo_start_xmit()
+  */
+ static inline void sk_pacing_shift_update(struct sock *sk, int val)
+ {
+ 	if (!sk || !sk_fullsock(sk) || sk->sk_pacing_shift == val)
+ 		return;
+ 	sk->sk_pacing_shift = val;
+ }
+ 
  #endif	/* _SOCK_H */
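
The comment on sk_pacing_shift_update() above points at wifi drivers
calling it from their ndo_start_xmit(); a rough sketch of that usage
follows (demo_wifi_start_xmit() and the shift value 8, i.e. roughly
4 ms of queued data instead of the default ~1 ms, are illustrative
assumptions, not taken from this patch).

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>

static netdev_tx_t demo_wifi_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	/* Allow more in-flight data per socket to help aggregation;
	 * the helper tolerates skb->sk being NULL or not a full socket.
	 */
	sk_pacing_shift_update(skb->sk, 8);

	/* ... queue the frame to the hardware here ... */
	dev_kfree_skb_any(skb);		/* placeholder for the real TX path */
	return NETDEV_TX_OK;
}
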
diff --combined include/net/tcp.h
index 50b21a49d870,6939e69d3c37..26c2793846a1
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@@ -387,7 -387,7 +387,7 @@@ bool tcp_peer_is_proven(struct request_
  void tcp_close(struct sock *sk, long timeout);
  void tcp_init_sock(struct sock *sk);
  void tcp_init_transfer(struct sock *sk, int bpf_op);
 -unsigned int tcp_poll(struct file *file, struct socket *sock,
 +__poll_t tcp_poll(struct file *file, struct socket *sock,
  		      struct poll_table_struct *wait);
  int tcp_getsockopt(struct sock *sk, int level, int optname,
  		   char __user *optval, int __user *optlen);
@@@ -1507,8 -1507,7 +1507,7 @@@ int tcp_md5_hash_key(struct tcp_md5sig_
  
  /* From tcp_fastopen.c */
  void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
- 			    struct tcp_fastopen_cookie *cookie, int *syn_loss,
- 			    unsigned long *last_syn_loss);
+ 			    struct tcp_fastopen_cookie *cookie);
  void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
  			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
  			    u16 try_exp);
@@@ -1546,7 -1545,7 +1545,7 @@@ extern unsigned int sysctl_tcp_fastopen
  void tcp_fastopen_active_disable(struct sock *sk);
  bool tcp_fastopen_active_should_disable(struct sock *sk);
  void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
- void tcp_fastopen_active_timeout_reset(void);
+ void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
  
  /* Latencies incurred by various limits for a sender. They are
   * chronograph-like stats that are mutually exclusive.
@@@ -2011,10 -2010,12 +2010,12 @@@ static inline int tcp_call_bpf(struct s
  	struct bpf_sock_ops_kern sock_ops;
  	int ret;
  
- 	if (sk_fullsock(sk))
+ 	memset(&sock_ops, 0, sizeof(sock_ops));
+ 	if (sk_fullsock(sk)) {
+ 		sock_ops.is_fullsock = 1;
  		sock_owned_by_me(sk);
+ 	}
  
- 	memset(&sock_ops, 0, sizeof(sock_ops));
  	sock_ops.sk = sk;
  	sock_ops.op = op;
  
diff --combined include/uapi/linux/if_ether.h
index 144de4d2f385,87b7529fcdfe..f8cb5760ea4f
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@@ -23,7 -23,6 +23,7 @@@
  #define _UAPI_LINUX_IF_ETHER_H
  
  #include <linux/types.h>
 +#include <linux/libc-compat.h>
  
  /*
   *	IEEE 802.3 Ethernet magic constants.  The frame sizes omit the preamble
@@@ -48,6 -47,7 +48,7 @@@
  #define ETH_P_PUP	0x0200		/* Xerox PUP packet		*/
  #define ETH_P_PUPAT	0x0201		/* Xerox PUP Addr Trans packet	*/
  #define ETH_P_TSN	0x22F0		/* TSN (IEEE 1722) packet	*/
+ #define ETH_P_ERSPAN2	0x22EB		/* ERSPAN version 2 (type III)	*/
  #define ETH_P_IP	0x0800		/* Internet Protocol packet	*/
  #define ETH_P_X25	0x0805		/* CCITT X.25			*/
  #define ETH_P_ARP	0x0806		/* Address Resolution packet	*/
@@@ -150,13 -150,11 +151,13 @@@
   *	This is an Ethernet frame header.
   */
  
 +#if __UAPI_DEF_ETHHDR
  struct ethhdr {
  	unsigned char	h_dest[ETH_ALEN];	/* destination eth addr	*/
  	unsigned char	h_source[ETH_ALEN];	/* source ether addr	*/
  	__be16		h_proto;		/* packet type ID field	*/
  } __attribute__((packed));
 +#endif
  
  
  #endif /* _UAPI_LINUX_IF_ETHER_H */
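
For context, the struct ethhdr guarded above is consumed by userspace
roughly as below; the new __UAPI_DEF_ETHHDR conditional only has an
effect when the C library already ships its own definition.  The
dump_eth_header() helper is an illustrative sketch, not part of the
patch.

#include <stdio.h>
#include <arpa/inet.h>		/* ntohs() */
#include <linux/if_ether.h>	/* struct ethhdr, ETH_P_* */

static void dump_eth_header(const unsigned char *frame)
{
	const struct ethhdr *eth = (const struct ethhdr *)frame;
	unsigned short proto = ntohs(eth->h_proto);

	printf("dst %02x:%02x:%02x:%02x:%02x:%02x proto 0x%04x%s\n",
	       eth->h_dest[0], eth->h_dest[1], eth->h_dest[2],
	       eth->h_dest[3], eth->h_dest[4], eth->h_dest[5],
	       proto, proto == ETH_P_IP ? " (IPv4)" : "");
}
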
diff --combined kernel/bpf/syscall.c
index 5cb783fc8224,007802c5ca7d..f35ce7e70c90
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@@ -937,10 -937,16 +937,16 @@@ static void __bpf_prog_put_rcu(struct r
  static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
  {
  	if (atomic_dec_and_test(&prog->aux->refcnt)) {
+ 		int i;
+ 
  		trace_bpf_prog_put_rcu(prog);
  		/* bpf_prog_free_id() must be called first */
  		bpf_prog_free_id(prog, do_idr_lock);
+ 
+ 		for (i = 0; i < prog->aux->func_cnt; i++)
+ 			bpf_prog_kallsyms_del(prog->aux->func[i]);
  		bpf_prog_kallsyms_del(prog);
+ 
  		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
  	}
  }
@@@ -1057,7 -1063,7 +1063,7 @@@ struct bpf_prog *bpf_prog_inc_not_zero(
  }
  EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
  
 -static bool bpf_prog_get_ok(struct bpf_prog *prog,
 +bool bpf_prog_get_ok(struct bpf_prog *prog,
  			    enum bpf_prog_type *attach_type, bool attach_drv)
  {
  	/* not an attachment, just a refcount inc, always allow */
@@@ -1194,7 -1200,8 +1200,8 @@@ static int bpf_prog_load(union bpf_att
  		goto free_used_maps;
  
  	/* eBPF program is ready to be JITed */
- 	prog = bpf_prog_select_runtime(prog, &err);
+ 	if (!prog->bpf_func)
+ 		prog = bpf_prog_select_runtime(prog, &err);
  	if (err < 0)
  		goto free_used_maps;
  
@@@ -1551,6 -1558,67 +1558,67 @@@ static int bpf_map_get_fd_by_id(const u
  	return fd;
  }
  
+ static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
+ 					      unsigned long addr)
+ {
+ 	int i;
+ 
+ 	for (i = 0; i < prog->aux->used_map_cnt; i++)
+ 		if (prog->aux->used_maps[i] == (void *)addr)
+ 			return prog->aux->used_maps[i];
+ 	return NULL;
+ }
+ 
+ static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
+ {
+ 	const struct bpf_map *map;
+ 	struct bpf_insn *insns;
+ 	u64 imm;
+ 	int i;
+ 
+ 	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
+ 			GFP_USER);
+ 	if (!insns)
+ 		return insns;
+ 
+ 	for (i = 0; i < prog->len; i++) {
+ 		if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) {
+ 			insns[i].code = BPF_JMP | BPF_CALL;
+ 			insns[i].imm = BPF_FUNC_tail_call;
+ 			/* fall-through */
+ 		}
+ 		if (insns[i].code == (BPF_JMP | BPF_CALL) ||
+ 		    insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
+ 			if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
+ 				insns[i].code = BPF_JMP | BPF_CALL;
+ 			if (!bpf_dump_raw_ok())
+ 				insns[i].imm = 0;
+ 			continue;
+ 		}
+ 
+ 		if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
+ 			continue;
+ 
+ 		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
+ 		map = bpf_map_from_imm(prog, imm);
+ 		if (map) {
+ 			insns[i].src_reg = BPF_PSEUDO_MAP_FD;
+ 			insns[i].imm = map->id;
+ 			insns[i + 1].imm = 0;
+ 			continue;
+ 		}
+ 
+ 		if (!bpf_dump_raw_ok() &&
+ 		    imm == (unsigned long)prog->aux) {
+ 			insns[i].imm = 0;
+ 			insns[i + 1].imm = 0;
+ 			continue;
+ 		}
+ 	}
+ 
+ 	return insns;
+ }
+ 
  static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
  				   const union bpf_attr *attr,
  				   union bpf_attr __user *uattr)
@@@ -1601,18 -1669,34 +1669,34 @@@
  	ulen = info.jited_prog_len;
  	info.jited_prog_len = prog->jited_len;
  	if (info.jited_prog_len && ulen) {
- 		uinsns = u64_to_user_ptr(info.jited_prog_insns);
- 		ulen = min_t(u32, info.jited_prog_len, ulen);
- 		if (copy_to_user(uinsns, prog->bpf_func, ulen))
- 			return -EFAULT;
+ 		if (bpf_dump_raw_ok()) {
+ 			uinsns = u64_to_user_ptr(info.jited_prog_insns);
+ 			ulen = min_t(u32, info.jited_prog_len, ulen);
+ 			if (copy_to_user(uinsns, prog->bpf_func, ulen))
+ 				return -EFAULT;
+ 		} else {
+ 			info.jited_prog_insns = 0;
+ 		}
  	}
  
  	ulen = info.xlated_prog_len;
  	info.xlated_prog_len = bpf_prog_insn_size(prog);
  	if (info.xlated_prog_len && ulen) {
+ 		struct bpf_insn *insns_sanitized;
+ 		bool fault;
+ 
+ 		if (prog->blinded && !bpf_dump_raw_ok()) {
+ 			info.xlated_prog_insns = 0;
+ 			goto done;
+ 		}
+ 		insns_sanitized = bpf_insn_prepare_dump(prog);
+ 		if (!insns_sanitized)
+ 			return -ENOMEM;
  		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
  		ulen = min_t(u32, info.xlated_prog_len, ulen);
- 		if (copy_to_user(uinsns, prog->insnsi, ulen))
+ 		fault = copy_to_user(uinsns, insns_sanitized, ulen);
+ 		kfree(insns_sanitized);
+ 		if (fault)
  			return -EFAULT;
  	}
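
A rough userspace sketch of the dump path being sanitized above, using
BPF_OBJ_GET_INFO_BY_FD to read back the translated instructions; per
bpf_insn_prepare_dump(), map loads come back as BPF_PSEUDO_MAP_FD plus
map id and raw addresses are zeroed unless bpf_dump_raw_ok() holds.
prog_get_info()/dump_xlated() are illustrative names and error handling
is kept minimal.

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int prog_get_info(int prog_fd, struct bpf_prog_info *info)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = prog_fd;
	attr.info.info_len = sizeof(*info);
	attr.info.info = (unsigned long)info;

	return syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
}

static struct bpf_insn *dump_xlated(int prog_fd, unsigned int *cnt)
{
	struct bpf_prog_info info;
	struct bpf_insn *insns;
	unsigned int len;

	/* first pass: learn the translated program size */
	memset(&info, 0, sizeof(info));
	if (prog_get_info(prog_fd, &info))
		return NULL;
	len = info.xlated_prog_len;

	insns = calloc(1, len);
	if (!insns)
		return NULL;

	/* second pass: hand the kernel a buffer of that size */
	memset(&info, 0, sizeof(info));
	info.xlated_prog_len = len;
	info.xlated_prog_insns = (unsigned long)insns;
	if (prog_get_info(prog_fd, &info)) {
		free(insns);
		return NULL;
	}

	*cnt = len / sizeof(*insns);
	return insns;
}
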
  
diff --combined kernel/events/core.c
index 56d2b99de409,878d86c513d6..0f2fe78c2fa2
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@@ -4511,11 -4511,11 +4511,11 @@@ perf_read(struct file *file, char __use
  	return ret;
  }
  
 -static unsigned int perf_poll(struct file *file, poll_table *wait)
 +static __poll_t perf_poll(struct file *file, poll_table *wait)
  {
  	struct perf_event *event = file->private_data;
  	struct ring_buffer *rb;
 -	unsigned int events = POLLHUP;
 +	__poll_t events = POLLHUP;
  
  	poll_wait(file, &event->waitq, wait);
  
@@@ -4723,6 -4723,9 +4723,9 @@@ static long _perf_ioctl(struct perf_eve
  		rcu_read_unlock();
  		return 0;
  	}
+ 
+ 	case PERF_EVENT_IOC_QUERY_BPF:
+ 		return perf_event_query_prog_array(event, (void __user *)arg);
  	default:
  		return -ENOTTY;
  	}
@@@ -4904,7 -4907,6 +4907,7 @@@ void perf_event_update_userpage(struct 
  unlock:
  	rcu_read_unlock();
  }
 +EXPORT_SYMBOL_GPL(perf_event_update_userpage);
  
  static int perf_mmap_fault(struct vm_fault *vmf)
  {
@@@ -8081,6 -8083,13 +8084,13 @@@ static int perf_event_set_bpf_prog(stru
  		return -EINVAL;
  	}
  
+ 	/* Kprobe override only works for kprobes, not uprobes. */
+ 	if (prog->kprobe_override &&
+ 	    !(event->tp_event->flags & TRACE_EVENT_FL_KPROBE)) {
+ 		bpf_prog_put(prog);
+ 		return -EINVAL;
+ 	}
+ 
  	if (is_tracepoint || is_syscall_tp) {
  		int off = trace_event_get_offsets(event->tp_event);
  
diff --combined kernel/module.c
index 8042b8fcbf14,bd695bfdc5c4..83075a104710
--- a/kernel/module.c
+++ b/kernel/module.c
@@@ -3118,7 -3118,11 +3118,11 @@@ static int find_module_sections(struct 
  					     sizeof(*mod->ftrace_callsites),
  					     &mod->num_ftrace_callsites);
  #endif
- 
+ #ifdef CONFIG_BPF_KPROBE_OVERRIDE
+ 	mod->kprobe_ei_funcs = section_objs(info, "_kprobe_error_inject_list",
+ 					    sizeof(*mod->kprobe_ei_funcs),
+ 					    &mod->num_kprobe_ei_funcs);
+ #endif
  	mod->extable = section_objs(info, "__ex_table",
  				    sizeof(*mod->extable), &mod->num_exentries);
  
@@@ -3938,12 -3942,6 +3942,12 @@@ static const char *get_ksymbol(struct m
  	return symname(kallsyms, best);
  }
  
 +void * __weak dereference_module_function_descriptor(struct module *mod,
 +						     void *ptr)
 +{
 +	return ptr;
 +}
 +
  /* For kallsyms to ask for address resolution.  NULL means not found.  Careful
   * not to lock to avoid deadlock on oopses, simply disable preemption. */
  const char *module_address_lookup(unsigned long addr,
diff --combined net/atm/common.c
index 8f12f1c6fa14,5763fd241dc3..6523f38c4957
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@@ -14,7 -14,7 +14,7 @@@
  #include <linux/capability.h>
  #include <linux/mm.h>
  #include <linux/sched/signal.h>
- #include <linux/time.h>		/* struct timeval */
+ #include <linux/time64.h>	/* 64-bit time for seconds */
  #include <linux/skbuff.h>
  #include <linux/bitops.h>
  #include <linux/init.h>
@@@ -648,11 -648,11 +648,11 @@@ out
  	return error;
  }
  
 -unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
 +__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
  {
  	struct sock *sk = sock->sk;
  	struct atm_vcc *vcc;
 -	unsigned int mask;
 +	__poll_t mask;
  
  	sock_poll_wait(file, sk_sleep(sk), wait);
  	mask = 0;
diff --combined net/batman-adv/icmp_socket.c
index a98e0a986cef,8041cf106c37..581375d0eed2
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
   *
   * Marek Lindner
@@@ -26,6 -27,7 +27,7 @@@
  #include <linux/export.h>
  #include <linux/fcntl.h>
  #include <linux/fs.h>
+ #include <linux/gfp.h>
  #include <linux/if_ether.h>
  #include <linux/kernel.h>
  #include <linux/list.h>
@@@ -42,11 -44,11 +44,11 @@@
  #include <linux/string.h>
  #include <linux/uaccess.h>
  #include <linux/wait.h>
+ #include <uapi/linux/batadv_packet.h>
  
  #include "hard-interface.h"
  #include "log.h"
  #include "originator.h"
- #include "packet.h"
  #include "send.h"
  
  static struct batadv_socket_client *batadv_socket_client_hash[256];
@@@ -55,6 -57,9 +57,9 @@@ static void batadv_socket_add_packet(st
  				     struct batadv_icmp_header *icmph,
  				     size_t icmp_len);
  
+ /**
+  * batadv_socket_init() - Initialize soft interface independent socket data
+  */
  void batadv_socket_init(void)
  {
  	memset(batadv_socket_client_hash, 0, sizeof(batadv_socket_client_hash));
@@@ -292,7 -297,7 +297,7 @@@ out
  	return len;
  }
  
 -static unsigned int batadv_socket_poll(struct file *file, poll_table *wait)
 +static __poll_t batadv_socket_poll(struct file *file, poll_table *wait)
  {
  	struct batadv_socket_client *socket_client = file->private_data;
  
@@@ -314,6 -319,12 +319,12 @@@ static const struct file_operations bat
  	.llseek = no_llseek,
  };
  
+ /**
+  * batadv_socket_setup() - Create debugfs "socket" file
+  * @bat_priv: the bat priv with all the soft interface information
+  *
+  * Return: 0 on success or negative error number in case of failure
+  */
  int batadv_socket_setup(struct batadv_priv *bat_priv)
  {
  	struct dentry *d;
@@@ -333,7 -344,7 +344,7 @@@ err
  }
  
  /**
-  * batadv_socket_add_packet - schedule an icmp packet to be sent to
+  * batadv_socket_add_packet() - schedule an icmp packet to be sent to
   *  userspace on an icmp socket.
   * @socket_client: the socket this packet belongs to
   * @icmph: pointer to the header of the icmp packet
@@@ -390,7 -401,7 +401,7 @@@ static void batadv_socket_add_packet(st
  }
  
  /**
-  * batadv_socket_receive_packet - schedule an icmp packet to be received
+  * batadv_socket_receive_packet() - schedule an icmp packet to be received
   *  locally and sent to userspace.
   * @icmph: pointer to the header of the icmp packet
   * @icmp_len: total length of the icmp packet
diff --combined net/batman-adv/log.c
index 76451460c98d,da004980ab8b..9be74a44e99d
--- a/net/batman-adv/log.c
+++ b/net/batman-adv/log.c
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
   *
   * Marek Lindner
@@@ -24,6 -25,7 +25,7 @@@
  #include <linux/export.h>
  #include <linux/fcntl.h>
  #include <linux/fs.h>
+ #include <linux/gfp.h>
  #include <linux/jiffies.h>
  #include <linux/kernel.h>
  #include <linux/module.h>
@@@ -86,6 -88,13 +88,13 @@@ static int batadv_fdebug_log(struct bat
  	return 0;
  }
  
+ /**
+  * batadv_debug_log() - Add debug log entry
+  * @bat_priv: the bat priv with all the soft interface information
+  * @fmt: format string
+  *
+  * Return: 0 on success or negative error number in case of failure
+  */
  int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
  {
  	va_list args;
@@@ -176,7 -185,7 +185,7 @@@ static ssize_t batadv_log_read(struct f
  	return error;
  }
  
 -static unsigned int batadv_log_poll(struct file *file, poll_table *wait)
 +static __poll_t batadv_log_poll(struct file *file, poll_table *wait)
  {
  	struct batadv_priv *bat_priv = file->private_data;
  	struct batadv_priv_debug_log *debug_log = bat_priv->debug_log;
@@@ -197,6 -206,12 +206,12 @@@ static const struct file_operations bat
  	.llseek         = no_llseek,
  };
  
+ /**
+  * batadv_debug_log_setup() - Initialize debug log
+  * @bat_priv: the bat priv with all the soft interface information
+  *
+  * Return: 0 on success or negative error number in case of failure
+  */
  int batadv_debug_log_setup(struct batadv_priv *bat_priv)
  {
  	struct dentry *d;
@@@ -222,6 -237,10 +237,10 @@@ err
  	return -ENOMEM;
  }
  
+ /**
+  * batadv_debug_log_cleanup() - Destroy debug log
+  * @bat_priv: the bat priv with all the soft interface information
+  */
  void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
  {
  	kfree(bat_priv->debug_log);
diff --combined net/bluetooth/af_bluetooth.c
index 671b907ba678,f044202346c6..f897681780db
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@@ -421,7 -421,7 +421,7 @@@ out
  }
  EXPORT_SYMBOL(bt_sock_stream_recvmsg);
  
 -static inline unsigned int bt_accept_poll(struct sock *parent)
 +static inline __poll_t bt_accept_poll(struct sock *parent)
  {
  	struct bt_sock *s, *n;
  	struct sock *sk;
@@@ -437,11 -437,11 +437,11 @@@
  	return 0;
  }
  
 -unsigned int bt_sock_poll(struct file *file, struct socket *sock,
 +__poll_t bt_sock_poll(struct file *file, struct socket *sock,
  			  poll_table *wait)
  {
  	struct sock *sk = sock->sk;
 -	unsigned int mask = 0;
 +	__poll_t mask = 0;
  
  	BT_DBG("sock %p, sk %p", sock, sk);
  
@@@ -766,43 -766,39 +766,39 @@@ static int __init bt_init(void
  		return err;
  
  	err = sock_register(&bt_sock_family_ops);
- 	if (err < 0) {
- 		bt_sysfs_cleanup();
- 		return err;
- 	}
+ 	if (err)
+ 		goto cleanup_sysfs;
  
  	BT_INFO("HCI device and connection manager initialized");
  
  	err = hci_sock_init();
- 	if (err < 0)
- 		goto error;
+ 	if (err)
+ 		goto unregister_socket;
  
  	err = l2cap_init();
- 	if (err < 0)
- 		goto sock_err;
+ 	if (err)
+ 		goto cleanup_socket;
  
  	err = sco_init();
- 	if (err < 0) {
- 		l2cap_exit();
- 		goto sock_err;
- 	}
+ 	if (err)
+ 		goto cleanup_cap;
  
  	err = mgmt_init();
- 	if (err < 0) {
- 		sco_exit();
- 		l2cap_exit();
- 		goto sock_err;
- 	}
+ 	if (err)
+ 		goto cleanup_sco;
  
  	return 0;
  
- sock_err:
+ cleanup_sco:
+ 	sco_exit();
+ cleanup_cap:
+ 	l2cap_exit();
+ cleanup_socket:
  	hci_sock_cleanup();
- 
- error:
+ unregister_socket:
  	sock_unregister(PF_BLUETOOTH);
+ cleanup_sysfs:
  	bt_sysfs_cleanup();
- 
  	return err;
  }
  
diff --combined net/core/dev.c
index 0e0ba36eeac9,2eb66c0d9cdb..dc3bb37ba252
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@@ -1146,19 -1146,7 +1146,19 @@@ EXPORT_SYMBOL(dev_alloc_name)
  int dev_get_valid_name(struct net *net, struct net_device *dev,
  		       const char *name)
  {
 -	return dev_alloc_name_ns(net, dev, name);
 +	BUG_ON(!net);
 +
 +	if (!dev_valid_name(name))
 +		return -EINVAL;
 +
 +	if (strchr(name, '%'))
 +		return dev_alloc_name_ns(net, dev, name);
 +	else if (__dev_get_by_name(net, name))
 +		return -EEXIST;
 +	else if (dev->name != name)
 +		strlcpy(dev->name, name, IFNAMSIZ);
 +
 +	return 0;
  }
  EXPORT_SYMBOL(dev_get_valid_name);
  
@@@ -1554,6 -1542,23 +1554,23 @@@ void dev_disable_lro(struct net_device 
  }
  EXPORT_SYMBOL(dev_disable_lro);
  
+ /**
+  *	dev_disable_gro_hw - disable HW Generic Receive Offload on a device
+  *	@dev: device
+  *
+  *	Disable HW Generic Receive Offload (GRO_HW) on a net device.  Must be
+  *	called under RTNL.  This is needed if Generic XDP is installed on
+  *	the device.
+  */
+ static void dev_disable_gro_hw(struct net_device *dev)
+ {
+ 	dev->wanted_features &= ~NETIF_F_GRO_HW;
+ 	netdev_update_features(dev);
+ 
+ 	if (unlikely(dev->features & NETIF_F_GRO_HW))
+ 		netdev_WARN(dev, "failed to disable GRO_HW!\n");
+ }
+ 
  static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
  				   struct net_device *dev)
  {
@@@ -2815,7 -2820,7 +2832,7 @@@ struct sk_buff *__skb_gso_segment(struc
  
  	segs = skb_mac_gso_segment(skb, features);
  
- 	if (unlikely(skb_needs_check(skb, tx_path)))
+ 	if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
  		skb_warn_bad_offload(skb);
  
  	return segs;
@@@ -3054,7 -3059,7 +3071,7 @@@ int skb_csum_hwoffload_help(struct sk_b
  }
  EXPORT_SYMBOL(skb_csum_hwoffload_help);
  
- static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
+ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
  {
  	netdev_features_t features;
  
@@@ -3078,9 -3083,6 +3095,6 @@@
  		    __skb_linearize(skb))
  			goto out_kfree_skb;
  
- 		if (validate_xmit_xfrm(skb, features))
- 			goto out_kfree_skb;
- 
  		/* If packet is not checksummed and device does not
  		 * support checksumming for this protocol, complete
  		 * checksumming here.
@@@ -3097,6 -3099,8 +3111,8 @@@
  		}
  	}
  
+ 	skb = validate_xmit_xfrm(skb, features, again);
+ 
  	return skb;
  
  out_kfree_skb:
@@@ -3106,7 -3110,7 +3122,7 @@@ out_null
  	return NULL;
  }
  
- struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
+ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
  {
  	struct sk_buff *next, *head = NULL, *tail;
  
@@@ -3117,7 -3121,7 +3133,7 @@@
  		/* in case skb wont be segmented, point to itself */
  		skb->prev = skb;
  
- 		skb = validate_xmit_skb(skb, dev);
+ 		skb = validate_xmit_skb(skb, dev, again);
  		if (!skb)
  			continue;
  
@@@ -3174,6 -3178,21 +3190,21 @@@ static inline int __dev_xmit_skb(struc
  	int rc;
  
  	qdisc_calculate_pkt_len(skb, q);
+ 
+ 	if (q->flags & TCQ_F_NOLOCK) {
+ 		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+ 			__qdisc_drop(skb, &to_free);
+ 			rc = NET_XMIT_DROP;
+ 		} else {
+ 			rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+ 			__qdisc_run(q);
+ 		}
+ 
+ 		if (unlikely(to_free))
+ 			kfree_skb_list(to_free);
+ 		return rc;
+ 	}
+ 
  	/*
  	 * Heuristic to force contended enqueues to serialize on a
  	 * separate lock before trying to get qdisc main lock.
@@@ -3204,9 -3223,9 +3235,9 @@@
  				contended = false;
  			}
  			__qdisc_run(q);
- 		} else
- 			qdisc_run_end(q);
+ 		}
  
+ 		qdisc_run_end(q);
  		rc = NET_XMIT_SUCCESS;
  	} else {
  		rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
@@@ -3216,6 -3235,7 +3247,7 @@@
  				contended = false;
  			}
  			__qdisc_run(q);
+ 			qdisc_run_end(q);
  		}
  	}
  	spin_unlock(root_lock);
@@@ -3428,6 -3448,7 +3460,7 @@@ static int __dev_queue_xmit(struct sk_b
  	struct netdev_queue *txq;
  	struct Qdisc *q;
  	int rc = -ENOMEM;
+ 	bool again = false;
  
  	skb_reset_mac_header(skb);
  
@@@ -3489,7 -3510,7 +3522,7 @@@
  				     XMIT_RECURSION_LIMIT))
  				goto recursion_alert;
  
- 			skb = validate_xmit_skb(skb, dev);
+ 			skb = validate_xmit_skb(skb, dev, &again);
  			if (!skb)
  				goto out;
  
@@@ -4155,21 -4176,26 +4188,26 @@@ static __latent_entropy void net_tx_act
  
  		while (head) {
  			struct Qdisc *q = head;
- 			spinlock_t *root_lock;
+ 			spinlock_t *root_lock = NULL;
  
  			head = head->next_sched;
  
- 			root_lock = qdisc_lock(q);
- 			spin_lock(root_lock);
+ 			if (!(q->flags & TCQ_F_NOLOCK)) {
+ 				root_lock = qdisc_lock(q);
+ 				spin_lock(root_lock);
+ 			}
  			/* We need to make sure head->next_sched is read
  			 * before clearing __QDISC_STATE_SCHED
  			 */
  			smp_mb__before_atomic();
  			clear_bit(__QDISC_STATE_SCHED, &q->state);
  			qdisc_run(q);
- 			spin_unlock(root_lock);
+ 			if (root_lock)
+ 				spin_unlock(root_lock);
  		}
  	}
+ 
+ 	xfrm_dev_backlog(sd);
  }
  
  #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
@@@ -4557,6 -4583,7 +4595,7 @@@ static int generic_xdp_install(struct n
  		} else if (new && !old) {
  			static_key_slow_inc(&generic_xdp_needed);
  			dev_disable_lro(dev);
+ 			dev_disable_gro_hw(dev);
  		}
  		break;
  
@@@ -7085,17 -7112,21 +7124,21 @@@ int dev_change_proto_down(struct net_de
  }
  EXPORT_SYMBOL(dev_change_proto_down);
  
- u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op, u32 *prog_id)
+ void __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op,
+ 		     struct netdev_bpf *xdp)
  {
- 	struct netdev_bpf xdp;
- 
- 	memset(&xdp, 0, sizeof(xdp));
- 	xdp.command = XDP_QUERY_PROG;
+ 	memset(xdp, 0, sizeof(*xdp));
+ 	xdp->command = XDP_QUERY_PROG;
  
  	/* Query must always succeed. */
- 	WARN_ON(bpf_op(dev, &xdp) < 0);
- 	if (prog_id)
- 		*prog_id = xdp.prog_id;
+ 	WARN_ON(bpf_op(dev, xdp) < 0);
+ }
+ 
+ static u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op)
+ {
+ 	struct netdev_bpf xdp;
+ 
+ 	__dev_xdp_query(dev, bpf_op, &xdp);
  
  	return xdp.prog_attached;
  }
@@@ -7118,6 -7149,27 +7161,27 @@@ static int dev_xdp_install(struct net_d
  	return bpf_op(dev, &xdp);
  }
  
+ static void dev_xdp_uninstall(struct net_device *dev)
+ {
+ 	struct netdev_bpf xdp;
+ 	bpf_op_t ndo_bpf;
+ 
+ 	/* Remove generic XDP */
+ 	WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL));
+ 
+ 	/* Remove from the driver */
+ 	ndo_bpf = dev->netdev_ops->ndo_bpf;
+ 	if (!ndo_bpf)
+ 		return;
+ 
+ 	__dev_xdp_query(dev, ndo_bpf, &xdp);
+ 	if (xdp.prog_attached == XDP_ATTACHED_NONE)
+ 		return;
+ 
+ 	/* Program removal should always succeed */
+ 	WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, NULL));
+ }
+ 
  /**
   *	dev_change_xdp_fd - set or clear a bpf program for a device rx path
   *	@dev: device
@@@ -7146,10 -7198,10 +7210,10 @@@ int dev_change_xdp_fd(struct net_devic
  		bpf_chk = generic_xdp_install;
  
  	if (fd >= 0) {
- 		if (bpf_chk && __dev_xdp_attached(dev, bpf_chk, NULL))
+ 		if (bpf_chk && __dev_xdp_attached(dev, bpf_chk))
  			return -EEXIST;
  		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
- 		    __dev_xdp_attached(dev, bpf_op, NULL))
+ 		    __dev_xdp_attached(dev, bpf_op))
  			return -EBUSY;
  
  		prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
@@@ -7248,6 -7300,7 +7312,7 @@@ static void rollback_registered_many(st
  		/* Shutdown queueing discipline. */
  		dev_shutdown(dev);
  
+ 		dev_xdp_uninstall(dev);
  
  		/* Notify protocols, that we are about to destroy
  		 * this device. They should clean all the things.
@@@ -7391,6 -7444,18 +7456,18 @@@ static netdev_features_t netdev_fix_fea
  		features &= ~dev->gso_partial_features;
  	}
  
+ 	if (!(features & NETIF_F_RXCSUM)) {
+ 		/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
+ 		 * successfully merged by hardware must also have the
+ 		 * checksum verified by hardware.  If the user does not
+ 		 * want to enable RXCSUM, logically, we should disable GRO_HW.
+ 		 */
+ 		if (features & NETIF_F_GRO_HW) {
+ 			netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
+ 			features &= ~NETIF_F_GRO_HW;
+ 		}
+ 	}
+ 
  	return features;
  }
  
@@@ -8207,7 -8272,6 +8284,6 @@@ EXPORT_SYMBOL(alloc_netdev_mqs)
  void free_netdev(struct net_device *dev)
  {
  	struct napi_struct *p, *n;
- 	struct bpf_prog *prog;
  
  	might_sleep();
  	netif_free_tx_queues(dev);
@@@ -8226,12 -8290,6 +8302,6 @@@
  	free_percpu(dev->pcpu_refcnt);
  	dev->pcpu_refcnt = NULL;
  
- 	prog = rcu_dereference_protected(dev->xdp_prog, 1);
- 	if (prog) {
- 		bpf_prog_put(prog);
- 		static_key_slow_dec(&generic_xdp_needed);
- 	}
- 
  	/*  Compatibility with error handling in drivers */
  	if (dev->reg_state == NETREG_UNINITIALIZED) {
  		netdev_freemem(dev);
@@@ -8819,6 -8877,9 +8889,9 @@@ static int __init net_dev_init(void
  
  		skb_queue_head_init(&sd->input_pkt_queue);
  		skb_queue_head_init(&sd->process_queue);
+ #ifdef CONFIG_XFRM_OFFLOAD
+ 		skb_queue_head_init(&sd->xfrm_backlog);
+ #endif
  		INIT_LIST_HEAD(&sd->poll_list);
  		sd->output_queue_tailp = &sd->output_queue;
  #ifdef CONFIG_RPS
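
The net/core/dev.c hunks above replace __dev_xdp_attached()'s prog_id out-parameter with the more general __dev_xdp_query() helper that fills a whole struct netdev_bpf. A minimal sketch of a caller, modelled on the rtnl_xdp_attached_mode() hunk further down in this merge; the name example_xdp_mode() is hypothetical:

static u8 example_xdp_mode(struct net_device *dev)
{
	struct netdev_bpf xdp;

	ASSERT_RTNL();

	if (!dev->netdev_ops->ndo_bpf)
		return XDP_ATTACHED_NONE;

	/* __dev_xdp_query() zeroes xdp, issues XDP_QUERY_PROG and WARNs if
	 * the driver query fails, so the result is always initialised.
	 */
	__dev_xdp_query(dev, dev->netdev_ops->ndo_bpf, &xdp);

	return xdp.prog_attached;
}
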
diff --combined net/core/ethtool.c
index 8225416911ae,50a79203043b..fff6314f4c5e
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@@ -73,6 -73,7 +73,7 @@@ static const char netdev_features_strin
  	[NETIF_F_LLTX_BIT] =             "tx-lockless",
  	[NETIF_F_NETNS_LOCAL_BIT] =      "netns-local",
  	[NETIF_F_GRO_BIT] =              "rx-gro",
+ 	[NETIF_F_GRO_HW_BIT] =           "rx-gro-hw",
  	[NETIF_F_LRO_BIT] =              "rx-lro",
  
  	[NETIF_F_TSO_BIT] =              "tx-tcp-segmentation",
@@@ -770,6 -771,15 +771,6 @@@ static int ethtool_set_link_ksettings(s
  	return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings);
  }
  
 -static void
 -warn_incomplete_ethtool_legacy_settings_conversion(const char *details)
 -{
 -	char name[sizeof(current->comm)];
 -
 -	pr_info_once("warning: `%s' uses legacy ethtool link settings API, %s\n",
 -		     get_task_comm(name, current), details);
 -}
 -
  /* Query device for its ethtool_cmd settings.
   *
   * Backward compatibility note: for compatibility with legacy ethtool,
@@@ -796,8 -806,10 +797,8 @@@ static int ethtool_get_settings(struct 
  							   &link_ksettings);
  		if (err < 0)
  			return err;
 -		if (!convert_link_ksettings_to_legacy_settings(&cmd,
 -							       &link_ksettings))
 -			warn_incomplete_ethtool_legacy_settings_conversion(
 -				"link modes are only partially reported");
 +		convert_link_ksettings_to_legacy_settings(&cmd,
 +							  &link_ksettings);
  
  		/* send a sensible cmd tag back to user */
  		cmd.cmd = ETHTOOL_GSET;
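
The ethtool hunk exposes the new NETIF_F_GRO_HW bit as the "rx-gro-hw" feature string. A hedged sketch of how a driver might advertise it; the helper name and feature mix are illustrative, not taken from any driver in this merge, and netdev_fix_features() above clears GRO_HW again whenever RXCSUM is switched off:

static void example_set_features(struct net_device *dev)
{
	/* Advertise hardware GRO so it can be toggled with
	 * "ethtool -K <dev> rx-gro-hw on|off".
	 */
	dev->hw_features |= NETIF_F_GRO_HW | NETIF_F_RXCSUM | NETIF_F_GRO;
	dev->features    |= NETIF_F_GRO_HW | NETIF_F_RXCSUM | NETIF_F_GRO;
}
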
diff --combined net/core/rtnetlink.c
index 778d7f03404a,c688dc564b11..a0500eeda344
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@@ -62,7 -62,9 +62,9 @@@
  struct rtnl_link {
  	rtnl_doit_func		doit;
  	rtnl_dumpit_func	dumpit;
+ 	struct module		*owner;
  	unsigned int		flags;
+ 	struct rcu_head		rcu;
  };
  
  static DEFINE_MUTEX(rtnl_mutex);
@@@ -127,8 -129,7 +129,7 @@@ bool lockdep_rtnl_is_held(void
  EXPORT_SYMBOL(lockdep_rtnl_is_held);
  #endif /* #ifdef CONFIG_PROVE_LOCKING */
  
- static struct rtnl_link __rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
- static refcount_t rtnl_msg_handlers_ref[RTNL_FAMILY_MAX + 1];
+ static struct rtnl_link *__rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
  
  static inline int rtm_msgindex(int msgtype)
  {
@@@ -144,72 -145,127 +145,127 @@@
  	return msgindex;
  }
  
- /**
-  * __rtnl_register - Register a rtnetlink message type
-  * @protocol: Protocol family or PF_UNSPEC
-  * @msgtype: rtnetlink message type
-  * @doit: Function pointer called for each request message
-  * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
-  * @flags: rtnl_link_flags to modifiy behaviour of doit/dumpit functions
-  *
-  * Registers the specified function pointers (at least one of them has
-  * to be non-NULL) to be called whenever a request message for the
-  * specified protocol family and message type is received.
-  *
-  * The special protocol family PF_UNSPEC may be used to define fallback
-  * function pointers for the case when no entry for the specific protocol
-  * family exists.
-  *
-  * Returns 0 on success or a negative error code.
-  */
- int __rtnl_register(int protocol, int msgtype,
- 		    rtnl_doit_func doit, rtnl_dumpit_func dumpit,
- 		    unsigned int flags)
+ static struct rtnl_link *rtnl_get_link(int protocol, int msgtype)
+ {
+ 	struct rtnl_link **tab;
+ 
+ 	if (protocol >= ARRAY_SIZE(rtnl_msg_handlers))
+ 		protocol = PF_UNSPEC;
+ 
+ 	tab = rcu_dereference_rtnl(rtnl_msg_handlers[protocol]);
+ 	if (!tab)
+ 		tab = rcu_dereference_rtnl(rtnl_msg_handlers[PF_UNSPEC]);
+ 
+ 	return tab[msgtype];
+ }
+ 
+ static int rtnl_register_internal(struct module *owner,
+ 				  int protocol, int msgtype,
+ 				  rtnl_doit_func doit, rtnl_dumpit_func dumpit,
+ 				  unsigned int flags)
  {
- 	struct rtnl_link *tab;
+ 	struct rtnl_link *link, *old;
+ 	struct rtnl_link __rcu **tab;
  	int msgindex;
+ 	int ret = -ENOBUFS;
  
  	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
  	msgindex = rtm_msgindex(msgtype);
  
- 	tab = rcu_dereference_raw(rtnl_msg_handlers[protocol]);
+ 	rtnl_lock();
+ 	tab = rtnl_msg_handlers[protocol];
  	if (tab == NULL) {
- 		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL);
- 		if (tab == NULL)
- 			return -ENOBUFS;
+ 		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(void *), GFP_KERNEL);
+ 		if (!tab)
+ 			goto unlock;
  
+ 		/* ensures we see the 0 stores */
  		rcu_assign_pointer(rtnl_msg_handlers[protocol], tab);
  	}
  
+ 	old = rtnl_dereference(tab[msgindex]);
+ 	if (old) {
+ 		link = kmemdup(old, sizeof(*old), GFP_KERNEL);
+ 		if (!link)
+ 			goto unlock;
+ 	} else {
+ 		link = kzalloc(sizeof(*link), GFP_KERNEL);
+ 		if (!link)
+ 			goto unlock;
+ 	}
+ 
+ 	WARN_ON(link->owner && link->owner != owner);
+ 	link->owner = owner;
+ 
+ 	WARN_ON(doit && link->doit && link->doit != doit);
  	if (doit)
- 		tab[msgindex].doit = doit;
+ 		link->doit = doit;
+ 	WARN_ON(dumpit && link->dumpit && link->dumpit != dumpit);
  	if (dumpit)
- 		tab[msgindex].dumpit = dumpit;
- 	tab[msgindex].flags |= flags;
+ 		link->dumpit = dumpit;
  
- 	return 0;
+ 	link->flags |= flags;
+ 
+ 	/* publish protocol:msgtype */
+ 	rcu_assign_pointer(tab[msgindex], link);
+ 	ret = 0;
+ 	if (old)
+ 		kfree_rcu(old, rcu);
+ unlock:
+ 	rtnl_unlock();
+ 	return ret;
+ }
+ 
+ /**
+  * rtnl_register_module - Register a rtnetlink message type
+  *
+  * @owner: module registering the hook (THIS_MODULE)
+  * @protocol: Protocol family or PF_UNSPEC
+  * @msgtype: rtnetlink message type
+  * @doit: Function pointer called for each request message
+  * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
+  * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
+  *
+  * Like rtnl_register, but for use by removable modules.
+  */
+ int rtnl_register_module(struct module *owner,
+ 			 int protocol, int msgtype,
+ 			 rtnl_doit_func doit, rtnl_dumpit_func dumpit,
+ 			 unsigned int flags)
+ {
+ 	return rtnl_register_internal(owner, protocol, msgtype,
+ 				      doit, dumpit, flags);
  }
- EXPORT_SYMBOL_GPL(__rtnl_register);
+ EXPORT_SYMBOL_GPL(rtnl_register_module);
  
  /**
   * rtnl_register - Register a rtnetlink message type
+  * @protocol: Protocol family or PF_UNSPEC
+  * @msgtype: rtnetlink message type
+  * @doit: Function pointer called for each request message
+  * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
+  * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions
   *
-  * Identical to __rtnl_register() but panics on failure. This is useful
-  * as failure of this function is very unlikely, it can only happen due
-  * to lack of memory when allocating the chain to store all message
-  * handlers for a protocol. Meant for use in init functions where lack
-  * of memory implies no sense in continuing.
+  * Registers the specified function pointers (at least one of them has
+  * to be non-NULL) to be called whenever a request message for the
+  * specified protocol family and message type is received.
+  *
+  * The special protocol family PF_UNSPEC may be used to define fallback
+  * function pointers for the case when no entry for the specific protocol
+  * family exists.
   */
  void rtnl_register(int protocol, int msgtype,
  		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
  		   unsigned int flags)
  {
- 	if (__rtnl_register(protocol, msgtype, doit, dumpit, flags) < 0)
- 		panic("Unable to register rtnetlink message handler, "
- 		      "protocol = %d, message type = %d\n",
- 		      protocol, msgtype);
+ 	int err;
+ 
+ 	err = rtnl_register_internal(NULL, protocol, msgtype, doit, dumpit,
+ 				     flags);
+ 	if (err)
+ 		pr_err("Unable to register rtnetlink message handler, "
+ 		       "protocol = %d, message type = %d\n", protocol, msgtype);
  }
- EXPORT_SYMBOL_GPL(rtnl_register);
  
  /**
   * rtnl_unregister - Unregister a rtnetlink message type
@@@ -220,24 -276,25 +276,25 @@@
   */
  int rtnl_unregister(int protocol, int msgtype)
  {
- 	struct rtnl_link *handlers;
+ 	struct rtnl_link **tab, *link;
  	int msgindex;
  
  	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
  	msgindex = rtm_msgindex(msgtype);
  
  	rtnl_lock();
- 	handlers = rtnl_dereference(rtnl_msg_handlers[protocol]);
- 	if (!handlers) {
+ 	tab = rtnl_dereference(rtnl_msg_handlers[protocol]);
+ 	if (!tab) {
  		rtnl_unlock();
  		return -ENOENT;
  	}
  
- 	handlers[msgindex].doit = NULL;
- 	handlers[msgindex].dumpit = NULL;
- 	handlers[msgindex].flags = 0;
+ 	link = tab[msgindex];
+ 	rcu_assign_pointer(tab[msgindex], NULL);
  	rtnl_unlock();
  
+ 	kfree_rcu(link, rcu);
+ 
  	return 0;
  }
  EXPORT_SYMBOL_GPL(rtnl_unregister);
@@@ -251,20 -308,27 +308,27 @@@
   */
  void rtnl_unregister_all(int protocol)
  {
- 	struct rtnl_link *handlers;
+ 	struct rtnl_link **tab, *link;
+ 	int msgindex;
  
  	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
  
  	rtnl_lock();
- 	handlers = rtnl_dereference(rtnl_msg_handlers[protocol]);
+ 	tab = rtnl_msg_handlers[protocol];
  	RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
+ 	for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
+ 		link = tab[msgindex];
+ 		if (!link)
+ 			continue;
+ 
+ 		rcu_assign_pointer(tab[msgindex], NULL);
+ 		kfree_rcu(link, rcu);
+ 	}
  	rtnl_unlock();
  
  	synchronize_net();
  
- 	while (refcount_read(&rtnl_msg_handlers_ref[protocol]) > 1)
- 		schedule();
- 	kfree(handlers);
+ 	kfree(tab);
  }
  EXPORT_SYMBOL_GPL(rtnl_unregister_all);
  
@@@ -1261,6 -1325,7 +1325,7 @@@ static u8 rtnl_xdp_attached_mode(struc
  {
  	const struct net_device_ops *ops = dev->netdev_ops;
  	const struct bpf_prog *generic_xdp_prog;
+ 	struct netdev_bpf xdp;
  
  	ASSERT_RTNL();
  
@@@ -1273,7 -1338,10 +1338,10 @@@
  	if (!ops->ndo_bpf)
  		return XDP_ATTACHED_NONE;
  
- 	return __dev_xdp_attached(dev, ops->ndo_bpf, prog_id);
+ 	__dev_xdp_query(dev, ops->ndo_bpf, &xdp);
+ 	*prog_id = xdp.prog_id;
+ 
+ 	return xdp.prog_attached;
  }
  
  static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
@@@ -1569,6 -1637,8 +1637,8 @@@ static const struct nla_policy ifla_pol
  	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
  	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
  	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
+ 	[IFLA_GSO_MAX_SEGS]	= { .type = NLA_U32 },
+ 	[IFLA_GSO_MAX_SIZE]	= { .type = NLA_U32 },
  	[IFLA_PHYS_PORT_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
  	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },  /* ignored */
  	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
@@@ -1681,18 -1751,18 +1751,18 @@@ static bool link_dump_filtered(struct n
  	return false;
  }
  
 -static struct net *get_target_net(struct sk_buff *skb, int netnsid)
 +static struct net *get_target_net(struct sock *sk, int netnsid)
  {
  	struct net *net;
  
 -	net = get_net_ns_by_id(sock_net(skb->sk), netnsid);
 +	net = get_net_ns_by_id(sock_net(sk), netnsid);
  	if (!net)
  		return ERR_PTR(-EINVAL);
  
  	/* For now, the caller is required to have CAP_NET_ADMIN in
  	 * the user namespace owning the target net ns.
  	 */
 -	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
 +	if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
  		put_net(net);
  		return ERR_PTR(-EACCES);
  	}
@@@ -1733,7 -1803,7 +1803,7 @@@ static int rtnl_dump_ifinfo(struct sk_b
  			ifla_policy, NULL) >= 0) {
  		if (tb[IFLA_IF_NETNSID]) {
  			netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
 -			tgt_net = get_target_net(skb, netnsid);
 +			tgt_net = get_target_net(skb->sk, netnsid);
  			if (IS_ERR(tgt_net)) {
  				tgt_net = net;
  				netnsid = -1;
@@@ -2219,6 -2289,34 +2289,34 @@@ static int do_setlink(const struct sk_b
  		}
  	}
  
+ 	if (tb[IFLA_GSO_MAX_SIZE]) {
+ 		u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
+ 
+ 		if (max_size > GSO_MAX_SIZE) {
+ 			err = -EINVAL;
+ 			goto errout;
+ 		}
+ 
+ 		if (dev->gso_max_size ^ max_size) {
+ 			netif_set_gso_max_size(dev, max_size);
+ 			status |= DO_SETLINK_MODIFIED;
+ 		}
+ 	}
+ 
+ 	if (tb[IFLA_GSO_MAX_SEGS]) {
+ 		u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
+ 
+ 		if (max_segs > GSO_MAX_SEGS) {
+ 			err = -EINVAL;
+ 			goto errout;
+ 		}
+ 
+ 		if (dev->gso_max_segs ^ max_segs) {
+ 			dev->gso_max_segs = max_segs;
+ 			status |= DO_SETLINK_MODIFIED;
+ 		}
+ 	}
+ 
  	if (tb[IFLA_OPERSTATE])
  		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
  
@@@ -2583,6 -2681,10 +2681,10 @@@ struct net_device *rtnl_create_link(str
  		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
  	if (tb[IFLA_GROUP])
  		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
+ 	if (tb[IFLA_GSO_MAX_SIZE])
+ 		netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
+ 	if (tb[IFLA_GSO_MAX_SEGS])
+ 		dev->gso_max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
  
  	return dev;
  }
@@@ -2883,7 -2985,7 +2985,7 @@@ static int rtnl_getlink(struct sk_buff 
  
  	if (tb[IFLA_IF_NETNSID]) {
  		netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
 -		tgt_net = get_target_net(skb, netnsid);
 +		tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
  		if (IS_ERR(tgt_net))
  			return PTR_ERR(tgt_net);
  	}
@@@ -2973,18 -3075,26 +3075,26 @@@ static int rtnl_dump_all(struct sk_buf
  		s_idx = 1;
  
  	for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
+ 		struct rtnl_link **tab;
  		int type = cb->nlh->nlmsg_type-RTM_BASE;
- 		struct rtnl_link *handlers;
+ 		struct rtnl_link *link;
  		rtnl_dumpit_func dumpit;
  
  		if (idx < s_idx || idx == PF_PACKET)
  			continue;
  
- 		handlers = rtnl_dereference(rtnl_msg_handlers[idx]);
- 		if (!handlers)
+ 		if (type < 0 || type >= RTM_NR_MSGTYPES)
  			continue;
  
- 		dumpit = READ_ONCE(handlers[type].dumpit);
+ 		tab = rcu_dereference_rtnl(rtnl_msg_handlers[idx]);
+ 		if (!tab)
+ 			continue;
+ 
+ 		link = tab[type];
+ 		if (!link)
+ 			continue;
+ 
+ 		dumpit = link->dumpit;
  		if (!dumpit)
  			continue;
  
@@@ -4314,7 -4424,8 +4424,8 @@@ static int rtnetlink_rcv_msg(struct sk_
  			     struct netlink_ext_ack *extack)
  {
  	struct net *net = sock_net(skb->sk);
- 	struct rtnl_link *handlers;
+ 	struct rtnl_link *link;
+ 	struct module *owner;
  	int err = -EOPNOTSUPP;
  	rtnl_doit_func doit;
  	unsigned int flags;
@@@ -4338,79 -4449,85 +4449,85 @@@
  	if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
  		return -EPERM;
  
- 	if (family >= ARRAY_SIZE(rtnl_msg_handlers))
- 		family = PF_UNSPEC;
- 
  	rcu_read_lock();
- 	handlers = rcu_dereference(rtnl_msg_handlers[family]);
- 	if (!handlers) {
- 		family = PF_UNSPEC;
- 		handlers = rcu_dereference(rtnl_msg_handlers[family]);
- 	}
- 
  	if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
  		struct sock *rtnl;
  		rtnl_dumpit_func dumpit;
  		u16 min_dump_alloc = 0;
  
- 		dumpit = READ_ONCE(handlers[type].dumpit);
- 		if (!dumpit) {
+ 		link = rtnl_get_link(family, type);
+ 		if (!link || !link->dumpit) {
  			family = PF_UNSPEC;
- 			handlers = rcu_dereference(rtnl_msg_handlers[PF_UNSPEC]);
- 			if (!handlers)
- 				goto err_unlock;
- 
- 			dumpit = READ_ONCE(handlers[type].dumpit);
- 			if (!dumpit)
+ 			link = rtnl_get_link(family, type);
+ 			if (!link || !link->dumpit)
  				goto err_unlock;
  		}
- 
- 		refcount_inc(&rtnl_msg_handlers_ref[family]);
+ 		owner = link->owner;
+ 		dumpit = link->dumpit;
  
  		if (type == RTM_GETLINK - RTM_BASE)
  			min_dump_alloc = rtnl_calcit(skb, nlh);
  
+ 		err = 0;
+ 		/* need to do this before rcu_read_unlock() */
+ 		if (!try_module_get(owner))
+ 			err = -EPROTONOSUPPORT;
+ 
  		rcu_read_unlock();
  
  		rtnl = net->rtnl;
- 		{
+ 		if (err == 0) {
  			struct netlink_dump_control c = {
  				.dump		= dumpit,
  				.min_dump_alloc	= min_dump_alloc,
+ 				.module		= owner,
  			};
  			err = netlink_dump_start(rtnl, skb, nlh, &c);
+ 			/* netlink_dump_start() will keep a reference on
+ 			 * module if dump is still in progress.
+ 			 */
+ 			module_put(owner);
  		}
- 		refcount_dec(&rtnl_msg_handlers_ref[family]);
  		return err;
  	}
  
- 	doit = READ_ONCE(handlers[type].doit);
- 	if (!doit) {
+ 	link = rtnl_get_link(family, type);
+ 	if (!link || !link->doit) {
  		family = PF_UNSPEC;
- 		handlers = rcu_dereference(rtnl_msg_handlers[family]);
+ 		link = rtnl_get_link(PF_UNSPEC, type);
+ 		if (!link || !link->doit)
+ 			goto out_unlock;
  	}
  
- 	flags = READ_ONCE(handlers[type].flags);
+ 	owner = link->owner;
+ 	if (!try_module_get(owner)) {
+ 		err = -EPROTONOSUPPORT;
+ 		goto out_unlock;
+ 	}
+ 
+ 	flags = link->flags;
  	if (flags & RTNL_FLAG_DOIT_UNLOCKED) {
- 		refcount_inc(&rtnl_msg_handlers_ref[family]);
- 		doit = READ_ONCE(handlers[type].doit);
+ 		doit = link->doit;
  		rcu_read_unlock();
  		if (doit)
  			err = doit(skb, nlh, extack);
- 		refcount_dec(&rtnl_msg_handlers_ref[family]);
+ 		module_put(owner);
  		return err;
  	}
- 
  	rcu_read_unlock();
  
  	rtnl_lock();
- 	handlers = rtnl_dereference(rtnl_msg_handlers[family]);
- 	if (handlers) {
- 		doit = READ_ONCE(handlers[type].doit);
- 		if (doit)
- 			err = doit(skb, nlh, extack);
- 	}
+ 	link = rtnl_get_link(family, type);
+ 	if (link && link->doit)
+ 		err = link->doit(skb, nlh, extack);
  	rtnl_unlock();
+ 
+ 	module_put(owner);
+ 
+ 	return err;
+ 
+ out_unlock:
+ 	rcu_read_unlock();
  	return err;
  
  err_unlock:
@@@ -4498,11 -4615,6 +4615,6 @@@ static struct pernet_operations rtnetli
  
  void __init rtnetlink_init(void)
  {
- 	int i;
- 
- 	for (i = 0; i < ARRAY_SIZE(rtnl_msg_handlers_ref); i++)
- 		refcount_set(&rtnl_msg_handlers_ref[i], 1);
- 
  	if (register_pernet_subsys(&rtnetlink_net_ops))
  		panic("rtnetlink_init: cannot initialize rtnetlink\n");
  
diff --combined net/core/sock.c
index 1211159718ad,72d14b221784..420c380bc61d
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@@ -145,6 -145,8 +145,8 @@@
  static DEFINE_MUTEX(proto_list_mutex);
  static LIST_HEAD(proto_list);
  
+ static void sock_inuse_add(struct net *net, int val);
+ 
  /**
   * sk_ns_capable - General socket capability test
   * @sk: Socket to use a capability on or through
@@@ -1531,8 -1533,11 +1533,11 @@@ struct sock *sk_alloc(struct net *net, 
  		sk->sk_kern_sock = kern;
  		sock_lock_init(sk);
  		sk->sk_net_refcnt = kern ? 0 : 1;
- 		if (likely(sk->sk_net_refcnt))
+ 		if (likely(sk->sk_net_refcnt)) {
  			get_net(net);
+ 			sock_inuse_add(net, 1);
+ 		}
+ 
  		sock_net_set(sk, net);
  		refcount_set(&sk->sk_wmem_alloc, 1);
  
@@@ -1595,6 -1600,9 +1600,9 @@@ void sk_destruct(struct sock *sk
  
  static void __sk_free(struct sock *sk)
  {
+ 	if (likely(sk->sk_net_refcnt))
+ 		sock_inuse_add(sock_net(sk), -1);
+ 
  	if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
  		sock_diag_broadcast_destroy(sk);
  	else
@@@ -1716,6 -1724,8 +1724,8 @@@ struct sock *sk_clone_lock(const struc
  		newsk->sk_priority = 0;
  		newsk->sk_incoming_cpu = raw_smp_processor_id();
  		atomic64_set(&newsk->sk_cookie, 0);
+ 		if (likely(newsk->sk_net_refcnt))
+ 			sock_inuse_add(sock_net(newsk), 1);
  
  		/*
  		 * Before updating sk_refcnt, we must commit prior changes to memory
@@@ -2496,7 -2506,7 +2506,7 @@@ int sock_no_getname(struct socket *sock
  }
  EXPORT_SYMBOL(sock_no_getname);
  
 -unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
 +__poll_t sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
  {
  	return 0;
  }
@@@ -3045,7 -3055,7 +3055,7 @@@ static DECLARE_BITMAP(proto_inuse_idx, 
  
  void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
  {
- 	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
+ 	__this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
  }
  EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
  
@@@ -3055,21 -3065,50 +3065,50 @@@ int sock_prot_inuse_get(struct net *net
  	int res = 0;
  
  	for_each_possible_cpu(cpu)
- 		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
+ 		res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];
  
  	return res >= 0 ? res : 0;
  }
  EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
  
+ static void sock_inuse_add(struct net *net, int val)
+ {
+ 	this_cpu_add(*net->core.sock_inuse, val);
+ }
+ 
+ int sock_inuse_get(struct net *net)
+ {
+ 	int cpu, res = 0;
+ 
+ 	for_each_possible_cpu(cpu)
+ 		res += *per_cpu_ptr(net->core.sock_inuse, cpu);
+ 
+ 	return res;
+ }
+ 
+ EXPORT_SYMBOL_GPL(sock_inuse_get);
+ 
  static int __net_init sock_inuse_init_net(struct net *net)
  {
- 	net->core.inuse = alloc_percpu(struct prot_inuse);
- 	return net->core.inuse ? 0 : -ENOMEM;
+ 	net->core.prot_inuse = alloc_percpu(struct prot_inuse);
+ 	if (net->core.prot_inuse == NULL)
+ 		return -ENOMEM;
+ 
+ 	net->core.sock_inuse = alloc_percpu(int);
+ 	if (net->core.sock_inuse == NULL)
+ 		goto out;
+ 
+ 	return 0;
+ 
+ out:
+ 	free_percpu(net->core.prot_inuse);
+ 	return -ENOMEM;
  }
  
  static void __net_exit sock_inuse_exit_net(struct net *net)
  {
- 	free_percpu(net->core.inuse);
+ 	free_percpu(net->core.prot_inuse);
+ 	free_percpu(net->core.sock_inuse);
  }
  
  static struct pernet_operations net_inuse_ops = {
@@@ -3112,6 -3151,10 +3151,10 @@@ static inline void assign_proto_idx(str
  static inline void release_proto_idx(struct proto *prot)
  {
  }
+ 
+ static void sock_inuse_add(struct net *net, int val)
+ {
+ }
  #endif
  
  static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
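
The sock.c hunks add a per-netns, per-cpu count of all sockets (net->core.sock_inuse) next to the renamed prot_inuse counters. A hedged sketch of a reader in the style of a sockstat-like seq_file handler; the seq_file plumbing and the way the netns is obtained are assumptions, not part of this merge:

static int example_sockstat_show(struct seq_file *seq, void *v)
{
	struct net *net = seq->private;	/* assumes ->private holds the netns */

	/* sock_inuse_get() sums the per-cpu deltas, so the value may lag
	 * slightly behind concurrent sk_alloc()/__sk_free() activity.
	 */
	seq_printf(seq, "sockets: used %d\n", sock_inuse_get(net));
	return 0;
}
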
diff --combined net/dccp/proto.c
index 8b8db3d481bd,fa7e92e08920..74685fecfdb9
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@@ -38,6 -38,9 +38,9 @@@
  #include "dccp.h"
  #include "feat.h"
  
+ #define CREATE_TRACE_POINTS
+ #include "trace.h"
+ 
  DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;
  
  EXPORT_SYMBOL_GPL(dccp_statistics);
@@@ -110,7 -113,7 +113,7 @@@ void dccp_set_state(struct sock *sk, co
  	/* Change state AFTER socket is unhashed to avoid closed
  	 * socket sitting in hash tables.
  	 */
- 	sk->sk_state = state;
+ 	inet_sk_set_state(sk, state);
  }
  
  EXPORT_SYMBOL_GPL(dccp_set_state);
@@@ -318,10 -321,10 +321,10 @@@ EXPORT_SYMBOL_GPL(dccp_disconnect)
   *	take care of normal races (between the test and the event) and we don't
   *	go look at any of the socket buffers directly.
   */
 -unsigned int dccp_poll(struct file *file, struct socket *sock,
 +__poll_t dccp_poll(struct file *file, struct socket *sock,
  		       poll_table *wait)
  {
 -	unsigned int mask;
 +	__poll_t mask;
  	struct sock *sk = sock->sk;
  
  	sock_poll_wait(file, sk_sleep(sk), wait);
@@@ -761,6 -764,8 +764,8 @@@ int dccp_sendmsg(struct sock *sk, struc
  	int rc, size;
  	long timeo;
  
+ 	trace_dccp_probe(sk, len);
+ 
  	if (len > dp->dccps_mss_cache)
  		return -EMSGSIZE;
  
diff --combined net/ipv4/tcp.c
index c4a7ee7f6721,f68cb33d50d1..d58285b54813
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@@ -283,8 -283,6 +283,6 @@@
  #include <asm/ioctls.h>
  #include <net/busy_poll.h>
  
- #include <trace/events/tcp.h>
- 
  struct percpu_counter tcp_orphan_count;
  EXPORT_SYMBOL_GPL(tcp_orphan_count);
  
@@@ -493,18 -491,16 +491,16 @@@ static void tcp_tx_timestamp(struct soc
   *	take care of normal races (between the test and the event) and we don't
   *	go look at any of the socket buffers directly.
   */
 -unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 +__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
  {
 -	unsigned int mask;
 +	__poll_t mask;
  	struct sock *sk = sock->sk;
  	const struct tcp_sock *tp = tcp_sk(sk);
  	int state;
  
- 	sock_rps_record_flow(sk);
- 
  	sock_poll_wait(file, sk_sleep(sk), wait);
  
- 	state = sk_state_load(sk);
+ 	state = inet_sk_state_load(sk);
  	if (state == TCP_LISTEN)
  		return inet_csk_listen_poll(sk);
  
@@@ -1106,12 -1102,15 +1102,15 @@@ static int linear_payload_sz(bool first
  	return 0;
  }
  
- static int select_size(const struct sock *sk, bool sg, bool first_skb)
+ static int select_size(const struct sock *sk, bool sg, bool first_skb, bool zc)
  {
  	const struct tcp_sock *tp = tcp_sk(sk);
  	int tmp = tp->mss_cache;
  
  	if (sg) {
+ 		if (zc)
+ 			return 0;
+ 
  		if (sk_can_gso(sk)) {
  			tmp = linear_payload_sz(first_skb);
  		} else {
@@@ -1188,7 -1187,7 +1187,7 @@@ int tcp_sendmsg_locked(struct sock *sk
  	int flags, err, copied = 0;
  	int mss_now = 0, size_goal, copied_syn = 0;
  	bool process_backlog = false;
- 	bool sg;
+ 	bool sg, zc = false;
  	long timeo;
  
  	flags = msg->msg_flags;
@@@ -1206,7 -1205,8 +1205,8 @@@
  			goto out_err;
  		}
  
- 		if (!(sk_check_csum_caps(sk) && sk->sk_route_caps & NETIF_F_SG))
+ 		zc = sk_check_csum_caps(sk) && sk->sk_route_caps & NETIF_F_SG;
+ 		if (!zc)
  			uarg->zerocopy = 0;
  	}
  
@@@ -1283,6 -1283,7 +1283,7 @@@ restart
  
  		if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
  			bool first_skb;
+ 			int linear;
  
  new_segment:
  			/* Allocate new segment. If the interface is SG,
@@@ -1296,9 -1297,8 +1297,8 @@@
  				goto restart;
  			}
  			first_skb = tcp_rtx_and_write_queues_empty(sk);
- 			skb = sk_stream_alloc_skb(sk,
- 						  select_size(sk, sg, first_skb),
- 						  sk->sk_allocation,
+ 			linear = select_size(sk, sg, first_skb, zc);
+ 			skb = sk_stream_alloc_skb(sk, linear, sk->sk_allocation,
  						  first_skb);
  			if (!skb)
  				goto wait_for_memory;
@@@ -1327,13 -1327,13 +1327,13 @@@
  			copy = msg_data_left(msg);
  
  		/* Where to copy to? */
- 		if (skb_availroom(skb) > 0) {
+ 		if (skb_availroom(skb) > 0 && !zc) {
  			/* We have some space in skb head. Superb! */
  			copy = min_t(int, copy, skb_availroom(skb));
  			err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
  			if (err)
  				goto do_fault;
- 		} else if (!uarg || !uarg->zerocopy) {
+ 		} else if (!zc) {
  			bool merge = true;
  			int i = skb_shinfo(skb)->nr_frags;
  			struct page_frag *pfrag = sk_page_frag(sk);
@@@ -1373,8 -1373,10 +1373,10 @@@
  			pfrag->offset += copy;
  		} else {
  			err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
- 			if (err == -EMSGSIZE || err == -EEXIST)
+ 			if (err == -EMSGSIZE || err == -EEXIST) {
+ 				tcp_mark_push(tp, skb);
  				goto new_segment;
+ 			}
  			if (err < 0)
  				goto do_error;
  			copy = err;
@@@ -2040,8 -2042,6 +2042,6 @@@ void tcp_set_state(struct sock *sk, in
  {
  	int oldstate = sk->sk_state;
  
- 	trace_tcp_set_state(sk, oldstate, state);
- 
  	switch (state) {
  	case TCP_ESTABLISHED:
  		if (oldstate != TCP_ESTABLISHED)
@@@ -2065,7 -2065,7 +2065,7 @@@
  	/* Change state AFTER socket is unhashed to avoid closed
  	 * socket sitting in hash tables.
  	 */
- 	sk_state_store(sk, state);
+ 	inet_sk_state_store(sk, state);
  
  #ifdef STATE_TRACE
  	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
@@@ -2920,7 -2920,7 +2920,7 @@@ void tcp_get_info(struct sock *sk, stru
  	if (sk->sk_type != SOCK_STREAM)
  		return;
  
- 	info->tcpi_state = sk_state_load(sk);
+ 	info->tcpi_state = inet_sk_state_load(sk);
  
  	/* Report meaningful fields for all TCP states, including listeners */
  	rate = READ_ONCE(sk->sk_pacing_rate);
@@@ -3578,6 -3578,9 +3578,9 @@@ void __init tcp_init(void
  	percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
  	percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
  	inet_hashinfo_init(&tcp_hashinfo);
+ 	inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
+ 			    thash_entries, 21,  /* one slot per 2 MB*/
+ 			    0, 64 * 1024);
  	tcp_hashinfo.bind_bucket_cachep =
  		kmem_cache_create("tcp_bind_bucket",
  				  sizeof(struct inet_bind_bucket), 0,
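
tcp.c moves from sk_state_load()/sk_state_store() to inet_sk_state_load()/inet_sk_state_store(), with inet_sk_set_state() used where state-change notification is wanted. A minimal sketch of the lockless reader side; example_is_listening() is a made-up name:

static bool example_is_listening(const struct sock *sk)
{
	/* Pairs with inet_sk_state_store()/inet_sk_set_state() on the
	 * writer side, as in tcp_set_state() above.
	 */
	return inet_sk_state_load(sk) == TCP_LISTEN;
}
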
diff --combined net/ipv4/udp.c
index ef45adfc0edb,db72619e07e4..6eddd0602813
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@@ -357,18 -357,12 +357,12 @@@ fail
  }
  EXPORT_SYMBOL(udp_lib_get_port);
  
- static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr,
- 			      unsigned int port)
- {
- 	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
- }
- 
  int udp_v4_get_port(struct sock *sk, unsigned short snum)
  {
  	unsigned int hash2_nulladdr =
- 		udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
+ 		ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
  	unsigned int hash2_partial =
- 		udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
+ 		ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
  
  	/* precompute partial secondary hash */
  	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
@@@ -445,7 -439,7 +439,7 @@@ static struct sock *udp4_lib_lookup2(st
  				     struct sk_buff *skb)
  {
  	struct sock *sk, *result;
- 	int score, badness, matches = 0, reuseport = 0;
+ 	int score, badness;
  	u32 hash = 0;
  
  	result = NULL;
@@@ -454,23 -448,16 +448,16 @@@
  		score = compute_score(sk, net, saddr, sport,
  				      daddr, hnum, dif, sdif, exact_dif);
  		if (score > badness) {
- 			reuseport = sk->sk_reuseport;
- 			if (reuseport) {
+ 			if (sk->sk_reuseport) {
  				hash = udp_ehashfn(net, daddr, hnum,
  						   saddr, sport);
  				result = reuseport_select_sock(sk, hash, skb,
  							sizeof(struct udphdr));
  				if (result)
  					return result;
- 				matches = 1;
  			}
  			badness = score;
  			result = sk;
- 		} else if (score == badness && reuseport) {
- 			matches++;
- 			if (reciprocal_scale(hash, matches) == 0)
- 				result = sk;
- 			hash = next_pseudo_random32(hash);
  		}
  	}
  	return result;
@@@ -488,11 -475,11 +475,11 @@@ struct sock *__udp4_lib_lookup(struct n
  	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
  	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
  	bool exact_dif = udp_lib_exact_dif_match(net, skb);
- 	int score, badness, matches = 0, reuseport = 0;
+ 	int score, badness;
  	u32 hash = 0;
  
  	if (hslot->count > 10) {
- 		hash2 = udp4_portaddr_hash(net, daddr, hnum);
+ 		hash2 = ipv4_portaddr_hash(net, daddr, hnum);
  		slot2 = hash2 & udptable->mask;
  		hslot2 = &udptable->hash2[slot2];
  		if (hslot->count < hslot2->count)
@@@ -503,7 -490,7 +490,7 @@@
  					  exact_dif, hslot2, skb);
  		if (!result) {
  			unsigned int old_slot2 = slot2;
- 			hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
+ 			hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
  			slot2 = hash2 & udptable->mask;
  			/* avoid searching the same slot again. */
  			if (unlikely(slot2 == old_slot2))
@@@ -526,23 -513,16 +513,16 @@@ begin
  		score = compute_score(sk, net, saddr, sport,
  				      daddr, hnum, dif, sdif, exact_dif);
  		if (score > badness) {
- 			reuseport = sk->sk_reuseport;
- 			if (reuseport) {
+ 			if (sk->sk_reuseport) {
  				hash = udp_ehashfn(net, daddr, hnum,
  						   saddr, sport);
  				result = reuseport_select_sock(sk, hash, skb,
  							sizeof(struct udphdr));
  				if (result)
  					return result;
- 				matches = 1;
  			}
  			result = sk;
  			badness = score;
- 		} else if (score == badness && reuseport) {
- 			matches++;
- 			if (reciprocal_scale(hash, matches) == 0)
- 				result = sk;
- 			hash = next_pseudo_random32(hash);
  		}
  	}
  	return result;
@@@ -1775,7 -1755,7 +1755,7 @@@ EXPORT_SYMBOL(udp_lib_rehash)
  
  static void udp_v4_rehash(struct sock *sk)
  {
- 	u16 new_hash = udp4_portaddr_hash(sock_net(sk),
+ 	u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
  					  inet_sk(sk)->inet_rcv_saddr,
  					  inet_sk(sk)->inet_num);
  	udp_lib_rehash(sk, new_hash);
@@@ -1966,9 -1946,9 +1946,9 @@@ static int __udp4_lib_mcast_deliver(str
  	struct sk_buff *nskb;
  
  	if (use_hash2) {
- 		hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
+ 		hash2_any = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
  			    udptable->mask;
- 		hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask;
+ 		hash2 = ipv4_portaddr_hash(net, daddr, hnum) & udptable->mask;
  start_lookup:
  		hslot = &udptable->hash2[hash2];
  		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
@@@ -2200,7 -2180,7 +2180,7 @@@ static struct sock *__udp4_lib_demux_lo
  					    int dif, int sdif)
  {
  	unsigned short hnum = ntohs(loc_port);
- 	unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
+ 	unsigned int hash2 = ipv4_portaddr_hash(net, loc_addr, hnum);
  	unsigned int slot2 = hash2 & udp_table.mask;
  	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
  	INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
@@@ -2502,16 -2482,14 +2482,14 @@@ int compat_udp_getsockopt(struct sock *
   *	but then block when reading it. Add special case code
   *	to work around these arguably broken applications.
   */
 -unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
 +__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
  {
 -	unsigned int mask = datagram_poll(file, sock, wait);
 +	__poll_t mask = datagram_poll(file, sock, wait);
  	struct sock *sk = sock->sk;
  
  	if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
  		mask |= POLLIN | POLLRDNORM;
  
- 	sock_rps_record_flow(sk);
- 
  	/* Check for false positives due to checksum errors */
  	if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
  	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
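
udp.c drops its private udp4_portaddr_hash() in favour of the shared ipv4_portaddr_hash() and removes the old reuseport "matches" fallback now that reuseport_select_sock() picks the socket. A condensed sketch of the hash usage, mirroring udp_v4_rehash() above; example_rehash() is hypothetical:

static void example_rehash(struct sock *sk)
{
	/* Same (address, port) hash as before, now computed by the shared
	 * ipv4_portaddr_hash() helper.
	 */
	u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}
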
diff --combined net/ipv6/ip6_fib.c
index d11a5578e4f8,a64d559fa513..17b945a2c550
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@@ -893,7 -893,7 +893,7 @@@ static int fib6_add_rt2node(struct fib6
  	ins = &fn->leaf;
  
  	for (iter = leaf; iter;
- 	     iter = rcu_dereference_protected(iter->dst.rt6_next,
+ 	     iter = rcu_dereference_protected(iter->rt6_next,
  				lockdep_is_held(&rt->rt6i_table->tb6_lock))) {
  		/*
  		 *	Search for duplicates
@@@ -950,7 -950,7 +950,7 @@@
  			break;
  
  next_iter:
- 		ins = &iter->dst.rt6_next;
+ 		ins = &iter->rt6_next;
  	}
  
  	if (fallback_ins && !found) {
@@@ -979,7 -979,7 +979,7 @@@
  					      &sibling->rt6i_siblings);
  				break;
  			}
- 			sibling = rcu_dereference_protected(sibling->dst.rt6_next,
+ 			sibling = rcu_dereference_protected(sibling->rt6_next,
  				    lockdep_is_held(&rt->rt6i_table->tb6_lock));
  		}
  		/* For each sibling in the list, increment the counter of
@@@ -1009,7 -1009,7 +1009,7 @@@ add
  		if (err)
  			return err;
  
- 		rcu_assign_pointer(rt->dst.rt6_next, iter);
+ 		rcu_assign_pointer(rt->rt6_next, iter);
  		atomic_inc(&rt->rt6i_ref);
  		rcu_assign_pointer(rt->rt6i_node, fn);
  		rcu_assign_pointer(*ins, rt);
@@@ -1040,7 -1040,7 +1040,7 @@@
  
  		atomic_inc(&rt->rt6i_ref);
  		rcu_assign_pointer(rt->rt6i_node, fn);
- 		rt->dst.rt6_next = iter->dst.rt6_next;
+ 		rt->rt6_next = iter->rt6_next;
  		rcu_assign_pointer(*ins, rt);
  		call_fib6_entry_notifiers(info->nl_net, FIB_EVENT_ENTRY_REPLACE,
  					  rt, extack);
@@@ -1059,14 -1059,14 +1059,14 @@@
  
  		if (nsiblings) {
  			/* Replacing an ECMP route, remove all siblings */
- 			ins = &rt->dst.rt6_next;
+ 			ins = &rt->rt6_next;
  			iter = rcu_dereference_protected(*ins,
  				    lockdep_is_held(&rt->rt6i_table->tb6_lock));
  			while (iter) {
  				if (iter->rt6i_metric > rt->rt6i_metric)
  					break;
  				if (rt6_qualify_for_ecmp(iter)) {
- 					*ins = iter->dst.rt6_next;
+ 					*ins = iter->rt6_next;
  					iter->rt6i_node = NULL;
  					fib6_purge_rt(iter, fn, info->nl_net);
  					if (rcu_access_pointer(fn->rr_ptr) == iter)
@@@ -1075,7 -1075,7 +1075,7 @@@
  					nsiblings--;
  					info->nl_net->ipv6.rt6_stats->fib_rt_entries--;
  				} else {
- 					ins = &iter->dst.rt6_next;
+ 					ins = &iter->rt6_next;
  				}
  				iter = rcu_dereference_protected(*ins,
  					lockdep_is_held(&rt->rt6i_table->tb6_lock));
@@@ -1241,28 -1241,23 +1241,28 @@@ out
  		 * If fib6_add_1 has cleared the old leaf pointer in the
  		 * super-tree leaf node we have to find a new one for it.
  		 */
 -		struct rt6_info *pn_leaf = rcu_dereference_protected(pn->leaf,
 -					    lockdep_is_held(&table->tb6_lock));
 -		if (pn != fn && pn_leaf == rt) {
 -			pn_leaf = NULL;
 -			RCU_INIT_POINTER(pn->leaf, NULL);
 -			atomic_dec(&rt->rt6i_ref);
 -		}
 -		if (pn != fn && !pn_leaf && !(pn->fn_flags & RTN_RTINFO)) {
 -			pn_leaf = fib6_find_prefix(info->nl_net, table, pn);
 -#if RT6_DEBUG >= 2
 -			if (!pn_leaf) {
 -				WARN_ON(!pn_leaf);
 -				pn_leaf = info->nl_net->ipv6.ip6_null_entry;
 +		if (pn != fn) {
 +			struct rt6_info *pn_leaf =
 +				rcu_dereference_protected(pn->leaf,
 +				    lockdep_is_held(&table->tb6_lock));
 +			if (pn_leaf == rt) {
 +				pn_leaf = NULL;
 +				RCU_INIT_POINTER(pn->leaf, NULL);
 +				atomic_dec(&rt->rt6i_ref);
  			}
 +			if (!pn_leaf && !(pn->fn_flags & RTN_RTINFO)) {
 +				pn_leaf = fib6_find_prefix(info->nl_net, table,
 +							   pn);
 +#if RT6_DEBUG >= 2
 +				if (!pn_leaf) {
 +					WARN_ON(!pn_leaf);
 +					pn_leaf =
 +					    info->nl_net->ipv6.ip6_null_entry;
 +				}
  #endif
 -			atomic_inc(&pn_leaf->rt6i_ref);
 -			rcu_assign_pointer(pn->leaf, pn_leaf);
 +				atomic_inc(&pn_leaf->rt6i_ref);
 +				rcu_assign_pointer(pn->leaf, pn_leaf);
 +			}
  		}
  #endif
  		goto failure;
@@@ -1649,7 -1644,7 +1649,7 @@@ static void fib6_del_route(struct fib6_
  	WARN_ON_ONCE(rt->rt6i_flags & RTF_CACHE);
  
  	/* Unlink it */
- 	*rtp = rt->dst.rt6_next;
+ 	*rtp = rt->rt6_next;
  	rt->rt6i_node = NULL;
  	net->ipv6.rt6_stats->fib_rt_entries--;
  	net->ipv6.rt6_stats->fib_discarded_routes++;
@@@ -1677,7 -1672,7 +1677,7 @@@
  	FOR_WALKERS(net, w) {
  		if (w->state == FWS_C && w->leaf == rt) {
  			RT6_TRACE("walker %p adjusted by delroute\n", w);
- 			w->leaf = rcu_dereference_protected(rt->dst.rt6_next,
+ 			w->leaf = rcu_dereference_protected(rt->rt6_next,
  					    lockdep_is_held(&table->tb6_lock));
  			if (!w->leaf)
  				w->state = FWS_U;
@@@ -1736,7 -1731,7 +1736,7 @@@ int fib6_del(struct rt6_info *rt, struc
  			fib6_del_route(table, fn, rtp, info);
  			return 0;
  		}
- 		rtp_next = &cur->dst.rt6_next;
+ 		rtp_next = &cur->rt6_next;
  	}
  	return -ENOENT;
  }
@@@ -2147,8 -2142,8 +2147,8 @@@ int __init fib6_init(void
  	if (ret)
  		goto out_kmem_cache_create;
  
- 	ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib,
- 			      0);
+ 	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE, NULL,
+ 				   inet6_dump_fib, 0);
  	if (ret)
  		goto out_unregister_subsys;
  
@@@ -2213,7 -2208,7 +2213,7 @@@ static int ipv6_route_yield(struct fib6
  
  	do {
  		iter->w.leaf = rcu_dereference_protected(
- 				iter->w.leaf->dst.rt6_next,
+ 				iter->w.leaf->rt6_next,
  				lockdep_is_held(&iter->tbl->tb6_lock));
  		iter->skip--;
  		if (!iter->skip && iter->w.leaf)
@@@ -2279,7 -2274,7 +2279,7 @@@ static void *ipv6_route_seq_next(struc
  	if (!v)
  		goto iter_table;
  
- 	n = rcu_dereference_bh(((struct rt6_info *)v)->dst.rt6_next);
+ 	n = rcu_dereference_bh(((struct rt6_info *)v)->rt6_next);
  	if (n) {
  		++*pos;
  		return n;
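
ip6_fib.c follows the relocation of the next-route pointer from rt->dst.rt6_next to rt->rt6_next. A sketch of walking one node's leaf chain under tb6_lock, in the style of fib6_add_rt2node() above; example_walk_leaf() is hypothetical:

static void example_walk_leaf(struct fib6_node *fn, struct fib6_table *table)
{
	struct rt6_info *iter;

	for (iter = rcu_dereference_protected(fn->leaf,
				lockdep_is_held(&table->tb6_lock));
	     iter;
	     iter = rcu_dereference_protected(iter->rt6_next,
				lockdep_is_held(&table->tb6_lock)))
		/* inspect each route in metric order */;
}
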
diff --combined net/ipv6/ip6_tunnel.c
index 9a7cf355bc8c,8a4610e84e58..8071f42cd8a0
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@@ -861,7 -861,7 +861,7 @@@ int ip6_tnl_rcv(struct ip6_tnl *t, stru
  		struct metadata_dst *tun_dst,
  		bool log_ecn_err)
  {
- 	return __ip6_tnl_rcv(t, skb, tpi, NULL, ip6ip6_dscp_ecn_decapsulate,
+ 	return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate,
  			     log_ecn_err);
  }
  EXPORT_SYMBOL(ip6_tnl_rcv);
@@@ -979,6 -979,9 +979,9 @@@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t
  	int ret = 0;
  	struct net *net = t->net;
  
+ 	if (t->parms.collect_md)
+ 		return 1;
+ 
  	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
  	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
  	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
@@@ -1074,11 -1077,10 +1077,11 @@@ int ip6_tnl_xmit(struct sk_buff *skb, s
  			memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
  			neigh_release(neigh);
  		}
 -	} else if (!(t->parms.flags &
 -		     (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
 -		/* enable the cache only only if the routing decision does
 -		 * not depend on the current inner header value
 +	} else if (t->parms.proto != 0 && !(t->parms.flags &
 +					    (IP6_TNL_F_USE_ORIG_TCLASS |
 +					     IP6_TNL_F_USE_ORIG_FWMARK))) {
 +		/* enable the cache only if neither the outer protocol nor the
 +		 * routing decision depends on the current inner header value
  		 */
  		use_cache = true;
  	}
@@@ -1677,11 -1679,11 +1680,11 @@@ int ip6_tnl_change_mtu(struct net_devic
  {
  	struct ip6_tnl *tnl = netdev_priv(dev);
  
 -	if (tnl->parms.proto == IPPROTO_IPIP) {
 -		if (new_mtu < ETH_MIN_MTU)
 +	if (tnl->parms.proto == IPPROTO_IPV6) {
 +		if (new_mtu < IPV6_MIN_MTU)
  			return -EINVAL;
  	} else {
 -		if (new_mtu < IPV6_MIN_MTU)
 +		if (new_mtu < ETH_MIN_MTU)
  			return -EINVAL;
  	}
  	if (new_mtu > 0xFFF8 - dev->hard_header_len)
diff --combined net/mac80211/rx.c
index 4daafb07602f,b3cff69bfd66..fd580614085b
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@@ -1607,23 -1607,16 +1607,16 @@@ ieee80211_rx_h_sta_process(struct ieee8
  
  	/*
  	 * Change STA power saving mode only at the end of a frame
- 	 * exchange sequence.
+ 	 * exchange sequence, and only for a data or management
+ 	 * frame as specified in IEEE 802.11-2016 11.2.3.2
  	 */
  	if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
  	    !ieee80211_has_morefrags(hdr->frame_control) &&
- 	    !ieee80211_is_back_req(hdr->frame_control) &&
+ 	    (ieee80211_is_mgmt(hdr->frame_control) ||
+ 	     ieee80211_is_data(hdr->frame_control)) &&
  	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
  	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
- 	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
- 	    /*
- 	     * PM bit is only checked in frames where it isn't reserved,
- 	     * in AP mode it's reserved in non-bufferable management frames
- 	     * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
- 	     * BAR frames should be ignored as specified in
- 	     * IEEE 802.11-2012 10.2.1.2.
- 	     */
- 	    (!ieee80211_is_mgmt(hdr->frame_control) ||
- 	     ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
+ 	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
  		if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
  			if (!ieee80211_has_pm(hdr->frame_control))
  				sta_ps_end(sta);
@@@ -3632,8 -3625,6 +3625,8 @@@ static bool ieee80211_accept_frame(stru
  		}
  		return true;
  	case NL80211_IFTYPE_MESH_POINT:
 +		if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
 +			return false;
  		if (multicast)
  			return true;
  		return ether_addr_equal(sdata->vif.addr, hdr->addr1);
diff --combined net/packet/af_packet.c
index 3b4d6a3cf190,ee7aa0ba3a67..2a80f19f0913
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@@ -247,12 -247,13 +247,13 @@@ static int packet_direct_xmit(struct sk
  	struct sk_buff *orig_skb = skb;
  	struct netdev_queue *txq;
  	int ret = NETDEV_TX_BUSY;
+ 	bool again = false;
  
  	if (unlikely(!netif_running(dev) ||
  		     !netif_carrier_ok(dev)))
  		goto drop;
  
- 	skb = validate_xmit_skb_list(skb, dev);
+ 	skb = validate_xmit_skb_list(skb, dev, &again);
  	if (skb != orig_skb)
  		goto drop;
  
@@@ -4073,12 -4074,12 +4074,12 @@@ static int packet_ioctl(struct socket *
  	return 0;
  }
  
 -static unsigned int packet_poll(struct file *file, struct socket *sock,
 +static __poll_t packet_poll(struct file *file, struct socket *sock,
  				poll_table *wait)
  {
  	struct sock *sk = sock->sk;
  	struct packet_sock *po = pkt_sk(sk);
 -	unsigned int mask = datagram_poll(file, sock, wait);
 +	__poll_t mask = datagram_poll(file, sock, wait);
  
  	spin_lock_bh(&sk->sk_receive_queue.lock);
  	if (po->rx_ring.pg_vec) {
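
packet_direct_xmit() adapts to validate_xmit_skb_list() gaining a third parameter: the new bool reports that xfrm offload deferred (part of) the list to its backlog instead of handing it back. A hedged sketch of the updated caller contract; how "again" is acted on is caller-specific and left out here:

static struct sk_buff *example_prepare_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	bool again = false;

	/* On return, "again" being true means some skbs were queued for a
	 * later retry rather than dropped or returned.
	 */
	skb = validate_xmit_skb_list(skb, dev, &again);

	return skb;	/* possibly NULL or a shortened list */
}
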
diff --combined net/sched/act_gact.c
index a0ac42b3ed06,9d632e92cad0..b56986d41c87
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@@ -159,7 -159,7 +159,7 @@@ static void tcf_gact_stats_update(struc
  	if (action == TC_ACT_SHOT)
  		this_cpu_ptr(gact->common.cpu_qstats)->drops += packets;
  
 -	tm->lastuse = lastuse;
 +	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
  }
  
  static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a,
@@@ -235,16 -235,14 +235,14 @@@ static __net_init int gact_init_net(str
  	return tc_action_net_init(tn, &act_gact_ops);
  }
  
- static void __net_exit gact_exit_net(struct net *net)
+ static void __net_exit gact_exit_net(struct list_head *net_list)
  {
- 	struct tc_action_net *tn = net_generic(net, gact_net_id);
- 
- 	tc_action_net_exit(tn);
+ 	tc_action_net_exit(net_list, gact_net_id);
  }
  
  static struct pernet_operations gact_net_ops = {
  	.init = gact_init_net,
- 	.exit = gact_exit_net,
+ 	.exit_batch = gact_exit_net,
  	.id   = &gact_net_id,
  	.size = sizeof(struct tc_action_net),
  };
diff --combined net/sched/act_mirred.c
index 08b61849c2a2,37e5e4decbd6..e6ff88f72900
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@@ -29,7 -29,6 +29,6 @@@
  #include <net/tc_act/tc_mirred.h>
  
  static LIST_HEAD(mirred_list);
- static DEFINE_SPINLOCK(mirred_list_lock);
  
  static bool tcf_mirred_is_act_redirect(int action)
  {
@@@ -50,18 -49,15 +49,15 @@@ static bool tcf_mirred_act_wants_ingres
  	}
  }
  
- static void tcf_mirred_release(struct tc_action *a, int bind)
+ static void tcf_mirred_release(struct tc_action *a)
  {
  	struct tcf_mirred *m = to_mirred(a);
  	struct net_device *dev;
  
- 	/* We could be called either in a RCU callback or with RTNL lock held. */
- 	spin_lock_bh(&mirred_list_lock);
  	list_del(&m->tcfm_list);
- 	dev = rcu_dereference_protected(m->tcfm_dev, 1);
+ 	dev = rtnl_dereference(m->tcfm_dev);
  	if (dev)
  		dev_put(dev);
- 	spin_unlock_bh(&mirred_list_lock);
  }
  
  static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
@@@ -139,8 -135,6 +135,6 @@@ static int tcf_mirred_init(struct net *
  	m->tcf_action = parm->action;
  	m->tcfm_eaction = parm->eaction;
  	if (dev != NULL) {
- 		m->tcfm_ifindex = parm->ifindex;
- 		m->net = net;
  		if (ret != ACT_P_CREATED)
  			dev_put(rcu_dereference_protected(m->tcfm_dev, 1));
  		dev_hold(dev);
@@@ -149,9 -143,7 +143,7 @@@
  	}
  
  	if (ret == ACT_P_CREATED) {
- 		spin_lock_bh(&mirred_list_lock);
  		list_add(&m->tcfm_list, &mirred_list);
- 		spin_unlock_bh(&mirred_list_lock);
  		tcf_idr_insert(tn, *a);
  	}
  
@@@ -239,7 -231,7 +231,7 @@@ static void tcf_stats_update(struct tc_
  	struct tcf_t *tm = &m->tcf_tm;
  
  	_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
 -	tm->lastuse = lastuse;
 +	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
  }
  
  static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
@@@ -247,13 -239,14 +239,14 @@@
  {
  	unsigned char *b = skb_tail_pointer(skb);
  	struct tcf_mirred *m = to_mirred(a);
+ 	struct net_device *dev = rtnl_dereference(m->tcfm_dev);
  	struct tc_mirred opt = {
  		.index   = m->tcf_index,
  		.action  = m->tcf_action,
  		.refcnt  = m->tcf_refcnt - ref,
  		.bindcnt = m->tcf_bindcnt - bind,
  		.eaction = m->tcfm_eaction,
- 		.ifindex = m->tcfm_ifindex,
+ 		.ifindex = dev ? dev->ifindex : 0,
  	};
  	struct tcf_t t;
  
@@@ -294,7 -287,6 +287,6 @@@ static int mirred_device_event(struct n
  
  	ASSERT_RTNL();
  	if (event == NETDEV_UNREGISTER) {
- 		spin_lock_bh(&mirred_list_lock);
  		list_for_each_entry(m, &mirred_list, tcfm_list) {
  			if (rcu_access_pointer(m->tcfm_dev) == dev) {
  				dev_put(dev);
@@@ -304,7 -296,6 +296,6 @@@
  				RCU_INIT_POINTER(m->tcfm_dev, NULL);
  			}
  		}
- 		spin_unlock_bh(&mirred_list_lock);
  	}
  
  	return NOTIFY_DONE;
@@@ -318,7 -309,7 +309,7 @@@ static struct net_device *tcf_mirred_ge
  {
  	struct tcf_mirred *m = to_mirred(a);
  
- 	return __dev_get_by_index(m->net, m->tcfm_ifindex);
+ 	return rtnl_dereference(m->tcfm_dev);
  }
  
  static struct tc_action_ops act_mirred_ops = {
@@@ -343,16 -334,14 +334,14 @@@ static __net_init int mirred_init_net(s
  	return tc_action_net_init(tn, &act_mirred_ops);
  }
  
- static void __net_exit mirred_exit_net(struct net *net)
+ static void __net_exit mirred_exit_net(struct list_head *net_list)
  {
- 	struct tc_action_net *tn = net_generic(net, mirred_net_id);
- 
- 	tc_action_net_exit(tn);
+ 	tc_action_net_exit(net_list, mirred_net_id);
  }
  
  static struct pernet_operations mirred_net_ops = {
  	.init = mirred_init_net,
- 	.exit = mirred_exit_net,
+ 	.exit_batch = mirred_exit_net,
  	.id   = &mirred_net_id,
  	.size = sizeof(struct tc_action_net),
  };
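
Both act_gact and act_mirred above switch their pernet operations from a per-netns .exit to the batched .exit_batch hook, with tc_action_net_exit() now taking the whole list of namespaces being torn down. The pattern, condensed with hypothetical example_* names standing in for a module's existing init hook and net id:

static void __net_exit example_exit_net(struct list_head *net_list)
{
	/* One call cleans up the action state for every netns in the
	 * batch, instead of once per namespace.
	 */
	tc_action_net_exit(net_list, example_net_id);
}

static struct pernet_operations example_net_ops = {
	.init	    = example_init_net,
	.exit_batch = example_exit_net,
	.id	    = &example_net_id,
	.size	    = sizeof(struct tc_action_net),
};
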
diff --combined net/sctp/socket.c
index 8f7536de5f41,a5e2150ab013..eb68ae261054
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@@ -201,6 -201,22 +201,22 @@@ static void sctp_for_each_tx_datachunk(
  		cb(chunk);
  }
  
+ static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk,
+ 				 void (*cb)(struct sk_buff *, struct sock *))
+ 
+ {
+ 	struct sk_buff *skb, *tmp;
+ 
+ 	sctp_skb_for_each(skb, &asoc->ulpq.lobby, tmp)
+ 		cb(skb, sk);
+ 
+ 	sctp_skb_for_each(skb, &asoc->ulpq.reasm, tmp)
+ 		cb(skb, sk);
+ 
+ 	sctp_skb_for_each(skb, &asoc->ulpq.reasm_uo, tmp)
+ 		cb(skb, sk);
+ }
+ 
  /* Verify that this is a valid address. */
  static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
  				   int len)
@@@ -1528,7 -1544,7 +1544,7 @@@ static void sctp_close(struct sock *sk
  
  	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
  	sk->sk_shutdown = SHUTDOWN_MASK;
- 	sk->sk_state = SCTP_SS_CLOSING;
+ 	inet_sk_set_state(sk, SCTP_SS_CLOSING);
  
  	ep = sctp_sk(sk)->ep;
  
@@@ -1554,6 -1570,7 +1570,7 @@@
  
  		if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
  		    !skb_queue_empty(&asoc->ulpq.reasm) ||
+ 		    !skb_queue_empty(&asoc->ulpq.reasm_uo) ||
  		    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
  			struct sctp_chunk *chunk;
  
@@@ -2002,7 -2019,20 +2019,20 @@@ static int sctp_sendmsg(struct sock *sk
  		if (err < 0)
  			goto out_free;
  
- 		wait_connect = true;
+ 		/* If stream interleave is enabled, wait_connect has to be
+ 		 * done earlier than data enqueue, as it needs to make data
+ 		 * or idata according to asoc->intl_enable which is set
+ 		 * after connection is done.
+ 		 */
+ 		if (sctp_sk(asoc->base.sk)->strm_interleave) {
+ 			timeo = sock_sndtimeo(sk, 0);
+ 			err = sctp_wait_for_connect(asoc, &timeo);
+ 			if (err)
+ 				goto out_unlock;
+ 		} else {
+ 			wait_connect = true;
+ 		}
+ 
  		pr_debug("%s: we associated primitively\n", __func__);
  	}
  
@@@ -2281,7 -2311,7 +2311,7 @@@ static int sctp_setsockopt_events(struc
  			if (!event)
  				return -ENOMEM;
  
- 			sctp_ulpq_tail_event(&asoc->ulpq, event);
+ 			asoc->stream.si->enqueue_event(&asoc->ulpq, event);
  		}
  	}
  
@@@ -3180,7 -3210,7 +3210,7 @@@ static int sctp_setsockopt_maxseg(struc
  		if (val == 0) {
  			val = asoc->pathmtu - sp->pf->af->net_header_len;
  			val -= sizeof(struct sctphdr) +
- 			       sizeof(struct sctp_data_chunk);
+ 			       sctp_datachk_len(&asoc->stream);
  		}
  		asoc->user_frag = val;
  		asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
@@@ -3350,7 -3380,10 +3380,10 @@@ static int sctp_setsockopt_fragment_int
  	if (get_user(val, (int __user *)optval))
  		return -EFAULT;
  
- 	sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1;
+ 	sctp_sk(sk)->frag_interleave = !!val;
+ 
+ 	if (!sctp_sk(sk)->frag_interleave)
+ 		sctp_sk(sk)->strm_interleave = 0;
  
  	return 0;
  }
@@@ -4023,6 -4056,40 +4056,40 @@@ out
  	return retval;
  }
  
+ static int sctp_setsockopt_interleaving_supported(struct sock *sk,
+ 						  char __user *optval,
+ 						  unsigned int optlen)
+ {
+ 	struct sctp_sock *sp = sctp_sk(sk);
+ 	struct net *net = sock_net(sk);
+ 	struct sctp_assoc_value params;
+ 	int retval = -EINVAL;
+ 
+ 	if (optlen < sizeof(params))
+ 		goto out;
+ 
+ 	optlen = sizeof(params);
+ 	if (copy_from_user(&params, optval, optlen)) {
+ 		retval = -EFAULT;
+ 		goto out;
+ 	}
+ 
+ 	if (params.assoc_id)
+ 		goto out;
+ 
+ 	if (!net->sctp.intl_enable || !sp->frag_interleave) {
+ 		retval = -EPERM;
+ 		goto out;
+ 	}
+ 
+ 	sp->strm_interleave = !!params.assoc_value;
+ 
+ 	retval = 0;
+ 
+ out:
+ 	return retval;
+ }
+ 
  /* API 6.2 setsockopt(), getsockopt()
   *
   * Applications use setsockopt() and getsockopt() to set or retrieve
@@@ -4210,6 -4277,10 +4277,10 @@@ static int sctp_setsockopt(struct sock 
  	case SCTP_STREAM_SCHEDULER_VALUE:
  		retval = sctp_setsockopt_scheduler_value(sk, optval, optlen);
  		break;
+ 	case SCTP_INTERLEAVING_SUPPORTED:
+ 		retval = sctp_setsockopt_interleaving_supported(sk, optval,
+ 								optlen);
+ 		break;
  	default:
  		retval = -ENOPROTOOPT;
  		break;
@@@ -4586,7 -4657,7 +4657,7 @@@ static void sctp_shutdown(struct sock *
  	if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) {
  		struct sctp_association *asoc;
  
- 		sk->sk_state = SCTP_SS_CLOSING;
+ 		inet_sk_set_state(sk, SCTP_SS_CLOSING);
  		asoc = list_entry(ep->asocs.next,
  				  struct sctp_association, asocs);
  		sctp_primitive_SHUTDOWN(net, asoc, NULL);
@@@ -4680,20 -4751,11 +4751,11 @@@ int sctp_get_sctp_info(struct sock *sk
  EXPORT_SYMBOL_GPL(sctp_get_sctp_info);
  
  /* use callback to avoid exporting the core structure */
- int sctp_transport_walk_start(struct rhashtable_iter *iter)
+ void sctp_transport_walk_start(struct rhashtable_iter *iter)
  {
- 	int err;
- 
  	rhltable_walk_enter(&sctp_transport_hashtable, iter);
  
- 	err = rhashtable_walk_start(iter);
- 	if (err && err != -EAGAIN) {
- 		rhashtable_walk_stop(iter);
- 		rhashtable_walk_exit(iter);
- 		return err;
- 	}
- 
- 	return 0;
+ 	rhashtable_walk_start(iter);
  }
  
  void sctp_transport_walk_stop(struct rhashtable_iter *iter)
@@@ -4784,12 -4846,10 +4846,10 @@@ int sctp_for_each_transport(int (*cb)(s
  			    struct net *net, int *pos, void *p) {
  	struct rhashtable_iter hti;
  	struct sctp_transport *tsp;
- 	int ret;
+ 	int ret = 0;
  
  again:
- 	ret = sctp_transport_walk_start(&hti);
- 	if (ret)
- 		return ret;
+ 	sctp_transport_walk_start(&hti);
  
  	tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
  	for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
@@@ -6984,6 -7044,47 +7044,47 @@@ out
  	return retval;
  }
  
+ static int sctp_getsockopt_interleaving_supported(struct sock *sk, int len,
+ 						  char __user *optval,
+ 						  int __user *optlen)
+ {
+ 	struct sctp_assoc_value params;
+ 	struct sctp_association *asoc;
+ 	int retval = -EFAULT;
+ 
+ 	if (len < sizeof(params)) {
+ 		retval = -EINVAL;
+ 		goto out;
+ 	}
+ 
+ 	len = sizeof(params);
+ 	if (copy_from_user(&params, optval, len))
+ 		goto out;
+ 
+ 	asoc = sctp_id2assoc(sk, params.assoc_id);
+ 	if (asoc) {
+ 		params.assoc_value = asoc->intl_enable;
+ 	} else if (!params.assoc_id) {
+ 		struct sctp_sock *sp = sctp_sk(sk);
+ 
+ 		params.assoc_value = sp->strm_interleave;
+ 	} else {
+ 		retval = -EINVAL;
+ 		goto out;
+ 	}
+ 
+ 	if (put_user(len, optlen))
+ 		goto out;
+ 
+ 	if (copy_to_user(optval, &params, len))
+ 		goto out;
+ 
+ 	retval = 0;
+ 
+ out:
+ 	return retval;
+ }
+ 
  static int sctp_getsockopt(struct sock *sk, int level, int optname,
  			   char __user *optval, int __user *optlen)
  {
@@@ -7174,6 -7275,10 +7275,10 @@@
  		retval = sctp_getsockopt_scheduler_value(sk, len, optval,
  							 optlen);
  		break;
+ 	case SCTP_INTERLEAVING_SUPPORTED:
+ 		retval = sctp_getsockopt_interleaving_supported(sk, len, optval,
+ 								optlen);
+ 		break;
  	default:
  		retval = -ENOPROTOOPT;
  		break;
@@@ -7408,13 -7513,13 +7513,13 @@@ static int sctp_listen_start(struct soc
  	 * sockets.
  	 *
  	 */
- 	sk->sk_state = SCTP_SS_LISTENING;
+ 	inet_sk_set_state(sk, SCTP_SS_LISTENING);
  	if (!ep->base.bind_addr.port) {
  		if (sctp_autobind(sk))
  			return -EAGAIN;
  	} else {
  		if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
- 			sk->sk_state = SCTP_SS_CLOSED;
+ 			inet_sk_set_state(sk, SCTP_SS_CLOSED);
  			return -EADDRINUSE;
  		}
  	}
@@@ -7500,11 -7605,11 +7605,11 @@@ out
   * here, again, by modeling the current TCP/UDP code.  We don't have
   * a good way to test with it yet.
   */
 -unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
 +__poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
  {
  	struct sock *sk = sock->sk;
  	struct sctp_sock *sp = sctp_sk(sk);
 -	unsigned int mask;
 +	__poll_t mask;
  
  	poll_wait(file, sk_sleep(sk), wait);
  
@@@ -8411,11 -8516,7 +8516,7 @@@ static void sctp_sock_migrate(struct so
  
  	}
  
- 	sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp)
- 		sctp_skb_set_owner_r_frag(skb, newsk);
- 
- 	sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp)
- 		sctp_skb_set_owner_r_frag(skb, newsk);
+ 	sctp_for_each_rx_skb(assoc, newsk, sctp_skb_set_owner_r_frag);
  
  	/* Set the type of socket to indicate that it is peeled off from the
  	 * original UDP-style socket or created with the accept() call on a
@@@ -8441,10 -8542,10 +8542,10 @@@
  	 * is called, set RCV_SHUTDOWN flag.
  	 */
  	if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) {
- 		newsk->sk_state = SCTP_SS_CLOSED;
+ 		inet_sk_set_state(newsk, SCTP_SS_CLOSED);
  		newsk->sk_shutdown |= RCV_SHUTDOWN;
  	} else {
- 		newsk->sk_state = SCTP_SS_ESTABLISHED;
+ 		inet_sk_set_state(newsk, SCTP_SS_ESTABLISHED);
  	}
  
  	release_sock(newsk);
diff --combined net/sctp/stream.c
index 524dfeb94c41,06b644dd858c..cedf672487f9
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@@ -156,9 -156,9 +156,9 @@@ int sctp_stream_init(struct sctp_strea
  	sctp_stream_outq_migrate(stream, NULL, outcnt);
  	sched->sched_all(stream);
  
 -	i = sctp_stream_alloc_out(stream, outcnt, gfp);
 -	if (i)
 -		return i;
 +	ret = sctp_stream_alloc_out(stream, outcnt, gfp);
 +	if (ret)
 +		goto out;
  
  	stream->outcnt = outcnt;
  	for (i = 0; i < stream->outcnt; i++)
@@@ -167,20 -167,23 +167,21 @@@
  	sched->init(stream);
  
  in:
+ 	sctp_stream_interleave_init(stream);
  	if (!incnt)
  		goto out;
  
 -	i = sctp_stream_alloc_in(stream, incnt, gfp);
 -	if (i) {
 -		ret = -ENOMEM;
 -		goto free;
 +	ret = sctp_stream_alloc_in(stream, incnt, gfp);
 +	if (ret) {
 +		sched->free(stream);
 +		kfree(stream->out);
 +		stream->out = NULL;
 +		stream->outcnt = 0;
 +		goto out;
  	}
  
  	stream->incnt = incnt;
 -	goto out;
  
 -free:
 -	sched->free(stream);
 -	kfree(stream->out);
 -	stream->out = NULL;
  out:
  	return ret;
  }
@@@ -213,11 -216,13 +214,13 @@@ void sctp_stream_clear(struct sctp_stre
  {
  	int i;
  
- 	for (i = 0; i < stream->outcnt; i++)
- 		stream->out[i].ssn = 0;
+ 	for (i = 0; i < stream->outcnt; i++) {
+ 		stream->out[i].mid = 0;
+ 		stream->out[i].mid_uo = 0;
+ 	}
  
  	for (i = 0; i < stream->incnt; i++)
- 		stream->in[i].ssn = 0;
+ 		stream->in[i].mid = 0;
  }
  
  void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new)
@@@ -604,10 -609,10 +607,10 @@@ struct sctp_chunk *sctp_process_strrese
  		}
  
  		for (i = 0; i < nums; i++)
- 			stream->in[ntohs(str_p[i])].ssn = 0;
+ 			stream->in[ntohs(str_p[i])].mid = 0;
  	} else {
  		for (i = 0; i < stream->incnt; i++)
- 			stream->in[i].ssn = 0;
+ 			stream->in[i].mid = 0;
  	}
  
  	result = SCTP_STRRESET_PERFORMED;
@@@ -751,8 -756,7 +754,7 @@@ struct sctp_chunk *sctp_process_strrese
  	 *     performed.
  	 */
  	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
- 	sctp_ulpq_reasm_flushtsn(&asoc->ulpq, max_tsn_seen);
- 	sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+ 	asoc->stream.si->report_ftsn(&asoc->ulpq, max_tsn_seen);
  
  	/* G1: Compute an appropriate value for the Receiver's Next TSN -- the
  	 *     TSN that the peer should use to send the next DATA chunk.  The
@@@ -781,10 -785,12 +783,12 @@@
  	/* G5:  The next expected and outgoing SSNs MUST be reset to 0 for all
  	 *      incoming and outgoing streams.
  	 */
- 	for (i = 0; i < stream->outcnt; i++)
- 		stream->out[i].ssn = 0;
+ 	for (i = 0; i < stream->outcnt; i++) {
+ 		stream->out[i].mid = 0;
+ 		stream->out[i].mid_uo = 0;
+ 	}
  	for (i = 0; i < stream->incnt; i++)
- 		stream->in[i].ssn = 0;
+ 		stream->in[i].mid = 0;
  
  	result = SCTP_STRRESET_PERFORMED;
  
@@@ -974,11 -980,15 +978,15 @@@ struct sctp_chunk *sctp_process_strrese
  
  		if (result == SCTP_STRRESET_PERFORMED) {
  			if (nums) {
- 				for (i = 0; i < nums; i++)
- 					stream->out[ntohs(str_p[i])].ssn = 0;
+ 				for (i = 0; i < nums; i++) {
+ 					stream->out[ntohs(str_p[i])].mid = 0;
+ 					stream->out[ntohs(str_p[i])].mid_uo = 0;
+ 				}
  			} else {
- 				for (i = 0; i < stream->outcnt; i++)
- 					stream->out[i].ssn = 0;
+ 				for (i = 0; i < stream->outcnt; i++) {
+ 					stream->out[i].mid = 0;
+ 					stream->out[i].mid_uo = 0;
+ 				}
  			}
  
  			flags = SCTP_STREAM_RESET_OUTGOING_SSN;
@@@ -1021,8 -1031,7 +1029,7 @@@
  						&asoc->peer.tsn_map);
  			LIST_HEAD(temp);
  
- 			sctp_ulpq_reasm_flushtsn(&asoc->ulpq, mtsn);
- 			sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
+ 			asoc->stream.si->report_ftsn(&asoc->ulpq, mtsn);
  
  			sctp_tsnmap_init(&asoc->peer.tsn_map,
  					 SCTP_TSN_MAP_INITIAL,
@@@ -1040,10 -1049,12 +1047,12 @@@
  			asoc->ctsn_ack_point = asoc->next_tsn - 1;
  			asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
  
- 			for (i = 0; i < stream->outcnt; i++)
- 				stream->out[i].ssn = 0;
+ 			for (i = 0; i < stream->outcnt; i++) {
+ 				stream->out[i].mid = 0;
+ 				stream->out[i].mid_uo = 0;
+ 			}
  			for (i = 0; i < stream->incnt; i++)
- 				stream->in[i].ssn = 0;
+ 				stream->in[i].mid = 0;
  		}
  
  		for (i = 0; i < stream->outcnt; i++)
diff --combined net/smc/af_smc.c
index 449f62e1e270,daf8075f5a4c..b6e4e2e4fe12
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@@ -520,7 -520,7 +520,7 @@@ decline_rdma
  	smc->use_fallback = true;
  	if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
  		rc = smc_clc_send_decline(smc, reason_code);
- 		if (rc < sizeof(struct smc_clc_msg_decline))
+ 		if (rc < 0)
  			goto out_err;
  	}
  	goto out_connected;
@@@ -751,14 -751,16 +751,16 @@@ static void smc_listen_work(struct work
  {
  	struct smc_sock *new_smc = container_of(work, struct smc_sock,
  						smc_listen_work);
+ 	struct smc_clc_msg_proposal_prefix *pclc_prfx;
  	struct socket *newclcsock = new_smc->clcsock;
  	struct smc_sock *lsmc = new_smc->listen_smc;
  	struct smc_clc_msg_accept_confirm cclc;
  	int local_contact = SMC_REUSE_CONTACT;
  	struct sock *newsmcsk = &new_smc->sk;
- 	struct smc_clc_msg_proposal pclc;
+ 	struct smc_clc_msg_proposal *pclc;
  	struct smc_ib_device *smcibdev;
  	struct sockaddr_in peeraddr;
+ 	u8 buf[SMC_CLC_MAX_LEN];
  	struct smc_link *link;
  	int reason_code = 0;
  	int rc = 0, len;
@@@ -775,7 -777,7 +777,7 @@@
  	/* do inband token exchange -
  	 *wait for and receive SMC Proposal CLC message
  	 */
- 	reason_code = smc_clc_wait_msg(new_smc, &pclc, sizeof(pclc),
+ 	reason_code = smc_clc_wait_msg(new_smc, &buf, sizeof(buf),
  				       SMC_CLC_PROPOSAL);
  	if (reason_code < 0)
  		goto out_err;
@@@ -804,8 -806,11 +806,11 @@@
  		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
  		goto decline_rdma;
  	}
- 	if ((pclc.outgoing_subnet != subnet) ||
- 	    (pclc.prefix_len != prefix_len)) {
+ 
+ 	pclc = (struct smc_clc_msg_proposal *)&buf;
+ 	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
+ 	if (pclc_prfx->outgoing_subnet != subnet ||
+ 	    pclc_prfx->prefix_len != prefix_len) {
  		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
  		goto decline_rdma;
  	}
@@@ -816,7 -821,7 +821,7 @@@
  	/* allocate connection / link group */
  	mutex_lock(&smc_create_lgr_pending);
  	local_contact = smc_conn_create(new_smc, peeraddr.sin_addr.s_addr,
- 					smcibdev, ibport, &pclc.lcl, 0);
+ 					smcibdev, ibport, &pclc->lcl, 0);
  	if (local_contact < 0) {
  		rc = local_contact;
  		if (rc == -ENOMEM)
@@@ -879,11 -884,9 +884,9 @@@
  		}
  		/* QP confirmation over RoCE fabric */
  		reason_code = smc_serv_conf_first_link(new_smc);
- 		if (reason_code < 0) {
+ 		if (reason_code < 0)
  			/* peer is not aware of a problem */
- 			rc = reason_code;
  			goto out_err_unlock;
- 		}
  		if (reason_code > 0)
  			goto decline_rdma_unlock;
  	}
@@@ -916,8 -919,7 +919,7 @@@ decline_rdma
  	smc_conn_free(&new_smc->conn);
  	new_smc->use_fallback = true;
  	if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
- 		rc = smc_clc_send_decline(new_smc, reason_code);
- 		if (rc < sizeof(struct smc_clc_msg_decline))
+ 		if (smc_clc_send_decline(new_smc, reason_code) < 0)
  			goto out_err;
  	}
  	goto out_connected;
@@@ -1107,7 -1109,7 +1109,7 @@@ out
  	return rc;
  }
  
 -static unsigned int smc_accept_poll(struct sock *parent)
 +static __poll_t smc_accept_poll(struct sock *parent)
  {
  	struct smc_sock *isk;
  	struct sock *sk;
@@@ -1126,11 -1128,11 +1128,11 @@@
  	return 0;
  }
  
 -static unsigned int smc_poll(struct file *file, struct socket *sock,
 +static __poll_t smc_poll(struct file *file, struct socket *sock,
  			     poll_table *wait)
  {
  	struct sock *sk = sock->sk;
 -	unsigned int mask = 0;
 +	__poll_t mask = 0;
  	struct smc_sock *smc;
  	int rc;
  
diff --combined net/smc/smc_clc.c
index 511548085d16,abf7ceb6690b..8ac51583a063
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@@ -22,6 -22,54 +22,54 @@@
  #include "smc_clc.h"
  #include "smc_ib.h"
  
+ /* check if received message has a correct header length and contains valid
+  * heading and trailing eyecatchers
+  */
+ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
+ {
+ 	struct smc_clc_msg_proposal_prefix *pclc_prfx;
+ 	struct smc_clc_msg_accept_confirm *clc;
+ 	struct smc_clc_msg_proposal *pclc;
+ 	struct smc_clc_msg_decline *dclc;
+ 	struct smc_clc_msg_trail *trl;
+ 
+ 	if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)))
+ 		return false;
+ 	switch (clcm->type) {
+ 	case SMC_CLC_PROPOSAL:
+ 		pclc = (struct smc_clc_msg_proposal *)clcm;
+ 		pclc_prfx = smc_clc_proposal_get_prefix(pclc);
+ 		if (ntohs(pclc->hdr.length) !=
+ 			sizeof(*pclc) + ntohs(pclc->iparea_offset) +
+ 			sizeof(*pclc_prfx) +
+ 			pclc_prfx->ipv6_prefixes_cnt *
+ 				sizeof(struct smc_clc_ipv6_prefix) +
+ 			sizeof(*trl))
+ 			return false;
+ 		trl = (struct smc_clc_msg_trail *)
+ 			((u8 *)pclc + ntohs(pclc->hdr.length) - sizeof(*trl));
+ 		break;
+ 	case SMC_CLC_ACCEPT:
+ 	case SMC_CLC_CONFIRM:
+ 		clc = (struct smc_clc_msg_accept_confirm *)clcm;
+ 		if (ntohs(clc->hdr.length) != sizeof(*clc))
+ 			return false;
+ 		trl = &clc->trl;
+ 		break;
+ 	case SMC_CLC_DECLINE:
+ 		dclc = (struct smc_clc_msg_decline *)clcm;
+ 		if (ntohs(dclc->hdr.length) != sizeof(*dclc))
+ 			return false;
+ 		trl = &dclc->trl;
+ 		break;
+ 	default:
+ 		return false;
+ 	}
+ 	if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)))
+ 		return false;
+ 	return true;
+ }
+ 
  /* Wait for data on the tcp-socket, analyze received data
   * Returns:
   * 0 if success and it was not a decline that we received.
@@@ -35,7 -83,7 +83,7 @@@ int smc_clc_wait_msg(struct smc_sock *s
  	struct smc_clc_msg_hdr *clcm = buf;
  	struct msghdr msg = {NULL, 0};
  	int reason_code = 0;
 -	struct kvec vec;
 +	struct kvec vec = {buf, buflen};
  	int len, datlen;
  	int krflags;
  
@@@ -43,15 -91,12 +91,15 @@@
  	 * so we don't consume any subsequent CLC message or payload data
  	 * in the TCP byte stream
  	 */
 -	vec.iov_base = buf;
 -	vec.iov_len = buflen;
 +	/*
 +	 * Caller must make sure that buflen is no less than
 +	 * sizeof(struct smc_clc_msg_hdr)
 +	 */
  	krflags = MSG_PEEK | MSG_WAITALL;
  	smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
 -	len = kernel_recvmsg(smc->clcsock, &msg, &vec, 1,
 -			     sizeof(struct smc_clc_msg_hdr), krflags);
 +	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1,
 +			sizeof(struct smc_clc_msg_hdr));
 +	len = sock_recvmsg(smc->clcsock, &msg, krflags);
  	if (signal_pending(current)) {
  		reason_code = -EINTR;
  		clc_sk->sk_err = EINTR;
@@@ -75,9 -120,7 +123,7 @@@
  	}
  	datlen = ntohs(clcm->length);
  	if ((len < sizeof(struct smc_clc_msg_hdr)) ||
- 	    (datlen < sizeof(struct smc_clc_msg_decline)) ||
- 	    (datlen > sizeof(struct smc_clc_msg_accept_confirm)) ||
- 	    memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) ||
+ 	    (datlen > buflen) ||
  	    ((clcm->type != SMC_CLC_DECLINE) &&
  	     (clcm->type != expected_type))) {
  		smc->sk.sk_err = EPROTO;
@@@ -86,12 -129,13 +132,12 @@@
  	}
  
  	/* receive the complete CLC message */
 -	vec.iov_base = buf;
 -	vec.iov_len = buflen;
  	memset(&msg, 0, sizeof(struct msghdr));
 +	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, buflen);
  	krflags = MSG_WAITALL;
  	smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
 -	len = kernel_recvmsg(smc->clcsock, &msg, &vec, 1, datlen, krflags);
 +	len = sock_recvmsg(smc->clcsock, &msg, krflags);
- 	if (len < datlen) {
+ 	if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
  		smc->sk.sk_err = EPROTO;
  		reason_code = -EPROTO;
  		goto out;
@@@ -135,7 -179,7 +181,7 @@@ int smc_clc_send_decline(struct smc_soc
  		smc->sk.sk_err = EPROTO;
  	if (len < 0)
  		smc->sk.sk_err = -len;
- 	return len;
+ 	return sock_error(&smc->sk);
  }
  
  /* send CLC PROPOSAL message across internal TCP socket */
@@@ -143,33 -187,43 +189,43 @@@ int smc_clc_send_proposal(struct smc_so
  			  struct smc_ib_device *smcibdev,
  			  u8 ibport)
  {
+ 	struct smc_clc_msg_proposal_prefix pclc_prfx;
  	struct smc_clc_msg_proposal pclc;
+ 	struct smc_clc_msg_trail trl;
  	int reason_code = 0;
+ 	struct kvec vec[3];
  	struct msghdr msg;
- 	struct kvec vec;
- 	int len, rc;
+ 	int len, plen, rc;
  
  	/* send SMC Proposal CLC message */
+ 	plen = sizeof(pclc) + sizeof(pclc_prfx) + sizeof(trl);
  	memset(&pclc, 0, sizeof(pclc));
  	memcpy(pclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
  	pclc.hdr.type = SMC_CLC_PROPOSAL;
- 	pclc.hdr.length = htons(sizeof(pclc));
+ 	pclc.hdr.length = htons(plen);
  	pclc.hdr.version = SMC_CLC_V1;		/* SMC version */
  	memcpy(pclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid));
  	memcpy(&pclc.lcl.gid, &smcibdev->gid[ibport - 1], SMC_GID_SIZE);
  	memcpy(&pclc.lcl.mac, &smcibdev->mac[ibport - 1], ETH_ALEN);
+ 	pclc.iparea_offset = htons(0);
  
+ 	memset(&pclc_prfx, 0, sizeof(pclc_prfx));
  	/* determine subnet and mask from internal TCP socket */
- 	rc = smc_netinfo_by_tcpsk(smc->clcsock, &pclc.outgoing_subnet,
- 				  &pclc.prefix_len);
+ 	rc = smc_netinfo_by_tcpsk(smc->clcsock, &pclc_prfx.outgoing_subnet,
+ 				  &pclc_prfx.prefix_len);
  	if (rc)
  		return SMC_CLC_DECL_CNFERR; /* configuration error */
- 	memcpy(pclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
+ 	pclc_prfx.ipv6_prefixes_cnt = 0;
+ 	memcpy(trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
  	memset(&msg, 0, sizeof(msg));
- 	vec.iov_base = &pclc;
- 	vec.iov_len = sizeof(pclc);
+ 	vec[0].iov_base = &pclc;
+ 	vec[0].iov_len = sizeof(pclc);
+ 	vec[1].iov_base = &pclc_prfx;
+ 	vec[1].iov_len = sizeof(pclc_prfx);
+ 	vec[2].iov_base = &trl;
+ 	vec[2].iov_len = sizeof(trl);
  	/* due to the few bytes needed for clc-handshake this cannot block */
- 	len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, sizeof(pclc));
+ 	len = kernel_sendmsg(smc->clcsock, &msg, vec, 3, plen);
  	if (len < sizeof(pclc)) {
  		if (len >= 0) {
  			reason_code = -ENETUNREACH;
diff --combined net/socket.c
index 092baa464afc,bbd2e9ceb692..60d05479b2c1
--- a/net/socket.c
+++ b/net/socket.c
@@@ -118,7 -118,7 +118,7 @@@ static ssize_t sock_write_iter(struct k
  static int sock_mmap(struct file *file, struct vm_area_struct *vma);
  
  static int sock_close(struct inode *inode, struct file *file);
 -static unsigned int sock_poll(struct file *file,
 +static __poll_t sock_poll(struct file *file,
  			      struct poll_table_struct *wait);
  static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
  #ifdef CONFIG_COMPAT
@@@ -163,12 -163,6 +163,6 @@@ static DEFINE_SPINLOCK(net_family_lock)
  static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
  
  /*
-  *	Statistics counters of the socket lists
-  */
- 
- static DEFINE_PER_CPU(int, sockets_in_use);
- 
- /*
   * Support routines.
   * Move socket addresses back and forth across the kernel/user
   * divide and look after the messy bits.
@@@ -578,7 -572,6 +572,6 @@@ struct socket *sock_alloc(void
  	inode->i_gid = current_fsgid();
  	inode->i_op = &sockfs_inode_ops;
  
- 	this_cpu_add(sockets_in_use, 1);
  	return sock;
  }
  EXPORT_SYMBOL(sock_alloc);
@@@ -605,7 -598,6 +598,6 @@@ void sock_release(struct socket *sock
  	if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
  		pr_err("%s: fasync list not empty!\n", __func__);
  
- 	this_cpu_sub(sockets_in_use, 1);
  	if (!sock->file) {
  		iput(SOCK_INODE(sock));
  		return;
@@@ -1095,9 -1087,9 +1087,9 @@@ out_release
  EXPORT_SYMBOL(sock_create_lite);
  
  /* No kernel lock held - perfect */
 -static unsigned int sock_poll(struct file *file, poll_table *wait)
 +static __poll_t sock_poll(struct file *file, poll_table *wait)
  {
 -	unsigned int busy_flag = 0;
 +	__poll_t busy_flag = 0;
  	struct socket *sock;
  
  	/*
@@@ -2622,17 -2614,8 +2614,8 @@@ core_initcall(sock_init);	/* early init
  #ifdef CONFIG_PROC_FS
  void socket_seq_show(struct seq_file *seq)
  {
- 	int cpu;
- 	int counter = 0;
- 
- 	for_each_possible_cpu(cpu)
- 	    counter += per_cpu(sockets_in_use, cpu);
- 
- 	/* It can be negative, by the way. 8) */
- 	if (counter < 0)
- 		counter = 0;
- 
- 	seq_printf(seq, "sockets: used %d\n", counter);
+ 	seq_printf(seq, "sockets: used %d\n",
+ 		   sock_inuse_get(seq->private));
  }
  #endif				/* CONFIG_PROC_FS */
  
diff --combined net/tipc/group.c
index 5f4ffae807ee,fb7fe971e51b..3e8268d966fa
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@@ -64,7 -64,7 +64,7 @@@ enum mbr_state 
  struct tipc_member {
  	struct rb_node tree_node;
  	struct list_head list;
- 	struct list_head congested;
+ 	struct list_head small_win;
  	struct sk_buff *event_msg;
  	struct sk_buff_head deferredq;
  	struct tipc_group *group;
@@@ -82,7 -82,7 +82,7 @@@
  
  struct tipc_group {
  	struct rb_root members;
- 	struct list_head congested;
+ 	struct list_head small_win;
  	struct list_head pending;
  	struct list_head active;
  	struct list_head reclaiming;
@@@ -109,8 -109,7 +109,8 @@@ static void tipc_group_proto_xmit(struc
  static void tipc_group_decr_active(struct tipc_group *grp,
  				   struct tipc_member *m)
  {
 -	if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING)
 +	if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING ||
 +	    m->state == MBR_REMITTED)
  		grp->active_cnt--;
  }
  
@@@ -137,12 -136,12 +137,12 @@@ u16 tipc_group_bc_snd_nxt(struct tipc_g
  	return grp->bc_snd_nxt;
  }
  
- static bool tipc_group_is_enabled(struct tipc_member *m)
+ static bool tipc_group_is_receiver(struct tipc_member *m)
  {
  	return m->state != MBR_QUARANTINED && m->state != MBR_LEAVING;
  }
  
- static bool tipc_group_is_receiver(struct tipc_member *m)
+ static bool tipc_group_is_sender(struct tipc_member *m)
  {
  	return m && m->state >= MBR_JOINED;
  }
@@@ -169,7 -168,7 +169,7 @@@ struct tipc_group *tipc_group_create(st
  	if (!grp)
  		return NULL;
  	tipc_nlist_init(&grp->dests, tipc_own_addr(net));
- 	INIT_LIST_HEAD(&grp->congested);
+ 	INIT_LIST_HEAD(&grp->small_win);
  	INIT_LIST_HEAD(&grp->active);
  	INIT_LIST_HEAD(&grp->pending);
  	INIT_LIST_HEAD(&grp->reclaiming);
@@@ -233,7 -232,7 +233,7 @@@ static struct tipc_member *tipc_group_f
  	struct tipc_member *m;
  
  	m = tipc_group_find_member(grp, node, port);
- 	if (m && tipc_group_is_enabled(m))
+ 	if (m && tipc_group_is_receiver(m))
  		return m;
  	return NULL;
  }
@@@ -286,7 -285,7 +286,7 @@@ static struct tipc_member *tipc_group_c
  	if (!m)
  		return NULL;
  	INIT_LIST_HEAD(&m->list);
- 	INIT_LIST_HEAD(&m->congested);
+ 	INIT_LIST_HEAD(&m->small_win);
  	__skb_queue_head_init(&m->deferredq);
  	m->group = grp;
  	m->node = node;
@@@ -315,7 -314,7 +315,7 @@@ static void tipc_group_delete_member(st
  		grp->bc_ackers--;
  
  	list_del_init(&m->list);
- 	list_del_init(&m->congested);
+ 	list_del_init(&m->small_win);
  	tipc_group_decr_active(grp, m);
  
  	/* If last member on a node, remove node from dest list */
@@@ -344,7 -343,7 +344,7 @@@ void tipc_group_update_member(struct ti
  	struct tipc_group *grp = m->group;
  	struct tipc_member *_m, *tmp;
  
- 	if (!tipc_group_is_enabled(m))
+ 	if (!tipc_group_is_receiver(m))
  		return;
  
  	m->window -= len;
@@@ -352,16 -351,14 +352,14 @@@
  	if (m->window >= ADV_IDLE)
  		return;
  
- 	list_del_init(&m->congested);
+ 	list_del_init(&m->small_win);
  
- 	/* Sort member into congested members' list */
- 	list_for_each_entry_safe(_m, tmp, &grp->congested, congested) {
- 		if (m->window > _m->window)
- 			continue;
- 		list_add_tail(&m->congested, &_m->congested);
- 		return;
+ 	/* Sort member into small_window members' list */
+ 	list_for_each_entry_safe(_m, tmp, &grp->small_win, small_win) {
+ 		if (_m->window > m->window)
+ 			break;
  	}
- 	list_add_tail(&m->congested, &grp->congested);
+ 	list_add_tail(&m->small_win, &_m->small_win);
  }
  
  void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack)
@@@ -373,7 -370,7 +371,7 @@@
  
  	for (n = rb_first(&grp->members); n; n = rb_next(n)) {
  		m = container_of(n, struct tipc_member, tree_node);
- 		if (tipc_group_is_enabled(m)) {
+ 		if (tipc_group_is_receiver(m)) {
  			tipc_group_update_member(m, len);
  			m->bc_acked = prev;
  			ackers++;
@@@ -428,10 -425,10 +426,10 @@@ bool tipc_group_bc_cong(struct tipc_gro
  	if (grp->bc_ackers)
  		return true;
  
- 	if (list_empty(&grp->congested))
+ 	if (list_empty(&grp->small_win))
  		return false;
  
- 	m = list_first_entry(&grp->congested, struct tipc_member, congested);
+ 	m = list_first_entry(&grp->small_win, struct tipc_member, small_win);
  	if (m->window >= len)
  		return false;
  
@@@ -486,7 -483,7 +484,7 @@@ void tipc_group_filter_msg(struct tipc_
  		goto drop;
  
  	m = tipc_group_find_member(grp, node, port);
- 	if (!tipc_group_is_receiver(m))
+ 	if (!tipc_group_is_sender(m))
  		goto drop;
  
  	if (less(msg_grp_bc_seqno(hdr), m->bc_rcv_nxt))
@@@ -563,7 -560,7 +561,7 @@@ void tipc_group_update_rcv_win(struct t
  	int max_active = grp->max_active;
  	int reclaim_limit = max_active * 3 / 4;
  	int active_cnt = grp->active_cnt;
 -	struct tipc_member *m, *rm;
 +	struct tipc_member *m, *rm, *pm;
  
  	m = tipc_group_find_member(grp, node, port);
  	if (!m)
@@@ -606,17 -603,6 +604,17 @@@
  			pr_warn_ratelimited("Rcv unexpected msg after REMIT\n");
  			tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
  		}
 +		grp->active_cnt--;
 +		list_del_init(&m->list);
 +		if (list_empty(&grp->pending))
 +			return;
 +
 +		/* Set oldest pending member to active and advertise */
 +		pm = list_first_entry(&grp->pending, struct tipc_member, list);
 +		pm->state = MBR_ACTIVE;
 +		list_move_tail(&pm->list, &grp->active);
 +		grp->active_cnt++;
 +		tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
  		break;
  	case MBR_RECLAIMING:
  	case MBR_DISCOVERED:
@@@ -703,7 -689,7 +701,7 @@@ void tipc_group_proto_rcv(struct tipc_g
  			msg_set_grp_bc_seqno(ehdr, m->bc_syncpt);
  			__skb_queue_tail(inputq, m->event_msg);
  		}
- 		list_del_init(&m->congested);
+ 		list_del_init(&m->small_win);
  		tipc_group_update_member(m, 0);
  		return;
  	case GRP_LEAVE_MSG:
@@@ -711,7 -697,7 +709,7 @@@
  			return;
  		m->bc_syncpt = msg_grp_bc_syncpt(hdr);
  		list_del_init(&m->list);
- 		list_del_init(&m->congested);
+ 		list_del_init(&m->small_win);
  		*usr_wakeup = true;
  
  		/* Wait until WITHDRAW event is received */
@@@ -731,7 -717,7 +729,7 @@@
  		m->window += msg_adv_win(hdr);
  		*usr_wakeup = m->usr_pending;
  		m->usr_pending = false;
- 		list_del_init(&m->congested);
+ 		list_del_init(&m->small_win);
  		return;
  	case GRP_ACK_MSG:
  		if (!m)
@@@ -754,14 -740,14 +752,14 @@@
  		if (!m || m->state != MBR_RECLAIMING)
  			return;
  
 -		list_del_init(&m->list);
 -		grp->active_cnt--;
  		remitted = msg_grp_remitted(hdr);
  
  		/* Messages preceding the REMIT still in receive queue */
  		if (m->advertised > remitted) {
  			m->state = MBR_REMITTED;
  			in_flight = m->advertised - remitted;
 +			m->advertised = ADV_IDLE + in_flight;
 +			return;
  		}
  		/* All messages preceding the REMIT have been read */
  		if (m->advertised <= remitted) {
@@@ -773,8 -759,6 +771,8 @@@
  			tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
  
  		m->advertised = ADV_IDLE + in_flight;
 +		grp->active_cnt--;
 +		list_del_init(&m->list);
  
  		/* Set oldest pending member to active and advertise */
  		if (list_empty(&grp->pending))
@@@ -851,10 -835,7 +849,7 @@@ void tipc_group_member_evt(struct tipc_
  		m->instance = instance;
  		TIPC_SKB_CB(skb)->orig_member = m->instance;
  		tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, xmitq);
- 		if (m->window < ADV_IDLE)
- 			tipc_group_update_member(m, 0);
- 		else
- 			list_del_init(&m->congested);
+ 		tipc_group_update_member(m, 0);
  	} else if (event == TIPC_WITHDRAWN) {
  		if (!m)
  			goto drop;
@@@ -887,7 -868,7 +882,7 @@@
  			__skb_queue_tail(inputq, skb);
  		}
  		list_del_init(&m->list);
- 		list_del_init(&m->congested);
+ 		list_del_init(&m->small_win);
  	}
  	*sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
  	return;
diff --combined net/tipc/socket.c
index 2aa46e8cd8fe,b51d5cba5094..cf9644ea13ba
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@@ -710,13 -710,13 +710,13 @@@ static int tipc_getname(struct socket *
   * imply that the operation will succeed, merely that it should be performed
   * and will not block.
   */
 -static unsigned int tipc_poll(struct file *file, struct socket *sock,
 +static __poll_t tipc_poll(struct file *file, struct socket *sock,
  			      poll_table *wait)
  {
  	struct sock *sk = sock->sk;
  	struct tipc_sock *tsk = tipc_sk(sk);
  	struct tipc_group *grp = tsk->group;
 -	u32 revents = 0;
 +	__poll_t revents = 0;
  
  	sock_poll_wait(file, sk_sleep(sk), wait);
  
@@@ -2640,9 -2640,7 +2640,7 @@@ void tipc_sk_reinit(struct net *net
  	rhashtable_walk_enter(&tn->sk_rht, &iter);
  
  	do {
- 		tsk = ERR_PTR(rhashtable_walk_start(&iter));
- 		if (IS_ERR(tsk))
- 			goto walk_stop;
+ 		rhashtable_walk_start(&iter);
  
  		while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
  			spin_lock_bh(&tsk->sk.sk_lock.slock);
@@@ -2651,7 -2649,7 +2649,7 @@@
  			msg_set_orignode(msg, tn->own_addr);
  			spin_unlock_bh(&tsk->sk.sk_lock.slock);
  		}
- walk_stop:
+ 
  		rhashtable_walk_stop(&iter);
  	} while (tsk == ERR_PTR(-EAGAIN));
  }
diff --combined net/wireless/nl80211.c
index 2b3dbcd40e46,79a9ff682b7e..c084dd2205ac
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@@ -734,11 -734,12 +734,12 @@@ struct key_parse 
  	bool def_uni, def_multi;
  };
  
- static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k)
+ static int nl80211_parse_key_new(struct genl_info *info, struct nlattr *key,
+ 				 struct key_parse *k)
  {
  	struct nlattr *tb[NL80211_KEY_MAX + 1];
  	int err = nla_parse_nested(tb, NL80211_KEY_MAX, key,
- 				   nl80211_key_policy, NULL);
+ 				   nl80211_key_policy, info->extack);
  	if (err)
  		return err;
  
@@@ -771,7 -772,8 +772,8 @@@
  	if (tb[NL80211_KEY_TYPE]) {
  		k->type = nla_get_u32(tb[NL80211_KEY_TYPE]);
  		if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES)
- 			return -EINVAL;
+ 			return genl_err_attr(info, -EINVAL,
+ 					     tb[NL80211_KEY_TYPE]);
  	}
  
  	if (tb[NL80211_KEY_DEFAULT_TYPES]) {
@@@ -779,7 -781,8 +781,8 @@@
  
  		err = nla_parse_nested(kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1,
  				       tb[NL80211_KEY_DEFAULT_TYPES],
- 				       nl80211_key_default_policy, NULL);
+ 				       nl80211_key_default_policy,
+ 				       info->extack);
  		if (err)
  			return err;
  
@@@ -820,8 -823,10 +823,10 @@@ static int nl80211_parse_key_old(struc
  
  	if (info->attrs[NL80211_ATTR_KEY_TYPE]) {
  		k->type = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]);
- 		if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES)
+ 		if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES) {
+ 			GENL_SET_ERR_MSG(info, "key type out of range");
  			return -EINVAL;
+ 		}
  	}
  
  	if (info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES]) {
@@@ -850,31 -855,42 +855,42 @@@ static int nl80211_parse_key(struct gen
  	k->type = -1;
  
  	if (info->attrs[NL80211_ATTR_KEY])
- 		err = nl80211_parse_key_new(info->attrs[NL80211_ATTR_KEY], k);
+ 		err = nl80211_parse_key_new(info, info->attrs[NL80211_ATTR_KEY], k);
  	else
  		err = nl80211_parse_key_old(info, k);
  
  	if (err)
  		return err;
  
- 	if (k->def && k->defmgmt)
+ 	if (k->def && k->defmgmt) {
+ 		GENL_SET_ERR_MSG(info, "key with def && defmgmt is invalid");
  		return -EINVAL;
+ 	}
  
  	if (k->defmgmt) {
- 		if (k->def_uni || !k->def_multi)
+ 		if (k->def_uni || !k->def_multi) {
+ 			GENL_SET_ERR_MSG(info, "defmgmt key must be mcast");
  			return -EINVAL;
+ 		}
  	}
  
  	if (k->idx != -1) {
  		if (k->defmgmt) {
- 			if (k->idx < 4 || k->idx > 5)
+ 			if (k->idx < 4 || k->idx > 5) {
+ 				GENL_SET_ERR_MSG(info,
+ 						 "defmgmt key idx not 4 or 5");
  				return -EINVAL;
+ 			}
  		} else if (k->def) {
- 			if (k->idx < 0 || k->idx > 3)
+ 			if (k->idx < 0 || k->idx > 3) {
+ 				GENL_SET_ERR_MSG(info, "def key idx not 0-3");
  				return -EINVAL;
+ 			}
  		} else {
- 			if (k->idx < 0 || k->idx > 5)
+ 			if (k->idx < 0 || k->idx > 5) {
+ 				GENL_SET_ERR_MSG(info, "key idx not 0-5");
  				return -EINVAL;
+ 			}
  		}
  	}
  
@@@ -883,8 -899,9 +899,9 @@@
  
  static struct cfg80211_cached_keys *
  nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
- 		       struct nlattr *keys, bool *no_ht)
+ 		       struct genl_info *info, bool *no_ht)
  {
+ 	struct nlattr *keys = info->attrs[NL80211_ATTR_KEYS];
  	struct key_parse parse;
  	struct nlattr *key;
  	struct cfg80211_cached_keys *result;
@@@ -909,17 -926,22 +926,22 @@@
  		memset(&parse, 0, sizeof(parse));
  		parse.idx = -1;
  
- 		err = nl80211_parse_key_new(key, &parse);
+ 		err = nl80211_parse_key_new(info, key, &parse);
  		if (err)
  			goto error;
  		err = -EINVAL;
  		if (!parse.p.key)
  			goto error;
- 		if (parse.idx < 0 || parse.idx > 3)
+ 		if (parse.idx < 0 || parse.idx > 3) {
+ 			GENL_SET_ERR_MSG(info, "key index out of range [0-3]");
  			goto error;
+ 		}
  		if (parse.def) {
- 			if (def)
+ 			if (def) {
+ 				GENL_SET_ERR_MSG(info,
+ 						 "only one key can be default");
  				goto error;
+ 			}
  			def = 1;
  			result->def = parse.idx;
  			if (!parse.def_uni || !parse.def_multi)
@@@ -932,6 -954,7 +954,7 @@@
  			goto error;
  		if (parse.p.cipher != WLAN_CIPHER_SUITE_WEP40 &&
  		    parse.p.cipher != WLAN_CIPHER_SUITE_WEP104) {
+ 			GENL_SET_ERR_MSG(info, "connect key must be WEP");
  			err = -EINVAL;
  			goto error;
  		}
@@@ -947,6 -970,7 +970,7 @@@
  
  	if (result->def < 0) {
  		err = -EINVAL;
+ 		GENL_SET_ERR_MSG(info, "need a default/TX key");
  		goto error;
  	}
  
@@@ -7817,6 -7841,11 +7841,11 @@@ static int nl80211_send_bss(struct sk_b
  			      intbss->ts_boottime, NL80211_BSS_PAD))
  		goto nla_put_failure;
  
+ 	if (!nl80211_put_signal(msg, intbss->pub.chains,
+ 				intbss->pub.chain_signal,
+ 				NL80211_BSS_CHAIN_SIGNAL))
+ 		goto nla_put_failure;
+ 
  	switch (rdev->wiphy.signal_type) {
  	case CFG80211_SIGNAL_TYPE_MBM:
  		if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal))
@@@ -8613,9 -8642,7 +8642,7 @@@ static int nl80211_join_ibss(struct sk_
  	if (ibss.privacy && info->attrs[NL80211_ATTR_KEYS]) {
  		bool no_ht = false;
  
- 		connkeys = nl80211_parse_connkeys(rdev,
- 					  info->attrs[NL80211_ATTR_KEYS],
- 					  &no_ht);
+ 		connkeys = nl80211_parse_connkeys(rdev, info, &no_ht);
  		if (IS_ERR(connkeys))
  			return PTR_ERR(connkeys);
  
@@@ -9019,8 -9046,7 +9046,7 @@@ static int nl80211_connect(struct sk_bu
  	}
  
  	if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) {
- 		connkeys = nl80211_parse_connkeys(rdev,
- 					  info->attrs[NL80211_ATTR_KEYS], NULL);
+ 		connkeys = nl80211_parse_connkeys(rdev, info, NULL);
  		if (IS_ERR(connkeys))
  			return PTR_ERR(connkeys);
  	}
@@@ -11361,8 -11387,7 +11387,8 @@@ static int nl80211_nan_add_func(struct 
  		break;
  	case NL80211_NAN_FUNC_FOLLOW_UP:
  		if (!tb[NL80211_NAN_FUNC_FOLLOW_UP_ID] ||
 -		    !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID]) {
 +		    !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID] ||
 +		    !tb[NL80211_NAN_FUNC_FOLLOW_UP_DEST]) {
  			err = -EINVAL;
  			goto out;
  		}
@@@ -13945,7 -13970,7 +13971,7 @@@ void nl80211_send_disconnected(struct c
  
  	if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
  	    nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
- 	    (from_ap && reason &&
+ 	    (reason &&
  	     nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason)) ||
  	    (from_ap &&
  	     nla_put_flag(msg, NL80211_ATTR_DISCONNECTED_BY_AP)) ||
diff --combined net/xfrm/xfrm_input.c
index 5b2409746ae0,26b10eb7a206..1472c0857975
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@@ -257,7 -257,6 +257,6 @@@ int xfrm_input(struct sk_buff *skb, in
  
  		if (xo && (xo->flags & CRYPTO_DONE)) {
  			crypto_done = true;
- 			x = xfrm_input_state(skb);
  			family = XFRM_SPI_SKB_CB(skb)->family;
  
  			if (!(xo->status & CRYPTO_SUCCESS)) {
@@@ -518,7 -517,7 +517,7 @@@ int xfrm_trans_queue(struct sk_buff *sk
  		return -ENOBUFS;
  
  	XFRM_TRANS_SKB_CB(skb)->finish = finish;
 -	skb_queue_tail(&trans->queue, skb);
 +	__skb_queue_tail(&trans->queue, skb);
  	tasklet_schedule(&trans->tasklet);
  	return 0;
  }
diff --combined net/xfrm/xfrm_policy.c
index 2ef6db98e9ba,d8a8129b9232..26f56e64654a
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@@ -54,7 -54,7 +54,7 @@@ static struct xfrm_policy_afinfo const 
  static struct kmem_cache *xfrm_dst_cache __read_mostly;
  static __read_mostly seqcount_t xfrm_policy_hash_generation;
  
- static void xfrm_init_pmtu(struct dst_entry *dst);
+ static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
  static int stale_bundle(struct dst_entry *dst);
  static int xfrm_bundle_ok(struct xfrm_dst *xdst);
  static void xfrm_policy_queue_process(struct timer_list *t);
@@@ -609,8 -609,7 +609,8 @@@ static void xfrm_hash_rebuild(struct wo
  
  	/* re-insert all policies by order of creation */
  	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
 -		if (xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
 +		if (policy->walk.dead ||
 +		    xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
  			/* skip socket policies */
  			continue;
  		}
@@@ -1258,7 -1257,7 +1258,7 @@@ EXPORT_SYMBOL(xfrm_policy_delete)
  
  int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
  {
- 	struct net *net = xp_net(pol);
+ 	struct net *net = sock_net(sk);
  	struct xfrm_policy *old_pol;
  
  #ifdef CONFIG_XFRM_SUB_POLICY
@@@ -1545,7 -1544,9 +1545,9 @@@ static inline int xfrm_fill_dst(struct 
   */
  
  static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
- 					    struct xfrm_state **xfrm, int nx,
+ 					    struct xfrm_state **xfrm,
+ 					    struct xfrm_dst **bundle,
+ 					    int nx,
  					    const struct flowi *fl,
  					    struct dst_entry *dst)
  {
@@@ -1553,8 -1554,8 +1555,8 @@@
  	unsigned long now = jiffies;
  	struct net_device *dev;
  	struct xfrm_mode *inner_mode;
- 	struct dst_entry *dst_prev = NULL;
- 	struct dst_entry *dst0 = NULL;
+ 	struct xfrm_dst *xdst_prev = NULL;
+ 	struct xfrm_dst *xdst0 = NULL;
  	int i = 0;
  	int err;
  	int header_len = 0;
@@@ -1580,13 -1581,14 +1582,14 @@@
  			goto put_states;
  		}
  
- 		if (!dst_prev)
- 			dst0 = dst1;
+ 		bundle[i] = xdst;
+ 		if (!xdst_prev)
+ 			xdst0 = xdst;
  		else
  			/* Ref count is taken during xfrm_alloc_dst()
  			 * No need to do dst_clone() on dst1
  			 */
- 			dst_prev->child = dst1;
+ 			xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
  
  		if (xfrm[i]->sel.family == AF_UNSPEC) {
  			inner_mode = xfrm_ip2inner_mode(xfrm[i],
@@@ -1623,8 -1625,7 +1626,7 @@@
  		dst1->input = dst_discard;
  		dst1->output = inner_mode->afinfo->output;
  
- 		dst1->next = dst_prev;
- 		dst_prev = dst1;
+ 		xdst_prev = xdst;
  
  		header_len += xfrm[i]->props.header_len;
  		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
@@@ -1632,40 -1633,39 +1634,39 @@@
  		trailer_len += xfrm[i]->props.trailer_len;
  	}
  
- 	dst_prev->child = dst;
- 	dst0->path = dst;
+ 	xfrm_dst_set_child(xdst_prev, dst);
+ 	xdst0->path = dst;
  
  	err = -ENODEV;
  	dev = dst->dev;
  	if (!dev)
  		goto free_dst;
  
- 	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
- 	xfrm_init_pmtu(dst_prev);
- 
- 	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
- 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
+ 	xfrm_init_path(xdst0, dst, nfheader_len);
+ 	xfrm_init_pmtu(bundle, nx);
  
- 		err = xfrm_fill_dst(xdst, dev, fl);
+ 	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
+ 	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
+ 		err = xfrm_fill_dst(xdst_prev, dev, fl);
  		if (err)
  			goto free_dst;
  
- 		dst_prev->header_len = header_len;
- 		dst_prev->trailer_len = trailer_len;
- 		header_len -= xdst->u.dst.xfrm->props.header_len;
- 		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
+ 		xdst_prev->u.dst.header_len = header_len;
+ 		xdst_prev->u.dst.trailer_len = trailer_len;
+ 		header_len -= xdst_prev->u.dst.xfrm->props.header_len;
+ 		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
  	}
  
  out:
- 	return dst0;
+ 	return &xdst0->u.dst;
  
  put_states:
  	for (; i < nx; i++)
  		xfrm_state_put(xfrm[i]);
  free_dst:
- 	if (dst0)
- 		dst_release_immediate(dst0);
- 	dst0 = ERR_PTR(err);
+ 	if (xdst0)
+ 		dst_release_immediate(&xdst0->u.dst);
+ 	xdst0 = ERR_PTR(err);
  	goto out;
  }
  
@@@ -1807,7 -1807,7 +1808,7 @@@ static bool xfrm_xdst_can_reuse(struct 
  	for (i = 0; i < num; i++) {
  		if (!dst || dst->xfrm != xfrm[i])
  			return false;
- 		dst = dst->child;
+ 		dst = xfrm_dst_child(dst);
  	}
  
  	return xfrm_bundle_ok(xdst);
@@@ -1820,6 -1820,7 +1821,7 @@@ xfrm_resolve_and_create_bundle(struct x
  {
  	struct net *net = xp_net(pols[0]);
  	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
+ 	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
  	struct xfrm_dst *xdst, *old;
  	struct dst_entry *dst;
  	int err;
@@@ -1848,7 -1849,7 +1850,7 @@@
  
  	old = xdst;
  
- 	dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
+ 	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
  	if (IS_ERR(dst)) {
  		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
  		return ERR_CAST(dst);
@@@ -1888,8 -1889,8 +1890,8 @@@ static void xfrm_policy_queue_process(s
  	xfrm_decode_session(skb, &fl, dst->ops->family);
  	spin_unlock(&pq->hold_queue.lock);
  
- 	dst_hold(dst->path);
- 	dst = xfrm_lookup(net, dst->path, &fl, sk, 0);
+ 	dst_hold(xfrm_dst_path(dst));
+ 	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, 0);
  	if (IS_ERR(dst))
  		goto purge_queue;
  
@@@ -1918,8 -1919,8 +1920,8 @@@
  		skb = __skb_dequeue(&list);
  
  		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
- 		dst_hold(skb_dst(skb)->path);
- 		dst = xfrm_lookup(net, skb_dst(skb)->path, &fl, skb->sk, 0);
+ 		dst_hold(xfrm_dst_path(skb_dst(skb)));
+ 		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
  		if (IS_ERR(dst)) {
  			kfree_skb(skb);
  			continue;
@@@ -2020,8 -2021,8 +2022,8 @@@ static struct xfrm_dst *xfrm_create_dum
  	dst1->output = xdst_queue_output;
  
  	dst_hold(dst);
- 	dst1->child = dst;
- 	dst1->path = dst;
+ 	xfrm_dst_set_child(xdst, dst);
+ 	xdst->path = dst;
  
  	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
  
@@@ -2584,7 -2585,7 +2586,7 @@@ static int stale_bundle(struct dst_entr
  
  void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
  {
- 	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
+ 	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
  		dst->dev = dev_net(dev)->loopback_dev;
  		dev_hold(dst->dev);
  		dev_put(dev);
@@@ -2608,13 -2609,15 +2610,15 @@@ static struct dst_entry *xfrm_negative_
  	return dst;
  }
  
- static void xfrm_init_pmtu(struct dst_entry *dst)
+ static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
  {
- 	do {
- 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+ 	while (nr--) {
+ 		struct xfrm_dst *xdst = bundle[nr];
  		u32 pmtu, route_mtu_cached;
+ 		struct dst_entry *dst;
  
- 		pmtu = dst_mtu(dst->child);
+ 		dst = &xdst->u.dst;
+ 		pmtu = dst_mtu(xfrm_dst_child(dst));
  		xdst->child_mtu_cached = pmtu;
  
  		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
@@@ -2626,7 -2629,7 +2630,7 @@@
  			pmtu = route_mtu_cached;
  
  		dst_metric_set(dst, RTAX_MTU, pmtu);
- 	} while ((dst = dst->next));
+ 	}
  }
  
  /* Check that the bundle accepts the flow and its components are
@@@ -2635,19 -2638,20 +2639,20 @@@
  
  static int xfrm_bundle_ok(struct xfrm_dst *first)
  {
+ 	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
  	struct dst_entry *dst = &first->u.dst;
- 	struct xfrm_dst *last;
+ 	struct xfrm_dst *xdst;
+ 	int start_from, nr;
  	u32 mtu;
  
- 	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
+ 	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
  	    (dst->dev && !netif_running(dst->dev)))
  		return 0;
  
  	if (dst->flags & DST_XFRM_QUEUE)
  		return 1;
  
- 	last = NULL;
- 
+ 	start_from = nr = 0;
  	do {
  		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
  
@@@ -2659,9 -2663,11 +2664,11 @@@
  		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
  			return 0;
  
- 		mtu = dst_mtu(dst->child);
+ 		bundle[nr++] = xdst;
+ 
+ 		mtu = dst_mtu(xfrm_dst_child(dst));
  		if (xdst->child_mtu_cached != mtu) {
- 			last = xdst;
+ 			start_from = nr;
  			xdst->child_mtu_cached = mtu;
  		}
  
@@@ -2669,30 -2675,30 +2676,30 @@@
  			return 0;
  		mtu = dst_mtu(xdst->route);
  		if (xdst->route_mtu_cached != mtu) {
- 			last = xdst;
+ 			start_from = nr;
  			xdst->route_mtu_cached = mtu;
  		}
  
- 		dst = dst->child;
+ 		dst = xfrm_dst_child(dst);
  	} while (dst->xfrm);
  
- 	if (likely(!last))
+ 	if (likely(!start_from))
  		return 1;
  
- 	mtu = last->child_mtu_cached;
- 	for (;;) {
- 		dst = &last->u.dst;
+ 	xdst = bundle[start_from - 1];
+ 	mtu = xdst->child_mtu_cached;
+ 	while (start_from--) {
+ 		dst = &xdst->u.dst;
  
  		mtu = xfrm_state_mtu(dst->xfrm, mtu);
- 		if (mtu > last->route_mtu_cached)
- 			mtu = last->route_mtu_cached;
+ 		if (mtu > xdst->route_mtu_cached)
+ 			mtu = xdst->route_mtu_cached;
  		dst_metric_set(dst, RTAX_MTU, mtu);
- 
- 		if (last == first)
+ 		if (!start_from)
  			break;
  
- 		last = (struct xfrm_dst *)last->u.dst.next;
- 		last->child_mtu_cached = mtu;
+ 		xdst = bundle[start_from - 1];
+ 		xdst->child_mtu_cached = mtu;
  	}
  
  	return 1;
@@@ -2700,22 -2706,20 +2707,20 @@@
  
  static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
  {
- 	return dst_metric_advmss(dst->path);
+ 	return dst_metric_advmss(xfrm_dst_path(dst));
  }
  
  static unsigned int xfrm_mtu(const struct dst_entry *dst)
  {
  	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
  
- 	return mtu ? : dst_mtu(dst->path);
+ 	return mtu ? : dst_mtu(xfrm_dst_path(dst));
  }
  
  static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
  					const void *daddr)
  {
- 	const struct dst_entry *path = dst->path;
- 
- 	for (; dst != path; dst = dst->child) {
+ 	while (dst->xfrm) {
  		const struct xfrm_state *xfrm = dst->xfrm;
  
  		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
@@@ -2724,6 -2728,8 +2729,8 @@@
  			daddr = xfrm->coaddr;
  		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
  			daddr = &xfrm->id.daddr;
+ 
+ 		dst = xfrm_dst_child(dst);
  	}
  	return daddr;
  }
@@@ -2732,7 -2738,7 +2739,7 @@@ static struct neighbour *xfrm_neigh_loo
  					   struct sk_buff *skb,
  					   const void *daddr)
  {
- 	const struct dst_entry *path = dst->path;
+ 	const struct dst_entry *path = xfrm_dst_path(dst);
  
  	if (!skb)
  		daddr = xfrm_get_dst_nexthop(dst, daddr);
@@@ -2741,7 -2747,7 +2748,7 @@@
  
  static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
  {
- 	const struct dst_entry *path = dst->path;
+ 	const struct dst_entry *path = xfrm_dst_path(dst);
  
  	daddr = xfrm_get_dst_nexthop(dst, daddr);
  	path->ops->confirm_neigh(path, daddr);
diff --combined net/xfrm/xfrm_state.c
index 429957412633,cc4c519cad76..20b1e414dbee
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@@ -313,14 -313,13 +313,14 @@@ retry
  	if ((type && !try_module_get(type->owner)))
  		type = NULL;
  
 +	rcu_read_unlock();
 +
  	if (!type && try_load) {
  		request_module("xfrm-offload-%d-%d", family, proto);
  		try_load = 0;
  		goto retry;
  	}
  
 -	rcu_read_unlock();
  	return type;
  }
  
@@@ -1535,12 -1534,8 +1535,12 @@@ out
  	err = -EINVAL;
  	spin_lock_bh(&x1->lock);
  	if (likely(x1->km.state == XFRM_STATE_VALID)) {
 -		if (x->encap && x1->encap)
 +		if (x->encap && x1->encap &&
 +		    x->encap->encap_type == x1->encap->encap_type)
  			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
 +		else if (x->encap || x1->encap)
 +			goto fail;
 +
  		if (x->coaddr && x1->coaddr) {
  			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
  		}
@@@ -1557,8 -1552,6 +1557,8 @@@
  		x->km.state = XFRM_STATE_DEAD;
  		__xfrm_state_put(x);
  	}
 +
 +fail:
  	spin_unlock_bh(&x1->lock);
  
  	xfrm_state_put(x1);
@@@ -2056,6 -2049,13 +2056,13 @@@ int xfrm_user_policy(struct sock *sk, i
  	struct xfrm_mgr *km;
  	struct xfrm_policy *pol = NULL;
  
+ 	if (!optval && !optlen) {
+ 		xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
+ 		xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
+ 		__sk_dst_reset(sk);
+ 		return 0;
+ 	}
+ 
  	if (optlen <= 0 || optlen > PAGE_SIZE)
  		return -EMSGSIZE;
  

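For context only (not part of the original patch set): a minimal userspace sketch of how the new SCTP_INTERLEAVING_SUPPORTED socket option introduced by the sctp_setsockopt_interleaving_supported() handler above could be exercised. It assumes the sysctl net.sctp.intl_enable has been set to 1 and that SCTP_INTERLEAVING_SUPPORTED is defined in the installed headers; on systems whose libc headers predate this series the option value would have to be defined locally. The value 1 passed to SCTP_FRAGMENT_INTERLEAVE relies on the handler above treating any non-zero value as enabled.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

int enable_stream_interleave(int fd)
{
	struct sctp_assoc_value av;
	int on = 1;

	/* The setsockopt handler clears strm_interleave whenever
	 * frag_interleave is 0, so fragment interleaving has to be
	 * enabled first. */
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
		       &on, sizeof(on)) < 0)
		return -1;

	memset(&av, 0, sizeof(av));
	av.assoc_id = 0;	/* only the socket-wide setting is accepted */
	av.assoc_value = 1;

	/* Fails with EPERM unless both net.sctp.intl_enable and
	 * SCTP_FRAGMENT_INTERLEAVE are set, per the handler above. */
	return setsockopt(fd, IPPROTO_SCTP, SCTP_INTERLEAVING_SUPPORTED,
			  &av, sizeof(av));
}

The matching getsockopt path returns asoc->intl_enable for an existing association or sp->strm_interleave for the socket-wide setting, so the same struct sctp_assoc_value can be reused to read the state back.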
-- 
LinuxNextTracking

