[linux-next] LinuxNextTracking branch, master, updated. next-20180307

batman at open-mesh.org batman at open-mesh.org
Thu Mar 8 00:16:54 CET 2018


The following commit has been merged in the master branch:
commit 0f3e9c97eb5a97972b0c0076a5cc01bb142f8e70
Merge: ef3f6c256f0b4711a3ef1489797b95820be5ab01 ce380619fab99036f5e745c7a865b21c59f005f6
Author: David S. Miller <davem at davemloft.net>
Date:   Tue Mar 6 00:53:44 2018 -0500

    Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
    
    All of the conflicts were cases of overlapping changes.
    
    In net/core/devlink.c, we have to take care that the
    resource size_params have become a struct member rather
    than a pointer to such an object.
    
    Signed-off-by: David S. Miller <davem at davemloft.net>

diff --combined MAINTAINERS
index e0b39004edc0,4623caf8d72d..9d632b83c4ce
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -1238,7 -1238,7 +1238,7 @@@ F:	drivers/clk/at9
  
  ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
  M:	Nicolas Ferre <nicolas.ferre at microchip.com>
- M:	Alexandre Belloni <alexandre.belloni at free-electrons.com>
+ M:	Alexandre Belloni <alexandre.belloni at bootlin.com>
  L:	linux-arm-kernel at lists.infradead.org (moderated for non-subscribers)
  W:	http://www.linux4sam.org
  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/nferre/linux-at91.git
@@@ -1590,7 -1590,7 +1590,7 @@@ ARM/Marvell Dove/MV78xx0/Orion SOC supp
  M:	Jason Cooper <jason at lakedaemon.net>
  M:	Andrew Lunn <andrew at lunn.ch>
  M:	Sebastian Hesselbarth <sebastian.hesselbarth at gmail.com>
- M:	Gregory Clement <gregory.clement at free-electrons.com>
+ M:	Gregory Clement <gregory.clement at bootlin.com>
  L:	linux-arm-kernel at lists.infradead.org (moderated for non-subscribers)
  S:	Maintained
  F:	Documentation/devicetree/bindings/soc/dove/
@@@ -1604,7 -1604,7 +1604,7 @@@ F:	arch/arm/boot/dts/orion5x
  ARM/Marvell Kirkwood and Armada 370, 375, 38x, 39x, XP, 3700, 7K/8K SOC support
  M:	Jason Cooper <jason at lakedaemon.net>
  M:	Andrew Lunn <andrew at lunn.ch>
- M:	Gregory Clement <gregory.clement at free-electrons.com>
+ M:	Gregory Clement <gregory.clement at bootlin.com>
  M:	Sebastian Hesselbarth <sebastian.hesselbarth at gmail.com>
  L:	linux-arm-kernel at lists.infradead.org (moderated for non-subscribers)
  S:	Maintained
@@@ -1999,8 -1999,10 +1999,10 @@@ M:	Maxime Coquelin <mcoquelin.stm32 at gma
  M:	Alexandre Torgue <alexandre.torgue at st.com>
  L:	linux-arm-kernel at lists.infradead.org (moderated for non-subscribers)
  S:	Maintained
- T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mcoquelin/stm32.git
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/atorgue/stm32.git stm32-next
  N:	stm32
+ F:	arch/arm/boot/dts/stm32*
+ F:	arch/arm/mach-stm32/
  F:	drivers/clocksource/armv7m_systick.c
  
  ARM/TANGO ARCHITECTURE
@@@ -7600,8 -7602,10 +7602,10 @@@ F:	mm/kasan
  F:	scripts/Makefile.kasan
  
  KCONFIG
+ M:	Masahiro Yamada <yamada.masahiro at socionext.com>
+ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git kconfig
  L:	linux-kbuild at vger.kernel.org
- S:	Orphan
+ S:	Maintained
  F:	Documentation/kbuild/kconfig-language.txt
  F:	scripts/kconfig/
  
@@@ -8592,15 -8596,6 +8596,15 @@@ S:	Maintaine
  F:	Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531
  F:	drivers/iio/potentiometer/mcp4531.c
  
 +MCR20A IEEE-802.15.4 RADIO DRIVER
 +M:	Xue Liu <liuxuenetmail at gmail.com>
 +L:	linux-wpan at vger.kernel.org
 +W:	https://github.com/xueliu/mcr20a-linux
 +S:	Maintained
 +F:	drivers/net/ieee802154/mcr20a.c
 +F:	drivers/net/ieee802154/mcr20a.h
 +F:	Documentation/devicetree/bindings/net/ieee802154/mcr20a.txt
 +
  MEASUREMENT COMPUTING CIO-DAC IIO DRIVER
  M:	William Breathitt Gray <vilhelm.gray at gmail.com>
  L:	linux-iio at vger.kernel.org
@@@ -10935,6 -10930,17 +10939,17 @@@ L:	linux-gpio at vger.kernel.or
  S:	Supported
  F:	drivers/pinctrl/pinctrl-at91-pio4.*
  
+ PIN CONTROLLER - FREESCALE
+ M:	Dong Aisheng <aisheng.dong at nxp.com>
+ M:	Fabio Estevam <festevam at gmail.com>
+ M:	Shawn Guo <shawnguo at kernel.org>
+ M:	Stefan Agner <stefan at agner.ch>
+ R:	Pengutronix Kernel Team <kernel at pengutronix.de>
+ L:	linux-gpio at vger.kernel.org
+ S:	Maintained
+ F:	drivers/pinctrl/freescale/
+ F:	Documentation/devicetree/bindings/pinctrl/fsl,*
+ 
  PIN CONTROLLER - INTEL
  M:	Mika Westerberg <mika.westerberg at linux.intel.com>
  M:	Heikki Krogerus <heikki.krogerus at linux.intel.com>
diff --combined drivers/bluetooth/btusb.c
index c8e9ae6b99e1,60bf04b8f103..fa4ce83893bb
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@@ -21,6 -21,7 +21,7 @@@
   *
   */
  
+ #include <linux/dmi.h>
  #include <linux/module.h>
  #include <linux/usb.h>
  #include <linux/usb/quirks.h>
@@@ -339,7 -340,6 +340,7 @@@ static const struct usb_device_id black
  
  	/* Intel Bluetooth devices */
  	{ USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW },
 +	{ USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW },
  	{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
  	{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
  	{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
@@@ -374,15 -374,27 +375,30 @@@
  	{ USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK },
  	{ USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK },
  
 +	/* Additional Realtek 8822BE Bluetooth devices */
 +	{ USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },
 +
  	/* Silicon Wave based devices */
  	{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
  
  	{ }	/* Terminating entry */
  };
  
+ /* The Bluetooth USB module build into some devices needs to be reset on resume,
+  * this is a problem with the platform (likely shutting off all power) not with
+  * the module itself. So we use a DMI list to match known broken platforms.
+  */
+ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
+ 	{
+ 		/* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ 			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"),
+ 		},
+ 	},
+ 	{}
+ };
+ 
  #define BTUSB_MAX_ISOC_FRAMES	10
  
  #define BTUSB_INTR_RUNNING	0
@@@ -2061,8 -2073,6 +2077,8 @@@ static int btusb_setup_intel_new(struc
  	case 0x0c:	/* WsP */
  	case 0x11:	/* JfP */
  	case 0x12:	/* ThP */
 +	case 0x13:	/* HrP */
 +	case 0x14:	/* QnJ, IcP */
  		break;
  	default:
  		BT_ERR("%s: Unsupported Intel hardware variant (%u)",
@@@ -2155,8 -2165,6 +2171,8 @@@
  		break;
  	case 0x11:	/* JfP */
  	case 0x12:	/* ThP */
 +	case 0x13:	/* HrP */
 +	case 0x14:	/* QnJ, IcP */
  		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
  			 le16_to_cpu(ver.hw_variant),
  			 le16_to_cpu(ver.hw_revision),
@@@ -2188,8 -2196,6 +2204,8 @@@
  		break;
  	case 0x11:	/* JfP */
  	case 0x12:	/* ThP */
 +	case 0x13:	/* HrP */
 +	case 0x14:	/* QnJ, IcP */
  		snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
  			 le16_to_cpu(ver.hw_variant),
  			 le16_to_cpu(ver.hw_revision),
@@@ -2955,6 -2961,9 +2971,9 @@@ static int btusb_probe(struct usb_inter
  	hdev->send   = btusb_send_frame;
  	hdev->notify = btusb_notify;
  
+ 	if (dmi_check_system(btusb_needs_reset_resume_table))
+ 		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
+ 
  #ifdef CONFIG_PM
  	err = btusb_config_oob_wake(hdev);
  	if (err)
@@@ -3041,12 -3050,6 +3060,6 @@@
  	if (id->driver_info & BTUSB_QCA_ROME) {
  		data->setup_on_usb = btusb_setup_qca;
  		hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
- 
- 		/* QCA Rome devices lose their updated firmware over suspend,
- 		 * but the USB hub doesn't notice any status change.
- 		 * explicitly request a device reset on resume.
- 		 */
- 		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
  	}
  
  #ifdef CONFIG_BT_HCIBTUSB_RTL
diff --combined drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index b032091022a8,9fc063af233c..85369423452d
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@@ -1888,6 -1888,14 +1888,14 @@@ static void ixgbe_dma_sync_frag(struct 
  				     ixgbe_rx_pg_size(rx_ring),
  				     DMA_FROM_DEVICE,
  				     IXGBE_RX_DMA_ATTR);
+ 	} else if (ring_uses_build_skb(rx_ring)) {
+ 		unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
+ 
+ 		dma_sync_single_range_for_cpu(rx_ring->dev,
+ 					      IXGBE_CB(skb)->dma,
+ 					      offset,
+ 					      skb_headlen(skb),
+ 					      DMA_FROM_DEVICE);
  	} else {
  		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
  
@@@ -7703,8 -7711,7 +7711,8 @@@ static void ixgbe_service_task(struct w
  
  	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
  		ixgbe_ptp_overflow_check(adapter);
 -		ixgbe_ptp_rx_hang(adapter);
 +		if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)
 +			ixgbe_ptp_rx_hang(adapter);
  		ixgbe_ptp_tx_hang(adapter);
  	}
  
diff --combined drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 7c6204f701ae,c7e941aecc2a..7884e8a2de35
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@@ -1,6 -1,6 +1,6 @@@
  /*
   * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 - * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
 + * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
   * Copyright (c) 2015-2017 Jiri Pirko <jiri at mellanox.com>
   * Copyright (c) 2015 Ido Schimmel <idosch at mellanox.com>
   * Copyright (c) 2015 Elad Raz <eladr at mellanox.com>
@@@ -71,7 -71,6 +71,7 @@@
  #include "spectrum_cnt.h"
  #include "spectrum_dpipe.h"
  #include "spectrum_acl_flex_actions.h"
 +#include "spectrum_span.h"
  #include "../mlxfw/mlxfw.h"
  
  #define MLXSW_FWREV_MAJOR 13
@@@ -488,6 -487,327 +488,6 @@@ static int mlxsw_sp_base_mac_get(struc
  	return 0;
  }
  
 -static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
 -{
 -	int i;
 -
 -	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
 -		return -EIO;
 -
 -	mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
 -							  MAX_SPAN);
 -	mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
 -					 sizeof(struct mlxsw_sp_span_entry),
 -					 GFP_KERNEL);
 -	if (!mlxsw_sp->span.entries)
 -		return -ENOMEM;
 -
 -	for (i = 0; i < mlxsw_sp->span.entries_count; i++)
 -		INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);
 -
 -	return 0;
 -}
 -
 -static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
 -{
 -	int i;
 -
 -	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
 -		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
 -
 -		WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
 -	}
 -	kfree(mlxsw_sp->span.entries);
 -}
 -
 -static struct mlxsw_sp_span_entry *
 -mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
 -{
 -	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
 -	struct mlxsw_sp_span_entry *span_entry;
 -	char mpat_pl[MLXSW_REG_MPAT_LEN];
 -	u8 local_port = port->local_port;
 -	int index;
 -	int i;
 -	int err;
 -
 -	/* find a free entry to use */
 -	index = -1;
 -	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
 -		if (!mlxsw_sp->span.entries[i].used) {
 -			index = i;
 -			span_entry = &mlxsw_sp->span.entries[i];
 -			break;
 -		}
 -	}
 -	if (index < 0)
 -		return NULL;
 -
 -	/* create a new port analayzer entry for local_port */
 -	mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
 -	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
 -	if (err)
 -		return NULL;
 -
 -	span_entry->used = true;
 -	span_entry->id = index;
 -	span_entry->ref_count = 1;
 -	span_entry->local_port = local_port;
 -	return span_entry;
 -}
 -
 -static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
 -					struct mlxsw_sp_span_entry *span_entry)
 -{
 -	u8 local_port = span_entry->local_port;
 -	char mpat_pl[MLXSW_REG_MPAT_LEN];
 -	int pa_id = span_entry->id;
 -
 -	mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
 -	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
 -	span_entry->used = false;
 -}
 -
 -struct mlxsw_sp_span_entry *
 -mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 -{
 -	int i;
 -
 -	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
 -		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
 -
 -		if (curr->used && curr->local_port == local_port)
 -			return curr;
 -	}
 -	return NULL;
 -}
 -
 -static struct mlxsw_sp_span_entry
 -*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
 -{
 -	struct mlxsw_sp_span_entry *span_entry;
 -
 -	span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp,
 -					      port->local_port);
 -	if (span_entry) {
 -		/* Already exists, just take a reference */
 -		span_entry->ref_count++;
 -		return span_entry;
 -	}
 -
 -	return mlxsw_sp_span_entry_create(port);
 -}
 -
 -static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
 -				   struct mlxsw_sp_span_entry *span_entry)
 -{
 -	WARN_ON(!span_entry->ref_count);
 -	if (--span_entry->ref_count == 0)
 -		mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
 -	return 0;
 -}
 -
 -static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
 -{
 -	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
 -	struct mlxsw_sp_span_inspected_port *p;
 -	int i;
 -
 -	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
 -		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
 -
 -		list_for_each_entry(p, &curr->bound_ports_list, list)
 -			if (p->local_port == port->local_port &&
 -			    p->type == MLXSW_SP_SPAN_EGRESS)
 -				return true;
 -	}
 -
 -	return false;
 -}
 -
 -static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
 -					 int mtu)
 -{
 -	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
 -}
 -
 -static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
 -{
 -	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
 -	char sbib_pl[MLXSW_REG_SBIB_LEN];
 -	int err;
 -
 -	/* If port is egress mirrored, the shared buffer size should be
 -	 * updated according to the mtu value
 -	 */
 -	if (mlxsw_sp_span_is_egress_mirror(port)) {
 -		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
 -
 -		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
 -		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
 -		if (err) {
 -			netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
 -			return err;
 -		}
 -	}
 -
 -	return 0;
 -}
 -
 -static struct mlxsw_sp_span_inspected_port *
 -mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
 -				    struct mlxsw_sp_span_entry *span_entry)
 -{
 -	struct mlxsw_sp_span_inspected_port *p;
 -
 -	list_for_each_entry(p, &span_entry->bound_ports_list, list)
 -		if (port->local_port == p->local_port)
 -			return p;
 -	return NULL;
 -}
 -
 -static int
 -mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
 -				  struct mlxsw_sp_span_entry *span_entry,
 -				  enum mlxsw_sp_span_type type,
 -				  bool bind)
 -{
 -	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
 -	char mpar_pl[MLXSW_REG_MPAR_LEN];
 -	int pa_id = span_entry->id;
 -
 -	/* bind the port to the SPAN entry */
 -	mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
 -			    (enum mlxsw_reg_mpar_i_e) type, bind, pa_id);
 -	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
 -}
 -
 -static int
 -mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
 -				 struct mlxsw_sp_span_entry *span_entry,
 -				 enum mlxsw_sp_span_type type,
 -				 bool bind)
 -{
 -	struct mlxsw_sp_span_inspected_port *inspected_port;
 -	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
 -	char sbib_pl[MLXSW_REG_SBIB_LEN];
 -	int err;
 -
 -	/* if it is an egress SPAN, bind a shared buffer to it */
 -	if (type == MLXSW_SP_SPAN_EGRESS) {
 -		u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
 -							     port->dev->mtu);
 -
 -		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
 -		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
 -		if (err) {
 -			netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
 -			return err;
 -		}
 -	}
 -
 -	if (bind) {
 -		err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
 -							true);
 -		if (err)
 -			goto err_port_bind;
 -	}
 -
 -	inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
 -	if (!inspected_port) {
 -		err = -ENOMEM;
 -		goto err_inspected_port_alloc;
 -	}
 -	inspected_port->local_port = port->local_port;
 -	inspected_port->type = type;
 -	list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
 -
 -	return 0;
 -
 -err_inspected_port_alloc:
 -	if (bind)
 -		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
 -						  false);
 -err_port_bind:
 -	if (type == MLXSW_SP_SPAN_EGRESS) {
 -		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
 -		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
 -	}
 -	return err;
 -}
 -
 -static void
 -mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
 -				 struct mlxsw_sp_span_entry *span_entry,
 -				 enum mlxsw_sp_span_type type,
 -				 bool bind)
 -{
 -	struct mlxsw_sp_span_inspected_port *inspected_port;
 -	struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
 -	char sbib_pl[MLXSW_REG_SBIB_LEN];
 -
 -	inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
 -	if (!inspected_port)
 -		return;
 -
 -	if (bind)
 -		mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
 -						  false);
 -	/* remove the SBIB buffer if it was egress SPAN */
 -	if (type == MLXSW_SP_SPAN_EGRESS) {
 -		mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
 -		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
 -	}
 -
 -	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
 -
 -	list_del(&inspected_port->list);
 -	kfree(inspected_port);
 -}
 -
 -int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
 -			     struct mlxsw_sp_port *to,
 -			     enum mlxsw_sp_span_type type, bool bind)
 -{
 -	struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
 -	struct mlxsw_sp_span_entry *span_entry;
 -	int err;
 -
 -	span_entry = mlxsw_sp_span_entry_get(to);
 -	if (!span_entry)
 -		return -ENOENT;
 -
 -	netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
 -		   span_entry->id);
 -
 -	err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
 -	if (err)
 -		goto err_port_bind;
 -
 -	return 0;
 -
 -err_port_bind:
 -	mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
 -	return err;
 -}
 -
 -void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, u8 destination_port,
 -			      enum mlxsw_sp_span_type type, bool bind)
 -{
 -	struct mlxsw_sp_span_entry *span_entry;
 -
 -	span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp,
 -					      destination_port);
 -	if (!span_entry) {
 -		netdev_err(from->dev, "no span entry found\n");
 -		return;
 -	}
 -
 -	netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
 -		   span_entry->id);
 -	mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
 -}
 -
  static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
  				    bool enable, u32 rate)
  {
@@@ -1040,16 -1360,6 +1040,16 @@@ mlxsw_sp_port_get_hw_xstats(struct net_
  		xstats->tail_drop[i] =
  			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
  	}
 +
 +	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
 +		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
 +						  i, ppcnt_pl);
 +		if (err)
 +			continue;
 +
 +		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
 +		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
 +	}
  }
  
  static void update_stats_cache(struct work_struct *work)
@@@ -1149,6 -1459,7 +1149,7 @@@ mlxsw_sp_port_vlan_create(struct mlxsw_
  	}
  
  	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
+ 	mlxsw_sp_port_vlan->ref_count = 1;
  	mlxsw_sp_port_vlan->vid = vid;
  	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
  
@@@ -1176,8 -1487,10 +1177,10 @@@ mlxsw_sp_port_vlan_get(struct mlxsw_sp_
  	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  
  	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
- 	if (mlxsw_sp_port_vlan)
+ 	if (mlxsw_sp_port_vlan) {
+ 		mlxsw_sp_port_vlan->ref_count++;
  		return mlxsw_sp_port_vlan;
+ 	}
  
  	return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
  }
@@@ -1186,6 -1499,9 +1189,9 @@@ void mlxsw_sp_port_vlan_put(struct mlxs
  {
  	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
  
+ 	if (--mlxsw_sp_port_vlan->ref_count != 0)
+ 		return;
+ 
  	if (mlxsw_sp_port_vlan->bridge_port)
  		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
  	else if (fid)
@@@ -1268,6 -1584,7 +1274,6 @@@ mlxsw_sp_port_add_cls_matchall_mirror(s
  				      bool ingress)
  {
  	enum mlxsw_sp_span_type span_type;
 -	struct mlxsw_sp_port *to_port;
  	struct net_device *to_dev;
  
  	to_dev = tcf_mirred_dev(a);
@@@ -1276,10 -1593,17 +1282,10 @@@
  		return -EINVAL;
  	}
  
 -	if (!mlxsw_sp_port_dev_check(to_dev)) {
 -		netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
 -		return -EOPNOTSUPP;
 -	}
 -	to_port = netdev_priv(to_dev);
 -
 -	mirror->to_local_port = to_port->local_port;
  	mirror->ingress = ingress;
  	span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
 -	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type,
 -					true);
 +	return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev, span_type,
 +					true, &mirror->span_id);
  }
  
  static void
@@@ -1290,7 -1614,7 +1296,7 @@@ mlxsw_sp_port_del_cls_matchall_mirror(s
  
  	span_type = mirror->ingress ?
  			MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
 -	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->to_local_port,
 +	mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
  				 span_type, true);
  }
  
@@@ -3677,24 -4001,14 +3683,24 @@@ static int mlxsw_sp_init(struct mlxsw_c
  		goto err_afa_init;
  	}
  
 +	err = mlxsw_sp_span_init(mlxsw_sp);
 +	if (err) {
 +		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
 +		goto err_span_init;
 +	}
 +
 +	/* Initialize router after SPAN is initialized, so that the FIB and
 +	 * neighbor event handlers can issue SPAN respin.
 +	 */
  	err = mlxsw_sp_router_init(mlxsw_sp);
  	if (err) {
  		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
  		goto err_router_init;
  	}
  
 -	/* Initialize netdevice notifier after router is initialized, so that
 -	 * the event handler can use router structures.
 +	/* Initialize netdevice notifier after router and SPAN is initialized,
 +	 * so that the event handler can use router structures and call SPAN
 +	 * respin.
  	 */
  	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
  	err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
@@@ -3703,6 -4017,12 +3709,6 @@@
  		goto err_netdev_notifier;
  	}
  
 -	err = mlxsw_sp_span_init(mlxsw_sp);
 -	if (err) {
 -		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
 -		goto err_span_init;
 -	}
 -
  	err = mlxsw_sp_acl_init(mlxsw_sp);
  	if (err) {
  		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
@@@ -3728,12 -4048,12 +3734,12 @@@ err_ports_create
  err_dpipe_init:
  	mlxsw_sp_acl_fini(mlxsw_sp);
  err_acl_init:
 -	mlxsw_sp_span_fini(mlxsw_sp);
 -err_span_init:
  	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
  err_netdev_notifier:
  	mlxsw_sp_router_fini(mlxsw_sp);
  err_router_init:
 +	mlxsw_sp_span_fini(mlxsw_sp);
 +err_span_init:
  	mlxsw_sp_afa_fini(mlxsw_sp);
  err_afa_init:
  	mlxsw_sp_counter_pool_fini(mlxsw_sp);
@@@ -3759,9 -4079,9 +3765,9 @@@ static void mlxsw_sp_fini(struct mlxsw_
  	mlxsw_sp_ports_remove(mlxsw_sp);
  	mlxsw_sp_dpipe_fini(mlxsw_sp);
  	mlxsw_sp_acl_fini(mlxsw_sp);
 -	mlxsw_sp_span_fini(mlxsw_sp);
  	unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
  	mlxsw_sp_router_fini(mlxsw_sp);
 +	mlxsw_sp_span_fini(mlxsw_sp);
  	mlxsw_sp_afa_fini(mlxsw_sp);
  	mlxsw_sp_counter_pool_fini(mlxsw_sp);
  	mlxsw_sp_switchdev_fini(mlxsw_sp);
@@@ -3804,6 -4124,70 +3810,6 @@@ static const struct mlxsw_config_profil
  	.resource_query_enable		= 1,
  };
  
 -static bool
 -mlxsw_sp_resource_kvd_granularity_validate(struct netlink_ext_ack *extack,
 -					   u64 size)
 -{
 -	const struct mlxsw_config_profile *profile;
 -
 -	profile = &mlxsw_sp_config_profile;
 -	if (size % profile->kvd_hash_granularity) {
 -		NL_SET_ERR_MSG_MOD(extack, "resource set with wrong granularity");
 -		return false;
 -	}
 -	return true;
 -}
 -
 -static int
 -mlxsw_sp_resource_kvd_size_validate(struct devlink *devlink, u64 size,
 -				    struct netlink_ext_ack *extack)
 -{
 -	NL_SET_ERR_MSG_MOD(extack, "kvd size cannot be changed");
 -	return -EINVAL;
 -}
 -
 -static int
 -mlxsw_sp_resource_kvd_linear_size_validate(struct devlink *devlink, u64 size,
 -					   struct netlink_ext_ack *extack)
 -{
 -	if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
 -		return -EINVAL;
 -
 -	return 0;
 -}
 -
 -static int
 -mlxsw_sp_resource_kvd_hash_single_size_validate(struct devlink *devlink, u64 size,
 -						struct netlink_ext_ack *extack)
 -{
 -	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
 -
 -	if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
 -		return -EINVAL;
 -
 -	if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE)) {
 -		NL_SET_ERR_MSG_MOD(extack, "hash single size is smaller than minimum");
 -		return -EINVAL;
 -	}
 -	return 0;
 -}
 -
 -static int
 -mlxsw_sp_resource_kvd_hash_double_size_validate(struct devlink *devlink, u64 size,
 -						struct netlink_ext_ack *extack)
 -{
 -	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
 -
 -	if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
 -		return -EINVAL;
 -
 -	if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) {
 -		NL_SET_ERR_MSG_MOD(extack, "hash double size is smaller than minimum");
 -		return -EINVAL;
 -	}
 -	return 0;
 -}
 -
  static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
  {
  	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
@@@ -3812,17 -4196,29 +3818,16 @@@
  	return mlxsw_sp_kvdl_occ_get(mlxsw_sp);
  }
  
 -static struct devlink_resource_ops mlxsw_sp_resource_kvd_ops = {
 -	.size_validate = mlxsw_sp_resource_kvd_size_validate,
 -};
 -
  static struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
 -	.size_validate = mlxsw_sp_resource_kvd_linear_size_validate,
  	.occ_get = mlxsw_sp_resource_kvd_linear_occ_get,
  };
  
- static struct devlink_resource_size_params mlxsw_sp_kvd_size_params;
- static struct devlink_resource_size_params mlxsw_sp_linear_size_params;
- static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params;
- static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params;
 -static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_single_ops = {
 -	.size_validate = mlxsw_sp_resource_kvd_hash_single_size_validate,
 -};
 -
 -static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = {
 -	.size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate,
 -};
--
  static void
- mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
+ mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
+ 				      struct devlink_resource_size_params *kvd_size_params,
+ 				      struct devlink_resource_size_params *linear_size_params,
+ 				      struct devlink_resource_size_params *hash_double_size_params,
+ 				      struct devlink_resource_size_params *hash_single_size_params)
  {
  	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
  						 KVD_SINGLE_MIN_SIZE);
@@@ -3831,37 -4227,35 +3836,35 @@@
  	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
  	u32 linear_size_min = 0;
  
- 	/* KVD top resource */
- 	mlxsw_sp_kvd_size_params.size_min = kvd_size;
- 	mlxsw_sp_kvd_size_params.size_max = kvd_size;
- 	mlxsw_sp_kvd_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
- 	mlxsw_sp_kvd_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
- 
- 	/* Linear part init */
- 	mlxsw_sp_linear_size_params.size_min = linear_size_min;
- 	mlxsw_sp_linear_size_params.size_max = kvd_size - single_size_min -
- 					       double_size_min;
- 	mlxsw_sp_linear_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
- 	mlxsw_sp_linear_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
- 
- 	/* Hash double part init */
- 	mlxsw_sp_hash_double_size_params.size_min = double_size_min;
- 	mlxsw_sp_hash_double_size_params.size_max = kvd_size - single_size_min -
- 						    linear_size_min;
- 	mlxsw_sp_hash_double_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
- 	mlxsw_sp_hash_double_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
- 
- 	/* Hash single part init */
- 	mlxsw_sp_hash_single_size_params.size_min = single_size_min;
- 	mlxsw_sp_hash_single_size_params.size_max = kvd_size - double_size_min -
- 						    linear_size_min;
- 	mlxsw_sp_hash_single_size_params.size_granularity = MLXSW_SP_KVD_GRANULARITY;
- 	mlxsw_sp_hash_single_size_params.unit = DEVLINK_RESOURCE_UNIT_ENTRY;
+ 	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
+ 					  MLXSW_SP_KVD_GRANULARITY,
+ 					  DEVLINK_RESOURCE_UNIT_ENTRY);
+ 	devlink_resource_size_params_init(linear_size_params, linear_size_min,
+ 					  kvd_size - single_size_min -
+ 					  double_size_min,
+ 					  MLXSW_SP_KVD_GRANULARITY,
+ 					  DEVLINK_RESOURCE_UNIT_ENTRY);
+ 	devlink_resource_size_params_init(hash_double_size_params,
+ 					  double_size_min,
+ 					  kvd_size - single_size_min -
+ 					  linear_size_min,
+ 					  MLXSW_SP_KVD_GRANULARITY,
+ 					  DEVLINK_RESOURCE_UNIT_ENTRY);
+ 	devlink_resource_size_params_init(hash_single_size_params,
+ 					  single_size_min,
+ 					  kvd_size - double_size_min -
+ 					  linear_size_min,
+ 					  MLXSW_SP_KVD_GRANULARITY,
+ 					  DEVLINK_RESOURCE_UNIT_ENTRY);
  }
  
  static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
  {
  	struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ 	struct devlink_resource_size_params hash_single_size_params;
+ 	struct devlink_resource_size_params hash_double_size_params;
+ 	struct devlink_resource_size_params linear_size_params;
+ 	struct devlink_resource_size_params kvd_size_params;
  	u32 kvd_size, single_size, double_size, linear_size;
  	const struct mlxsw_config_profile *profile;
  	int err;
@@@ -3870,14 -4264,18 +3873,18 @@@
  	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
  		return -EIO;
  
- 	mlxsw_sp_resource_size_params_prepare(mlxsw_core);
+ 	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
+ 					      &linear_size_params,
+ 					      &hash_double_size_params,
+ 					      &hash_single_size_params);
+ 
  	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
  	err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
  					true, kvd_size,
  					MLXSW_SP_RESOURCE_KVD,
  					DEVLINK_RESOURCE_ID_PARENT_TOP,
- 					&mlxsw_sp_kvd_size_params,
+ 					&kvd_size_params,
 -					&mlxsw_sp_resource_kvd_ops);
 +					NULL);
  	if (err)
  		return err;
  
@@@ -3886,15 -4284,11 +3893,15 @@@
  					false, linear_size,
  					MLXSW_SP_RESOURCE_KVD_LINEAR,
  					MLXSW_SP_RESOURCE_KVD,
- 					&mlxsw_sp_linear_size_params,
+ 					&linear_size_params,
  					&mlxsw_sp_resource_kvd_linear_ops);
  	if (err)
  		return err;
  
 +	err = mlxsw_sp_kvdl_resources_register(devlink);
 +	if  (err)
 +		return err;
 +
  	double_size = kvd_size - linear_size;
  	double_size *= profile->kvd_hash_double_parts;
  	double_size /= profile->kvd_hash_double_parts +
@@@ -3904,8 -4298,8 +3911,8 @@@
  					false, double_size,
  					MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
  					MLXSW_SP_RESOURCE_KVD,
- 					&mlxsw_sp_hash_double_size_params,
+ 					&hash_double_size_params,
 -					&mlxsw_sp_resource_kvd_hash_double_ops);
 +					NULL);
  	if (err)
  		return err;
  
@@@ -3914,8 -4308,8 +3921,8 @@@
  					false, single_size,
  					MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
  					MLXSW_SP_RESOURCE_KVD,
- 					&mlxsw_sp_hash_single_size_params,
+ 					&hash_single_size_params,
 -					&mlxsw_sp_resource_kvd_hash_single_ops);
 +					NULL);
  	if (err)
  		return err;
  
@@@ -4169,11 -4563,13 +4176,11 @@@ mlxsw_sp_master_lag_check(struct mlxsw_
  	u16 lag_id;
  
  	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
 -		NL_SET_ERR_MSG(extack,
 -			       "spectrum: Exceeded number of supported LAG devices");
 +		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
  		return false;
  	}
  	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
 -		NL_SET_ERR_MSG(extack,
 -			       "spectrum: LAG device using unsupported Tx type");
 +		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
  		return false;
  	}
  	return true;
@@@ -4415,7 -4811,8 +4422,7 @@@ static int mlxsw_sp_netdevice_port_uppe
  		    !netif_is_lag_master(upper_dev) &&
  		    !netif_is_bridge_master(upper_dev) &&
  		    !netif_is_ovs_master(upper_dev)) {
 -			NL_SET_ERR_MSG(extack,
 -				       "spectrum: Unknown upper device type");
 +			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
  			return -EINVAL;
  		}
  		if (!info->linking)
@@@ -4424,7 -4821,8 +4431,7 @@@
  		    (!netif_is_bridge_master(upper_dev) ||
  		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
  							  upper_dev))) {
 -			NL_SET_ERR_MSG(extack,
 -				       "spectrum: Enslaving a port to a device that already has an upper device is not supported");
 +			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
  			return -EINVAL;
  		}
  		if (netif_is_lag_master(upper_dev) &&
@@@ -4432,20 -4830,24 +4439,20 @@@
  					       info->upper_info, extack))
  			return -EINVAL;
  		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
 -			NL_SET_ERR_MSG(extack,
 -				       "spectrum: Master device is a LAG master and this device has a VLAN");
 +			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
  			return -EINVAL;
  		}
  		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
  		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
 -			NL_SET_ERR_MSG(extack,
 -				       "spectrum: Can not put a VLAN on a LAG port");
 +			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
  			return -EINVAL;
  		}
  		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
 -			NL_SET_ERR_MSG(extack,
 -				       "spectrum: Master device is an OVS master and this device has a VLAN");
 +			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
  			return -EINVAL;
  		}
  		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
 -			NL_SET_ERR_MSG(extack,
 -				       "spectrum: Can not put a VLAN on an OVS port");
 +			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
  			return -EINVAL;
  		}
  		break;
@@@ -4558,7 -4960,7 +4565,7 @@@ static int mlxsw_sp_netdevice_port_vlan
  	case NETDEV_PRECHANGEUPPER:
  		upper_dev = info->upper_dev;
  		if (!netif_is_bridge_master(upper_dev)) {
 -			NL_SET_ERR_MSG(extack, "spectrum: VLAN devices only support bridge and VRF uppers");
 +			NL_SET_ERR_MSG_MOD(extack, "VLAN devices only support bridge and VRF uppers");
  			return -EINVAL;
  		}
  		if (!info->linking)
@@@ -4567,7 -4969,7 +4574,7 @@@
  		    (!netif_is_bridge_master(upper_dev) ||
  		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
  							  upper_dev))) {
 -			NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
 +			NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
  			return -EINVAL;
  		}
  		break;
@@@ -4645,18 -5047,10 +4652,18 @@@ static int mlxsw_sp_netdevice_event(str
  				    unsigned long event, void *ptr)
  {
  	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 +	struct mlxsw_sp_span_entry *span_entry;
  	struct mlxsw_sp *mlxsw_sp;
  	int err = 0;
  
  	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
 +	if (event == NETDEV_UNREGISTER) {
 +		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
 +		if (span_entry)
 +			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
 +	}
 +	mlxsw_sp_span_respin(mlxsw_sp);
 +
  	if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
  		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
  						       event, ptr);
diff --combined drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index d5e711d8ad71,4ec1ca3c96c8..92194a9b2caf
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@@ -70,23 -70,16 +70,23 @@@
  #define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR "linear"
  #define MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE "hash_single"
  #define MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE "hash_double"
 +#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES "singles"
 +#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS "chunks"
 +#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS "large_chunks"
  
  enum mlxsw_sp_resource_id {
  	MLXSW_SP_RESOURCE_KVD,
  	MLXSW_SP_RESOURCE_KVD_LINEAR,
  	MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
  	MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
 +	MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
 +	MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
 +	MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
  };
  
  struct mlxsw_sp_port;
  struct mlxsw_sp_rif;
 +struct mlxsw_sp_span_entry;
  
  struct mlxsw_sp_upper {
  	struct net_device *dev;
@@@ -118,13 -111,32 +118,13 @@@ struct mlxsw_sp_mid 
  	unsigned long *ports_in_mid; /* bits array */
  };
  
 -enum mlxsw_sp_span_type {
 -	MLXSW_SP_SPAN_EGRESS,
 -	MLXSW_SP_SPAN_INGRESS
 -};
 -
 -struct mlxsw_sp_span_inspected_port {
 -	struct list_head list;
 -	enum mlxsw_sp_span_type type;
 -	u8 local_port;
 -};
 -
 -struct mlxsw_sp_span_entry {
 -	u8 local_port;
 -	bool used;
 -	struct list_head bound_ports_list;
 -	int ref_count;
 -	int id;
 -};
 -
  enum mlxsw_sp_port_mall_action_type {
  	MLXSW_SP_PORT_MALL_MIRROR,
  	MLXSW_SP_PORT_MALL_SAMPLE,
  };
  
  struct mlxsw_sp_port_mall_mirror_tc_entry {
 -	u8 to_local_port;
 +	int span_id;
  	bool ingress;
  };
  
@@@ -199,6 -211,7 +199,7 @@@ struct mlxsw_sp_port_vlan 
  	struct list_head list;
  	struct mlxsw_sp_port *mlxsw_sp_port;
  	struct mlxsw_sp_fid *fid;
+ 	unsigned int ref_count;
  	u16 vid;
  	struct mlxsw_sp_bridge_port *bridge_port;
  	struct list_head bridge_vlan_node;
@@@ -210,8 -223,6 +211,8 @@@ struct mlxsw_sp_port_xstats 
  	u64 wred_drop[TC_MAX_QUEUE];
  	u64 tail_drop[TC_MAX_QUEUE];
  	u64 backlog[TC_MAX_QUEUE];
 +	u64 tx_bytes[IEEE_8021QAZ_MAX_TCS];
 +	u64 tx_packets[IEEE_8021QAZ_MAX_TCS];
  };
  
  struct mlxsw_sp_port {
@@@ -249,7 -260,6 +250,7 @@@
  	struct mlxsw_sp_port_sample *sample;
  	struct list_head vlans_list;
  	struct mlxsw_sp_qdisc *root_qdisc;
 +	struct mlxsw_sp_qdisc *tclass_qdiscs;
  	unsigned acl_rule_count;
  	struct mlxsw_sp_acl_block *ing_acl_block;
  	struct mlxsw_sp_acl_block *eg_acl_block;
@@@ -387,6 -397,16 +388,6 @@@ struct mlxsw_sp_port *mlxsw_sp_port_dev
  struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
  void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
  struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev);
 -int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
 -			     struct mlxsw_sp_port *to,
 -			     enum mlxsw_sp_span_type type,
 -			     bool bind);
 -void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from,
 -			      u8 destination_port,
 -			      enum mlxsw_sp_span_type type,
 -			      bool bind);
 -struct mlxsw_sp_span_entry *
 -mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port);
  
  /* spectrum_dcb.c */
  #ifdef CONFIG_MLXSW_SPECTRUM_DCB
@@@ -442,7 -462,6 +443,7 @@@ int mlxsw_sp_kvdl_alloc_size_query(stru
  				   unsigned int entry_count,
  				   unsigned int *p_alloc_size);
  u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp);
 +int mlxsw_sp_kvdl_resources_register(struct devlink *devlink);
  
  struct mlxsw_sp_acl_rule_info {
  	unsigned int priority;
diff --combined drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 917663adf925,161bcdc012f0..c11c9a635866
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@@ -1203,6 -1203,7 +1203,7 @@@ static int __mlxsw_sp_port_fdb_uc_op(st
  				     bool dynamic)
  {
  	char *sfd_pl;
+ 	u8 num_rec;
  	int err;
  
  	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@@ -1212,9 -1213,16 +1213,16 @@@
  	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
  	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
  			      mac, fid, action, local_port);
+ 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
  	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
- 	kfree(sfd_pl);
+ 	if (err)
+ 		goto out;
  
+ 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ 		err = -EBUSY;
+ 
+ out:
+ 	kfree(sfd_pl);
  	return err;
  }
  
@@@ -1239,6 -1247,7 +1247,7 @@@ static int mlxsw_sp_port_fdb_uc_lag_op(
  				       bool adding, bool dynamic)
  {
  	char *sfd_pl;
+ 	u8 num_rec;
  	int err;
  
  	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@@ -1249,9 -1258,16 +1258,16 @@@
  	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
  				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
  				  lag_vid, lag_id);
+ 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
  	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
- 	kfree(sfd_pl);
+ 	if (err)
+ 		goto out;
  
+ 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ 		err = -EBUSY;
+ 
+ out:
+ 	kfree(sfd_pl);
  	return err;
  }
  
@@@ -1296,6 -1312,7 +1312,7 @@@ static int mlxsw_sp_port_mdb_op(struct 
  				u16 fid, u16 mid_idx, bool adding)
  {
  	char *sfd_pl;
+ 	u8 num_rec;
  	int err;
  
  	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@@ -1305,7 -1322,15 +1322,15 @@@
  	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
  	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
  			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
+ 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
  	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+ 	if (err)
+ 		goto out;
+ 
+ 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+ 		err = -EBUSY;
+ 
+ out:
  	kfree(sfd_pl);
  	return err;
  }
@@@ -1819,7 -1844,7 +1844,7 @@@ mlxsw_sp_bridge_8021q_port_join(struct 
  	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
  
  	if (is_vlan_dev(bridge_port->dev)) {
 -		NL_SET_ERR_MSG(extack, "spectrum: Can not enslave a VLAN device to a VLAN-aware bridge");
 +		NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
  		return -EINVAL;
  	}
  
@@@ -1882,16 -1907,20 +1907,16 @@@ mlxsw_sp_bridge_8021d_port_join(struct 
  				struct netlink_ext_ack *extack)
  {
  	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
 +	struct net_device *dev = bridge_port->dev;
  	u16 vid;
  
 -	if (!is_vlan_dev(bridge_port->dev)) {
 -		NL_SET_ERR_MSG(extack, "spectrum: Only VLAN devices can be enslaved to a VLAN-unaware bridge");
 -		return -EINVAL;
 -	}
 -	vid = vlan_dev_vlan_id(bridge_port->dev);
 -
 +	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
  	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
  	if (WARN_ON(!mlxsw_sp_port_vlan))
  		return -EINVAL;
  
  	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
 -		NL_SET_ERR_MSG(extack, "spectrum: Can not bridge VLAN uppers of the same port");
 +		NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
  		return -EINVAL;
  	}
  
@@@ -1908,10 -1937,8 +1933,10 @@@ mlxsw_sp_bridge_8021d_port_leave(struc
  				 struct mlxsw_sp_port *mlxsw_sp_port)
  {
  	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
 -	u16 vid = vlan_dev_vlan_id(bridge_port->dev);
 +	struct net_device *dev = bridge_port->dev;
 +	u16 vid;
  
 +	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
  	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
  	if (WARN_ON(!mlxsw_sp_port_vlan))
  		return;
diff --combined drivers/net/ethernet/renesas/sh_eth.c
index d3e1bc05ca9c,14c839bb09e7..3557fe3f2bb5
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@@ -123,8 -123,8 +123,8 @@@ static const u16 sh_eth_offset_gigabit[
  	[TSU_FWSL0]	= 0x0030,
  	[TSU_FWSL1]	= 0x0034,
  	[TSU_FWSLC]	= 0x0038,
 -	[TSU_QTAG0]	= 0x0040,
 -	[TSU_QTAG1]	= 0x0044,
 +	[TSU_QTAGM0]	= 0x0040,
 +	[TSU_QTAGM1]	= 0x0044,
  	[TSU_FWSR]	= 0x0050,
  	[TSU_FWINMK]	= 0x0054,
  	[TSU_ADQT0]	= 0x0048,
@@@ -439,6 -439,17 +439,17 @@@ static void sh_eth_modify(struct net_de
  		     enum_index);
  }
  
+ static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
+ 			     int enum_index)
+ {
+ 	iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
+ }
+ 
+ static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
+ {
+ 	return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
+ }
+ 
  static bool sh_eth_is_gether(struct sh_eth_private *mdp)
  {
  	return mdp->reg_offset == sh_eth_offset_gigabit;
@@@ -752,7 -763,6 +763,7 @@@ static struct sh_eth_cpu_data sh7757_da
  	.rpadir		= 1,
  	.rpadir_value   = 2 << 16,
  	.rtrate		= 1,
 +	.dual_port	= 1,
  };
  
  #define SH_GIGA_ETH_BASE	0xfee00000UL
@@@ -831,7 -841,6 +842,7 @@@ static struct sh_eth_cpu_data sh7757_da
  	.no_trimd	= 1,
  	.no_ade		= 1,
  	.tsu		= 1,
 +	.dual_port	= 1,
  };
  
  /* SH7734 */
@@@ -902,7 -911,6 +913,7 @@@ static struct sh_eth_cpu_data sh7763_da
  	.tsu		= 1,
  	.irq_flags	= IRQF_SHARED,
  	.magic		= 1,
 +	.dual_port	= 1,
  };
  
  static struct sh_eth_cpu_data sh7619_data = {
@@@ -935,7 -943,6 +946,7 @@@ static struct sh_eth_cpu_data sh771x_da
  			  EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
  			  EESIPR_PREIP | EESIPR_CERFIP,
  	.tsu		= 1,
 +	.dual_port	= 1,
  };
  
  static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
@@@ -965,16 -972,20 +976,16 @@@
  
  static int sh_eth_check_reset(struct net_device *ndev)
  {
 -	int ret = 0;
 -	int cnt = 100;
 +	int cnt;
  
 -	while (cnt > 0) {
 +	for (cnt = 100; cnt > 0; cnt--) {
  		if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
 -			break;
 +			return 0;
  		mdelay(1);
 -		cnt--;
 -	}
 -	if (cnt <= 0) {
 -		netdev_err(ndev, "Device reset failed\n");
 -		ret = -ETIMEDOUT;
  	}
 -	return ret;
 +
 +	netdev_err(ndev, "Device reset failed\n");
 +	return -ETIMEDOUT;
  }
  
  static int sh_eth_reset(struct net_device *ndev)
@@@ -2101,6 -2112,8 +2112,6 @@@ static size_t __sh_eth_get_regs(struct 
  		add_tsu_reg(TSU_FWSL0);
  		add_tsu_reg(TSU_FWSL1);
  		add_tsu_reg(TSU_FWSLC);
 -		add_tsu_reg(TSU_QTAG0);
 -		add_tsu_reg(TSU_QTAG1);
  		add_tsu_reg(TSU_QTAGM0);
  		add_tsu_reg(TSU_QTAGM1);
  		add_tsu_reg(TSU_FWSR);
@@@ -2919,7 -2932,7 +2930,7 @@@ static int sh_eth_vlan_rx_kill_vid(stru
  /* SuperH's TSU register init function */
  static void sh_eth_tsu_init(struct sh_eth_private *mdp)
  {
 -	if (sh_eth_is_rz_fast_ether(mdp)) {
 +	if (!mdp->cd->dual_port) {
  		sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
  		sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
  				 TSU_FWSLC);	/* Enable POST registers */
@@@ -2936,8 -2949,13 +2947,8 @@@
  	sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
  	sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
  	sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
 -	if (sh_eth_is_gether(mdp)) {
 -		sh_eth_tsu_write(mdp, 0, TSU_QTAG0);	/* Disable QTAG(0->1) */
 -		sh_eth_tsu_write(mdp, 0, TSU_QTAG1);	/* Disable QTAG(1->0) */
 -	} else {
 -		sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
 -		sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
 -	}
 +	sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);	/* Disable QTAG(0->1) */
 +	sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);	/* Disable QTAG(1->0) */
  	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* all interrupt status clear */
  	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupt */
  	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entry */
diff --combined drivers/net/ethernet/renesas/sh_eth.h
index 5bbaf9e56e92,e5fe70134690..21047d58a93f
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@@ -118,8 -118,8 +118,8 @@@ enum 
  	TSU_FWSL0,
  	TSU_FWSL1,
  	TSU_FWSLC,
 -	TSU_QTAG0,
 -	TSU_QTAG1,
 +	TSU_QTAG0,			/* Same as TSU_QTAGM0 */
 +	TSU_QTAG1,			/* Same as TSU_QTAGM1 */
  	TSU_QTAGM0,
  	TSU_QTAGM1,
  	TSU_FWSR,
@@@ -509,7 -509,6 +509,7 @@@ struct sh_eth_cpu_data 
  	unsigned rmiimode:1;	/* EtherC has RMIIMODE register */
  	unsigned rtrate:1;	/* EtherC has RTRATE register */
  	unsigned magic:1;	/* EtherC has ECMR.MPDE and ECSR.MPD */
 +	unsigned dual_port:1;	/* Dual EtherC/E-DMAC */
  };
  
  struct sh_eth_private {
@@@ -568,15 -567,4 +568,4 @@@ static inline void *sh_eth_tsu_get_offs
  	return mdp->tsu_addr + mdp->reg_offset[enum_index];
  }
  
- static inline void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
- 				    int enum_index)
- {
- 	iowrite32(data, mdp->tsu_addr + mdp->reg_offset[enum_index]);
- }
- 
- static inline u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
- {
- 	return ioread32(mdp->tsu_addr + mdp->reg_offset[enum_index]);
- }
- 
  #endif	/* #ifndef __SH_ETH_H__ */
diff --combined drivers/net/ppp/ppp_generic.c
index a393c1dff7dc,fa2a9bdd1866..7dc2f34e7229
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@@ -971,7 -971,6 +971,7 @@@ static struct pernet_operations ppp_net
  	.exit = ppp_exit_net,
  	.id   = &ppp_net_id,
  	.size = sizeof(struct ppp_net),
 +	.async = true,
  };
  
  static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
@@@ -3162,6 -3161,15 +3162,15 @@@ ppp_connect_channel(struct channel *pch
  		goto outl;
  
  	ppp_lock(ppp);
+ 	spin_lock_bh(&pch->downl);
+ 	if (!pch->chan) {
+ 		/* Don't connect unregistered channels */
+ 		spin_unlock_bh(&pch->downl);
+ 		ppp_unlock(ppp);
+ 		ret = -ENOTCONN;
+ 		goto outl;
+ 	}
+ 	spin_unlock_bh(&pch->downl);
  	if (pch->file.hdrlen > ppp->file.hdrlen)
  		ppp->file.hdrlen = pch->file.hdrlen;
  	hdrlen = pch->file.hdrlen + 2;	/* for protocol bytes */
diff --combined drivers/net/tun.c
index d531954512c7,7433bb2e4451..475088f947bb
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@@ -78,7 -78,6 +78,7 @@@
  #include <linux/mutex.h>
  
  #include <linux/uaccess.h>
 +#include <linux/proc_fs.h>
  
  /* Uncomment to enable debugging */
  /* #define TUN_DEBUG 1 */
@@@ -182,7 -181,6 +182,6 @@@ struct tun_file 
  	struct tun_struct *detached;
  	struct ptr_ring tx_ring;
  	struct xdp_rxq_info xdp_rxq;
- 	int xdp_pending_pkts;
  };
  
  struct tun_flow_entry {
@@@ -1644,6 -1642,7 +1643,7 @@@ static struct sk_buff *tun_build_skb(st
  	else
  		*skb_xdp = 0;
  
+ 	preempt_disable();
  	rcu_read_lock();
  	xdp_prog = rcu_dereference(tun->xdp_prog);
  	if (xdp_prog && !*skb_xdp) {
@@@ -1663,11 -1662,12 +1663,12 @@@
  		case XDP_REDIRECT:
  			get_page(alloc_frag->page);
  			alloc_frag->offset += buflen;
- 			++tfile->xdp_pending_pkts;
  			err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
+ 			xdp_do_flush_map();
  			if (err)
  				goto err_redirect;
  			rcu_read_unlock();
+ 			preempt_enable();
  			return NULL;
  		case XDP_TX:
  			xdp_xmit = true;
@@@ -1689,6 -1689,7 +1690,7 @@@
  	skb = build_skb(buf, buflen);
  	if (!skb) {
  		rcu_read_unlock();
+ 		preempt_enable();
  		return ERR_PTR(-ENOMEM);
  	}
  
@@@ -1701,10 -1702,12 +1703,12 @@@
  		skb->dev = tun->dev;
  		generic_xdp_tx(skb, xdp_prog);
  		rcu_read_unlock();
+ 		preempt_enable();
  		return NULL;
  	}
  
  	rcu_read_unlock();
+ 	preempt_enable();
  
  	return skb;
  
@@@ -1712,6 -1715,7 +1716,7 @@@ err_redirect
  	put_page(alloc_frag->page);
  err_xdp:
  	rcu_read_unlock();
+ 	preempt_enable();
  	this_cpu_inc(tun->pcpu_stats->rx_dropped);
  	return NULL;
  }
@@@ -1985,11 -1989,6 +1990,6 @@@ static ssize_t tun_chr_write_iter(struc
  	result = tun_get_user(tun, tfile, NULL, from,
  			      file->f_flags & O_NONBLOCK, false);
  
- 	if (tfile->xdp_pending_pkts) {
- 		tfile->xdp_pending_pkts = 0;
- 		xdp_do_flush_map();
- 	}
- 
  	tun_put(tun);
  	return result;
  }
@@@ -2287,67 -2286,11 +2287,67 @@@ static int tun_validate(struct nlattr *
  	return -EINVAL;
  }
  
 +static size_t tun_get_size(const struct net_device *dev)
 +{
 +	BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
 +	BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));
 +
 +	return nla_total_size(sizeof(uid_t)) + /* OWNER */
 +	       nla_total_size(sizeof(gid_t)) + /* GROUP */
 +	       nla_total_size(sizeof(u8)) + /* TYPE */
 +	       nla_total_size(sizeof(u8)) + /* PI */
 +	       nla_total_size(sizeof(u8)) + /* VNET_HDR */
 +	       nla_total_size(sizeof(u8)) + /* PERSIST */
 +	       nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
 +	       nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
 +	       nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
 +	       0;
 +}
 +
 +static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
 +{
 +	struct tun_struct *tun = netdev_priv(dev);
 +
 +	if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
 +		goto nla_put_failure;
 +	if (uid_valid(tun->owner) &&
 +	    nla_put_u32(skb, IFLA_TUN_OWNER,
 +			from_kuid_munged(current_user_ns(), tun->owner)))
 +		goto nla_put_failure;
 +	if (gid_valid(tun->group) &&
 +	    nla_put_u32(skb, IFLA_TUN_GROUP,
 +			from_kgid_munged(current_user_ns(), tun->group)))
 +		goto nla_put_failure;
 +	if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
 +		goto nla_put_failure;
 +	if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
 +		goto nla_put_failure;
 +	if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
 +		goto nla_put_failure;
 +	if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
 +		       !!(tun->flags & IFF_MULTI_QUEUE)))
 +		goto nla_put_failure;
 +	if (tun->flags & IFF_MULTI_QUEUE) {
 +		if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
 +			goto nla_put_failure;
 +		if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
 +				tun->numdisabled))
 +			goto nla_put_failure;
 +	}
 +
 +	return 0;
 +
 +nla_put_failure:
 +	return -EMSGSIZE;
 +}
 +
  static struct rtnl_link_ops tun_link_ops __read_mostly = {
  	.kind		= DRV_NAME,
  	.priv_size	= sizeof(struct tun_struct),
  	.setup		= tun_setup,
  	.validate	= tun_validate,
 +	.get_size       = tun_get_size,
 +	.fill_info      = tun_fill_info,
  };
  
  static void tun_sock_write_space(struct sock *sk)
@@@ -2382,13 -2325,6 +2382,6 @@@ static int tun_sendmsg(struct socket *s
  	ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
  			   m->msg_flags & MSG_DONTWAIT,
  			   m->msg_flags & MSG_MORE);
- 
- 	if (tfile->xdp_pending_pkts >= NAPI_POLL_WEIGHT ||
- 	    !(m->msg_flags & MSG_MORE)) {
- 		tfile->xdp_pending_pkts = 0;
- 		xdp_do_flush_map();
- 	}
- 
  	tun_put(tun);
  	return ret;
  }
@@@ -2846,7 -2782,6 +2839,7 @@@ static long __tun_chr_ioctl(struct fil
  	struct tun_struct *tun;
  	void __user* argp = (void __user*)arg;
  	struct ifreq ifr;
 +	struct net *net;
  	kuid_t owner;
  	kgid_t group;
  	int sndbuf;
@@@ -2855,8 -2790,7 +2848,8 @@@
  	int le;
  	int ret;
  
 -	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == SOCK_IOC_TYPE) {
 +	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
 +	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
  		if (copy_from_user(&ifr, argp, ifreq_len))
  			return -EFAULT;
  	} else {
@@@ -2876,7 -2810,6 +2869,7 @@@
  	rtnl_lock();
  
  	tun = tun_get(tfile);
 +	net = sock_net(&tfile->sk);
  	if (cmd == TUNSETIFF) {
  		ret = -EEXIST;
  		if (tun)
@@@ -2884,7 -2817,7 +2877,7 @@@
  
  		ifr.ifr_name[IFNAMSIZ-1] = '\0';
  
 -		ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr);
 +		ret = tun_set_iff(net, file, &ifr);
  
  		if (ret)
  			goto unlock;
@@@ -2906,14 -2839,6 +2899,14 @@@
  		tfile->ifindex = ifindex;
  		goto unlock;
  	}
 +	if (cmd == SIOCGSKNS) {
 +		ret = -EPERM;
 +		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 +			goto unlock;
 +
 +		ret = open_related_ns(&net->ns, get_net_ns);
 +		goto unlock;
 +	}
  
  	ret = -EBADFD;
  	if (!tun)
@@@ -3231,7 -3156,6 +3224,6 @@@ static int tun_chr_open(struct inode *i
  	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
  
  	memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
- 	tfile->xdp_pending_pkts = 0;
  
  	return 0;
  }
diff --combined include/linux/phy.h
index 6e38c699b753,d7069539f351..5a9b1753fdc5
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@@ -924,6 -924,7 +924,7 @@@ void phy_device_remove(struct phy_devic
  int phy_init_hw(struct phy_device *phydev);
  int phy_suspend(struct phy_device *phydev);
  int phy_resume(struct phy_device *phydev);
+ int __phy_resume(struct phy_device *phydev);
  int phy_loopback(struct phy_device *phydev, bool enable);
  struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
  			      phy_interface_t interface);
@@@ -994,14 -995,6 +995,14 @@@ int genphy_c45_pma_setup_forced(struct 
  int genphy_c45_an_disable_aneg(struct phy_device *phydev);
  int genphy_c45_read_mdix(struct phy_device *phydev);
  
 +/* The gen10g_* functions are the old Clause 45 stub */
 +int gen10g_config_aneg(struct phy_device *phydev);
 +int gen10g_read_status(struct phy_device *phydev);
 +int gen10g_no_soft_reset(struct phy_device *phydev);
 +int gen10g_config_init(struct phy_device *phydev);
 +int gen10g_suspend(struct phy_device *phydev);
 +int gen10g_resume(struct phy_device *phydev);
 +
  static inline int phy_read_status(struct phy_device *phydev)
  {
  	if (!phydev->drv)
diff --combined include/linux/skbuff.h
index 9bc1750ca3d3,ddf77cf4ff2d..d8340e6e8814
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@@ -466,9 -466,6 +466,9 @@@ struct ubuf_info 
  
  #define skb_uarg(SKB)	((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
  
 +int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
 +void mm_unaccount_pinned_pages(struct mmpin *mmp);
 +
  struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
  struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
  					struct ubuf_info *uarg);
@@@ -3288,8 -3285,7 +3288,7 @@@ int skb_zerocopy(struct sk_buff *to, st
  void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
  int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
  void skb_scrub_packet(struct sk_buff *skb, bool xnet);
- unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
- bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
+ bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
  bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
  struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
  struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
@@@ -4107,38 -4103,6 +4106,6 @@@ static inline bool skb_head_is_locked(c
  	return !skb->head_frag || skb_cloned(skb);
  }
  
- /**
-  * skb_gso_network_seglen - Return length of individual segments of a gso packet
-  *
-  * @skb: GSO skb
-  *
-  * skb_gso_network_seglen is used to determine the real size of the
-  * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
-  *
-  * The MAC/L2 header is not accounted for.
-  */
- static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
- {
- 	unsigned int hdr_len = skb_transport_header(skb) -
- 			       skb_network_header(skb);
- 	return hdr_len + skb_gso_transport_seglen(skb);
- }
- 
- /**
-  * skb_gso_mac_seglen - Return length of individual segments of a gso packet
-  *
-  * @skb: GSO skb
-  *
-  * skb_gso_mac_seglen is used to determine the real size of the
-  * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
-  * headers (TCP/UDP).
-  */
- static inline unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
- {
- 	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
- 	return hdr_len + skb_gso_transport_seglen(skb);
- }
- 
  /* Local Checksum Offload.
   * Compute outer checksum based on the assumption that the
   * inner checksum will be offloaded later.
diff --combined include/net/devlink.h
index 8d1c3f276dea,4de35ed12bcc..c83125ad20ff
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@@ -234,9 -234,13 +234,9 @@@ struct devlink_dpipe_headers 
  /**
   * struct devlink_resource_ops - resource ops
   * @occ_get: get the occupied size
 - * @size_validate: validate the size of the resource before update, reload
 - *                 is needed for changes to take place
   */
  struct devlink_resource_ops {
  	u64 (*occ_get)(struct devlink *devlink);
 -	int (*size_validate)(struct devlink *devlink, u64 size,
 -			     struct netlink_ext_ack *extack);
  };
  
  /**
@@@ -253,6 -257,18 +253,18 @@@ struct devlink_resource_size_params 
  	enum devlink_resource_unit unit;
  };
  
+ static inline void
+ devlink_resource_size_params_init(struct devlink_resource_size_params *size_params,
+ 				  u64 size_min, u64 size_max,
+ 				  u64 size_granularity,
+ 				  enum devlink_resource_unit unit)
+ {
+ 	size_params->size_min = size_min;
+ 	size_params->size_max = size_max;
+ 	size_params->size_granularity = size_granularity;
+ 	size_params->unit = unit;
+ }
+ 
  /**
   * struct devlink_resource - devlink resource
   * @name: name of the resource
@@@ -274,7 -290,7 +286,7 @@@ struct devlink_resource 
  	u64 size_new;
  	bool size_valid;
  	struct devlink_resource *parent;
- 	struct devlink_resource_size_params *size_params;
+ 	struct devlink_resource_size_params size_params;
  	struct list_head list;
  	struct list_head resource_list;
  	const struct devlink_resource_ops *resource_ops;
@@@ -398,7 -414,7 +410,7 @@@ int devlink_resource_register(struct de
  			      u64 resource_size,
  			      u64 resource_id,
  			      u64 parent_resource_id,
- 			      struct devlink_resource_size_params *size_params,
+ 			      const struct devlink_resource_size_params *size_params,
  			      const struct devlink_resource_ops *resource_ops);
  void devlink_resources_unregister(struct devlink *devlink,
  				  struct devlink_resource *resource);
@@@ -552,7 -568,7 +564,7 @@@ devlink_resource_register(struct devlin
  			  u64 resource_size,
  			  u64 resource_id,
  			  u64 parent_resource_id,
- 			  struct devlink_resource_size_params *size_params,
+ 			  const struct devlink_resource_size_params *size_params,
  			  const struct devlink_resource_ops *resource_ops)
  {
  	return 0;
diff --combined kernel/bpf/verifier.c
index 3c74b163eaeb,c6eff108aa99..eb79a34359c0
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@@ -508,6 -508,10 +508,6 @@@ err
  static const int caller_saved[CALLER_SAVED_REGS] = {
  	BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
  };
 -#define CALLEE_SAVED_REGS 5
 -static const int callee_saved[CALLEE_SAVED_REGS] = {
 -	BPF_REG_6, BPF_REG_7, BPF_REG_8, BPF_REG_9
 -};
  
  static void __mark_reg_not_init(struct bpf_reg_state *reg);
  
@@@ -1352,6 -1356,13 +1352,13 @@@ static bool is_ctx_reg(struct bpf_verif
  	return reg->type == PTR_TO_CTX;
  }
  
+ static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
+ {
+ 	const struct bpf_reg_state *reg = cur_regs(env) + regno;
+ 
+ 	return type_is_pkt_pointer(reg->type);
+ }
+ 
  static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
  				   const struct bpf_reg_state *reg,
  				   int off, int size, bool strict)
@@@ -1412,10 -1423,10 +1419,10 @@@ static int check_generic_ptr_alignment(
  }
  
  static int check_ptr_alignment(struct bpf_verifier_env *env,
- 			       const struct bpf_reg_state *reg,
- 			       int off, int size)
+ 			       const struct bpf_reg_state *reg, int off,
+ 			       int size, bool strict_alignment_once)
  {
- 	bool strict = env->strict_alignment;
+ 	bool strict = env->strict_alignment || strict_alignment_once;
  	const char *pointer_desc = "";
  
  	switch (reg->type) {
@@@ -1572,9 -1583,9 +1579,9 @@@ static void coerce_reg_to_size(struct b
   * if t==write && value_regno==-1, some unknown value is stored into memory
   * if t==read && value_regno==-1, don't care what we read from memory
   */
- static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
- 			    int bpf_size, enum bpf_access_type t,
- 			    int value_regno)
+ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
+ 			    int off, int bpf_size, enum bpf_access_type t,
+ 			    int value_regno, bool strict_alignment_once)
  {
  	struct bpf_reg_state *regs = cur_regs(env);
  	struct bpf_reg_state *reg = regs + regno;
@@@ -1586,7 -1597,7 +1593,7 @@@
  		return size;
  
  	/* alignment checks will add in reg->off themselves */
- 	err = check_ptr_alignment(env, reg, off, size);
+ 	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
  	if (err)
  		return err;
  
@@@ -1731,21 -1742,23 +1738,23 @@@ static int check_xadd(struct bpf_verifi
  		return -EACCES;
  	}
  
- 	if (is_ctx_reg(env, insn->dst_reg)) {
- 		verbose(env, "BPF_XADD stores into R%d context is not allowed\n",
- 			insn->dst_reg);
+ 	if (is_ctx_reg(env, insn->dst_reg) ||
+ 	    is_pkt_reg(env, insn->dst_reg)) {
+ 		verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
+ 			insn->dst_reg, is_ctx_reg(env, insn->dst_reg) ?
+ 			"context" : "packet");
  		return -EACCES;
  	}
  
  	/* check whether atomic_add can read the memory */
  	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
- 			       BPF_SIZE(insn->code), BPF_READ, -1);
+ 			       BPF_SIZE(insn->code), BPF_READ, -1, true);
  	if (err)
  		return err;
  
  	/* check whether atomic_add can write into the same memory */
  	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
- 				BPF_SIZE(insn->code), BPF_WRITE, -1);
+ 				BPF_SIZE(insn->code), BPF_WRITE, -1, true);
  }
  
  /* when register 'regno' is passed into function that will read 'access_size'
@@@ -2384,7 -2397,8 +2393,8 @@@ static int check_helper_call(struct bpf
  	 * is inferred from register state.
  	 */
  	for (i = 0; i < meta.access_size; i++) {
- 		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1);
+ 		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
+ 				       BPF_WRITE, -1, false);
  		if (err)
  			return err;
  	}
@@@ -4628,7 -4642,7 +4638,7 @@@ static int do_check(struct bpf_verifier
  			 */
  			err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
  					       BPF_SIZE(insn->code), BPF_READ,
- 					       insn->dst_reg);
+ 					       insn->dst_reg, false);
  			if (err)
  				return err;
  
@@@ -4680,7 -4694,7 +4690,7 @@@
  			/* check that memory (dst_reg + off) is writeable */
  			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
  					       BPF_SIZE(insn->code), BPF_WRITE,
- 					       insn->src_reg);
+ 					       insn->src_reg, false);
  			if (err)
  				return err;
  
@@@ -4715,7 -4729,7 +4725,7 @@@
  			/* check that memory (dst_reg + off) is writeable */
  			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
  					       BPF_SIZE(insn->code), BPF_WRITE,
- 					       -1);
+ 					       -1, false);
  			if (err)
  				return err;
  
diff --combined net/batman-adv/bat_iv_ogm.c
index e21aa147607b,99abeadf416e..be09a9883825
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@@ -1,5 -1,5 +1,5 @@@
  // SPDX-License-Identifier: GPL-2.0
 -/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
 +/* Copyright (C) 2007-2018  B.A.T.M.A.N. contributors:
   *
   * Marek Lindner, Simon Wunderlich
   *
@@@ -157,7 -157,7 +157,7 @@@ static void batadv_iv_ogm_orig_free(str
   * Return: 0 on success, a negative error code otherwise.
   */
  static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node,
- 				     int max_if_num)
+ 				     unsigned int max_if_num)
  {
  	void *data_ptr;
  	size_t old_size;
@@@ -201,7 -201,8 +201,8 @@@ unlock
   */
  static void
  batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node,
- 				   int max_if_num, int del_if_num)
+ 				   unsigned int max_if_num,
+ 				   unsigned int del_if_num)
  {
  	size_t chunk_size;
  	size_t if_offset;
@@@ -239,7 -240,8 +240,8 @@@
   */
  static void
  batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node,
- 				       int max_if_num, int del_if_num)
+ 				       unsigned int max_if_num,
+ 				       unsigned int del_if_num)
  {
  	size_t if_offset;
  	void *data_ptr;
@@@ -276,7 -278,8 +278,8 @@@
   * Return: 0 on success, a negative error code otherwise.
   */
  static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
- 				     int max_if_num, int del_if_num)
+ 				     unsigned int max_if_num,
+ 				     unsigned int del_if_num)
  {
  	spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
  
@@@ -311,7 -314,8 +314,8 @@@ static struct batadv_orig_node 
  batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr)
  {
  	struct batadv_orig_node *orig_node;
- 	int size, hash_added;
+ 	int hash_added;
+ 	size_t size;
  
  	orig_node = batadv_orig_hash_find(bat_priv, addr);
  	if (orig_node)
@@@ -893,7 -897,7 +897,7 @@@ batadv_iv_ogm_slide_own_bcast_window(st
  	u32 i;
  	size_t word_index;
  	u8 *w;
- 	int if_num;
+ 	unsigned int if_num;
  
  	for (i = 0; i < hash->size; i++) {
  		head = &hash->table[i];
@@@ -1023,7 -1027,7 +1027,7 @@@ batadv_iv_ogm_orig_update(struct batadv
  	struct batadv_neigh_node *tmp_neigh_node = NULL;
  	struct batadv_neigh_node *router = NULL;
  	struct batadv_orig_node *orig_node_tmp;
- 	int if_num;
+ 	unsigned int if_num;
  	u8 sum_orig, sum_neigh;
  	u8 *neigh_addr;
  	u8 tq_avg;
@@@ -1182,7 -1186,7 +1186,7 @@@ static bool batadv_iv_ogm_calc_tq(struc
  	u8 total_count;
  	u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
  	unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
- 	int if_num;
+ 	unsigned int if_num;
  	unsigned int tq_asym_penalty, inv_asym_penalty;
  	unsigned int combined_tq;
  	unsigned int tq_iface_penalty;
@@@ -1702,9 -1706,9 +1706,9 @@@ static void batadv_iv_ogm_process(cons
  
  	if (is_my_orig) {
  		unsigned long *word;
- 		int offset;
+ 		size_t offset;
  		s32 bit_pos;
- 		s16 if_num;
+ 		unsigned int if_num;
  		u8 *weight;
  
  		orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
@@@ -2729,7 -2733,7 +2733,7 @@@ static int batadv_iv_gw_dump_entry(stru
  	struct batadv_neigh_ifinfo *router_ifinfo = NULL;
  	struct batadv_neigh_node *router;
  	struct batadv_gw_node *curr_gw;
- 	int ret = -EINVAL;
+ 	int ret = 0;
  	void *hdr;
  
  	router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
diff --combined net/batman-adv/bat_v.c
index 9c3a34b65b15,c74f81341dab..ec93337ee259
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@@ -1,5 -1,5 +1,5 @@@
  // SPDX-License-Identifier: GPL-2.0
 -/* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
 +/* Copyright (C) 2013-2018  B.A.T.M.A.N. contributors:
   *
   * Linus Lüssing, Marek Lindner
   *
@@@ -928,7 -928,7 +928,7 @@@ static int batadv_v_gw_dump_entry(struc
  	struct batadv_neigh_ifinfo *router_ifinfo = NULL;
  	struct batadv_neigh_node *router;
  	struct batadv_gw_node *curr_gw;
- 	int ret = -EINVAL;
+ 	int ret = 0;
  	void *hdr;
  
  	router = batadv_orig_router_get(gw_node->orig_node, BATADV_IF_DEFAULT);
diff --combined net/batman-adv/bridge_loop_avoidance.c
index 8ff81346ff0c,b1a08374088b..a2de5a44bd41
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@@ -1,5 -1,5 +1,5 @@@
  // SPDX-License-Identifier: GPL-2.0
 -/* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
 +/* Copyright (C) 2011-2018  B.A.T.M.A.N. contributors:
   *
   * Simon Wunderlich
   *
@@@ -2161,22 -2161,25 +2161,25 @@@ batadv_bla_claim_dump_bucket(struct sk_
  {
  	struct batadv_bla_claim *claim;
  	int idx = 0;
+ 	int ret = 0;
  
  	rcu_read_lock();
  	hlist_for_each_entry_rcu(claim, head, hash_entry) {
  		if (idx++ < *idx_skip)
  			continue;
- 		if (batadv_bla_claim_dump_entry(msg, portid, seq,
- 						primary_if, claim)) {
+ 
+ 		ret = batadv_bla_claim_dump_entry(msg, portid, seq,
+ 						  primary_if, claim);
+ 		if (ret) {
  			*idx_skip = idx - 1;
  			goto unlock;
  		}
  	}
  
- 	*idx_skip = idx;
+ 	*idx_skip = 0;
  unlock:
  	rcu_read_unlock();
- 	return 0;
+ 	return ret;
  }
  
  /**
@@@ -2391,22 -2394,25 +2394,25 @@@ batadv_bla_backbone_dump_bucket(struct 
  {
  	struct batadv_bla_backbone_gw *backbone_gw;
  	int idx = 0;
+ 	int ret = 0;
  
  	rcu_read_lock();
  	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
  		if (idx++ < *idx_skip)
  			continue;
- 		if (batadv_bla_backbone_dump_entry(msg, portid, seq,
- 						   primary_if, backbone_gw)) {
+ 
+ 		ret = batadv_bla_backbone_dump_entry(msg, portid, seq,
+ 						     primary_if, backbone_gw);
+ 		if (ret) {
  			*idx_skip = idx - 1;
  			goto unlock;
  		}
  	}
  
- 	*idx_skip = idx;
+ 	*idx_skip = 0;
  unlock:
  	rcu_read_unlock();
- 	return 0;
+ 	return ret;
  }
  
  /**
diff --combined net/batman-adv/fragmentation.c
index d815acc13c35,5afe641ee4b0..0fddc17106bd
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@@ -1,5 -1,5 +1,5 @@@
  // SPDX-License-Identifier: GPL-2.0
 -/* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
 +/* Copyright (C) 2013-2018  B.A.T.M.A.N. contributors:
   *
   * Martin Hundebøll <martin at hundeboll.net>
   *
@@@ -288,7 -288,8 +288,8 @@@ batadv_frag_merge_packets(struct hlist_
  	/* Move the existing MAC header to just before the payload. (Override
  	 * the fragment header.)
  	 */
- 	skb_pull_rcsum(skb_out, hdr_size);
+ 	skb_pull(skb_out, hdr_size);
+ 	skb_out->ip_summed = CHECKSUM_NONE;
  	memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
  	skb_set_mac_header(skb_out, -ETH_HLEN);
  	skb_reset_network_header(skb_out);
diff --combined net/batman-adv/hard-interface.c
index fd4a263dd6b7,68b54a39c51d..c405d15befd6
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@@ -1,5 -1,5 +1,5 @@@
  // SPDX-License-Identifier: GPL-2.0
 -/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
 +/* Copyright (C) 2007-2018  B.A.T.M.A.N. contributors:
   *
   * Marek Lindner, Simon Wunderlich
   *
@@@ -763,6 -763,11 +763,11 @@@ int batadv_hardif_enable_interface(stru
  	hard_iface->soft_iface = soft_iface;
  	bat_priv = netdev_priv(hard_iface->soft_iface);
  
+ 	if (bat_priv->num_ifaces >= UINT_MAX) {
+ 		ret = -ENOSPC;
+ 		goto err_dev;
+ 	}
+ 
  	ret = netdev_master_upper_dev_link(hard_iface->net_dev,
  					   soft_iface, NULL, NULL, NULL);
  	if (ret)
@@@ -876,7 -881,7 +881,7 @@@ void batadv_hardif_disable_interface(st
  	batadv_hardif_recalc_extra_skbroom(hard_iface->soft_iface);
  
  	/* nobody uses this interface anymore */
- 	if (!bat_priv->num_ifaces) {
+ 	if (bat_priv->num_ifaces == 0) {
  		batadv_gw_check_client_stop(bat_priv);
  
  		if (autodel == BATADV_IF_CLEANUP_AUTO)
@@@ -912,7 -917,7 +917,7 @@@ batadv_hardif_add_interface(struct net_
  	if (ret)
  		goto free_if;
  
- 	hard_iface->if_num = -1;
+ 	hard_iface->if_num = 0;
  	hard_iface->net_dev = net_dev;
  	hard_iface->soft_iface = NULL;
  	hard_iface->if_status = BATADV_IF_NOT_IN_USE;
diff --combined net/batman-adv/originator.c
index 2a51a0cbb82a,74782426bb77..716e5b43acfa
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@@ -1,5 -1,5 +1,5 @@@
  // SPDX-License-Identifier: GPL-2.0
 -/* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
 +/* Copyright (C) 2009-2018  B.A.T.M.A.N. contributors:
   *
   * Marek Lindner, Simon Wunderlich
   *
@@@ -1569,7 -1569,7 +1569,7 @@@ int batadv_orig_dump(struct sk_buff *ms
   * Return: 0 on success or negative error number in case of failure
   */
  int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
- 			    int max_if_num)
+ 			    unsigned int max_if_num)
  {
  	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
  	struct batadv_algo_ops *bao = bat_priv->algo_ops;
@@@ -1611,7 -1611,7 +1611,7 @@@ err
   * Return: 0 on success or negative error number in case of failure
   */
  int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
- 			    int max_if_num)
+ 			    unsigned int max_if_num)
  {
  	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
  	struct batadv_hashtable *hash = bat_priv->orig_hash;
diff --combined net/batman-adv/originator.h
index f3601ab0872e,15d896b2de6f..3b3f59b881e1
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@@ -1,5 -1,5 +1,5 @@@
  /* SPDX-License-Identifier: GPL-2.0 */
 -/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
 +/* Copyright (C) 2007-2018  B.A.T.M.A.N. contributors:
   *
   * Marek Lindner, Simon Wunderlich
   *
@@@ -73,9 -73,9 +73,9 @@@ int batadv_orig_seq_print_text(struct s
  int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb);
  int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset);
  int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
- 			    int max_if_num);
+ 			    unsigned int max_if_num);
  int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
- 			    int max_if_num);
+ 			    unsigned int max_if_num);
  struct batadv_orig_node_vlan *
  batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
  			  unsigned short vid);
diff --combined net/batman-adv/soft-interface.c
index c95e2b2677fd,367a81fb785f..edeffcb9f3a2
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@@ -1,5 -1,5 +1,5 @@@
  // SPDX-License-Identifier: GPL-2.0
 -/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
 +/* Copyright (C) 2007-2018  B.A.T.M.A.N. contributors:
   *
   * Marek Lindner, Simon Wunderlich
   *
@@@ -459,13 -459,7 +459,7 @@@ void batadv_interface_rx(struct net_dev
  
  	/* skb->dev & skb->pkt_type are set here */
  	skb->protocol = eth_type_trans(skb, soft_iface);
- 
- 	/* should not be necessary anymore as we use skb_pull_rcsum()
- 	 * TODO: please verify this and remove this TODO
- 	 * -- Dec 21st 2009, Simon Wunderlich
- 	 */
- 
- 	/* skb->ip_summed = CHECKSUM_UNNECESSARY; */
+ 	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
  
  	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
  	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
diff --combined net/batman-adv/types.h
index 4a3b8837e1b5,a5aa6d61f4e2..476b052ad982
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@@ -1,5 -1,5 +1,5 @@@
  /* SPDX-License-Identifier: GPL-2.0 */
 -/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
 +/* Copyright (C) 2007-2018  B.A.T.M.A.N. contributors:
   *
   * Marek Lindner, Simon Wunderlich
   *
@@@ -167,7 -167,7 +167,7 @@@ struct batadv_hard_iface 
  	struct list_head list;
  
  	/** @if_num: identificator of the interface */
- 	s16 if_num;
+ 	unsigned int if_num;
  
  	/** @if_status: status of the interface for batman-adv */
  	char if_status;
@@@ -1596,7 -1596,7 +1596,7 @@@ struct batadv_priv 
  	atomic_t batman_queue_left;
  
  	/** @num_ifaces: number of interfaces assigned to this mesh interface */
- 	char num_ifaces;
+ 	unsigned int num_ifaces;
  
  	/** @mesh_obj: kobject for sysfs mesh subdirectory */
  	struct kobject *mesh_obj;
@@@ -2186,15 -2186,16 +2186,16 @@@ struct batadv_algo_orig_ops 
  	 *  orig_node due to a new hard-interface being added into the mesh
  	 *  (optional)
  	 */
- 	int (*add_if)(struct batadv_orig_node *orig_node, int max_if_num);
+ 	int (*add_if)(struct batadv_orig_node *orig_node,
+ 		      unsigned int max_if_num);
  
  	/**
  	 * @del_if: ask the routing algorithm to apply the needed changes to the
  	 *  orig_node due to an hard-interface being removed from the mesh
  	 *  (optional)
  	 */
- 	int (*del_if)(struct batadv_orig_node *orig_node, int max_if_num,
- 		      int del_if_num);
+ 	int (*del_if)(struct batadv_orig_node *orig_node,
+ 		      unsigned int max_if_num, unsigned int del_if_num);
  
  #ifdef CONFIG_BATMAN_ADV_DEBUGFS
  	/** @print: print the originator table (optional) */
diff --combined net/bridge/br_netfilter_hooks.c
index 484f54150525,9b16eaf33819..c2120eb889a9
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@@ -214,7 -214,7 +214,7 @@@ static int br_validate_ipv4(struct net 
  
  	iph = ip_hdr(skb);
  	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
- 		goto inhdr_error;
+ 		goto csum_error;
  
  	len = ntohs(iph->tot_len);
  	if (skb->len < len) {
@@@ -236,6 -236,8 +236,8 @@@
  	 */
  	return 0;
  
+ csum_error:
+ 	__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
  inhdr_error:
  	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
  drop:
@@@ -967,7 -969,6 +969,7 @@@ static struct pernet_operations brnf_ne
  	.exit = brnf_exit_net,
  	.id   = &brnf_net_id,
  	.size = sizeof(struct brnf_net),
 +	.async = true,
  };
  
  static struct notifier_block brnf_notifier __read_mostly = {
diff --combined net/core/dev.c
index 8b51f923ce99,2cedf520cb28..e5b8d42b6410
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@@ -2378,7 -2378,7 +2378,7 @@@ EXPORT_SYMBOL(netdev_set_num_tc)
  
  /*
   * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 - * greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
 + * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
   */
  int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
  {
@@@ -6396,6 -6396,7 +6396,7 @@@ static int __netdev_upper_dev_link(stru
  		.linking = true,
  		.upper_info = upper_info,
  	};
+ 	struct net_device *master_dev;
  	int ret = 0;
  
  	ASSERT_RTNL();
@@@ -6407,11 -6408,14 +6408,14 @@@
  	if (netdev_has_upper_dev(upper_dev, dev))
  		return -EBUSY;
  
- 	if (netdev_has_upper_dev(dev, upper_dev))
- 		return -EEXIST;
- 
- 	if (master && netdev_master_upper_dev_get(dev))
- 		return -EBUSY;
+ 	if (!master) {
+ 		if (netdev_has_upper_dev(dev, upper_dev))
+ 			return -EEXIST;
+ 	} else {
+ 		master_dev = netdev_master_upper_dev_get(dev);
+ 		if (master_dev)
+ 			return master_dev == upper_dev ? -EEXIST : -EBUSY;
+ 	}
  
  	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
  					    &changeupper_info.info);
@@@ -7542,12 -7546,6 +7546,12 @@@ static netdev_features_t netdev_fix_fea
  		}
  	}
  
 +	/* LRO feature cannot be combined with RX-FCS */
 +	if ((features & NETIF_F_LRO) && (features & NETIF_F_RXFCS)) {
 +		netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
 +		features &= ~NETIF_F_LRO;
 +	}
 +
  	return features;
  }
  
@@@ -8147,9 -8145,8 +8151,9 @@@ void netdev_run_todo(void
  		BUG_ON(!list_empty(&dev->ptype_specific));
  		WARN_ON(rcu_access_pointer(dev->ip_ptr));
  		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
 +#if IS_ENABLED(CONFIG_DECNET)
  		WARN_ON(dev->dn_ptr);
 -
 +#endif
  		if (dev->priv_destructor)
  			dev->priv_destructor(dev);
  		if (dev->needs_free_netdev)
@@@ -8847,7 -8844,6 +8851,7 @@@ static void __net_exit netdev_exit(stru
  static struct pernet_operations __net_initdata netdev_net_ops = {
  	.init = netdev_init,
  	.exit = netdev_exit,
 +	.async = true,
  };
  
  static void __net_exit default_device_exit(struct net *net)
@@@ -8948,7 -8944,6 +8952,7 @@@ static void __net_exit default_device_e
  static struct pernet_operations __net_initdata default_device_ops = {
  	.exit = default_device_exit,
  	.exit_batch = default_device_exit_batch,
 +	.async = true,
  };
  
  /*
diff --combined net/core/devlink.c
index 88e846779269,2f2307d94787..1b5bf0d1cee9
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@@ -1695,10 -1695,11 +1695,11 @@@ static int devlink_dpipe_table_put(stru
  		goto nla_put_failure;
  
  	if (table->resource_valid) {
- 		nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
- 				  table->resource_id, DEVLINK_ATTR_PAD);
- 		nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
- 				  table->resource_units, DEVLINK_ATTR_PAD);
+ 		if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
+ 				      table->resource_id, DEVLINK_ATTR_PAD) ||
+ 		    nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
+ 				      table->resource_units, DEVLINK_ATTR_PAD))
+ 			goto nla_put_failure;
  	}
  	if (devlink_dpipe_matches_put(table, skb))
  		goto nla_put_failure;
@@@ -2332,38 -2333,12 +2333,38 @@@ devlink_resource_validate_children(stru
  	list_for_each_entry(child_resource, &resource->resource_list, list)
  		parts_size += child_resource->size_new;
  
- 	if (parts_size > resource->size)
+ 	if (parts_size > resource->size_new)
  		size_valid = false;
  out:
  	resource->size_valid = size_valid;
  }
  
 +static int
 +devlink_resource_validate_size(struct devlink_resource *resource, u64 size,
 +			       struct netlink_ext_ack *extack)
 +{
 +	u64 reminder;
 +	int err = 0;
 +
- 	if (size > resource->size_params->size_max) {
++	if (size > resource->size_params.size_max) {
 +		NL_SET_ERR_MSG_MOD(extack, "Size larger than maximum");
 +		err = -EINVAL;
 +	}
 +
- 	if (size < resource->size_params->size_min) {
++	if (size < resource->size_params.size_min) {
 +		NL_SET_ERR_MSG_MOD(extack, "Size smaller than minimum");
 +		err = -EINVAL;
 +	}
 +
- 	div64_u64_rem(size, resource->size_params->size_granularity, &reminder);
++	div64_u64_rem(size, resource->size_params.size_granularity, &reminder);
 +	if (reminder) {
 +		NL_SET_ERR_MSG_MOD(extack, "Wrong granularity");
 +		err = -EINVAL;
 +	}
 +
 +	return err;
 +}
 +
  static int devlink_nl_cmd_resource_set(struct sk_buff *skb,
  				       struct genl_info *info)
  {
@@@ -2382,8 -2357,12 +2383,8 @@@
  	if (!resource)
  		return -EINVAL;
  
 -	if (!resource->resource_ops->size_validate)
 -		return -EINVAL;
 -
  	size = nla_get_u64(info->attrs[DEVLINK_ATTR_RESOURCE_SIZE]);
 -	err = resource->resource_ops->size_validate(devlink, size,
 -						    info->extack);
 +	err = devlink_resource_validate_size(resource, size, info->extack);
  	if (err)
  		return err;
  
@@@ -2394,20 -2373,22 +2395,22 @@@
  	return 0;
  }
  
- static void
+ static int
  devlink_resource_size_params_put(struct devlink_resource *resource,
  				 struct sk_buff *skb)
  {
  	struct devlink_resource_size_params *size_params;
  
- 	size_params = resource->size_params;
- 	nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
- 			  size_params->size_granularity, DEVLINK_ATTR_PAD);
- 	nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
- 			  size_params->size_max, DEVLINK_ATTR_PAD);
- 	nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
- 			  size_params->size_min, DEVLINK_ATTR_PAD);
- 	nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit);
+ 	size_params = &resource->size_params;
+ 	if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
+ 			      size_params->size_granularity, DEVLINK_ATTR_PAD) ||
+ 	    nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
+ 			      size_params->size_max, DEVLINK_ATTR_PAD) ||
+ 	    nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
+ 			      size_params->size_min, DEVLINK_ATTR_PAD) ||
+ 	    nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit))
+ 		return -EMSGSIZE;
+ 	return 0;
  }
  
  static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
@@@ -2431,10 -2412,12 +2434,12 @@@
  		nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW,
  				  resource->size_new, DEVLINK_ATTR_PAD);
  	if (resource->resource_ops && resource->resource_ops->occ_get)
- 		nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
- 				  resource->resource_ops->occ_get(devlink),
- 				  DEVLINK_ATTR_PAD);
- 	devlink_resource_size_params_put(resource, skb);
+ 		if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
+ 				      resource->resource_ops->occ_get(devlink),
+ 				      DEVLINK_ATTR_PAD))
+ 			goto nla_put_failure;
+ 	if (devlink_resource_size_params_put(resource, skb))
+ 		goto nla_put_failure;
  	if (list_empty(&resource->resource_list))
  		goto out;
  
@@@ -3173,7 -3156,7 +3178,7 @@@ int devlink_resource_register(struct de
  			      u64 resource_size,
  			      u64 resource_id,
  			      u64 parent_resource_id,
- 			      struct devlink_resource_size_params *size_params,
+ 			      const struct devlink_resource_size_params *size_params,
  			      const struct devlink_resource_ops *resource_ops)
  {
  	struct devlink_resource *resource;
@@@ -3216,7 -3199,8 +3221,8 @@@
  	resource->id = resource_id;
  	resource->resource_ops = resource_ops;
  	resource->size_valid = true;
- 	resource->size_params = size_params;
+ 	memcpy(&resource->size_params, size_params,
+ 	       sizeof(resource->size_params));
  	INIT_LIST_HEAD(&resource->resource_list);
  	list_add_tail(&resource->list, resource_list);
  out:
diff --combined net/core/skbuff.c
index 96d36b81a3a5,0bb0d8877954..715c13495ba6
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@@ -77,8 -77,8 +77,8 @@@
  #include <linux/capability.h>
  #include <linux/user_namespace.h>
  
 -struct kmem_cache *skbuff_head_cache __read_mostly;
 -static struct kmem_cache *skbuff_fclone_cache __read_mostly;
 +struct kmem_cache *skbuff_head_cache __ro_after_init;
 +static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
  int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
  EXPORT_SYMBOL(sysctl_max_skb_frags);
  
@@@ -890,7 -890,7 +890,7 @@@ struct sk_buff *skb_morph(struct sk_buf
  }
  EXPORT_SYMBOL_GPL(skb_morph);
  
 -static int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
 +int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
  {
  	unsigned long max_pg, num_pg, new_pg, old_pg;
  	struct user_struct *user;
@@@ -919,16 -919,14 +919,16 @@@
  
  	return 0;
  }
 +EXPORT_SYMBOL_GPL(mm_account_pinned_pages);
  
 -static void mm_unaccount_pinned_pages(struct mmpin *mmp)
 +void mm_unaccount_pinned_pages(struct mmpin *mmp)
  {
  	if (mmp->user) {
  		atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
  		free_uid(mmp->user);
  	}
  }
 +EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
  
  struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
  {
@@@ -4893,7 -4891,7 +4893,7 @@@ EXPORT_SYMBOL_GPL(skb_scrub_packet)
   *
   * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
   */
- unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
+ static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
  {
  	const struct skb_shared_info *shinfo = skb_shinfo(skb);
  	unsigned int thlen = 0;
@@@ -4915,7 -4913,40 +4915,40 @@@
  	 */
  	return thlen + shinfo->gso_size;
  }
- EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
+ 
+ /**
+  * skb_gso_network_seglen - Return length of individual segments of a gso packet
+  *
+  * @skb: GSO skb
+  *
+  * skb_gso_network_seglen is used to determine the real size of the
+  * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
+  *
+  * The MAC/L2 header is not accounted for.
+  */
+ static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
+ {
+ 	unsigned int hdr_len = skb_transport_header(skb) -
+ 			       skb_network_header(skb);
+ 
+ 	return hdr_len + skb_gso_transport_seglen(skb);
+ }
+ 
+ /**
+  * skb_gso_mac_seglen - Return length of individual segments of a gso packet
+  *
+  * @skb: GSO skb
+  *
+  * skb_gso_mac_seglen is used to determine the real size of the
+  * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
+  * headers (TCP/UDP).
+  */
+ static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
+ {
+ 	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+ 
+ 	return hdr_len + skb_gso_transport_seglen(skb);
+ }
  
  /**
   * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
@@@ -4957,19 -4988,20 +4990,20 @@@ static inline bool skb_gso_size_check(c
  }
  
  /**
-  * skb_gso_validate_mtu - Return in case such skb fits a given MTU
+  * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
   *
   * @skb: GSO skb
   * @mtu: MTU to validate against
   *
-  * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU
-  * once split.
+  * skb_gso_validate_network_len validates if a given skb will fit a
+  * wanted MTU once split. It considers L3 headers, L4 headers, and the
+  * payload.
   */
- bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu)
+ bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
  {
  	return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
  }
- EXPORT_SYMBOL_GPL(skb_gso_validate_mtu);
+ EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
  
  /**
   * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
diff --combined net/ipv4/ip_gre.c
index 95fd225f402e,0901de42ed85..2fa2ef2e2af9
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@@ -522,7 -522,6 +522,7 @@@ err_free_skb
  static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
  			__be16 proto)
  {
 +	struct ip_tunnel *tunnel = netdev_priv(dev);
  	struct ip_tunnel_info *tun_info;
  	const struct ip_tunnel_key *key;
  	struct rtable *rt = NULL;
@@@ -546,11 -545,9 +546,11 @@@
  	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
  		goto err_free_rt;
  
 -	flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
 +	flags = tun_info->key.tun_flags &
 +		(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
  	gre_build_header(skb, tunnel_hlen, flags, proto,
 -			 tunnel_id_to_key32(tun_info->key.tun_id), 0);
 +			 tunnel_id_to_key32(tun_info->key.tun_id),
 +			 (flags | TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);
  
  	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ?  htons(IP_DF) : 0;
  
@@@ -973,9 -970,6 +973,6 @@@ static void __gre_tunnel_init(struct ne
  
  	t_hlen = tunnel->hlen + sizeof(struct iphdr);
  
- 	dev->needed_headroom	= LL_MAX_HEADER + t_hlen + 4;
- 	dev->mtu		= ETH_DATA_LEN - t_hlen - 4;
- 
  	dev->features		|= GRE_FEATURES;
  	dev->hw_features	|= GRE_FEATURES;
  
@@@ -1047,7 -1041,6 +1044,7 @@@ static struct pernet_operations ipgre_n
  	.exit_batch = ipgre_exit_batch_net,
  	.id   = &ipgre_net_id,
  	.size = sizeof(struct ip_tunnel_net),
 +	.async = true,
  };
  
  static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
@@@ -1294,8 -1287,6 +1291,6 @@@ static int erspan_tunnel_init(struct ne
  		       erspan_hdr_len(tunnel->erspan_ver);
  	t_hlen = tunnel->hlen + sizeof(struct iphdr);
  
- 	dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
- 	dev->mtu = ETH_DATA_LEN - t_hlen - 4;
  	dev->features		|= GRE_FEATURES;
  	dev->hw_features	|= GRE_FEATURES;
  	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
@@@ -1326,12 -1317,6 +1321,12 @@@ static void ipgre_tap_setup(struct net_
  	ip_tunnel_setup(dev, gre_tap_net_id);
  }
  
 +bool is_gretap_dev(const struct net_device *dev)
 +{
 +	return dev->netdev_ops == &gre_tap_netdev_ops;
 +}
 +EXPORT_SYMBOL_GPL(is_gretap_dev);
 +
  static int ipgre_newlink(struct net *src_net, struct net_device *dev,
  			 struct nlattr *tb[], struct nlattr *data[],
  			 struct netlink_ext_ack *extack)
@@@ -1633,7 -1618,6 +1628,7 @@@ static struct pernet_operations ipgre_t
  	.exit_batch = ipgre_tap_exit_batch_net,
  	.id   = &gre_tap_net_id,
  	.size = sizeof(struct ip_tunnel_net),
 +	.async = true,
  };
  
  static int __net_init erspan_init_net(struct net *net)
@@@ -1652,7 -1636,6 +1647,7 @@@ static struct pernet_operations erspan_
  	.exit_batch = erspan_exit_batch_net,
  	.id   = &erspan_net_id,
  	.size = sizeof(struct ip_tunnel_net),
 +	.async = true,
  };
  
  static int __init ipgre_init(void)
diff --combined net/ipv4/ip_tunnel.c
index b2117d89bc83,6d21068f9b55..602597dfc395
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@@ -290,6 -290,22 +290,6 @@@ failed
  	return ERR_PTR(err);
  }
  
 -static inline void init_tunnel_flow(struct flowi4 *fl4,
 -				    int proto,
 -				    __be32 daddr, __be32 saddr,
 -				    __be32 key, __u8 tos, int oif,
 -				    __u32 mark)
 -{
 -	memset(fl4, 0, sizeof(*fl4));
 -	fl4->flowi4_oif = oif;
 -	fl4->daddr = daddr;
 -	fl4->saddr = saddr;
 -	fl4->flowi4_tos = tos;
 -	fl4->flowi4_proto = proto;
 -	fl4->fl4_gre_key = key;
 -	fl4->flowi4_mark = mark;
 -}
 -
  static int ip_tunnel_bind_dev(struct net_device *dev)
  {
  	struct net_device *tdev = NULL;
@@@ -306,10 -322,10 +306,10 @@@
  		struct flowi4 fl4;
  		struct rtable *rt;
  
 -		init_tunnel_flow(&fl4, iph->protocol, iph->daddr,
 -				 iph->saddr, tunnel->parms.o_key,
 -				 RT_TOS(iph->tos), tunnel->parms.link,
 -				 tunnel->fwmark);
 +		ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
 +				    iph->saddr, tunnel->parms.o_key,
 +				    RT_TOS(iph->tos), tunnel->parms.link,
 +				    tunnel->fwmark);
  		rt = ip_route_output_key(tunnel->net, &fl4);
  
  		if (!IS_ERR(rt)) {
@@@ -565,8 -581,8 +565,8 @@@ void ip_md_tunnel_xmit(struct sk_buff *
  		else if (skb->protocol == htons(ETH_P_IPV6))
  			tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
  	}
 -	init_tunnel_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src, 0,
 -			 RT_TOS(tos), tunnel->parms.link, tunnel->fwmark);
 +	ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src, 0,
 +			    RT_TOS(tos), tunnel->parms.link, tunnel->fwmark);
  	if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
  		goto tx_error;
  	rt = ip_route_output_key(tunnel->net, &fl4);
@@@ -694,16 -710,9 +694,9 @@@ void ip_tunnel_xmit(struct sk_buff *skb
  		}
  	}
  
- 	if (tunnel->fwmark) {
- 		ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
- 				    tunnel->parms.o_key, RT_TOS(tos),
- 				    tunnel->parms.link, tunnel->fwmark);
- 	}
- 	else {
- 		ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
- 				    tunnel->parms.o_key, RT_TOS(tos),
- 				    tunnel->parms.link, skb->mark);
- 	}
 -	init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
 -			 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
 -			 tunnel->fwmark);
++	ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
++			    tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
++			    tunnel->fwmark);
  
  	if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
  		goto tx_error;
diff --combined net/ipv4/netfilter/ipt_CLUSTERIP.c
index 08b3e48f44fc,8a8ae61cea71..0fc88fa7a4dc
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@@ -232,7 -232,6 +232,6 @@@ clusterip_config_init(struct net *net, 
  	c->hash_mode = i->hash_mode;
  	c->hash_initval = i->hash_initval;
  	refcount_set(&c->refcount, 1);
- 	refcount_set(&c->entries, 1);
  
  	spin_lock_bh(&cn->lock);
  	if (__clusterip_config_find(net, ip)) {
@@@ -263,8 -262,10 +262,10 @@@
  
  	c->notifier.notifier_call = clusterip_netdev_event;
  	err = register_netdevice_notifier(&c->notifier);
- 	if (!err)
+ 	if (!err) {
+ 		refcount_set(&c->entries, 1);
  		return c;
+ 	}
  
  #ifdef CONFIG_PROC_FS
  	proc_remove(c->pde);
@@@ -273,7 -274,7 +274,7 @@@ err
  	spin_lock_bh(&cn->lock);
  	list_del_rcu(&c->list);
  	spin_unlock_bh(&cn->lock);
- 	kfree(c);
+ 	clusterip_config_put(c);
  
  	return ERR_PTR(err);
  }
@@@ -496,12 -497,15 +497,15 @@@ static int clusterip_tg_check(const str
  				return PTR_ERR(config);
  		}
  	}
- 	cipinfo->config = config;
  
  	ret = nf_ct_netns_get(par->net, par->family);
- 	if (ret < 0)
+ 	if (ret < 0) {
  		pr_info("cannot load conntrack support for proto=%u\n",
  			par->family);
+ 		clusterip_config_entry_put(par->net, config);
+ 		clusterip_config_put(config);
+ 		return ret;
+ 	}
  
  	if (!par->net->xt.clusterip_deprecated_warning) {
  		pr_info("ipt_CLUSTERIP is deprecated and it will removed soon, "
@@@ -509,6 -513,7 +513,7 @@@
  		par->net->xt.clusterip_deprecated_warning = true;
  	}
  
+ 	cipinfo->config = config;
  	return ret;
  }
  
@@@ -840,7 -845,6 +845,7 @@@ static struct pernet_operations cluster
  	.exit = clusterip_net_exit,
  	.id   = &clusterip_net_id,
  	.size = sizeof(struct clusterip_net),
 +	.async = true,
  };
  
  static int __init clusterip_tg_init(void)
diff --combined net/ipv4/route.c
index 6a7b3cba3972,860b3fd2f54b..e74ee837b300
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@@ -128,10 -128,11 +128,11 @@@ static int ip_rt_redirect_silence __rea
  static int ip_rt_error_cost __read_mostly	= HZ;
  static int ip_rt_error_burst __read_mostly	= 5 * HZ;
  static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
- static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
+ static u32 ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
  static int ip_rt_min_advmss __read_mostly	= 256;
  
  static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
+ 
  /*
   *	Interface to generic destination cache.
   */
@@@ -417,7 -418,6 +418,7 @@@ static void __net_exit ip_rt_do_proc_ex
  static struct pernet_operations ip_rt_proc_ops __net_initdata =  {
  	.init = ip_rt_do_proc_init,
  	.exit = ip_rt_do_proc_exit,
 +	.async = true,
  };
  
  static int __init ip_rt_proc_init(void)
@@@ -931,14 -931,23 +932,23 @@@ out_put_peer
  
  static int ip_error(struct sk_buff *skb)
  {
- 	struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
  	struct rtable *rt = skb_rtable(skb);
+ 	struct net_device *dev = skb->dev;
+ 	struct in_device *in_dev;
  	struct inet_peer *peer;
  	unsigned long now;
  	struct net *net;
  	bool send;
  	int code;
  
+ 	if (netif_is_l3_master(skb->dev)) {
+ 		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
+ 		if (!dev)
+ 			goto out;
+ 	}
+ 
+ 	in_dev = __in_dev_get_rcu(dev);
+ 
  	/* IP on this device is disabled. */
  	if (!in_dev)
  		goto out;
@@@ -1509,6 -1518,7 +1519,6 @@@ struct rtable *rt_dst_alloc(struct net_
  		rt->rt_pmtu = 0;
  		rt->rt_gateway = 0;
  		rt->rt_uses_gateway = 0;
 -		rt->rt_table_id = 0;
  		INIT_LIST_HEAD(&rt->rt_uncached);
  
  		rt->dst.output = ip_output;
@@@ -1644,6 -1654,19 +1654,6 @@@ static void ip_del_fnhe(struct fib_nh *
  	spin_unlock_bh(&fnhe_lock);
  }
  
 -static void set_lwt_redirect(struct rtable *rth)
 -{
 -	if (lwtunnel_output_redirect(rth->dst.lwtstate)) {
 -		rth->dst.lwtstate->orig_output = rth->dst.output;
 -		rth->dst.output = lwtunnel_output;
 -	}
 -
 -	if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
 -		rth->dst.lwtstate->orig_input = rth->dst.input;
 -		rth->dst.input = lwtunnel_input;
 -	}
 -}
 -
  /* called in rcu_read_lock() section */
  static int __mkroute_input(struct sk_buff *skb,
  			   const struct fib_result *res,
@@@ -1726,13 -1749,15 +1736,13 @@@ rt_cache
  	}
  
  	rth->rt_is_input = 1;
 -	if (res->table)
 -		rth->rt_table_id = res->table->tb_id;
  	RT_CACHE_STAT_INC(in_slow_tot);
  
  	rth->dst.input = ip_forward;
  
  	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
  		       do_cache);
 -	set_lwt_redirect(rth);
 +	lwtunnel_set_redirect(&rth->dst);
  	skb_dst_set(skb, &rth->dst);
  out:
  	err = 0;
@@@ -1748,45 -1773,44 +1758,45 @@@ static void ip_multipath_l3_keys(const 
  				 struct flow_keys *hash_keys)
  {
  	const struct iphdr *outer_iph = ip_hdr(skb);
 +	const struct iphdr *key_iph = outer_iph;
  	const struct iphdr *inner_iph;
  	const struct icmphdr *icmph;
  	struct iphdr _inner_iph;
  	struct icmphdr _icmph;
  
 -	hash_keys->addrs.v4addrs.src = outer_iph->saddr;
 -	hash_keys->addrs.v4addrs.dst = outer_iph->daddr;
  	if (likely(outer_iph->protocol != IPPROTO_ICMP))
 -		return;
 +		goto out;
  
  	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
 -		return;
 +		goto out;
  
  	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
  				   &_icmph);
  	if (!icmph)
 -		return;
 +		goto out;
  
  	if (icmph->type != ICMP_DEST_UNREACH &&
  	    icmph->type != ICMP_REDIRECT &&
  	    icmph->type != ICMP_TIME_EXCEEDED &&
  	    icmph->type != ICMP_PARAMETERPROB)
 -		return;
 +		goto out;
  
  	inner_iph = skb_header_pointer(skb,
  				       outer_iph->ihl * 4 + sizeof(_icmph),
  				       sizeof(_inner_iph), &_inner_iph);
  	if (!inner_iph)
 -		return;
 -	hash_keys->addrs.v4addrs.src = inner_iph->saddr;
 -	hash_keys->addrs.v4addrs.dst = inner_iph->daddr;
 +		goto out;
 +
 +	key_iph = inner_iph;
 +out:
 +	hash_keys->addrs.v4addrs.src = key_iph->saddr;
 +	hash_keys->addrs.v4addrs.dst = key_iph->daddr;
  }
  
  /* if skb is set it will be used and fl4 can be NULL */
 -int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,
 -		       const struct sk_buff *skb)
 +int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
 +		       const struct sk_buff *skb, struct flow_keys *flkeys)
  {
 -	struct net *net = fi->fib_net;
  	struct flow_keys hash_keys;
  	u32 mhash;
  
@@@ -1810,20 -1834,15 +1820,20 @@@
  			/* short-circuit if we already have L4 hash present */
  			if (skb->l4_hash)
  				return skb_get_hash_raw(skb) >> 1;
 +
  			memset(&hash_keys, 0, sizeof(hash_keys));
 -			skb_flow_dissect_flow_keys(skb, &keys, flag);
 +
 +			if (!flkeys) {
 +				skb_flow_dissect_flow_keys(skb, &keys, flag);
 +				flkeys = &keys;
 +			}
  
  			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
 -			hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
 -			hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
 -			hash_keys.ports.src = keys.ports.src;
 -			hash_keys.ports.dst = keys.ports.dst;
 -			hash_keys.basic.ip_proto = keys.basic.ip_proto;
 +			hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
 +			hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
 +			hash_keys.ports.src = flkeys->ports.src;
 +			hash_keys.ports.dst = flkeys->ports.dst;
 +			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
  		} else {
  			memset(&hash_keys, 0, sizeof(hash_keys));
  			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
@@@ -1839,17 -1858,17 +1849,17 @@@
  
  	return mhash >> 1;
  }
 -EXPORT_SYMBOL_GPL(fib_multipath_hash);
  #endif /* CONFIG_IP_ROUTE_MULTIPATH */
  
  static int ip_mkroute_input(struct sk_buff *skb,
  			    struct fib_result *res,
  			    struct in_device *in_dev,
 -			    __be32 daddr, __be32 saddr, u32 tos)
 +			    __be32 daddr, __be32 saddr, u32 tos,
 +			    struct flow_keys *hkeys)
  {
  #ifdef CONFIG_IP_ROUTE_MULTIPATH
  	if (res->fi && res->fi->fib_nhs > 1) {
 -		int h = fib_multipath_hash(res->fi, NULL, skb);
 +		int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);
  
  		fib_select_multipath(res, h);
  	}
@@@ -1875,14 -1894,13 +1885,14 @@@ static int ip_route_input_slow(struct s
  			       struct fib_result *res)
  {
  	struct in_device *in_dev = __in_dev_get_rcu(dev);
 +	struct flow_keys *flkeys = NULL, _flkeys;
 +	struct net    *net = dev_net(dev);
  	struct ip_tunnel_info *tun_info;
 -	struct flowi4	fl4;
 +	int		err = -EINVAL;
  	unsigned int	flags = 0;
  	u32		itag = 0;
  	struct rtable	*rth;
 -	int		err = -EINVAL;
 -	struct net    *net = dev_net(dev);
 +	struct flowi4	fl4;
  	bool do_cache;
  
  	/* IP on this device is disabled. */
@@@ -1941,10 -1959,6 +1951,10 @@@
  	fl4.daddr = daddr;
  	fl4.saddr = saddr;
  	fl4.flowi4_uid = sock_net_uid(net, NULL);
 +
 +	if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys))
 +		flkeys = &_flkeys;
 +
  	err = fib_lookup(net, &fl4, res, 0);
  	if (err != 0) {
  		if (!IN_DEV_FORWARD(in_dev))
@@@ -1970,7 -1984,7 +1980,7 @@@
  	if (res->type != RTN_UNICAST)
  		goto martian_destination;
  
 -	err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos);
 +	err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
  out:	return err;
  
  brd_input:
@@@ -2012,6 -2026,8 +2022,6 @@@ local_input
  	rth->dst.tclassid = itag;
  #endif
  	rth->rt_is_input = 1;
 -	if (res->table)
 -		rth->rt_table_id = res->table->tb_id;
  
  	RT_CACHE_STAT_INC(in_slow_tot);
  	if (res->type == RTN_UNREACHABLE) {
@@@ -2240,6 -2256,8 +2250,6 @@@ add
  		return ERR_PTR(-ENOBUFS);
  
  	rth->rt_iif = orig_oif;
 -	if (res->table)
 -		rth->rt_table_id = res->table->tb_id;
  
  	RT_CACHE_STAT_INC(out_slow_tot);
  
@@@ -2261,7 -2279,7 +2271,7 @@@
  	}
  
  	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
 -	set_lwt_redirect(rth);
 +	lwtunnel_set_redirect(&rth->dst);
  
  	return rth;
  }
@@@ -2769,7 -2787,7 +2779,7 @@@ static int inet_rtm_getroute(struct sk_
  		rt->rt_flags |= RTCF_NOTIFY;
  
  	if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
 -		table_id = rt->rt_table_id;
 +		table_id = res.table ? res.table->tb_id : 0;
  
  	if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
  		if (!res.fi) {
@@@ -2810,6 -2828,7 +2820,7 @@@ void ip_rt_multicast_event(struct in_de
  static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
  static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
  static int ip_rt_gc_elasticity __read_mostly	= 8;
+ static int ip_min_valid_pmtu __read_mostly	= IPV4_MIN_MTU;
  
  static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
  					void __user *buffer,
@@@ -2925,7 -2944,8 +2936,8 @@@ static struct ctl_table ipv4_route_tabl
  		.data		= &ip_rt_min_pmtu,
  		.maxlen		= sizeof(int),
  		.mode		= 0644,
- 		.proc_handler	= proc_dointvec,
+ 		.proc_handler	= proc_dointvec_minmax,
+ 		.extra1		= &ip_min_valid_pmtu,
  	},
  	{
  		.procname	= "min_adv_mss",
@@@ -2988,7 -3008,6 +3000,7 @@@ static __net_exit void sysctl_route_net
  static __net_initdata struct pernet_operations sysctl_route_ops = {
  	.init = sysctl_route_net_init,
  	.exit = sysctl_route_net_exit,
 +	.async = true,
  };
  #endif
  
@@@ -3002,7 -3021,6 +3014,7 @@@ static __net_init int rt_genid_init(str
  
  static __net_initdata struct pernet_operations rt_genid_ops = {
  	.init = rt_genid_init,
 +	.async = true,
  };
  
  static int __net_init ipv4_inetpeer_init(struct net *net)
@@@ -3028,7 -3046,6 +3040,7 @@@ static void __net_exit ipv4_inetpeer_ex
  static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
  	.init	=	ipv4_inetpeer_init,
  	.exit	=	ipv4_inetpeer_exit,
 +	.async	=	true,
  };
  
  #ifdef CONFIG_IP_ROUTE_CLASSID
diff --combined net/ipv4/tcp_input.c
index 06b9c4765f42,9a1b3c1c1c14..451ef3012636
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@@ -1358,6 -1358,9 +1358,6 @@@ static struct sk_buff *tcp_shift_skb_da
  	int len;
  	int in_sack;
  
 -	if (!sk_can_gso(sk))
 -		goto fallback;
 -
  	/* Normally R but no L won't result in plain S */
  	if (!dup_sack &&
  	    (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS)
@@@ -1968,11 -1971,6 +1968,6 @@@ void tcp_enter_loss(struct sock *sk
  	/* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
  	 * loss recovery is underway except recurring timeout(s) on
  	 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
- 	 *
- 	 * In theory F-RTO can be used repeatedly during loss recovery.
- 	 * In practice this interacts badly with broken middle-boxes that
- 	 * falsely raise the receive window, which results in repeated
- 	 * timeouts and stop-and-go behavior.
  	 */
  	tp->frto = net->ipv4.sysctl_tcp_frto &&
  		   (new_recovery || icsk->icsk_retransmits) &&
@@@ -2628,18 -2626,14 +2623,14 @@@ static void tcp_process_loss(struct soc
  	    tcp_try_undo_loss(sk, false))
  		return;
  
- 	/* The ACK (s)acks some never-retransmitted data meaning not all
- 	 * the data packets before the timeout were lost. Therefore we
- 	 * undo the congestion window and state. This is essentially
- 	 * the operation in F-RTO (RFC5682 section 3.1 step 3.b). Since
- 	 * a retransmitted skb is permantly marked, we can apply such an
- 	 * operation even if F-RTO was not used.
- 	 */
- 	if ((flag & FLAG_ORIG_SACK_ACKED) &&
- 	    tcp_try_undo_loss(sk, tp->undo_marker))
- 		return;
- 
  	if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
+ 		/* Step 3.b. A timeout is spurious if not all data are
+ 		 * lost, i.e., never-retransmitted data are (s)acked.
+ 		 */
+ 		if ((flag & FLAG_ORIG_SACK_ACKED) &&
+ 		    tcp_try_undo_loss(sk, true))
+ 			return;
+ 
  		if (after(tp->snd_nxt, tp->high_seq)) {
  			if (flag & FLAG_DATA_SACKED || is_dupack)
  				tp->frto = 0; /* Step 3.a. loss was real */
@@@ -3998,6 -3992,7 +3989,7 @@@ void tcp_reset(struct sock *sk
  	/* This barrier is coupled with smp_rmb() in tcp_poll() */
  	smp_wmb();
  
+ 	tcp_write_queue_purge(sk);
  	tcp_done(sk);
  
  	if (!sock_flag(sk, SOCK_DEAD))
@@@ -5867,12 -5862,10 +5859,12 @@@ int tcp_rcv_state_process(struct sock *
  	tp->rx_opt.saw_tstamp = 0;
  	req = tp->fastopen_rsk;
  	if (req) {
 +		bool req_stolen;
 +
  		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
  		    sk->sk_state != TCP_FIN_WAIT1);
  
 -		if (!tcp_check_req(sk, skb, req, true))
 +		if (!tcp_check_req(sk, skb, req, true, &req_stolen))
  			goto discard;
  	}
  
diff --combined net/ipv6/ip6_output.c
index a6eb0e699b15,a8a919520090..2c7f09c3c39e
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@@ -71,7 -71,7 +71,7 @@@ static int ip6_finish_output2(struct ne
  		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
  
  		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
 -		    ((mroute6_socket(net, skb) &&
 +		    ((mroute6_is_socket(net, skb) &&
  		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
  		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
  					 &ipv6_hdr(skb)->saddr))) {
@@@ -412,7 -412,7 +412,7 @@@ static bool ip6_pkt_too_big(const struc
  	if (skb->ignore_df)
  		return false;
  
- 	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
+ 	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
  		return false;
  
  	return true;
diff --combined net/ipv6/ip6_tunnel.c
index 1124f310df5a,6e0f21eed88a..56c4967f1868
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@@ -679,7 -679,7 +679,7 @@@ ip6ip6_err(struct sk_buff *skb, struct 
  
  		/* Try to guess incoming interface */
  		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
 -				NULL, 0, 0);
 +				NULL, 0, skb2, 0);
  
  		if (rt && rt->dst.dev)
  			skb2->dev = rt->dst.dev;
@@@ -1444,7 -1444,7 +1444,7 @@@ static void ip6_tnl_link_config(struct 
  
  		struct rt6_info *rt = rt6_lookup(t->net,
  						 &p->raddr, &p->laddr,
 -						 p->link, strict);
 +						 p->link, NULL, strict);
  
  		if (!rt)
  			return;
@@@ -1982,14 -1982,14 +1982,14 @@@ static int ip6_tnl_newlink(struct net *
  {
  	struct net *net = dev_net(dev);
  	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
- 	struct ip6_tnl *nt, *t;
  	struct ip_tunnel_encap ipencap;
+ 	struct ip6_tnl *nt, *t;
+ 	int err;
  
  	nt = netdev_priv(dev);
  
  	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
- 		int err = ip6_tnl_encap_setup(nt, &ipencap);
- 
+ 		err = ip6_tnl_encap_setup(nt, &ipencap);
  		if (err < 0)
  			return err;
  	}
@@@ -2005,7 -2005,11 +2005,11 @@@
  			return -EEXIST;
  	}
  
- 	return ip6_tnl_create2(dev);
+ 	err = ip6_tnl_create2(dev);
+ 	if (!err && tb[IFLA_MTU])
+ 		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
+ 
+ 	return err;
  }
  
  static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
@@@ -2250,7 -2254,6 +2254,7 @@@ static struct pernet_operations ip6_tnl
  	.exit_batch = ip6_tnl_exit_batch_net,
  	.id   = &ip6_tnl_net_id,
  	.size = sizeof(struct ip6_tnl_net),
 +	.async = true,
  };
  
  /**
diff --combined net/ipv6/netfilter/ip6t_rpfilter.c
index 910a27318f58,91ed25a24b79..d12f511929f5
--- a/net/ipv6/netfilter/ip6t_rpfilter.c
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@@ -48,12 -48,8 +48,8 @@@ static bool rpfilter_lookup_reverse6(st
  	}
  
  	fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
- 	if ((flags & XT_RPFILTER_LOOSE) == 0) {
- 		fl6.flowi6_oif = dev->ifindex;
- 		lookup_flags |= RT6_LOOKUP_F_IFACE;
- 	}
  
 -	rt = (void *) ip6_route_lookup(net, &fl6, lookup_flags);
 +	rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
  	if (rt->dst.error)
  		goto out;
  
diff --combined net/ipv6/netfilter/nft_fib_ipv6.c
index 3230b3d7b11b,62fc84d7bdff..36be3cf0adef
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@@ -180,9 -180,7 +180,8 @@@ void nft_fib6_eval(const struct nft_exp
  	}
  
  	*dest = 0;
-  again:
 -	rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, lookup_flags);
 +	rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, pkt->skb,
 +				      lookup_flags);
  	if (rt->dst.error)
  		goto put_rt_err;
  
@@@ -190,15 -188,8 +189,8 @@@
  	if (rt->rt6i_flags & (RTF_REJECT | RTF_ANYCAST | RTF_LOCAL))
  		goto put_rt_err;
  
- 	if (oif && oif != rt->rt6i_idev->dev) {
- 		/* multipath route? Try again with F_IFACE */
- 		if ((lookup_flags & RT6_LOOKUP_F_IFACE) == 0) {
- 			lookup_flags |= RT6_LOOKUP_F_IFACE;
- 			fl6.flowi6_oif = oif->ifindex;
- 			ip6_rt_put(rt);
- 			goto again;
- 		}
- 	}
+ 	if (oif && oif != rt->rt6i_idev->dev)
+ 		goto put_rt_err;
  
  	switch (priv->result) {
  	case NFT_FIB_RESULT_OIF:
diff --combined net/ipv6/sit.c
index 182db078f01e,0195598f7bb5..a9c4ac6efe22
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@@ -1578,6 -1578,13 +1578,13 @@@ static int ipip6_newlink(struct net *sr
  	if (err < 0)
  		return err;
  
+ 	if (tb[IFLA_MTU]) {
+ 		u32 mtu = nla_get_u32(tb[IFLA_MTU]);
+ 
+ 		if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len)
+ 			dev->mtu = mtu;
+ 	}
+ 
  #ifdef CONFIG_IPV6_SIT_6RD
  	if (ipip6_netlink_6rd_parms(data, &ip6rd))
  		err = ipip6_tunnel_update_6rd(nt, &ip6rd);
@@@ -1878,7 -1885,6 +1885,7 @@@ static struct pernet_operations sit_net
  	.exit_batch = sit_exit_batch_net,
  	.id   = &sit_net_id,
  	.size = sizeof(struct sit_net),
 +	.async = true,
  };
  
  static void __exit sit_cleanup(void)
diff --combined net/l2tp/l2tp_ip.c
index 4614585e1720,3428fba6f2b7..a9c05b2bc1b0
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@@ -234,17 -234,13 +234,13 @@@ static void l2tp_ip_close(struct sock *
  static void l2tp_ip_destroy_sock(struct sock *sk)
  {
  	struct sk_buff *skb;
- 	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+ 	struct l2tp_tunnel *tunnel = sk->sk_user_data;
  
  	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
  		kfree_skb(skb);
  
- 	if (tunnel) {
- 		l2tp_tunnel_closeall(tunnel);
- 		sock_put(sk);
- 	}
- 
- 	sk_refcnt_debug_dec(sk);
+ 	if (tunnel)
+ 		l2tp_tunnel_delete(tunnel);
  }
  
  static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
@@@ -349,7 -345,7 +345,7 @@@ static int l2tp_ip_disconnect(struct so
  }
  
  static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
 -			   int *uaddr_len, int peer)
 +			   int peer)
  {
  	struct sock *sk		= sock->sk;
  	struct inet_sock *inet	= inet_sk(sk);
@@@ -370,7 -366,8 +366,7 @@@
  		lsa->l2tp_conn_id = lsk->conn_id;
  		lsa->l2tp_addr.s_addr = addr;
  	}
 -	*uaddr_len = sizeof(*lsa);
 -	return 0;
 +	return sizeof(*lsa);
  }
  
  static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
diff --combined net/l2tp/l2tp_ip6.c
index efea58b66295,6f009eaa5fbe..957369192ca1
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@@ -248,16 -248,14 +248,14 @@@ static void l2tp_ip6_close(struct sock 
  
  static void l2tp_ip6_destroy_sock(struct sock *sk)
  {
- 	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+ 	struct l2tp_tunnel *tunnel = sk->sk_user_data;
  
  	lock_sock(sk);
  	ip6_flush_pending_frames(sk);
  	release_sock(sk);
  
- 	if (tunnel) {
- 		l2tp_tunnel_closeall(tunnel);
- 		sock_put(sk);
- 	}
+ 	if (tunnel)
+ 		l2tp_tunnel_delete(tunnel);
  
  	inet6_destroy_sock(sk);
  }
@@@ -421,7 -419,7 +419,7 @@@ static int l2tp_ip6_disconnect(struct s
  }
  
  static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
 -			    int *uaddr_len, int peer)
 +			    int peer)
  {
  	struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr;
  	struct sock *sk = sock->sk;
@@@ -449,7 -447,8 +447,7 @@@
  	}
  	if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
  		lsa->l2tp_scope_id = sk->sk_bound_dev_if;
 -	*uaddr_len = sizeof(*lsa);
 -	return 0;
 +	return sizeof(*lsa);
  }
  
  static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb)
diff --combined net/l2tp/l2tp_ppp.c
index 0c4f49a6a0cb,3b02f24ea9ec..977bca659787
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@@ -416,20 -416,28 +416,28 @@@ abort
   * Session (and tunnel control) socket create/destroy.
   *****************************************************************************/
  
+ static void pppol2tp_put_sk(struct rcu_head *head)
+ {
+ 	struct pppol2tp_session *ps;
+ 
+ 	ps = container_of(head, typeof(*ps), rcu);
+ 	sock_put(ps->__sk);
+ }
+ 
  /* Called by l2tp_core when a session socket is being closed.
   */
  static void pppol2tp_session_close(struct l2tp_session *session)
  {
- 	struct sock *sk;
- 
- 	BUG_ON(session->magic != L2TP_SESSION_MAGIC);
+ 	struct pppol2tp_session *ps;
  
- 	sk = pppol2tp_session_get_sock(session);
- 	if (sk) {
- 		if (sk->sk_socket)
- 			inet_shutdown(sk->sk_socket, SEND_SHUTDOWN);
- 		sock_put(sk);
- 	}
+ 	ps = l2tp_session_priv(session);
+ 	mutex_lock(&ps->sk_lock);
+ 	ps->__sk = rcu_dereference_protected(ps->sk,
+ 					     lockdep_is_held(&ps->sk_lock));
+ 	RCU_INIT_POINTER(ps->sk, NULL);
+ 	if (ps->__sk)
+ 		call_rcu(&ps->rcu, pppol2tp_put_sk);
+ 	mutex_unlock(&ps->sk_lock);
  }
  
  /* Really kill the session socket. (Called from sock_put() if
@@@ -449,14 -457,6 +457,6 @@@ static void pppol2tp_session_destruct(s
  	}
  }
  
- static void pppol2tp_put_sk(struct rcu_head *head)
- {
- 	struct pppol2tp_session *ps;
- 
- 	ps = container_of(head, typeof(*ps), rcu);
- 	sock_put(ps->__sk);
- }
- 
  /* Called when the PPPoX socket (session) is closed.
   */
  static int pppol2tp_release(struct socket *sock)
@@@ -480,26 -480,17 +480,17 @@@
  	sock_orphan(sk);
  	sock->sk = NULL;
  
+ 	/* If the socket is associated with a session,
+ 	 * l2tp_session_delete will call pppol2tp_session_close which
+ 	 * will drop the session's ref on the socket.
+ 	 */
  	session = pppol2tp_sock_to_session(sk);
- 
- 	if (session != NULL) {
- 		struct pppol2tp_session *ps;
- 
+ 	if (session) {
  		l2tp_session_delete(session);
- 
- 		ps = l2tp_session_priv(session);
- 		mutex_lock(&ps->sk_lock);
- 		ps->__sk = rcu_dereference_protected(ps->sk,
- 						     lockdep_is_held(&ps->sk_lock));
- 		RCU_INIT_POINTER(ps->sk, NULL);
- 		mutex_unlock(&ps->sk_lock);
- 		call_rcu(&ps->rcu, pppol2tp_put_sk);
- 
- 		/* Rely on the sock_put() call at the end of the function for
- 		 * dropping the reference held by pppol2tp_sock_to_session().
- 		 * The last reference will be dropped by pppol2tp_put_sk().
- 		 */
+ 		/* drop the ref obtained by pppol2tp_sock_to_session */
+ 		sock_put(sk);
  	}
+ 
  	release_sock(sk);
  
  	/* This will delete the session context via
@@@ -796,6 -787,7 +787,7 @@@ static int pppol2tp_connect(struct sock
  
  out_no_ppp:
  	/* This is how we get the session context from the socket. */
+ 	sock_hold(sk);
  	sk->sk_user_data = session;
  	rcu_assign_pointer(ps->sk, sk);
  	mutex_unlock(&ps->sk_lock);
@@@ -870,7 -862,7 +862,7 @@@ err
  /* getname() support.
   */
  static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
 -			    int *usockaddr_len, int peer)
 +			    int peer)
  {
  	int len = 0;
  	int error = 0;
@@@ -969,7 -961,8 +961,7 @@@
  		memcpy(uaddr, &sp, len);
  	}
  
 -	*usockaddr_len = len;
 -	error = 0;
 +	error = len;
  
  	sock_put(sk);
  end:
@@@ -1770,7 -1763,6 +1762,7 @@@ static struct pernet_operations pppol2t
  	.init = pppol2tp_init_net,
  	.exit = pppol2tp_exit_net,
  	.id   = &pppol2tp_net_id,
 +	.async = true,
  };
  
  /*****************************************************************************
diff --combined net/mac80211/rx.c
index de7d10732fd5,56fe16b07538..d01743234cf6
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@@ -439,10 -439,6 +439,10 @@@ ieee80211_add_rx_radiotap_header(struc
  			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
  		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
  			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
 +		if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN)
 +			flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN;
 +		if (status->flag & RX_FLAG_AMPDU_EOF_BIT)
 +			flags |= IEEE80211_RADIOTAP_AMPDU_EOF;
  		put_unaligned_le16(flags, pos);
  		pos += 2;
  		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
@@@ -1189,7 -1185,7 +1189,7 @@@ static void ieee80211_rx_reorder_ampdu(
  
  	ack_policy = *ieee80211_get_qos_ctl(hdr) &
  		     IEEE80211_QOS_CTL_ACK_POLICY_MASK;
 -	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
 +	tid = ieee80211_get_tid(hdr);
  
  	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
  	if (!tid_agg_rx) {
@@@ -1528,7 -1524,9 +1528,7 @@@ ieee80211_rx_h_uapsd_and_pspoll(struct 
  		   ieee80211_has_pm(hdr->frame_control) &&
  		   (ieee80211_is_data_qos(hdr->frame_control) ||
  		    ieee80211_is_qos_nullfunc(hdr->frame_control))) {
 -		u8 tid;
 -
 -		tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
 +		u8 tid = ieee80211_get_tid(hdr);
  
  		ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
  	}
@@@ -2353,17 -2351,39 +2353,17 @@@ ieee80211_deliver_skb(struct ieee80211_
  }
  
  static ieee80211_rx_result debug_noinline
 -ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
 +__ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
  {
  	struct net_device *dev = rx->sdata->dev;
  	struct sk_buff *skb = rx->skb;
  	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  	__le16 fc = hdr->frame_control;
  	struct sk_buff_head frame_list;
 -	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
  	struct ethhdr ethhdr;
  	const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
  
 -	if (unlikely(!ieee80211_is_data(fc)))
 -		return RX_CONTINUE;
 -
 -	if (unlikely(!ieee80211_is_data_present(fc)))
 -		return RX_DROP_MONITOR;
 -
 -	if (!(status->rx_flags & IEEE80211_RX_AMSDU))
 -		return RX_CONTINUE;
 -
  	if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
 -		switch (rx->sdata->vif.type) {
 -		case NL80211_IFTYPE_AP_VLAN:
 -			if (!rx->sdata->u.vlan.sta)
 -				return RX_DROP_UNUSABLE;
 -			break;
 -		case NL80211_IFTYPE_STATION:
 -			if (!rx->sdata->u.mgd.use_4addr)
 -				return RX_DROP_UNUSABLE;
 -			break;
 -		default:
 -			return RX_DROP_UNUSABLE;
 -		}
  		check_da = NULL;
  		check_sa = NULL;
  	} else switch (rx->sdata->vif.type) {
@@@ -2383,13 -2403,15 +2383,13 @@@
  			break;
  	}
  
 -	if (is_multicast_ether_addr(hdr->addr1))
 -		return RX_DROP_UNUSABLE;
 -
  	skb->dev = dev;
  	__skb_queue_head_init(&frame_list);
  
  	if (ieee80211_data_to_8023_exthdr(skb, &ethhdr,
  					  rx->sdata->vif.addr,
 -					  rx->sdata->vif.type))
 +					  rx->sdata->vif.type,
 +					  data_offset))
  		return RX_DROP_UNUSABLE;
  
  	ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
@@@ -2411,44 -2433,6 +2411,44 @@@
  	return RX_QUEUED;
  }
  
 +static ieee80211_rx_result debug_noinline
 +ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
 +{
 +	struct sk_buff *skb = rx->skb;
 +	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 +	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 +	__le16 fc = hdr->frame_control;
 +
 +	if (!(status->rx_flags & IEEE80211_RX_AMSDU))
 +		return RX_CONTINUE;
 +
 +	if (unlikely(!ieee80211_is_data(fc)))
 +		return RX_CONTINUE;
 +
 +	if (unlikely(!ieee80211_is_data_present(fc)))
 +		return RX_DROP_MONITOR;
 +
 +	if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
 +		switch (rx->sdata->vif.type) {
 +		case NL80211_IFTYPE_AP_VLAN:
 +			if (!rx->sdata->u.vlan.sta)
 +				return RX_DROP_UNUSABLE;
 +			break;
 +		case NL80211_IFTYPE_STATION:
 +			if (!rx->sdata->u.mgd.use_4addr)
 +				return RX_DROP_UNUSABLE;
 +			break;
 +		default:
 +			return RX_DROP_UNUSABLE;
 +		}
 +	}
 +
 +	if (is_multicast_ether_addr(hdr->addr1))
 +		return RX_DROP_UNUSABLE;
 +
 +	return __ieee80211_rx_h_amsdu(rx, 0);
 +}
 +
  #ifdef CONFIG_MAC80211_MESH
  static ieee80211_rx_result
  ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
@@@ -2864,7 -2848,6 +2864,7 @@@ ieee80211_rx_h_action(struct ieee80211_
  		case WLAN_HT_ACTION_SMPS: {
  			struct ieee80211_supported_band *sband;
  			enum ieee80211_smps_mode smps_mode;
 +			struct sta_opmode_info sta_opmode = {};
  
  			/* convert to HT capability */
  			switch (mgmt->u.action.u.ht_smps.smps_control) {
@@@ -2885,24 -2868,17 +2885,24 @@@
  			if (rx->sta->sta.smps_mode == smps_mode)
  				goto handled;
  			rx->sta->sta.smps_mode = smps_mode;
 +			sta_opmode.smps_mode = smps_mode;
 +			sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED;
  
  			sband = rx->local->hw.wiphy->bands[status->band];
  
  			rate_control_rate_update(local, sband, rx->sta,
  						 IEEE80211_RC_SMPS_CHANGED);
 +			cfg80211_sta_opmode_change_notify(sdata->dev,
 +							  rx->sta->addr,
 +							  &sta_opmode,
 +							  GFP_KERNEL);
  			goto handled;
  		}
  		case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
  			struct ieee80211_supported_band *sband;
  			u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
  			enum ieee80211_sta_rx_bandwidth max_bw, new_bw;
 +			struct sta_opmode_info sta_opmode = {};
  
  			/* If it doesn't support 40 MHz it can't change ... */
  			if (!(rx->sta->sta.ht_cap.cap &
@@@ -2923,15 -2899,9 +2923,15 @@@
  
  			rx->sta->sta.bandwidth = new_bw;
  			sband = rx->local->hw.wiphy->bands[status->band];
 +			sta_opmode.bw = new_bw;
 +			sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED;
  
  			rate_control_rate_update(local, sband, rx->sta,
  						 IEEE80211_RC_BW_CHANGED);
 +			cfg80211_sta_opmode_change_notify(sdata->dev,
 +							  rx->sta->addr,
 +							  &sta_opmode,
 +							  GFP_KERNEL);
  			goto handled;
  		}
  		default:
@@@ -3761,6 -3731,15 +3761,6 @@@ void ieee80211_check_fast_rx(struct sta
  
  	switch (sdata->vif.type) {
  	case NL80211_IFTYPE_STATION:
 -		/* 4-addr is harder to deal with, later maybe */
 -		if (sdata->u.mgd.use_4addr)
 -			goto clear;
 -		/* software powersave is a huge mess, avoid all of it */
 -		if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
 -			goto clear;
 -		if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
 -		    !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
 -			goto clear;
  		if (sta->sta.tdls) {
  			fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
  			fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
@@@ -3772,23 -3751,6 +3772,23 @@@
  			fastrx.expected_ds_bits =
  				cpu_to_le16(IEEE80211_FCTL_FROMDS);
  		}
 +
 +		if (sdata->u.mgd.use_4addr && !sta->sta.tdls) {
 +			fastrx.expected_ds_bits |=
 +				cpu_to_le16(IEEE80211_FCTL_TODS);
 +			fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
 +			fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
 +		}
 +
 +		if (!sdata->u.mgd.powersave)
 +			break;
 +
 +		/* software powersave is a huge mess, avoid all of it */
 +		if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
 +			goto clear;
 +		if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
 +		    !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
 +			goto clear;
  		break;
  	case NL80211_IFTYPE_AP_VLAN:
  	case NL80211_IFTYPE_AP:
@@@ -3805,15 -3767,6 +3805,15 @@@
  			!(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
  			(sdata->vif.type != NL80211_IFTYPE_AP_VLAN ||
  			 !sdata->u.vlan.sta);
 +
 +		if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
 +		    sdata->u.vlan.sta) {
 +			fastrx.expected_ds_bits |=
 +				cpu_to_le16(IEEE80211_FCTL_FROMDS);
 +			fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
 +			fastrx.internal_forward = 0;
 +		}
 +
  		break;
  	default:
  		goto clear;
@@@ -3912,8 -3865,7 +3912,8 @@@ static bool ieee80211_invoke_fast_rx(st
  	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
  	struct sta_info *sta = rx->sta;
  	int orig_len = skb->len;
 -	int snap_offs = ieee80211_hdrlen(hdr->frame_control);
 +	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
 +	int snap_offs = hdrlen;
  	struct {
  		u8 snap[sizeof(rfc1042_header)];
  		__be16 proto;
@@@ -3944,6 -3896,10 +3944,6 @@@
  	    (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS)
  		return false;
  
 -	/* we don't deal with A-MSDU deaggregation here */
 -	if (status->rx_flags & IEEE80211_RX_AMSDU)
 -		return false;
 -
  	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
  		return false;
  
@@@ -3965,7 -3921,7 +3965,7 @@@
  	if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
  					      IEEE80211_FCTL_TODS)) !=
  	    fast_rx->expected_ds_bits)
- 		goto drop;
+ 		return false;
  
  	/* assign the key to drop unencrypted frames (later)
  	 * and strip the IV/MIC if necessary
@@@ -3975,24 -3931,21 +3975,24 @@@
  		snap_offs += IEEE80211_CCMP_HDR_LEN;
  	}
  
 -	if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
 -		goto drop;
 -	payload = (void *)(skb->data + snap_offs);
 +	if (!(status->rx_flags & IEEE80211_RX_AMSDU)) {
 +		if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
 +			goto drop;
  
 -	if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr))
 -		return false;
 +		payload = (void *)(skb->data + snap_offs);
  
 -	/* Don't handle these here since they require special code.
 -	 * Accept AARP and IPX even though they should come with a
 -	 * bridge-tunnel header - but if we get them this way then
 -	 * there's little point in discarding them.
 -	 */
 -	if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) ||
 -		     payload->proto == fast_rx->control_port_protocol))
 -		return false;
 +		if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr))
 +			return false;
 +
 +		/* Don't handle these here since they require special code.
 +		 * Accept AARP and IPX even though they should come with a
 +		 * bridge-tunnel header - but if we get them this way then
 +		 * there's little point in discarding them.
 +		 */
 +		if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) ||
 +			     payload->proto == fast_rx->control_port_protocol))
 +			return false;
 +	}
  
  	/* after this point, don't punt to the slowpath! */
  
@@@ -4006,6 -3959,12 +4006,6 @@@
  	}
  
  	/* statistics part of ieee80211_rx_h_sta_process() */
 -	stats->last_rx = jiffies;
 -	stats->last_rate = sta_stats_encode_rate(status);
 -
 -	stats->fragments++;
 -	stats->packets++;
 -
  	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
  		stats->last_signal = status->signal;
  		if (!fast_rx->uses_rss)
@@@ -4034,20 -3993,6 +4034,20 @@@
  	if (rx->key && !ieee80211_has_protected(hdr->frame_control))
  		goto drop;
  
 +	if (status->rx_flags & IEEE80211_RX_AMSDU) {
 +		if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) !=
 +		    RX_QUEUED)
 +			goto drop;
 +
 +		return true;
 +	}
 +
 +	stats->last_rx = jiffies;
 +	stats->last_rate = sta_stats_encode_rate(status);
 +
 +	stats->fragments++;
 +	stats->packets++;
 +
  	/* do the header conversion - first grab the addresses */
  	ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs);
  	ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs);
diff --combined net/mac80211/tx.c
index 7643178ef132,69722504e3e1..933c67b5f845
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@@ -797,6 -797,7 +797,6 @@@ ieee80211_tx_h_sequence(struct ieee8021
  {
  	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
  	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
 -	u8 *qc;
  	int tid;
  
  	/*
@@@ -843,7 -844,9 +843,7 @@@
  		return TX_CONTINUE;
  
  	/* include per-STA, per-TID sequence counter */
 -
 -	qc = ieee80211_get_qos_ctl(hdr);
 -	tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
 +	tid = ieee80211_get_tid(hdr);
  	tx->sta->tx_stats.msdu[tid]++;
  
  	hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
@@@ -1155,6 -1158,7 +1155,6 @@@ ieee80211_tx_prepare(struct ieee80211_s
  	struct ieee80211_hdr *hdr;
  	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
  	int tid;
 -	u8 *qc;
  
  	memset(tx, 0, sizeof(*tx));
  	tx->skb = skb;
@@@ -1194,7 -1198,8 +1194,7 @@@
  	    !ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW)) {
  		struct tid_ampdu_tx *tid_tx;
  
 -		qc = ieee80211_get_qos_ctl(hdr);
 -		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
 +		tid = ieee80211_get_tid(hdr);
  
  		tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
  		if (tid_tx) {
@@@ -1916,7 -1921,7 +1916,7 @@@ void ieee80211_xmit(struct ieee80211_su
  {
  	struct ieee80211_local *local = sdata->local;
  	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 -	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 +	struct ieee80211_hdr *hdr;
  	int headroom;
  	bool may_encrypt;
  
@@@ -3569,6 -3574,14 +3569,14 @@@ void __ieee80211_subif_start_xmit(struc
  	if (!IS_ERR_OR_NULL(sta)) {
  		struct ieee80211_fast_tx *fast_tx;
  
+ 		/* We need a bit of data queued to build aggregates properly, so
+ 		 * instruct the TCP stack to allow more than a single ms of data
+ 		 * to be queued in the stack. The value is a bit-shift of 1
+ 		 * second, so 8 is ~4ms of queued data. Only affects local TCP
+ 		 * sockets.
+ 		 */
+ 		sk_pacing_shift_update(skb->sk, 8);
+ 
  		fast_tx = rcu_dereference(sta->fast_tx);
  
  		if (fast_tx &&
diff --combined net/smc/af_smc.c
index 26684e086750,8cc97834d4f6..2c6f4e0a9f3d
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@@ -7,6 -7,7 +7,6 @@@
   *  applicable with RoCE-cards only
   *
   *  Initial restrictions:
 - *    - non-blocking connect postponed
   *    - IPv6 support postponed
   *    - support for alternate links postponed
   *    - partial support for non-blocking sockets only
@@@ -23,6 -24,7 +23,6 @@@
  
  #include <linux/module.h>
  #include <linux/socket.h>
 -#include <linux/inetdevice.h>
  #include <linux/workqueue.h>
  #include <linux/in.h>
  #include <linux/sched/signal.h>
@@@ -271,7 -273,47 +271,7 @@@ static void smc_copy_sock_settings_to_s
  	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
  }
  
 -/* determine subnet and mask of internal TCP socket */
 -int smc_netinfo_by_tcpsk(struct socket *clcsock,
 -			 __be32 *subnet, u8 *prefix_len)
 -{
 -	struct dst_entry *dst = sk_dst_get(clcsock->sk);
 -	struct in_device *in_dev;
 -	struct sockaddr_in addr;
 -	int rc = -ENOENT;
 -	int len;
 -
 -	if (!dst) {
 -		rc = -ENOTCONN;
 -		goto out;
 -	}
 -	if (!dst->dev) {
 -		rc = -ENODEV;
 -		goto out_rel;
 -	}
 -
 -	/* get address to which the internal TCP socket is bound */
 -	kernel_getsockname(clcsock, (struct sockaddr *)&addr, &len);
 -	/* analyze IPv4 specific data of net_device belonging to TCP socket */
 -	rcu_read_lock();
 -	in_dev = __in_dev_get_rcu(dst->dev);
 -	for_ifa(in_dev) {
 -		if (!inet_ifa_match(addr.sin_addr.s_addr, ifa))
 -			continue;
 -		*prefix_len = inet_mask_len(ifa->ifa_mask);
 -		*subnet = ifa->ifa_address & ifa->ifa_mask;
 -		rc = 0;
 -		break;
 -	} endfor_ifa(in_dev);
 -	rcu_read_unlock();
 -
 -out_rel:
 -	dst_release(dst);
 -out:
 -	return rc;
 -}
 -
 -static int smc_clnt_conf_first_link(struct smc_sock *smc, union ib_gid *gid)
 +static int smc_clnt_conf_first_link(struct smc_sock *smc)
  {
  	struct smc_link_group *lgr = smc->conn.lgr;
  	struct smc_link *link;
@@@ -291,9 -333,6 +291,9 @@@
  		return rc;
  	}
  
 +	if (link->llc_confirm_rc)
 +		return SMC_CLC_DECL_RMBE_EC;
 +
  	rc = smc_ib_modify_qp_rts(link);
  	if (rc)
  		return SMC_CLC_DECL_INTERR;
@@@ -308,33 -347,11 +308,33 @@@
  	/* send CONFIRM LINK response over RoCE fabric */
  	rc = smc_llc_send_confirm_link(link,
  				       link->smcibdev->mac[link->ibport - 1],
 -				       gid, SMC_LLC_RESP);
 +				       &link->smcibdev->gid[link->ibport - 1],
 +				       SMC_LLC_RESP);
  	if (rc < 0)
  		return SMC_CLC_DECL_TCL;
  
 -	return rc;
 +	/* receive ADD LINK request from server over RoCE fabric */
 +	rest = wait_for_completion_interruptible_timeout(&link->llc_add,
 +							 SMC_LLC_WAIT_TIME);
 +	if (rest <= 0) {
 +		struct smc_clc_msg_decline dclc;
 +
 +		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
 +				      SMC_CLC_DECLINE);
 +		return rc;
 +	}
 +
 +	/* send add link reject message, only one link supported for now */
 +	rc = smc_llc_send_add_link(link,
 +				   link->smcibdev->mac[link->ibport - 1],
 +				   &link->smcibdev->gid[link->ibport - 1],
 +				   SMC_LLC_RESP);
 +	if (rc < 0)
 +		return SMC_CLC_DECL_TCL;
 +
 +	link->state = SMC_LNK_ACTIVE;
 +
 +	return 0;
  }
  
  static void smc_conn_save_peer_info(struct smc_sock *smc,
@@@ -356,9 -373,19 +356,9 @@@ static void smc_link_save_peer_info(str
  	link->peer_mtu = clc->qp_mtu;
  }
  
 -static void smc_lgr_forget(struct smc_link_group *lgr)
 -{
 -	spin_lock_bh(&smc_lgr_list.lock);
 -	/* do not use this link group for new connections */
 -	if (!list_empty(&lgr->list))
 -		list_del_init(&lgr->list);
 -	spin_unlock_bh(&smc_lgr_list.lock);
 -}
 -
  /* setup for RDMA connection of client */
  static int smc_connect_rdma(struct smc_sock *smc)
  {
 -	struct sockaddr_in *inaddr = (struct sockaddr_in *)smc->addr;
  	struct smc_clc_msg_accept_confirm aclc;
  	int local_contact = SMC_FIRST_CONTACT;
  	struct smc_ib_device *smcibdev;
@@@ -412,8 -439,8 +412,8 @@@
  
  	srv_first_contact = aclc.hdr.flag;
  	mutex_lock(&smc_create_lgr_pending);
 -	local_contact = smc_conn_create(smc, inaddr->sin_addr.s_addr, smcibdev,
 -					ibport, &aclc.lcl, srv_first_contact);
 +	local_contact = smc_conn_create(smc, smcibdev, ibport, &aclc.lcl,
 +					srv_first_contact);
  	if (local_contact < 0) {
  		rc = local_contact;
  		if (rc == -ENOMEM)
@@@ -472,7 -499,8 +472,7 @@@
  
  	if (local_contact == SMC_FIRST_CONTACT) {
  		/* QP confirmation over RoCE fabric */
 -		reason_code = smc_clnt_conf_first_link(
 -			smc, &smcibdev->gid[ibport - 1]);
 +		reason_code = smc_clnt_conf_first_link(smc);
  		if (reason_code < 0) {
  			rc = reason_code;
  			goto out_err_unlock;
@@@ -531,6 -559,7 +531,6 @@@ static int smc_connect(struct socket *s
  		goto out_err;
  	if (addr->sa_family != AF_INET)
  		goto out_err;
 -	smc->addr = addr;	/* needed for nonblocking connect */
  
  	lock_sock(sk);
  	switch (sk->sk_state) {
@@@ -720,34 -749,9 +720,34 @@@ static int smc_serv_conf_first_link(str
  
  		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
  				      SMC_CLC_DECLINE);
 +		return rc;
  	}
  
 -	return rc;
 +	if (link->llc_confirm_resp_rc)
 +		return SMC_CLC_DECL_RMBE_EC;
 +
 +	/* send ADD LINK request to client over the RoCE fabric */
 +	rc = smc_llc_send_add_link(link,
 +				   link->smcibdev->mac[link->ibport - 1],
 +				   &link->smcibdev->gid[link->ibport - 1],
 +				   SMC_LLC_REQ);
 +	if (rc < 0)
 +		return SMC_CLC_DECL_TCL;
 +
 +	/* receive ADD LINK response from client over the RoCE fabric */
 +	rest = wait_for_completion_interruptible_timeout(&link->llc_add_resp,
 +							 SMC_LLC_WAIT_TIME);
 +	if (rest <= 0) {
 +		struct smc_clc_msg_decline dclc;
 +
 +		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
 +				      SMC_CLC_DECLINE);
 +		return rc;
 +	}
 +
 +	link->state = SMC_LNK_ACTIVE;
 +
 +	return 0;
  }
  
  /* setup for RDMA connection of server */
@@@ -763,10 -767,11 +763,10 @@@ static void smc_listen_work(struct work
  	struct sock *newsmcsk = &new_smc->sk;
  	struct smc_clc_msg_proposal *pclc;
  	struct smc_ib_device *smcibdev;
 -	struct sockaddr_in peeraddr;
  	u8 buf[SMC_CLC_MAX_LEN];
  	struct smc_link *link;
  	int reason_code = 0;
 -	int rc = 0, len;
 +	int rc = 0;
  	__be32 subnet;
  	u8 prefix_len;
  	u8 ibport;
@@@ -804,7 -809,7 +804,7 @@@
  	}
  
  	/* determine subnet and mask from internal TCP socket */
 -	rc = smc_netinfo_by_tcpsk(newclcsock, &subnet, &prefix_len);
 +	rc = smc_clc_netinfo_by_tcpsk(newclcsock, &subnet, &prefix_len);
  	if (rc) {
  		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
  		goto decline_rdma;
@@@ -818,10 -823,13 +818,10 @@@
  		goto decline_rdma;
  	}
  
 -	/* get address of the peer connected to the internal TCP socket */
 -	kernel_getpeername(newclcsock, (struct sockaddr *)&peeraddr, &len);
 -
  	/* allocate connection / link group */
  	mutex_lock(&smc_create_lgr_pending);
 -	local_contact = smc_conn_create(new_smc, peeraddr.sin_addr.s_addr,
 -					smcibdev, ibport, &pclc->lcl, 0);
 +	local_contact = smc_conn_create(new_smc, smcibdev, ibport, &pclc->lcl,
 +					0);
  	if (local_contact < 0) {
  		rc = local_contact;
  		if (rc == -ENOMEM)
@@@ -1067,7 -1075,7 +1067,7 @@@ out
  }
  
  static int smc_getname(struct socket *sock, struct sockaddr *addr,
 -		       int *len, int peer)
 +		       int peer)
  {
  	struct smc_sock *smc;
  
@@@ -1077,7 -1085,7 +1077,7 @@@
  
  	smc = smc_sk(sock->sk);
  
 -	return smc->clcsock->ops->getname(smc->clcsock, addr, len, peer);
 +	return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
  }
  
  static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
@@@ -1398,8 -1406,10 +1398,10 @@@ static int smc_create(struct net *net, 
  	smc->use_fallback = false; /* assume rdma capability first */
  	rc = sock_create_kern(net, PF_INET, SOCK_STREAM,
  			      IPPROTO_TCP, &smc->clcsock);
- 	if (rc)
+ 	if (rc) {
  		sk_common_release(sk);
+ 		goto out;
+ 	}
  	smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
  	smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);
  
diff --combined net/smc/smc_core.c
index 702ce5f85e97,645dd226177b..f76f60e463cb
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@@ -144,7 -144,7 +144,7 @@@ free
  }
  
  /* create a new SMC link group */
 -static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr,
 +static int smc_lgr_create(struct smc_sock *smc,
  			  struct smc_ib_device *smcibdev, u8 ibport,
  			  char *peer_systemid, unsigned short vlan_id)
  {
@@@ -161,6 -161,7 +161,6 @@@
  	}
  	lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
  	lgr->sync_err = false;
 -	lgr->daddr = peer_in_addr;
  	memcpy(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN);
  	lgr->vlan_id = vlan_id;
  	rwlock_init(&lgr->sndbufs_lock);
@@@ -176,7 -177,7 +176,8 @@@
  
  	lnk = &lgr->lnk[SMC_SINGLE_LINK];
  	/* initialize link */
 +	lnk->state = SMC_LNK_ACTIVATING;
+ 	lnk->link_id = SMC_SINGLE_LINK;
  	lnk->smcibdev = smcibdev;
  	lnk->ibport = ibport;
  	lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu;
@@@ -198,8 -199,6 +199,8 @@@
  		goto destroy_qp;
  	init_completion(&lnk->llc_confirm);
  	init_completion(&lnk->llc_confirm_resp);
 +	init_completion(&lnk->llc_add);
 +	init_completion(&lnk->llc_add_resp);
  
  	smc->conn.lgr = lgr;
  	rwlock_init(&lgr->conns_lock);
@@@ -308,15 -307,6 +309,15 @@@ void smc_lgr_free(struct smc_link_grou
  	kfree(lgr);
  }
  
 +void smc_lgr_forget(struct smc_link_group *lgr)
 +{
 +	spin_lock_bh(&smc_lgr_list.lock);
 +	/* do not use this link group for new connections */
 +	if (!list_empty(&lgr->list))
 +		list_del_init(&lgr->list);
 +	spin_unlock_bh(&smc_lgr_list.lock);
 +}
 +
  /* terminate linkgroup abnormally */
  void smc_lgr_terminate(struct smc_link_group *lgr)
  {
@@@ -324,7 -314,15 +325,7 @@@
  	struct smc_sock *smc;
  	struct rb_node *node;
  
 -	spin_lock_bh(&smc_lgr_list.lock);
 -	if (list_empty(&lgr->list)) {
 -		/* termination already triggered */
 -		spin_unlock_bh(&smc_lgr_list.lock);
 -		return;
 -	}
 -	/* do not use this link group for new connections */
 -	list_del_init(&lgr->list);
 -	spin_unlock_bh(&smc_lgr_list.lock);
 +	smc_lgr_forget(lgr);
  
  	write_lock_bh(&lgr->conns_lock);
  	node = rb_first(&lgr->conns_all);
@@@ -403,7 -401,7 +404,7 @@@ static int smc_link_determine_gid(struc
  }
  
  /* create a new SMC connection (and a new link group if necessary) */
 -int smc_conn_create(struct smc_sock *smc, __be32 peer_in_addr,
 +int smc_conn_create(struct smc_sock *smc,
  		    struct smc_ib_device *smcibdev, u8 ibport,
  		    struct smc_clc_msg_local *lcl, int srv_first_contact)
  {
@@@ -460,7 -458,7 +461,7 @@@
  
  create:
  	if (local_contact == SMC_FIRST_CONTACT) {
 -		rc = smc_lgr_create(smc, peer_in_addr, smcibdev, ibport,
 +		rc = smc_lgr_create(smc, smcibdev, ibport,
  				    lcl->id_for_peer, vlan_id);
  		if (rc)
  			goto out;
@@@ -468,7 -466,7 +469,7 @@@
  		rc = smc_link_determine_gid(conn->lgr);
  	}
  	conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
- 	conn->local_tx_ctrl.len = sizeof(struct smc_cdc_msg);
+ 	conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
  #ifndef KERNEL_HAS_ATOMIC64
  	spin_lock_init(&conn->acurs_lock);
  #endif
@@@ -701,55 -699,27 +702,55 @@@ static inline int smc_rmb_reserve_rtoke
  	return -ENOSPC;
  }
  
 -/* save rkey and dma_addr received from peer during clc handshake */
 -int smc_rmb_rtoken_handling(struct smc_connection *conn,
 -			    struct smc_clc_msg_accept_confirm *clc)
 +/* add a new rtoken from peer */
 +int smc_rtoken_add(struct smc_link_group *lgr, __be64 nw_vaddr, __be32 nw_rkey)
  {
 -	u64 dma_addr = be64_to_cpu(clc->rmb_dma_addr);
 -	struct smc_link_group *lgr = conn->lgr;
 -	u32 rkey = ntohl(clc->rmb_rkey);
 +	u64 dma_addr = be64_to_cpu(nw_vaddr);
 +	u32 rkey = ntohl(nw_rkey);
  	int i;
  
  	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
  		if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) &&
  		    (lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr == dma_addr) &&
  		    test_bit(i, lgr->rtokens_used_mask)) {
 -			conn->rtoken_idx = i;
 +			/* already in list */
 +			return i;
 +		}
 +	}
 +	i = smc_rmb_reserve_rtoken_idx(lgr);
 +	if (i < 0)
 +		return i;
 +	lgr->rtokens[i][SMC_SINGLE_LINK].rkey = rkey;
 +	lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = dma_addr;
 +	return i;
 +}
 +
 +/* delete an rtoken */
 +int smc_rtoken_delete(struct smc_link_group *lgr, __be32 nw_rkey)
 +{
 +	u32 rkey = ntohl(nw_rkey);
 +	int i;
 +
 +	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
 +		if (lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey &&
 +		    test_bit(i, lgr->rtokens_used_mask)) {
 +			lgr->rtokens[i][SMC_SINGLE_LINK].rkey = 0;
 +			lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = 0;
 +
 +			clear_bit(i, lgr->rtokens_used_mask);
  			return 0;
  		}
  	}
 -	conn->rtoken_idx = smc_rmb_reserve_rtoken_idx(lgr);
 +	return -ENOENT;
 +}
 +
 +/* save rkey and dma_addr received from peer during clc handshake */
 +int smc_rmb_rtoken_handling(struct smc_connection *conn,
 +			    struct smc_clc_msg_accept_confirm *clc)
 +{
 +	conn->rtoken_idx = smc_rtoken_add(conn->lgr, clc->rmb_dma_addr,
 +					  clc->rmb_rkey);
  	if (conn->rtoken_idx < 0)
  		return conn->rtoken_idx;
 -	lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey = rkey;
 -	lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr = dma_addr;
  	return 0;
  }
diff --combined net/smc/smc_llc.c
index 54e8d6dc9201,b4aa4fcedb96..ea4b21981b4b
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@@ -4,6 -4,9 +4,6 @@@
   *
   *  Link Layer Control (LLC)
   *
 - *  For now, we only support the necessary "confirm link" functionality
 - *  which happens for the first RoCE link after successful CLC handshake.
 - *
   *  Copyright IBM Corp. 2016
   *
   *  Author(s):  Klaus Wacker <Klaus.Wacker at de.ibm.com>
@@@ -18,122 -21,6 +18,122 @@@
  #include "smc_clc.h"
  #include "smc_llc.h"
  
 +#define SMC_LLC_DATA_LEN		40
 +
 +struct smc_llc_hdr {
 +	struct smc_wr_rx_hdr common;
 +	u8 length;	/* 44 */
 +#if defined(__BIG_ENDIAN_BITFIELD)
 +	u8 reserved:4,
 +	   add_link_rej_rsn:4;
 +#elif defined(__LITTLE_ENDIAN_BITFIELD)
 +	u8 add_link_rej_rsn:4,
 +	   reserved:4;
 +#endif
 +	u8 flags;
 +};
 +
 +#define SMC_LLC_FLAG_NO_RMBE_EYEC	0x03
 +
 +struct smc_llc_msg_confirm_link {	/* type 0x01 */
 +	struct smc_llc_hdr hd;
 +	u8 sender_mac[ETH_ALEN];
 +	u8 sender_gid[SMC_GID_SIZE];
 +	u8 sender_qp_num[3];
 +	u8 link_num;
 +	u8 link_uid[SMC_LGR_ID_SIZE];
 +	u8 max_links;
 +	u8 reserved[9];
 +};
 +
 +#define SMC_LLC_FLAG_ADD_LNK_REJ	0x40
 +#define SMC_LLC_REJ_RSN_NO_ALT_PATH	1
 +
 +#define SMC_LLC_ADD_LNK_MAX_LINKS	2
 +
 +struct smc_llc_msg_add_link {		/* type 0x02 */
 +	struct smc_llc_hdr hd;
 +	u8 sender_mac[ETH_ALEN];
 +	u8 reserved2[2];
 +	u8 sender_gid[SMC_GID_SIZE];
 +	u8 sender_qp_num[3];
 +	u8 link_num;
 +	u8 flags2;	/* QP mtu */
 +	u8 initial_psn[3];
 +	u8 reserved[8];
 +};
 +
 +#define SMC_LLC_FLAG_DEL_LINK_ALL	0x40
 +#define SMC_LLC_FLAG_DEL_LINK_ORDERLY	0x20
 +
 +struct smc_llc_msg_del_link {		/* type 0x04 */
 +	struct smc_llc_hdr hd;
 +	u8 link_num;
 +	__be32 reason;
 +	u8 reserved[35];
 +} __packed;			/* format defined in RFC7609 */
 +
 +struct smc_llc_msg_test_link {		/* type 0x07 */
 +	struct smc_llc_hdr hd;
 +	u8 user_data[16];
 +	u8 reserved[24];
 +};
 +
 +struct smc_rmb_rtoken {
 +	union {
 +		u8 num_rkeys;	/* first rtoken byte of CONFIRM LINK msg */
 +				/* is actually the num of rtokens, first */
 +				/* rtoken is always for the current link */
 +		u8 link_id;	/* link id of the rtoken */
 +	};
 +	__be32 rmb_key;
 +	__be64 rmb_vaddr;
 +} __packed;			/* format defined in RFC7609 */
 +
 +#define SMC_LLC_RKEYS_PER_MSG	3
 +
 +struct smc_llc_msg_confirm_rkey {	/* type 0x06 */
 +	struct smc_llc_hdr hd;
 +	struct smc_rmb_rtoken rtoken[SMC_LLC_RKEYS_PER_MSG];
 +	u8 reserved;
 +};
 +
 +struct smc_llc_msg_confirm_rkey_cont {	/* type 0x08 */
 +	struct smc_llc_hdr hd;
 +	u8 num_rkeys;
 +	struct smc_rmb_rtoken rtoken[SMC_LLC_RKEYS_PER_MSG];
 +};
 +
 +#define SMC_LLC_DEL_RKEY_MAX	8
 +#define SMC_LLC_FLAG_RKEY_NEG	0x20
 +
 +struct smc_llc_msg_delete_rkey {	/* type 0x09 */
 +	struct smc_llc_hdr hd;
 +	u8 num_rkeys;
 +	u8 err_mask;
 +	u8 reserved[2];
 +	__be32 rkey[8];
 +	u8 reserved2[4];
 +};
 +
 +union smc_llc_msg {
 +	struct smc_llc_msg_confirm_link confirm_link;
 +	struct smc_llc_msg_add_link add_link;
 +	struct smc_llc_msg_del_link delete_link;
 +
 +	struct smc_llc_msg_confirm_rkey confirm_rkey;
 +	struct smc_llc_msg_confirm_rkey_cont confirm_rkey_cont;
 +	struct smc_llc_msg_delete_rkey delete_rkey;
 +
 +	struct smc_llc_msg_test_link test_link;
 +	struct {
 +		struct smc_llc_hdr hdr;
 +		u8 data[SMC_LLC_DATA_LEN];
 +	} raw;
 +};
 +
 +#define SMC_LLC_FLAG_RESP		0x80
 +
  /********************************** send *************************************/
  
  struct smc_llc_tx_pend {
@@@ -200,112 -87,14 +200,112 @@@ int smc_llc_send_confirm_link(struct sm
  	memset(confllc, 0, sizeof(*confllc));
  	confllc->hd.common.type = SMC_LLC_CONFIRM_LINK;
  	confllc->hd.length = sizeof(struct smc_llc_msg_confirm_link);
 +	confllc->hd.flags |= SMC_LLC_FLAG_NO_RMBE_EYEC;
  	if (reqresp == SMC_LLC_RESP)
  		confllc->hd.flags |= SMC_LLC_FLAG_RESP;
  	memcpy(confllc->sender_mac, mac, ETH_ALEN);
  	memcpy(confllc->sender_gid, gid, SMC_GID_SIZE);
  	hton24(confllc->sender_qp_num, link->roce_qp->qp_num);
- 	/* confllc->link_num = SMC_SINGLE_LINK; already done by memset above */
+ 	confllc->link_num = link->link_id;
  	memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE);
 -	confllc->max_links = SMC_LINKS_PER_LGR_MAX;
 +	confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS; /* enforce peer resp. */
 +	/* send llc message */
 +	rc = smc_wr_tx_send(link, pend);
 +	return rc;
 +}
 +
 +/* send ADD LINK request or response */
 +int smc_llc_send_add_link(struct smc_link *link, u8 mac[],
 +			  union ib_gid *gid,
 +			  enum smc_llc_reqresp reqresp)
 +{
 +	struct smc_llc_msg_add_link *addllc;
 +	struct smc_wr_tx_pend_priv *pend;
 +	struct smc_wr_buf *wr_buf;
 +	int rc;
 +
 +	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
 +	if (rc)
 +		return rc;
 +	addllc = (struct smc_llc_msg_add_link *)wr_buf;
 +	memset(addllc, 0, sizeof(*addllc));
 +	addllc->hd.common.type = SMC_LLC_ADD_LINK;
 +	addllc->hd.length = sizeof(struct smc_llc_msg_add_link);
 +	if (reqresp == SMC_LLC_RESP) {
 +		addllc->hd.flags |= SMC_LLC_FLAG_RESP;
 +		/* always reject more links for now */
 +		addllc->hd.flags |= SMC_LLC_FLAG_ADD_LNK_REJ;
 +		addllc->hd.add_link_rej_rsn = SMC_LLC_REJ_RSN_NO_ALT_PATH;
 +	}
 +	memcpy(addllc->sender_mac, mac, ETH_ALEN);
 +	memcpy(addllc->sender_gid, gid, SMC_GID_SIZE);
 +	/* send llc message */
 +	rc = smc_wr_tx_send(link, pend);
 +	return rc;
 +}
 +
 +/* send DELETE LINK request or response */
 +int smc_llc_send_delete_link(struct smc_link *link,
 +			     enum smc_llc_reqresp reqresp)
 +{
 +	struct smc_llc_msg_del_link *delllc;
 +	struct smc_wr_tx_pend_priv *pend;
 +	struct smc_wr_buf *wr_buf;
 +	int rc;
 +
 +	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
 +	if (rc)
 +		return rc;
 +	delllc = (struct smc_llc_msg_del_link *)wr_buf;
 +	memset(delllc, 0, sizeof(*delllc));
 +	delllc->hd.common.type = SMC_LLC_DELETE_LINK;
 +	delllc->hd.length = sizeof(struct smc_llc_msg_add_link);
 +	if (reqresp == SMC_LLC_RESP)
 +		delllc->hd.flags |= SMC_LLC_FLAG_RESP;
 +	/* DEL_LINK_ALL because only 1 link supported */
 +	delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
 +	delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
 +	delllc->link_num = link->link_id;
 +	/* send llc message */
 +	rc = smc_wr_tx_send(link, pend);
 +	return rc;
 +}
 +
 +/* send LLC test link request or response */
 +int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16],
 +			   enum smc_llc_reqresp reqresp)
 +{
 +	struct smc_llc_msg_test_link *testllc;
 +	struct smc_wr_tx_pend_priv *pend;
 +	struct smc_wr_buf *wr_buf;
 +	int rc;
 +
 +	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
 +	if (rc)
 +		return rc;
 +	testllc = (struct smc_llc_msg_test_link *)wr_buf;
 +	memset(testllc, 0, sizeof(*testllc));
 +	testllc->hd.common.type = SMC_LLC_TEST_LINK;
 +	testllc->hd.length = sizeof(struct smc_llc_msg_test_link);
 +	if (reqresp == SMC_LLC_RESP)
 +		testllc->hd.flags |= SMC_LLC_FLAG_RESP;
 +	memcpy(testllc->user_data, user_data, sizeof(testllc->user_data));
 +	/* send llc message */
 +	rc = smc_wr_tx_send(link, pend);
 +	return rc;
 +}
 +
 +/* send a prepared message */
 +static int smc_llc_send_message(struct smc_link *link, void *llcbuf, int llclen)
 +{
 +	struct smc_wr_tx_pend_priv *pend;
 +	struct smc_wr_buf *wr_buf;
 +	int rc;
 +
 +	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
 +	if (rc)
 +		return rc;
 +	memcpy(wr_buf, llcbuf, llclen);
  	/* send llc message */
  	rc = smc_wr_tx_send(link, pend);
  	return rc;
@@@ -317,156 -106,19 +317,156 @@@ static void smc_llc_rx_confirm_link(str
  				    struct smc_llc_msg_confirm_link *llc)
  {
  	struct smc_link_group *lgr;
 +	int conf_rc;
  
  	lgr = container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]);
 +
 +	/* RMBE eyecatchers are not supported */
 +	if (llc->hd.flags & SMC_LLC_FLAG_NO_RMBE_EYEC)
 +		conf_rc = 0;
 +	else
 +		conf_rc = ENOTSUPP;
 +
  	if (llc->hd.flags & SMC_LLC_FLAG_RESP) {
 -		if (lgr->role == SMC_SERV)
 +		if (lgr->role == SMC_SERV &&
 +		    link->state == SMC_LNK_ACTIVATING) {
 +			link->llc_confirm_resp_rc = conf_rc;
  			complete(&link->llc_confirm_resp);
 +		}
  	} else {
 -		if (lgr->role == SMC_CLNT) {
 +		if (lgr->role == SMC_CLNT &&
 +		    link->state == SMC_LNK_ACTIVATING) {
 +			link->llc_confirm_rc = conf_rc;
  			link->link_id = llc->link_num;
  			complete(&link->llc_confirm);
  		}
  	}
  }
  
 +static void smc_llc_rx_add_link(struct smc_link *link,
 +				struct smc_llc_msg_add_link *llc)
 +{
 +	struct smc_link_group *lgr = container_of(link, struct smc_link_group,
 +						  lnk[SMC_SINGLE_LINK]);
 +
 +	if (llc->hd.flags & SMC_LLC_FLAG_RESP) {
 +		if (link->state == SMC_LNK_ACTIVATING)
 +			complete(&link->llc_add_resp);
 +	} else {
 +		if (link->state == SMC_LNK_ACTIVATING) {
 +			complete(&link->llc_add);
 +			return;
 +		}
 +
 +		if (lgr->role == SMC_SERV) {
 +			smc_llc_send_add_link(link,
 +					link->smcibdev->mac[link->ibport - 1],
 +					&link->smcibdev->gid[link->ibport - 1],
 +					SMC_LLC_REQ);
 +
 +		} else {
 +			smc_llc_send_add_link(link,
 +					link->smcibdev->mac[link->ibport - 1],
 +					&link->smcibdev->gid[link->ibport - 1],
 +					SMC_LLC_RESP);
 +		}
 +	}
 +}
 +
 +static void smc_llc_rx_delete_link(struct smc_link *link,
 +				   struct smc_llc_msg_del_link *llc)
 +{
 +	struct smc_link_group *lgr = container_of(link, struct smc_link_group,
 +						  lnk[SMC_SINGLE_LINK]);
 +
 +	if (llc->hd.flags & SMC_LLC_FLAG_RESP) {
 +		if (lgr->role == SMC_SERV)
 +			smc_lgr_terminate(lgr);
 +	} else {
 +		if (lgr->role == SMC_SERV) {
 +			smc_lgr_forget(lgr);
 +			smc_llc_send_delete_link(link, SMC_LLC_REQ);
 +		} else {
 +			smc_llc_send_delete_link(link, SMC_LLC_RESP);
 +			smc_lgr_terminate(lgr);
 +		}
 +	}
 +}
 +
 +static void smc_llc_rx_test_link(struct smc_link *link,
 +				 struct smc_llc_msg_test_link *llc)
 +{
 +	if (llc->hd.flags & SMC_LLC_FLAG_RESP) {
 +		/* unused as long as we don't send this type of msg */
 +	} else {
 +		smc_llc_send_test_link(link, llc->user_data, SMC_LLC_RESP);
 +	}
 +}
 +
 +static void smc_llc_rx_confirm_rkey(struct smc_link *link,
 +				    struct smc_llc_msg_confirm_rkey *llc)
 +{
 +	struct smc_link_group *lgr;
 +	int rc;
 +
 +	lgr = container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]);
 +
 +	if (llc->hd.flags & SMC_LLC_FLAG_RESP) {
 +		/* unused as long as we don't send this type of msg */
 +	} else {
 +		rc = smc_rtoken_add(lgr,
 +				    llc->rtoken[0].rmb_vaddr,
 +				    llc->rtoken[0].rmb_key);
 +
 +		/* ignore rtokens for other links, we have only one link */
 +
 +		llc->hd.flags |= SMC_LLC_FLAG_RESP;
 +		if (rc < 0)
 +			llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
 +		smc_llc_send_message(link, (void *)llc, sizeof(*llc));
 +	}
 +}
 +
 +static void smc_llc_rx_confirm_rkey_cont(struct smc_link *link,
 +				      struct smc_llc_msg_confirm_rkey_cont *llc)
 +{
 +	if (llc->hd.flags & SMC_LLC_FLAG_RESP) {
 +		/* unused as long as we don't send this type of msg */
 +	} else {
 +		/* ignore rtokens for other links, we have only one link */
 +		llc->hd.flags |= SMC_LLC_FLAG_RESP;
 +		smc_llc_send_message(link, (void *)llc, sizeof(*llc));
 +	}
 +}
 +
 +static void smc_llc_rx_delete_rkey(struct smc_link *link,
 +				   struct smc_llc_msg_delete_rkey *llc)
 +{
 +	struct smc_link_group *lgr;
 +	u8 err_mask = 0;
 +	int i, max;
 +
 +	lgr = container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]);
 +
 +	if (llc->hd.flags & SMC_LLC_FLAG_RESP) {
 +		/* unused as long as we don't send this type of msg */
 +	} else {
 +		max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX);
 +		for (i = 0; i < max; i++) {
 +			if (smc_rtoken_delete(lgr, llc->rkey[i]))
 +				err_mask |= 1 << (SMC_LLC_DEL_RKEY_MAX - 1 - i);
 +		}
 +
 +		if (err_mask) {
 +			llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
 +			llc->err_mask = err_mask;
 +		}
 +
 +		llc->hd.flags |= SMC_LLC_FLAG_RESP;
 +		smc_llc_send_message(link, (void *)llc, sizeof(*llc));
 +	}
 +}
 +
  static void smc_llc_rx_handler(struct ib_wc *wc, void *buf)
  {
  	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
@@@ -476,30 -128,8 +476,30 @@@
  		return; /* short message */
  	if (llc->raw.hdr.length != sizeof(*llc))
  		return; /* invalid message */
 -	if (llc->raw.hdr.common.type == SMC_LLC_CONFIRM_LINK)
 +
 +	switch (llc->raw.hdr.common.type) {
 +	case SMC_LLC_TEST_LINK:
 +		smc_llc_rx_test_link(link, &llc->test_link);
 +		break;
 +	case SMC_LLC_CONFIRM_LINK:
  		smc_llc_rx_confirm_link(link, &llc->confirm_link);
 +		break;
 +	case SMC_LLC_ADD_LINK:
 +		smc_llc_rx_add_link(link, &llc->add_link);
 +		break;
 +	case SMC_LLC_DELETE_LINK:
 +		smc_llc_rx_delete_link(link, &llc->delete_link);
 +		break;
 +	case SMC_LLC_CONFIRM_RKEY:
 +		smc_llc_rx_confirm_rkey(link, &llc->confirm_rkey);
 +		break;
 +	case SMC_LLC_CONFIRM_RKEY_CONT:
 +		smc_llc_rx_confirm_rkey_cont(link, &llc->confirm_rkey_cont);
 +		break;
 +	case SMC_LLC_DELETE_RKEY:
 +		smc_llc_rx_delete_rkey(link, &llc->delete_rkey);
 +		break;
 +	}
  }
  
  /***************************** init, exit, misc ******************************/
@@@ -510,30 -140,6 +510,30 @@@ static struct smc_wr_rx_handler smc_llc
  		.type		= SMC_LLC_CONFIRM_LINK
  	},
  	{
 +		.handler	= smc_llc_rx_handler,
 +		.type		= SMC_LLC_TEST_LINK
 +	},
 +	{
 +		.handler	= smc_llc_rx_handler,
 +		.type		= SMC_LLC_ADD_LINK
 +	},
 +	{
 +		.handler	= smc_llc_rx_handler,
 +		.type		= SMC_LLC_DELETE_LINK
 +	},
 +	{
 +		.handler	= smc_llc_rx_handler,
 +		.type		= SMC_LLC_CONFIRM_RKEY
 +	},
 +	{
 +		.handler	= smc_llc_rx_handler,
 +		.type		= SMC_LLC_CONFIRM_RKEY_CONT
 +	},
 +	{
 +		.handler	= smc_llc_rx_handler,
 +		.type		= SMC_LLC_DELETE_RKEY
 +	},
 +	{
  		.handler	= NULL,
  	}
  };
diff --combined net/tipc/group.c
index 03086ccb7746,04e516d18054..d7a7befeddd4
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@@ -37,7 -37,7 +37,7 @@@
  #include "addr.h"
  #include "group.h"
  #include "bcast.h"
 -#include "server.h"
 +#include "topsrv.h"
  #include "msg.h"
  #include "socket.h"
  #include "node.h"
@@@ -189,6 -189,7 +189,7 @@@ struct tipc_group *tipc_group_create(st
  	grp->loopback = mreq->flags & TIPC_GROUP_LOOPBACK;
  	grp->events = mreq->flags & TIPC_GROUP_MEMBER_EVTS;
  	grp->open = group_is_open;
+ 	*grp->open = false;
  	filter |= global ? TIPC_SUB_CLUSTER_SCOPE : TIPC_SUB_NODE_SCOPE;
  	if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0,
  				    filter, &grp->subid))
diff --combined net/tipc/socket.c
index f93477187a90,7dfa9fc99ec3..8b04e601311c
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@@ -473,6 -473,7 +473,7 @@@ static int tipc_sk_create(struct net *n
  	sk->sk_write_space = tipc_write_space;
  	sk->sk_destruct = tipc_sock_destruct;
  	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
+ 	tsk->group_is_open = true;
  	atomic_set(&tsk->dupl_rcvcnt, 0);
  
  	/* Start out with safe limits until we receive an advertised window */
@@@ -665,7 -666,7 +666,7 @@@ exit
   *       a completely predictable manner).
   */
  static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
 -			int *uaddr_len, int peer)
 +			int peer)
  {
  	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
  	struct sock *sk = sock->sk;
@@@ -684,12 -685,13 +685,12 @@@
  		addr->addr.id.node = tn->own_addr;
  	}
  
 -	*uaddr_len = sizeof(*addr);
  	addr->addrtype = TIPC_ADDR_ID;
  	addr->family = AF_TIPC;
  	addr->scope = 0;
  	addr->addr.name.domain = 0;
  
 -	return 0;
 +	return sizeof(*addr);
  }
  
  /**
diff --combined tools/testing/selftests/bpf/test_verifier.c
index 9eb05f3135ac,437c0b1c9d21..86d7ff491b6f
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@@ -24,6 -24,7 +24,6 @@@
  #include <limits.h>
  
  #include <sys/capability.h>
 -#include <sys/resource.h>
  
  #include <linux/unistd.h>
  #include <linux/filter.h>
@@@ -40,7 -41,7 +40,7 @@@
  #  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
  # endif
  #endif
 -
 +#include "bpf_rlimit.h"
  #include "../../../include/linux/filter.h"
  
  #ifndef ARRAY_SIZE
@@@ -56,9 -57,6 +56,9 @@@
  #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
  #define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)
  
 +#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
 +static bool unpriv_disabled = false;
 +
  struct bpf_test {
  	const char *descr;
  	struct bpf_insn	insns[MAX_INSNS];
@@@ -2589,74 -2587,17 +2589,74 @@@ static struct bpf_test tests[] = 
  		.result = ACCEPT,
  	},
  	{
 +		"runtime/jit: tail_call within bounds, prog once",
 +		.insns = {
 +			BPF_MOV64_IMM(BPF_REG_3, 0),
 +			BPF_LD_MAP_FD(BPF_REG_2, 0),
 +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 +				     BPF_FUNC_tail_call),
 +			BPF_MOV64_IMM(BPF_REG_0, 1),
 +			BPF_EXIT_INSN(),
 +		},
 +		.fixup_prog = { 1 },
 +		.result = ACCEPT,
 +		.retval = 42,
 +	},
 +	{
 +		"runtime/jit: tail_call within bounds, prog loop",
 +		.insns = {
 +			BPF_MOV64_IMM(BPF_REG_3, 1),
 +			BPF_LD_MAP_FD(BPF_REG_2, 0),
 +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 +				     BPF_FUNC_tail_call),
 +			BPF_MOV64_IMM(BPF_REG_0, 1),
 +			BPF_EXIT_INSN(),
 +		},
 +		.fixup_prog = { 1 },
 +		.result = ACCEPT,
 +		.retval = 41,
 +	},
 +	{
 +		"runtime/jit: tail_call within bounds, no prog",
 +		.insns = {
 +			BPF_MOV64_IMM(BPF_REG_3, 2),
 +			BPF_LD_MAP_FD(BPF_REG_2, 0),
 +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 +				     BPF_FUNC_tail_call),
 +			BPF_MOV64_IMM(BPF_REG_0, 1),
 +			BPF_EXIT_INSN(),
 +		},
 +		.fixup_prog = { 1 },
 +		.result = ACCEPT,
 +		.retval = 1,
 +	},
 +	{
 +		"runtime/jit: tail_call out of bounds",
 +		.insns = {
 +			BPF_MOV64_IMM(BPF_REG_3, 256),
 +			BPF_LD_MAP_FD(BPF_REG_2, 0),
 +			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 +				     BPF_FUNC_tail_call),
 +			BPF_MOV64_IMM(BPF_REG_0, 2),
 +			BPF_EXIT_INSN(),
 +		},
 +		.fixup_prog = { 1 },
 +		.result = ACCEPT,
 +		.retval = 2,
 +	},
 +	{
  		"runtime/jit: pass negative index to tail_call",
  		.insns = {
  			BPF_MOV64_IMM(BPF_REG_3, -1),
  			BPF_LD_MAP_FD(BPF_REG_2, 0),
  			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  				     BPF_FUNC_tail_call),
 -			BPF_MOV64_IMM(BPF_REG_0, 0),
 +			BPF_MOV64_IMM(BPF_REG_0, 2),
  			BPF_EXIT_INSN(),
  		},
  		.fixup_prog = { 1 },
  		.result = ACCEPT,
 +		.retval = 2,
  	},
  	{
  		"runtime/jit: pass > 32bit index to tail_call",
@@@ -2665,12 -2606,11 +2665,12 @@@
  			BPF_LD_MAP_FD(BPF_REG_2, 0),
  			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
  				     BPF_FUNC_tail_call),
 -			BPF_MOV64_IMM(BPF_REG_0, 0),
 +			BPF_MOV64_IMM(BPF_REG_0, 2),
  			BPF_EXIT_INSN(),
  		},
  		.fixup_prog = { 2 },
  		.result = ACCEPT,
 +		.retval = 42,
  	},
  	{
  		"stack pointer arithmetic",
@@@ -11224,94 -11164,63 +11224,151 @@@
  		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
  	},
  	{
 +		"jit: lsh, rsh, arsh by 1",
 +		.insns = {
 +			BPF_MOV64_IMM(BPF_REG_0, 1),
 +			BPF_MOV64_IMM(BPF_REG_1, 0xff),
 +			BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
 +			BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
 +			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
 +			BPF_EXIT_INSN(),
 +			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
 +			BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
 +			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
 +			BPF_EXIT_INSN(),
 +			BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
 +			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
 +			BPF_EXIT_INSN(),
 +			BPF_MOV64_IMM(BPF_REG_0, 2),
 +			BPF_EXIT_INSN(),
 +		},
 +		.result = ACCEPT,
 +		.retval = 2,
 +	},
 +	{
 +		"jit: mov32 for ldimm64, 1",
 +		.insns = {
 +			BPF_MOV64_IMM(BPF_REG_0, 2),
 +			BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
 +			BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
 +			BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
 +			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
 +			BPF_MOV64_IMM(BPF_REG_0, 1),
 +			BPF_EXIT_INSN(),
 +		},
 +		.result = ACCEPT,
 +		.retval = 2,
 +	},
 +	{
 +		"jit: mov32 for ldimm64, 2",
 +		.insns = {
 +			BPF_MOV64_IMM(BPF_REG_0, 1),
 +			BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
 +			BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
 +			BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
 +			BPF_MOV64_IMM(BPF_REG_0, 2),
 +			BPF_EXIT_INSN(),
 +		},
 +		.result = ACCEPT,
 +		.retval = 2,
 +	},
 +	{
 +		"jit: various mul tests",
 +		.insns = {
 +			BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
 +			BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
 +			BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
 +			BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
 +			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
 +			BPF_MOV64_IMM(BPF_REG_0, 1),
 +			BPF_EXIT_INSN(),
 +			BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
 +			BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
 +			BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
 +			BPF_MOV64_IMM(BPF_REG_0, 1),
 +			BPF_EXIT_INSN(),
 +			BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
 +			BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
 +			BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
 +			BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
 +			BPF_MOV64_IMM(BPF_REG_0, 1),
 +			BPF_EXIT_INSN(),
 +			BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
 +			BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
 +			BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
 +			BPF_MOV64_IMM(BPF_REG_0, 1),
 +			BPF_EXIT_INSN(),
 +			BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
 +			BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
 +			BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
 +			BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
 +			BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
 +			BPF_MOV64_IMM(BPF_REG_0, 1),
 +			BPF_EXIT_INSN(),
 +			BPF_MOV64_IMM(BPF_REG_0, 2),
 +			BPF_EXIT_INSN(),
 +		},
 +		.result = ACCEPT,
 +		.retval = 2,
 +	},
- 
++	{
+ 		"xadd/w check unaligned stack",
+ 		.insns = {
+ 			BPF_MOV64_IMM(BPF_REG_0, 1),
+ 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ 			BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
+ 			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.result = REJECT,
+ 		.errstr = "misaligned stack access off",
+ 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ 	},
+ 	{
+ 		"xadd/w check unaligned map",
+ 		.insns = {
+ 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ 			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ 			BPF_LD_MAP_FD(BPF_REG_1, 0),
+ 			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ 				     BPF_FUNC_map_lookup_elem),
+ 			BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ 			BPF_EXIT_INSN(),
+ 			BPF_MOV64_IMM(BPF_REG_1, 1),
+ 			BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.fixup_map1 = { 3 },
+ 		.result = REJECT,
+ 		.errstr = "misaligned value access off",
+ 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ 	},
+ 	{
+ 		"xadd/w check unaligned pkt",
+ 		.insns = {
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data)),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ 				    offsetof(struct xdp_md, data_end)),
+ 			BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ 			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ 			BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
+ 			BPF_MOV64_IMM(BPF_REG_0, 99),
+ 			BPF_JMP_IMM(BPF_JA, 0, 0, 6),
+ 			BPF_MOV64_IMM(BPF_REG_0, 1),
+ 			BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ 			BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
+ 			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
+ 			BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
+ 			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
+ 			BPF_EXIT_INSN(),
+ 		},
+ 		.result = REJECT,
+ 		.errstr = "BPF_XADD stores into R2 packet",
+ 		.prog_type = BPF_PROG_TYPE_XDP,
+ 	},
  };
  
  static int probe_filter_length(const struct bpf_insn *fp)
@@@ -11336,61 -11245,16 +11393,61 @@@ static int create_map(uint32_t size_val
  	return fd;
  }
  
 +static int create_prog_dummy1(void)
 +{
 +	struct bpf_insn prog[] = {
 +		BPF_MOV64_IMM(BPF_REG_0, 42),
 +		BPF_EXIT_INSN(),
 +	};
 +
 +	return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
 +				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
 +}
 +
 +static int create_prog_dummy2(int mfd, int idx)
 +{
 +	struct bpf_insn prog[] = {
 +		BPF_MOV64_IMM(BPF_REG_3, idx),
 +		BPF_LD_MAP_FD(BPF_REG_2, mfd),
 +		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
 +			     BPF_FUNC_tail_call),
 +		BPF_MOV64_IMM(BPF_REG_0, 41),
 +		BPF_EXIT_INSN(),
 +	};
 +
 +	return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
 +				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
 +}
 +
  static int create_prog_array(void)
  {
 -	int fd;
 +	int p1key = 0, p2key = 1;
 +	int mfd, p1fd, p2fd;
  
 -	fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
 -			    sizeof(int), 4, 0);
 -	if (fd < 0)
 +	mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
 +			     sizeof(int), 4, 0);
 +	if (mfd < 0) {
  		printf("Failed to create prog array '%s'!\n", strerror(errno));
 +		return -1;
 +	}
  
 -	return fd;
 +	p1fd = create_prog_dummy1();
 +	p2fd = create_prog_dummy2(mfd, p2key);
 +	if (p1fd < 0 || p2fd < 0)
 +		goto out;
 +	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
 +		goto out;
 +	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
 +		goto out;
 +	close(p2fd);
 +	close(p1fd);
 +
 +	return mfd;
 +out:
 +	close(p2fd);
 +	close(p1fd);
 +	close(mfd);
 +	return -1;
  }
  
  static int create_map_in_map(void)
@@@ -11511,8 -11375,7 +11568,8 @@@ static void do_test_single(struct bpf_t
  			goto fail_log;
  		}
  		if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
 -			printf("FAIL\nUnexpected error message!\n");
 +			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
 +			      expected_err, bpf_vlog);
  			goto fail_log;
  		}
  	}
@@@ -11596,20 -11459,9 +11653,20 @@@ out
  	return ret;
  }
  
 +static void get_unpriv_disabled()
 +{
 +	char buf[2];
 +	FILE *fd;
 +
 +	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
 +	if (fgets(buf, 2, fd) == buf && atoi(buf))
 +		unpriv_disabled = true;
 +	fclose(fd);
 +}
 +
  static int do_test(bool unpriv, unsigned int from, unsigned int to)
  {
 -	int i, passes = 0, errors = 0;
 +	int i, passes = 0, errors = 0, skips = 0;
  
  	for (i = from; i < to; i++) {
  		struct bpf_test *test = &tests[i];
@@@ -11617,10 -11469,7 +11674,10 @@@
  		/* Program types that are not supported by non-root we
  		 * skip right away.
  		 */
 -		if (!test->prog_type) {
 +		if (!test->prog_type && unpriv_disabled) {
 +			printf("#%d/u %s SKIP\n", i, test->descr);
 +			skips++;
 +		} else if (!test->prog_type) {
  			if (!unpriv)
  				set_admin(false);
  			printf("#%d/u %s ", i, test->descr);
@@@ -11629,22 -11478,20 +11686,22 @@@
  				set_admin(true);
  		}
  
 -		if (!unpriv) {
 +		if (unpriv) {
 +			printf("#%d/p %s SKIP\n", i, test->descr);
 +			skips++;
 +		} else {
  			printf("#%d/p %s ", i, test->descr);
  			do_test_single(test, false, &passes, &errors);
  		}
  	}
  
 -	printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
 +	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
 +	       skips, errors);
  	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
  }
  
  int main(int argc, char **argv)
  {
 -	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
 -	struct rlimit rlim = { 1 << 20, 1 << 20 };
  	unsigned int from = 0, to = ARRAY_SIZE(tests);
  	bool unpriv = !is_admin();
  
@@@ -11665,12 -11512,6 +11722,12 @@@
  		}
  	}
  
 -	setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf);
 +	get_unpriv_disabled();
 +	if (unpriv && unpriv_disabled) {
 +		printf("Cannot run as unprivileged user with sysctl %s.\n",
 +		       UNPRIV_SYSCTL);
 +		return EXIT_FAILURE;
 +	}
 +
  	return do_test(unpriv, from, to);
  }

-- 
LinuxNextTracking


More information about the linux-merge mailing list