[linux-next] LinuxNextTracking branch, master, updated. next-20190403

batman at open-mesh.org batman at open-mesh.org
Thu Apr 4 00:17:52 CEST 2019


The following commit has been merged in the master branch:
commit f76c95964ca77a27d042f3f373b1f9f063698ed1
Merge: 36afed2478bb1b3aead11600a32fdd82bd77c1a6 3eed52842b9fd291233c15f65fed34c5d3241183
Author: Stephen Rothwell <sfr at canb.auug.org.au>
Date:   Wed Apr 3 09:55:54 2019 +1100

    Merge remote-tracking branch 'net-next/master'

diff --combined MAINTAINERS
index 99a7558f77ec,c1e2f4070aa5..1ad50ca52ff4
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -816,14 -816,6 +816,14 @@@ F:	drivers/gpu/drm/amd/include/vi_struc
  F:	drivers/gpu/drm/amd/include/v9_structs.h
  F:	include/uapi/linux/kfd_ioctl.h
  
 +AMD MP2 I2C DRIVER
 +M:	Elie Morisse <syniurge at gmail.com>
 +M:	Nehal Shah <nehal-bakulchandra.shah at amd.com>
 +M:	Shyam Sundar S K <shyam-sundar.s-k at amd.com>
 +L:	linux-i2c at vger.kernel.org
 +S:	Maintained
 +F:	drivers/i2c/busses/i2c-amd-mp2*
 +
  AMD POWERPLAY
  M:	Rex Zhu <rex.zhu at amd.com>
  M:	Evan Quan <evan.quan at amd.com>
@@@ -2364,7 -2356,7 +2364,7 @@@ F:	arch/arm/mm/cache-uniphier.
  F:	arch/arm64/boot/dts/socionext/uniphier*
  F:	drivers/bus/uniphier-system-bus.c
  F:	drivers/clk/uniphier/
 -F:	drivers/dmaengine/uniphier-mdmac.c
 +F:	drivers/dma/uniphier-mdmac.c
  F:	drivers/gpio/gpio-uniphier.c
  F:	drivers/i2c/busses/i2c-uniphier*
  F:	drivers/irqchip/irq-uniphier-aidet.c
@@@ -2801,10 -2793,13 +2801,13 @@@ M:	Simon Wunderlich <sw at simonwunderlich
  M:	Antonio Quartulli <a at unstable.cc>
  L:	b.a.t.m.a.n at lists.open-mesh.org (moderated for non-subscribers)
  W:	https://www.open-mesh.org/
+ B:	https://www.open-mesh.org/projects/batman-adv/issues
+ C:	irc://chat.freenode.net/batman
  Q:	https://patchwork.open-mesh.org/project/batman/list/
+ T:	git https://git.open-mesh.org/linux-merge.git
  S:	Maintained
- F:	Documentation/ABI/testing/sysfs-class-net-batman-adv
- F:	Documentation/ABI/testing/sysfs-class-net-mesh
+ F:	Documentation/ABI/obsolete/sysfs-class-net-batman-adv
+ F:	Documentation/ABI/obsolete/sysfs-class-net-mesh
  F:	Documentation/networking/batman-adv.rst
  F:	include/uapi/linux/batadv_packet.h
  F:	include/uapi/linux/batman_adv.h
@@@ -3736,8 -3731,8 +3739,8 @@@ F:	scripts/checkpatch.p
  
  CHINESE DOCUMENTATION
  M:	Harry Wei <harryxiyou at gmail.com>
 +M:	Alex Shi <alex.shi at linux.alibaba.com>
  L:	xiyoulinuxkernelgroup at googlegroups.com (subscribers-only)
 -L:	linux-kernel at zh-kernel.org (moderated for non-subscribers)
  S:	Maintained
  F:	Documentation/translations/zh_CN/
  
@@@ -3804,7 -3799,6 +3807,7 @@@ M:	Richard Fitzgerald <rf at opensource.ci
  L:	patches at opensource.cirrus.com
  S:	Supported
  F:	drivers/clk/clk-lochnagar.c
 +F:	drivers/hwmon/lochnagar-hwmon.c
  F:	drivers/mfd/lochnagar-i2c.c
  F:	drivers/pinctrl/cirrus/pinctrl-lochnagar.c
  F:	drivers/regulator/lochnagar-regulator.c
@@@ -3813,10 -3807,8 +3816,10 @@@ F:	include/dt-bindings/pinctrl/lochnaga
  F:	include/linux/mfd/lochnagar*
  F:	Documentation/devicetree/bindings/mfd/cirrus,lochnagar.txt
  F:	Documentation/devicetree/bindings/clock/cirrus,lochnagar.txt
 +F:	Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.txt
  F:	Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.txt
  F:	Documentation/devicetree/bindings/regulator/cirrus,lochnagar.txt
 +F:	Documentation/hwmon/lochnagar
  
  CISCO FCOE HBA DRIVER
  M:	Satish Kharat <satishkh at cisco.com>
@@@ -4257,7 -4249,7 +4260,7 @@@ S:	Supporte
  F:	drivers/scsi/cxgbi/cxgb3i
  
  CXGB3 IWARP RNIC DRIVER (IW_CXGB3)
 -M:	Steve Wise <swise at chelsio.com>
 +M:	Potnuri Bharat Teja <bharat at chelsio.com>
  L:	linux-rdma at vger.kernel.org
  W:	http://www.openfabrics.org
  S:	Supported
@@@ -4286,7 -4278,7 +4289,7 @@@ S:	Supporte
  F:	drivers/scsi/cxgbi/cxgb4i
  
  CXGB4 IWARP RNIC DRIVER (IW_CXGB4)
 -M:	Steve Wise <swise at chelsio.com>
 +M:	Potnuri Bharat Teja <bharat at chelsio.com>
  L:	linux-rdma at vger.kernel.org
  W:	http://www.openfabrics.org
  S:	Supported
@@@ -5844,7 -5836,7 +5847,7 @@@ L:	netdev at vger.kernel.or
  S:	Maintained
  F:	Documentation/ABI/testing/sysfs-bus-mdio
  F:	Documentation/devicetree/bindings/net/mdio*
 -F:	Documentation/networking/phy.txt
 +F:	Documentation/networking/phy.rst
  F:	drivers/net/phy/
  F:	drivers/of/of_mdio.c
  F:	drivers/of/of_net.c
@@@ -6419,6 -6411,7 +6422,6 @@@ L:	linux-kernel at vger.kernel.or
  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
  S:	Maintained
  F:	kernel/futex.c
 -F:	kernel/futex_compat.c
  F:	include/asm-generic/futex.h
  F:	include/linux/futex.h
  F:	include/uapi/linux/futex.h
@@@ -7663,10 -7656,6 +7666,10 @@@ F:	drivers/infiniband
  F:	include/uapi/linux/if_infiniband.h
  F:	include/uapi/rdma/
  F:	include/rdma/
 +F:	include/trace/events/ib_mad.h
 +F:	include/trace/events/ib_umad.h
 +F:	samples/bpf/ibumad_kern.c
 +F:	samples/bpf/ibumad_user.c
  
  INGENIC JZ4780 DMA Driver
  M:	Zubair Lutfullah Kakakhel <Zubair.Kakakhel at imgtec.com>
@@@ -9890,15 -9879,6 +9893,6 @@@ F:	drivers/net/ethernet/mellanox/mlx5/c
  F:	drivers/net/ethernet/mellanox/mlx5/core/fpga/*
  F:	include/linux/mlx5/mlx5_ifc_fpga.h
  
- MELLANOX ETHERNET INNOVA IPSEC DRIVER
- R:	Boris Pismenny <borisp at mellanox.com>
- L:	netdev at vger.kernel.org
- S:	Supported
- W:	http://www.mellanox.com
- Q:	http://patchwork.ozlabs.org/project/netdev/list/
- F:	drivers/net/ethernet/mellanox/mlx5/core/en_ipsec/*
- F:	drivers/net/ethernet/mellanox/mlx5/core/ipsec*
- 
  MELLANOX ETHERNET SWITCH DRIVERS
  M:	Jiri Pirko <jiri at mellanox.com>
  M:	Ido Schimmel <idosch at mellanox.com>
@@@ -10126,8 -10106,7 +10120,8 @@@ MICROCHIP I2C DRIVE
  M:	Ludovic Desroches <ludovic.desroches at microchip.com>
  L:	linux-i2c at vger.kernel.org
  S:	Supported
 -F:	drivers/i2c/busses/i2c-at91.c
 +F:	drivers/i2c/busses/i2c-at91.h
 +F:	drivers/i2c/busses/i2c-at91-*.c
  
  MICROCHIP ISC DRIVER
  M:	Eugen Hristev <eugen.hristev at microchip.com>
@@@ -13997,7 -13976,7 +13991,7 @@@ F:	drivers/media/rc/serial_ir.
  SFC NETWORK DRIVER
  M:	Solarflare linux maintainers <linux-net-drivers at solarflare.com>
  M:	Edward Cree <ecree at solarflare.com>
 -M:	Bert Kenward <bkenward at solarflare.com>
 +M:	Martin Habets <mhabets at solarflare.com>
  L:	netdev at vger.kernel.org
  S:	Supported
  F:	drivers/net/ethernet/sfc/
@@@ -14350,8 -14329,9 +14344,8 @@@ SOC-CAMERA V4L2 SUBSYSTE
  L:	linux-media at vger.kernel.org
  T:	git git://linuxtv.org/media_tree.git
  S:	Orphan
 -F:	include/media/soc*
 -F:	drivers/media/i2c/soc_camera/
 -F:	drivers/media/platform/soc_camera/
 +F:	include/media/soc_camera.h
 +F:	drivers/staging/media/soc_camera/
  
  SOCIONEXT SYNQUACER I2C DRIVER
  M:	Ard Biesheuvel <ard.biesheuvel at linaro.org>
diff --combined drivers/net/ethernet/intel/fm10k/fm10k_main.c
index ecef949f3baa,e2fa112bed9a..2325cee76211
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@@ -41,8 -41,6 +41,8 @@@ static int __init fm10k_init_module(voi
  	/* create driver workqueue */
  	fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
  					  fm10k_driver_name);
 +	if (!fm10k_workqueue)
 +		return -ENOMEM;
  
  	fm10k_dbg_init();
  
@@@ -1037,7 -1035,7 +1037,7 @@@ static void fm10k_tx_map(struct fm10k_r
  	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
  
  	/* notify HW of packet */
- 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
  		writel(i, tx_ring->tail);
  
  		/* we need this if more than one processor can write to our tail
diff --combined drivers/net/ethernet/intel/igb/igb_main.c
index 3269d8e94744,32d61d5a2706..acbb5b4f333d
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@@ -2480,7 -2480,7 +2480,7 @@@ static int igb_set_features(struct net_
  	else
  		igb_reset(adapter);
  
- 	return 0;
+ 	return 1;
  }
  
  static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
@@@ -3452,6 -3452,9 +3452,9 @@@ static int igb_probe(struct pci_dev *pd
  			break;
  		}
  	}
+ 
+ 	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
+ 
  	pm_runtime_put_noidle(&pdev->dev);
  	return 0;
  
@@@ -6026,7 -6029,7 +6029,7 @@@ static int igb_tx_map(struct igb_ring *
  	/* Make sure there is space in the ring for the next send. */
  	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
  
- 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ 	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
  		writel(i, tx_ring->tail);
  
  		/* we need this if more than one processor can write to our tail
@@@ -8740,7 -8743,9 +8743,7 @@@ static int __igb_shutdown(struct pci_de
  	struct e1000_hw *hw = &adapter->hw;
  	u32 ctrl, rctl, status;
  	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
 -#ifdef CONFIG_PM
 -	int retval = 0;
 -#endif
 +	bool wake;
  
  	rtnl_lock();
  	netif_device_detach(netdev);
@@@ -8753,6 -8758,14 +8756,6 @@@
  	igb_clear_interrupt_scheme(adapter);
  	rtnl_unlock();
  
 -#ifdef CONFIG_PM
 -	if (!runtime) {
 -		retval = pci_save_state(pdev);
 -		if (retval)
 -			return retval;
 -	}
 -#endif
 -
  	status = rd32(E1000_STATUS);
  	if (status & E1000_STATUS_LU)
  		wufc &= ~E1000_WUFC_LNKC;
@@@ -8769,6 -8782,10 +8772,6 @@@
  		}
  
  		ctrl = rd32(E1000_CTRL);
 -		/* advertise wake from D3Cold */
 -		#define E1000_CTRL_ADVD3WUC 0x00100000
 -		/* phy power management enable */
 -		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
  		ctrl |= E1000_CTRL_ADVD3WUC;
  		wr32(E1000_CTRL, ctrl);
  
@@@ -8782,15 -8799,12 +8785,15 @@@
  		wr32(E1000_WUFC, 0);
  	}
  
 -	*enable_wake = wufc || adapter->en_mng_pt;
 -	if (!*enable_wake)
 +	wake = wufc || adapter->en_mng_pt;
 +	if (!wake)
  		igb_power_down_link(adapter);
  	else
  		igb_power_up_link(adapter);
  
 +	if (enable_wake)
 +		*enable_wake = wake;
 +
  	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
  	 * would have already happened in close and is redundant.
  	 */
@@@ -8833,7 -8847,22 +8836,7 @@@ static void igb_deliver_wake_packet(str
  
  static int __maybe_unused igb_suspend(struct device *dev)
  {
 -	int retval;
 -	bool wake;
 -	struct pci_dev *pdev = to_pci_dev(dev);
 -
 -	retval = __igb_shutdown(pdev, &wake, 0);
 -	if (retval)
 -		return retval;
 -
 -	if (wake) {
 -		pci_prepare_to_sleep(pdev);
 -	} else {
 -		pci_wake_from_d3(pdev, false);
 -		pci_set_power_state(pdev, PCI_D3hot);
 -	}
 -
 -	return 0;
 +	return __igb_shutdown(to_pci_dev(dev), NULL, 0);
  }
  
  static int __maybe_unused igb_resume(struct device *dev)
@@@ -8904,7 -8933,22 +8907,7 @@@ static int __maybe_unused igb_runtime_i
  
  static int __maybe_unused igb_runtime_suspend(struct device *dev)
  {
 -	struct pci_dev *pdev = to_pci_dev(dev);
 -	int retval;
 -	bool wake;
 -
 -	retval = __igb_shutdown(pdev, &wake, 1);
 -	if (retval)
 -		return retval;
 -
 -	if (wake) {
 -		pci_prepare_to_sleep(pdev);
 -	} else {
 -		pci_wake_from_d3(pdev, false);
 -		pci_set_power_state(pdev, PCI_D3hot);
 -	}
 -
 -	return 0;
 +	return __igb_shutdown(to_pci_dev(dev), NULL, 1);
  }
  
  static int __maybe_unused igb_runtime_resume(struct device *dev)
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index 4ab0d030b544,b0ce68feb0f3..633b117eb13e
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@@ -122,9 -122,7 +122,9 @@@ out
  	return err;
  }
  
 -/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */
 +/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B])
 + * minimum speed value is 40Gbps
 + */
  static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
  {
  	u32 speed;
@@@ -132,9 -130,10 +132,9 @@@
  	int err;
  
  	err = mlx5e_port_linkspeed(priv->mdev, &speed);
 -	if (err) {
 -		mlx5_core_warn(priv->mdev, "cannot get port speed\n");
 -		return 0;
 -	}
 +	if (err)
 +		speed = SPEED_40000;
 +	speed = max_t(u32, speed, SPEED_40000);
  
  	xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
  
@@@ -143,7 -142,7 +143,7 @@@
  }
  
  static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
 -				 u32 xoff, unsigned int mtu)
 +				 u32 xoff, unsigned int max_mtu)
  {
  	int i;
  
@@@ -155,37 -154,36 +155,37 @@@
  		}
  
  		if (port_buffer->buffer[i].size <
 -		    (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
 +		    (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
  			return -ENOMEM;
  
  		port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
 -		port_buffer->buffer[i].xon  = port_buffer->buffer[i].xoff - mtu;
 +		port_buffer->buffer[i].xon  =
 +			port_buffer->buffer[i].xoff - max_mtu;
  	}
  
  	return 0;
  }
  
  /**
-  * update_buffer_lossy()
-  *   max_mtu: netdev's max_mtu
-  *   pfc_en: <input> current pfc configuration
-  *   buffer: <input> current prio to buffer mapping
-  *   xoff:   <input> xoff value
-  *   port_buffer: <output> port receive buffer configuration
-  *   change: <output>
+  *	update_buffer_lossy	- Update buffer configuration based on pfc
 - *	@mtu: device's MTU
++ *	@max_mtu: netdev's max_mtu
+  *	@pfc_en: <input> current pfc configuration
+  *	@buffer: <input> current prio to buffer mapping
+  *	@xoff:   <input> xoff value
+  *	@port_buffer: <output> port receive buffer configuration
+  *	@change: <output>
   *
-  *   Update buffer configuration based on pfc configuraiton and priority
-  *   to buffer mapping.
-  *   Buffer's lossy bit is changed to:
-  *     lossless if there is at least one PFC enabled priority mapped to this buffer
-  *     lossy if all priorities mapped to this buffer are PFC disabled
+  *	Update buffer configuration based on pfc configuraiton and
+  *	priority to buffer mapping.
+  *	Buffer's lossy bit is changed to:
+  *		lossless if there is at least one PFC enabled priority
+  *		mapped to this buffer lossy if all priorities mapped to
+  *		this buffer are PFC disabled
   *
-  *   Return:
-  *     Return 0 if no error.
-  *     Set change to true if buffer configuration is modified.
+  *	@return: 0 if no error,
+  *	sets change to true if buffer configuration was modified.
   */
 -static int update_buffer_lossy(unsigned int mtu,
 +static int update_buffer_lossy(unsigned int max_mtu,
  			       u8 pfc_en, u8 *buffer, u32 xoff,
  			       struct mlx5e_port_buffer *port_buffer,
  			       bool *change)
@@@ -222,7 -220,7 +222,7 @@@
  	}
  
  	if (changed) {
 -		err = update_xoff_threshold(port_buffer, xoff, mtu);
 +		err = update_xoff_threshold(port_buffer, xoff, max_mtu);
  		if (err)
  			return err;
  
@@@ -232,7 -230,6 +232,7 @@@
  	return 0;
  }
  
 +#define MINIMUM_MAX_MTU 9216
  int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
  				    u32 change, unsigned int mtu,
  				    struct ieee_pfc *pfc,
@@@ -244,14 -241,12 +244,14 @@@
  	bool update_prio2buffer = false;
  	u8 buffer[MLX5E_MAX_PRIORITY];
  	bool update_buffer = false;
 +	unsigned int max_mtu;
  	u32 total_used = 0;
  	u8 curr_pfc_en;
  	int err;
  	int i;
  
  	mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
 +	max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
  
  	err = mlx5e_port_query_buffer(priv, &port_buffer);
  	if (err)
@@@ -259,7 -254,7 +259,7 @@@
  
  	if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
  		update_buffer = true;
 -		err = update_xoff_threshold(&port_buffer, xoff, mtu);
 +		err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
  		if (err)
  			return err;
  	}
@@@ -269,7 -264,7 +269,7 @@@
  		if (err)
  			return err;
  
 -		err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff,
 +		err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
  					  &port_buffer, &update_buffer);
  		if (err)
  			return err;
@@@ -281,8 -276,8 +281,8 @@@
  		if (err)
  			return err;
  
 -		err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff,
 -					  &port_buffer, &update_buffer);
 +		err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
 +					  xoff, &port_buffer, &update_buffer);
  		if (err)
  			return err;
  	}
@@@ -306,7 -301,7 +306,7 @@@
  			return -EINVAL;
  
  		update_buffer = true;
 -		err = update_xoff_threshold(&port_buffer, xoff, mtu);
 +		err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
  		if (err)
  			return err;
  	}
@@@ -314,7 -309,7 +314,7 @@@
  	/* Need to update buffer configuration if xoff value is changed */
  	if (!update_buffer && xoff != priv->dcbx.xoff) {
  		update_buffer = true;
 -		err = update_xoff_threshold(&port_buffer, xoff, mtu);
 +		err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
  		if (err)
  			return err;
  	}
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index d75dc44eb2ff,2fd425a7b156..ffc4a36551c8
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@@ -44,6 -44,7 +44,7 @@@
  #include <net/tc_act/tc_pedit.h>
  #include <net/tc_act/tc_csum.h>
  #include <net/arp.h>
+ #include <net/ipv6_stubs.h>
  #include "en.h"
  #include "en_rep.h"
  #include "en_tc.h"
@@@ -1827,6 -1828,7 +1828,7 @@@ static int parse_cls_flower(struct mlx5
  
  struct pedit_headers {
  	struct ethhdr  eth;
+ 	struct vlan_hdr vlan;
  	struct iphdr   ip4;
  	struct ipv6hdr ip6;
  	struct tcphdr  tcp;
@@@ -1884,6 -1886,7 +1886,7 @@@ static struct mlx5_fields fields[] = 
  	OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
  	OFFLOAD(SMAC_15_0,  2, eth.h_source[4], 0),
  	OFFLOAD(ETHERTYPE,  2, eth.h_proto, 0),
+ 	OFFLOAD(FIRST_VID,  2, vlan.h_vlan_TCI, 0),
  
  	OFFLOAD(IP_TTL, 1, ip4.ttl,   0),
  	OFFLOAD(SIPV4,  4, ip4.saddr, 0),
@@@ -2158,52 -2161,6 +2161,52 @@@ static bool csum_offload_supported(stru
  	return true;
  }
  
 +struct ip_ttl_word {
 +	__u8	ttl;
 +	__u8	protocol;
 +	__sum16	check;
 +};
 +
 +struct ipv6_hoplimit_word {
 +	__be16	payload_len;
 +	__u8	nexthdr;
 +	__u8	hop_limit;
 +};
 +
 +static bool is_action_keys_supported(const struct flow_action_entry *act)
 +{
 +	u32 mask, offset;
 +	u8 htype;
 +
 +	htype = act->mangle.htype;
 +	offset = act->mangle.offset;
 +	mask = ~act->mangle.mask;
 +	/* For IPv4 & IPv6 header check 4 byte word,
 +	 * to determine that modified fields
 +	 * are NOT ttl & hop_limit only.
 +	 */
 +	if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
 +		struct ip_ttl_word *ttl_word =
 +			(struct ip_ttl_word *)&mask;
 +
 +		if (offset != offsetof(struct iphdr, ttl) ||
 +		    ttl_word->protocol ||
 +		    ttl_word->check) {
 +			return true;
 +		}
 +	} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
 +		struct ipv6_hoplimit_word *hoplimit_word =
 +			(struct ipv6_hoplimit_word *)&mask;
 +
 +		if (offset != offsetof(struct ipv6hdr, payload_len) ||
 +		    hoplimit_word->payload_len ||
 +		    hoplimit_word->nexthdr) {
 +			return true;
 +		}
 +	}
 +	return false;
 +}
 +
  static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
  					  struct flow_action *flow_action,
  					  u32 actions,
@@@ -2211,9 -2168,9 +2214,9 @@@
  {
  	const struct flow_action_entry *act;
  	bool modify_ip_header;
 -	u8 htype, ip_proto;
  	void *headers_v;
  	u16 ethertype;
 +	u8 ip_proto;
  	int i;
  
  	if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
@@@ -2233,7 -2190,9 +2236,7 @@@
  		    act->id != FLOW_ACTION_ADD)
  			continue;
  
 -		htype = act->mangle.htype;
 -		if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 ||
 -		    htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
 +		if (is_action_keys_supported(act)) {
  			modify_ip_header = true;
  			break;
  		}
@@@ -2291,6 -2250,35 +2294,35 @@@ static bool same_hw_devs(struct mlx5e_p
  	return (fsystem_guid == psystem_guid);
  }
  
+ static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
+ 				   const struct flow_action_entry *act,
+ 				   struct mlx5e_tc_flow_parse_attr *parse_attr,
+ 				   struct pedit_headers_action *hdrs,
+ 				   u32 *action, struct netlink_ext_ack *extack)
+ {
+ 	u16 mask16 = VLAN_VID_MASK;
+ 	u16 val16 = act->vlan.vid & VLAN_VID_MASK;
+ 	const struct flow_action_entry pedit_act = {
+ 		.id = FLOW_ACTION_MANGLE,
+ 		.mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
+ 		.mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
+ 		.mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
+ 		.mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
+ 	};
+ 	int err;
+ 
+ 	if (act->vlan.prio) {
+ 		NL_SET_ERR_MSG_MOD(extack, "Setting VLAN prio is not supported");
+ 		return -EOPNOTSUPP;
+ 	}
+ 
+ 	err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr,
+ 				    hdrs, NULL);
+ 	*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ 
+ 	return err;
+ }
+ 
  static int parse_tc_nic_actions(struct mlx5e_priv *priv,
  				struct flow_action *flow_action,
  				struct mlx5e_tc_flow_parse_attr *parse_attr,
@@@ -2326,6 -2314,15 +2358,15 @@@
  			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
  				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
  			break;
+ 		case FLOW_ACTION_VLAN_MANGLE:
+ 			err = add_vlan_rewrite_action(priv,
+ 						      MLX5_FLOW_NAMESPACE_KERNEL,
+ 						      act, parse_attr, hdrs,
+ 						      &action, extack);
+ 			if (err)
+ 				return err;
+ 
+ 			break;
  		case FLOW_ACTION_CSUM:
  			if (csum_offload_supported(priv, action,
  						   act->csum_flags,
@@@ -2384,22 -2381,15 +2425,22 @@@
  	return 0;
  }
  
 -static inline int cmp_encap_info(struct ip_tunnel_key *a,
 -				 struct ip_tunnel_key *b)
 +struct encap_key {
 +	struct ip_tunnel_key *ip_tun_key;
 +	int tunnel_type;
 +};
 +
 +static inline int cmp_encap_info(struct encap_key *a,
 +				 struct encap_key *b)
  {
 -	return memcmp(a, b, sizeof(*a));
 +	return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
 +	       a->tunnel_type != b->tunnel_type;
  }
  
 -static inline int hash_encap_info(struct ip_tunnel_key *key)
 +static inline int hash_encap_info(struct encap_key *key)
  {
 -	return jhash(key, sizeof(*key), 0);
 +	return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
 +		     key->tunnel_type);
  }
  
  
@@@ -2430,7 -2420,7 +2471,7 @@@ static int mlx5e_attach_encap(struct ml
  	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
  	struct mlx5e_tc_flow_parse_attr *parse_attr;
  	struct ip_tunnel_info *tun_info;
 -	struct ip_tunnel_key *key;
 +	struct encap_key key, e_key;
  	struct mlx5e_encap_entry *e;
  	unsigned short family;
  	uintptr_t hash_key;
@@@ -2440,16 -2430,13 +2481,16 @@@
  	parse_attr = attr->parse_attr;
  	tun_info = &parse_attr->tun_info[out_index];
  	family = ip_tunnel_info_af(tun_info);
 -	key = &tun_info->key;
 +	key.ip_tun_key = &tun_info->key;
 +	key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev);
  
 -	hash_key = hash_encap_info(key);
 +	hash_key = hash_encap_info(&key);
  
  	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
  				   encap_hlist, hash_key) {
 -		if (!cmp_encap_info(&e->tun_info.key, key)) {
 +		e_key.ip_tun_key = &e->tun_info.key;
 +		e_key.tunnel_type = e->tunnel_type;
 +		if (!cmp_encap_info(&e_key, &key)) {
  			found = true;
  			break;
  		}
@@@ -2544,8 -2531,7 +2585,7 @@@ static int parse_tc_vlan_action(struct 
  		}
  		break;
  	default:
- 		/* action is FLOW_ACT_VLAN_MANGLE */
- 		return -EOPNOTSUPP;
+ 		return -EINVAL;
  	}
  
  	attr->total_vlan = vlan_idx + 1;
@@@ -2679,7 -2665,27 +2719,27 @@@ static int parse_tc_fdb_actions(struct 
  			break;
  		case FLOW_ACTION_VLAN_PUSH:
  		case FLOW_ACTION_VLAN_POP:
- 			err = parse_tc_vlan_action(priv, act, attr, &action);
+ 			if (act->id == FLOW_ACTION_VLAN_PUSH &&
+ 			    (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
+ 				/* Replace vlan pop+push with vlan modify */
+ 				action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ 				err = add_vlan_rewrite_action(priv,
+ 							      MLX5_FLOW_NAMESPACE_FDB,
+ 							      act, parse_attr, hdrs,
+ 							      &action, extack);
+ 			} else {
+ 				err = parse_tc_vlan_action(priv, act, attr, &action);
+ 			}
+ 			if (err)
+ 				return err;
+ 
+ 			attr->split_count = attr->out_count;
+ 			break;
+ 		case FLOW_ACTION_VLAN_MANGLE:
+ 			err = add_vlan_rewrite_action(priv,
+ 						      MLX5_FLOW_NAMESPACE_FDB,
+ 						      act, parse_attr, hdrs,
+ 						      &action, extack);
  			if (err)
  				return err;
  
@@@ -2711,7 -2717,7 +2771,7 @@@
  
  	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
  	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
 -		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
 +		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
  					    parse_attr, hdrs, extack);
  		if (err)
  			return err;
diff --combined drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 9b2d78ee22b8,6c72f33f6d09..fe770cd2151c
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@@ -1287,13 -1287,13 +1287,13 @@@ void esw_offloads_cleanup_reps(struct m
  
  int esw_offloads_init_reps(struct mlx5_eswitch *esw)
  {
- 	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
+ 	int total_vports = MLX5_TOTAL_VPORTS(esw->dev);
  	struct mlx5_core_dev *dev = esw->dev;
  	struct mlx5_eswitch_rep *rep;
  	u8 hw_id[ETH_ALEN], rep_type;
  	int vport;
  
- 	esw->offloads.vport_reps = kcalloc(total_vfs,
+ 	esw->offloads.vport_reps = kcalloc(total_vports,
  					   sizeof(struct mlx5_eswitch_rep),
  					   GFP_KERNEL);
  	if (!esw->offloads.vport_reps)
@@@ -1523,8 -1523,6 +1523,6 @@@ static int mlx5_esw_offloads_pair(struc
  	return 0;
  }
  
- void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
- 
  static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
  {
  	mlx5e_tc_clean_fdb_peer_flows(esw);
@@@ -1611,7 -1609,6 +1609,7 @@@ static int esw_offloads_steering_init(s
  {
  	int err;
  
 +	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
  	mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
  
  	err = esw_create_offloads_fdb_tables(esw, nvports);
diff --combined drivers/net/ethernet/netronome/nfp/flower/action.c
index e336f6ee94f5,6e2a6caec3fb..c56e31d9f8a4
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@@ -48,7 -48,8 +48,7 @@@ nfp_fl_push_vlan(struct nfp_fl_push_vla
  
  	tmp_push_vlan_tci =
  		FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
 -		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) |
 -		NFP_FL_PUSH_VLAN_CFI;
 +		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
  	push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
  }
  
@@@ -160,9 -161,9 +160,9 @@@ nfp_fl_get_tun_from_act_l4_port(struct 
  	struct nfp_flower_priv *priv = app->priv;
  
  	switch (tun->key.tp_dst) {
- 	case htons(NFP_FL_VXLAN_PORT):
+ 	case htons(IANA_VXLAN_UDP_PORT):
  		return NFP_FL_TUNNEL_VXLAN;
- 	case htons(NFP_FL_GENEVE_PORT):
+ 	case htons(GENEVE_UDP_PORT):
  		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
  			return NFP_FL_TUNNEL_GENEVE;
  		/* FALLTHROUGH */
@@@ -582,60 -583,23 +582,23 @@@ static u32 nfp_fl_csum_l4_to_flag(u8 ip
  	}
  }
  
- static int
- nfp_fl_pedit(const struct flow_action_entry *act,
- 	     struct tc_cls_flower_offload *flow,
- 	     char *nfp_action, int *a_len, u32 *csum_updated)
- {
- 	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+ struct nfp_flower_pedit_acts {
  	struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
  	struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
  	struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
  	struct nfp_fl_set_ip4_addrs set_ip_addr;
- 	enum flow_action_mangle_base htype;
  	struct nfp_fl_set_tport set_tport;
  	struct nfp_fl_set_eth set_eth;
+ };
+ 
+ static int
+ nfp_fl_commit_mangle(struct tc_cls_flower_offload *flow, char *nfp_action,
+ 		     int *a_len, struct nfp_flower_pedit_acts *set_act,
+ 		     u32 *csum_updated)
+ {
+ 	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
  	size_t act_size = 0;
  	u8 ip_proto = 0;
- 	u32 offset;
- 	int err;
- 
- 	memset(&set_ip6_tc_hl_fl, 0, sizeof(set_ip6_tc_hl_fl));
- 	memset(&set_ip_ttl_tos, 0, sizeof(set_ip_ttl_tos));
- 	memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
- 	memset(&set_ip6_src, 0, sizeof(set_ip6_src));
- 	memset(&set_ip_addr, 0, sizeof(set_ip_addr));
- 	memset(&set_tport, 0, sizeof(set_tport));
- 	memset(&set_eth, 0, sizeof(set_eth));
- 
- 	htype = act->mangle.htype;
- 	offset = act->mangle.offset;
- 
- 	switch (htype) {
- 	case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
- 		err = nfp_fl_set_eth(act, offset, &set_eth);
- 		break;
- 	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
- 		err = nfp_fl_set_ip4(act, offset, &set_ip_addr,
- 				     &set_ip_ttl_tos);
- 		break;
- 	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
- 		err = nfp_fl_set_ip6(act, offset, &set_ip6_dst,
- 				     &set_ip6_src, &set_ip6_tc_hl_fl);
- 		break;
- 	case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
- 		err = nfp_fl_set_tport(act, offset, &set_tport,
- 				       NFP_FL_ACTION_OPCODE_SET_TCP);
- 		break;
- 	case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
- 		err = nfp_fl_set_tport(act, offset, &set_tport,
- 				       NFP_FL_ACTION_OPCODE_SET_UDP);
- 		break;
- 	default:
- 		return -EOPNOTSUPP;
- 	}
- 	if (err)
- 		return err;
  
  	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
  		struct flow_match_basic match;
@@@ -644,77 -608,82 +607,82 @@@
  		ip_proto = match.key->ip_proto;
  	}
  
- 	if (set_eth.head.len_lw) {
- 		act_size = sizeof(set_eth);
- 		memcpy(nfp_action, &set_eth, act_size);
+ 	if (set_act->set_eth.head.len_lw) {
+ 		act_size = sizeof(set_act->set_eth);
+ 		memcpy(nfp_action, &set_act->set_eth, act_size);
  		*a_len += act_size;
  	}
- 	if (set_ip_ttl_tos.head.len_lw) {
+ 
+ 	if (set_act->set_ip_ttl_tos.head.len_lw) {
  		nfp_action += act_size;
- 		act_size = sizeof(set_ip_ttl_tos);
- 		memcpy(nfp_action, &set_ip_ttl_tos, act_size);
+ 		act_size = sizeof(set_act->set_ip_ttl_tos);
+ 		memcpy(nfp_action, &set_act->set_ip_ttl_tos, act_size);
  		*a_len += act_size;
  
  		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
  		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
  				nfp_fl_csum_l4_to_flag(ip_proto);
  	}
- 	if (set_ip_addr.head.len_lw) {
+ 
+ 	if (set_act->set_ip_addr.head.len_lw) {
  		nfp_action += act_size;
- 		act_size = sizeof(set_ip_addr);
- 		memcpy(nfp_action, &set_ip_addr, act_size);
+ 		act_size = sizeof(set_act->set_ip_addr);
+ 		memcpy(nfp_action, &set_act->set_ip_addr, act_size);
  		*a_len += act_size;
  
  		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
  		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
  				nfp_fl_csum_l4_to_flag(ip_proto);
  	}
- 	if (set_ip6_tc_hl_fl.head.len_lw) {
+ 
+ 	if (set_act->set_ip6_tc_hl_fl.head.len_lw) {
  		nfp_action += act_size;
- 		act_size = sizeof(set_ip6_tc_hl_fl);
- 		memcpy(nfp_action, &set_ip6_tc_hl_fl, act_size);
+ 		act_size = sizeof(set_act->set_ip6_tc_hl_fl);
+ 		memcpy(nfp_action, &set_act->set_ip6_tc_hl_fl, act_size);
  		*a_len += act_size;
  
  		/* Hardware will automatically fix TCP/UDP checksum. */
  		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
  	}
- 	if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
+ 
+ 	if (set_act->set_ip6_dst.head.len_lw &&
+ 	    set_act->set_ip6_src.head.len_lw) {
  		/* TC compiles set src and dst IPv6 address as a single action,
  		 * the hardware requires this to be 2 separate actions.
  		 */
  		nfp_action += act_size;
- 		act_size = sizeof(set_ip6_src);
- 		memcpy(nfp_action, &set_ip6_src, act_size);
+ 		act_size = sizeof(set_act->set_ip6_src);
+ 		memcpy(nfp_action, &set_act->set_ip6_src, act_size);
  		*a_len += act_size;
  
- 		act_size = sizeof(set_ip6_dst);
- 		memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
- 		       act_size);
+ 		act_size = sizeof(set_act->set_ip6_dst);
+ 		memcpy(&nfp_action[sizeof(set_act->set_ip6_src)],
+ 		       &set_act->set_ip6_dst, act_size);
  		*a_len += act_size;
  
  		/* Hardware will automatically fix TCP/UDP checksum. */
  		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
- 	} else if (set_ip6_dst.head.len_lw) {
+ 	} else if (set_act->set_ip6_dst.head.len_lw) {
  		nfp_action += act_size;
- 		act_size = sizeof(set_ip6_dst);
- 		memcpy(nfp_action, &set_ip6_dst, act_size);
+ 		act_size = sizeof(set_act->set_ip6_dst);
+ 		memcpy(nfp_action, &set_act->set_ip6_dst, act_size);
  		*a_len += act_size;
  
  		/* Hardware will automatically fix TCP/UDP checksum. */
  		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
- 	} else if (set_ip6_src.head.len_lw) {
+ 	} else if (set_act->set_ip6_src.head.len_lw) {
  		nfp_action += act_size;
- 		act_size = sizeof(set_ip6_src);
- 		memcpy(nfp_action, &set_ip6_src, act_size);
+ 		act_size = sizeof(set_act->set_ip6_src);
+ 		memcpy(nfp_action, &set_act->set_ip6_src, act_size);
  		*a_len += act_size;
  
  		/* Hardware will automatically fix TCP/UDP checksum. */
  		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
  	}
- 	if (set_tport.head.len_lw) {
+ 	if (set_act->set_tport.head.len_lw) {
  		nfp_action += act_size;
- 		act_size = sizeof(set_tport);
- 		memcpy(nfp_action, &set_tport, act_size);
+ 		act_size = sizeof(set_act->set_tport);
+ 		memcpy(nfp_action, &set_act->set_tport, act_size);
  		*a_len += act_size;
  
  		/* Hardware will automatically fix TCP/UDP checksum. */
@@@ -725,7 -694,40 +693,40 @@@
  }
  
  static int
- nfp_flower_output_action(struct nfp_app *app, const struct flow_action_entry *act,
+ nfp_fl_pedit(const struct flow_action_entry *act,
+ 	     struct tc_cls_flower_offload *flow, char *nfp_action, int *a_len,
+ 	     u32 *csum_updated, struct nfp_flower_pedit_acts *set_act)
+ {
+ 	enum flow_action_mangle_base htype;
+ 	u32 offset;
+ 
+ 	htype = act->mangle.htype;
+ 	offset = act->mangle.offset;
+ 
+ 	switch (htype) {
+ 	case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+ 		return nfp_fl_set_eth(act, offset, &set_act->set_eth);
+ 	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+ 		return nfp_fl_set_ip4(act, offset, &set_act->set_ip_addr,
+ 				      &set_act->set_ip_ttl_tos);
+ 	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+ 		return nfp_fl_set_ip6(act, offset, &set_act->set_ip6_dst,
+ 				      &set_act->set_ip6_src,
+ 				      &set_act->set_ip6_tc_hl_fl);
+ 	case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+ 		return nfp_fl_set_tport(act, offset, &set_act->set_tport,
+ 					NFP_FL_ACTION_OPCODE_SET_TCP);
+ 	case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+ 		return nfp_fl_set_tport(act, offset, &set_act->set_tport,
+ 					NFP_FL_ACTION_OPCODE_SET_UDP);
+ 	default:
+ 		return -EOPNOTSUPP;
+ 	}
+ }
+ 
+ static int
+ nfp_flower_output_action(struct nfp_app *app,
+ 			 const struct flow_action_entry *act,
  			 struct nfp_fl_payload *nfp_fl, int *a_len,
  			 struct net_device *netdev, bool last,
  			 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
@@@ -775,7 -777,8 +776,8 @@@ nfp_flower_loop_action(struct nfp_app *
  		       struct nfp_fl_payload *nfp_fl, int *a_len,
  		       struct net_device *netdev,
  		       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
- 		       int *out_cnt, u32 *csum_updated)
+ 		       int *out_cnt, u32 *csum_updated,
+ 		       struct nfp_flower_pedit_acts *set_act)
  {
  	struct nfp_fl_set_ipv4_udp_tun *set_tun;
  	struct nfp_fl_pre_tunnel *pre_tun;
@@@ -860,7 -863,7 +862,7 @@@
  		return 0;
  	case FLOW_ACTION_MANGLE:
  		if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
- 				 a_len, csum_updated))
+ 				 a_len, csum_updated, set_act))
  			return -EOPNOTSUPP;
  		break;
  	case FLOW_ACTION_CSUM:
@@@ -880,12 -883,49 +882,49 @@@
  	return 0;
  }
  
+ static bool nfp_fl_check_mangle_start(struct flow_action *flow_act,
+ 				      int current_act_idx)
+ {
+ 	struct flow_action_entry current_act;
+ 	struct flow_action_entry prev_act;
+ 
+ 	current_act = flow_act->entries[current_act_idx];
+ 	if (current_act.id != FLOW_ACTION_MANGLE)
+ 		return false;
+ 
+ 	if (current_act_idx == 0)
+ 		return true;
+ 
+ 	prev_act = flow_act->entries[current_act_idx - 1];
+ 
+ 	return prev_act.id != FLOW_ACTION_MANGLE;
+ }
+ 
+ static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
+ 				    int current_act_idx)
+ {
+ 	struct flow_action_entry current_act;
+ 	struct flow_action_entry next_act;
+ 
+ 	current_act = flow_act->entries[current_act_idx];
+ 	if (current_act.id != FLOW_ACTION_MANGLE)
+ 		return false;
+ 
+ 	if (current_act_idx == flow_act->num_entries)
+ 		return true;
+ 
+ 	next_act = flow_act->entries[current_act_idx + 1];
+ 
+ 	return next_act.id != FLOW_ACTION_MANGLE;
+ }
+ 
  int nfp_flower_compile_action(struct nfp_app *app,
  			      struct tc_cls_flower_offload *flow,
  			      struct net_device *netdev,
  			      struct nfp_fl_payload *nfp_flow)
  {
  	int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
+ 	struct nfp_flower_pedit_acts set_act;
  	enum nfp_flower_tun_type tun_type;
  	struct flow_action_entry *act;
  	u32 csum_updated = 0;
@@@ -899,12 -939,18 +938,18 @@@
  	out_cnt = 0;
  
  	flow_action_for_each(i, act, &flow->rule->action) {
+ 		if (nfp_fl_check_mangle_start(&flow->rule->action, i))
+ 			memset(&set_act, 0, sizeof(set_act));
  		err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
  					     netdev, &tun_type, &tun_out_cnt,
- 					     &out_cnt, &csum_updated);
+ 					     &out_cnt, &csum_updated, &set_act);
  		if (err)
  			return err;
  		act_cnt++;
+ 		if (nfp_fl_check_mangle_end(&flow->rule->action, i))
+ 			nfp_fl_commit_mangle(flow,
+ 					     &nfp_flow->action_data[act_len],
+ 					     &act_len, &set_act, &csum_updated);
  	}
  
  	/* We optimise when the action list is small, this can unfortunately
diff --combined drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 94d228c04496,bf621674f583..0b44f851b276
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@@ -195,7 -195,7 +195,7 @@@ static netdev_tx_t nfp_repr_xmit(struc
  	ret = dev_queue_xmit(skb);
  	nfp_repr_inc_tx_stats(netdev, len, ret);
  
 -	return ret;
 +	return NETDEV_TX_OK;
  }
  
  static int nfp_repr_stop(struct net_device *netdev)
@@@ -273,7 -273,7 +273,7 @@@ const struct net_device_ops nfp_repr_ne
  	.ndo_set_features	= nfp_port_set_features,
  	.ndo_set_mac_address    = eth_mac_addr,
  	.ndo_get_port_parent_id	= nfp_port_get_port_parent_id,
- 	.ndo_get_devlink	= nfp_devlink_get_devlink,
+ 	.ndo_get_devlink_port	= nfp_devlink_get_devlink_port,
  };
  
  void
@@@ -383,7 -383,7 +383,7 @@@ int nfp_repr_init(struct nfp_app *app, 
  	netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
  	netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
  
 -	netdev->priv_flags |= IFF_NO_QUEUE;
 +	netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
  	netdev->features |= NETIF_F_LLTX;
  
  	if (nfp_app_has_tc(app)) {
diff --combined drivers/net/ethernet/realtek/r8169.c
index 19efa88f3f02,c9ee1c8eb635..8a8a4f70db1e
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@@ -5460,7 -5460,7 +5460,7 @@@ static void rtl_hw_start_8168(struct rt
  	tp->cp_cmd |= PktCntrDisable | INTT_1;
  	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
  
 -	RTL_W16(tp, IntrMitigate, 0x5151);
 +	RTL_W16(tp, IntrMitigate, 0x5100);
  
  	/* Work around for RxFIFO overflow. */
  	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
@@@ -6267,7 -6267,7 +6267,7 @@@ static netdev_tx_t rtl8169_start_xmit(s
  		 */
  		smp_mb();
  		if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS))
- 			netif_wake_queue(dev);
+ 			netif_start_queue(dev);
  	}
  
  	return NETDEV_TX_OK;
@@@ -6426,6 -6426,7 +6426,7 @@@ static int rtl_rx(struct net_device *de
  {
  	unsigned int cur_rx, rx_left;
  	unsigned int count;
+ 	LIST_HEAD(rx_list);
  
  	cur_rx = tp->cur_rx;
  
@@@ -6501,7 -6502,7 +6502,7 @@@ process_pkt
  			if (skb->pkt_type == PACKET_MULTICAST)
  				dev->stats.multicast++;
  
- 			napi_gro_receive(&tp->napi, skb);
+ 			list_add_tail(&skb->list, &rx_list);
  
  			u64_stats_update_begin(&tp->rx_stats.syncp);
  			tp->rx_stats.packets++;
@@@ -6516,6 -6517,8 +6517,8 @@@ release_descriptor
  	count = cur_rx - tp->cur_rx;
  	tp->cur_rx = cur_rx;
  
+ 	netif_receive_skb_list(&rx_list);
+ 
  	return count;
  }
  
diff --combined drivers/net/hyperv/netvsc.c
index e0dce373cdd9,9a022539d305..fdbeb7070d42
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@@ -110,7 -110,6 +110,7 @@@ static struct netvsc_device *alloc_net_
  
  	init_waitqueue_head(&net_device->wait_drain);
  	net_device->destroy = false;
 +	net_device->tx_disable = false;
  
  	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
  	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
@@@ -720,7 -719,7 +720,7 @@@ static void netvsc_send_tx_complete(str
  	} else {
  		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
  
 -		if (netif_tx_queue_stopped(txq) &&
 +		if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
  		    (hv_get_avail_to_write_percent(&channel->outbound) >
  		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
  			netif_tx_wake_queue(txq);
@@@ -875,8 -874,7 +875,8 @@@ static inline int netvsc_send_pkt
  	} else if (ret == -EAGAIN) {
  		netif_tx_stop_queue(txq);
  		ndev_ctx->eth_stats.stop_queue++;
 -		if (atomic_read(&nvchan->queue_sends) < 1) {
 +		if (atomic_read(&nvchan->queue_sends) < 1 &&
 +		    !net_device->tx_disable) {
  			netif_tx_wake_queue(txq);
  			ndev_ctx->eth_stats.wake_queue++;
  			ret = -ENOSPC;
@@@ -966,7 -964,7 +966,7 @@@ int netvsc_send(struct net_device *ndev
  	/* Keep aggregating only if stack says more data is coming
  	 * and not doing mixed modes send and not flow blocked
  	 */
- 	xmit_more = skb->xmit_more &&
+ 	xmit_more = netdev_xmit_more() &&
  		!packet->cp_partial &&
  		!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
  
diff --combined drivers/net/hyperv/netvsc_drv.c
index b20fb0fb595b,1a08679f90ce..06393b215102
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@@ -109,15 -109,6 +109,15 @@@ static void netvsc_set_rx_mode(struct n
  	rcu_read_unlock();
  }
  
 +static void netvsc_tx_enable(struct netvsc_device *nvscdev,
 +			     struct net_device *ndev)
 +{
 +	nvscdev->tx_disable = false;
 +	virt_wmb(); /* ensure queue wake up mechanism is on */
 +
 +	netif_tx_wake_all_queues(ndev);
 +}
 +
  static int netvsc_open(struct net_device *net)
  {
  	struct net_device_context *ndev_ctx = netdev_priv(net);
@@@ -138,7 -129,7 +138,7 @@@
  	rdev = nvdev->extension;
  	if (!rdev->link_state) {
  		netif_carrier_on(net);
 -		netif_tx_wake_all_queues(net);
 +		netvsc_tx_enable(nvdev, net);
  	}
  
  	if (vf_netdev) {
@@@ -193,17 -184,6 +193,17 @@@ static int netvsc_wait_until_empty(stru
  	}
  }
  
 +static void netvsc_tx_disable(struct netvsc_device *nvscdev,
 +			      struct net_device *ndev)
 +{
 +	if (nvscdev) {
 +		nvscdev->tx_disable = true;
 +		virt_wmb(); /* ensure txq will not wake up after stop */
 +	}
 +
 +	netif_tx_disable(ndev);
 +}
 +
  static int netvsc_close(struct net_device *net)
  {
  	struct net_device_context *net_device_ctx = netdev_priv(net);
@@@ -212,7 -192,7 +212,7 @@@
  	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
  	int ret;
  
 -	netif_tx_disable(net);
 +	netvsc_tx_disable(nvdev, net);
  
  	/* No need to close rndis filter if it is removed already */
  	if (!nvdev)
@@@ -328,7 -308,7 +328,7 @@@ static inline int netvsc_get_tx_queue(s
   * If a valid queue has already been assigned, then use that.
   * Otherwise compute tx queue based on hash and the send table.
   *
-  * This is basically similar to default (__netdev_pick_tx) with the added step
+  * This is basically similar to default (netdev_pick_tx) with the added step
   * of using the host send_table when no other queue has been assigned.
   *
   * TODO support XPS - but get_xps_queue not exported
@@@ -351,8 -331,7 +351,7 @@@ static u16 netvsc_pick_tx(struct net_de
  }
  
  static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
- 			       struct net_device *sb_dev,
- 			       select_queue_fallback_t fallback)
+ 			       struct net_device *sb_dev)
  {
  	struct net_device_context *ndc = netdev_priv(ndev);
  	struct net_device *vf_netdev;
@@@ -364,10 -343,9 +363,9 @@@
  		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
  
  		if (vf_ops->ndo_select_queue)
- 			txq = vf_ops->ndo_select_queue(vf_netdev, skb,
- 						       sb_dev, fallback);
+ 			txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev);
  		else
- 			txq = fallback(vf_netdev, skb, NULL);
+ 			txq = netdev_pick_tx(vf_netdev, skb, NULL);
  
  		/* Record the queue selected by VF so that it can be
  		 * used for common case where VF has more queues than
@@@ -940,7 -918,7 +938,7 @@@ static int netvsc_detach(struct net_dev
  
  	/* If device was up (receiving) then shutdown */
  	if (netif_running(ndev)) {
 -		netif_tx_disable(ndev);
 +		netvsc_tx_disable(nvdev, ndev);
  
  		ret = rndis_filter_close(nvdev);
  		if (ret) {
@@@ -1928,7 -1906,7 +1926,7 @@@ static void netvsc_link_change(struct w
  		if (rdev->link_state) {
  			rdev->link_state = false;
  			netif_carrier_on(net);
 -			netif_tx_wake_all_queues(net);
 +			netvsc_tx_enable(net_device, net);
  		} else {
  			notify = true;
  		}
@@@ -1938,7 -1916,7 +1936,7 @@@
  		if (!rdev->link_state) {
  			rdev->link_state = true;
  			netif_carrier_off(net);
 -			netif_tx_stop_all_queues(net);
 +			netvsc_tx_disable(net_device, net);
  		}
  		kfree(event);
  		break;
@@@ -1947,7 -1925,7 +1945,7 @@@
  		if (!rdev->link_state) {
  			rdev->link_state = true;
  			netif_carrier_off(net);
 -			netif_tx_stop_all_queues(net);
 +			netvsc_tx_disable(net_device, net);
  			event->event = RNDIS_STATUS_MEDIA_CONNECT;
  			spin_lock_irqsave(&ndev_ctx->lock, flags);
  			list_add(&event->list, &ndev_ctx->reconfig_events);
diff --combined include/net/ip.h
index 583526aad1d0,aa09ae5f01a5..2d3cce7c3e8a
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@@ -38,6 -38,10 +38,10 @@@
  #define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */
  #define IPV4_MIN_MTU		68			/* RFC 791 */
  
+ extern unsigned int sysctl_fib_sync_mem;
+ extern unsigned int sysctl_fib_sync_mem_min;
+ extern unsigned int sysctl_fib_sync_mem_max;
+ 
  struct sock;
  
  struct inet_skb_parm {
@@@ -677,7 -681,7 +681,7 @@@ int ip_options_get_from_user(struct ne
  			     unsigned char __user *data, int optlen);
  void ip_options_undo(struct ip_options *opt);
  void ip_forward_options(struct sk_buff *skb);
 -int ip_options_rcv_srr(struct sk_buff *skb);
 +int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
  
  /*
   *	Functions provided by ip_sockglue.c
diff --combined include/net/sch_generic.h
index a2b38b3deeca,2269383c1399..0aea0e262452
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@@ -113,6 -113,9 +113,9 @@@ struct Qdisc 
  
  	spinlock_t		busylock ____cacheline_aligned_in_smp;
  	spinlock_t		seqlock;
+ 
+ 	/* for NOLOCK qdisc, true if there are no enqueued skbs */
+ 	bool			empty;
  	struct rcu_head		rcu;
  };
  
@@@ -143,11 -146,19 +146,19 @@@ static inline bool qdisc_is_running(str
  	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
  }
  
+ static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
+ {
+ 	if (qdisc->flags & TCQ_F_NOLOCK)
+ 		return qdisc->empty;
+ 	return !qdisc->q.qlen;
+ }
+ 
  static inline bool qdisc_run_begin(struct Qdisc *qdisc)
  {
  	if (qdisc->flags & TCQ_F_NOLOCK) {
  		if (!spin_trylock(&qdisc->seqlock))
  			return false;
+ 		qdisc->empty = false;
  	} else if (qdisc_is_running(qdisc)) {
  		return false;
  	}
@@@ -923,41 -934,6 +934,41 @@@ static inline void qdisc_qstats_overlim
  	sch->qstats.overlimits++;
  }
  
 +static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
 +{
 +	__u32 qlen = qdisc_qlen_sum(sch);
 +
 +	return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
 +}
 +
 +static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch,  __u32 *qlen,
 +					     __u32 *backlog)
 +{
 +	struct gnet_stats_queue qstats = { 0 };
 +	__u32 len = qdisc_qlen_sum(sch);
 +
 +	__gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
 +	*qlen = qstats.qlen;
 +	*backlog = qstats.backlog;
 +}
 +
 +static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
 +{
 +	__u32 qlen, backlog;
 +
 +	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
 +	qdisc_tree_reduce_backlog(sch, qlen, backlog);
 +}
 +
 +static inline void qdisc_purge_queue(struct Qdisc *sch)
 +{
 +	__u32 qlen, backlog;
 +
 +	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
 +	qdisc_reset(sch);
 +	qdisc_tree_reduce_backlog(sch, qlen, backlog);
 +}
 +
  static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
  {
  	qh->head = NULL;
@@@ -1141,8 -1117,13 +1152,8 @@@ static inline struct Qdisc *qdisc_repla
  	sch_tree_lock(sch);
  	old = *pold;
  	*pold = new;
 -	if (old != NULL) {
 -		unsigned int qlen = old->q.qlen;
 -		unsigned int backlog = old->qstats.backlog;
 -
 -		qdisc_reset(old);
 -		qdisc_tree_reduce_backlog(old, qlen, backlog);
 -	}
 +	if (old != NULL)
 +		qdisc_tree_flush_backlog(old);
  	sch_tree_unlock(sch);
  
  	return old;
diff --combined kernel/bpf/verifier.c
index 6c5a41f7f338,2fe89138309a..b7ad8003c4e6
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@@ -377,7 -377,8 +377,8 @@@ static bool is_release_function(enum bp
  static bool is_acquire_function(enum bpf_func_id func_id)
  {
  	return func_id == BPF_FUNC_sk_lookup_tcp ||
- 		func_id == BPF_FUNC_sk_lookup_udp;
+ 		func_id == BPF_FUNC_sk_lookup_udp ||
+ 		func_id == BPF_FUNC_skc_lookup_tcp;
  }
  
  static bool is_ptr_cast_function(enum bpf_func_id func_id)
@@@ -1897,9 -1898,8 +1898,9 @@@ continue_func
  		}
  		frame++;
  		if (frame >= MAX_CALL_FRAMES) {
 -			WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
 -			return -EFAULT;
 +			verbose(env, "the call stack of %d frames is too deep !\n",
 +				frame);
 +			return -E2BIG;
  		}
  		goto process_func;
  	}
@@@ -3157,19 -3157,11 +3158,11 @@@ static int check_helper_call(struct bpf
  	} else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
  		mark_reg_known_zero(env, regs, BPF_REG_0);
  		regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
- 		if (is_acquire_function(func_id)) {
- 			int id = acquire_reference_state(env, insn_idx);
- 
- 			if (id < 0)
- 				return id;
- 			/* For mark_ptr_or_null_reg() */
- 			regs[BPF_REG_0].id = id;
- 			/* For release_reference() */
- 			regs[BPF_REG_0].ref_obj_id = id;
- 		} else {
- 			/* For mark_ptr_or_null_reg() */
- 			regs[BPF_REG_0].id = ++env->id_gen;
- 		}
+ 		regs[BPF_REG_0].id = ++env->id_gen;
+ 	} else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) {
+ 		mark_reg_known_zero(env, regs, BPF_REG_0);
+ 		regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
+ 		regs[BPF_REG_0].id = ++env->id_gen;
  	} else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
  		mark_reg_known_zero(env, regs, BPF_REG_0);
  		regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
@@@ -3180,9 -3172,19 +3173,19 @@@
  		return -EINVAL;
  	}
  
- 	if (is_ptr_cast_function(func_id))
+ 	if (is_ptr_cast_function(func_id)) {
  		/* For release_reference() */
  		regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
+ 	} else if (is_acquire_function(func_id)) {
+ 		int id = acquire_reference_state(env, insn_idx);
+ 
+ 		if (id < 0)
+ 			return id;
+ 		/* For mark_ptr_or_null_reg() */
+ 		regs[BPF_REG_0].id = id;
+ 		/* For release_reference() */
+ 		regs[BPF_REG_0].ref_obj_id = id;
+ 	}
  
  	do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
  
diff --combined net/batman-adv/bat_v_elp.c
index d5df0114f08a,13b9ab860a25..2614a9caee00
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@@ -2,18 -2,6 +2,6 @@@
  /* Copyright (C) 2011-2019  B.A.T.M.A.N. contributors:
   *
   * Linus Lüssing, Marek Lindner
-  *
-  * This program is free software; you can redistribute it and/or
-  * modify it under the terms of version 2 of the GNU General Public
-  * License as published by the Free Software Foundation.
-  *
-  * This program is distributed in the hope that it will be useful, but
-  * WITHOUT ANY WARRANTY; without even the implied warranty of
-  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-  * General Public License for more details.
-  *
-  * You should have received a copy of the GNU General Public License
-  * along with this program; if not, see <http://www.gnu.org/licenses/>.
   */
  
  #include "bat_v_elp.h"
@@@ -104,10 -92,8 +92,10 @@@ static u32 batadv_v_elp_get_throughput(
  
  		ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
  
 -		/* free the TID stats immediately */
 -		cfg80211_sinfo_release_content(&sinfo);
 +		if (!ret) {
 +			/* free the TID stats immediately */
 +			cfg80211_sinfo_release_content(&sinfo);
 +		}
  
  		dev_put(real_netdev);
  		if (ret == -ENOENT) {
diff --combined net/batman-adv/bridge_loop_avoidance.c
index 4fb01108e5f5,8d6b7c9c2a7e..663a53b6d36e
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@@ -2,18 -2,6 +2,6 @@@
  /* Copyright (C) 2011-2019  B.A.T.M.A.N. contributors:
   *
   * Simon Wunderlich
-  *
-  * This program is free software; you can redistribute it and/or
-  * modify it under the terms of version 2 of the GNU General Public
-  * License as published by the Free Software Foundation.
-  *
-  * This program is distributed in the hope that it will be useful, but
-  * WITHOUT ANY WARRANTY; without even the implied warranty of
-  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-  * General Public License for more details.
-  *
-  * You should have received a copy of the GNU General Public License
-  * along with this program; if not, see <http://www.gnu.org/licenses/>.
   */
  
  #include "bridge_loop_avoidance.h"
@@@ -59,7 -47,6 +47,6 @@@
  #include "netlink.h"
  #include "originator.h"
  #include "soft-interface.h"
- #include "sysfs.h"
  #include "translation-table.h"
  
  static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
@@@ -803,8 -790,6 +790,8 @@@ static void batadv_bla_del_claim(struc
  				 const u8 *mac, const unsigned short vid)
  {
  	struct batadv_bla_claim search_claim, *claim;
 +	struct batadv_bla_claim *claim_removed_entry;
 +	struct hlist_node *claim_removed_node;
  
  	ether_addr_copy(search_claim.addr, mac);
  	search_claim.vid = vid;
@@@ -815,18 -800,10 +802,18 @@@
  	batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
  		   mac, batadv_print_vid(vid));
  
 -	batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
 -			   batadv_choose_claim, claim);
 -	batadv_claim_put(claim); /* reference from the hash is gone */
 +	claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
 +						batadv_compare_claim,
 +						batadv_choose_claim, claim);
 +	if (!claim_removed_node)
 +		goto free_claim;
  
 +	/* reference from the hash is gone */
 +	claim_removed_entry = hlist_entry(claim_removed_node,
 +					  struct batadv_bla_claim, hash_entry);
 +	batadv_claim_put(claim_removed_entry);
 +
 +free_claim:
  	/* don't need the reference from hash_find() anymore */
  	batadv_claim_put(claim);
  }
diff --combined net/batman-adv/sysfs.c
index 208655cf6717,ad14c8086fe7..80fc3253c336
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@@ -2,23 -2,12 +2,12 @@@
  /* Copyright (C) 2010-2019  B.A.T.M.A.N. contributors:
   *
   * Marek Lindner
-  *
-  * This program is free software; you can redistribute it and/or
-  * modify it under the terms of version 2 of the GNU General Public
-  * License as published by the Free Software Foundation.
-  *
-  * This program is distributed in the hope that it will be useful, but
-  * WITHOUT ANY WARRANTY; without even the implied warranty of
-  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-  * General Public License for more details.
-  *
-  * You should have received a copy of the GNU General Public License
-  * along with this program; if not, see <http://www.gnu.org/licenses/>.
   */
  
  #include "sysfs.h"
  #include "main.h"
  
+ #include <asm/current.h>
  #include <linux/atomic.h>
  #include <linux/compiler.h>
  #include <linux/device.h>
@@@ -34,6 -23,7 +23,7 @@@
  #include <linux/rculist.h>
  #include <linux/rcupdate.h>
  #include <linux/rtnetlink.h>
+ #include <linux/sched.h>
  #include <linux/slab.h>
  #include <linux/stddef.h>
  #include <linux/string.h>
@@@ -52,6 -42,16 +42,16 @@@
  #include "network-coding.h"
  #include "soft-interface.h"
  
+ /**
+  * batadv_sysfs_deprecated() - Log use of deprecated batadv sysfs access
+  * @attr: attribute which was accessed
+  */
+ static void batadv_sysfs_deprecated(struct attribute *attr)
+ {
+ 	pr_warn_ratelimited(DEPRECATED "%s (pid %d) Use of sysfs file \"%s\".\nUse batadv genl family instead",
+ 			    current->comm, task_pid_nr(current), attr->name);
+ }
+ 
  static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
  {
  	struct device *dev = container_of(obj->parent, struct device, kobj);
@@@ -114,22 -114,6 +114,6 @@@ batadv_kobj_to_vlan(struct batadv_priv 
  	return vlan;
  }
  
- #define BATADV_UEV_TYPE_VAR	"BATTYPE="
- #define BATADV_UEV_ACTION_VAR	"BATACTION="
- #define BATADV_UEV_DATA_VAR	"BATDATA="
- 
- static char *batadv_uev_action_str[] = {
- 	"add",
- 	"del",
- 	"change",
- 	"loopdetect",
- };
- 
- static char *batadv_uev_type_str[] = {
- 	"gw",
- 	"bla",
- };
- 
  /* Use this, if you have customized show and store functions for vlan attrs */
  #define BATADV_ATTR_VLAN(_name, _mode, _show, _store)	\
  struct batadv_attribute batadv_attr_vlan_##_name = {	\
@@@ -157,6 -141,7 +141,7 @@@ ssize_t batadv_store_##_name(struct kob
  	struct batadv_priv *bat_priv = netdev_priv(net_dev);		\
  	ssize_t length;							\
  									\
+ 	batadv_sysfs_deprecated(attr);					\
  	length = __batadv_store_bool_attr(buff, count, _post_func, attr,\
  					  &bat_priv->_name, net_dev);	\
  									\
@@@ -171,6 -156,7 +156,7 @@@ ssize_t batadv_show_##_name(struct kobj
  {									\
  	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);	\
  									\
+ 	batadv_sysfs_deprecated(attr);					\
  	return sprintf(buff, "%s\n",					\
  		       atomic_read(&bat_priv->_name) == 0 ?		\
  		       "disabled" : "enabled");				\
@@@ -194,6 -180,7 +180,7 @@@ ssize_t batadv_store_##_name(struct kob
  	struct batadv_priv *bat_priv = netdev_priv(net_dev);		\
  	ssize_t length;							\
  									\
+ 	batadv_sysfs_deprecated(attr);					\
  	length = __batadv_store_uint_attr(buff, count, _min, _max,	\
  					  _post_func, attr,		\
  					  &bat_priv->_var, net_dev,	\
@@@ -210,6 -197,7 +197,7 @@@ ssize_t batadv_show_##_name(struct kobj
  {									\
  	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);	\
  									\
+ 	batadv_sysfs_deprecated(attr);					\
  	return sprintf(buff, "%i\n", atomic_read(&bat_priv->_var));	\
  }									\
  
@@@ -234,6 -222,7 +222,7 @@@ ssize_t batadv_store_vlan_##_name(struc
  					      attr, &vlan->_name,	\
  					      bat_priv->soft_iface);	\
  									\
+ 	batadv_sysfs_deprecated(attr);					\
  	if (vlan->vid)							\
  		batadv_netlink_notify_vlan(bat_priv, vlan);		\
  	else								\
@@@ -254,6 -243,7 +243,7 @@@ ssize_t batadv_show_vlan_##_name(struc
  			     atomic_read(&vlan->_name) == 0 ?		\
  			     "disabled" : "enabled");			\
  									\
+ 	batadv_sysfs_deprecated(attr);					\
  	batadv_softif_vlan_put(vlan);					\
  	return res;							\
  }
@@@ -275,6 -265,7 +265,7 @@@ ssize_t batadv_store_##_name(struct kob
  	struct batadv_priv *bat_priv;					\
  	ssize_t length;							\
  									\
+ 	batadv_sysfs_deprecated(attr);					\
  	hard_iface = batadv_hardif_get_by_netdev(net_dev);		\
  	if (!hard_iface)						\
  		return 0;						\
@@@ -302,6 -293,7 +293,7 @@@ ssize_t batadv_show_##_name(struct kobj
  	struct batadv_hard_iface *hard_iface;				\
  	ssize_t length;							\
  									\
+ 	batadv_sysfs_deprecated(attr);					\
  	hard_iface = batadv_hardif_get_by_netdev(net_dev);		\
  	if (!hard_iface)						\
  		return 0;						\
@@@ -446,6 -438,7 +438,7 @@@ static ssize_t batadv_show_bat_algo(str
  {
  	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
  
+ 	batadv_sysfs_deprecated(attr);
  	return sprintf(buff, "%s\n", bat_priv->algo_ops->name);
  }
  
@@@ -462,6 -455,8 +455,8 @@@ static ssize_t batadv_show_gw_mode(stru
  	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
  	int bytes_written;
  
+ 	batadv_sysfs_deprecated(attr);
+ 
  	/* GW mode is not available if the routing algorithm in use does not
  	 * implement the GW API
  	 */
@@@ -496,6 -491,8 +491,8 @@@ static ssize_t batadv_store_gw_mode(str
  	char *curr_gw_mode_str;
  	int gw_mode_tmp = -1;
  
+ 	batadv_sysfs_deprecated(attr);
+ 
  	/* toggling GW mode is allowed only if the routing algorithm in use
  	 * provides the GW API
  	 */
@@@ -570,6 -567,8 +567,8 @@@ static ssize_t batadv_show_gw_sel_class
  {
  	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
  
+ 	batadv_sysfs_deprecated(attr);
+ 
  	/* GW selection class is not available if the routing algorithm in use
  	 * does not implement the GW API
  	 */
@@@ -590,6 -589,8 +589,8 @@@ static ssize_t batadv_store_gw_sel_clas
  	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
  	ssize_t length;
  
+ 	batadv_sysfs_deprecated(attr);
+ 
  	/* setting the GW selection class is allowed only if the routing
  	 * algorithm in use implements the GW API
  	 */
@@@ -620,6 -621,8 +621,8 @@@ static ssize_t batadv_show_gw_bwidth(st
  	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
  	u32 down, up;
  
+ 	batadv_sysfs_deprecated(attr);
+ 
  	down = atomic_read(&bat_priv->gw.bandwidth_down);
  	up = atomic_read(&bat_priv->gw.bandwidth_up);
  
@@@ -635,6 -638,8 +638,8 @@@ static ssize_t batadv_store_gw_bwidth(s
  	struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
  	ssize_t length;
  
+ 	batadv_sysfs_deprecated(attr);
+ 
  	if (buff[count - 1] == '\n')
  		buff[count - 1] = '\0';
  
@@@ -659,6 -664,7 +664,7 @@@ static ssize_t batadv_show_isolation_ma
  {
  	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
  
+ 	batadv_sysfs_deprecated(attr);
  	return sprintf(buff, "%#.8x/%#.8x\n", bat_priv->isolation_mark,
  		       bat_priv->isolation_mark_mask);
  }
@@@ -682,6 -688,8 +688,8 @@@ static ssize_t batadv_store_isolation_m
  	u32 mark, mask;
  	char *mask_ptr;
  
+ 	batadv_sysfs_deprecated(attr);
+ 
  	/* parse the mask if it has been specified, otherwise assume the mask is
  	 * the biggest possible
  	 */
@@@ -937,6 -945,8 +945,8 @@@ static ssize_t batadv_show_mesh_iface(s
  	ssize_t length;
  	const char *ifname;
  
+ 	batadv_sysfs_deprecated(attr);
+ 
  	hard_iface = batadv_hardif_get_by_netdev(net_dev);
  	if (!hard_iface)
  		return 0;
@@@ -1041,6 -1051,8 +1051,8 @@@ static ssize_t batadv_store_mesh_iface(
  	struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
  	struct batadv_store_mesh_work *store_work;
  
+ 	batadv_sysfs_deprecated(attr);
+ 
  	if (buff[count - 1] == '\n')
  		buff[count - 1] = '\0';
  
@@@ -1072,6 -1084,8 +1084,8 @@@ static ssize_t batadv_show_iface_status
  	struct batadv_hard_iface *hard_iface;
  	ssize_t length;
  
+ 	batadv_sysfs_deprecated(attr);
+ 
  	hard_iface = batadv_hardif_get_by_netdev(net_dev);
  	if (!hard_iface)
  		return 0;
@@@ -1116,13 -1130,15 +1130,15 @@@ static ssize_t batadv_store_throughput_
  						struct attribute *attr,
  						char *buff, size_t count)
  {
 -	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
  	struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
  	struct batadv_hard_iface *hard_iface;
 +	struct batadv_priv *bat_priv;
  	u32 tp_override;
  	u32 old_tp_override;
  	bool ret;
  
+ 	batadv_sysfs_deprecated(attr);
+ 
  	hard_iface = batadv_hardif_get_by_netdev(net_dev);
  	if (!hard_iface)
  		return -EINVAL;
@@@ -1147,10 -1163,7 +1163,10 @@@
  
  	atomic_set(&hard_iface->bat_v.throughput_override, tp_override);
  
 -	batadv_netlink_notify_hardif(bat_priv, hard_iface);
 +	if (hard_iface->soft_iface) {
 +		bat_priv = netdev_priv(hard_iface->soft_iface);
 +		batadv_netlink_notify_hardif(bat_priv, hard_iface);
 +	}
  
  out:
  	batadv_hardif_put(hard_iface);
@@@ -1165,6 -1178,8 +1181,8 @@@ static ssize_t batadv_show_throughput_o
  	struct batadv_hard_iface *hard_iface;
  	u32 tp_override;
  
+ 	batadv_sysfs_deprecated(attr);
+ 
  	hard_iface = batadv_hardif_get_by_netdev(net_dev);
  	if (!hard_iface)
  		return -EINVAL;
@@@ -1250,57 -1265,3 +1268,3 @@@ void batadv_sysfs_del_hardif(struct kob
  	kobject_put(*hardif_obj);
  	*hardif_obj = NULL;
  }
- 
- /**
-  * batadv_throw_uevent() - Send an uevent with batman-adv specific env data
-  * @bat_priv: the bat priv with all the soft interface information
-  * @type: subsystem type of event. Stored in uevent's BATTYPE
-  * @action: action type of event. Stored in uevent's BATACTION
-  * @data: string with additional information to the event (ignored for
-  *  BATADV_UEV_DEL). Stored in uevent's BATDATA
-  *
-  * Return: 0 on success or negative error number in case of failure
-  */
- int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
- 			enum batadv_uev_action action, const char *data)
- {
- 	int ret = -ENOMEM;
- 	struct kobject *bat_kobj;
- 	char *uevent_env[4] = { NULL, NULL, NULL, NULL };
- 
- 	bat_kobj = &bat_priv->soft_iface->dev.kobj;
- 
- 	uevent_env[0] = kasprintf(GFP_ATOMIC,
- 				  "%s%s", BATADV_UEV_TYPE_VAR,
- 				  batadv_uev_type_str[type]);
- 	if (!uevent_env[0])
- 		goto out;
- 
- 	uevent_env[1] = kasprintf(GFP_ATOMIC,
- 				  "%s%s", BATADV_UEV_ACTION_VAR,
- 				  batadv_uev_action_str[action]);
- 	if (!uevent_env[1])
- 		goto out;
- 
- 	/* If the event is DEL, ignore the data field */
- 	if (action != BATADV_UEV_DEL) {
- 		uevent_env[2] = kasprintf(GFP_ATOMIC,
- 					  "%s%s", BATADV_UEV_DATA_VAR, data);
- 		if (!uevent_env[2])
- 			goto out;
- 	}
- 
- 	ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
- out:
- 	kfree(uevent_env[0]);
- 	kfree(uevent_env[1]);
- 	kfree(uevent_env[2]);
- 
- 	if (ret)
- 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- 			   "Impossible to send uevent for (%s,%s,%s) event (err: %d)\n",
- 			   batadv_uev_type_str[type],
- 			   batadv_uev_action_str[action],
- 			   (action == BATADV_UEV_DEL ? "NULL" : data), ret);
- 	return ret;
- }
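
The sysfs.c hunks above add batadv_sysfs_deprecated(), a rate-limited warning printed from every show/store handler to point users at the batadv generic netlink family, and move the uevent helpers (batadv_throw_uevent() and the BATTYPE/BATACTION/BATDATA strings) out of this file. The snippet below is a rough userspace sketch of such a rate-limited deprecation warning, assuming a simple fixed-window limiter; it is not the kernel's ratelimit machinery and the names are made up.

#include <stdio.h>
#include <time.h>

/* Allow at most RL_BURST messages per RL_INTERVAL seconds, drop the rest. */
#define RL_INTERVAL 5
#define RL_BURST    3

static void warn_deprecated(const char *file)
{
	static time_t window_start;
	static int printed;
	time_t now = time(NULL);

	if (now - window_start >= RL_INTERVAL) {
		window_start = now;
		printed = 0;
	}
	if (printed++ < RL_BURST)
		fprintf(stderr,
			"batman_adv: Use of sysfs file \"%s\" is deprecated, use the batadv genl family instead\n",
			file);
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		warn_deprecated("orig_interval");
	return 0;
}
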
diff --combined net/batman-adv/translation-table.c
index 26c4e2493ddf,5d8bf8048e4e..1ddfd5e011ee
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@@ -2,18 -2,6 +2,6 @@@
  /* Copyright (C) 2007-2019  B.A.T.M.A.N. contributors:
   *
   * Marek Lindner, Simon Wunderlich, Antonio Quartulli
-  *
-  * This program is free software; you can redistribute it and/or
-  * modify it under the terms of version 2 of the GNU General Public
-  * License as published by the Free Software Foundation.
-  *
-  * This program is distributed in the hope that it will be useful, but
-  * WITHOUT ANY WARRANTY; without even the implied warranty of
-  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-  * General Public License for more details.
-  *
-  * You should have received a copy of the GNU General Public License
-  * along with this program; if not, see <http://www.gnu.org/licenses/>.
   */
  
  #include "translation-table.h"
@@@ -205,7 -193,7 +193,7 @@@ batadv_tt_local_hash_find(struct batadv
   * Return: a pointer to the corresponding tt_global_entry struct if the client
   * is found, NULL otherwise.
   */
- static struct batadv_tt_global_entry *
+ struct batadv_tt_global_entry *
  batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
  			   unsigned short vid)
  {
@@@ -300,8 -288,7 +288,7 @@@ static void batadv_tt_global_entry_rele
   *  possibly release it
   * @tt_global_entry: tt_global_entry to be free'd
   */
- static void
- batadv_tt_global_entry_put(struct batadv_tt_global_entry *tt_global_entry)
+ void batadv_tt_global_entry_put(struct batadv_tt_global_entry *tt_global_entry)
  {
  	kref_put(&tt_global_entry->common.refcount,
  		 batadv_tt_global_entry_release);
@@@ -616,26 -603,14 +603,26 @@@ static void batadv_tt_global_free(struc
  				  struct batadv_tt_global_entry *tt_global,
  				  const char *message)
  {
 +	struct batadv_tt_global_entry *tt_removed_entry;
 +	struct hlist_node *tt_removed_node;
 +
  	batadv_dbg(BATADV_DBG_TT, bat_priv,
  		   "Deleting global tt entry %pM (vid: %d): %s\n",
  		   tt_global->common.addr,
  		   batadv_print_vid(tt_global->common.vid), message);
  
 -	batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
 -			   batadv_choose_tt, &tt_global->common);
 -	batadv_tt_global_entry_put(tt_global);
 +	tt_removed_node = batadv_hash_remove(bat_priv->tt.global_hash,
 +					     batadv_compare_tt,
 +					     batadv_choose_tt,
 +					     &tt_global->common);
 +	if (!tt_removed_node)
 +		return;
 +
 +	/* drop reference of removed hash entry */
 +	tt_removed_entry = hlist_entry(tt_removed_node,
 +				       struct batadv_tt_global_entry,
 +				       common.hash_entry);
 +	batadv_tt_global_entry_put(tt_removed_entry);
  }
  
  /**
@@@ -1349,10 -1324,9 +1336,10 @@@ u16 batadv_tt_local_remove(struct batad
  			   unsigned short vid, const char *message,
  			   bool roaming)
  {
 +	struct batadv_tt_local_entry *tt_removed_entry;
  	struct batadv_tt_local_entry *tt_local_entry;
  	u16 flags, curr_flags = BATADV_NO_FLAGS;
 -	void *tt_entry_exists;
 +	struct hlist_node *tt_removed_node;
  
  	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
  	if (!tt_local_entry)
@@@ -1381,18 -1355,15 +1368,18 @@@
  	 */
  	batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
  
 -	tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
 +	tt_removed_node = batadv_hash_remove(bat_priv->tt.local_hash,
  					     batadv_compare_tt,
  					     batadv_choose_tt,
  					     &tt_local_entry->common);
 -	if (!tt_entry_exists)
 +	if (!tt_removed_node)
  		goto out;
  
 -	/* extra call to free the local tt entry */
 -	batadv_tt_local_entry_put(tt_local_entry);
 +	/* drop reference of removed hash entry */
 +	tt_removed_entry = hlist_entry(tt_removed_node,
 +				       struct batadv_tt_local_entry,
 +				       common.hash_entry);
 +	batadv_tt_local_entry_put(tt_removed_entry);
  
  out:
  	if (tt_local_entry)
diff --combined net/core/datagram.c
index e657289db4ac,0dafec5cada0..91bb5a083fee
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@@ -61,6 -61,8 +61,8 @@@
  #include <trace/events/skb.h>
  #include <net/busy_poll.h>
  
+ #include "datagram.h"
+ 
  /*
   *	Is a socket 'connection oriented' ?
   */
@@@ -279,7 -281,7 +281,7 @@@ struct sk_buff *__skb_try_recv_datagram
  			break;
  
  		sk_busy_loop(sk, flags & MSG_DONTWAIT);
 -	} while (!skb_queue_empty(&sk->sk_receive_queue));
 +	} while (sk->sk_receive_queue.prev != *last);
  
  	error = -EAGAIN;
  
@@@ -408,10 -410,10 +410,10 @@@ int skb_kill_datagram(struct sock *sk, 
  }
  EXPORT_SYMBOL(skb_kill_datagram);
  
- int __skb_datagram_iter(const struct sk_buff *skb, int offset,
- 			struct iov_iter *to, int len, bool fault_short,
- 			size_t (*cb)(const void *, size_t, void *, struct iov_iter *),
- 			void *data)
+ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
+ 			       struct iov_iter *to, int len, bool fault_short,
+ 			       size_t (*cb)(const void *, size_t, void *,
+ 					    struct iov_iter *), void *data)
  {
  	int start = skb_headlen(skb);
  	int i, copy = start - offset, start_off = offset, n;
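
The datagram.c hunk changes the retry condition in __skb_try_recv_datagram(): instead of looping again whenever the receive queue is non-empty, it only retries when the queue tail has moved past the last skb the caller already inspected, which avoids spinning on a queue that holds nothing new (the kernel's sk_buff_head is circular, so sk_receive_queue.prev is the tail). A rough, self-contained sketch of that "retry only if something newer arrived" loop, with made-up types:

#include <stdio.h>

struct item { int id; };

#define QLEN 8

struct queue {
	struct item *slots[QLEN];
	int tail;                       /* index one past the newest item */
};

/* Try to fetch something; remember the newest item seen in *last. */
static struct item *try_recv(struct queue *q, struct item **last)
{
	*last = q->tail ? q->slots[q->tail - 1] : NULL;
	/* a real implementation would scan for an unconsumed item here */
	return NULL;
}

int main(void)
{
	struct queue q = { .slots = { &(struct item){ 1 } }, .tail = 1 };
	struct item *last = NULL;
	int attempts = 0;

	do {
		if (try_recv(&q, &last))
			break;
		attempts++;
		/* loop again only if something was queued after 'last' */
	} while (q.tail && q.slots[q.tail - 1] != last && attempts < 100);

	printf("gave up after %d attempt(s)\n", attempts);
	return 0;
}
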
diff --combined net/core/dev.c
index fdcff29df915,d5b1315218d3..3c8b0a84165a
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@@ -146,6 -146,7 +146,7 @@@
  #include <net/udp_tunnel.h>
  #include <linux/net_namespace.h>
  #include <linux/indirect_call_wrapper.h>
+ #include <net/devlink.h>
  
  #include "net-sysfs.h"
  
@@@ -3468,6 -3469,15 +3469,15 @@@ static inline int __dev_xmit_skb(struc
  		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
  			__qdisc_drop(skb, &to_free);
  			rc = NET_XMIT_DROP;
+ 		} else if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
+ 			   qdisc_run_begin(q)) {
+ 			qdisc_bstats_cpu_update(q, skb);
+ 
+ 			if (sch_direct_xmit(skb, q, dev, txq, NULL, true))
+ 				__qdisc_run(q);
+ 
+ 			qdisc_run_end(q);
+ 			rc = NET_XMIT_SUCCESS;
  		} else {
  			rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
  			qdisc_run(q);
@@@ -3556,9 -3566,6 +3566,6 @@@ static void skb_update_prio(struct sk_b
  #define skb_update_prio(skb)
  #endif
  
- DEFINE_PER_CPU(int, xmit_recursion);
- EXPORT_SYMBOL(xmit_recursion);
- 
  /**
   *	dev_loopback_xmit - loop back @skb
   *	@net: network namespace this loopback is happening in
@@@ -3689,23 -3696,21 +3696,21 @@@ get_cpus_map
  }
  
  u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
- 		     struct net_device *sb_dev,
- 		     select_queue_fallback_t fallback)
+ 		     struct net_device *sb_dev)
  {
  	return 0;
  }
  EXPORT_SYMBOL(dev_pick_tx_zero);
  
  u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
- 		       struct net_device *sb_dev,
- 		       select_queue_fallback_t fallback)
+ 		       struct net_device *sb_dev)
  {
  	return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
  }
  EXPORT_SYMBOL(dev_pick_tx_cpu_id);
  
- static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
- 			    struct net_device *sb_dev)
+ u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+ 		     struct net_device *sb_dev)
  {
  	struct sock *sk = skb->sk;
  	int queue_index = sk_tx_queue_get(sk);
@@@ -3729,10 -3734,11 +3734,11 @@@
  
  	return queue_index;
  }
+ EXPORT_SYMBOL(netdev_pick_tx);
  
- struct netdev_queue *netdev_pick_tx(struct net_device *dev,
- 				    struct sk_buff *skb,
- 				    struct net_device *sb_dev)
+ struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
+ 					 struct sk_buff *skb,
+ 					 struct net_device *sb_dev)
  {
  	int queue_index = 0;
  
@@@ -3747,10 -3753,9 +3753,9 @@@
  		const struct net_device_ops *ops = dev->netdev_ops;
  
  		if (ops->ndo_select_queue)
- 			queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
- 							    __netdev_pick_tx);
+ 			queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
  		else
- 			queue_index = __netdev_pick_tx(dev, skb, sb_dev);
+ 			queue_index = netdev_pick_tx(dev, skb, sb_dev);
  
  		queue_index = netdev_cap_txqueue(dev, queue_index);
  	}
@@@ -3824,7 -3829,7 +3829,7 @@@ static int __dev_queue_xmit(struct sk_b
  	else
  		skb_dst_force(skb);
  
- 	txq = netdev_pick_tx(dev, skb, sb_dev);
+ 	txq = netdev_core_pick_tx(dev, skb, sb_dev);
  	q = rcu_dereference_bh(txq->qdisc);
  
  	trace_net_dev_queue(skb);
@@@ -3849,8 -3854,7 +3854,7 @@@
  		int cpu = smp_processor_id(); /* ok because BHs are off */
  
  		if (txq->xmit_lock_owner != cpu) {
- 			if (unlikely(__this_cpu_read(xmit_recursion) >
- 				     XMIT_RECURSION_LIMIT))
+ 			if (dev_xmit_recursion())
  				goto recursion_alert;
  
  			skb = validate_xmit_skb(skb, dev, &again);
@@@ -3860,9 -3864,9 +3864,9 @@@
  			HARD_TX_LOCK(dev, txq, cpu);
  
  			if (!netif_xmit_stopped(txq)) {
- 				__this_cpu_inc(xmit_recursion);
+ 				dev_xmit_recursion_inc();
  				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
- 				__this_cpu_dec(xmit_recursion);
+ 				dev_xmit_recursion_dec();
  				if (dev_xmit_complete(rc)) {
  					HARD_TX_UNLOCK(dev, txq);
  					goto out;
@@@ -3975,9 -3979,9 +3979,9 @@@ EXPORT_SYMBOL(rps_sock_flow_table)
  u32 rps_cpu_mask __read_mostly;
  EXPORT_SYMBOL(rps_cpu_mask);
  
- struct static_key rps_needed __read_mostly;
+ struct static_key_false rps_needed __read_mostly;
  EXPORT_SYMBOL(rps_needed);
- struct static_key rfs_needed __read_mostly;
+ struct static_key_false rfs_needed __read_mostly;
  EXPORT_SYMBOL(rfs_needed);
  
  static struct rps_dev_flow *
@@@ -4429,7 -4433,7 +4433,7 @@@ void generic_xdp_tx(struct sk_buff *skb
  	bool free_skb = true;
  	int cpu, rc;
  
- 	txq = netdev_pick_tx(dev, skb, NULL);
+ 	txq = netdev_core_pick_tx(dev, skb, NULL);
  	cpu = smp_processor_id();
  	HARD_TX_LOCK(dev, txq, cpu);
  	if (!netif_xmit_stopped(txq)) {
@@@ -4503,7 -4507,7 +4507,7 @@@ static int netif_rx_internal(struct sk_
  	}
  
  #ifdef CONFIG_RPS
- 	if (static_key_false(&rps_needed)) {
+ 	if (static_branch_unlikely(&rps_needed)) {
  		struct rps_dev_flow voidflow, *rflow = &voidflow;
  		int cpu;
  
@@@ -5014,10 -5018,8 +5018,10 @@@ static inline void __netif_receive_skb_
  	if (pt_prev->list_func != NULL)
  		pt_prev->list_func(head, pt_prev, orig_dev);
  	else
 -		list_for_each_entry_safe(skb, next, head, list)
 +		list_for_each_entry_safe(skb, next, head, list) {
 +			skb_list_del_init(skb);
  			pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 +		}
  }
  
  static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
@@@ -5174,7 -5176,7 +5178,7 @@@ static int netif_receive_skb_internal(s
  
  	rcu_read_lock();
  #ifdef CONFIG_RPS
- 	if (static_key_false(&rps_needed)) {
+ 	if (static_branch_unlikely(&rps_needed)) {
  		struct rps_dev_flow voidflow, *rflow = &voidflow;
  		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
  
@@@ -5222,7 -5224,7 +5226,7 @@@ static void netif_receive_skb_list_inte
  
  	rcu_read_lock();
  #ifdef CONFIG_RPS
- 	if (static_key_false(&rps_needed)) {
+ 	if (static_branch_unlikely(&rps_needed)) {
  		list_for_each_entry_safe(skb, next, head, list) {
  			struct rps_dev_flow voidflow, *rflow = &voidflow;
  			int cpu = get_rps_cpu(skb->dev, skb, &rflow);
@@@ -7872,10 -7874,14 +7876,14 @@@ int dev_get_phys_port_name(struct net_d
  			   char *name, size_t len)
  {
  	const struct net_device_ops *ops = dev->netdev_ops;
+ 	int err;
  
- 	if (!ops->ndo_get_phys_port_name)
- 		return -EOPNOTSUPP;
- 	return ops->ndo_get_phys_port_name(dev, name, len);
+ 	if (ops->ndo_get_phys_port_name) {
+ 		err = ops->ndo_get_phys_port_name(dev, name, len);
+ 		if (err != -EOPNOTSUPP)
+ 			return err;
+ 	}
+ 	return devlink_compat_phys_port_name_get(dev, name, len);
  }
  EXPORT_SYMBOL(dev_get_phys_port_name);
  
diff --combined net/core/ethtool.c
index 36ed619faf36,387d67eb75ab..4a593853cbf2
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@@ -136,6 -136,7 +136,7 @@@ static const cha
  phy_tunable_strings[__ETHTOOL_PHY_TUNABLE_COUNT][ETH_GSTRING_LEN] = {
  	[ETHTOOL_ID_UNSPEC]     = "Unspec",
  	[ETHTOOL_PHY_DOWNSHIFT]	= "phy-downshift",
+ 	[ETHTOOL_PHY_FAST_LINK_DOWN] = "phy-fast-link-down",
  };
  
  static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
@@@ -1797,16 -1798,11 +1798,16 @@@ static int ethtool_get_strings(struct n
  	WARN_ON_ONCE(!ret);
  
  	gstrings.len = ret;
 -	data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
 -	if (gstrings.len && !data)
 -		return -ENOMEM;
  
 -	__ethtool_get_strings(dev, gstrings.string_set, data);
 +	if (gstrings.len) {
 +		data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
 +		if (!data)
 +			return -ENOMEM;
 +
 +		__ethtool_get_strings(dev, gstrings.string_set, data);
 +	} else {
 +		data = NULL;
 +	}
  
  	ret = -EFAULT;
  	if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
@@@ -1902,15 -1898,11 +1903,15 @@@ static int ethtool_get_stats(struct net
  		return -EFAULT;
  
  	stats.n_stats = n_stats;
 -	data = vzalloc(array_size(n_stats, sizeof(u64)));
 -	if (n_stats && !data)
 -		return -ENOMEM;
  
 -	ops->get_ethtool_stats(dev, &stats, data);
 +	if (n_stats) {
 +		data = vzalloc(array_size(n_stats, sizeof(u64)));
 +		if (!data)
 +			return -ENOMEM;
 +		ops->get_ethtool_stats(dev, &stats, data);
 +	} else {
 +		data = NULL;
 +	}
  
  	ret = -EFAULT;
  	if (copy_to_user(useraddr, &stats, sizeof(stats)))
@@@ -1950,21 -1942,16 +1951,21 @@@ static int ethtool_get_phy_stats(struc
  		return -EFAULT;
  
  	stats.n_stats = n_stats;
 -	data = vzalloc(array_size(n_stats, sizeof(u64)));
 -	if (n_stats && !data)
 -		return -ENOMEM;
  
 -	if (dev->phydev && !ops->get_ethtool_phy_stats) {
 -		ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
 -		if (ret < 0)
 -			return ret;
 +	if (n_stats) {
 +		data = vzalloc(array_size(n_stats, sizeof(u64)));
 +		if (!data)
 +			return -ENOMEM;
 +
 +		if (dev->phydev && !ops->get_ethtool_phy_stats) {
 +			ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
 +			if (ret < 0)
 +				goto out;
 +		} else {
 +			ops->get_ethtool_phy_stats(dev, &stats, data);
 +		}
  	} else {
 -		ops->get_ethtool_phy_stats(dev, &stats, data);
 +		data = NULL;
  	}
  
  	ret = -EFAULT;
@@@ -2446,6 -2433,7 +2447,7 @@@ static int ethtool_phy_tunable_valid(co
  {
  	switch (tuna->id) {
  	case ETHTOOL_PHY_DOWNSHIFT:
+ 	case ETHTOOL_PHY_FAST_LINK_DOWN:
  		if (tuna->len != sizeof(u8) ||
  		    tuna->type_id != ETHTOOL_TUNABLE_U8)
  			return -EINVAL;
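
The ethtool.c hunks restructure ethtool_get_strings(), ethtool_get_stats() and ethtool_get_phy_stats() so that vzalloc() only happens when the reported count is non-zero and data stays NULL otherwise, with the phy-stats error path now jumping to the common exit instead of returning early. A compact userspace sketch of that "allocate only when n > 0" shape, with invented names standing in for the ethtool ops and copy_to_user():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy n u64 counters to the caller, allocating scratch space only when
 * there is actually something to copy. */
static int get_stats(unsigned int n, unsigned long long *user_buf)
{
	unsigned long long *data = NULL;

	if (n) {
		data = calloc(n, sizeof(*data));
		if (!data)
			return -1;                 /* -ENOMEM in the kernel */
		for (unsigned int i = 0; i < n; i++)
			data[i] = i * 10;          /* stand-in for ops->get_ethtool_stats() */
		memcpy(user_buf, data, n * sizeof(*data)); /* copy_to_user() analogue */
	}

	free(data);                                /* free(NULL) is a no-op */
	return 0;
}

int main(void)
{
	unsigned long long buf[4];

	printf("n=4 -> %d\n", get_stats(4, buf));
	printf("n=0 -> %d\n", get_stats(0, buf));
	return 0;
}
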
diff --combined net/ipv4/tcp_ipv4.c
index 2f8039a26b08,3979939804b7..faa6fa619f59
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@@ -1774,6 -1774,7 +1774,7 @@@ static void tcp_v4_fill_cb(struct sk_bu
  int tcp_v4_rcv(struct sk_buff *skb)
  {
  	struct net *net = dev_net(skb->dev);
+ 	struct sk_buff *skb_to_free;
  	int sdif = inet_sdif(skb);
  	const struct iphdr *iph;
  	const struct tcphdr *th;
@@@ -1905,11 -1906,17 +1906,17 @@@ process
  	tcp_segs_in(tcp_sk(sk), skb);
  	ret = 0;
  	if (!sock_owned_by_user(sk)) {
+ 		skb_to_free = sk->sk_rx_skb_cache;
+ 		sk->sk_rx_skb_cache = NULL;
  		ret = tcp_v4_do_rcv(sk, skb);
- 	} else if (tcp_add_backlog(sk, skb)) {
- 		goto discard_and_relse;
+ 	} else {
+ 		if (tcp_add_backlog(sk, skb))
+ 			goto discard_and_relse;
+ 		skb_to_free = NULL;
  	}
  	bh_unlock_sock(sk);
+ 	if (skb_to_free)
+ 		__kfree_skb(skb_to_free);
  
  put_and_return:
  	if (refcounted)
@@@ -2578,8 -2585,7 +2585,8 @@@ static void __net_exit tcp_sk_exit(stru
  {
  	int cpu;
  
 -	module_put(net->ipv4.tcp_congestion_control->owner);
 +	if (net->ipv4.tcp_congestion_control)
 +		module_put(net->ipv4.tcp_congestion_control->owner);
  
  	for_each_possible_cpu(cpu)
  		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
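
The tcp_ipv4.c hunk adds sk_rx_skb_cache handling to tcp_v4_rcv(): when the socket is processed in softirq context the cached skb is detached before tcp_v4_do_rcv() runs and only freed after bh_unlock_sock(), keeping __kfree_skb() out of the locked section; the tcp_sk_exit() hunk additionally guards the congestion-control module_put() against a NULL pointer. Below is a small sketch of the "detach under the lock, free after dropping it" pattern using a pthread mutex as a stand-in for the socket lock; everything here is illustrative.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct sock {
	pthread_mutex_t lock;
	void *rx_skb_cache;        /* stand-in for sk->sk_rx_skb_cache */
};

static void receive(struct sock *sk)
{
	void *to_free;

	pthread_mutex_lock(&sk->lock);

	/* Detach the cached buffer while holding the lock... */
	to_free = sk->rx_skb_cache;
	sk->rx_skb_cache = NULL;

	/* ... do the real receive work here ... */

	pthread_mutex_unlock(&sk->lock);

	/* ... and only free it once the lock is no longer held. */
	free(to_free);
}

int main(void)
{
	struct sock sk = { .lock = PTHREAD_MUTEX_INITIALIZER,
			   .rx_skb_cache = malloc(64) };

	receive(&sk);
	printf("cache now %p\n", sk.rx_skb_cache);
	return 0;
}
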
diff --combined net/openvswitch/flow_netlink.c
index 4bdf5e3ac208,bd019058fc6f..3563acd5f92e
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@@ -91,6 -91,7 +91,7 @@@ static bool actions_may_change_flow(con
  		case OVS_ACTION_ATTR_SET:
  		case OVS_ACTION_ATTR_SET_MASKED:
  		case OVS_ACTION_ATTR_METER:
+ 		case OVS_ACTION_ATTR_CHECK_PKT_LEN:
  		default:
  			return true;
  		}
@@@ -403,6 -404,7 +404,7 @@@ static const struct ovs_len_tbl ovs_tun
  	[OVS_TUNNEL_KEY_ATTR_IPV6_SRC]      = { .len = sizeof(struct in6_addr) },
  	[OVS_TUNNEL_KEY_ATTR_IPV6_DST]      = { .len = sizeof(struct in6_addr) },
  	[OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS]   = { .len = OVS_ATTR_VARIABLE },
+ 	[OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE]   = { .len = 0 },
  };
  
  static const struct ovs_len_tbl
@@@ -666,6 -668,7 +668,7 @@@ static int ip_tun_from_nlattr(const str
  			      bool log)
  {
  	bool ttl = false, ipv4 = false, ipv6 = false;
+ 	bool info_bridge_mode = false;
  	__be16 tun_flags = 0;
  	int opts_type = 0;
  	struct nlattr *a;
@@@ -782,6 -785,10 +785,10 @@@
  			tun_flags |= TUNNEL_ERSPAN_OPT;
  			opts_type = type;
  			break;
+ 		case OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE:
+ 			info_bridge_mode = true;
+ 			ipv4 = true;
+ 			break;
  		default:
  			OVS_NLERR(log, "Unknown IP tunnel attribute %d",
  				  type);
@@@ -812,16 -819,29 +819,29 @@@
  			OVS_NLERR(log, "IP tunnel dst address not specified");
  			return -EINVAL;
  		}
- 		if (ipv4 && !match->key->tun_key.u.ipv4.dst) {
- 			OVS_NLERR(log, "IPv4 tunnel dst address is zero");
- 			return -EINVAL;
+ 		if (ipv4) {
+ 			if (info_bridge_mode) {
+ 				if (match->key->tun_key.u.ipv4.src ||
+ 				    match->key->tun_key.u.ipv4.dst ||
+ 				    match->key->tun_key.tp_src ||
+ 				    match->key->tun_key.tp_dst ||
+ 				    match->key->tun_key.ttl ||
+ 				    match->key->tun_key.tos ||
+ 				    tun_flags & ~TUNNEL_KEY) {
+ 					OVS_NLERR(log, "IPv4 tun info is not correct");
+ 					return -EINVAL;
+ 				}
+ 			} else if (!match->key->tun_key.u.ipv4.dst) {
+ 				OVS_NLERR(log, "IPv4 tunnel dst address is zero");
+ 				return -EINVAL;
+ 			}
  		}
  		if (ipv6 && ipv6_addr_any(&match->key->tun_key.u.ipv6.dst)) {
  			OVS_NLERR(log, "IPv6 tunnel dst address is zero");
  			return -EINVAL;
  		}
  
- 		if (!ttl) {
+ 		if (!ttl && !info_bridge_mode) {
  			OVS_NLERR(log, "IP tunnel TTL not specified.");
  			return -EINVAL;
  		}
@@@ -850,12 -870,17 +870,17 @@@ static int vxlan_opt_to_nlattr(struct s
  static int __ip_tun_to_nlattr(struct sk_buff *skb,
  			      const struct ip_tunnel_key *output,
  			      const void *tun_opts, int swkey_tun_opts_len,
- 			      unsigned short tun_proto)
+ 			      unsigned short tun_proto, u8 mode)
  {
  	if (output->tun_flags & TUNNEL_KEY &&
  	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id,
  			 OVS_TUNNEL_KEY_ATTR_PAD))
  		return -EMSGSIZE;
+ 
+ 	if (mode & IP_TUNNEL_INFO_BRIDGE)
+ 		return nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE)
+ 		       ? -EMSGSIZE : 0;
+ 
  	switch (tun_proto) {
  	case AF_INET:
  		if (output->u.ipv4.src &&
@@@ -918,7 -943,7 +943,7 @@@
  static int ip_tun_to_nlattr(struct sk_buff *skb,
  			    const struct ip_tunnel_key *output,
  			    const void *tun_opts, int swkey_tun_opts_len,
- 			    unsigned short tun_proto)
+ 			    unsigned short tun_proto, u8 mode)
  {
  	struct nlattr *nla;
  	int err;
@@@ -928,7 -953,7 +953,7 @@@
  		return -EMSGSIZE;
  
  	err = __ip_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len,
- 				 tun_proto);
+ 				 tun_proto, mode);
  	if (err)
  		return err;
  
@@@ -942,7 -967,7 +967,7 @@@ int ovs_nla_put_tunnel_info(struct sk_b
  	return __ip_tun_to_nlattr(skb, &tun_info->key,
  				  ip_tunnel_info_opts(tun_info),
  				  tun_info->options_len,
- 				  ip_tunnel_info_af(tun_info));
+ 				  ip_tunnel_info_af(tun_info), tun_info->mode);
  }
  
  static int encode_vlan_from_nlattrs(struct sw_flow_match *match,
@@@ -1980,7 -2005,7 +2005,7 @@@ static int __ovs_nla_put_key(const stru
  			opts = TUN_METADATA_OPTS(output, swkey->tun_opts_len);
  
  		if (ip_tun_to_nlattr(skb, &output->tun_key, opts,
- 				     swkey->tun_opts_len, swkey->tun_proto))
+ 				     swkey->tun_opts_len, swkey->tun_proto, 0))
  			goto nla_put_failure;
  	}
  
@@@ -2306,14 -2331,14 +2331,14 @@@ static struct nlattr *reserve_sfa_size(
  
  	struct sw_flow_actions *acts;
  	int new_acts_size;
 -	int req_size = NLA_ALIGN(attr_len);
 +	size_t req_size = NLA_ALIGN(attr_len);
  	int next_offset = offsetof(struct sw_flow_actions, actions) +
  					(*sfa)->actions_len;
  
  	if (req_size <= (ksize(*sfa) - next_offset))
  		goto out;
  
 -	new_acts_size = ksize(*sfa) * 2;
 +	new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
  
  	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
  		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
@@@ -2605,6 -2630,8 +2630,8 @@@ static int validate_and_copy_set_tun(co
  	tun_info->mode = IP_TUNNEL_INFO_TX;
  	if (key.tun_proto == AF_INET6)
  		tun_info->mode |= IP_TUNNEL_INFO_IPV6;
+ 	else if (key.tun_proto == AF_INET && key.tun_key.u.ipv4.dst == 0)
+ 		tun_info->mode |= IP_TUNNEL_INFO_BRIDGE;
  	tun_info->key = key.tun_key;
  
  	/* We need to store the options in the action itself since
@@@ -2838,6 -2865,87 +2865,87 @@@ static int validate_userspace(const str
  	return 0;
  }
  
+ static const struct nla_policy cpl_policy[OVS_CHECK_PKT_LEN_ATTR_MAX + 1] = {
+ 	[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN] = {.type = NLA_U16 },
+ 	[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER] = {.type = NLA_NESTED },
+ 	[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL] = {.type = NLA_NESTED },
+ };
+ 
+ static int validate_and_copy_check_pkt_len(struct net *net,
+ 					   const struct nlattr *attr,
+ 					   const struct sw_flow_key *key,
+ 					   struct sw_flow_actions **sfa,
+ 					   __be16 eth_type, __be16 vlan_tci,
+ 					   bool log, bool last)
+ {
+ 	const struct nlattr *acts_if_greater, *acts_if_lesser_eq;
+ 	struct nlattr *a[OVS_CHECK_PKT_LEN_ATTR_MAX + 1];
+ 	struct check_pkt_len_arg arg;
+ 	int nested_acts_start;
+ 	int start, err;
+ 
+ 	err = nla_parse_strict(a, OVS_CHECK_PKT_LEN_ATTR_MAX, nla_data(attr),
+ 			       nla_len(attr), cpl_policy, NULL);
+ 	if (err)
+ 		return err;
+ 
+ 	if (!a[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN] ||
+ 	    !nla_get_u16(a[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN]))
+ 		return -EINVAL;
+ 
+ 	acts_if_lesser_eq = a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL];
+ 	acts_if_greater = a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER];
+ 
+ 	/* Both the nested actions should be present. */
+ 	if (!acts_if_greater || !acts_if_lesser_eq)
+ 		return -EINVAL;
+ 
+ 	/* validation done, copy the nested actions. */
+ 	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_CHECK_PKT_LEN,
+ 					log);
+ 	if (start < 0)
+ 		return start;
+ 
+ 	arg.pkt_len = nla_get_u16(a[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN]);
+ 	arg.exec_for_lesser_equal =
+ 		last || !actions_may_change_flow(acts_if_lesser_eq);
+ 	arg.exec_for_greater =
+ 		last || !actions_may_change_flow(acts_if_greater);
+ 
+ 	err = ovs_nla_add_action(sfa, OVS_CHECK_PKT_LEN_ATTR_ARG, &arg,
+ 				 sizeof(arg), log);
+ 	if (err)
+ 		return err;
+ 
+ 	nested_acts_start = add_nested_action_start(sfa,
+ 		OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL, log);
+ 	if (nested_acts_start < 0)
+ 		return nested_acts_start;
+ 
+ 	err = __ovs_nla_copy_actions(net, acts_if_lesser_eq, key, sfa,
+ 				     eth_type, vlan_tci, log);
+ 
+ 	if (err)
+ 		return err;
+ 
+ 	add_nested_action_end(*sfa, nested_acts_start);
+ 
+ 	nested_acts_start = add_nested_action_start(sfa,
+ 		OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER, log);
+ 	if (nested_acts_start < 0)
+ 		return nested_acts_start;
+ 
+ 	err = __ovs_nla_copy_actions(net, acts_if_greater, key, sfa,
+ 				     eth_type, vlan_tci, log);
+ 
+ 	if (err)
+ 		return err;
+ 
+ 	add_nested_action_end(*sfa, nested_acts_start);
+ 	add_nested_action_end(*sfa, start);
+ 	return 0;
+ }
+ 
  static int copy_action(const struct nlattr *from,
  		       struct sw_flow_actions **sfa, bool log)
  {
@@@ -2884,6 -2992,7 +2992,7 @@@ static int __ovs_nla_copy_actions(struc
  			[OVS_ACTION_ATTR_POP_NSH] = 0,
  			[OVS_ACTION_ATTR_METER] = sizeof(u32),
  			[OVS_ACTION_ATTR_CLONE] = (u32)-1,
+ 			[OVS_ACTION_ATTR_CHECK_PKT_LEN] = (u32)-1,
  		};
  		const struct ovs_action_push_vlan *vlan;
  		int type = nla_type(a);
@@@ -3085,6 -3194,19 +3194,19 @@@
  			break;
  		}
  
+ 		case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
+ 			bool last = nla_is_last(a, rem);
+ 
+ 			err = validate_and_copy_check_pkt_len(net, a, key, sfa,
+ 							      eth_type,
+ 							      vlan_tci, log,
+ 							      last);
+ 			if (err)
+ 				return err;
+ 			skip_copy = true;
+ 			break;
+ 		}
+ 
  		default:
  			OVS_NLERR(log, "Unknown Action type %d", type);
  			return -EINVAL;
@@@ -3183,6 -3305,75 +3305,75 @@@ static int clone_action_to_attr(const s
  	return err;
  }
  
+ static int check_pkt_len_action_to_attr(const struct nlattr *attr,
+ 					struct sk_buff *skb)
+ {
+ 	struct nlattr *start, *ac_start = NULL;
+ 	const struct check_pkt_len_arg *arg;
+ 	const struct nlattr *a, *cpl_arg;
+ 	int err = 0, rem = nla_len(attr);
+ 
+ 	start = nla_nest_start(skb, OVS_ACTION_ATTR_CHECK_PKT_LEN);
+ 	if (!start)
+ 		return -EMSGSIZE;
+ 
+ 	/* The first nested attribute in 'attr' is always
+ 	 * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
+ 	 */
+ 	cpl_arg = nla_data(attr);
+ 	arg = nla_data(cpl_arg);
+ 
+ 	if (nla_put_u16(skb, OVS_CHECK_PKT_LEN_ATTR_PKT_LEN, arg->pkt_len)) {
+ 		err = -EMSGSIZE;
+ 		goto out;
+ 	}
+ 
+ 	/* Second nested attribute in 'attr' is always
+ 	 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
+ 	 */
+ 	a = nla_next(cpl_arg, &rem);
+ 	ac_start =  nla_nest_start(skb,
+ 		OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL);
+ 	if (!ac_start) {
+ 		err = -EMSGSIZE;
+ 		goto out;
+ 	}
+ 
+ 	err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
+ 	if (err) {
+ 		nla_nest_cancel(skb, ac_start);
+ 		goto out;
+ 	} else {
+ 		nla_nest_end(skb, ac_start);
+ 	}
+ 
+ 	/* Third nested attribute in 'attr' is always
+ 	 * OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER.
+ 	 */
+ 	a = nla_next(a, &rem);
+ 	ac_start =  nla_nest_start(skb,
+ 				   OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER);
+ 	if (!ac_start) {
+ 		err = -EMSGSIZE;
+ 		goto out;
+ 	}
+ 
+ 	err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
+ 	if (err) {
+ 		nla_nest_cancel(skb, ac_start);
+ 		goto out;
+ 	} else {
+ 		nla_nest_end(skb, ac_start);
+ 	}
+ 
+ 	nla_nest_end(skb, start);
+ 	return 0;
+ 
+ out:
+ 	nla_nest_cancel(skb, start);
+ 	return err;
+ }
+ 
  static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
  {
  	const struct nlattr *ovs_key = nla_data(a);
@@@ -3202,7 -3393,7 +3393,7 @@@
  		err =  ip_tun_to_nlattr(skb, &tun_info->key,
  					ip_tunnel_info_opts(tun_info),
  					tun_info->options_len,
- 					ip_tunnel_info_af(tun_info));
+ 					ip_tunnel_info_af(tun_info), tun_info->mode);
  		if (err)
  			return err;
  		nla_nest_end(skb, start);
@@@ -3277,6 -3468,12 +3468,12 @@@ int ovs_nla_put_actions(const struct nl
  				return err;
  			break;
  
+ 		case OVS_ACTION_ATTR_CHECK_PKT_LEN:
+ 			err = check_pkt_len_action_to_attr(a, skb);
+ 			if (err)
+ 				return err;
+ 			break;
+ 
  		default:
  			if (nla_put(skb, type, nla_len(a), nla_data(a)))
  				return -EMSGSIZE;
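
The openvswitch hunks introduce the OVS_ACTION_ATTR_CHECK_PKT_LEN action: validate_and_copy_check_pkt_len() requires the length attribute plus both nested action lists, records a struct check_pkt_len_arg with the threshold and per-branch "may modify the flow key" hints, and copies both branches; check_pkt_len_action_to_attr() serializes them back in a fixed order. They also add the OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE flag and its validation. The sketch below only shows the conceptual branch selection; the struct is a simplified copy of the kernel's check_pkt_len_arg and the rest is invented userspace code.

#include <stdbool.h>
#include <stdio.h>

/* Conceptual shape of the check_pkt_len action: a length threshold and two
 * branches, plus flags saying whether each branch can be executed in place
 * because it cannot change the flow key. */
struct check_pkt_len_arg {
	unsigned short pkt_len;
	bool exec_for_lesser_equal;
	bool exec_for_greater;
};

static const char *pick_branch(const struct check_pkt_len_arg *arg,
			       unsigned int skb_len)
{
	return skb_len <= arg->pkt_len ? "actions_if_lesser_equal"
				       : "actions_if_greater";
}

int main(void)
{
	struct check_pkt_len_arg arg = {
		.pkt_len = 1500,
		.exec_for_lesser_equal = true,
		.exec_for_greater = false,
	};

	printf("590-byte packet  -> %s\n", pick_branch(&arg, 590));
	printf("9000-byte packet -> %s\n", pick_branch(&arg, 9000));
	return 0;
}
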
diff --combined net/tls/tls_sw.c
index 20b191227969,4f821edeeae6..4741edf4bb1e
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@@ -42,8 -42,6 +42,6 @@@
  #include <net/strparser.h>
  #include <net/tls.h>
  
- #define MAX_IV_SIZE	TLS_CIPHER_AES_GCM_128_IV_SIZE
- 
  static int __skb_nsg(struct sk_buff *skb, int offset, int len,
                       unsigned int recursion_level)
  {
@@@ -225,7 -223,7 +223,7 @@@ static int tls_do_decryption(struct soc
  		/* Using skb->sk to push sk through to crypto async callback
  		 * handler. This allows propagating errors up to the socket
  		 * if needed. It _must_ be cleared in the async handler
- 		 * before kfree_skb is called. We _know_ skb->sk is NULL
+ 		 * before consume_skb is called. We _know_ skb->sk is NULL
  		 * because it is a clone from strparser.
  		 */
  		skb->sk = sk;
@@@ -479,11 -477,18 +477,18 @@@ static int tls_do_encryption(struct soc
  	struct tls_rec *rec = ctx->open_rec;
  	struct sk_msg *msg_en = &rec->msg_encrypted;
  	struct scatterlist *sge = sk_msg_elem(msg_en, start);
- 	int rc;
+ 	int rc, iv_offset = 0;
+ 
+ 	/* For CCM based ciphers, first byte of IV is a constant */
+ 	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
+ 		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
+ 		iv_offset = 1;
+ 	}
+ 
+ 	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
+ 	       prot->iv_size + prot->salt_size);
  
- 	memcpy(rec->iv_data, tls_ctx->tx.iv, sizeof(rec->iv_data));
- 	xor_iv_with_seq(prot->version, rec->iv_data,
- 			tls_ctx->tx.rec_seq);
+ 	xor_iv_with_seq(prot->version, rec->iv_data, tls_ctx->tx.rec_seq);
  
  	sge->offset += prot->prepend_size;
  	sge->length -= prot->prepend_size;
@@@ -1344,6 -1349,7 +1349,7 @@@ static int decrypt_internal(struct soc
  	struct scatterlist *sgout = NULL;
  	const int data_len = rxm->full_len - prot->overhead_size +
  			     prot->tail_size;
+ 	int iv_offset = 0;
  
  	if (*zc && (out_iov || out_sg)) {
  		if (out_iov)
@@@ -1386,18 -1392,25 +1392,25 @@@
  	aad = (u8 *)(sgout + n_sgout);
  	iv = aad + prot->aad_size;
  
+ 	/* For CCM based ciphers, first byte of nonce+iv is always '2' */
+ 	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
+ 		iv[0] = 2;
+ 		iv_offset = 1;
+ 	}
+ 
  	/* Prepare IV */
  	err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
- 			    iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
+ 			    iv + iv_offset + prot->salt_size,
  			    prot->iv_size);
  	if (err < 0) {
  		kfree(mem);
  		return err;
  	}
  	if (prot->version == TLS_1_3_VERSION)
- 		memcpy(iv, tls_ctx->rx.iv, crypto_aead_ivsize(ctx->aead_recv));
+ 		memcpy(iv + iv_offset, tls_ctx->rx.iv,
+ 		       crypto_aead_ivsize(ctx->aead_recv));
  	else
- 		memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
+ 		memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
  
  	xor_iv_with_seq(prot->version, iv, tls_ctx->rx.rec_seq);
  
@@@ -1484,8 -1497,6 +1497,8 @@@ static int decrypt_skb_update(struct so
  
  				return err;
  			}
 +		} else {
 +			*zc = false;
  		}
  
  		rxm->full_len -= padding_length(ctx, tls_ctx, skb);
@@@ -1524,7 -1535,7 +1537,7 @@@ static bool tls_sw_advance_skb(struct s
  			rxm->full_len -= len;
  			return false;
  		}
- 		kfree_skb(skb);
+ 		consume_skb(skb);
  	}
  
  	/* Finished with message */
@@@ -1633,7 -1644,7 +1646,7 @@@ static int process_rx_list(struct tls_s
  
  		if (!is_peek) {
  			skb_unlink(skb, &ctx->rx_list);
- 			kfree_skb(skb);
+ 			consume_skb(skb);
  		}
  
  		skb = next_skb;
@@@ -2154,14 -2165,15 +2167,15 @@@ int tls_set_sw_offload(struct sock *sk
  	struct tls_crypto_info *crypto_info;
  	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
  	struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
+ 	struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
  	struct tls_sw_context_tx *sw_ctx_tx = NULL;
  	struct tls_sw_context_rx *sw_ctx_rx = NULL;
  	struct cipher_context *cctx;
  	struct crypto_aead **aead;
  	struct strp_callbacks cb;
- 	u16 nonce_size, tag_size, iv_size, rec_seq_size;
+ 	u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
  	struct crypto_tfm *tfm;
- 	char *iv, *rec_seq, *key, *salt;
+ 	char *iv, *rec_seq, *key, *salt, *cipher_name;
  	size_t keysize;
  	int rc = 0;
  
@@@ -2226,6 -2238,8 +2240,8 @@@
  		keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
  		key = gcm_128_info->key;
  		salt = gcm_128_info->salt;
+ 		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
+ 		cipher_name = "gcm(aes)";
  		break;
  	}
  	case TLS_CIPHER_AES_GCM_256: {
@@@ -2241,6 -2255,25 +2257,25 @@@
  		keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
  		key = gcm_256_info->key;
  		salt = gcm_256_info->salt;
+ 		salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
+ 		cipher_name = "gcm(aes)";
+ 		break;
+ 	}
+ 	case TLS_CIPHER_AES_CCM_128: {
+ 		nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
+ 		tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
+ 		iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
+ 		iv = ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->iv;
+ 		rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
+ 		rec_seq =
+ 		((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->rec_seq;
+ 		ccm_128_info =
+ 		(struct tls12_crypto_info_aes_ccm_128 *)crypto_info;
+ 		keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
+ 		key = ccm_128_info->key;
+ 		salt = ccm_128_info->salt;
+ 		salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
+ 		cipher_name = "ccm(aes)";
  		break;
  	}
  	default:
@@@ -2270,16 -2303,16 +2305,16 @@@
  	prot->overhead_size = prot->prepend_size +
  			      prot->tag_size + prot->tail_size;
  	prot->iv_size = iv_size;
- 	cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
- 			   GFP_KERNEL);
+ 	prot->salt_size = salt_size;
+ 	cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
  	if (!cctx->iv) {
  		rc = -ENOMEM;
  		goto free_priv;
  	}
  	/* Note: 128 & 256 bit salt are the same size */
- 	memcpy(cctx->iv, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
- 	memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
  	prot->rec_seq_size = rec_seq_size;
+ 	memcpy(cctx->iv, salt, salt_size);
+ 	memcpy(cctx->iv + salt_size, iv, iv_size);
  	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
  	if (!cctx->rec_seq) {
  		rc = -ENOMEM;
@@@ -2287,7 -2320,7 +2322,7 @@@
  	}
  
  	if (!*aead) {
- 		*aead = crypto_alloc_aead("gcm(aes)", 0, 0);
+ 		*aead = crypto_alloc_aead(cipher_name, 0, 0);
  		if (IS_ERR(*aead)) {
  			rc = PTR_ERR(*aead);
  			*aead = NULL;
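
The tls_sw.c hunks add AES-CCM-128 as a software TLS record cipher: salt size and AEAD name become per-cipher properties ("ccm(aes)" vs "gcm(aes)"), and both the encrypt and decrypt paths reserve the first nonce byte for the CCM B0 constant (TLS_AES_CCM_IV_B0_BYTE, value 2) before laying out salt, per-record IV and the record sequence number. The following is a userspace sketch of that nonce layout under the usual TLS 1.2 sizes (4-byte salt, 8-byte explicit IV); the helper is invented and the sequence-number mixing is shown TLS 1.3 style, so treat it as an illustration rather than the exact kernel logic.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SALT_SIZE 4
#define IV_SIZE   8

/* Build the AEAD nonce for one TLS record.  For CCM the first byte is the
 * fixed B0/counter flag byte (2); for GCM there is no such prefix. */
static size_t build_nonce(uint8_t *nonce, int is_ccm,
			  const uint8_t *salt, const uint8_t *iv,
			  uint64_t rec_seq)
{
	size_t off = 0;

	if (is_ccm)
		nonce[off++] = 2;               /* TLS_AES_CCM_IV_B0_BYTE */

	memcpy(nonce + off, salt, SALT_SIZE);
	memcpy(nonce + off + SALT_SIZE, iv, IV_SIZE);

	/* Mix the record sequence number into the explicit IV part. */
	for (int i = 0; i < 8; i++)
		nonce[off + SALT_SIZE + i] ^= (uint8_t)(rec_seq >> (56 - 8 * i));

	return off + SALT_SIZE + IV_SIZE;
}

int main(void)
{
	uint8_t salt[SALT_SIZE] = { 0xde, 0xad, 0xbe, 0xef };
	uint8_t iv[IV_SIZE] = { 0 };
	uint8_t nonce[1 + SALT_SIZE + IV_SIZE];
	size_t len = build_nonce(nonce, 1, salt, iv, 7);

	for (size_t i = 0; i < len; i++)
		printf("%02x", nonce[i]);
	printf("\n");
	return 0;
}
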
diff --combined net/wireless/nl80211.c
index 47e30a58566c,33408ba1d7ee..e7ee18ab6cb7
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@@ -13368,7 -13368,6 +13368,6 @@@ static const struct genl_ops nl80211_op
  		.doit = nl80211_get_wiphy,
  		.dumpit = nl80211_dump_wiphy,
  		.done = nl80211_dump_wiphy_done,
- 		.policy = nl80211_policy,
  		/* can be retrieved by unprivileged users */
  		.internal_flags = NL80211_FLAG_NEED_WIPHY |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13376,7 -13375,6 +13375,6 @@@
  	{
  		.cmd = NL80211_CMD_SET_WIPHY,
  		.doit = nl80211_set_wiphy,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_RTNL,
  	},
@@@ -13384,7 -13382,6 +13382,6 @@@
  		.cmd = NL80211_CMD_GET_INTERFACE,
  		.doit = nl80211_get_interface,
  		.dumpit = nl80211_dump_interface,
- 		.policy = nl80211_policy,
  		/* can be retrieved by unprivileged users */
  		.internal_flags = NL80211_FLAG_NEED_WDEV |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13392,7 -13389,6 +13389,6 @@@
  	{
  		.cmd = NL80211_CMD_SET_INTERFACE,
  		.doit = nl80211_set_interface,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13400,7 -13396,6 +13396,6 @@@
  	{
  		.cmd = NL80211_CMD_NEW_INTERFACE,
  		.doit = nl80211_new_interface,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WIPHY |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13408,7 -13403,6 +13403,6 @@@
  	{
  		.cmd = NL80211_CMD_DEL_INTERFACE,
  		.doit = nl80211_del_interface,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WDEV |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13416,7 -13410,6 +13410,6 @@@
  	{
  		.cmd = NL80211_CMD_GET_KEY,
  		.doit = nl80211_get_key,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13424,7 -13417,6 +13417,6 @@@
  	{
  		.cmd = NL80211_CMD_SET_KEY,
  		.doit = nl80211_set_key,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL |
@@@ -13433,7 -13425,6 +13425,6 @@@
  	{
  		.cmd = NL80211_CMD_NEW_KEY,
  		.doit = nl80211_new_key,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL |
@@@ -13442,14 -13433,12 +13433,12 @@@
  	{
  		.cmd = NL80211_CMD_DEL_KEY,
  		.doit = nl80211_del_key,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
  	},
  	{
  		.cmd = NL80211_CMD_SET_BEACON,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.doit = nl80211_set_beacon,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
@@@ -13457,7 -13446,6 +13446,6 @@@
  	},
  	{
  		.cmd = NL80211_CMD_START_AP,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.doit = nl80211_start_ap,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
@@@ -13465,7 -13453,6 +13453,6 @@@
  	},
  	{
  		.cmd = NL80211_CMD_STOP_AP,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.doit = nl80211_stop_ap,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
@@@ -13475,14 -13462,12 +13462,12 @@@
  		.cmd = NL80211_CMD_GET_STATION,
  		.doit = nl80211_get_station,
  		.dumpit = nl80211_dump_station,
- 		.policy = nl80211_policy,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV |
  				  NL80211_FLAG_NEED_RTNL,
  	},
  	{
  		.cmd = NL80211_CMD_SET_STATION,
  		.doit = nl80211_set_station,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13490,7 -13475,6 +13475,6 @@@
  	{
  		.cmd = NL80211_CMD_NEW_STATION,
  		.doit = nl80211_new_station,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13498,7 -13482,6 +13482,6 @@@
  	{
  		.cmd = NL80211_CMD_DEL_STATION,
  		.doit = nl80211_del_station,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13507,7 -13490,6 +13490,6 @@@
  		.cmd = NL80211_CMD_GET_MPATH,
  		.doit = nl80211_get_mpath,
  		.dumpit = nl80211_dump_mpath,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13516,7 -13498,6 +13498,6 @@@
  		.cmd = NL80211_CMD_GET_MPP,
  		.doit = nl80211_get_mpp,
  		.dumpit = nl80211_dump_mpp,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13524,7 -13505,6 +13505,6 @@@
  	{
  		.cmd = NL80211_CMD_SET_MPATH,
  		.doit = nl80211_set_mpath,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13532,7 -13512,6 +13512,6 @@@
  	{
  		.cmd = NL80211_CMD_NEW_MPATH,
  		.doit = nl80211_new_mpath,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13540,7 -13519,6 +13519,6 @@@
  	{
  		.cmd = NL80211_CMD_DEL_MPATH,
  		.doit = nl80211_del_mpath,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13548,7 -13526,6 +13526,6 @@@
  	{
  		.cmd = NL80211_CMD_SET_BSS,
  		.doit = nl80211_set_bss,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13557,7 -13534,6 +13534,6 @@@
  		.cmd = NL80211_CMD_GET_REG,
  		.doit = nl80211_get_reg_do,
  		.dumpit = nl80211_get_reg_dump,
- 		.policy = nl80211_policy,
  		.internal_flags = NL80211_FLAG_NEED_RTNL,
  		/* can be retrieved by unprivileged users */
  	},
@@@ -13565,7 -13541,6 +13541,6 @@@
  	{
  		.cmd = NL80211_CMD_SET_REG,
  		.doit = nl80211_set_reg,
- 		.policy = nl80211_policy,
  		.flags = GENL_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_RTNL,
  	},
@@@ -13573,19 -13548,16 +13548,16 @@@
  	{
  		.cmd = NL80211_CMD_REQ_SET_REG,
  		.doit = nl80211_req_set_reg,
- 		.policy = nl80211_policy,
  		.flags = GENL_ADMIN_PERM,
  	},
  	{
  		.cmd = NL80211_CMD_RELOAD_REGDB,
  		.doit = nl80211_reload_regdb,
- 		.policy = nl80211_policy,
  		.flags = GENL_ADMIN_PERM,
  	},
  	{
  		.cmd = NL80211_CMD_GET_MESH_CONFIG,
  		.doit = nl80211_get_mesh_config,
- 		.policy = nl80211_policy,
  		/* can be retrieved by unprivileged users */
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13593,7 -13565,6 +13565,6 @@@
  	{
  		.cmd = NL80211_CMD_SET_MESH_CONFIG,
  		.doit = nl80211_update_mesh_config,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13601,7 -13572,6 +13572,6 @@@
  	{
  		.cmd = NL80211_CMD_TRIGGER_SCAN,
  		.doit = nl80211_trigger_scan,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13609,20 -13579,17 +13579,17 @@@
  	{
  		.cmd = NL80211_CMD_ABORT_SCAN,
  		.doit = nl80211_abort_scan,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
  	},
  	{
  		.cmd = NL80211_CMD_GET_SCAN,
- 		.policy = nl80211_policy,
  		.dumpit = nl80211_dump_scan,
  	},
  	{
  		.cmd = NL80211_CMD_START_SCHED_SCAN,
  		.doit = nl80211_start_sched_scan,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13630,7 -13597,6 +13597,6 @@@
  	{
  		.cmd = NL80211_CMD_STOP_SCHED_SCAN,
  		.doit = nl80211_stop_sched_scan,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13638,7 -13604,6 +13604,6 @@@
  	{
  		.cmd = NL80211_CMD_AUTHENTICATE,
  		.doit = nl80211_authenticate,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL |
@@@ -13647,16 -13612,13 +13612,14 @@@
  	{
  		.cmd = NL80211_CMD_ASSOCIATE,
  		.doit = nl80211_associate,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 -				  NL80211_FLAG_NEED_RTNL,
 +				  NL80211_FLAG_NEED_RTNL |
 +				  NL80211_FLAG_CLEAR_SKB,
  	},
  	{
  		.cmd = NL80211_CMD_DEAUTHENTICATE,
  		.doit = nl80211_deauthenticate,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13664,7 -13626,6 +13627,6 @@@
  	{
  		.cmd = NL80211_CMD_DISASSOCIATE,
  		.doit = nl80211_disassociate,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13672,7 -13633,6 +13634,6 @@@
  	{
  		.cmd = NL80211_CMD_JOIN_IBSS,
  		.doit = nl80211_join_ibss,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13680,7 -13640,6 +13641,6 @@@
  	{
  		.cmd = NL80211_CMD_LEAVE_IBSS,
  		.doit = nl80211_leave_ibss,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13690,7 -13649,6 +13650,6 @@@
  		.cmd = NL80211_CMD_TESTMODE,
  		.doit = nl80211_testmode_do,
  		.dumpit = nl80211_testmode_dump,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WIPHY |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13699,25 -13657,20 +13658,22 @@@
  	{
  		.cmd = NL80211_CMD_CONNECT,
  		.doit = nl80211_connect,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 -				  NL80211_FLAG_NEED_RTNL,
 +				  NL80211_FLAG_NEED_RTNL |
 +				  NL80211_FLAG_CLEAR_SKB,
  	},
  	{
  		.cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS,
  		.doit = nl80211_update_connect_params,
- 		.policy = nl80211_policy,
  		.flags = GENL_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 -				  NL80211_FLAG_NEED_RTNL,
 +				  NL80211_FLAG_NEED_RTNL |
 +				  NL80211_FLAG_CLEAR_SKB,
  	},
  	{
  		.cmd = NL80211_CMD_DISCONNECT,
  		.doit = nl80211_disconnect,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13725,29 -13678,24 +13681,25 @@@
  	{
  		.cmd = NL80211_CMD_SET_WIPHY_NETNS,
  		.doit = nl80211_wiphy_netns,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WIPHY |
  				  NL80211_FLAG_NEED_RTNL,
  	},
  	{
  		.cmd = NL80211_CMD_GET_SURVEY,
- 		.policy = nl80211_policy,
  		.dumpit = nl80211_dump_survey,
  	},
  	{
  		.cmd = NL80211_CMD_SET_PMKSA,
  		.doit = nl80211_setdel_pmksa,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 -				  NL80211_FLAG_NEED_RTNL,
 +				  NL80211_FLAG_NEED_RTNL |
 +				  NL80211_FLAG_CLEAR_SKB,
  	},
  	{
  		.cmd = NL80211_CMD_DEL_PMKSA,
  		.doit = nl80211_setdel_pmksa,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13755,7 -13703,6 +13707,6 @@@
  	{
  		.cmd = NL80211_CMD_FLUSH_PMKSA,
  		.doit = nl80211_flush_pmksa,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13763,7 -13710,6 +13714,6 @@@
  	{
  		.cmd = NL80211_CMD_REMAIN_ON_CHANNEL,
  		.doit = nl80211_remain_on_channel,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13771,7 -13717,6 +13721,6 @@@
  	{
  		.cmd = NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
  		.doit = nl80211_cancel_remain_on_channel,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13779,7 -13724,6 +13728,6 @@@
  	{
  		.cmd = NL80211_CMD_SET_TX_BITRATE_MASK,
  		.doit = nl80211_set_tx_bitrate_mask,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13787,7 -13731,6 +13735,6 @@@
  	{
  		.cmd = NL80211_CMD_REGISTER_FRAME,
  		.doit = nl80211_register_mgmt,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WDEV |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13795,7 -13738,6 +13742,6 @@@
  	{
  		.cmd = NL80211_CMD_FRAME,
  		.doit = nl80211_tx_mgmt,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13803,7 -13745,6 +13749,6 @@@
  	{
  		.cmd = NL80211_CMD_FRAME_WAIT_CANCEL,
  		.doit = nl80211_tx_mgmt_cancel_wait,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13811,7 -13752,6 +13756,6 @@@
  	{
  		.cmd = NL80211_CMD_SET_POWER_SAVE,
  		.doit = nl80211_set_power_save,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13819,7 -13759,6 +13763,6 @@@
  	{
  		.cmd = NL80211_CMD_GET_POWER_SAVE,
  		.doit = nl80211_get_power_save,
- 		.policy = nl80211_policy,
  		/* can be retrieved by unprivileged users */
  		.internal_flags = NL80211_FLAG_NEED_NETDEV |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13827,7 -13766,6 +13770,6 @@@
  	{
  		.cmd = NL80211_CMD_SET_CQM,
  		.doit = nl80211_set_cqm,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13835,7 -13773,6 +13777,6 @@@
  	{
  		.cmd = NL80211_CMD_SET_CHANNEL,
  		.doit = nl80211_set_channel,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13843,7 -13780,6 +13784,6 @@@
  	{
  		.cmd = NL80211_CMD_SET_WDS_PEER,
  		.doit = nl80211_set_wds_peer,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13851,7 -13787,6 +13791,6 @@@
  	{
  		.cmd = NL80211_CMD_JOIN_MESH,
  		.doit = nl80211_join_mesh,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13859,7 -13794,6 +13798,6 @@@
  	{
  		.cmd = NL80211_CMD_LEAVE_MESH,
  		.doit = nl80211_leave_mesh,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13867,7 -13801,6 +13805,6 @@@
  	{
  		.cmd = NL80211_CMD_JOIN_OCB,
  		.doit = nl80211_join_ocb,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13875,7 -13808,6 +13812,6 @@@
  	{
  		.cmd = NL80211_CMD_LEAVE_OCB,
  		.doit = nl80211_leave_ocb,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13884,7 -13816,6 +13820,6 @@@
  	{
  		.cmd = NL80211_CMD_GET_WOWLAN,
  		.doit = nl80211_get_wowlan,
- 		.policy = nl80211_policy,
  		/* can be retrieved by unprivileged users */
  		.internal_flags = NL80211_FLAG_NEED_WIPHY |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13892,7 -13823,6 +13827,6 @@@
  	{
  		.cmd = NL80211_CMD_SET_WOWLAN,
  		.doit = nl80211_set_wowlan,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WIPHY |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13901,7 -13831,6 +13835,6 @@@
  	{
  		.cmd = NL80211_CMD_SET_REKEY_OFFLOAD,
  		.doit = nl80211_set_rekey_data,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL |
@@@ -13910,7 -13839,6 +13843,6 @@@
  	{
  		.cmd = NL80211_CMD_TDLS_MGMT,
  		.doit = nl80211_tdls_mgmt,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13918,7 -13846,6 +13850,6 @@@
  	{
  		.cmd = NL80211_CMD_TDLS_OPER,
  		.doit = nl80211_tdls_oper,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13926,7 -13853,6 +13857,6 @@@
  	{
  		.cmd = NL80211_CMD_UNEXPECTED_FRAME,
  		.doit = nl80211_register_unexpected_frame,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13934,7 -13860,6 +13864,6 @@@
  	{
  		.cmd = NL80211_CMD_PROBE_CLIENT,
  		.doit = nl80211_probe_client,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13942,7 -13867,6 +13871,6 @@@
  	{
  		.cmd = NL80211_CMD_REGISTER_BEACONS,
  		.doit = nl80211_register_beacons,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WIPHY |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13950,7 -13874,6 +13878,6 @@@
  	{
  		.cmd = NL80211_CMD_SET_NOACK_MAP,
  		.doit = nl80211_set_noack_map,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13958,7 -13881,6 +13885,6 @@@
  	{
  		.cmd = NL80211_CMD_START_P2P_DEVICE,
  		.doit = nl80211_start_p2p_device,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WDEV |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13966,7 -13888,6 +13892,6 @@@
  	{
  		.cmd = NL80211_CMD_STOP_P2P_DEVICE,
  		.doit = nl80211_stop_p2p_device,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13974,7 -13895,6 +13899,6 @@@
  	{
  		.cmd = NL80211_CMD_START_NAN,
  		.doit = nl80211_start_nan,
- 		.policy = nl80211_policy,
  		.flags = GENL_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WDEV |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13982,7 -13902,6 +13906,6 @@@
  	{
  		.cmd = NL80211_CMD_STOP_NAN,
  		.doit = nl80211_stop_nan,
- 		.policy = nl80211_policy,
  		.flags = GENL_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13990,7 -13909,6 +13913,6 @@@
  	{
  		.cmd = NL80211_CMD_ADD_NAN_FUNCTION,
  		.doit = nl80211_nan_add_func,
- 		.policy = nl80211_policy,
  		.flags = GENL_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -13998,7 -13916,6 +13920,6 @@@
  	{
  		.cmd = NL80211_CMD_DEL_NAN_FUNCTION,
  		.doit = nl80211_nan_del_func,
- 		.policy = nl80211_policy,
  		.flags = GENL_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -14006,7 -13923,6 +13927,6 @@@
  	{
  		.cmd = NL80211_CMD_CHANGE_NAN_CONFIG,
  		.doit = nl80211_nan_change_config,
- 		.policy = nl80211_policy,
  		.flags = GENL_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -14014,7 -13930,6 +13934,6 @@@
  	{
  		.cmd = NL80211_CMD_SET_MCAST_RATE,
  		.doit = nl80211_set_mcast_rate,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -14022,7 -13937,6 +13941,6 @@@
  	{
  		.cmd = NL80211_CMD_SET_MAC_ACL,
  		.doit = nl80211_set_mac_acl,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -14030,7 -13944,6 +13948,6 @@@
  	{
  		.cmd = NL80211_CMD_RADAR_DETECT,
  		.doit = nl80211_start_radar_detection,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -14038,12 -13951,10 +13955,10 @@@
  	{
  		.cmd = NL80211_CMD_GET_PROTOCOL_FEATURES,
  		.doit = nl80211_get_protocol_features,
- 		.policy = nl80211_policy,
  	},
  	{
  		.cmd = NL80211_CMD_UPDATE_FT_IES,
  		.doit = nl80211_update_ft_ies,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -14051,7 -13962,6 +13966,6 @@@
  	{
  		.cmd = NL80211_CMD_CRIT_PROTOCOL_START,
  		.doit = nl80211_crit_protocol_start,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -14059,7 -13969,6 +13973,6 @@@
  	{
  		.cmd = NL80211_CMD_CRIT_PROTOCOL_STOP,
  		.doit = nl80211_crit_protocol_stop,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -14067,14 -13976,12 +13980,12 @@@
  	{
  		.cmd = NL80211_CMD_GET_COALESCE,
  		.doit = nl80211_get_coalesce,
- 		.policy = nl80211_policy,
  		.internal_flags = NL80211_FLAG_NEED_WIPHY |
  				  NL80211_FLAG_NEED_RTNL,
  	},
  	{
  		.cmd = NL80211_CMD_SET_COALESCE,
  		.doit = nl80211_set_coalesce,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WIPHY |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -14082,7 -13989,6 +13993,6 @@@
  	{
  		.cmd = NL80211_CMD_CHANNEL_SWITCH,
  		.doit = nl80211_channel_switch,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -14091,16 -13997,13 +14001,14 @@@
  		.cmd = NL80211_CMD_VENDOR,
  		.doit = nl80211_vendor_cmd,
  		.dumpit = nl80211_vendor_cmd_dump,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WIPHY |
 -				  NL80211_FLAG_NEED_RTNL,
 +				  NL80211_FLAG_NEED_RTNL |
 +				  NL80211_FLAG_CLEAR_SKB,
  	},
  	{
  		.cmd = NL80211_CMD_SET_QOS_MAP,
  		.doit = nl80211_set_qos_map,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -14108,7 -14011,6 +14016,6 @@@
  	{
  		.cmd = NL80211_CMD_ADD_TX_TS,
  		.doit = nl80211_add_tx_ts,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -14116,7 -14018,6 +14023,6 @@@
  	{
  		.cmd = NL80211_CMD_DEL_TX_TS,
  		.doit = nl80211_del_tx_ts,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -14124,7 -14025,6 +14030,6 @@@
  	{
  		.cmd = NL80211_CMD_TDLS_CHANNEL_SWITCH,
  		.doit = nl80211_tdls_channel_switch,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -14132,7 -14032,6 +14037,6 @@@
  	{
  		.cmd = NL80211_CMD_TDLS_CANCEL_CHANNEL_SWITCH,
  		.doit = nl80211_tdls_cancel_channel_switch,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -14140,7 -14039,6 +14044,6 @@@
  	{
  		.cmd = NL80211_CMD_SET_MULTICAST_TO_UNICAST,
  		.doit = nl80211_set_multicast_to_unicast,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -14148,22 -14046,18 +14051,19 @@@
  	{
  		.cmd = NL80211_CMD_SET_PMK,
  		.doit = nl80211_set_pmk,
- 		.policy = nl80211_policy,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 -				  NL80211_FLAG_NEED_RTNL,
 +				  NL80211_FLAG_NEED_RTNL |
 +				  NL80211_FLAG_CLEAR_SKB,
  	},
  	{
  		.cmd = NL80211_CMD_DEL_PMK,
  		.doit = nl80211_del_pmk,
- 		.policy = nl80211_policy,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
  	},
  	{
  		.cmd = NL80211_CMD_EXTERNAL_AUTH,
  		.doit = nl80211_external_auth,
- 		.policy = nl80211_policy,
  		.flags = GENL_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -14171,7 -14065,6 +14071,6 @@@
  	{
  		.cmd = NL80211_CMD_CONTROL_PORT_FRAME,
  		.doit = nl80211_tx_control_port,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -14179,14 -14072,12 +14078,12 @@@
  	{
  		.cmd = NL80211_CMD_GET_FTM_RESPONDER_STATS,
  		.doit = nl80211_get_ftm_responder_stats,
- 		.policy = nl80211_policy,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV |
  				  NL80211_FLAG_NEED_RTNL,
  	},
  	{
  		.cmd = NL80211_CMD_PEER_MEASUREMENT_START,
  		.doit = nl80211_pmsr_start,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -14194,7 -14085,6 +14091,6 @@@
  	{
  		.cmd = NL80211_CMD_NOTIFY_RADAR,
  		.doit = nl80211_notify_radar_detection,
- 		.policy = nl80211_policy,
  		.flags = GENL_UNS_ADMIN_PERM,
  		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
  				  NL80211_FLAG_NEED_RTNL,
@@@ -14206,6 -14096,7 +14102,7 @@@ static struct genl_family nl80211_fam _
  	.hdrsize = 0,			/* no private header */
  	.version = 1,			/* no particular meaning now */
  	.maxattr = NL80211_ATTR_MAX,
+ 	.policy = nl80211_policy,
  	.netnsok = true,
  	.pre_doit = nl80211_pre_doit,
  	.post_doit = nl80211_post_doit,

-- 
LinuxNextTracking


More information about the linux-merge mailing list