[linux-next] LinuxNextTracking branch, master, updated. next-20190408

batman at open-mesh.org batman at open-mesh.org
Tue Apr 9 00:18:26 CEST 2019


The following commit has been merged in the master branch:
commit f83f7151950dd9e0f6b4a1a405bf5e55c5294e4d
Merge: 8f4043f1253292495dbf9c8be0c1b07b4b9902b7 7f46774c6480174eb869a3c15167eafac467a6af
Author: David S. Miller <davem at davemloft.net>
Date:   Fri Apr 5 14:14:19 2019 -0700

    Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
    
    Minor comment merge conflict in mlx5.
    
    A staging driver had a fixup due to the skb->xmit_more changes
    in 'net-next', but the driver itself was removed in 'net'.
    
    Signed-off-by: David S. Miller <davem at davemloft.net>
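
For reference, the skb->xmit_more conversion behind that fixup is the
same tree-wide pattern applied in several hunks below (fm10k, igb,
netvsc): instead of reading the per-skb flag, drivers now ask the stack
through the netdev_xmit_more() helper. A minimal before/after sketch,
using the tail-bump code from the Intel hunks in this merge (the
removed staging driver's hunk is not shown here, so the ring variables
are the Intel ones):

	/* old: xmit_more was a per-skb flag set by the core stack */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more)
		writel(i, tx_ring->tail);	/* notify HW of packet */

	/* new: the same hint is queried through a helper instead */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);	/* notify HW of packet */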

diff --combined MAINTAINERS
index c1e2f4070aa5,6771bd784f5f..cdb088103b2e
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -2356,7 -2356,7 +2356,7 @@@ F:	arch/arm/mm/cache-uniphier.
  F:	arch/arm64/boot/dts/socionext/uniphier*
  F:	drivers/bus/uniphier-system-bus.c
  F:	drivers/clk/uniphier/
- F:	drivers/dmaengine/uniphier-mdmac.c
+ F:	drivers/dma/uniphier-mdmac.c
  F:	drivers/gpio/gpio-uniphier.c
  F:	drivers/i2c/busses/i2c-uniphier*
  F:	drivers/irqchip/irq-uniphier-aidet.c
@@@ -2793,13 -2793,10 +2793,13 @@@ M:	Simon Wunderlich <sw at simonwunderlich
  M:	Antonio Quartulli <a at unstable.cc>
  L:	b.a.t.m.a.n at lists.open-mesh.org (moderated for non-subscribers)
  W:	https://www.open-mesh.org/
 +B:	https://www.open-mesh.org/projects/batman-adv/issues
 +C:	irc://chat.freenode.net/batman
  Q:	https://patchwork.open-mesh.org/project/batman/list/
 +T:	git https://git.open-mesh.org/linux-merge.git
  S:	Maintained
 -F:	Documentation/ABI/testing/sysfs-class-net-batman-adv
 -F:	Documentation/ABI/testing/sysfs-class-net-mesh
 +F:	Documentation/ABI/obsolete/sysfs-class-net-batman-adv
 +F:	Documentation/ABI/obsolete/sysfs-class-net-mesh
  F:	Documentation/networking/batman-adv.rst
  F:	include/uapi/linux/batadv_packet.h
  F:	include/uapi/linux/batman_adv.h
@@@ -4132,7 -4129,7 +4132,7 @@@ F:	drivers/cpuidle/
  F:	include/linux/cpuidle.h
  
  CRAMFS FILESYSTEM
- M:	Nicolas Pitre <nico at linaro.org>
+ M:	Nicolas Pitre <nico at fluxnic.net>
  S:	Maintained
  F:	Documentation/filesystems/cramfs.txt
  F:	fs/cramfs/
@@@ -5836,7 -5833,7 +5836,7 @@@ L:	netdev at vger.kernel.or
  S:	Maintained
  F:	Documentation/ABI/testing/sysfs-bus-mdio
  F:	Documentation/devicetree/bindings/net/mdio*
- F:	Documentation/networking/phy.txt
+ F:	Documentation/networking/phy.rst
  F:	drivers/net/phy/
  F:	drivers/of/of_mdio.c
  F:	drivers/of/of_net.c
@@@ -6411,7 -6408,6 +6411,6 @@@ L:	linux-kernel at vger.kernel.or
  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
  S:	Maintained
  F:	kernel/futex.c
- F:	kernel/futex_compat.c
  F:	include/asm-generic/futex.h
  F:	include/linux/futex.h
  F:	include/uapi/linux/futex.h
@@@ -9879,6 -9875,15 +9878,6 @@@ F:	drivers/net/ethernet/mellanox/mlx5/c
  F:	drivers/net/ethernet/mellanox/mlx5/core/fpga/*
  F:	include/linux/mlx5/mlx5_ifc_fpga.h
  
 -MELLANOX ETHERNET INNOVA IPSEC DRIVER
 -R:	Boris Pismenny <borisp at mellanox.com>
 -L:	netdev at vger.kernel.org
 -S:	Supported
 -W:	http://www.mellanox.com
 -Q:	http://patchwork.ozlabs.org/project/netdev/list/
 -F:	drivers/net/ethernet/mellanox/mlx5/core/en_ipsec/*
 -F:	drivers/net/ethernet/mellanox/mlx5/core/ipsec*
 -
  MELLANOX ETHERNET SWITCH DRIVERS
  M:	Jiri Pirko <jiri at mellanox.com>
  M:	Ido Schimmel <idosch at mellanox.com>
@@@ -13976,7 -13981,7 +13975,7 @@@ F:	drivers/media/rc/serial_ir.
  SFC NETWORK DRIVER
  M:	Solarflare linux maintainers <linux-net-drivers at solarflare.com>
  M:	Edward Cree <ecree at solarflare.com>
- M:	Bert Kenward <bkenward at solarflare.com>
+ M:	Martin Habets <mhabets at solarflare.com>
  L:	netdev at vger.kernel.org
  S:	Supported
  F:	drivers/net/ethernet/sfc/
diff --combined drivers/net/ethernet/hisilicon/hns/hns_enet.c
index e37a0ca0db89,4cd86ba1f050..297b95c1b3c1
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@@ -29,9 -29,6 +29,6 @@@
  
  #define SERVICE_TIMER_HZ (1 * HZ)
  
- #define NIC_TX_CLEAN_MAX_NUM 256
- #define NIC_RX_CLEAN_MAX_NUM 64
- 
  #define RCB_IRQ_NOT_INITED 0
  #define RCB_IRQ_INITED 1
  #define HNS_BUFFER_SIZE_2048 2048
@@@ -376,8 -373,6 +373,6 @@@ netdev_tx_t hns_nic_net_xmit_hw(struct 
  	wmb(); /* commit all data before submit */
  	assert(skb->queue_mapping < priv->ae_handle->q_num);
  	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
- 	ring->stats.tx_pkts++;
- 	ring->stats.tx_bytes += skb->len;
  
  	return NETDEV_TX_OK;
  
@@@ -999,6 -994,9 +994,9 @@@ static int hns_nic_tx_poll_one(struct h
  		/* issue prefetch for next Tx descriptor */
  		prefetch(&ring->desc_cb[ring->next_to_clean]);
  	}
+ 	/* update tx ring statistics. */
+ 	ring->stats.tx_pkts += pkts;
+ 	ring->stats.tx_bytes += bytes;
  
  	NETIF_TX_UNLOCK(ring);
  
@@@ -1964,7 -1962,8 +1962,7 @@@ static void hns_nic_get_stats64(struct 
  
  static u16
  hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
 -		     struct net_device *sb_dev,
 -		     select_queue_fallback_t fallback)
 +		     struct net_device *sb_dev)
  {
  	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
  	struct hns_nic_priv *priv = netdev_priv(ndev);
@@@ -1974,7 -1973,7 +1972,7 @@@
  	    is_multicast_ether_addr(eth_hdr->h_dest))
  		return 0;
  	else
 -		return fallback(ndev, skb, NULL);
 +		return netdev_pick_tx(ndev, skb, NULL);
  }
  
  static const struct net_device_ops hns_nic_netdev_ops = {
@@@ -2151,7 -2150,7 +2149,7 @@@ static int hns_nic_init_ring_data(struc
  			hns_nic_tx_fini_pro_v2;
  
  		netif_napi_add(priv->netdev, &rd->napi,
- 			       hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
+ 			       hns_nic_common_poll, NAPI_POLL_WEIGHT);
  		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
  	}
  	for (i = h->q_num; i < h->q_num * 2; i++) {
@@@ -2164,7 -2163,7 +2162,7 @@@
  			hns_nic_rx_fini_pro_v2;
  
  		netif_napi_add(priv->netdev, &rd->napi,
- 			       hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
+ 			       hns_nic_common_poll, NAPI_POLL_WEIGHT);
  		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
  	}
  
diff --combined drivers/net/ethernet/ibm/ibmvnic.c
index 20c4e0835ba8,51cfe95f3e24..1de691e76b86
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@@ -120,7 -120,6 +120,7 @@@ static int ibmvnic_reset_init(struct ib
  static void release_crq_queue(struct ibmvnic_adapter *);
  static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
  static int init_crq_queue(struct ibmvnic_adapter *adapter);
 +static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
  
  struct ibmvnic_stat {
  	char name[ETH_GSTRING_LEN];
@@@ -1886,6 -1885,7 +1886,7 @@@ static int do_hard_reset(struct ibmvnic
  	 */
  	adapter->state = VNIC_PROBED;
  
+ 	reinit_completion(&adapter->init_done);
  	rc = init_crq_queue(adapter);
  	if (rc) {
  		netdev_err(adapter->netdev,
@@@ -1968,11 -1968,13 +1969,11 @@@ static void __ibmvnic_reset(struct work
  {
  	struct ibmvnic_rwi *rwi;
  	struct ibmvnic_adapter *adapter;
 -	struct net_device *netdev;
  	bool we_lock_rtnl = false;
  	u32 reset_state;
  	int rc = 0;
  
  	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
 -	netdev = adapter->netdev;
  
  	/* netif_set_real_num_xx_queues needs to take rtnl lock here
  	 * unless wait_for_reset is set, in which case the rtnl lock
@@@ -2277,20 -2279,23 +2278,20 @@@ static const struct net_device_ops ibmv
  static int ibmvnic_get_link_ksettings(struct net_device *netdev,
  				      struct ethtool_link_ksettings *cmd)
  {
 -	u32 supported, advertising;
 -
 -	supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
 -			  SUPPORTED_FIBRE);
 -	advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
 -			    ADVERTISED_FIBRE);
 -	cmd->base.speed = SPEED_1000;
 -	cmd->base.duplex = DUPLEX_FULL;
 +	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 +	int rc;
 +
 +	rc = send_query_phys_parms(adapter);
 +	if (rc) {
 +		adapter->speed = SPEED_UNKNOWN;
 +		adapter->duplex = DUPLEX_UNKNOWN;
 +	}
 +	cmd->base.speed = adapter->speed;
 +	cmd->base.duplex = adapter->duplex;
  	cmd->base.port = PORT_FIBRE;
  	cmd->base.phy_address = 0;
  	cmd->base.autoneg = AUTONEG_ENABLE;
  
 -	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
 -						supported);
 -	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
 -						advertising);
 -
  	return 0;
  }
  
@@@ -4274,73 -4279,6 +4275,73 @@@ out
  	}
  }
  
 +static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
 +{
 +	union ibmvnic_crq crq;
 +	int rc;
 +
 +	memset(&crq, 0, sizeof(crq));
 +	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
 +	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
 +	init_completion(&adapter->fw_done);
 +	rc = ibmvnic_send_crq(adapter, &crq);
 +	if (rc)
 +		return rc;
 +	wait_for_completion(&adapter->fw_done);
 +	return adapter->fw_done_rc ? -EIO : 0;
 +}
 +
 +static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
 +				       struct ibmvnic_adapter *adapter)
 +{
 +	struct net_device *netdev = adapter->netdev;
 +	int rc;
 +
 +	rc = crq->query_phys_parms_rsp.rc.code;
 +	if (rc) {
 +		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
 +		return rc;
 +	}
 +	switch (cpu_to_be32(crq->query_phys_parms_rsp.speed)) {
 +	case IBMVNIC_10MBPS:
 +		adapter->speed = SPEED_10;
 +		break;
 +	case IBMVNIC_100MBPS:
 +		adapter->speed = SPEED_100;
 +		break;
 +	case IBMVNIC_1GBPS:
 +		adapter->speed = SPEED_1000;
 +		break;
 +	case IBMVNIC_10GBP:
 +		adapter->speed = SPEED_10000;
 +		break;
 +	case IBMVNIC_25GBPS:
 +		adapter->speed = SPEED_25000;
 +		break;
 +	case IBMVNIC_40GBPS:
 +		adapter->speed = SPEED_40000;
 +		break;
 +	case IBMVNIC_50GBPS:
 +		adapter->speed = SPEED_50000;
 +		break;
 +	case IBMVNIC_100GBPS:
 +		adapter->speed = SPEED_100000;
 +		break;
 +	default:
 +		netdev_warn(netdev, "Unknown speed 0x%08x\n",
 +			    cpu_to_be32(crq->query_phys_parms_rsp.speed));
 +		adapter->speed = SPEED_UNKNOWN;
 +	}
 +	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
 +		adapter->duplex = DUPLEX_FULL;
 +	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
 +		adapter->duplex = DUPLEX_HALF;
 +	else
 +		adapter->duplex = DUPLEX_UNKNOWN;
 +
 +	return rc;
 +}
 +
  static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
  			       struct ibmvnic_adapter *adapter)
  {
@@@ -4489,10 -4427,6 +4490,10 @@@
  	case GET_VPD_RSP:
  		handle_vpd_rsp(crq, adapter);
  		break;
 +	case QUERY_PHYS_PARMS_RSP:
 +		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
 +		complete(&adapter->fw_done);
 +		break;
  	default:
  		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
  			   gen_crq->cmd);
@@@ -4692,7 -4626,7 +4693,7 @@@ static int ibmvnic_reset_init(struct ib
  	old_num_rx_queues = adapter->req_rx_queues;
  	old_num_tx_queues = adapter->req_tx_queues;
  
- 	init_completion(&adapter->init_done);
+ 	reinit_completion(&adapter->init_done);
  	adapter->init_done_rc = 0;
  	ibmvnic_send_crq_init(adapter);
  	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@@ -4747,7 -4681,6 +4748,6 @@@ static int ibmvnic_init(struct ibmvnic_
  
  	adapter->from_passive_init = false;
  
- 	init_completion(&adapter->init_done);
  	adapter->init_done_rc = 0;
  	ibmvnic_send_crq_init(adapter);
  	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@@ -4826,6 -4759,7 +4826,7 @@@ static int ibmvnic_probe(struct vio_de
  	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
  	INIT_LIST_HEAD(&adapter->rwi_list);
  	spin_lock_init(&adapter->rwi_lock);
+ 	init_completion(&adapter->init_done);
  	adapter->resetting = false;
  
  	adapter->mac_change_pending = false;
diff --combined drivers/net/ethernet/intel/fm10k/fm10k_main.c
index e2fa112bed9a,ecef949f3baa..2325cee76211
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@@ -41,6 -41,8 +41,8 @@@ static int __init fm10k_init_module(voi
  	/* create driver workqueue */
  	fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
  					  fm10k_driver_name);
+ 	if (!fm10k_workqueue)
+ 		return -ENOMEM;
  
  	fm10k_dbg_init();
  
@@@ -1035,7 -1037,7 +1037,7 @@@ static void fm10k_tx_map(struct fm10k_r
  	fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
  
  	/* notify HW of packet */
 -	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
 +	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
  		writel(i, tx_ring->tail);
  
  		/* we need this if more than one processor can write to our tail
diff --combined drivers/net/ethernet/intel/igb/igb_main.c
index 32d61d5a2706,3269d8e94744..acbb5b4f333d
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@@ -2480,7 -2480,7 +2480,7 @@@ static int igb_set_features(struct net_
  	else
  		igb_reset(adapter);
  
 -	return 0;
 +	return 1;
  }
  
  static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
@@@ -3452,9 -3452,6 +3452,9 @@@ static int igb_probe(struct pci_dev *pd
  			break;
  		}
  	}
 +
 +	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
 +
  	pm_runtime_put_noidle(&pdev->dev);
  	return 0;
  
@@@ -6029,7 -6026,7 +6029,7 @@@ static int igb_tx_map(struct igb_ring *
  	/* Make sure there is space in the ring for the next send. */
  	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
  
 -	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
 +	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
  		writel(i, tx_ring->tail);
  
  		/* we need this if more than one processor can write to our tail
@@@ -8743,9 -8740,7 +8743,7 @@@ static int __igb_shutdown(struct pci_de
  	struct e1000_hw *hw = &adapter->hw;
  	u32 ctrl, rctl, status;
  	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
- #ifdef CONFIG_PM
- 	int retval = 0;
- #endif
+ 	bool wake;
  
  	rtnl_lock();
  	netif_device_detach(netdev);
@@@ -8758,14 -8753,6 +8756,6 @@@
  	igb_clear_interrupt_scheme(adapter);
  	rtnl_unlock();
  
- #ifdef CONFIG_PM
- 	if (!runtime) {
- 		retval = pci_save_state(pdev);
- 		if (retval)
- 			return retval;
- 	}
- #endif
- 
  	status = rd32(E1000_STATUS);
  	if (status & E1000_STATUS_LU)
  		wufc &= ~E1000_WUFC_LNKC;
@@@ -8782,10 -8769,6 +8772,6 @@@
  		}
  
  		ctrl = rd32(E1000_CTRL);
- 		/* advertise wake from D3Cold */
- 		#define E1000_CTRL_ADVD3WUC 0x00100000
- 		/* phy power management enable */
- 		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
  		ctrl |= E1000_CTRL_ADVD3WUC;
  		wr32(E1000_CTRL, ctrl);
  
@@@ -8799,12 -8782,15 +8785,15 @@@
  		wr32(E1000_WUFC, 0);
  	}
  
- 	*enable_wake = wufc || adapter->en_mng_pt;
- 	if (!*enable_wake)
+ 	wake = wufc || adapter->en_mng_pt;
+ 	if (!wake)
  		igb_power_down_link(adapter);
  	else
  		igb_power_up_link(adapter);
  
+ 	if (enable_wake)
+ 		*enable_wake = wake;
+ 
  	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
  	 * would have already happened in close and is redundant.
  	 */
@@@ -8847,22 -8833,7 +8836,7 @@@ static void igb_deliver_wake_packet(str
  
  static int __maybe_unused igb_suspend(struct device *dev)
  {
- 	int retval;
- 	bool wake;
- 	struct pci_dev *pdev = to_pci_dev(dev);
- 
- 	retval = __igb_shutdown(pdev, &wake, 0);
- 	if (retval)
- 		return retval;
- 
- 	if (wake) {
- 		pci_prepare_to_sleep(pdev);
- 	} else {
- 		pci_wake_from_d3(pdev, false);
- 		pci_set_power_state(pdev, PCI_D3hot);
- 	}
- 
- 	return 0;
+ 	return __igb_shutdown(to_pci_dev(dev), NULL, 0);
  }
  
  static int __maybe_unused igb_resume(struct device *dev)
@@@ -8933,22 -8904,7 +8907,7 @@@ static int __maybe_unused igb_runtime_i
  
  static int __maybe_unused igb_runtime_suspend(struct device *dev)
  {
- 	struct pci_dev *pdev = to_pci_dev(dev);
- 	int retval;
- 	bool wake;
- 
- 	retval = __igb_shutdown(pdev, &wake, 1);
- 	if (retval)
- 		return retval;
- 
- 	if (wake) {
- 		pci_prepare_to_sleep(pdev);
- 	} else {
- 		pci_wake_from_d3(pdev, false);
- 		pci_set_power_state(pdev, PCI_D3hot);
- 	}
- 
- 	return 0;
+ 	return __igb_shutdown(to_pci_dev(dev), NULL, 1);
  }
  
  static int __maybe_unused igb_runtime_resume(struct device *dev)
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index b0ce68feb0f3,4ab0d030b544..633b117eb13e
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@@ -122,7 -122,9 +122,9 @@@ out
  	return err;
  }
  
- /* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */
+ /* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B])
+  * minimum speed value is 40Gbps
+  */
  static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
  {
  	u32 speed;
@@@ -130,10 -132,9 +132,9 @@@
  	int err;
  
  	err = mlx5e_port_linkspeed(priv->mdev, &speed);
- 	if (err) {
- 		mlx5_core_warn(priv->mdev, "cannot get port speed\n");
- 		return 0;
- 	}
+ 	if (err)
+ 		speed = SPEED_40000;
+ 	speed = max_t(u32, speed, SPEED_40000);
  
  	xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
  
@@@ -142,7 -143,7 +143,7 @@@
  }
  
  static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
- 				 u32 xoff, unsigned int mtu)
+ 				 u32 xoff, unsigned int max_mtu)
  {
  	int i;
  
@@@ -154,36 -155,37 +155,37 @@@
  		}
  
  		if (port_buffer->buffer[i].size <
- 		    (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
+ 		    (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
  			return -ENOMEM;
  
  		port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
- 		port_buffer->buffer[i].xon  = port_buffer->buffer[i].xoff - mtu;
+ 		port_buffer->buffer[i].xon  =
+ 			port_buffer->buffer[i].xoff - max_mtu;
  	}
  
  	return 0;
  }
  
  /**
 - * update_buffer_lossy()
 - *   max_mtu: netdev's max_mtu
 - *   pfc_en: <input> current pfc configuration
 - *   buffer: <input> current prio to buffer mapping
 - *   xoff:   <input> xoff value
 - *   port_buffer: <output> port receive buffer configuration
 - *   change: <output>
 + *	update_buffer_lossy	- Update buffer configuration based on pfc
-  *	@mtu: device's MTU
++ *	@max_mtu: netdev's max_mtu
 + *	@pfc_en: <input> current pfc configuration
 + *	@buffer: <input> current prio to buffer mapping
 + *	@xoff:   <input> xoff value
 + *	@port_buffer: <output> port receive buffer configuration
 + *	@change: <output>
   *
 - *   Update buffer configuration based on pfc configuraiton and priority
 - *   to buffer mapping.
 - *   Buffer's lossy bit is changed to:
 - *     lossless if there is at least one PFC enabled priority mapped to this buffer
 - *     lossy if all priorities mapped to this buffer are PFC disabled
 + *	Update buffer configuration based on pfc configuraiton and
 + *	priority to buffer mapping.
 + *	Buffer's lossy bit is changed to:
 + *		lossless if there is at least one PFC enabled priority
 + *		mapped to this buffer lossy if all priorities mapped to
 + *		this buffer are PFC disabled
   *
 - *   Return:
 - *     Return 0 if no error.
 - *     Set change to true if buffer configuration is modified.
 + *	@return: 0 if no error,
 + *	sets change to true if buffer configuration was modified.
   */
- static int update_buffer_lossy(unsigned int mtu,
+ static int update_buffer_lossy(unsigned int max_mtu,
  			       u8 pfc_en, u8 *buffer, u32 xoff,
  			       struct mlx5e_port_buffer *port_buffer,
  			       bool *change)
@@@ -220,7 -222,7 +222,7 @@@
  	}
  
  	if (changed) {
- 		err = update_xoff_threshold(port_buffer, xoff, mtu);
+ 		err = update_xoff_threshold(port_buffer, xoff, max_mtu);
  		if (err)
  			return err;
  
@@@ -230,6 -232,7 +232,7 @@@
  	return 0;
  }
  
+ #define MINIMUM_MAX_MTU 9216
  int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
  				    u32 change, unsigned int mtu,
  				    struct ieee_pfc *pfc,
@@@ -241,12 -244,14 +244,14 @@@
  	bool update_prio2buffer = false;
  	u8 buffer[MLX5E_MAX_PRIORITY];
  	bool update_buffer = false;
+ 	unsigned int max_mtu;
  	u32 total_used = 0;
  	u8 curr_pfc_en;
  	int err;
  	int i;
  
  	mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
+ 	max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
  
  	err = mlx5e_port_query_buffer(priv, &port_buffer);
  	if (err)
@@@ -254,7 -259,7 +259,7 @@@
  
  	if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
  		update_buffer = true;
- 		err = update_xoff_threshold(&port_buffer, xoff, mtu);
+ 		err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
  		if (err)
  			return err;
  	}
@@@ -264,7 -269,7 +269,7 @@@
  		if (err)
  			return err;
  
- 		err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff,
+ 		err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
  					  &port_buffer, &update_buffer);
  		if (err)
  			return err;
@@@ -276,8 -281,8 +281,8 @@@
  		if (err)
  			return err;
  
- 		err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff,
- 					  &port_buffer, &update_buffer);
+ 		err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
+ 					  xoff, &port_buffer, &update_buffer);
  		if (err)
  			return err;
  	}
@@@ -301,7 -306,7 +306,7 @@@
  			return -EINVAL;
  
  		update_buffer = true;
- 		err = update_xoff_threshold(&port_buffer, xoff, mtu);
+ 		err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
  		if (err)
  			return err;
  	}
@@@ -309,7 -314,7 +314,7 @@@
  	/* Need to update buffer configuration if xoff value is changed */
  	if (!update_buffer && xoff != priv->dcbx.xoff) {
  		update_buffer = true;
- 		err = update_xoff_threshold(&port_buffer, xoff, mtu);
+ 		err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
  		if (err)
  			return err;
  	}
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 2fd425a7b156,d75dc44eb2ff..ffc4a36551c8
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@@ -44,7 -44,6 +44,7 @@@
  #include <net/tc_act/tc_pedit.h>
  #include <net/tc_act/tc_csum.h>
  #include <net/arp.h>
 +#include <net/ipv6_stubs.h>
  #include "en.h"
  #include "en_rep.h"
  #include "en_tc.h"
@@@ -1828,7 -1827,6 +1828,7 @@@ static int parse_cls_flower(struct mlx5
  
  struct pedit_headers {
  	struct ethhdr  eth;
 +	struct vlan_hdr vlan;
  	struct iphdr   ip4;
  	struct ipv6hdr ip6;
  	struct tcphdr  tcp;
@@@ -1886,7 -1884,6 +1886,7 @@@ static struct mlx5_fields fields[] = 
  	OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
  	OFFLOAD(SMAC_15_0,  2, eth.h_source[4], 0),
  	OFFLOAD(ETHERTYPE,  2, eth.h_proto, 0),
 +	OFFLOAD(FIRST_VID,  2, vlan.h_vlan_TCI, 0),
  
  	OFFLOAD(IP_TTL, 1, ip4.ttl,   0),
  	OFFLOAD(SIPV4,  4, ip4.saddr, 0),
@@@ -2161,6 -2158,52 +2161,52 @@@ static bool csum_offload_supported(stru
  	return true;
  }
  
+ struct ip_ttl_word {
+ 	__u8	ttl;
+ 	__u8	protocol;
+ 	__sum16	check;
+ };
+ 
+ struct ipv6_hoplimit_word {
+ 	__be16	payload_len;
+ 	__u8	nexthdr;
+ 	__u8	hop_limit;
+ };
+ 
+ static bool is_action_keys_supported(const struct flow_action_entry *act)
+ {
+ 	u32 mask, offset;
+ 	u8 htype;
+ 
+ 	htype = act->mangle.htype;
+ 	offset = act->mangle.offset;
+ 	mask = ~act->mangle.mask;
+ 	/* For IPv4 & IPv6 header check 4 byte word,
+ 	 * to determine that modified fields
+ 	 * are NOT ttl & hop_limit only.
+ 	 */
+ 	if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
+ 		struct ip_ttl_word *ttl_word =
+ 			(struct ip_ttl_word *)&mask;
+ 
+ 		if (offset != offsetof(struct iphdr, ttl) ||
+ 		    ttl_word->protocol ||
+ 		    ttl_word->check) {
+ 			return true;
+ 		}
+ 	} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+ 		struct ipv6_hoplimit_word *hoplimit_word =
+ 			(struct ipv6_hoplimit_word *)&mask;
+ 
+ 		if (offset != offsetof(struct ipv6hdr, payload_len) ||
+ 		    hoplimit_word->payload_len ||
+ 		    hoplimit_word->nexthdr) {
+ 			return true;
+ 		}
+ 	}
+ 	return false;
+ }
+ 
  static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
  					  struct flow_action *flow_action,
  					  u32 actions,
@@@ -2168,9 -2211,9 +2214,9 @@@
  {
  	const struct flow_action_entry *act;
  	bool modify_ip_header;
- 	u8 htype, ip_proto;
  	void *headers_v;
  	u16 ethertype;
+ 	u8 ip_proto;
  	int i;
  
  	if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
@@@ -2190,9 -2233,7 +2236,7 @@@
  		    act->id != FLOW_ACTION_ADD)
  			continue;
  
- 		htype = act->mangle.htype;
- 		if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 ||
- 		    htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+ 		if (is_action_keys_supported(act)) {
  			modify_ip_header = true;
  			break;
  		}
@@@ -2250,35 -2291,6 +2294,35 @@@ static bool same_hw_devs(struct mlx5e_p
  	return (fsystem_guid == psystem_guid);
  }
  
 +static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
 +				   const struct flow_action_entry *act,
 +				   struct mlx5e_tc_flow_parse_attr *parse_attr,
 +				   struct pedit_headers_action *hdrs,
 +				   u32 *action, struct netlink_ext_ack *extack)
 +{
 +	u16 mask16 = VLAN_VID_MASK;
 +	u16 val16 = act->vlan.vid & VLAN_VID_MASK;
 +	const struct flow_action_entry pedit_act = {
 +		.id = FLOW_ACTION_MANGLE,
 +		.mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
 +		.mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
 +		.mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
 +		.mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
 +	};
 +	int err;
 +
 +	if (act->vlan.prio) {
 +		NL_SET_ERR_MSG_MOD(extack, "Setting VLAN prio is not supported");
 +		return -EOPNOTSUPP;
 +	}
 +
 +	err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr,
 +				    hdrs, NULL);
 +	*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
 +
 +	return err;
 +}
 +
  static int parse_tc_nic_actions(struct mlx5e_priv *priv,
  				struct flow_action *flow_action,
  				struct mlx5e_tc_flow_parse_attr *parse_attr,
@@@ -2314,15 -2326,6 +2358,15 @@@
  			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
  				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
  			break;
 +		case FLOW_ACTION_VLAN_MANGLE:
 +			err = add_vlan_rewrite_action(priv,
 +						      MLX5_FLOW_NAMESPACE_KERNEL,
 +						      act, parse_attr, hdrs,
 +						      &action, extack);
 +			if (err)
 +				return err;
 +
 +			break;
  		case FLOW_ACTION_CSUM:
  			if (csum_offload_supported(priv, action,
  						   act->csum_flags,
@@@ -2381,15 -2384,22 +2425,22 @@@
  	return 0;
  }
  
- static inline int cmp_encap_info(struct ip_tunnel_key *a,
- 				 struct ip_tunnel_key *b)
+ struct encap_key {
+ 	struct ip_tunnel_key *ip_tun_key;
+ 	int tunnel_type;
+ };
+ 
+ static inline int cmp_encap_info(struct encap_key *a,
+ 				 struct encap_key *b)
  {
- 	return memcmp(a, b, sizeof(*a));
+ 	return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
+ 	       a->tunnel_type != b->tunnel_type;
  }
  
- static inline int hash_encap_info(struct ip_tunnel_key *key)
+ static inline int hash_encap_info(struct encap_key *key)
  {
- 	return jhash(key, sizeof(*key), 0);
+ 	return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
+ 		     key->tunnel_type);
  }
  
  
@@@ -2420,7 -2430,7 +2471,7 @@@ static int mlx5e_attach_encap(struct ml
  	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
  	struct mlx5e_tc_flow_parse_attr *parse_attr;
  	struct ip_tunnel_info *tun_info;
- 	struct ip_tunnel_key *key;
+ 	struct encap_key key, e_key;
  	struct mlx5e_encap_entry *e;
  	unsigned short family;
  	uintptr_t hash_key;
@@@ -2430,13 -2440,16 +2481,16 @@@
  	parse_attr = attr->parse_attr;
  	tun_info = &parse_attr->tun_info[out_index];
  	family = ip_tunnel_info_af(tun_info);
- 	key = &tun_info->key;
+ 	key.ip_tun_key = &tun_info->key;
+ 	key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev);
  
- 	hash_key = hash_encap_info(key);
+ 	hash_key = hash_encap_info(&key);
  
  	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
  				   encap_hlist, hash_key) {
- 		if (!cmp_encap_info(&e->tun_info.key, key)) {
+ 		e_key.ip_tun_key = &e->tun_info.key;
+ 		e_key.tunnel_type = e->tunnel_type;
+ 		if (!cmp_encap_info(&e_key, &key)) {
  			found = true;
  			break;
  		}
@@@ -2531,7 -2544,8 +2585,7 @@@ static int parse_tc_vlan_action(struct 
  		}
  		break;
  	default:
 -		/* action is FLOW_ACT_VLAN_MANGLE */
 -		return -EOPNOTSUPP;
 +		return -EINVAL;
  	}
  
  	attr->total_vlan = vlan_idx + 1;
@@@ -2665,27 -2679,7 +2719,27 @@@ static int parse_tc_fdb_actions(struct 
  			break;
  		case FLOW_ACTION_VLAN_PUSH:
  		case FLOW_ACTION_VLAN_POP:
 -			err = parse_tc_vlan_action(priv, act, attr, &action);
 +			if (act->id == FLOW_ACTION_VLAN_PUSH &&
 +			    (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
 +				/* Replace vlan pop+push with vlan modify */
 +				action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
 +				err = add_vlan_rewrite_action(priv,
 +							      MLX5_FLOW_NAMESPACE_FDB,
 +							      act, parse_attr, hdrs,
 +							      &action, extack);
 +			} else {
 +				err = parse_tc_vlan_action(priv, act, attr, &action);
 +			}
 +			if (err)
 +				return err;
 +
 +			attr->split_count = attr->out_count;
 +			break;
 +		case FLOW_ACTION_VLAN_MANGLE:
 +			err = add_vlan_rewrite_action(priv,
 +						      MLX5_FLOW_NAMESPACE_FDB,
 +						      act, parse_attr, hdrs,
 +						      &action, extack);
  			if (err)
  				return err;
  
@@@ -2717,7 -2711,7 +2771,7 @@@
  
  	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
  	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
- 		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
+ 		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
  					    parse_attr, hdrs, extack);
  		if (err)
  			return err;
diff --combined drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 6c72f33f6d09,9b2d78ee22b8..fe770cd2151c
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@@ -1287,13 -1287,13 +1287,13 @@@ void esw_offloads_cleanup_reps(struct m
  
  int esw_offloads_init_reps(struct mlx5_eswitch *esw)
  {
 -	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
 +	int total_vports = MLX5_TOTAL_VPORTS(esw->dev);
  	struct mlx5_core_dev *dev = esw->dev;
  	struct mlx5_eswitch_rep *rep;
  	u8 hw_id[ETH_ALEN], rep_type;
  	int vport;
  
 -	esw->offloads.vport_reps = kcalloc(total_vfs,
 +	esw->offloads.vport_reps = kcalloc(total_vports,
  					   sizeof(struct mlx5_eswitch_rep),
  					   GFP_KERNEL);
  	if (!esw->offloads.vport_reps)
@@@ -1523,6 -1523,8 +1523,6 @@@ static int mlx5_esw_offloads_pair(struc
  	return 0;
  }
  
 -void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
 -
  static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
  {
  	mlx5e_tc_clean_fdb_peer_flows(esw);
@@@ -1609,6 -1611,7 +1609,7 @@@ static int esw_offloads_steering_init(s
  {
  	int err;
  
+ 	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
  	mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
  
  	err = esw_create_offloads_fdb_tables(esw, nvports);
diff --combined drivers/net/ethernet/netronome/nfp/flower/action.c
index 6e2a6caec3fb,e336f6ee94f5..c56e31d9f8a4
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@@ -48,8 -48,7 +48,7 @@@ nfp_fl_push_vlan(struct nfp_fl_push_vla
  
  	tmp_push_vlan_tci =
  		FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
- 		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) |
- 		NFP_FL_PUSH_VLAN_CFI;
+ 		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
  	push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
  }
  
@@@ -161,9 -160,9 +160,9 @@@ nfp_fl_get_tun_from_act_l4_port(struct 
  	struct nfp_flower_priv *priv = app->priv;
  
  	switch (tun->key.tp_dst) {
 -	case htons(NFP_FL_VXLAN_PORT):
 +	case htons(IANA_VXLAN_UDP_PORT):
  		return NFP_FL_TUNNEL_VXLAN;
 -	case htons(NFP_FL_GENEVE_PORT):
 +	case htons(GENEVE_UDP_PORT):
  		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
  			return NFP_FL_TUNNEL_GENEVE;
  		/* FALLTHROUGH */
@@@ -583,23 -582,60 +582,23 @@@ static u32 nfp_fl_csum_l4_to_flag(u8 ip
  	}
  }
  
 -static int
 -nfp_fl_pedit(const struct flow_action_entry *act,
 -	     struct tc_cls_flower_offload *flow,
 -	     char *nfp_action, int *a_len, u32 *csum_updated)
 -{
 -	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
 +struct nfp_flower_pedit_acts {
  	struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
  	struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
  	struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
  	struct nfp_fl_set_ip4_addrs set_ip_addr;
 -	enum flow_action_mangle_base htype;
  	struct nfp_fl_set_tport set_tport;
  	struct nfp_fl_set_eth set_eth;
 +};
 +
 +static int
 +nfp_fl_commit_mangle(struct tc_cls_flower_offload *flow, char *nfp_action,
 +		     int *a_len, struct nfp_flower_pedit_acts *set_act,
 +		     u32 *csum_updated)
 +{
 +	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
  	size_t act_size = 0;
  	u8 ip_proto = 0;
 -	u32 offset;
 -	int err;
 -
 -	memset(&set_ip6_tc_hl_fl, 0, sizeof(set_ip6_tc_hl_fl));
 -	memset(&set_ip_ttl_tos, 0, sizeof(set_ip_ttl_tos));
 -	memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
 -	memset(&set_ip6_src, 0, sizeof(set_ip6_src));
 -	memset(&set_ip_addr, 0, sizeof(set_ip_addr));
 -	memset(&set_tport, 0, sizeof(set_tport));
 -	memset(&set_eth, 0, sizeof(set_eth));
 -
 -	htype = act->mangle.htype;
 -	offset = act->mangle.offset;
 -
 -	switch (htype) {
 -	case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
 -		err = nfp_fl_set_eth(act, offset, &set_eth);
 -		break;
 -	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
 -		err = nfp_fl_set_ip4(act, offset, &set_ip_addr,
 -				     &set_ip_ttl_tos);
 -		break;
 -	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
 -		err = nfp_fl_set_ip6(act, offset, &set_ip6_dst,
 -				     &set_ip6_src, &set_ip6_tc_hl_fl);
 -		break;
 -	case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
 -		err = nfp_fl_set_tport(act, offset, &set_tport,
 -				       NFP_FL_ACTION_OPCODE_SET_TCP);
 -		break;
 -	case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
 -		err = nfp_fl_set_tport(act, offset, &set_tport,
 -				       NFP_FL_ACTION_OPCODE_SET_UDP);
 -		break;
 -	default:
 -		return -EOPNOTSUPP;
 -	}
 -	if (err)
 -		return err;
  
  	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
  		struct flow_match_basic match;
@@@ -608,82 -644,77 +607,82 @@@
  		ip_proto = match.key->ip_proto;
  	}
  
 -	if (set_eth.head.len_lw) {
 -		act_size = sizeof(set_eth);
 -		memcpy(nfp_action, &set_eth, act_size);
 +	if (set_act->set_eth.head.len_lw) {
 +		act_size = sizeof(set_act->set_eth);
 +		memcpy(nfp_action, &set_act->set_eth, act_size);
  		*a_len += act_size;
  	}
 -	if (set_ip_ttl_tos.head.len_lw) {
 +
 +	if (set_act->set_ip_ttl_tos.head.len_lw) {
  		nfp_action += act_size;
 -		act_size = sizeof(set_ip_ttl_tos);
 -		memcpy(nfp_action, &set_ip_ttl_tos, act_size);
 +		act_size = sizeof(set_act->set_ip_ttl_tos);
 +		memcpy(nfp_action, &set_act->set_ip_ttl_tos, act_size);
  		*a_len += act_size;
  
  		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
  		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
  				nfp_fl_csum_l4_to_flag(ip_proto);
  	}
 -	if (set_ip_addr.head.len_lw) {
 +
 +	if (set_act->set_ip_addr.head.len_lw) {
  		nfp_action += act_size;
 -		act_size = sizeof(set_ip_addr);
 -		memcpy(nfp_action, &set_ip_addr, act_size);
 +		act_size = sizeof(set_act->set_ip_addr);
 +		memcpy(nfp_action, &set_act->set_ip_addr, act_size);
  		*a_len += act_size;
  
  		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
  		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
  				nfp_fl_csum_l4_to_flag(ip_proto);
  	}
 -	if (set_ip6_tc_hl_fl.head.len_lw) {
 +
 +	if (set_act->set_ip6_tc_hl_fl.head.len_lw) {
  		nfp_action += act_size;
 -		act_size = sizeof(set_ip6_tc_hl_fl);
 -		memcpy(nfp_action, &set_ip6_tc_hl_fl, act_size);
 +		act_size = sizeof(set_act->set_ip6_tc_hl_fl);
 +		memcpy(nfp_action, &set_act->set_ip6_tc_hl_fl, act_size);
  		*a_len += act_size;
  
  		/* Hardware will automatically fix TCP/UDP checksum. */
  		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
  	}
 -	if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
 +
 +	if (set_act->set_ip6_dst.head.len_lw &&
 +	    set_act->set_ip6_src.head.len_lw) {
  		/* TC compiles set src and dst IPv6 address as a single action,
  		 * the hardware requires this to be 2 separate actions.
  		 */
  		nfp_action += act_size;
 -		act_size = sizeof(set_ip6_src);
 -		memcpy(nfp_action, &set_ip6_src, act_size);
 +		act_size = sizeof(set_act->set_ip6_src);
 +		memcpy(nfp_action, &set_act->set_ip6_src, act_size);
  		*a_len += act_size;
  
 -		act_size = sizeof(set_ip6_dst);
 -		memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
 -		       act_size);
 +		act_size = sizeof(set_act->set_ip6_dst);
 +		memcpy(&nfp_action[sizeof(set_act->set_ip6_src)],
 +		       &set_act->set_ip6_dst, act_size);
  		*a_len += act_size;
  
  		/* Hardware will automatically fix TCP/UDP checksum. */
  		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
 -	} else if (set_ip6_dst.head.len_lw) {
 +	} else if (set_act->set_ip6_dst.head.len_lw) {
  		nfp_action += act_size;
 -		act_size = sizeof(set_ip6_dst);
 -		memcpy(nfp_action, &set_ip6_dst, act_size);
 +		act_size = sizeof(set_act->set_ip6_dst);
 +		memcpy(nfp_action, &set_act->set_ip6_dst, act_size);
  		*a_len += act_size;
  
  		/* Hardware will automatically fix TCP/UDP checksum. */
  		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
 -	} else if (set_ip6_src.head.len_lw) {
 +	} else if (set_act->set_ip6_src.head.len_lw) {
  		nfp_action += act_size;
 -		act_size = sizeof(set_ip6_src);
 -		memcpy(nfp_action, &set_ip6_src, act_size);
 +		act_size = sizeof(set_act->set_ip6_src);
 +		memcpy(nfp_action, &set_act->set_ip6_src, act_size);
  		*a_len += act_size;
  
  		/* Hardware will automatically fix TCP/UDP checksum. */
  		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
  	}
 -	if (set_tport.head.len_lw) {
 +	if (set_act->set_tport.head.len_lw) {
  		nfp_action += act_size;
 -		act_size = sizeof(set_tport);
 -		memcpy(nfp_action, &set_tport, act_size);
 +		act_size = sizeof(set_act->set_tport);
 +		memcpy(nfp_action, &set_act->set_tport, act_size);
  		*a_len += act_size;
  
  		/* Hardware will automatically fix TCP/UDP checksum. */
@@@ -694,40 -725,7 +693,40 @@@
  }
  
  static int
 -nfp_flower_output_action(struct nfp_app *app, const struct flow_action_entry *act,
 +nfp_fl_pedit(const struct flow_action_entry *act,
 +	     struct tc_cls_flower_offload *flow, char *nfp_action, int *a_len,
 +	     u32 *csum_updated, struct nfp_flower_pedit_acts *set_act)
 +{
 +	enum flow_action_mangle_base htype;
 +	u32 offset;
 +
 +	htype = act->mangle.htype;
 +	offset = act->mangle.offset;
 +
 +	switch (htype) {
 +	case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
 +		return nfp_fl_set_eth(act, offset, &set_act->set_eth);
 +	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
 +		return nfp_fl_set_ip4(act, offset, &set_act->set_ip_addr,
 +				      &set_act->set_ip_ttl_tos);
 +	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
 +		return nfp_fl_set_ip6(act, offset, &set_act->set_ip6_dst,
 +				      &set_act->set_ip6_src,
 +				      &set_act->set_ip6_tc_hl_fl);
 +	case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
 +		return nfp_fl_set_tport(act, offset, &set_act->set_tport,
 +					NFP_FL_ACTION_OPCODE_SET_TCP);
 +	case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
 +		return nfp_fl_set_tport(act, offset, &set_act->set_tport,
 +					NFP_FL_ACTION_OPCODE_SET_UDP);
 +	default:
 +		return -EOPNOTSUPP;
 +	}
 +}
 +
 +static int
 +nfp_flower_output_action(struct nfp_app *app,
 +			 const struct flow_action_entry *act,
  			 struct nfp_fl_payload *nfp_fl, int *a_len,
  			 struct net_device *netdev, bool last,
  			 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
@@@ -777,8 -775,7 +776,8 @@@ nfp_flower_loop_action(struct nfp_app *
  		       struct nfp_fl_payload *nfp_fl, int *a_len,
  		       struct net_device *netdev,
  		       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
 -		       int *out_cnt, u32 *csum_updated)
 +		       int *out_cnt, u32 *csum_updated,
 +		       struct nfp_flower_pedit_acts *set_act)
  {
  	struct nfp_fl_set_ipv4_udp_tun *set_tun;
  	struct nfp_fl_pre_tunnel *pre_tun;
@@@ -863,7 -860,7 +862,7 @@@
  		return 0;
  	case FLOW_ACTION_MANGLE:
  		if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
 -				 a_len, csum_updated))
 +				 a_len, csum_updated, set_act))
  			return -EOPNOTSUPP;
  		break;
  	case FLOW_ACTION_CSUM:
@@@ -883,49 -880,12 +882,49 @@@
  	return 0;
  }
  
 +static bool nfp_fl_check_mangle_start(struct flow_action *flow_act,
 +				      int current_act_idx)
 +{
 +	struct flow_action_entry current_act;
 +	struct flow_action_entry prev_act;
 +
 +	current_act = flow_act->entries[current_act_idx];
 +	if (current_act.id != FLOW_ACTION_MANGLE)
 +		return false;
 +
 +	if (current_act_idx == 0)
 +		return true;
 +
 +	prev_act = flow_act->entries[current_act_idx - 1];
 +
 +	return prev_act.id != FLOW_ACTION_MANGLE;
 +}
 +
 +static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
 +				    int current_act_idx)
 +{
 +	struct flow_action_entry current_act;
 +	struct flow_action_entry next_act;
 +
 +	current_act = flow_act->entries[current_act_idx];
 +	if (current_act.id != FLOW_ACTION_MANGLE)
 +		return false;
 +
 +	if (current_act_idx == flow_act->num_entries)
 +		return true;
 +
 +	next_act = flow_act->entries[current_act_idx + 1];
 +
 +	return next_act.id != FLOW_ACTION_MANGLE;
 +}
 +
  int nfp_flower_compile_action(struct nfp_app *app,
  			      struct tc_cls_flower_offload *flow,
  			      struct net_device *netdev,
  			      struct nfp_fl_payload *nfp_flow)
  {
  	int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
 +	struct nfp_flower_pedit_acts set_act;
  	enum nfp_flower_tun_type tun_type;
  	struct flow_action_entry *act;
  	u32 csum_updated = 0;
@@@ -939,18 -899,12 +938,18 @@@
  	out_cnt = 0;
  
  	flow_action_for_each(i, act, &flow->rule->action) {
 +		if (nfp_fl_check_mangle_start(&flow->rule->action, i))
 +			memset(&set_act, 0, sizeof(set_act));
  		err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
  					     netdev, &tun_type, &tun_out_cnt,
 -					     &out_cnt, &csum_updated);
 +					     &out_cnt, &csum_updated, &set_act);
  		if (err)
  			return err;
  		act_cnt++;
 +		if (nfp_fl_check_mangle_end(&flow->rule->action, i))
 +			nfp_fl_commit_mangle(flow,
 +					     &nfp_flow->action_data[act_len],
 +					     &act_len, &set_act, &csum_updated);
  	}
  
  	/* We optimise when the action list is small, this can unfortunately
diff --combined drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index c3ad083d36c6,94d228c04496..08e9bfa95f9b
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@@ -195,7 -195,7 +195,7 @@@ static netdev_tx_t nfp_repr_xmit(struc
  	ret = dev_queue_xmit(skb);
  	nfp_repr_inc_tx_stats(netdev, len, ret);
  
- 	return ret;
+ 	return NETDEV_TX_OK;
  }
  
  static int nfp_repr_stop(struct net_device *netdev)
@@@ -272,7 -272,8 +272,7 @@@ const struct net_device_ops nfp_repr_ne
  	.ndo_fix_features	= nfp_repr_fix_features,
  	.ndo_set_features	= nfp_port_set_features,
  	.ndo_set_mac_address    = eth_mac_addr,
 -	.ndo_get_port_parent_id	= nfp_port_get_port_parent_id,
 -	.ndo_get_devlink	= nfp_devlink_get_devlink,
 +	.ndo_get_devlink_port	= nfp_devlink_get_devlink_port,
  };
  
  void
@@@ -382,7 -383,7 +382,7 @@@ int nfp_repr_init(struct nfp_app *app, 
  	netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
  	netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
  
- 	netdev->priv_flags |= IFF_NO_QUEUE;
+ 	netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
  	netdev->features |= NETIF_F_LLTX;
  
  	if (nfp_app_has_tc(app)) {
diff --combined drivers/net/ethernet/realtek/r8169.c
index a8ca26c2ae0c,19efa88f3f02..88eb9e05d2a1
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@@ -5460,7 -5460,7 +5460,7 @@@ static void rtl_hw_start_8168(struct rt
  	tp->cp_cmd |= PktCntrDisable | INTT_1;
  	RTL_W16(tp, CPlusCmd, tp->cp_cmd);
  
- 	RTL_W16(tp, IntrMitigate, 0x5151);
+ 	RTL_W16(tp, IntrMitigate, 0x5100);
  
  	/* Work around for RxFIFO overflow. */
  	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
@@@ -6267,7 -6267,7 +6267,7 @@@ static netdev_tx_t rtl8169_start_xmit(s
  		 */
  		smp_mb();
  		if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS))
 -			netif_wake_queue(dev);
 +			netif_start_queue(dev);
  	}
  
  	return NETDEV_TX_OK;
diff --combined drivers/net/hyperv/netvsc.c
index 9a022539d305,e0dce373cdd9..fdbeb7070d42
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@@ -110,6 -110,7 +110,7 @@@ static struct netvsc_device *alloc_net_
  
  	init_waitqueue_head(&net_device->wait_drain);
  	net_device->destroy = false;
+ 	net_device->tx_disable = false;
  
  	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
  	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
@@@ -719,7 -720,7 +720,7 @@@ static void netvsc_send_tx_complete(str
  	} else {
  		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
  
- 		if (netif_tx_queue_stopped(txq) &&
+ 		if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
  		    (hv_get_avail_to_write_percent(&channel->outbound) >
  		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
  			netif_tx_wake_queue(txq);
@@@ -874,7 -875,8 +875,8 @@@ static inline int netvsc_send_pkt
  	} else if (ret == -EAGAIN) {
  		netif_tx_stop_queue(txq);
  		ndev_ctx->eth_stats.stop_queue++;
- 		if (atomic_read(&nvchan->queue_sends) < 1) {
+ 		if (atomic_read(&nvchan->queue_sends) < 1 &&
+ 		    !net_device->tx_disable) {
  			netif_tx_wake_queue(txq);
  			ndev_ctx->eth_stats.wake_queue++;
  			ret = -ENOSPC;
@@@ -964,7 -966,7 +966,7 @@@ int netvsc_send(struct net_device *ndev
  	/* Keep aggregating only if stack says more data is coming
  	 * and not doing mixed modes send and not flow blocked
  	 */
 -	xmit_more = skb->xmit_more &&
 +	xmit_more = netdev_xmit_more() &&
  		!packet->cp_partial &&
  		!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
  
diff --combined drivers/net/hyperv/netvsc_drv.c
index 1a08679f90ce,b20fb0fb595b..06393b215102
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@@ -109,6 -109,15 +109,15 @@@ static void netvsc_set_rx_mode(struct n
  	rcu_read_unlock();
  }
  
+ static void netvsc_tx_enable(struct netvsc_device *nvscdev,
+ 			     struct net_device *ndev)
+ {
+ 	nvscdev->tx_disable = false;
+ 	virt_wmb(); /* ensure queue wake up mechanism is on */
+ 
+ 	netif_tx_wake_all_queues(ndev);
+ }
+ 
  static int netvsc_open(struct net_device *net)
  {
  	struct net_device_context *ndev_ctx = netdev_priv(net);
@@@ -129,7 -138,7 +138,7 @@@
  	rdev = nvdev->extension;
  	if (!rdev->link_state) {
  		netif_carrier_on(net);
- 		netif_tx_wake_all_queues(net);
+ 		netvsc_tx_enable(nvdev, net);
  	}
  
  	if (vf_netdev) {
@@@ -184,6 -193,17 +193,17 @@@ static int netvsc_wait_until_empty(stru
  	}
  }
  
+ static void netvsc_tx_disable(struct netvsc_device *nvscdev,
+ 			      struct net_device *ndev)
+ {
+ 	if (nvscdev) {
+ 		nvscdev->tx_disable = true;
+ 		virt_wmb(); /* ensure txq will not wake up after stop */
+ 	}
+ 
+ 	netif_tx_disable(ndev);
+ }
+ 
  static int netvsc_close(struct net_device *net)
  {
  	struct net_device_context *net_device_ctx = netdev_priv(net);
@@@ -192,7 -212,7 +212,7 @@@
  	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
  	int ret;
  
- 	netif_tx_disable(net);
+ 	netvsc_tx_disable(nvdev, net);
  
  	/* No need to close rndis filter if it is removed already */
  	if (!nvdev)
@@@ -308,7 -328,7 +328,7 @@@ static inline int netvsc_get_tx_queue(s
   * If a valid queue has already been assigned, then use that.
   * Otherwise compute tx queue based on hash and the send table.
   *
 - * This is basically similar to default (__netdev_pick_tx) with the added step
 + * This is basically similar to default (netdev_pick_tx) with the added step
   * of using the host send_table when no other queue has been assigned.
   *
   * TODO support XPS - but get_xps_queue not exported
@@@ -331,7 -351,8 +351,7 @@@ static u16 netvsc_pick_tx(struct net_de
  }
  
  static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
 -			       struct net_device *sb_dev,
 -			       select_queue_fallback_t fallback)
 +			       struct net_device *sb_dev)
  {
  	struct net_device_context *ndc = netdev_priv(ndev);
  	struct net_device *vf_netdev;
@@@ -343,9 -364,10 +363,9 @@@
  		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
  
  		if (vf_ops->ndo_select_queue)
 -			txq = vf_ops->ndo_select_queue(vf_netdev, skb,
 -						       sb_dev, fallback);
 +			txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev);
  		else
 -			txq = fallback(vf_netdev, skb, NULL);
 +			txq = netdev_pick_tx(vf_netdev, skb, NULL);
  
  		/* Record the queue selected by VF so that it can be
  		 * used for common case where VF has more queues than
@@@ -918,7 -940,7 +938,7 @@@ static int netvsc_detach(struct net_dev
  
  	/* If device was up (receiving) then shutdown */
  	if (netif_running(ndev)) {
- 		netif_tx_disable(ndev);
+ 		netvsc_tx_disable(nvdev, ndev);
  
  		ret = rndis_filter_close(nvdev);
  		if (ret) {
@@@ -1906,7 -1928,7 +1926,7 @@@ static void netvsc_link_change(struct w
  		if (rdev->link_state) {
  			rdev->link_state = false;
  			netif_carrier_on(net);
- 			netif_tx_wake_all_queues(net);
+ 			netvsc_tx_enable(net_device, net);
  		} else {
  			notify = true;
  		}
@@@ -1916,7 -1938,7 +1936,7 @@@
  		if (!rdev->link_state) {
  			rdev->link_state = true;
  			netif_carrier_off(net);
- 			netif_tx_stop_all_queues(net);
+ 			netvsc_tx_disable(net_device, net);
  		}
  		kfree(event);
  		break;
@@@ -1925,7 -1947,7 +1945,7 @@@
  		if (!rdev->link_state) {
  			rdev->link_state = true;
  			netif_carrier_off(net);
- 			netif_tx_stop_all_queues(net);
+ 			netvsc_tx_disable(net_device, net);
  			event->event = RNDIS_STATUS_MEDIA_CONNECT;
  			spin_lock_irqsave(&ndev_ctx->lock, flags);
  			list_add(&event->list, &ndev_ctx->reconfig_events);
diff --combined include/net/ip.h
index aa09ae5f01a5,583526aad1d0..2d3cce7c3e8a
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@@ -38,10 -38,6 +38,10 @@@
  #define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */
  #define IPV4_MIN_MTU		68			/* RFC 791 */
  
 +extern unsigned int sysctl_fib_sync_mem;
 +extern unsigned int sysctl_fib_sync_mem_min;
 +extern unsigned int sysctl_fib_sync_mem_max;
 +
  struct sock;
  
  struct inet_skb_parm {
@@@ -681,7 -677,7 +681,7 @@@ int ip_options_get_from_user(struct ne
  			     unsigned char __user *data, int optlen);
  void ip_options_undo(struct ip_options *opt);
  void ip_forward_options(struct sk_buff *skb);
- int ip_options_rcv_srr(struct sk_buff *skb);
+ int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
  
  /*
   *	Functions provided by ip_sockglue.c
diff --combined include/net/sch_generic.h
index 2269383c1399,a2b38b3deeca..0aea0e262452
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@@ -113,9 -113,6 +113,9 @@@ struct Qdisc 
  
  	spinlock_t		busylock ____cacheline_aligned_in_smp;
  	spinlock_t		seqlock;
 +
 +	/* for NOLOCK qdisc, true if there are no enqueued skbs */
 +	bool			empty;
  	struct rcu_head		rcu;
  };
  
@@@ -146,19 -143,11 +146,19 @@@ static inline bool qdisc_is_running(str
  	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
  }
  
 +static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
 +{
 +	if (qdisc->flags & TCQ_F_NOLOCK)
 +		return qdisc->empty;
 +	return !qdisc->q.qlen;
 +}
 +
  static inline bool qdisc_run_begin(struct Qdisc *qdisc)
  {
  	if (qdisc->flags & TCQ_F_NOLOCK) {
  		if (!spin_trylock(&qdisc->seqlock))
  			return false;
 +		qdisc->empty = false;
  	} else if (qdisc_is_running(qdisc)) {
  		return false;
  	}
@@@ -934,6 -923,41 +934,41 @@@ static inline void qdisc_qstats_overlim
  	sch->qstats.overlimits++;
  }
  
+ static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
+ {
+ 	__u32 qlen = qdisc_qlen_sum(sch);
+ 
+ 	return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
+ }
+ 
+ static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch,  __u32 *qlen,
+ 					     __u32 *backlog)
+ {
+ 	struct gnet_stats_queue qstats = { 0 };
+ 	__u32 len = qdisc_qlen_sum(sch);
+ 
+ 	__gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
+ 	*qlen = qstats.qlen;
+ 	*backlog = qstats.backlog;
+ }
+ 
+ static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
+ {
+ 	__u32 qlen, backlog;
+ 
+ 	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+ 	qdisc_tree_reduce_backlog(sch, qlen, backlog);
+ }
+ 
+ static inline void qdisc_purge_queue(struct Qdisc *sch)
+ {
+ 	__u32 qlen, backlog;
+ 
+ 	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+ 	qdisc_reset(sch);
+ 	qdisc_tree_reduce_backlog(sch, qlen, backlog);
+ }
+ 
  static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
  {
  	qh->head = NULL;
@@@ -1117,13 -1141,8 +1152,8 @@@ static inline struct Qdisc *qdisc_repla
  	sch_tree_lock(sch);
  	old = *pold;
  	*pold = new;
- 	if (old != NULL) {
- 		unsigned int qlen = old->q.qlen;
- 		unsigned int backlog = old->qstats.backlog;
- 
- 		qdisc_reset(old);
- 		qdisc_tree_reduce_backlog(old, qlen, backlog);
- 	}
+ 	if (old != NULL)
+ 		qdisc_tree_flush_backlog(old);
  	sch_tree_unlock(sch);
  
  	return old;
diff --combined kernel/bpf/verifier.c
index 2fe89138309a,6c5a41f7f338..b7ad8003c4e6
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@@ -377,8 -377,7 +377,8 @@@ static bool is_release_function(enum bp
  static bool is_acquire_function(enum bpf_func_id func_id)
  {
  	return func_id == BPF_FUNC_sk_lookup_tcp ||
 -		func_id == BPF_FUNC_sk_lookup_udp;
 +		func_id == BPF_FUNC_sk_lookup_udp ||
 +		func_id == BPF_FUNC_skc_lookup_tcp;
  }
  
  static bool is_ptr_cast_function(enum bpf_func_id func_id)
@@@ -1898,8 -1897,9 +1898,9 @@@ continue_func
  		}
  		frame++;
  		if (frame >= MAX_CALL_FRAMES) {
- 			WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
- 			return -EFAULT;
+ 			verbose(env, "the call stack of %d frames is too deep !\n",
+ 				frame);
+ 			return -E2BIG;
  		}
  		goto process_func;
  	}
@@@ -3157,11 -3157,19 +3158,11 @@@ static int check_helper_call(struct bpf
  	} else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
  		mark_reg_known_zero(env, regs, BPF_REG_0);
  		regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
 -		if (is_acquire_function(func_id)) {
 -			int id = acquire_reference_state(env, insn_idx);
 -
 -			if (id < 0)
 -				return id;
 -			/* For mark_ptr_or_null_reg() */
 -			regs[BPF_REG_0].id = id;
 -			/* For release_reference() */
 -			regs[BPF_REG_0].ref_obj_id = id;
 -		} else {
 -			/* For mark_ptr_or_null_reg() */
 -			regs[BPF_REG_0].id = ++env->id_gen;
 -		}
 +		regs[BPF_REG_0].id = ++env->id_gen;
 +	} else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) {
 +		mark_reg_known_zero(env, regs, BPF_REG_0);
 +		regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
 +		regs[BPF_REG_0].id = ++env->id_gen;
  	} else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
  		mark_reg_known_zero(env, regs, BPF_REG_0);
  		regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
@@@ -3172,19 -3180,9 +3173,19 @@@
  		return -EINVAL;
  	}
  
 -	if (is_ptr_cast_function(func_id))
 +	if (is_ptr_cast_function(func_id)) {
  		/* For release_reference() */
  		regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
 +	} else if (is_acquire_function(func_id)) {
 +		int id = acquire_reference_state(env, insn_idx);
 +
 +		if (id < 0)
 +			return id;
 +		/* For mark_ptr_or_null_reg() */
 +		regs[BPF_REG_0].id = id;
 +		/* For release_reference() */
 +		regs[BPF_REG_0].ref_obj_id = id;
 +	}
  
  	do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
  
diff --combined net/batman-adv/bat_v_elp.c
index 13b9ab860a25,d5df0114f08a..2614a9caee00
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@@ -2,6 -2,18 +2,6 @@@
  /* Copyright (C) 2011-2019  B.A.T.M.A.N. contributors:
   *
   * Linus Lüssing, Marek Lindner
 - *
 - * This program is free software; you can redistribute it and/or
 - * modify it under the terms of version 2 of the GNU General Public
 - * License as published by the Free Software Foundation.
 - *
 - * This program is distributed in the hope that it will be useful, but
 - * WITHOUT ANY WARRANTY; without even the implied warranty of
 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 - * General Public License for more details.
 - *
 - * You should have received a copy of the GNU General Public License
 - * along with this program; if not, see <http://www.gnu.org/licenses/>.
   */
  
  #include "bat_v_elp.h"
@@@ -92,8 -104,10 +92,10 @@@ static u32 batadv_v_elp_get_throughput(
  
  		ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
  
- 		/* free the TID stats immediately */
- 		cfg80211_sinfo_release_content(&sinfo);
+ 		if (!ret) {
+ 			/* free the TID stats immediately */
+ 			cfg80211_sinfo_release_content(&sinfo);
+ 		}
  
  		dev_put(real_netdev);
  		if (ret == -ENOENT) {
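
The bat_v_elp.c fix above is about ownership of the station_info contents:
cfg80211_get_station() fills (and may allocate per-TID statistics inside) the
structure only on success, so releasing its content after a failed call would
act on uninitialized stack memory. The corrected shape, reduced (names follow
the hunk):

struct station_info sinfo;
int ret;

ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
if (!ret) {
	/* ... consume sinfo.expected_throughput ... */

	/* frees the kmalloc'ed TID stats; only valid after success */
	cfg80211_sinfo_release_content(&sinfo);
}
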
diff --combined net/batman-adv/bridge_loop_avoidance.c
index 8d6b7c9c2a7e,4fb01108e5f5..663a53b6d36e
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@@ -2,6 -2,18 +2,6 @@@
  /* Copyright (C) 2011-2019  B.A.T.M.A.N. contributors:
   *
   * Simon Wunderlich
 - *
 - * This program is free software; you can redistribute it and/or
 - * modify it under the terms of version 2 of the GNU General Public
 - * License as published by the Free Software Foundation.
 - *
 - * This program is distributed in the hope that it will be useful, but
 - * WITHOUT ANY WARRANTY; without even the implied warranty of
 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 - * General Public License for more details.
 - *
 - * You should have received a copy of the GNU General Public License
 - * along with this program; if not, see <http://www.gnu.org/licenses/>.
   */
  
  #include "bridge_loop_avoidance.h"
@@@ -47,6 -59,7 +47,6 @@@
  #include "netlink.h"
  #include "originator.h"
  #include "soft-interface.h"
 -#include "sysfs.h"
  #include "translation-table.h"
  
  static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
@@@ -790,6 -803,8 +790,8 @@@ static void batadv_bla_del_claim(struc
  				 const u8 *mac, const unsigned short vid)
  {
  	struct batadv_bla_claim search_claim, *claim;
+ 	struct batadv_bla_claim *claim_removed_entry;
+ 	struct hlist_node *claim_removed_node;
  
  	ether_addr_copy(search_claim.addr, mac);
  	search_claim.vid = vid;
@@@ -800,10 -815,18 +802,18 @@@
  	batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
  		   mac, batadv_print_vid(vid));
  
- 	batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
- 			   batadv_choose_claim, claim);
- 	batadv_claim_put(claim); /* reference from the hash is gone */
+ 	claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
+ 						batadv_compare_claim,
+ 						batadv_choose_claim, claim);
+ 	if (!claim_removed_node)
+ 		goto free_claim;
  
+ 	/* reference from the hash is gone */
+ 	claim_removed_entry = hlist_entry(claim_removed_node,
+ 					  struct batadv_bla_claim, hash_entry);
+ 	batadv_claim_put(claim_removed_entry);
+ 
+ free_claim:
  	/* don't need the reference from hash_find() anymore */
  	batadv_claim_put(claim);
  }
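
In the bridge_loop_avoidance.c hunk, the reference is now dropped through the
hlist_node that batadv_hash_remove() actually unlinked instead of the entry
found by the earlier lookup. hlist_entry() is container_of() underneath: it
recovers the enclosing object from a pointer to an embedded member. A
self-contained userspace restatement (struct and field names here are
illustrative, not batman-adv's):

#include <stddef.h>
#include <stdio.h>

struct hlist_node { struct hlist_node *next, **pprev; };

struct claim {
	int vid;
	struct hlist_node hash_entry;
};

/* subtract the member's offset to get back to the enclosing struct */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define hlist_entry(ptr, type, member) container_of(ptr, type, member)

int main(void)
{
	struct claim c = { .vid = 42 };
	struct hlist_node *node = &c.hash_entry; /* what hash_remove returns */
	struct claim *back = hlist_entry(node, struct claim, hash_entry);

	printf("vid = %d\n", back->vid); /* prints 42 */
	return 0;
}
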
diff --combined net/batman-adv/sysfs.c
index ad14c8086fe7,208655cf6717..80fc3253c336
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@@ -2,12 -2,23 +2,12 @@@
  /* Copyright (C) 2010-2019  B.A.T.M.A.N. contributors:
   *
   * Marek Lindner
 - *
 - * This program is free software; you can redistribute it and/or
 - * modify it under the terms of version 2 of the GNU General Public
 - * License as published by the Free Software Foundation.
 - *
 - * This program is distributed in the hope that it will be useful, but
 - * WITHOUT ANY WARRANTY; without even the implied warranty of
 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 - * General Public License for more details.
 - *
 - * You should have received a copy of the GNU General Public License
 - * along with this program; if not, see <http://www.gnu.org/licenses/>.
   */
  
  #include "sysfs.h"
  #include "main.h"
  
 +#include <asm/current.h>
  #include <linux/atomic.h>
  #include <linux/compiler.h>
  #include <linux/device.h>
@@@ -23,7 -34,6 +23,7 @@@
  #include <linux/rculist.h>
  #include <linux/rcupdate.h>
  #include <linux/rtnetlink.h>
 +#include <linux/sched.h>
  #include <linux/slab.h>
  #include <linux/stddef.h>
  #include <linux/string.h>
@@@ -42,16 -52,6 +42,16 @@@
  #include "network-coding.h"
  #include "soft-interface.h"
  
 +/**
 + * batadv_sysfs_deprecated() - Log use of deprecated batadv sysfs access
 + * @attr: attribute which was accessed
 + */
 +static void batadv_sysfs_deprecated(struct attribute *attr)
 +{
 +	pr_warn_ratelimited(DEPRECATED "%s (pid %d) Use of sysfs file \"%s\".\nUse batadv genl family instead",
 +			    current->comm, task_pid_nr(current), attr->name);
 +}
 +
  static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
  {
  	struct device *dev = container_of(obj->parent, struct device, kobj);
@@@ -114,6 -114,22 +114,6 @@@ batadv_kobj_to_vlan(struct batadv_priv 
  	return vlan;
  }
  
 -#define BATADV_UEV_TYPE_VAR	"BATTYPE="
 -#define BATADV_UEV_ACTION_VAR	"BATACTION="
 -#define BATADV_UEV_DATA_VAR	"BATDATA="
 -
 -static char *batadv_uev_action_str[] = {
 -	"add",
 -	"del",
 -	"change",
 -	"loopdetect",
 -};
 -
 -static char *batadv_uev_type_str[] = {
 -	"gw",
 -	"bla",
 -};
 -
  /* Use this, if you have customized show and store functions for vlan attrs */
  #define BATADV_ATTR_VLAN(_name, _mode, _show, _store)	\
  struct batadv_attribute batadv_attr_vlan_##_name = {	\
@@@ -141,7 -157,6 +141,7 @@@ ssize_t batadv_store_##_name(struct kob
  	struct batadv_priv *bat_priv = netdev_priv(net_dev);		\
  	ssize_t length;							\
  									\
 +	batadv_sysfs_deprecated(attr);					\
  	length = __batadv_store_bool_attr(buff, count, _post_func, attr,\
  					  &bat_priv->_name, net_dev);	\
  									\
@@@ -156,7 -171,6 +156,7 @@@ ssize_t batadv_show_##_name(struct kobj
  {									\
  	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);	\
  									\
 +	batadv_sysfs_deprecated(attr);					\
  	return sprintf(buff, "%s\n",					\
  		       atomic_read(&bat_priv->_name) == 0 ?		\
  		       "disabled" : "enabled");				\
@@@ -180,7 -194,6 +180,7 @@@ ssize_t batadv_store_##_name(struct kob
  	struct batadv_priv *bat_priv = netdev_priv(net_dev);		\
  	ssize_t length;							\
  									\
 +	batadv_sysfs_deprecated(attr);					\
  	length = __batadv_store_uint_attr(buff, count, _min, _max,	\
  					  _post_func, attr,		\
  					  &bat_priv->_var, net_dev,	\
@@@ -197,7 -210,6 +197,7 @@@ ssize_t batadv_show_##_name(struct kobj
  {									\
  	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);	\
  									\
 +	batadv_sysfs_deprecated(attr);					\
  	return sprintf(buff, "%i\n", atomic_read(&bat_priv->_var));	\
  }									\
  
@@@ -222,7 -234,6 +222,7 @@@ ssize_t batadv_store_vlan_##_name(struc
  					      attr, &vlan->_name,	\
  					      bat_priv->soft_iface);	\
  									\
 +	batadv_sysfs_deprecated(attr);					\
  	if (vlan->vid)							\
  		batadv_netlink_notify_vlan(bat_priv, vlan);		\
  	else								\
@@@ -243,7 -254,6 +243,7 @@@ ssize_t batadv_show_vlan_##_name(struc
  			     atomic_read(&vlan->_name) == 0 ?		\
  			     "disabled" : "enabled");			\
  									\
 +	batadv_sysfs_deprecated(attr);					\
  	batadv_softif_vlan_put(vlan);					\
  	return res;							\
  }
@@@ -265,7 -275,6 +265,7 @@@ ssize_t batadv_store_##_name(struct kob
  	struct batadv_priv *bat_priv;					\
  	ssize_t length;							\
  									\
 +	batadv_sysfs_deprecated(attr);					\
  	hard_iface = batadv_hardif_get_by_netdev(net_dev);		\
  	if (!hard_iface)						\
  		return 0;						\
@@@ -293,7 -302,6 +293,7 @@@ ssize_t batadv_show_##_name(struct kobj
  	struct batadv_hard_iface *hard_iface;				\
  	ssize_t length;							\
  									\
 +	batadv_sysfs_deprecated(attr);					\
  	hard_iface = batadv_hardif_get_by_netdev(net_dev);		\
  	if (!hard_iface)						\
  		return 0;						\
@@@ -438,7 -446,6 +438,7 @@@ static ssize_t batadv_show_bat_algo(str
  {
  	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
  
 +	batadv_sysfs_deprecated(attr);
  	return sprintf(buff, "%s\n", bat_priv->algo_ops->name);
  }
  
@@@ -455,8 -462,6 +455,8 @@@ static ssize_t batadv_show_gw_mode(stru
  	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
  	int bytes_written;
  
 +	batadv_sysfs_deprecated(attr);
 +
  	/* GW mode is not available if the routing algorithm in use does not
  	 * implement the GW API
  	 */
@@@ -491,8 -496,6 +491,8 @@@ static ssize_t batadv_store_gw_mode(str
  	char *curr_gw_mode_str;
  	int gw_mode_tmp = -1;
  
 +	batadv_sysfs_deprecated(attr);
 +
  	/* toggling GW mode is allowed only if the routing algorithm in use
  	 * provides the GW API
  	 */
@@@ -567,8 -570,6 +567,8 @@@ static ssize_t batadv_show_gw_sel_class
  {
  	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
  
 +	batadv_sysfs_deprecated(attr);
 +
  	/* GW selection class is not available if the routing algorithm in use
  	 * does not implement the GW API
  	 */
@@@ -589,8 -590,6 +589,8 @@@ static ssize_t batadv_store_gw_sel_clas
  	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
  	ssize_t length;
  
 +	batadv_sysfs_deprecated(attr);
 +
  	/* setting the GW selection class is allowed only if the routing
  	 * algorithm in use implements the GW API
  	 */
@@@ -621,8 -620,6 +621,8 @@@ static ssize_t batadv_show_gw_bwidth(st
  	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
  	u32 down, up;
  
 +	batadv_sysfs_deprecated(attr);
 +
  	down = atomic_read(&bat_priv->gw.bandwidth_down);
  	up = atomic_read(&bat_priv->gw.bandwidth_up);
  
@@@ -638,8 -635,6 +638,8 @@@ static ssize_t batadv_store_gw_bwidth(s
  	struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
  	ssize_t length;
  
 +	batadv_sysfs_deprecated(attr);
 +
  	if (buff[count - 1] == '\n')
  		buff[count - 1] = '\0';
  
@@@ -664,7 -659,6 +664,7 @@@ static ssize_t batadv_show_isolation_ma
  {
  	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
  
 +	batadv_sysfs_deprecated(attr);
  	return sprintf(buff, "%#.8x/%#.8x\n", bat_priv->isolation_mark,
  		       bat_priv->isolation_mark_mask);
  }
@@@ -688,8 -682,6 +688,8 @@@ static ssize_t batadv_store_isolation_m
  	u32 mark, mask;
  	char *mask_ptr;
  
 +	batadv_sysfs_deprecated(attr);
 +
  	/* parse the mask if it has been specified, otherwise assume the mask is
  	 * the biggest possible
  	 */
@@@ -945,8 -937,6 +945,8 @@@ static ssize_t batadv_show_mesh_iface(s
  	ssize_t length;
  	const char *ifname;
  
 +	batadv_sysfs_deprecated(attr);
 +
  	hard_iface = batadv_hardif_get_by_netdev(net_dev);
  	if (!hard_iface)
  		return 0;
@@@ -1051,8 -1041,6 +1051,8 @@@ static ssize_t batadv_store_mesh_iface(
  	struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
  	struct batadv_store_mesh_work *store_work;
  
 +	batadv_sysfs_deprecated(attr);
 +
  	if (buff[count - 1] == '\n')
  		buff[count - 1] = '\0';
  
@@@ -1084,8 -1072,6 +1084,8 @@@ static ssize_t batadv_show_iface_status
  	struct batadv_hard_iface *hard_iface;
  	ssize_t length;
  
 +	batadv_sysfs_deprecated(attr);
 +
  	hard_iface = batadv_hardif_get_by_netdev(net_dev);
  	if (!hard_iface)
  		return 0;
@@@ -1130,15 -1116,13 +1130,15 @@@ static ssize_t batadv_store_throughput_
  						struct attribute *attr,
  						char *buff, size_t count)
  {
- 	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
  	struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
  	struct batadv_hard_iface *hard_iface;
+ 	struct batadv_priv *bat_priv;
  	u32 tp_override;
  	u32 old_tp_override;
  	bool ret;
  
 +	batadv_sysfs_deprecated(attr);
 +
  	hard_iface = batadv_hardif_get_by_netdev(net_dev);
  	if (!hard_iface)
  		return -EINVAL;
@@@ -1163,7 -1147,10 +1163,10 @@@
  
  	atomic_set(&hard_iface->bat_v.throughput_override, tp_override);
  
- 	batadv_netlink_notify_hardif(bat_priv, hard_iface);
+ 	if (hard_iface->soft_iface) {
+ 		bat_priv = netdev_priv(hard_iface->soft_iface);
+ 		batadv_netlink_notify_hardif(bat_priv, hard_iface);
+ 	}
  
  out:
  	batadv_hardif_put(hard_iface);
@@@ -1178,8 -1165,6 +1181,8 @@@ static ssize_t batadv_show_throughput_o
  	struct batadv_hard_iface *hard_iface;
  	u32 tp_override;
  
 +	batadv_sysfs_deprecated(attr);
 +
  	hard_iface = batadv_hardif_get_by_netdev(net_dev);
  	if (!hard_iface)
  		return -EINVAL;
@@@ -1265,3 -1250,57 +1268,3 @@@ void batadv_sysfs_del_hardif(struct kob
  	kobject_put(*hardif_obj);
  	*hardif_obj = NULL;
  }
 -
 -/**
 - * batadv_throw_uevent() - Send an uevent with batman-adv specific env data
 - * @bat_priv: the bat priv with all the soft interface information
 - * @type: subsystem type of event. Stored in uevent's BATTYPE
 - * @action: action type of event. Stored in uevent's BATACTION
 - * @data: string with additional information to the event (ignored for
 - *  BATADV_UEV_DEL). Stored in uevent's BATDATA
 - *
 - * Return: 0 on success or negative error number in case of failure
 - */
 -int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
 -			enum batadv_uev_action action, const char *data)
 -{
 -	int ret = -ENOMEM;
 -	struct kobject *bat_kobj;
 -	char *uevent_env[4] = { NULL, NULL, NULL, NULL };
 -
 -	bat_kobj = &bat_priv->soft_iface->dev.kobj;
 -
 -	uevent_env[0] = kasprintf(GFP_ATOMIC,
 -				  "%s%s", BATADV_UEV_TYPE_VAR,
 -				  batadv_uev_type_str[type]);
 -	if (!uevent_env[0])
 -		goto out;
 -
 -	uevent_env[1] = kasprintf(GFP_ATOMIC,
 -				  "%s%s", BATADV_UEV_ACTION_VAR,
 -				  batadv_uev_action_str[action]);
 -	if (!uevent_env[1])
 -		goto out;
 -
 -	/* If the event is DEL, ignore the data field */
 -	if (action != BATADV_UEV_DEL) {
 -		uevent_env[2] = kasprintf(GFP_ATOMIC,
 -					  "%s%s", BATADV_UEV_DATA_VAR, data);
 -		if (!uevent_env[2])
 -			goto out;
 -	}
 -
 -	ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
 -out:
 -	kfree(uevent_env[0]);
 -	kfree(uevent_env[1]);
 -	kfree(uevent_env[2]);
 -
 -	if (ret)
 -		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 -			   "Impossible to send uevent for (%s,%s,%s) event (err: %d)\n",
 -			   batadv_uev_type_str[type],
 -			   batadv_uev_action_str[action],
 -			   (action == BATADV_UEV_DEL ? "NULL" : data), ret);
 -	return ret;
 -}
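
The block removed above relocates batadv_throw_uevent() out of sysfs.c rather
than deleting the functionality; the whole file is on its way out in favour of
the batadv generic netlink family, as the new batadv_sysfs_deprecated()
warning also signals. The uevent idiom it uses is generic: build "KEY=value"
strings, pass kobject_uevent_env() a NULL-terminated array, and free every
string on all exit paths. A reduced sketch, assuming a struct kobject *kobj
in scope:

char *envp[3] = { NULL, NULL, NULL };
int ret = -ENOMEM;

envp[0] = kasprintf(GFP_ATOMIC, "BATTYPE=%s", "gw");
envp[1] = kasprintf(GFP_ATOMIC, "BATACTION=%s", "add");
if (envp[0] && envp[1])
	ret = kobject_uevent_env(kobj, KOBJ_CHANGE, envp);

kfree(envp[0]); /* kfree(NULL) is a no-op, so no ordering games needed */
kfree(envp[1]);
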
diff --combined net/batman-adv/translation-table.c
index 5d8bf8048e4e,26c4e2493ddf..1ddfd5e011ee
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@@ -2,6 -2,18 +2,6 @@@
  /* Copyright (C) 2007-2019  B.A.T.M.A.N. contributors:
   *
   * Marek Lindner, Simon Wunderlich, Antonio Quartulli
 - *
 - * This program is free software; you can redistribute it and/or
 - * modify it under the terms of version 2 of the GNU General Public
 - * License as published by the Free Software Foundation.
 - *
 - * This program is distributed in the hope that it will be useful, but
 - * WITHOUT ANY WARRANTY; without even the implied warranty of
 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 - * General Public License for more details.
 - *
 - * You should have received a copy of the GNU General Public License
 - * along with this program; if not, see <http://www.gnu.org/licenses/>.
   */
  
  #include "translation-table.h"
@@@ -193,7 -205,7 +193,7 @@@ batadv_tt_local_hash_find(struct batadv
   * Return: a pointer to the corresponding tt_global_entry struct if the client
   * is found, NULL otherwise.
   */
 -static struct batadv_tt_global_entry *
 +struct batadv_tt_global_entry *
  batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
  			   unsigned short vid)
  {
@@@ -288,7 -300,8 +288,7 @@@ static void batadv_tt_global_entry_rele
   *  possibly release it
   * @tt_global_entry: tt_global_entry to be free'd
   */
 -static void
 -batadv_tt_global_entry_put(struct batadv_tt_global_entry *tt_global_entry)
 +void batadv_tt_global_entry_put(struct batadv_tt_global_entry *tt_global_entry)
  {
  	kref_put(&tt_global_entry->common.refcount,
  		 batadv_tt_global_entry_release);
@@@ -603,14 -616,26 +603,26 @@@ static void batadv_tt_global_free(struc
  				  struct batadv_tt_global_entry *tt_global,
  				  const char *message)
  {
+ 	struct batadv_tt_global_entry *tt_removed_entry;
+ 	struct hlist_node *tt_removed_node;
+ 
  	batadv_dbg(BATADV_DBG_TT, bat_priv,
  		   "Deleting global tt entry %pM (vid: %d): %s\n",
  		   tt_global->common.addr,
  		   batadv_print_vid(tt_global->common.vid), message);
  
- 	batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
- 			   batadv_choose_tt, &tt_global->common);
- 	batadv_tt_global_entry_put(tt_global);
+ 	tt_removed_node = batadv_hash_remove(bat_priv->tt.global_hash,
+ 					     batadv_compare_tt,
+ 					     batadv_choose_tt,
+ 					     &tt_global->common);
+ 	if (!tt_removed_node)
+ 		return;
+ 
+ 	/* drop reference of removed hash entry */
+ 	tt_removed_entry = hlist_entry(tt_removed_node,
+ 				       struct batadv_tt_global_entry,
+ 				       common.hash_entry);
+ 	batadv_tt_global_entry_put(tt_removed_entry);
  }
  
  /**
@@@ -1324,9 -1349,10 +1336,10 @@@ u16 batadv_tt_local_remove(struct batad
  			   unsigned short vid, const char *message,
  			   bool roaming)
  {
+ 	struct batadv_tt_local_entry *tt_removed_entry;
  	struct batadv_tt_local_entry *tt_local_entry;
  	u16 flags, curr_flags = BATADV_NO_FLAGS;
- 	void *tt_entry_exists;
+ 	struct hlist_node *tt_removed_node;
  
  	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
  	if (!tt_local_entry)
@@@ -1355,15 -1381,18 +1368,18 @@@
  	 */
  	batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
  
- 	tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
+ 	tt_removed_node = batadv_hash_remove(bat_priv->tt.local_hash,
  					     batadv_compare_tt,
  					     batadv_choose_tt,
  					     &tt_local_entry->common);
- 	if (!tt_entry_exists)
+ 	if (!tt_removed_node)
  		goto out;
  
- 	/* extra call to free the local tt entry */
- 	batadv_tt_local_entry_put(tt_local_entry);
+ 	/* drop reference of removed hash entry */
+ 	tt_removed_entry = hlist_entry(tt_removed_node,
+ 				       struct batadv_tt_local_entry,
+ 				       common.hash_entry);
+ 	batadv_tt_local_entry_put(tt_removed_entry);
  
  out:
  	if (tt_local_entry)
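
translation-table.c gets the same removed-node fix as the BLA code, and
batadv_tt_global_entry_put() is promoted from static so other compilation
units can drop references too. Underneath it is the standard kref pattern:
the release callback runs exactly once, when the last holder puts its
reference. A minimal sketch with an illustrative struct:

#include <linux/kref.h>
#include <linux/slab.h>

struct tt_entry {
	struct kref refcount; /* kref_init() at allocation time */
	/* ... payload ... */
};

static void tt_entry_release(struct kref *ref)
{
	struct tt_entry *e = container_of(ref, struct tt_entry, refcount);

	kfree(e);
}

static void tt_entry_put(struct tt_entry *e)
{
	/* frees the entry iff this was the last reference */
	kref_put(&e->refcount, tt_entry_release);
}
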
diff --combined net/bridge/br_multicast.c
index afef6fc2c074,02da21d771c9..8d82107c6419
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@@ -65,6 -65,23 +65,6 @@@ static void br_ip6_multicast_leave_grou
  					 __u16 vid, const unsigned char *src);
  #endif
  
 -static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
 -{
 -	if (a->proto != b->proto)
 -		return 0;
 -	if (a->vid != b->vid)
 -		return 0;
 -	switch (a->proto) {
 -	case htons(ETH_P_IP):
 -		return a->u.ip4 == b->u.ip4;
 -#if IS_ENABLED(CONFIG_IPV6)
 -	case htons(ETH_P_IPV6):
 -		return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
 -#endif
 -	}
 -	return 0;
 -}
 -
  static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
  						      struct br_ip *dst)
  {
@@@ -500,7 -517,7 +500,7 @@@ struct net_bridge_port_group *br_multic
  	if (src)
  		memcpy(p->eth_addr, src, ETH_ALEN);
  	else
 -		memset(p->eth_addr, 0xff, ETH_ALEN);
 +		eth_broadcast_addr(p->eth_addr);
  
  	return p;
  }
@@@ -584,6 -601,7 +584,7 @@@ static int br_ip4_multicast_add_group(s
  	if (ipv4_is_local_multicast(group))
  		return 0;
  
+ 	memset(&br_group, 0, sizeof(br_group));
  	br_group.u.ip4 = group;
  	br_group.proto = htons(ETH_P_IP);
  	br_group.vid = vid;
@@@ -1480,6 -1498,7 +1481,7 @@@ static void br_ip4_multicast_leave_grou
  
  	own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
  
+ 	memset(&br_group, 0, sizeof(br_group));
  	br_group.u.ip4 = group;
  	br_group.proto = htons(ETH_P_IP);
  	br_group.vid = vid;
@@@ -1503,6 -1522,7 +1505,7 @@@ static void br_ip6_multicast_leave_grou
  
  	own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
  
+ 	memset(&br_group, 0, sizeof(br_group));
  	br_group.u.ip6 = *group;
  	br_group.proto = htons(ETH_P_IPV6);
  	br_group.vid = vid;
@@@ -2172,7 -2192,7 +2175,7 @@@ int br_multicast_list_adjacent(struct n
  	int count = 0;
  
  	rcu_read_lock();
 -	if (!br_ip_list || !br_port_exists(dev))
 +	if (!br_ip_list || !netif_is_bridge_port(dev))
  		goto unlock;
  
  	port = br_port_get_rcu(dev);
@@@ -2219,7 -2239,7 +2222,7 @@@ bool br_multicast_has_querier_anywhere(
  	bool ret = false;
  
  	rcu_read_lock();
 -	if (!br_port_exists(dev))
 +	if (!netif_is_bridge_port(dev))
  		goto unlock;
  
  	port = br_port_get_rcu(dev);
@@@ -2255,7 -2275,7 +2258,7 @@@ bool br_multicast_has_querier_adjacent(
  	bool ret = false;
  
  	rcu_read_lock();
 -	if (!br_port_exists(dev))
 +	if (!netif_is_bridge_port(dev))
  		goto unlock;
  
  	port = br_port_get_rcu(dev);
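
The three memset(&br_group, 0, sizeof(br_group)) additions above matter
because struct br_ip embeds a union plus compiler padding: used as a lookup
key, any byte-wise hash or comparison sees those uninitialized bytes as well.
A self-contained userspace demonstration of the failure mode (the layout is
illustrative; actual padding depends on the ABI):

#include <stdio.h>
#include <string.h>

struct key {
	unsigned short proto; /* 2 bytes, then (typically) 2 padding bytes */
	unsigned int addr;
};

int main(void)
{
	struct key a, b;

	memset(&a, 0xAA, sizeof(a)); /* simulate stack garbage */
	memset(&b, 0x55, sizeof(b));
	a.proto = b.proto = 1;
	a.addr = b.addr = 0xe0000001;
	printf("equal without zeroing? %d\n", !memcmp(&a, &b, sizeof(a)));

	memset(&a, 0, sizeof(a));
	memset(&b, 0, sizeof(b));
	a.proto = b.proto = 1;
	a.addr = b.addr = 0xe0000001;
	printf("equal after zeroing?   %d\n", !memcmp(&a, &b, sizeof(a)));
	return 0;
}
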
diff --combined net/core/datagram.c
index 0dafec5cada0,e657289db4ac..91bb5a083fee
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@@ -61,8 -61,6 +61,8 @@@
  #include <trace/events/skb.h>
  #include <net/busy_poll.h>
  
 +#include "datagram.h"
 +
  /*
   *	Is a socket 'connection oriented' ?
   */
@@@ -281,7 -279,7 +281,7 @@@ struct sk_buff *__skb_try_recv_datagram
  			break;
  
  		sk_busy_loop(sk, flags & MSG_DONTWAIT);
- 	} while (!skb_queue_empty(&sk->sk_receive_queue));
+ 	} while (sk->sk_receive_queue.prev != *last);
  
  	error = -EAGAIN;
  
@@@ -410,10 -408,10 +410,10 @@@ int skb_kill_datagram(struct sock *sk, 
  }
  EXPORT_SYMBOL(skb_kill_datagram);
  
 -int __skb_datagram_iter(const struct sk_buff *skb, int offset,
 -			struct iov_iter *to, int len, bool fault_short,
 -			size_t (*cb)(const void *, size_t, void *, struct iov_iter *),
 -			void *data)
 +static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
 +			       struct iov_iter *to, int len, bool fault_short,
 +			       size_t (*cb)(const void *, size_t, void *,
 +					    struct iov_iter *), void *data)
  {
  	int start = skb_headlen(skb);
  	int i, copy = start - offset, start_off = offset, n;
diff --combined net/core/dev.c
index a95782764360,fdcff29df915..b430f851f377
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@@ -131,6 -131,7 +131,6 @@@
  #include <trace/events/napi.h>
  #include <trace/events/net.h>
  #include <trace/events/skb.h>
 -#include <linux/pci.h>
  #include <linux/inetdevice.h>
  #include <linux/cpu_rmap.h>
  #include <linux/static_key.h>
@@@ -145,7 -146,6 +145,7 @@@
  #include <net/udp_tunnel.h>
  #include <linux/net_namespace.h>
  #include <linux/indirect_call_wrapper.h>
 +#include <net/devlink.h>
  
  #include "net-sysfs.h"
  
@@@ -3468,15 -3468,6 +3468,15 @@@ static inline int __dev_xmit_skb(struc
  		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
  			__qdisc_drop(skb, &to_free);
  			rc = NET_XMIT_DROP;
 +		} else if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
 +			   qdisc_run_begin(q)) {
 +			qdisc_bstats_cpu_update(q, skb);
 +
 +			if (sch_direct_xmit(skb, q, dev, txq, NULL, true))
 +				__qdisc_run(q);
 +
 +			qdisc_run_end(q);
 +			rc = NET_XMIT_SUCCESS;
  		} else {
  			rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
  			qdisc_run(q);
@@@ -3565,6 -3556,9 +3565,6 @@@ static void skb_update_prio(struct sk_b
  #define skb_update_prio(skb)
  #endif
  
 -DEFINE_PER_CPU(int, xmit_recursion);
 -EXPORT_SYMBOL(xmit_recursion);
 -
  /**
   *	dev_loopback_xmit - loop back @skb
   *	@net: network namespace this loopback is happening in
@@@ -3695,21 -3689,23 +3695,21 @@@ get_cpus_map
  }
  
  u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
 -		     struct net_device *sb_dev,
 -		     select_queue_fallback_t fallback)
 +		     struct net_device *sb_dev)
  {
  	return 0;
  }
  EXPORT_SYMBOL(dev_pick_tx_zero);
  
  u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
 -		       struct net_device *sb_dev,
 -		       select_queue_fallback_t fallback)
 +		       struct net_device *sb_dev)
  {
  	return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
  }
  EXPORT_SYMBOL(dev_pick_tx_cpu_id);
  
 -static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
 -			    struct net_device *sb_dev)
 +u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
 +		     struct net_device *sb_dev)
  {
  	struct sock *sk = skb->sk;
  	int queue_index = sk_tx_queue_get(sk);
@@@ -3733,11 -3729,10 +3733,11 @@@
  
  	return queue_index;
  }
 +EXPORT_SYMBOL(netdev_pick_tx);
  
 -struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 -				    struct sk_buff *skb,
 -				    struct net_device *sb_dev)
 +struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
 +					 struct sk_buff *skb,
 +					 struct net_device *sb_dev)
  {
  	int queue_index = 0;
  
@@@ -3752,9 -3747,10 +3752,9 @@@
  		const struct net_device_ops *ops = dev->netdev_ops;
  
  		if (ops->ndo_select_queue)
 -			queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
 -							    __netdev_pick_tx);
 +			queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
  		else
 -			queue_index = __netdev_pick_tx(dev, skb, sb_dev);
 +			queue_index = netdev_pick_tx(dev, skb, sb_dev);
  
  		queue_index = netdev_cap_txqueue(dev, queue_index);
  	}
@@@ -3828,7 -3824,7 +3828,7 @@@ static int __dev_queue_xmit(struct sk_b
  	else
  		skb_dst_force(skb);
  
 -	txq = netdev_pick_tx(dev, skb, sb_dev);
 +	txq = netdev_core_pick_tx(dev, skb, sb_dev);
  	q = rcu_dereference_bh(txq->qdisc);
  
  	trace_net_dev_queue(skb);
@@@ -3853,7 -3849,8 +3853,7 @@@
  		int cpu = smp_processor_id(); /* ok because BHs are off */
  
  		if (txq->xmit_lock_owner != cpu) {
 -			if (unlikely(__this_cpu_read(xmit_recursion) >
 -				     XMIT_RECURSION_LIMIT))
 +			if (dev_xmit_recursion())
  				goto recursion_alert;
  
  			skb = validate_xmit_skb(skb, dev, &again);
@@@ -3863,9 -3860,9 +3863,9 @@@
  			HARD_TX_LOCK(dev, txq, cpu);
  
  			if (!netif_xmit_stopped(txq)) {
 -				__this_cpu_inc(xmit_recursion);
 +				dev_xmit_recursion_inc();
  				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
 -				__this_cpu_dec(xmit_recursion);
 +				dev_xmit_recursion_dec();
  				if (dev_xmit_complete(rc)) {
  					HARD_TX_UNLOCK(dev, txq);
  					goto out;
@@@ -3978,9 -3975,9 +3978,9 @@@ EXPORT_SYMBOL(rps_sock_flow_table)
  u32 rps_cpu_mask __read_mostly;
  EXPORT_SYMBOL(rps_cpu_mask);
  
 -struct static_key rps_needed __read_mostly;
 +struct static_key_false rps_needed __read_mostly;
  EXPORT_SYMBOL(rps_needed);
 -struct static_key rfs_needed __read_mostly;
 +struct static_key_false rfs_needed __read_mostly;
  EXPORT_SYMBOL(rfs_needed);
  
  static struct rps_dev_flow *
@@@ -4432,7 -4429,7 +4432,7 @@@ void generic_xdp_tx(struct sk_buff *skb
  	bool free_skb = true;
  	int cpu, rc;
  
 -	txq = netdev_pick_tx(dev, skb, NULL);
 +	txq = netdev_core_pick_tx(dev, skb, NULL);
  	cpu = smp_processor_id();
  	HARD_TX_LOCK(dev, txq, cpu);
  	if (!netif_xmit_stopped(txq)) {
@@@ -4506,7 -4503,7 +4506,7 @@@ static int netif_rx_internal(struct sk_
  	}
  
  #ifdef CONFIG_RPS
 -	if (static_key_false(&rps_needed)) {
 +	if (static_branch_unlikely(&rps_needed)) {
  		struct rps_dev_flow voidflow, *rflow = &voidflow;
  		int cpu;
  
@@@ -5017,8 -5014,10 +5017,10 @@@ static inline void __netif_receive_skb_
  	if (pt_prev->list_func != NULL)
  		pt_prev->list_func(head, pt_prev, orig_dev);
  	else
- 		list_for_each_entry_safe(skb, next, head, list)
+ 		list_for_each_entry_safe(skb, next, head, list) {
+ 			skb_list_del_init(skb);
  			pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+ 		}
  }
  
  static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
@@@ -5175,7 -5174,7 +5177,7 @@@ static int netif_receive_skb_internal(s
  
  	rcu_read_lock();
  #ifdef CONFIG_RPS
 -	if (static_key_false(&rps_needed)) {
 +	if (static_branch_unlikely(&rps_needed)) {
  		struct rps_dev_flow voidflow, *rflow = &voidflow;
  		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
  
@@@ -5223,7 -5222,7 +5225,7 @@@ static void netif_receive_skb_list_inte
  
  	rcu_read_lock();
  #ifdef CONFIG_RPS
 -	if (static_key_false(&rps_needed)) {
 +	if (static_branch_unlikely(&rps_needed)) {
  		list_for_each_entry_safe(skb, next, head, list) {
  			struct rps_dev_flow voidflow, *rflow = &voidflow;
  			int cpu = get_rps_cpu(skb->dev, skb, &rflow);
@@@ -7873,14 -7872,10 +7875,14 @@@ int dev_get_phys_port_name(struct net_d
  			   char *name, size_t len)
  {
  	const struct net_device_ops *ops = dev->netdev_ops;
 +	int err;
  
 -	if (!ops->ndo_get_phys_port_name)
 -		return -EOPNOTSUPP;
 -	return ops->ndo_get_phys_port_name(dev, name, len);
 +	if (ops->ndo_get_phys_port_name) {
 +		err = ops->ndo_get_phys_port_name(dev, name, len);
 +		if (err != -EOPNOTSUPP)
 +			return err;
 +	}
 +	return devlink_compat_phys_port_name_get(dev, name, len);
  }
  EXPORT_SYMBOL(dev_get_phys_port_name);
  
@@@ -7900,21 -7895,14 +7902,21 @@@ int dev_get_port_parent_id(struct net_d
  	struct netdev_phys_item_id first = { };
  	struct net_device *lower_dev;
  	struct list_head *iter;
 -	int err = -EOPNOTSUPP;
 +	int err;
  
 -	if (ops->ndo_get_port_parent_id)
 -		return ops->ndo_get_port_parent_id(dev, ppid);
 +	if (ops->ndo_get_port_parent_id) {
 +		err = ops->ndo_get_port_parent_id(dev, ppid);
 +		if (err != -EOPNOTSUPP)
 +			return err;
 +	}
  
 -	if (!recurse)
 +	err = devlink_compat_switch_id_get(dev, ppid);
 +	if (!err || err != -EOPNOTSUPP)
  		return err;
  
 +	if (!recurse)
 +		return -EOPNOTSUPP;
 +
  	netdev_for_each_lower_dev(dev, lower_dev, iter) {
  		err = dev_get_port_parent_id(lower_dev, ppid, recurse);
  		if (err)
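
The dev.c hunks (and the matching filter.c ones further down) replace
open-coded reads of the per-CPU xmit_recursion counter with
dev_xmit_recursion()/_inc()/_dec() helpers, part of the same series that
moved transmit state into softnet_data. A hedged sketch of what such helpers
wrap; the real definitions live in linux/netdevice.h and may differ in
detail:

static inline bool dev_xmit_recursion(void)
{
	return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
			XMIT_RECURSION_LIMIT);
}

static inline void dev_xmit_recursion_inc(void)
{
	__this_cpu_inc(softnet_data.xmit.recursion);
}

static inline void dev_xmit_recursion_dec(void)
{
	__this_cpu_dec(softnet_data.xmit.recursion);
}
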
diff --combined net/core/ethtool.c
index 387d67eb75ab,36ed619faf36..4a593853cbf2
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@@ -136,7 -136,6 +136,7 @@@ static const cha
  phy_tunable_strings[__ETHTOOL_PHY_TUNABLE_COUNT][ETH_GSTRING_LEN] = {
  	[ETHTOOL_ID_UNSPEC]     = "Unspec",
  	[ETHTOOL_PHY_DOWNSHIFT]	= "phy-downshift",
 +	[ETHTOOL_PHY_FAST_LINK_DOWN] = "phy-fast-link-down",
  };
  
  static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
@@@ -1798,11 -1797,16 +1798,16 @@@ static int ethtool_get_strings(struct n
  	WARN_ON_ONCE(!ret);
  
  	gstrings.len = ret;
- 	data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
- 	if (gstrings.len && !data)
- 		return -ENOMEM;
  
- 	__ethtool_get_strings(dev, gstrings.string_set, data);
+ 	if (gstrings.len) {
+ 		data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
+ 		if (!data)
+ 			return -ENOMEM;
+ 
+ 		__ethtool_get_strings(dev, gstrings.string_set, data);
+ 	} else {
+ 		data = NULL;
+ 	}
  
  	ret = -EFAULT;
  	if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
@@@ -1898,11 -1902,15 +1903,15 @@@ static int ethtool_get_stats(struct net
  		return -EFAULT;
  
  	stats.n_stats = n_stats;
- 	data = vzalloc(array_size(n_stats, sizeof(u64)));
- 	if (n_stats && !data)
- 		return -ENOMEM;
  
- 	ops->get_ethtool_stats(dev, &stats, data);
+ 	if (n_stats) {
+ 		data = vzalloc(array_size(n_stats, sizeof(u64)));
+ 		if (!data)
+ 			return -ENOMEM;
+ 		ops->get_ethtool_stats(dev, &stats, data);
+ 	} else {
+ 		data = NULL;
+ 	}
  
  	ret = -EFAULT;
  	if (copy_to_user(useraddr, &stats, sizeof(stats)))
@@@ -1942,16 -1950,21 +1951,21 @@@ static int ethtool_get_phy_stats(struc
  		return -EFAULT;
  
  	stats.n_stats = n_stats;
- 	data = vzalloc(array_size(n_stats, sizeof(u64)));
- 	if (n_stats && !data)
- 		return -ENOMEM;
  
- 	if (dev->phydev && !ops->get_ethtool_phy_stats) {
- 		ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
- 		if (ret < 0)
- 			return ret;
+ 	if (n_stats) {
+ 		data = vzalloc(array_size(n_stats, sizeof(u64)));
+ 		if (!data)
+ 			return -ENOMEM;
+ 
+ 		if (dev->phydev && !ops->get_ethtool_phy_stats) {
+ 			ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
+ 			if (ret < 0)
+ 				goto out;
+ 		} else {
+ 			ops->get_ethtool_phy_stats(dev, &stats, data);
+ 		}
  	} else {
- 		ops->get_ethtool_phy_stats(dev, &stats, data);
+ 		data = NULL;
  	}
  
  	ret = -EFAULT;
@@@ -2433,7 -2446,6 +2447,7 @@@ static int ethtool_phy_tunable_valid(co
  {
  	switch (tuna->id) {
  	case ETHTOOL_PHY_DOWNSHIFT:
 +	case ETHTOOL_PHY_FAST_LINK_DOWN:
  		if (tuna->len != sizeof(u8) ||
  		    tuna->type_id != ETHTOOL_TUNABLE_U8)
  			return -EINVAL;
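
The three ethtool hunks share one fix: vzalloc(0) cannot succeed (and can
trigger an allocation-failure warning), so the old code issued a pointless
zero-byte allocation whenever a driver reports no strings or stats; the
phy-stats variant additionally stops leaking the buffer on error by going
through the common exit. The corrected shape, reduced (fill_stats stands in
for the per-case callback):

u64 *data = NULL;

if (n_stats) {
	data = vzalloc(array_size(n_stats, sizeof(u64)));
	if (!data)
		return -ENOMEM;
	fill_stats(dev, data); /* illustrative */
}
/* the later copy_to_user() copies zero bytes when data == NULL */
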
diff --combined net/core/filter.c
index 08b53af84132,fc92ebc4e200..8904e3407163
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@@ -74,7 -74,6 +74,7 @@@
  #include <net/seg6.h>
  #include <net/seg6_local.h>
  #include <net/lwtunnel.h>
 +#include <net/ipv6_stubs.h>
  
  /**
   *	sk_filter_trim_cap - run a packet through a socket filter
@@@ -2016,7 -2015,7 +2016,7 @@@ static inline int __bpf_tx_skb(struct n
  {
  	int ret;
  
 -	if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
 +	if (dev_xmit_recursion()) {
  		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
  		kfree_skb(skb);
  		return -ENETDOWN;
@@@ -2024,9 -2023,9 +2024,9 @@@
  
  	skb->dev = dev;
  
 -	__this_cpu_inc(xmit_recursion);
 +	dev_xmit_recursion_inc();
  	ret = dev_queue_xmit(skb);
 -	__this_cpu_dec(xmit_recursion);
 +	dev_xmit_recursion_dec();
  
  	return ret;
  }
@@@ -2964,113 -2963,42 +2964,113 @@@ static u32 bpf_skb_net_base_len(const s
  	}
  }
  
 -static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
 +#define BPF_F_ADJ_ROOM_ENCAP_L3_MASK	(BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | \
 +					 BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
 +
 +#define BPF_F_ADJ_ROOM_MASK		(BPF_F_ADJ_ROOM_FIXED_GSO | \
 +					 BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \
 +					 BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \
 +					 BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
 +
 +static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
 +			    u64 flags)
  {
 -	u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
 +	bool encap = flags & BPF_F_ADJ_ROOM_ENCAP_L3_MASK;
 +	u16 mac_len = 0, inner_net = 0, inner_trans = 0;
 +	unsigned int gso_type = SKB_GSO_DODGY;
  	int ret;
  
 -	if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
 -		return -ENOTSUPP;
 +	if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
 +		/* udp gso_size delineates datagrams, only allow if fixed */
 +		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
 +		    !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
 +			return -ENOTSUPP;
 +	}
  
 -	ret = skb_cow(skb, len_diff);
 +	ret = skb_cow_head(skb, len_diff);
  	if (unlikely(ret < 0))
  		return ret;
  
 +	if (encap) {
 +		if (skb->protocol != htons(ETH_P_IP) &&
 +		    skb->protocol != htons(ETH_P_IPV6))
 +			return -ENOTSUPP;
 +
 +		if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 &&
 +		    flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
 +			return -EINVAL;
 +
 +		if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE &&
 +		    flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
 +			return -EINVAL;
 +
 +		if (skb->encapsulation)
 +			return -EALREADY;
 +
 +		mac_len = skb->network_header - skb->mac_header;
 +		inner_net = skb->network_header;
 +		inner_trans = skb->transport_header;
 +	}
 +
  	ret = bpf_skb_net_hdr_push(skb, off, len_diff);
  	if (unlikely(ret < 0))
  		return ret;
  
 +	if (encap) {
 +		/* inner mac == inner_net on l3 encap */
 +		skb->inner_mac_header = inner_net;
 +		skb->inner_network_header = inner_net;
 +		skb->inner_transport_header = inner_trans;
 +		skb_set_inner_protocol(skb, skb->protocol);
 +
 +		skb->encapsulation = 1;
 +		skb_set_network_header(skb, mac_len);
 +
 +		if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
 +			gso_type |= SKB_GSO_UDP_TUNNEL;
 +		else if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE)
 +			gso_type |= SKB_GSO_GRE;
 +		else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
 +			gso_type |= SKB_GSO_IPXIP6;
 +		else
 +			gso_type |= SKB_GSO_IPXIP4;
 +
 +		if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE ||
 +		    flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) {
 +			int nh_len = flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 ?
 +					sizeof(struct ipv6hdr) :
 +					sizeof(struct iphdr);
 +
 +			skb_set_transport_header(skb, mac_len + nh_len);
 +		}
 +	}
 +
  	if (skb_is_gso(skb)) {
  		struct skb_shared_info *shinfo = skb_shinfo(skb);
  
  		/* Due to header grow, MSS needs to be downgraded. */
 -		skb_decrease_gso_size(shinfo, len_diff);
 +		if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
 +			skb_decrease_gso_size(shinfo, len_diff);
 +
  		/* Header must be checked, and gso_segs recomputed. */
 -		shinfo->gso_type |= SKB_GSO_DODGY;
 +		shinfo->gso_type |= gso_type;
  		shinfo->gso_segs = 0;
  	}
  
  	return 0;
  }
  
 -static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
 +static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
 +			      u64 flags)
  {
 -	u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
  	int ret;
  
 -	if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
 -		return -ENOTSUPP;
 +	if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
 +		/* udp gso_size delineates datagrams, only allow if fixed */
 +		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
 +		    !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
 +			return -ENOTSUPP;
 +	}
  
  	ret = skb_unclone(skb, GFP_ATOMIC);
  	if (unlikely(ret < 0))
@@@ -3084,9 -3012,7 +3084,9 @@@
  		struct skb_shared_info *shinfo = skb_shinfo(skb);
  
  		/* Due to header shrink, MSS can be upgraded. */
 -		skb_increase_gso_size(shinfo, len_diff);
 +		if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
 +			skb_increase_gso_size(shinfo, len_diff);
 +
  		/* Header must be checked, and gso_segs recomputed. */
  		shinfo->gso_type |= SKB_GSO_DODGY;
  		shinfo->gso_segs = 0;
@@@ -3101,50 -3027,49 +3101,50 @@@ static u32 __bpf_skb_max_len(const stru
  			  SKB_MAX_ALLOC;
  }
  
 -static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff)
 +BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
 +	   u32, mode, u64, flags)
  {
 -	bool trans_same = skb->transport_header == skb->network_header;
  	u32 len_cur, len_diff_abs = abs(len_diff);
  	u32 len_min = bpf_skb_net_base_len(skb);
  	u32 len_max = __bpf_skb_max_len(skb);
  	__be16 proto = skb->protocol;
  	bool shrink = len_diff < 0;
 +	u32 off;
  	int ret;
  
 +	if (unlikely(flags & ~BPF_F_ADJ_ROOM_MASK))
 +		return -EINVAL;
  	if (unlikely(len_diff_abs > 0xfffU))
  		return -EFAULT;
  	if (unlikely(proto != htons(ETH_P_IP) &&
  		     proto != htons(ETH_P_IPV6)))
  		return -ENOTSUPP;
  
 +	off = skb_mac_header_len(skb);
 +	switch (mode) {
 +	case BPF_ADJ_ROOM_NET:
 +		off += bpf_skb_net_base_len(skb);
 +		break;
 +	case BPF_ADJ_ROOM_MAC:
 +		break;
 +	default:
 +		return -ENOTSUPP;
 +	}
 +
  	len_cur = skb->len - skb_network_offset(skb);
 -	if (skb_transport_header_was_set(skb) && !trans_same)
 -		len_cur = skb_network_header_len(skb);
  	if ((shrink && (len_diff_abs >= len_cur ||
  			len_cur - len_diff_abs < len_min)) ||
  	    (!shrink && (skb->len + len_diff_abs > len_max &&
  			 !skb_is_gso(skb))))
  		return -ENOTSUPP;
  
 -	ret = shrink ? bpf_skb_net_shrink(skb, len_diff_abs) :
 -		       bpf_skb_net_grow(skb, len_diff_abs);
 +	ret = shrink ? bpf_skb_net_shrink(skb, off, len_diff_abs, flags) :
 +		       bpf_skb_net_grow(skb, off, len_diff_abs, flags);
  
  	bpf_compute_data_pointers(skb);
  	return ret;
  }
  
 -BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
 -	   u32, mode, u64, flags)
 -{
 -	if (unlikely(flags))
 -		return -EINVAL;
 -	if (likely(mode == BPF_ADJ_ROOM_NET))
 -		return bpf_skb_adjust_net(skb, len_diff);
 -
 -	return -ENOTSUPP;
 -}
 -
  static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
  	.func		= bpf_skb_adjust_room,
  	.gpl_only	= false,
@@@ -4555,11 -4480,11 +4555,11 @@@ static int bpf_fib_set_fwd_params(struc
  static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
  			       u32 flags, bool check_mtu)
  {
 +	struct fib_nh_common *nhc;
  	struct in_device *in_dev;
  	struct neighbour *neigh;
  	struct net_device *dev;
  	struct fib_result res;
 -	struct fib_nh *nh;
  	struct flowi4 fl4;
  	int err;
  	u32 mtu;
@@@ -4632,15 -4557,15 +4632,15 @@@
  			return BPF_FIB_LKUP_RET_FRAG_NEEDED;
  	}
  
 -	nh = &res.fi->fib_nh[res.nh_sel];
 +	nhc = res.nhc;
  
  	/* do not handle lwt encaps right now */
 -	if (nh->nh_lwtstate)
 +	if (nhc->nhc_lwtstate)
  		return BPF_FIB_LKUP_RET_UNSUPP_LWT;
  
 -	dev = nh->nh_dev;
 -	if (nh->nh_gw)
 -		params->ipv4_dst = nh->nh_gw;
 +	dev = nhc->nhc_dev;
 +	if (nhc->nhc_has_gw)
 +		params->ipv4_dst = nhc->nhc_gw.ipv4;
  
  	params->rt_metric = res.fi->fib_priority;
  
@@@ -4749,13 -4674,13 +4749,13 @@@ static int bpf_ipv6_fib_lookup(struct n
  			return BPF_FIB_LKUP_RET_FRAG_NEEDED;
  	}
  
 -	if (f6i->fib6_nh.nh_lwtstate)
 +	if (f6i->fib6_nh.fib_nh_lws)
  		return BPF_FIB_LKUP_RET_UNSUPP_LWT;
  
 -	if (f6i->fib6_flags & RTF_GATEWAY)
 -		*dst = f6i->fib6_nh.nh_gw;
 +	if (f6i->fib6_nh.fib_nh_has_gw)
 +		*dst = f6i->fib6_nh.fib_nh_gw6;
  
 -	dev = f6i->fib6_nh.nh_dev;
 +	dev = f6i->fib6_nh.fib_nh_dev;
  	params->rt_metric = f6i->fib6_metric;
  
  	/* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is
@@@ -5231,15 -5156,15 +5231,15 @@@ static struct sock *sk_lookup(struct ne
  	return sk;
  }
  
 -/* bpf_sk_lookup performs the core lookup for different types of sockets,
 +/* bpf_skc_lookup performs the core lookup for different types of sockets,
   * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE.
   * Returns the socket as an 'unsigned long' to simplify the casting in the
   * callers to satisfy BPF_CALL declarations.
   */
 -static unsigned long
 -__bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
 -		struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
 -		u64 flags)
 +static struct sock *
 +__bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
 +		 struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
 +		 u64 flags)
  {
  	struct sock *sk = NULL;
  	u8 family = AF_UNSPEC;
@@@ -5267,27 -5192,15 +5267,27 @@@
  		put_net(net);
  	}
  
 +out:
 +	return sk;
 +}
 +
 +static struct sock *
 +__bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
 +		struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
 +		u64 flags)
 +{
 +	struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net,
 +					   ifindex, proto, netns_id, flags);
 +
  	if (sk)
  		sk = sk_to_full_sk(sk);
 -out:
 -	return (unsigned long) sk;
 +
 +	return sk;
  }
  
 -static unsigned long
 -bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
 -	      u8 proto, u64 netns_id, u64 flags)
 +static struct sock *
 +bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
 +	       u8 proto, u64 netns_id, u64 flags)
  {
  	struct net *caller_net;
  	int ifindex;
@@@ -5300,47 -5213,14 +5300,47 @@@
  		ifindex = 0;
  	}
  
 -	return __bpf_sk_lookup(skb, tuple, len, caller_net, ifindex,
 -			      proto, netns_id, flags);
 +	return __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto,
 +				netns_id, flags);
 +}
 +
 +static struct sock *
 +bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
 +	      u8 proto, u64 netns_id, u64 flags)
 +{
 +	struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id,
 +					 flags);
 +
 +	if (sk)
 +		sk = sk_to_full_sk(sk);
 +
 +	return sk;
 +}
 +
 +BPF_CALL_5(bpf_skc_lookup_tcp, struct sk_buff *, skb,
 +	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
 +{
 +	return (unsigned long)bpf_skc_lookup(skb, tuple, len, IPPROTO_TCP,
 +					     netns_id, flags);
  }
  
 +static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = {
 +	.func		= bpf_skc_lookup_tcp,
 +	.gpl_only	= false,
 +	.pkt_access	= true,
 +	.ret_type	= RET_PTR_TO_SOCK_COMMON_OR_NULL,
 +	.arg1_type	= ARG_PTR_TO_CTX,
 +	.arg2_type	= ARG_PTR_TO_MEM,
 +	.arg3_type	= ARG_CONST_SIZE,
 +	.arg4_type	= ARG_ANYTHING,
 +	.arg5_type	= ARG_ANYTHING,
 +};
 +
  BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb,
  	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
  {
 -	return bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP, netns_id, flags);
 +	return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP,
 +					    netns_id, flags);
  }
  
  static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
@@@ -5358,8 -5238,7 +5358,8 @@@
  BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb,
  	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
  {
 -	return bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP, netns_id, flags);
 +	return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP,
 +					    netns_id, flags);
  }
  
  static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
@@@ -5394,9 -5273,8 +5394,9 @@@ BPF_CALL_5(bpf_xdp_sk_lookup_udp, struc
  	struct net *caller_net = dev_net(ctx->rxq->dev);
  	int ifindex = ctx->rxq->dev->ifindex;
  
 -	return __bpf_sk_lookup(NULL, tuple, len, caller_net, ifindex,
 -			      IPPROTO_UDP, netns_id, flags);
 +	return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
 +					      ifindex, IPPROTO_UDP, netns_id,
 +					      flags);
  }
  
  static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
@@@ -5411,38 -5289,14 +5411,38 @@@
  	.arg5_type      = ARG_ANYTHING,
  };
  
 +BPF_CALL_5(bpf_xdp_skc_lookup_tcp, struct xdp_buff *, ctx,
 +	   struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
 +{
 +	struct net *caller_net = dev_net(ctx->rxq->dev);
 +	int ifindex = ctx->rxq->dev->ifindex;
 +
 +	return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, caller_net,
 +					       ifindex, IPPROTO_TCP, netns_id,
 +					       flags);
 +}
 +
 +static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
 +	.func           = bpf_xdp_skc_lookup_tcp,
 +	.gpl_only       = false,
 +	.pkt_access     = true,
 +	.ret_type       = RET_PTR_TO_SOCK_COMMON_OR_NULL,
 +	.arg1_type      = ARG_PTR_TO_CTX,
 +	.arg2_type      = ARG_PTR_TO_MEM,
 +	.arg3_type      = ARG_CONST_SIZE,
 +	.arg4_type      = ARG_ANYTHING,
 +	.arg5_type      = ARG_ANYTHING,
 +};
 +
  BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx,
  	   struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
  {
  	struct net *caller_net = dev_net(ctx->rxq->dev);
  	int ifindex = ctx->rxq->dev->ifindex;
  
 -	return __bpf_sk_lookup(NULL, tuple, len, caller_net, ifindex,
 -			      IPPROTO_TCP, netns_id, flags);
 +	return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
 +					      ifindex, IPPROTO_TCP, netns_id,
 +					      flags);
  }
  
  static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
@@@ -5457,31 -5311,11 +5457,31 @@@
  	.arg5_type      = ARG_ANYTHING,
  };
  
 +BPF_CALL_5(bpf_sock_addr_skc_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
 +	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
 +{
 +	return (unsigned long)__bpf_skc_lookup(NULL, tuple, len,
 +					       sock_net(ctx->sk), 0,
 +					       IPPROTO_TCP, netns_id, flags);
 +}
 +
 +static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = {
 +	.func		= bpf_sock_addr_skc_lookup_tcp,
 +	.gpl_only	= false,
 +	.ret_type	= RET_PTR_TO_SOCK_COMMON_OR_NULL,
 +	.arg1_type	= ARG_PTR_TO_CTX,
 +	.arg2_type	= ARG_PTR_TO_MEM,
 +	.arg3_type	= ARG_CONST_SIZE,
 +	.arg4_type	= ARG_ANYTHING,
 +	.arg5_type	= ARG_ANYTHING,
 +};
 +
  BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
  	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
  {
 -	return __bpf_sk_lookup(NULL, tuple, len, sock_net(ctx->sk), 0,
 -			       IPPROTO_TCP, netns_id, flags);
 +	return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
 +					      sock_net(ctx->sk), 0, IPPROTO_TCP,
 +					      netns_id, flags);
  }
  
  static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
@@@ -5498,9 -5332,8 +5498,9 @@@
  BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx,
  	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
  {
 -	return __bpf_sk_lookup(NULL, tuple, len, sock_net(ctx->sk), 0,
 -			       IPPROTO_UDP, netns_id, flags);
 +	return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
 +					      sock_net(ctx->sk), 0, IPPROTO_UDP,
 +					      netns_id, flags);
  }
  
  static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
@@@ -5628,74 -5461,6 +5628,74 @@@ static const struct bpf_func_proto bpf_
  	.ret_type       = RET_INTEGER,
  	.arg1_type      = ARG_PTR_TO_CTX,
  };
 +
 +BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
 +	   struct tcphdr *, th, u32, th_len)
 +{
 +#ifdef CONFIG_SYN_COOKIES
 +	u32 cookie;
 +	int ret;
 +
 +	if (unlikely(th_len < sizeof(*th)))
 +		return -EINVAL;
 +
 +	/* sk_listener() allows TCP_NEW_SYN_RECV, which makes no sense here. */
 +	if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
 +		return -EINVAL;
 +
 +	if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
 +		return -EINVAL;
 +
 +	if (!th->ack || th->rst || th->syn)
 +		return -ENOENT;
 +
 +	if (tcp_synq_no_recent_overflow(sk))
 +		return -ENOENT;
 +
 +	cookie = ntohl(th->ack_seq) - 1;
 +
 +	switch (sk->sk_family) {
 +	case AF_INET:
 +		if (unlikely(iph_len < sizeof(struct iphdr)))
 +			return -EINVAL;
 +
 +		ret = __cookie_v4_check((struct iphdr *)iph, th, cookie);
 +		break;
 +
 +#if IS_BUILTIN(CONFIG_IPV6)
 +	case AF_INET6:
 +		if (unlikely(iph_len < sizeof(struct ipv6hdr)))
 +			return -EINVAL;
 +
 +		ret = __cookie_v6_check((struct ipv6hdr *)iph, th, cookie);
 +		break;
 +#endif /* CONFIG_IPV6 */
 +
 +	default:
 +		return -EPROTONOSUPPORT;
 +	}
 +
 +	if (ret > 0)
 +		return 0;
 +
 +	return -ENOENT;
 +#else
 +	return -ENOTSUPP;
 +#endif
 +}
 +
 +static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = {
 +	.func		= bpf_tcp_check_syncookie,
 +	.gpl_only	= true,
 +	.pkt_access	= true,
 +	.ret_type	= RET_INTEGER,
 +	.arg1_type	= ARG_PTR_TO_SOCK_COMMON,
 +	.arg2_type	= ARG_PTR_TO_MEM,
 +	.arg3_type	= ARG_CONST_SIZE,
 +	.arg4_type	= ARG_PTR_TO_MEM,
 +	.arg5_type	= ARG_CONST_SIZE,
 +};
 +
  #endif /* CONFIG_INET */
  
  bool bpf_helper_changes_pkt_data(void *func)
@@@ -5821,8 -5586,6 +5821,8 @@@ sock_addr_func_proto(enum bpf_func_id f
  		return &bpf_sock_addr_sk_lookup_udp_proto;
  	case BPF_FUNC_sk_release:
  		return &bpf_sk_release_proto;
 +	case BPF_FUNC_skc_lookup_tcp:
 +		return &bpf_sock_addr_skc_lookup_tcp_proto;
  #endif /* CONFIG_INET */
  	default:
  		return bpf_base_func_proto(func_id);
@@@ -5956,12 -5719,6 +5956,12 @@@ tc_cls_act_func_proto(enum bpf_func_id 
  		return &bpf_tcp_sock_proto;
  	case BPF_FUNC_get_listener_sock:
  		return &bpf_get_listener_sock_proto;
 +	case BPF_FUNC_skc_lookup_tcp:
 +		return &bpf_skc_lookup_tcp_proto;
 +	case BPF_FUNC_tcp_check_syncookie:
 +		return &bpf_tcp_check_syncookie_proto;
 +	case BPF_FUNC_skb_ecn_set_ce:
 +		return &bpf_skb_ecn_set_ce_proto;
  #endif
  	default:
  		return bpf_base_func_proto(func_id);
@@@ -5997,10 -5754,6 +5997,10 @@@ xdp_func_proto(enum bpf_func_id func_id
  		return &bpf_xdp_sk_lookup_tcp_proto;
  	case BPF_FUNC_sk_release:
  		return &bpf_sk_release_proto;
 +	case BPF_FUNC_skc_lookup_tcp:
 +		return &bpf_xdp_skc_lookup_tcp_proto;
 +	case BPF_FUNC_tcp_check_syncookie:
 +		return &bpf_tcp_check_syncookie_proto;
  #endif
  	default:
  		return bpf_base_func_proto(func_id);
@@@ -6093,8 -5846,6 +6093,8 @@@ sk_skb_func_proto(enum bpf_func_id func
  		return &bpf_sk_lookup_udp_proto;
  	case BPF_FUNC_sk_release:
  		return &bpf_sk_release_proto;
 +	case BPF_FUNC_skc_lookup_tcp:
 +		return &bpf_skc_lookup_tcp_proto;
  #endif
  	default:
  		return bpf_base_func_proto(func_id);
@@@ -6862,14 -6613,8 +6862,8 @@@ static bool flow_dissector_is_valid_acc
  					   const struct bpf_prog *prog,
  					   struct bpf_insn_access_aux *info)
  {
- 	if (type == BPF_WRITE) {
- 		switch (off) {
- 		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
- 			break;
- 		default:
- 			return false;
- 		}
- 	}
+ 	if (type == BPF_WRITE)
+ 		return false;
  
  	switch (off) {
  	case bpf_ctx_range(struct __sk_buff, data):
@@@ -6881,11 -6626,7 +6875,7 @@@
  	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
  		info->reg_type = PTR_TO_FLOW_KEYS;
  		break;
- 	case bpf_ctx_range(struct __sk_buff, tc_classid):
- 	case bpf_ctx_range(struct __sk_buff, data_meta):
- 	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
- 	case bpf_ctx_range(struct __sk_buff, tstamp):
- 	case bpf_ctx_range(struct __sk_buff, wire_len):
+ 	default:
  		return false;
  	}
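
filter.c above introduces bpf_skc_lookup_tcp(), which can return a
sock-common (e.g. a listener or timewait socket) rather than only full
sockets, and bpf_tcp_check_syncookie(), which validates an ACK against a
previously issued SYN cookie. The intended combination, sketched as a hedged
fragment of an XDP program; tuple, iph and th are assumed to be parsed and
bounds-checked already:

struct bpf_sock *sk;
int ret;

sk = bpf_skc_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv4),
			BPF_F_CURRENT_NETNS, 0);
if (!sk)
	return XDP_PASS;

ret = bpf_tcp_check_syncookie(sk, iph, sizeof(*iph), th, sizeof(*th));
bpf_sk_release(sk); /* the lookup took a reference */

if (ret == 0) {
	/* valid cookie: this ACK legitimately completes a handshake */
}
return XDP_PASS;
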
  
diff --combined net/core/flow_dissector.c
index b4d581134ef2,94a450b2191a..795449713ba4
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@@ -707,6 -707,7 +707,7 @@@ bool __skb_flow_bpf_dissect(struct bpf_
  	/* Pass parameters to the BPF program */
  	memset(flow_keys, 0, sizeof(*flow_keys));
  	cb->qdisc_cb.flow_keys = flow_keys;
+ 	flow_keys->n_proto = skb->protocol;
  	flow_keys->nhoff = skb_network_offset(skb);
  	flow_keys->thoff = flow_keys->nhoff;
  
@@@ -716,7 -717,8 +717,8 @@@
  	/* Restore state */
  	memcpy(cb, &cb_saved, sizeof(cb_saved));
  
- 	flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, 0, skb->len);
+ 	flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff,
+ 				   skb_network_offset(skb), skb->len);
  	flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
  				   flow_keys->nhoff, skb->len);
  
@@@ -732,8 -734,6 +734,8 @@@
   * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
   * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
   * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 + * @flags: flags that control the dissection process, e.g.
 + *         FLOW_DISSECTOR_F_STOP_AT_L3.
   *
   * The function will try to retrieve individual keys into target specified
   * by flow_dissector from either the skbuff or a raw buffer specified by the
diff --combined net/core/skbuff.c
index 4782f9354dd1,ef2cd5712098..9901f5322852
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@@ -77,8 -77,6 +77,8 @@@
  #include <linux/capability.h>
  #include <linux/user_namespace.h>
  
 +#include "datagram.h"
 +
  struct kmem_cache *skbuff_head_cache __ro_after_init;
  static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
  #ifdef CONFIG_SKB_EXTENSIONS
@@@ -1107,6 -1105,9 +1107,6 @@@ void sock_zerocopy_put_abort(struct ubu
  }
  EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort);
  
 -extern int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
 -				   struct iov_iter *from, size_t length);
 -
  int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len)
  {
  	return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
@@@ -3800,7 -3801,7 +3800,7 @@@ int skb_gro_receive(struct sk_buff *p, 
  	unsigned int delta_truesize;
  	struct sk_buff *lp;
  
- 	if (unlikely(p->len + len >= 65536))
+ 	if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
  		return -E2BIG;
  
  	lp = NAPI_GRO_CB(p)->last;
diff --combined net/ipv4/tcp_ipv4.c
index 3979939804b7,2f8039a26b08..faa6fa619f59
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@@ -1774,7 -1774,6 +1774,7 @@@ static void tcp_v4_fill_cb(struct sk_bu
  int tcp_v4_rcv(struct sk_buff *skb)
  {
  	struct net *net = dev_net(skb->dev);
 +	struct sk_buff *skb_to_free;
  	int sdif = inet_sdif(skb);
  	const struct iphdr *iph;
  	const struct tcphdr *th;
@@@ -1906,17 -1905,11 +1906,17 @@@ process
  	tcp_segs_in(tcp_sk(sk), skb);
  	ret = 0;
  	if (!sock_owned_by_user(sk)) {
 +		skb_to_free = sk->sk_rx_skb_cache;
 +		sk->sk_rx_skb_cache = NULL;
  		ret = tcp_v4_do_rcv(sk, skb);
 -	} else if (tcp_add_backlog(sk, skb)) {
 -		goto discard_and_relse;
 +	} else {
 +		if (tcp_add_backlog(sk, skb))
 +			goto discard_and_relse;
 +		skb_to_free = NULL;
  	}
  	bh_unlock_sock(sk);
 +	if (skb_to_free)
 +		__kfree_skb(skb_to_free);
  
  put_and_return:
  	if (refcounted)
@@@ -2585,7 -2578,8 +2585,8 @@@ static void __net_exit tcp_sk_exit(stru
  {
  	int cpu;
  
- 	module_put(net->ipv4.tcp_congestion_control->owner);
+ 	if (net->ipv4.tcp_congestion_control)
+ 		module_put(net->ipv4.tcp_congestion_control->owner);
  
  	for_each_possible_cpu(cpu)
  		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
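
In tcp_v4_rcv() above, the per-socket sk_rx_skb_cache skb is detached while
the socket is bh-locked but only freed after bh_unlock_sock(), so the freeing
work does not lengthen the locked section. The shape of the pattern, reduced:

struct sk_buff *skb_to_free;

bh_lock_sock(sk);
skb_to_free = sk->sk_rx_skb_cache; /* detach under the lock */
sk->sk_rx_skb_cache = NULL;
/* ... receive processing ... */
bh_unlock_sock(sk);

if (skb_to_free)
	__kfree_skb(skb_to_free); /* free outside the locked section */

The tcp_sk_exit() hunk is independent: the added NULL check keeps netns
teardown after a failed init from dereferencing an unset congestion-control
pointer.
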
diff --combined net/openvswitch/flow_netlink.c
index bd019058fc6f,4bdf5e3ac208..3563acd5f92e
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@@ -91,7 -91,6 +91,7 @@@ static bool actions_may_change_flow(con
  		case OVS_ACTION_ATTR_SET:
  		case OVS_ACTION_ATTR_SET_MASKED:
  		case OVS_ACTION_ATTR_METER:
 +		case OVS_ACTION_ATTR_CHECK_PKT_LEN:
  		default:
  			return true;
  		}
@@@ -404,7 -403,6 +404,7 @@@ static const struct ovs_len_tbl ovs_tun
  	[OVS_TUNNEL_KEY_ATTR_IPV6_SRC]      = { .len = sizeof(struct in6_addr) },
  	[OVS_TUNNEL_KEY_ATTR_IPV6_DST]      = { .len = sizeof(struct in6_addr) },
  	[OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS]   = { .len = OVS_ATTR_VARIABLE },
 +	[OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE]   = { .len = 0 },
  };
  
  static const struct ovs_len_tbl
@@@ -668,7 -666,6 +668,7 @@@ static int ip_tun_from_nlattr(const str
  			      bool log)
  {
  	bool ttl = false, ipv4 = false, ipv6 = false;
 +	bool info_bridge_mode = false;
  	__be16 tun_flags = 0;
  	int opts_type = 0;
  	struct nlattr *a;
@@@ -785,10 -782,6 +785,10 @@@
  			tun_flags |= TUNNEL_ERSPAN_OPT;
  			opts_type = type;
  			break;
 +		case OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE:
 +			info_bridge_mode = true;
 +			ipv4 = true;
 +			break;
  		default:
  			OVS_NLERR(log, "Unknown IP tunnel attribute %d",
  				  type);
@@@ -819,29 -812,16 +819,29 @@@
  			OVS_NLERR(log, "IP tunnel dst address not specified");
  			return -EINVAL;
  		}
 -		if (ipv4 && !match->key->tun_key.u.ipv4.dst) {
 -			OVS_NLERR(log, "IPv4 tunnel dst address is zero");
 -			return -EINVAL;
 +		if (ipv4) {
 +			if (info_bridge_mode) {
 +				if (match->key->tun_key.u.ipv4.src ||
 +				    match->key->tun_key.u.ipv4.dst ||
 +				    match->key->tun_key.tp_src ||
 +				    match->key->tun_key.tp_dst ||
 +				    match->key->tun_key.ttl ||
 +				    match->key->tun_key.tos ||
 +				    tun_flags & ~TUNNEL_KEY) {
 +					OVS_NLERR(log, "IPv4 tun info is not correct");
 +					return -EINVAL;
 +				}
 +			} else if (!match->key->tun_key.u.ipv4.dst) {
 +				OVS_NLERR(log, "IPv4 tunnel dst address is zero");
 +				return -EINVAL;
 +			}
  		}
  		if (ipv6 && ipv6_addr_any(&match->key->tun_key.u.ipv6.dst)) {
  			OVS_NLERR(log, "IPv6 tunnel dst address is zero");
  			return -EINVAL;
  		}
  
 -		if (!ttl) {
 +		if (!ttl && !info_bridge_mode) {
  			OVS_NLERR(log, "IP tunnel TTL not specified.");
  			return -EINVAL;
  		}
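
The reworked validation above enforces that a bridge-mode IPv4 tunnel key
carries nothing beyond TUNNEL_KEY: no addresses, ports, TTL, or TOS may be
set, and the usual TTL-required rule is waived. A compact userspace
restatement of the rule (the struct layout and the nonzero-TTL shorthand are
assumptions of this sketch, not the kernel's exact representation):

#include <stdbool.h>
#include <stdint.h>

#define F_KEY 0x1	/* stands in for TUNNEL_KEY */

struct tun_key_model {
	uint32_t src, dst;
	uint16_t tp_src, tp_dst;
	uint8_t ttl, tos;	/* nonzero ttl models "TTL attribute given" */
	uint16_t flags;
};

static bool ipv4_tun_ok(const struct tun_key_model *k, bool bridge_mode)
{
	if (bridge_mode)
		return !k->src && !k->dst && !k->tp_src && !k->tp_dst &&
		       !k->ttl && !k->tos && !(k->flags & ~F_KEY);
	return k->dst != 0 && k->ttl != 0;
}

int main(void)
{
	struct tun_key_model bridge_key = { .flags = F_KEY };

	return ipv4_tun_ok(&bridge_key, true) ? 0 : 1;	/* exits 0 */
}
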
@@@ -870,17 -850,12 +870,17 @@@ static int vxlan_opt_to_nlattr(struct s
  static int __ip_tun_to_nlattr(struct sk_buff *skb,
  			      const struct ip_tunnel_key *output,
  			      const void *tun_opts, int swkey_tun_opts_len,
 -			      unsigned short tun_proto)
 +			      unsigned short tun_proto, u8 mode)
  {
  	if (output->tun_flags & TUNNEL_KEY &&
  	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id,
  			 OVS_TUNNEL_KEY_ATTR_PAD))
  		return -EMSGSIZE;
 +
 +	if (mode & IP_TUNNEL_INFO_BRIDGE)
 +		return nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE)
 +		       ? -EMSGSIZE : 0;
 +
  	switch (tun_proto) {
  	case AF_INET:
  		if (output->u.ipv4.src &&
@@@ -943,7 -918,7 +943,7 @@@
  static int ip_tun_to_nlattr(struct sk_buff *skb,
  			    const struct ip_tunnel_key *output,
  			    const void *tun_opts, int swkey_tun_opts_len,
 -			    unsigned short tun_proto)
 +			    unsigned short tun_proto, u8 mode)
  {
  	struct nlattr *nla;
  	int err;
@@@ -953,7 -928,7 +953,7 @@@
  		return -EMSGSIZE;
  
  	err = __ip_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len,
 -				 tun_proto);
 +				 tun_proto, mode);
  	if (err)
  		return err;
  
@@@ -967,7 -942,7 +967,7 @@@ int ovs_nla_put_tunnel_info(struct sk_b
  	return __ip_tun_to_nlattr(skb, &tun_info->key,
  				  ip_tunnel_info_opts(tun_info),
  				  tun_info->options_len,
 -				  ip_tunnel_info_af(tun_info));
 +				  ip_tunnel_info_af(tun_info), tun_info->mode);
  }
  
  static int encode_vlan_from_nlattrs(struct sw_flow_match *match,
@@@ -2005,7 -1980,7 +2005,7 @@@ static int __ovs_nla_put_key(const stru
  			opts = TUN_METADATA_OPTS(output, swkey->tun_opts_len);
  
  		if (ip_tun_to_nlattr(skb, &output->tun_key, opts,
 -				     swkey->tun_opts_len, swkey->tun_proto))
 +				     swkey->tun_opts_len, swkey->tun_proto, 0))
  			goto nla_put_failure;
  	}
  
@@@ -2331,14 -2306,14 +2331,14 @@@ static struct nlattr *reserve_sfa_size(
  
  	struct sw_flow_actions *acts;
  	int new_acts_size;
- 	int req_size = NLA_ALIGN(attr_len);
+ 	size_t req_size = NLA_ALIGN(attr_len);
  	int next_offset = offsetof(struct sw_flow_actions, actions) +
  					(*sfa)->actions_len;
  
  	if (req_size <= (ksize(*sfa) - next_offset))
  		goto out;
  
- 	new_acts_size = ksize(*sfa) * 2;
+ 	new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
  
  	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
  		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
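
The reserve_sfa_size() hunk above is a two-part fix: req_size becomes size_t
so an NLA_ALIGN()ed length of a large attribute cannot be mishandled as a
signed int, and the regrown buffer is sized max(next_offset + req_size,
ksize(*sfa) * 2), since doubling alone can be too small when a single action
is larger than the current buffer. The growth policy in isolation
(illustrative userspace sketch):

#include <stddef.h>
#include <stdio.h>

static size_t grow(size_t cur_size, size_t next_offset, size_t req_size)
{
	size_t doubled = cur_size * 2;
	size_t needed = next_offset + req_size;

	return needed > doubled ? needed : doubled;
}

int main(void)
{
	printf("%zu\n", grow(64, 48, 16));	/* 128: doubling suffices */
	printf("%zu\n", grow(64, 48, 500));	/* 548: doubling would lose data */
	return 0;
}
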
@@@ -2630,8 -2605,6 +2630,8 @@@ static int validate_and_copy_set_tun(co
  	tun_info->mode = IP_TUNNEL_INFO_TX;
  	if (key.tun_proto == AF_INET6)
  		tun_info->mode |= IP_TUNNEL_INFO_IPV6;
 +	else if (key.tun_proto == AF_INET && key.tun_key.u.ipv4.dst == 0)
 +		tun_info->mode |= IP_TUNNEL_INFO_BRIDGE;
  	tun_info->key = key.tun_key;
  
  	/* We need to store the options in the action itself since
@@@ -2865,87 -2838,6 +2865,87 @@@ static int validate_userspace(const str
  	return 0;
  }
  
 +static const struct nla_policy cpl_policy[OVS_CHECK_PKT_LEN_ATTR_MAX + 1] = {
 +	[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN] = {.type = NLA_U16 },
 +	[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER] = {.type = NLA_NESTED },
 +	[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL] = {.type = NLA_NESTED },
 +};
 +
 +static int validate_and_copy_check_pkt_len(struct net *net,
 +					   const struct nlattr *attr,
 +					   const struct sw_flow_key *key,
 +					   struct sw_flow_actions **sfa,
 +					   __be16 eth_type, __be16 vlan_tci,
 +					   bool log, bool last)
 +{
 +	const struct nlattr *acts_if_greater, *acts_if_lesser_eq;
 +	struct nlattr *a[OVS_CHECK_PKT_LEN_ATTR_MAX + 1];
 +	struct check_pkt_len_arg arg;
 +	int nested_acts_start;
 +	int start, err;
 +
 +	err = nla_parse_strict(a, OVS_CHECK_PKT_LEN_ATTR_MAX, nla_data(attr),
 +			       nla_len(attr), cpl_policy, NULL);
 +	if (err)
 +		return err;
 +
 +	if (!a[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN] ||
 +	    !nla_get_u16(a[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN]))
 +		return -EINVAL;
 +
 +	acts_if_lesser_eq = a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL];
 +	acts_if_greater = a[OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER];
 +
 +	/* Both of the nested action lists must be present. */
 +	if (!acts_if_greater || !acts_if_lesser_eq)
 +		return -EINVAL;
 +
 +	/* validation done, copy the nested actions. */
 +	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_CHECK_PKT_LEN,
 +					log);
 +	if (start < 0)
 +		return start;
 +
 +	arg.pkt_len = nla_get_u16(a[OVS_CHECK_PKT_LEN_ATTR_PKT_LEN]);
 +	arg.exec_for_lesser_equal =
 +		last || !actions_may_change_flow(acts_if_lesser_eq);
 +	arg.exec_for_greater =
 +		last || !actions_may_change_flow(acts_if_greater);
 +
 +	err = ovs_nla_add_action(sfa, OVS_CHECK_PKT_LEN_ATTR_ARG, &arg,
 +				 sizeof(arg), log);
 +	if (err)
 +		return err;
 +
 +	nested_acts_start = add_nested_action_start(sfa,
 +		OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL, log);
 +	if (nested_acts_start < 0)
 +		return nested_acts_start;
 +
 +	err = __ovs_nla_copy_actions(net, acts_if_lesser_eq, key, sfa,
 +				     eth_type, vlan_tci, log);
 +
 +	if (err)
 +		return err;
 +
 +	add_nested_action_end(*sfa, nested_acts_start);
 +
 +	nested_acts_start = add_nested_action_start(sfa,
 +		OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER, log);
 +	if (nested_acts_start < 0)
 +		return nested_acts_start;
 +
 +	err = __ovs_nla_copy_actions(net, acts_if_greater, key, sfa,
 +				     eth_type, vlan_tci, log);
 +
 +	if (err)
 +		return err;
 +
 +	add_nested_action_end(*sfa, nested_acts_start);
 +	add_nested_action_end(*sfa, start);
 +	return 0;
 +}
 +
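
validate_and_copy_check_pkt_len() above parses the new
OVS_ACTION_ATTR_CHECK_PKT_LEN action: pkt_len must be present and nonzero,
both nested action lists must exist, and the serialized argument records
whether each branch may run directly on the original packet (possible when
the action is last or the branch cannot modify the flow key, so the datapath
can skip cloning). A sketch of just that decision (struct and helper names
are illustrative):

#include <stdbool.h>

struct cpl_arg_model {
	unsigned short pkt_len;
	bool exec_for_lesser_equal;	/* branch may run without a clone */
	bool exec_for_greater;
};

static void fill_arg(struct cpl_arg_model *arg, unsigned short pkt_len,
		     bool last, bool le_changes_flow, bool gt_changes_flow)
{
	arg->pkt_len = pkt_len;
	arg->exec_for_lesser_equal = last || !le_changes_flow;
	arg->exec_for_greater = last || !gt_changes_flow;
}

int main(void)
{
	struct cpl_arg_model arg;

	fill_arg(&arg, 1500, false, false, true);
	return (arg.exec_for_lesser_equal && !arg.exec_for_greater) ? 0 : 1;
}
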
  static int copy_action(const struct nlattr *from,
  		       struct sw_flow_actions **sfa, bool log)
  {
@@@ -2992,7 -2884,6 +2992,7 @@@ static int __ovs_nla_copy_actions(struc
  			[OVS_ACTION_ATTR_POP_NSH] = 0,
  			[OVS_ACTION_ATTR_METER] = sizeof(u32),
  			[OVS_ACTION_ATTR_CLONE] = (u32)-1,
 +			[OVS_ACTION_ATTR_CHECK_PKT_LEN] = (u32)-1,
  		};
  		const struct ovs_action_push_vlan *vlan;
  		int type = nla_type(a);
@@@ -3194,19 -3085,6 +3194,19 @@@
  			break;
  		}
  
 +		case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
 +			bool last = nla_is_last(a, rem);
 +
 +			err = validate_and_copy_check_pkt_len(net, a, key, sfa,
 +							      eth_type,
 +							      vlan_tci, log,
 +							      last);
 +			if (err)
 +				return err;
 +			skip_copy = true;
 +			break;
 +		}
 +
  		default:
  			OVS_NLERR(log, "Unknown Action type %d", type);
  			return -EINVAL;
@@@ -3305,75 -3183,6 +3305,75 @@@ static int clone_action_to_attr(const s
  	return err;
  }
  
 +static int check_pkt_len_action_to_attr(const struct nlattr *attr,
 +					struct sk_buff *skb)
 +{
 +	struct nlattr *start, *ac_start = NULL;
 +	const struct check_pkt_len_arg *arg;
 +	const struct nlattr *a, *cpl_arg;
 +	int err = 0, rem = nla_len(attr);
 +
 +	start = nla_nest_start(skb, OVS_ACTION_ATTR_CHECK_PKT_LEN);
 +	if (!start)
 +		return -EMSGSIZE;
 +
 +	/* The first nested attribute in 'attr' is always
 +	 * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
 +	 */
 +	cpl_arg = nla_data(attr);
 +	arg = nla_data(cpl_arg);
 +
 +	if (nla_put_u16(skb, OVS_CHECK_PKT_LEN_ATTR_PKT_LEN, arg->pkt_len)) {
 +		err = -EMSGSIZE;
 +		goto out;
 +	}
 +
 +	/* Second nested attribute in 'attr' is always
 +	 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
 +	 */
 +	a = nla_next(cpl_arg, &rem);
 +	ac_start = nla_nest_start(skb,
 +		OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL);
 +	if (!ac_start) {
 +		err = -EMSGSIZE;
 +		goto out;
 +	}
 +
 +	err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
 +	if (err) {
 +		nla_nest_cancel(skb, ac_start);
 +		goto out;
 +	} else {
 +		nla_nest_end(skb, ac_start);
 +	}
 +
 +	/* Third nested attribute in 'attr' is always
 +	 * OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER.
 +	 */
 +	a = nla_next(a, &rem);
 +	ac_start = nla_nest_start(skb,
 +				   OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER);
 +	if (!ac_start) {
 +		err = -EMSGSIZE;
 +		goto out;
 +	}
 +
 +	err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
 +	if (err) {
 +		nla_nest_cancel(skb, ac_start);
 +		goto out;
 +	} else {
 +		nla_nest_end(skb, ac_start);
 +	}
 +
 +	nla_nest_end(skb, start);
 +	return 0;
 +
 +out:
 +	nla_nest_cancel(skb, start);
 +	return err;
 +}
 +
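
check_pkt_len_action_to_attr() above depends on a fixed attribute order (the
argument, then the lesser-equal actions, then the greater actions) and uses
the nla_nest_start()/nla_nest_end()/nla_nest_cancel() idiom so a failed dump
rolls the partial nest back. A toy model of that begin/commit/cancel pattern
(buffer layout and helpers are invented for illustration):

#include <stddef.h>
#include <string.h>

struct msg_model {
	unsigned char buf[256];
	size_t len;
};

static size_t nest_begin(struct msg_model *m)
{
	size_t start = m->len;

	m->len += 4;		/* reserve a type/length header */
	return start;
}

static void nest_end(struct msg_model *m, size_t start)
{
	unsigned short payload = (unsigned short)(m->len - start - 4);

	memcpy(&m->buf[start + 2], &payload, sizeof(payload));
}

static void nest_cancel(struct msg_model *m, size_t start)
{
	m->len = start;		/* drop the partial nest on error */
}

int main(void)
{
	struct msg_model m = { .len = 0 };
	size_t start = nest_begin(&m);

	m.len += 10;		/* pretend some actions were dumped */
	nest_end(&m, start);	/* success path */

	start = nest_begin(&m);
	nest_cancel(&m, start);	/* error path: roll back */
	return 0;
}
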
  static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
  {
  	const struct nlattr *ovs_key = nla_data(a);
@@@ -3393,7 -3202,7 +3393,7 @@@
  		err =  ip_tun_to_nlattr(skb, &tun_info->key,
  					ip_tunnel_info_opts(tun_info),
  					tun_info->options_len,
 -					ip_tunnel_info_af(tun_info));
 +					ip_tunnel_info_af(tun_info), tun_info->mode);
  		if (err)
  			return err;
  		nla_nest_end(skb, start);
@@@ -3468,12 -3277,6 +3468,12 @@@ int ovs_nla_put_actions(const struct nl
  				return err;
  			break;
  
 +		case OVS_ACTION_ATTR_CHECK_PKT_LEN:
 +			err = check_pkt_len_action_to_attr(a, skb);
 +			if (err)
 +				return err;
 +			break;
 +
  		default:
  			if (nla_put(skb, type, nla_len(a), nla_data(a)))
  				return -EMSGSIZE;
diff --combined net/tls/tls_sw.c
index 4f821edeeae6,20b191227969..4741edf4bb1e
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@@ -42,6 -42,8 +42,6 @@@
  #include <net/strparser.h>
  #include <net/tls.h>
  
 -#define MAX_IV_SIZE	TLS_CIPHER_AES_GCM_128_IV_SIZE
 -
  static int __skb_nsg(struct sk_buff *skb, int offset, int len,
                       unsigned int recursion_level)
  {
@@@ -223,7 -225,7 +223,7 @@@ static int tls_do_decryption(struct soc
  		/* Using skb->sk to push sk through to crypto async callback
  		 * handler. This allows propagating errors up to the socket
  		 * if needed. It _must_ be cleared in the async handler
 -		 * before kfree_skb is called. We _know_ skb->sk is NULL
 +		 * before consume_skb is called. We _know_ skb->sk is NULL
  		 * because it is a clone from strparser.
  		 */
  		skb->sk = sk;
@@@ -477,18 -479,11 +477,18 @@@ static int tls_do_encryption(struct soc
  	struct tls_rec *rec = ctx->open_rec;
  	struct sk_msg *msg_en = &rec->msg_encrypted;
  	struct scatterlist *sge = sk_msg_elem(msg_en, start);
 -	int rc;
 +	int rc, iv_offset = 0;
 +
 +	/* For CCM based ciphers, first byte of IV is a constant */
 +	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
 +		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
 +		iv_offset = 1;
 +	}
 +
 +	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
 +	       prot->iv_size + prot->salt_size);
  
 -	memcpy(rec->iv_data, tls_ctx->tx.iv, sizeof(rec->iv_data));
 -	xor_iv_with_seq(prot->version, rec->iv_data,
 -			tls_ctx->tx.rec_seq);
 +	xor_iv_with_seq(prot->version, rec->iv_data, tls_ctx->tx.rec_seq);
  
  	sge->offset += prot->prepend_size;
  	sge->length -= prot->prepend_size;
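
The tls_do_encryption() change above generalizes TX nonce assembly: for CCM
the first byte of the nonce is a fixed flags byte (TLS_AES_CCM_IV_B0_BYTE,
matching the literal 2 written on the decrypt path below) and salt||iv is
copied at offset 1, while for GCM the copy starts at offset 0. A standalone
sketch of that layout (sizes and the helper name are assumptions of the
example):

#include <stdint.h>
#include <string.h>

#define CCM_B0_BYTE 2	/* assumed value of TLS_AES_CCM_IV_B0_BYTE */

static void build_nonce(uint8_t *out, const uint8_t *salt_iv,
			size_t salt_iv_len, int is_ccm)
{
	size_t off = 0;

	if (is_ccm) {
		out[0] = CCM_B0_BYTE;	/* fixed flags byte */
		off = 1;
	}
	memcpy(out + off, salt_iv, salt_iv_len);	/* salt || iv */
}

int main(void)
{
	uint8_t nonce[13], salt_iv[12] = { 0 };

	build_nonce(nonce, salt_iv, sizeof(salt_iv), 1);
	return nonce[0] == CCM_B0_BYTE ? 0 : 1;
}
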
@@@ -1349,7 -1344,6 +1349,7 @@@ static int decrypt_internal(struct soc
  	struct scatterlist *sgout = NULL;
  	const int data_len = rxm->full_len - prot->overhead_size +
  			     prot->tail_size;
 +	int iv_offset = 0;
  
  	if (*zc && (out_iov || out_sg)) {
  		if (out_iov)
@@@ -1392,25 -1386,18 +1392,25 @@@
  	aad = (u8 *)(sgout + n_sgout);
  	iv = aad + prot->aad_size;
  
 +	/* For CCM based ciphers, first byte of nonce+iv is always '2' */
 +	if (prot->cipher_type == TLS_CIPHER_AES_CCM_128) {
 +		iv[0] = 2;
 +		iv_offset = 1;
 +	}
 +
  	/* Prepare IV */
  	err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
 -			    iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
 +			    iv + iv_offset + prot->salt_size,
  			    prot->iv_size);
  	if (err < 0) {
  		kfree(mem);
  		return err;
  	}
  	if (prot->version == TLS_1_3_VERSION)
 -		memcpy(iv, tls_ctx->rx.iv, crypto_aead_ivsize(ctx->aead_recv));
 +		memcpy(iv + iv_offset, tls_ctx->rx.iv,
 +		       crypto_aead_ivsize(ctx->aead_recv));
  	else
 -		memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
 +		memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
  
  	xor_iv_with_seq(prot->version, iv, tls_ctx->rx.rec_seq);
  
@@@ -1497,6 -1484,8 +1497,8 @@@ static int decrypt_skb_update(struct so
  
  				return err;
  			}
+ 		} else {
+ 			*zc = false;
  		}
  
  		rxm->full_len -= padding_length(ctx, tls_ctx, skb);
@@@ -1535,7 -1524,7 +1537,7 @@@ static bool tls_sw_advance_skb(struct s
  			rxm->full_len -= len;
  			return false;
  		}
 -		kfree_skb(skb);
 +		consume_skb(skb);
  	}
  
  	/* Finished with message */
@@@ -1644,7 -1633,7 +1646,7 @@@ static int process_rx_list(struct tls_s
  
  		if (!is_peek) {
  			skb_unlink(skb, &ctx->rx_list);
 -			kfree_skb(skb);
 +			consume_skb(skb);
  		}
  
  		skb = next_skb;
@@@ -2165,15 -2154,14 +2167,15 @@@ int tls_set_sw_offload(struct sock *sk
  	struct tls_crypto_info *crypto_info;
  	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
  	struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
 +	struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
  	struct tls_sw_context_tx *sw_ctx_tx = NULL;
  	struct tls_sw_context_rx *sw_ctx_rx = NULL;
  	struct cipher_context *cctx;
  	struct crypto_aead **aead;
  	struct strp_callbacks cb;
 -	u16 nonce_size, tag_size, iv_size, rec_seq_size;
 +	u16 nonce_size, tag_size, iv_size, rec_seq_size, salt_size;
  	struct crypto_tfm *tfm;
 -	char *iv, *rec_seq, *key, *salt;
 +	char *iv, *rec_seq, *key, *salt, *cipher_name;
  	size_t keysize;
  	int rc = 0;
  
@@@ -2238,8 -2226,6 +2240,8 @@@
  		keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
  		key = gcm_128_info->key;
  		salt = gcm_128_info->salt;
 +		salt_size = TLS_CIPHER_AES_GCM_128_SALT_SIZE;
 +		cipher_name = "gcm(aes)";
  		break;
  	}
  	case TLS_CIPHER_AES_GCM_256: {
@@@ -2255,25 -2241,6 +2257,25 @@@
  		keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
  		key = gcm_256_info->key;
  		salt = gcm_256_info->salt;
 +		salt_size = TLS_CIPHER_AES_GCM_256_SALT_SIZE;
 +		cipher_name = "gcm(aes)";
 +		break;
 +	}
 +	case TLS_CIPHER_AES_CCM_128: {
 +		nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
 +		tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
 +		iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
 +		iv = ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->iv;
 +		rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
 +		rec_seq =
 +		((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->rec_seq;
 +		ccm_128_info =
 +		(struct tls12_crypto_info_aes_ccm_128 *)crypto_info;
 +		keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
 +		key = ccm_128_info->key;
 +		salt = ccm_128_info->salt;
 +		salt_size = TLS_CIPHER_AES_CCM_128_SALT_SIZE;
 +		cipher_name = "ccm(aes)";
  		break;
  	}
  	default:
@@@ -2303,16 -2270,16 +2305,16 @@@
  	prot->overhead_size = prot->prepend_size +
  			      prot->tag_size + prot->tail_size;
  	prot->iv_size = iv_size;
 -	cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
 -			   GFP_KERNEL);
 +	prot->salt_size = salt_size;
 +	cctx->iv = kmalloc(iv_size + salt_size, GFP_KERNEL);
  	if (!cctx->iv) {
  		rc = -ENOMEM;
  		goto free_priv;
  	}
  	/* Note: 128 & 256 bit salt are the same size */
 -	memcpy(cctx->iv, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
 -	memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
  	prot->rec_seq_size = rec_seq_size;
 +	memcpy(cctx->iv, salt, salt_size);
 +	memcpy(cctx->iv + salt_size, iv, iv_size);
  	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
  	if (!cctx->rec_seq) {
  		rc = -ENOMEM;
@@@ -2320,7 -2287,7 +2322,7 @@@
  	}
  
  	if (!*aead) {
 -		*aead = crypto_alloc_aead("gcm(aes)", 0, 0);
 +		*aead = crypto_alloc_aead(cipher_name, 0, 0);
  		if (IS_ERR(*aead)) {
  			rc = PTR_ERR(*aead);
  			*aead = NULL;
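
Finally, tls_set_sw_offload() above stops hardcoding AES-GCM-128 parameters:
each cipher suite now selects its own salt size and the algorithm string
handed to crypto_alloc_aead() ("gcm(aes)" or "ccm(aes)"). A table-driven
restatement of that selection (userspace sketch; the sizes mirror the TLS
AES suite constants used in the hunks above):

#include <stddef.h>
#include <string.h>

struct cipher_desc {
	const char *name;	/* string given to the AEAD allocator */
	size_t key_size, salt_size, iv_size, tag_size;
};

static const struct cipher_desc ciphers[] = {
	{ "gcm(aes)", 16, 4, 8, 16 },	/* AES-128-GCM */
	{ "gcm(aes)", 32, 4, 8, 16 },	/* AES-256-GCM */
	{ "ccm(aes)", 16, 4, 8, 16 },	/* AES-128-CCM */
};

static const struct cipher_desc *find_cipher(const char *name, size_t key_size)
{
	for (size_t i = 0; i < sizeof(ciphers) / sizeof(ciphers[0]); i++)
		if (!strcmp(ciphers[i].name, name) &&
		    ciphers[i].key_size == key_size)
			return &ciphers[i];
	return NULL;
}

int main(void)
{
	const struct cipher_desc *c = find_cipher("ccm(aes)", 16);

	return (c && c->salt_size == 4) ? 0 : 1;
}
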

-- 
LinuxNextTracking

