[linux-next] LinuxNextTracking branch, master, updated. next-20180529

batman at open-mesh.org batman at open-mesh.org
Wed May 30 00:17:34 CEST 2018


The following commit has been merged into the master branch:
commit 5b79c2af667c0e2684f2a6dbf6439074b78f490c
Merge: e52cde71709348c0d67bf0f213b438fa4d6cf9a9 bc2dbc5420e82560e650f8531ceca597441ca171
Author: David S. Miller <davem at davemloft.net>
Date:   Sat May 26 19:46:15 2018 -0400

    Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
    
    Lots of easy overlapping changes in the conflict
    resolutions here.
    
    Signed-off-by: David S. Miller <davem at davemloft.net>

diff --combined MAINTAINERS
index 032807a95558,ca4afd68530c..f492431b239b
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -2332,7 -2332,7 +2332,7 @@@ F:	drivers/gpio/gpio-ath79.
  F:	Documentation/devicetree/bindings/gpio/gpio-ath79.txt
  
  ATHEROS ATH GENERIC UTILITIES
- M:	"Luis R. Rodriguez" <mcgrof at do-not-panic.com>
+ M:	Kalle Valo <kvalo at codeaurora.org>
  L:	linux-wireless at vger.kernel.org
  S:	Supported
  F:	drivers/net/wireless/ath/*
@@@ -2347,7 -2347,7 +2347,7 @@@ S:	Maintaine
  F:	drivers/net/wireless/ath/ath5k/
  
  ATHEROS ATH6KL WIRELESS DRIVER
- M:	Kalle Valo <kvalo at qca.qualcomm.com>
+ M:	Kalle Valo <kvalo at codeaurora.org>
  L:	linux-wireless at vger.kernel.org
  W:	http://wireless.kernel.org/en/users/Drivers/ath6kl
  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
@@@ -2728,6 -2728,7 +2728,6 @@@ F:	Documentation/networking/filter.tx
  F:	Documentation/bpf/
  F:	include/linux/bpf*
  F:	include/linux/filter.h
 -F:	include/trace/events/bpf.h
  F:	include/trace/events/xdp.h
  F:	include/uapi/linux/bpf*
  F:	include/uapi/linux/filter.h
@@@ -5316,6 -5317,7 +5316,6 @@@ F:	include/linux/*mdio*.
  F:	include/linux/of_net.h
  F:	include/linux/phy.h
  F:	include/linux/phy_fixed.h
 -F:	include/linux/platform_data/mdio-gpio.h
  F:	include/linux/platform_data/mdio-bcm-unimac.h
  F:	include/trace/events/mdio.h
  F:	include/uapi/linux/mdio.h
@@@ -5386,7 -5388,6 +5386,6 @@@ S:	Maintaine
  F:	drivers/iommu/exynos-iommu.c
  
  EZchip NPS platform support
- M:	Elad Kanfi <eladkan at mellanox.com>
  M:	Vineet Gupta <vgupta at synopsys.com>
  S:	Supported
  F:	arch/arc/plat-eznps
@@@ -6502,9 -6503,15 +6501,15 @@@ F:	Documentation/networking/hinic.tx
  F:	drivers/net/ethernet/huawei/hinic/
  
  HUGETLB FILESYSTEM
- M:	Nadia Yvette Chambers <nyc at holomorphy.com>
+ M:	Mike Kravetz <mike.kravetz at oracle.com>
+ L:	linux-mm at kvack.org
  S:	Maintained
  F:	fs/hugetlbfs/
+ F:	mm/hugetlb.c
+ F:	include/linux/hugetlb.h
+ F:	Documentation/admin-guide/mm/hugetlbpage.rst
+ F:	Documentation/vm/hugetlbfs_reserv.rst
+ F:	Documentation/ABI/testing/sysfs-kernel-mm-hugepages
  
  HVA ST MEDIA DRIVER
  M:	Jean-Christophe Trotin <jean-christophe.trotin at st.com>
@@@ -8465,7 -8472,6 +8470,7 @@@ M:	Vivien Didelot <vivien.didelot at savoi
  L:	netdev at vger.kernel.org
  S:	Maintained
  F:	drivers/net/dsa/mv88e6xxx/
 +F:	linux/platform_data/mv88e6xxx.h
  F:	Documentation/devicetree/bindings/net/dsa/marvell.txt
  
  MARVELL ARMADA DRM SUPPORT
@@@ -9019,17 -9025,24 +9024,26 @@@ W:	http://www.mellanox.co
  Q:	http://patchwork.ozlabs.org/project/netdev/list/
  F:	drivers/net/ethernet/mellanox/mlx5/core/en_*
  
 -MELLANOX ETHERNET INNOVA DRIVER
 +MELLANOX ETHERNET INNOVA DRIVERS
- M:	Boris Pismenny <borisp at mellanox.com>
+ R:	Boris Pismenny <borisp at mellanox.com>
  L:	netdev at vger.kernel.org
  S:	Supported
  W:	http://www.mellanox.com
  Q:	http://patchwork.ozlabs.org/project/netdev/list/
 +F:	drivers/net/ethernet/mellanox/mlx5/core/en_accel/*
 +F:	drivers/net/ethernet/mellanox/mlx5/core/accel/*
  F:	drivers/net/ethernet/mellanox/mlx5/core/fpga/*
  F:	include/linux/mlx5/mlx5_ifc_fpga.h
  
+ MELLANOX ETHERNET INNOVA IPSEC DRIVER
+ R:	Boris Pismenny <borisp at mellanox.com>
+ L:	netdev at vger.kernel.org
+ S:	Supported
+ W:	http://www.mellanox.com
+ Q:	http://patchwork.ozlabs.org/project/netdev/list/
+ F:	drivers/net/ethernet/mellanox/mlx5/core/en_ipsec/*
+ F:	drivers/net/ethernet/mellanox/mlx5/core/ipsec*
+ 
  MELLANOX ETHERNET SWITCH DRIVERS
  M:	Jiri Pirko <jiri at mellanox.com>
  M:	Ido Schimmel <idosch at mellanox.com>
@@@ -9077,7 -9090,6 +9091,6 @@@ F:	include/uapi/rdma/mlx4-abi.
  
  MELLANOX MLX5 core VPI driver
  M:	Saeed Mahameed <saeedm at mellanox.com>
- M:	Matan Barak <matanb at mellanox.com>
  M:	Leon Romanovsky <leonro at mellanox.com>
  L:	netdev at vger.kernel.org
  L:	linux-rdma at vger.kernel.org
@@@ -9088,7 -9100,6 +9101,6 @@@ F:	drivers/net/ethernet/mellanox/mlx5/c
  F:	include/linux/mlx5/
  
  MELLANOX MLX5 IB driver
- M:	Matan Barak <matanb at mellanox.com>
  M:	Leon Romanovsky <leonro at mellanox.com>
  L:	linux-rdma at vger.kernel.org
  W:	http://www.mellanox.com
@@@ -9279,12 -9290,6 +9291,12 @@@ F:	include/linux/cciss*.
  F:	include/uapi/linux/cciss*.h
  F:	Documentation/scsi/smartpqi.txt
  
 +MICROSEMI ETHERNET SWITCH DRIVER
 +M:	Alexandre Belloni <alexandre.belloni at bootlin.com>
 +L:	netdev at vger.kernel.org
 +S:	Supported
 +F:	drivers/net/ethernet/mscc/
 +
  MICROSOFT SURFACE PRO 3 BUTTON DRIVER
  M:	Chen Yu <yu.c.chen at intel.com>
  L:	platform-driver-x86 at vger.kernel.org
@@@ -9828,7 -9833,6 +9840,7 @@@ F:	net/netfilter/xt_CONNSECMARK.
  F:	net/netfilter/xt_SECMARK.c
  
  NETWORKING [TLS]
 +M:	Boris Pismenny <borisp at mellanox.com>
  M:	Aviad Yehezkel <aviadye at mellanox.com>
  M:	Dave Watson <davejwatson at fb.com>
  L:	netdev at vger.kernel.org
@@@ -11628,7 -11632,7 +11640,7 @@@ S:	Maintaine
  F:	drivers/media/tuners/qt1010*
  
  QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
- M:	Kalle Valo <kvalo at qca.qualcomm.com>
+ M:	Kalle Valo <kvalo at codeaurora.org>
  L:	ath10k at lists.infradead.org
  W:	http://wireless.kernel.org/en/users/Drivers/ath10k
  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
@@@ -11679,7 -11683,7 +11691,7 @@@ S:	Maintaine
  F:	drivers/media/platform/qcom/venus/
  
  QUALCOMM WCN36XX WIRELESS DRIVER
- M:	Eugene Krasnikov <k.eugene.e at gmail.com>
+ M:	Kalle Valo <kvalo at codeaurora.org>
  L:	wcn36xx at lists.infradead.org
  W:	http://wireless.kernel.org/en/users/Drivers/wcn36xx
  T:	git git://github.com/KrasnikovEugene/wcn36xx.git
@@@ -13397,7 -13401,6 +13409,7 @@@ F:	drivers/media/usb/stk1160
  STMMAC ETHERNET DRIVER
  M:	Giuseppe Cavallaro <peppe.cavallaro at st.com>
  M:	Alexandre Torgue <alexandre.torgue at st.com>
 +M:	Jose Abreu <joabreu at synopsys.com>
  L:	netdev at vger.kernel.org
  W:	http://www.stlinux.com
  S:	Supported
@@@ -14614,9 -14617,7 +14626,9 @@@ M:	Woojung Huh <woojung.huh at microchip.c
  M:	Microchip Linux Driver Support <UNGLinuxDriver at microchip.com>
  L:	netdev at vger.kernel.org
  S:	Maintained
 +F:	Documentation/devicetree/bindings/net/microchip,lan78xx.txt
  F:	drivers/net/usb/lan78xx.*
 +F:	include/dt-bindings/net/microchip-lan78xx.h
  
  USB MASS STORAGE DRIVER
  M:	Alan Stern <stern at rowland.harvard.edu>
@@@ -15404,14 -15405,6 +15416,14 @@@ T:	git git://linuxtv.org/media_tree.gi
  S:	Maintained
  F:	drivers/media/tuners/tuner-xc2028.*
  
 +XDP SOCKETS (AF_XDP)
 +M:	Björn Töpel <bjorn.topel at intel.com>
 +M:	Magnus Karlsson <magnus.karlsson at intel.com>
 +L:	netdev at vger.kernel.org
 +S:	Maintained
 +F:	kernel/bpf/xskmap.c
 +F:	net/xdp/
 +
  XEN BLOCK SUBSYSTEM
  M:	Konrad Rzeszutek Wilk <konrad.wilk at oracle.com>
  M:	Roger Pau Monné <roger.pau at citrix.com>
diff --combined arch/x86/include/asm/nospec-branch.h
index 2f700a1db851,8b38df98548e..f6f6c63da62f
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@@ -217,6 -217,14 +217,14 @@@ enum spectre_v2_mitigation 
  	SPECTRE_V2_IBRS,
  };
  
+ /* The Speculative Store Bypass disable variants */
+ enum ssb_mitigation {
+ 	SPEC_STORE_BYPASS_NONE,
+ 	SPEC_STORE_BYPASS_DISABLE,
+ 	SPEC_STORE_BYPASS_PRCTL,
+ 	SPEC_STORE_BYPASS_SECCOMP,
+ };
+ 
  extern char __indirect_thunk_start[];
  extern char __indirect_thunk_end[];
  
@@@ -241,22 -249,27 +249,27 @@@ static inline void vmexit_fill_RSB(void
  #endif
  }
  
- #define alternative_msr_write(_msr, _val, _feature)		\
- 	asm volatile(ALTERNATIVE("",				\
- 				 "movl %[msr], %%ecx\n\t"	\
- 				 "movl %[val], %%eax\n\t"	\
- 				 "movl $0, %%edx\n\t"		\
- 				 "wrmsr",			\
- 				 _feature)			\
- 		     : : [msr] "i" (_msr), [val] "i" (_val)	\
- 		     : "eax", "ecx", "edx", "memory")
+ static __always_inline
+ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
+ {
+ 	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
+ 		: : "c" (msr),
+ 		    "a" ((u32)val),
+ 		    "d" ((u32)(val >> 32)),
+ 		    [feature] "i" (feature)
+ 		: "memory");
+ }
  
  static inline void indirect_branch_prediction_barrier(void)
  {
- 	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
- 			      X86_FEATURE_USE_IBPB);
+ 	u64 val = PRED_CMD_IBPB;
+ 
+ 	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
  }
  
+ /* The Intel SPEC CTRL MSR base value cache */
+ extern u64 x86_spec_ctrl_base;
+ 
  /*
   * With retpoline, we must use IBRS to restrict branch prediction
   * before calling into firmware.
@@@ -265,14 -278,18 +278,18 @@@
   */
  #define firmware_restrict_branch_speculation_start()			\
  do {									\
+ 	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;			\
+ 									\
  	preempt_disable();						\
- 	alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS,	\
+ 	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
  			      X86_FEATURE_USE_IBRS_FW);			\
  } while (0)
  
  #define firmware_restrict_branch_speculation_end()			\
  do {									\
- 	alternative_msr_write(MSR_IA32_SPEC_CTRL, 0,			\
+ 	u64 val = x86_spec_ctrl_base;					\
+ 									\
+ 	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
  			      X86_FEATURE_USE_IBRS_FW);			\
  	preempt_enable();						\
  } while (0)
@@@ -291,20 -308,16 +308,20 @@@
   *    lfence
   *    jmp spec_trap
   *  do_rop:
 - *    mov %rax,(%rsp)
 + *    mov %rax,(%rsp) for x86_64
 + *    mov %edx,(%esp) for x86_32
   *    retq
   *
   * Without retpolines configured:
   *
 - *    jmp *%rax
 + *    jmp *%rax for x86_64
 + *    jmp *%edx for x86_32
   */
  #ifdef CONFIG_RETPOLINE
 -# define RETPOLINE_RAX_BPF_JIT_SIZE	17
 -# define RETPOLINE_RAX_BPF_JIT()				\
 +# ifdef CONFIG_X86_64
 +#  define RETPOLINE_RAX_BPF_JIT_SIZE	17
 +#  define RETPOLINE_RAX_BPF_JIT()				\
 +do {								\
  	EMIT1_off32(0xE8, 7);	 /* callq do_rop */		\
  	/* spec_trap: */					\
  	EMIT2(0xF3, 0x90);       /* pause */			\
@@@ -312,30 -325,11 +329,30 @@@
  	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
  	/* do_rop: */						\
  	EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */	\
 -	EMIT1(0xC3);             /* retq */
 -#else
 -# define RETPOLINE_RAX_BPF_JIT_SIZE	2
 -# define RETPOLINE_RAX_BPF_JIT()				\
 -	EMIT2(0xFF, 0xE0);	 /* jmp *%rax */
 +	EMIT1(0xC3);             /* retq */			\
 +} while (0)
 +# else /* !CONFIG_X86_64 */
 +#  define RETPOLINE_EDX_BPF_JIT()				\
 +do {								\
 +	EMIT1_off32(0xE8, 7);	 /* call do_rop */		\
 +	/* spec_trap: */					\
 +	EMIT2(0xF3, 0x90);       /* pause */			\
 +	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
 +	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
 +	/* do_rop: */						\
 +	EMIT3(0x89, 0x14, 0x24); /* mov %edx,(%esp) */		\
 +	EMIT1(0xC3);             /* ret */			\
 +} while (0)
 +# endif
 +#else /* !CONFIG_RETPOLINE */
 +# ifdef CONFIG_X86_64
 +#  define RETPOLINE_RAX_BPF_JIT_SIZE	2
 +#  define RETPOLINE_RAX_BPF_JIT()				\
 +	EMIT2(0xFF, 0xE0);       /* jmp *%rax */
 +# else /* !CONFIG_X86_64 */
 +#  define RETPOLINE_EDX_BPF_JIT()				\
 +	EMIT2(0xFF, 0xE2)        /* jmp *%edx */
 +# endif
  #endif
  
  #endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
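
For context on the alternative_msr_write() rework above (a reviewer's
sketch, not part of the patch): the old macro required compile-time
immediates, while the new inline function takes a runtime u64. That is
what lets the firmware IBRS bracket preserve whatever bits are cached
in x86_spec_ctrl_base (e.g. SSBD state) instead of writing constants.
The resulting calling pattern, shown with a hypothetical caller:

	/* Hypothetical caller, for illustration only. */
	static void call_firmware_with_ibrs(void (*fw_call)(void))
	{
		u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;

		preempt_disable();
		/* patched to a real wrmsr only if X86_FEATURE_USE_IBRS_FW is set */
		alternative_msr_write(MSR_IA32_SPEC_CTRL, val,
				      X86_FEATURE_USE_IBRS_FW);
		fw_call();
		alternative_msr_write(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base,
				      X86_FEATURE_USE_IBRS_FW);
		preempt_enable();
	}
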
diff --combined drivers/net/ethernet/freescale/fec_main.c
index 4358f586e28f,9d3eed46830d..ab7521c04eb2
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0+
  /*
   * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
   * Copyright (c) 1997 Dan Malek (dmalek at jlc.net)
@@@ -2052,9 -2053,13 +2053,9 @@@ static int fec_enet_mii_init(struct pla
  	fep->mii_bus->parent = &pdev->dev;
  
  	node = of_get_child_by_name(pdev->dev.of_node, "mdio");
 -	if (node) {
 -		err = of_mdiobus_register(fep->mii_bus, node);
 +	err = of_mdiobus_register(fep->mii_bus, node);
 +	if (node)
  		of_node_put(node);
 -	} else {
 -		err = mdiobus_register(fep->mii_bus);
 -	}
 -
  	if (err)
  		goto err_out_free_mdiobus;
  
@@@ -2107,7 -2112,7 +2108,7 @@@ static int fec_enet_get_regs_len(struc
  /* List of registers that can safely be read to dump them with ethtool */
  #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
  	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
 -	defined(CONFIG_ARM64)
 +	defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
  static u32 fec_enet_register_offset[] = {
  	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
  	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
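
A note on the fec_enet_mii_init() simplification above (a sketch, not
part of the patch): it relies on of_mdiobus_register() accepting a NULL
device_node and falling back to plain mdiobus_register() in that case,
so the driver no longer needs to branch on the presence of the "mdio"
child. The condensed pattern:

	/* Register an MDIO bus with an optional DT "mdio" child node. */
	node = of_get_child_by_name(pdev->dev.of_node, "mdio");
	err = of_mdiobus_register(fep->mii_bus, node);	/* NULL node -> mdiobus_register() */
	if (node)
		of_node_put(node);
	if (err)
		goto err_out_free_mdiobus;
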
diff --combined drivers/net/ethernet/freescale/fec_ptp.c
index d438ef8a371d,43d973215040..36c2d7d6ee1b
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@@ -1,20 -1,8 +1,8 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   * Fast Ethernet Controller (ENET) PTP driver for MX6x.
   *
   * Copyright (C) 2012 Freescale Semiconductor, Inc.
-  *
-  * This program is free software; you can redistribute it and/or modify it
-  * under the terms and conditions of the GNU General Public License,
-  * version 2, as published by the Free Software Foundation.
-  *
-  * This program is distributed in the hope it will be useful, but WITHOUT
-  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  * more details.
-  *
-  * You should have received a copy of the GNU General Public License along with
-  * this program; if not, write to the Free Software Foundation, Inc.,
-  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
   */
  
  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@@ -466,6 -454,12 +454,6 @@@ static int fec_ptp_enable(struct ptp_cl
  	return -EOPNOTSUPP;
  }
  
 -/**
 - * fec_ptp_hwtstamp_ioctl - control hardware time stamping
 - * @ndev: pointer to net_device
 - * @ifreq: ioctl data
 - * @cmd: particular ioctl requested
 - */
  int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
  {
  	struct fec_enet_private *fep = netdev_priv(ndev);
diff --combined drivers/net/ethernet/ibm/ibmvnic.c
index 09f8e6baf049,5ec1185808e5..d0e196bff081
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@@ -109,14 -109,13 +109,14 @@@ static union sub_crq *ibmvnic_next_scrq
  					struct ibmvnic_sub_crq_queue *);
  static int ibmvnic_poll(struct napi_struct *napi, int data);
  static void send_map_query(struct ibmvnic_adapter *adapter);
 -static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
 -static void send_request_unmap(struct ibmvnic_adapter *, u8);
 +static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
 +static int send_request_unmap(struct ibmvnic_adapter *, u8);
  static int send_login(struct ibmvnic_adapter *adapter);
  static void send_cap_queries(struct ibmvnic_adapter *adapter);
  static int init_sub_crqs(struct ibmvnic_adapter *);
  static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
  static int ibmvnic_init(struct ibmvnic_adapter *);
 +static int ibmvnic_reset_init(struct ibmvnic_adapter *);
  static void release_crq_queue(struct ibmvnic_adapter *);
  static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
  static int init_crq_queue(struct ibmvnic_adapter *adapter);
@@@ -173,7 -172,6 +173,7 @@@ static int alloc_long_term_buff(struct 
  				struct ibmvnic_long_term_buff *ltb, int size)
  {
  	struct device *dev = &adapter->vdev->dev;
 +	int rc;
  
  	ltb->size = size;
  	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
@@@ -187,12 -185,8 +187,12 @@@
  	adapter->map_id++;
  
  	init_completion(&adapter->fw_done);
 -	send_request_map(adapter, ltb->addr,
 -			 ltb->size, ltb->map_id);
 +	rc = send_request_map(adapter, ltb->addr,
 +			      ltb->size, ltb->map_id);
 +	if (rc) {
 +		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
 +		return rc;
 +	}
  	wait_for_completion(&adapter->fw_done);
  
  	if (adapter->fw_done_rc) {
@@@ -221,14 -215,10 +221,14 @@@ static void free_long_term_buff(struct 
  static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
  				struct ibmvnic_long_term_buff *ltb)
  {
 +	int rc;
 +
  	memset(ltb->buff, 0, ltb->size);
  
  	init_completion(&adapter->fw_done);
 -	send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
 +	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
 +	if (rc)
 +		return rc;
  	wait_for_completion(&adapter->fw_done);
  
  	if (adapter->fw_done_rc) {
@@@ -799,7 -789,6 +799,7 @@@ static void release_napi(struct ibmvnic
  	kfree(adapter->napi);
  	adapter->napi = NULL;
  	adapter->num_active_rx_napi = 0;
 +	adapter->napi_enabled = false;
  }
  
  static int ibmvnic_login(struct net_device *netdev)
@@@ -807,9 -796,11 +807,11 @@@
  	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
  	unsigned long timeout = msecs_to_jiffies(30000);
  	int retry_count = 0;
+ 	bool retry;
  	int rc;
  
  	do {
+ 		retry = false;
  		if (retry_count > IBMVNIC_MAX_QUEUES) {
  			netdev_warn(netdev, "Login attempts exceeded\n");
  			return -1;
@@@ -833,6 -824,9 +835,9 @@@
  			retry_count++;
  			release_sub_crqs(adapter, 1);
  
+ 			retry = true;
+ 			netdev_dbg(netdev,
+ 				   "Received partial success, retrying...\n");
  			adapter->init_done_rc = 0;
  			reinit_completion(&adapter->init_done);
  			send_cap_queries(adapter);
@@@ -860,7 -854,7 +865,7 @@@
  			netdev_warn(netdev, "Adapter login failed\n");
  			return -1;
  		}
- 	} while (adapter->init_done_rc == PARTIALSUCCESS);
+ 	} while (retry);
  
  	/* handle pending MAC address changes after successful login */
  	if (adapter->mac_change_pending) {
@@@ -930,10 -924,6 +935,10 @@@ static int set_link_state(struct ibmvni
  			/* Partial success, delay and re-send */
  			mdelay(1000);
  			resend = true;
 +		} else if (adapter->init_done_rc) {
 +			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
 +				    adapter->init_done_rc);
 +			return adapter->init_done_rc;
  		}
  	} while (resend);
  
@@@ -966,7 -956,6 +971,7 @@@ static int ibmvnic_get_vpd(struct ibmvn
  	struct device *dev = &adapter->vdev->dev;
  	union ibmvnic_crq crq;
  	int len = 0;
 +	int rc;
  
  	if (adapter->vpd->buff)
  		len = adapter->vpd->len;
@@@ -974,9 -963,7 +979,9 @@@
  	init_completion(&adapter->fw_done);
  	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
  	crq.get_vpd_size.cmd = GET_VPD_SIZE;
 -	ibmvnic_send_crq(adapter, &crq);
 +	rc = ibmvnic_send_crq(adapter, &crq);
 +	if (rc)
 +		return rc;
  	wait_for_completion(&adapter->fw_done);
  
  	if (!adapter->vpd->len)
@@@ -1009,12 -996,7 +1014,12 @@@
  	crq.get_vpd.cmd = GET_VPD;
  	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
  	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
 -	ibmvnic_send_crq(adapter, &crq);
 +	rc = ibmvnic_send_crq(adapter, &crq);
 +	if (rc) {
 +		kfree(adapter->vpd->buff);
 +		adapter->vpd->buff = NULL;
 +		return rc;
 +	}
  	wait_for_completion(&adapter->fw_done);
  
  	return 0;
@@@ -1713,7 -1695,6 +1718,7 @@@ static int __ibmvnic_set_mac(struct net
  	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
  	struct sockaddr *addr = p;
  	union ibmvnic_crq crq;
 +	int rc;
  
  	if (!is_valid_ether_addr(addr->sa_data))
  		return -EADDRNOTAVAIL;
@@@ -1724,9 -1705,7 +1729,9 @@@
  	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
  
  	init_completion(&adapter->fw_done);
 -	ibmvnic_send_crq(adapter, &crq);
 +	rc = ibmvnic_send_crq(adapter, &crq);
 +	if (rc)
 +		return rc;
  	wait_for_completion(&adapter->fw_done);
  	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
  	return adapter->fw_done_rc ? -EIO : 0;
@@@ -1808,7 -1787,7 +1813,7 @@@ static int do_reset(struct ibmvnic_adap
  			return rc;
  		}
  
 -		rc = ibmvnic_init(adapter);
 +		rc = ibmvnic_reset_init(adapter);
  		if (rc)
  			return IBMVNIC_INIT_FAILED;
  
@@@ -1878,85 -1857,6 +1883,85 @@@
  	return 0;
  }
  
 +static int do_hard_reset(struct ibmvnic_adapter *adapter,
 +			 struct ibmvnic_rwi *rwi, u32 reset_state)
 +{
 +	struct net_device *netdev = adapter->netdev;
 +	int rc;
 +
 +	netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
 +		   rwi->reset_reason);
 +
 +	netif_carrier_off(netdev);
 +	adapter->reset_reason = rwi->reset_reason;
 +
 +	ibmvnic_cleanup(netdev);
 +	release_resources(adapter);
 +	release_sub_crqs(adapter, 0);
 +	release_crq_queue(adapter);
 +
 +	/* remove the closed state so when we call open it appears
 +	 * we are coming from the probed state.
 +	 */
 +	adapter->state = VNIC_PROBED;
 +
 +	rc = init_crq_queue(adapter);
 +	if (rc) {
 +		netdev_err(adapter->netdev,
 +			   "Couldn't initialize crq. rc=%d\n", rc);
 +		return rc;
 +	}
 +
 +	rc = ibmvnic_init(adapter);
 +	if (rc)
 +		return rc;
 +
 +	/* If the adapter was in PROBE state prior to the reset,
 +	 * exit here.
 +	 */
 +	if (reset_state == VNIC_PROBED)
 +		return 0;
 +
 +	rc = ibmvnic_login(netdev);
 +	if (rc) {
 +		adapter->state = VNIC_PROBED;
 +		return 0;
 +	}
 +	/* netif_set_real_num_xx_queues needs to take rtnl lock here
 +	 * unless wait_for_reset is set, in which case the rtnl lock
 +	 * has already been taken before initializing the reset
 +	 */
 +	if (!adapter->wait_for_reset) {
 +		rtnl_lock();
 +		rc = init_resources(adapter);
 +		rtnl_unlock();
 +	} else {
 +		rc = init_resources(adapter);
 +	}
 +	if (rc)
 +		return rc;
 +
 +	ibmvnic_disable_irqs(adapter);
 +	adapter->state = VNIC_CLOSED;
 +
 +	if (reset_state == VNIC_CLOSED)
 +		return 0;
 +
 +	rc = __ibmvnic_open(netdev);
 +	if (rc) {
 +		if (list_empty(&adapter->rwi_list))
 +			adapter->state = VNIC_CLOSED;
 +		else
 +			adapter->state = reset_state;
 +
 +		return 0;
 +	}
 +
 +	netif_carrier_on(netdev);
 +
 +	return 0;
 +}
 +
  static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
  {
  	struct ibmvnic_rwi *rwi;
@@@ -1998,19 -1898,14 +2003,19 @@@ static void __ibmvnic_reset(struct work
  	netdev = adapter->netdev;
  
  	mutex_lock(&adapter->reset_lock);
 -	adapter->resetting = true;
  	reset_state = adapter->state;
  
  	rwi = get_next_rwi(adapter);
  	while (rwi) {
 -		rc = do_reset(adapter, rwi, reset_state);
 +		if (adapter->force_reset_recovery) {
 +			adapter->force_reset_recovery = false;
 +			rc = do_hard_reset(adapter, rwi, reset_state);
 +		} else {
 +			rc = do_reset(adapter, rwi, reset_state);
 +		}
  		kfree(rwi);
 -		if (rc && rc != IBMVNIC_INIT_FAILED)
 +		if (rc && rc != IBMVNIC_INIT_FAILED &&
 +		    !adapter->force_reset_recovery)
  			break;
  
  		rwi = get_next_rwi(adapter);
@@@ -2036,9 -1931,9 +2041,9 @@@
  static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
  			 enum ibmvnic_reset_reason reason)
  {
 +	struct list_head *entry, *tmp_entry;
  	struct ibmvnic_rwi *rwi, *tmp;
  	struct net_device *netdev = adapter->netdev;
 -	struct list_head *entry;
  	int ret;
  
  	if (adapter->state == VNIC_REMOVING ||
@@@ -2074,17 -1969,11 +2079,17 @@@
  		ret = ENOMEM;
  		goto err;
  	}
 -
 +	/* if we just received a transport event,
 +	 * flush reset queue and process this reset
 +	 */
 +	if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
 +		list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
 +			list_del(entry);
 +	}
  	rwi->reset_reason = reason;
  	list_add_tail(&rwi->list, &adapter->rwi_list);
  	mutex_unlock(&adapter->rwi_lock);
 -
 +	adapter->resetting = true;
  	netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
  	schedule_work(&adapter->ibmvnic_reset);
  
@@@ -2480,7 -2369,6 +2485,7 @@@ static void ibmvnic_get_ethtool_stats(s
  	struct ibmvnic_adapter *adapter = netdev_priv(dev);
  	union ibmvnic_crq crq;
  	int i, j;
 +	int rc;
  
  	memset(&crq, 0, sizeof(crq));
  	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
@@@ -2491,9 -2379,7 +2496,9 @@@
  
  	/* Wait for data to be written */
  	init_completion(&adapter->stats_done);
 -	ibmvnic_send_crq(adapter, &crq);
 +	rc = ibmvnic_send_crq(adapter, &crq);
 +	if (rc)
 +		return;
  	wait_for_completion(&adapter->stats_done);
  
  	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
@@@ -2736,18 -2622,21 +2741,21 @@@ static int enable_scrq_irq(struct ibmvn
  {
  	struct device *dev = &adapter->vdev->dev;
  	unsigned long rc;
- 	u64 val;
  
  	if (scrq->hw_irq > 0x100000000ULL) {
  		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
  		return 1;
  	}
  
- 	val = (0xff000000) | scrq->hw_irq;
- 	rc = plpar_hcall_norets(H_EOI, val);
- 	if (rc)
- 		dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
- 			val, rc);
+ 	if (adapter->resetting &&
+ 	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
+ 		u64 val = (0xff000000) | scrq->hw_irq;
+ 
+ 		rc = plpar_hcall_norets(H_EOI, val);
+ 		if (rc)
+ 			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
+ 				val, rc);
+ 	}
  
  	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
  				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
@@@ -3265,12 -3154,6 +3273,12 @@@ static int ibmvnic_send_crq(struct ibmv
  		   (unsigned long int)cpu_to_be64(u64_crq[0]),
  		   (unsigned long int)cpu_to_be64(u64_crq[1]));
  
 +	if (!adapter->crq.active &&
 +	    crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
 +		dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
 +		return -EINVAL;
 +	}
 +
  	/* Make sure the hypervisor sees the complete request */
  	mb();
  
@@@ -3495,8 -3378,8 +3503,8 @@@ buf_alloc_failed
  	return -1;
  }
  
 -static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
 -			     u32 len, u8 map_id)
 +static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
 +			    u32 len, u8 map_id)
  {
  	union ibmvnic_crq crq;
  
@@@ -3506,10 -3389,10 +3514,10 @@@
  	crq.request_map.map_id = map_id;
  	crq.request_map.ioba = cpu_to_be32(addr);
  	crq.request_map.len = cpu_to_be32(len);
 -	ibmvnic_send_crq(adapter, &crq);
 +	return ibmvnic_send_crq(adapter, &crq);
  }
  
 -static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
 +static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
  {
  	union ibmvnic_crq crq;
  
@@@ -3517,7 -3400,7 +3525,7 @@@
  	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
  	crq.request_unmap.cmd = REQUEST_UNMAP;
  	crq.request_unmap.map_id = map_id;
 -	ibmvnic_send_crq(adapter, &crq);
 +	return ibmvnic_send_crq(adapter, &crq);
  }
  
  static void send_map_query(struct ibmvnic_adapter *adapter)
@@@ -4344,15 -4227,11 +4352,15 @@@ static void ibmvnic_handle_crq(union ib
  			dev_info(dev, "Partner initialized\n");
  			adapter->from_passive_init = true;
  			adapter->failover_pending = false;
 -			complete(&adapter->init_done);
 +			if (!completion_done(&adapter->init_done)) {
 +				complete(&adapter->init_done);
 +				adapter->init_done_rc = -EIO;
 +			}
  			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
  			break;
  		case IBMVNIC_CRQ_INIT_COMPLETE:
  			dev_info(dev, "Partner initialization complete\n");
 +			adapter->crq.active = true;
  			send_version_xchg(adapter);
  			break;
  		default:
@@@ -4361,9 -4240,6 +4369,9 @@@
  		return;
  	case IBMVNIC_CRQ_XPORT_EVENT:
  		netif_carrier_off(netdev);
 +		adapter->crq.active = false;
 +		if (adapter->resetting)
 +			adapter->force_reset_recovery = true;
  		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
  			dev_info(dev, "Migrated, re-enabling adapter\n");
  			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
@@@ -4551,7 -4427,6 +4559,7 @@@ static int ibmvnic_reset_crq(struct ibm
  	/* Clean out the queue */
  	memset(crq->msgs, 0, PAGE_SIZE);
  	crq->cur = 0;
 +	crq->active = false;
  
  	/* And re-open it again */
  	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
@@@ -4586,7 -4461,6 +4594,7 @@@ static void release_crq_queue(struct ib
  			 DMA_BIDIRECTIONAL);
  	free_page((unsigned long)crq->msgs);
  	crq->msgs = NULL;
 +	crq->active = false;
  }
  
  static int init_crq_queue(struct ibmvnic_adapter *adapter)
@@@ -4664,7 -4538,7 +4672,7 @@@ map_failed
  	return retrc;
  }
  
 -static int ibmvnic_init(struct ibmvnic_adapter *adapter)
 +static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
  {
  	struct device *dev = &adapter->vdev->dev;
  	unsigned long timeout = msecs_to_jiffies(30000);
@@@ -4723,49 -4597,6 +4731,49 @@@
  	return rc;
  }
  
 +static int ibmvnic_init(struct ibmvnic_adapter *adapter)
 +{
 +	struct device *dev = &adapter->vdev->dev;
 +	unsigned long timeout = msecs_to_jiffies(30000);
 +	int rc;
 +
 +	adapter->from_passive_init = false;
 +
 +	init_completion(&adapter->init_done);
 +	adapter->init_done_rc = 0;
 +	ibmvnic_send_crq_init(adapter);
 +	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
 +		dev_err(dev, "Initialization sequence timed out\n");
 +		return -1;
 +	}
 +
 +	if (adapter->init_done_rc) {
 +		release_crq_queue(adapter);
 +		return adapter->init_done_rc;
 +	}
 +
 +	if (adapter->from_passive_init) {
 +		adapter->state = VNIC_OPEN;
 +		adapter->from_passive_init = false;
 +		return -1;
 +	}
 +
 +	rc = init_sub_crqs(adapter);
 +	if (rc) {
 +		dev_err(dev, "Initialization of sub crqs failed\n");
 +		release_crq_queue(adapter);
 +		return rc;
 +	}
 +
 +	rc = init_sub_crq_irqs(adapter);
 +	if (rc) {
 +		dev_err(dev, "Failed to initialize sub crq irqs\n");
 +		release_crq_queue(adapter);
 +	}
 +
 +	return rc;
 +}
 +
  static struct device_attribute dev_attr_failover;
  
  static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
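
One pattern worth calling out in the ibmvnic changes above (a sketch,
not part of the patch): send_request_map(), send_request_unmap() and
the other CRQ senders now propagate the return code of
ibmvnic_send_crq(), and every caller checks it before blocking. If the
CRQ has gone inactive across a reset, the send fails fast instead of
leaving the caller waiting on a completion that will never be
signalled:

	init_completion(&adapter->fw_done);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc)
		return rc;	/* CRQ dead: don't wait for a reply that can't come */
	wait_for_completion(&adapter->fw_done);
	return adapter->fw_done_rc ? -EIO : 0;
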
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 53f72923b164,1ff0b0e93804..a6a92c4f5fbb
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@@ -37,7 -37,6 +37,7 @@@
  #include <linux/bpf_trace.h>
  #include <net/busy_poll.h>
  #include <net/ip6_checksum.h>
 +#include <net/page_pool.h>
  #include "en.h"
  #include "en_tc.h"
  #include "eswitch.h"
@@@ -222,7 -221,7 +222,7 @@@ static inline int mlx5e_page_alloc_mapp
  	if (mlx5e_rx_cache_get(rq, dma_info))
  		return 0;
  
 -	dma_info->page = dev_alloc_pages(rq->buff.page_order);
 +	dma_info->page = page_pool_dev_alloc_pages(rq->page_pool);
  	if (unlikely(!dma_info->page))
  		return -ENOMEM;
  
@@@ -237,26 -236,15 +237,26 @@@
  	return 0;
  }
  
 +static void mlx5e_page_dma_unmap(struct mlx5e_rq *rq,
 +					struct mlx5e_dma_info *dma_info)
 +{
 +	dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq),
 +		       rq->buff.map_dir);
 +}
 +
  void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
  			bool recycle)
  {
 -	if (likely(recycle) && mlx5e_rx_cache_put(rq, dma_info))
 -		return;
 +	if (likely(recycle)) {
 +		if (mlx5e_rx_cache_put(rq, dma_info))
 +			return;
  
 -	dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq),
 -		       rq->buff.map_dir);
 -	put_page(dma_info->page);
 +		mlx5e_page_dma_unmap(rq, dma_info);
 +		page_pool_recycle_direct(rq->page_pool, dma_info->page);
 +	} else {
 +		mlx5e_page_dma_unmap(rq, dma_info);
 +		put_page(dma_info->page);
 +	}
  }
  
  static inline bool mlx5e_page_reuse(struct mlx5e_rq *rq,
@@@ -450,7 -438,7 +450,7 @@@ bool mlx5e_post_rx_wqes(struct mlx5e_r
  	struct mlx5_wq_ll *wq = &rq->wq;
  	int err;
  
 -	if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED)))
 +	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
  		return false;
  
  	if (mlx5_wq_ll_is_full(wq))
@@@ -508,7 -496,7 +508,7 @@@ static void mlx5e_poll_ico_cq(struct ml
  	struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
  	struct mlx5_cqe64 *cqe;
  
 -	if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED)))
 +	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
  		return;
  
  	cqe = mlx5_cqwq_get_cqe(&cq->wq);
@@@ -525,7 -513,7 +525,7 @@@ bool mlx5e_post_rx_mpwqes(struct mlx5e_
  {
  	struct mlx5_wq_ll *wq = &rq->wq;
  
 -	if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED)))
 +	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
  		return false;
  
  	mlx5e_poll_ico_cq(&rq->channel->icosq.cq, rq);
@@@ -627,6 -615,45 +627,45 @@@ static inline bool is_last_ethertype_ip
  	return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
  }
  
+ static __be32 mlx5e_get_fcs(struct sk_buff *skb)
+ {
+ 	int last_frag_sz, bytes_in_prev, nr_frags;
+ 	u8 *fcs_p1, *fcs_p2;
+ 	skb_frag_t *last_frag;
+ 	__be32 fcs_bytes;
+ 
+ 	if (!skb_is_nonlinear(skb))
+ 		return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);
+ 
+ 	nr_frags = skb_shinfo(skb)->nr_frags;
+ 	last_frag = &skb_shinfo(skb)->frags[nr_frags - 1];
+ 	last_frag_sz = skb_frag_size(last_frag);
+ 
+ 	/* If all FCS data is in last frag */
+ 	if (last_frag_sz >= ETH_FCS_LEN)
+ 		return *(__be32 *)(skb_frag_address(last_frag) +
+ 				   last_frag_sz - ETH_FCS_LEN);
+ 
+ 	fcs_p2 = (u8 *)skb_frag_address(last_frag);
+ 	bytes_in_prev = ETH_FCS_LEN - last_frag_sz;
+ 
+ 	/* Find where the other part of the FCS is - Linear or another frag */
+ 	if (nr_frags == 1) {
+ 		fcs_p1 = skb_tail_pointer(skb);
+ 	} else {
+ 		skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2];
+ 
+ 		fcs_p1 = skb_frag_address(prev_frag) +
+ 			    skb_frag_size(prev_frag);
+ 	}
+ 	fcs_p1 -= bytes_in_prev;
+ 
+ 	memcpy(&fcs_bytes, fcs_p1, bytes_in_prev);
+ 	memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz);
+ 
+ 	return fcs_bytes;
+ }
+ 
  static inline void mlx5e_handle_csum(struct net_device *netdev,
  				     struct mlx5_cqe64 *cqe,
  				     struct mlx5e_rq *rq,
@@@ -655,6 -682,9 +694,9 @@@
  			skb->csum = csum_partial(skb->data + ETH_HLEN,
  						 network_depth - ETH_HLEN,
  						 skb->csum);
+ 		if (unlikely(netdev->features & NETIF_F_RXFCS))
+ 			skb->csum = csum_add(skb->csum,
+ 					     (__force __wsum)mlx5e_get_fcs(skb));
  		rq->stats.csum_complete++;
  		return;
  	}
@@@ -681,10 -711,11 +723,10 @@@ static inline void mlx5e_build_rx_skb(s
  				      struct mlx5e_rq *rq,
  				      struct sk_buff *skb)
  {
 +	u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
  	struct net_device *netdev = rq->netdev;
 -	int lro_num_seg;
  
  	skb->mac_len = ETH_HLEN;
 -	lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
  	if (lro_num_seg > 1) {
  		mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
  		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
@@@ -807,14 -838,13 +849,14 @@@ static inline bool mlx5e_xmit_xdp_frame
  }
  
  /* returns true if packet was consumed by xdp */
 -static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
 -				   struct mlx5e_dma_info *di,
 -				   void *va, u16 *rx_headroom, u32 *len)
 +static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
 +				    struct mlx5e_dma_info *di,
 +				    void *va, u16 *rx_headroom, u32 *len)
  {
 -	const struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
 +	struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
  	struct xdp_buff xdp;
  	u32 act;
 +	int err;
  
  	if (!prog)
  		return false;
@@@ -835,15 -865,6 +877,15 @@@
  		if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp)))
  			trace_xdp_exception(rq->netdev, prog, act);
  		return true;
 +	case XDP_REDIRECT:
 +		/* When XDP is enabled, the page refcount is 1 here */
 +		err = xdp_do_redirect(rq->netdev, &xdp, prog);
 +		if (!err) {
 +			__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
 +			rq->xdpsq.db.redirect_flush = true;
 +			mlx5e_page_dma_unmap(rq, di);
 +		}
 +		return true;
  	default:
  		bpf_warn_invalid_xdp_action(act);
  	case XDP_ABORTED:
@@@ -889,7 -910,6 +931,7 @@@ struct sk_buff *skb_from_cqe(struct mlx
  
  	dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
  				      frag_size, DMA_FROM_DEVICE);
 +	prefetchw(va); /* xdp_frame data area */
  	prefetch(data);
  	wi->offset += frag_size;
  
@@@ -1132,7 -1152,7 +1174,7 @@@ int mlx5e_poll_rx_cq(struct mlx5e_cq *c
  	struct mlx5_cqe64 *cqe;
  	int work_done = 0;
  
 -	if (unlikely(!MLX5E_TEST_BIT(rq->state, MLX5E_RQ_STATE_ENABLED)))
 +	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
  		return 0;
  
  	if (cq->decmprs_left)
@@@ -1162,11 -1182,6 +1204,11 @@@
  		xdpsq->db.doorbell = false;
  	}
  
 +	if (xdpsq->db.redirect_flush) {
 +		xdp_do_flush_map();
 +		xdpsq->db.redirect_flush = false;
 +	}
 +
  	mlx5_cqwq_update_db_record(&cq->wq);
  
  	/* ensure cq space is freed before enabling more cqes */
@@@ -1185,7 -1200,7 +1227,7 @@@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_c
  
  	sq = container_of(cq, struct mlx5e_xdpsq, cq);
  
 -	if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED)))
 +	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
  		return false;
  
  	cqe = mlx5_cqwq_get_cqe(&cq->wq);
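
To make the mlx5e_get_fcs() frag walk above concrete, here is a worked
example (assumed layout, ETH_FCS_LEN == 4): if the last frag holds only
one byte of the frame, three FCS bytes sit at the tail of the previous
frag (or of the linear area when nr_frags == 1) and one in the last
frag, and the two pieces are stitched back together:

	/* frame tail: ... | AA BB CC | DD |
	 *   last_frag_sz  = 1         -> fcs_p2 points at DD
	 *   bytes_in_prev = 4 - 1 = 3 -> fcs_p1 points at AA
	 *
	 * memcpy(&fcs_bytes, fcs_p1, 3);            fcs_bytes = AA BB CC ??
	 * memcpy((u8 *)&fcs_bytes + 3, fcs_p2, 1);  fcs_bytes = AA BB CC DD
	 *
	 * i.e. the full __be32 FCS, ready to be folded into skb->csum.
	 */
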
diff --combined drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 3c4f1f326e13,fad8c2e3804e..a0433b48e833
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@@ -43,6 -43,9 +43,6 @@@
  #include "fpga/sdk.h"
  #include "fpga/core.h"
  
 -#define SBU_QP_QUEUE_SIZE 8
 -#define MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC	(60 * 1000)
 -
  enum mlx5_fpga_ipsec_cmd_status {
  	MLX5_FPGA_IPSEC_CMD_PENDING,
  	MLX5_FPGA_IPSEC_CMD_SEND_FAIL,
@@@ -234,19 -237,17 +234,17 @@@ static void *mlx5_fpga_ipsec_cmd_exec(s
  	context->buf.sg[0].data = &context->command;
  
  	spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
- 	list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
+ 	res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
+ 	if (!res)
+ 		list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
  	spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
  
- 	res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
  	if (res) {
- 		mlx5_fpga_warn(fdev, "Failure sending IPSec command: %d\n",
- 			       res);
- 		spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
- 		list_del(&context->list);
- 		spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);
+ 		mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res);
  		kfree(context);
  		return ERR_PTR(res);
  	}
+ 
  	/* Context will be freed by wait func after completion */
  	return context;
  }
@@@ -255,7 -256,7 +253,7 @@@ static int mlx5_fpga_ipsec_cmd_wait(voi
  {
  	struct mlx5_fpga_ipsec_cmd_context *context = ctx;
  	unsigned long timeout =
 -		msecs_to_jiffies(MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC);
 +		msecs_to_jiffies(MLX5_FPGA_CMD_TIMEOUT_MSEC);
  	int res;
  
  	res = wait_for_completion_timeout(&context->complete, timeout);
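
The mlx5_fpga_ipsec_cmd_exec() hunk above fixes an ordering race; a
sketch of the reasoning (not part of the patch): the command context
must only become visible on pending_cmds if the send actually
succeeded, and both steps must happen under pending_cmds_lock so the
completion handler can neither find an unsent command nor miss a sent
one:

	spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
	res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
	if (!res)	/* publish only what was really sent */
		list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
	spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);

	if (res) {
		kfree(context);	/* never published, safe to free */
		return ERR_PTR(res);
	}
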
diff --combined drivers/net/phy/bcm-phy-lib.c
index 0876aec7328c,d5e0833d69b9..e10e7b54ec4b
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@@ -56,7 -56,7 +56,7 @@@ int bcm54xx_auxctl_read(struct phy_devi
  	/* The register must be written to both the Shadow Register Select and
  	 * the Shadow Read Register Selector
  	 */
- 	phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum |
+ 	phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MASK |
  		  regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT);
  	return phy_read(phydev, MII_BCM54XX_AUX_CTL);
  }
@@@ -346,6 -346,10 +346,6 @@@ void bcm_phy_get_strings(struct phy_dev
  }
  EXPORT_SYMBOL_GPL(bcm_phy_get_strings);
  
 -#ifndef UINT64_MAX
 -#define UINT64_MAX              (u64)(~((u64)0))
 -#endif
 -
  /* Caller is supposed to provide appropriate storage for the library code to
   * access the shadow copy
   */
@@@ -358,7 -362,7 +358,7 @@@ static u64 bcm_phy_get_stat(struct phy_
  
  	val = phy_read(phydev, stat.reg);
  	if (val < 0) {
 -		ret = UINT64_MAX;
 +		ret = U64_MAX;
  	} else {
  		val >>= stat.shift;
  		val = val & ((1 << stat.bits) - 1);
diff --combined drivers/net/tun.c
index 33a9c5661038,45d807796a18..2265d2ccea47
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@@ -70,7 -70,6 +70,7 @@@
  #include <net/netns/generic.h>
  #include <net/rtnetlink.h>
  #include <net/sock.h>
 +#include <net/xdp.h>
  #include <linux/seq_file.h>
  #include <linux/uio.h>
  #include <linux/skb_array.h>
@@@ -249,11 -248,11 +249,11 @@@ struct veth 
  	__be16 h_vlan_TCI;
  };
  
 -bool tun_is_xdp_buff(void *ptr)
 +bool tun_is_xdp_frame(void *ptr)
  {
  	return (unsigned long)ptr & TUN_XDP_FLAG;
  }
 -EXPORT_SYMBOL(tun_is_xdp_buff);
 +EXPORT_SYMBOL(tun_is_xdp_frame);
  
  void *tun_xdp_to_ptr(void *ptr)
  {
@@@ -526,6 -525,11 +526,6 @@@ static void tun_flow_update(struct tun_
  
  	rcu_read_lock();
  
 -	/* We may get a very small possibility of OOO during switching, not
 -	 * worth to optimize.*/
 -	if (tun->numqueues == 1 || tfile->detached)
 -		goto unlock;
 -
  	e = tun_flow_find(head, rxhash);
  	if (likely(e)) {
  		/* TODO: keep queueing to old queue until it's empty? */
@@@ -544,6 -548,7 +544,6 @@@
  		spin_unlock_bh(&tun->lock);
  	}
  
 -unlock:
  	rcu_read_unlock();
  }
  
@@@ -655,10 -660,10 +655,10 @@@ void tun_ptr_free(void *ptr
  {
  	if (!ptr)
  		return;
 -	if (tun_is_xdp_buff(ptr)) {
 -		struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
 +	if (tun_is_xdp_frame(ptr)) {
 +		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
  
 -		put_page(virt_to_head_page(xdp->data));
 +		xdp_return_frame(xdpf);
  	} else {
  		__skb_array_destroy_skb(ptr);
  	}
@@@ -843,12 -848,6 +843,12 @@@ static int tun_attach(struct tun_struc
  				       tun->dev, tfile->queue_index);
  		if (err < 0)
  			goto out;
 +		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
 +						 MEM_TYPE_PAGE_SHARED, NULL);
 +		if (err < 0) {
 +			xdp_rxq_info_unreg(&tfile->xdp_rxq);
 +			goto out;
 +		}
  		err = 0;
  	}
  
@@@ -1285,54 -1284,42 +1285,54 @@@ static const struct net_device_ops tun_
  	.ndo_get_stats64	= tun_net_get_stats64,
  };
  
 -static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
 +static int tun_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames)
  {
  	struct tun_struct *tun = netdev_priv(dev);
 -	struct xdp_buff *buff = xdp->data_hard_start;
 -	int headroom = xdp->data - xdp->data_hard_start;
  	struct tun_file *tfile;
  	u32 numqueues;
 -	int ret = 0;
 -
 -	/* Assure headroom is available and buff is properly aligned */
 -	if (unlikely(headroom < sizeof(*xdp) || tun_is_xdp_buff(xdp)))
 -		return -ENOSPC;
 -
 -	*buff = *xdp;
 +	int drops = 0;
 +	int cnt = n;
 +	int i;
  
  	rcu_read_lock();
  
  	numqueues = READ_ONCE(tun->numqueues);
  	if (!numqueues) {
 -		ret = -ENOSPC;
 -		goto out;
 +		rcu_read_unlock();
 +		return -ENXIO; /* Caller will free/return all frames */
  	}
  
  	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
  					    numqueues]);
 -	/* Encode the XDP flag into lowest bit for consumer to differ
 -	 * XDP buffer from sk_buff.
 -	 */
 -	if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(buff))) {
 -		this_cpu_inc(tun->pcpu_stats->tx_dropped);
 -		ret = -ENOSPC;
 +
 +	spin_lock(&tfile->tx_ring.producer_lock);
 +	for (i = 0; i < n; i++) {
 +		struct xdp_frame *xdp = frames[i];
 +		/* Encode the XDP flag into the lowest bit so the consumer can
 +		 * distinguish an XDP buffer from an sk_buff.
 +		 */
 +		void *frame = tun_xdp_to_ptr(xdp);
 +
 +		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
 +			this_cpu_inc(tun->pcpu_stats->tx_dropped);
 +			xdp_return_frame_rx_napi(xdp);
 +			drops++;
 +		}
  	}
 +	spin_unlock(&tfile->tx_ring.producer_lock);
  
 -out:
  	rcu_read_unlock();
 -	return ret;
 +	return cnt - drops;
 +}
 +
 +static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
 +{
 +	struct xdp_frame *frame = convert_to_xdp_frame(xdp);
 +
 +	if (unlikely(!frame))
 +		return -EOVERFLOW;
 +
 +	return tun_xdp_xmit(dev, 1, &frame);
  }
  
  static void tun_xdp_flush(struct net_device *dev)
@@@ -1436,6 -1423,13 +1436,13 @@@ static void tun_net_init(struct net_dev
  	dev->max_mtu = MAX_MTU - dev->hard_header_len;
  }
  
+ static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
+ {
+ 	struct sock *sk = tfile->socket.sk;
+ 
+ 	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
+ }
+ 
  /* Character device part */
  
  /* Poll */
@@@ -1458,10 -1452,14 +1465,14 @@@ static __poll_t tun_chr_poll(struct fil
  	if (!ptr_ring_empty(&tfile->tx_ring))
  		mask |= EPOLLIN | EPOLLRDNORM;
  
- 	if (tun->dev->flags & IFF_UP &&
- 	    (sock_writeable(sk) ||
- 	     (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
- 	      sock_writeable(sk))))
+ 	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
+ 	 * guarantee that EPOLLOUT is raised either here or by
+ 	 * tun_sock_write_space(). The process can then get the
+ 	 * notification after it writes to a down device and meets -EIO.
+ 	 */
+ 	if (tun_sock_writeable(tun, tfile) ||
+ 	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
+ 	     tun_sock_writeable(tun, tfile)))
  		mask |= EPOLLOUT | EPOLLWRNORM;
  
  	if (tun->dev->reg_state != NETREG_REGISTERED)
@@@ -1682,7 -1680,7 +1693,7 @@@ static struct sk_buff *tun_build_skb(st
  		case XDP_TX:
  			get_page(alloc_frag->page);
  			alloc_frag->offset += buflen;
 -			if (tun_xdp_xmit(tun->dev, &xdp))
 +			if (tun_xdp_tx(tun->dev, &xdp))
  				goto err_redirect;
  			tun_xdp_flush(tun->dev);
  			rcu_read_unlock();
@@@ -1690,7 -1688,6 +1701,7 @@@
  			return NULL;
  		case XDP_PASS:
  			delta = orig_data - xdp.data;
 +			len = xdp.data_end - xdp.data;
  			break;
  		default:
  			bpf_warn_invalid_xdp_action(act);
@@@ -1711,7 -1708,7 +1722,7 @@@
  	}
  
  	skb_reserve(skb, pad - delta);
 -	skb_put(skb, len + delta);
 +	skb_put(skb, len);
  	get_page(alloc_frag->page);
  	alloc_frag->offset += buflen;
  
@@@ -1932,13 -1929,10 +1943,13 @@@ static ssize_t tun_get_user(struct tun_
  		rcu_read_unlock();
  	}
  
 -	rcu_read_lock();
 -	if (!rcu_dereference(tun->steering_prog))
 +	/* Compute the costly rx hash only if needed for flow updates.
 +	 * We may get a very small possibility of OOO during switching, not
 +	 * worth optimizing.
 +	 */
 +	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
 +	    !tfile->detached)
  		rxhash = __skb_get_hash_symmetric(skb);
 -	rcu_read_unlock();
  
  	if (frags) {
  		/* Exercise flow dissector code path. */
@@@ -2007,11 -2001,11 +2018,11 @@@ static ssize_t tun_chr_write_iter(struc
  
  static ssize_t tun_put_user_xdp(struct tun_struct *tun,
  				struct tun_file *tfile,
 -				struct xdp_buff *xdp,
 +				struct xdp_frame *xdp_frame,
  				struct iov_iter *iter)
  {
  	int vnet_hdr_sz = 0;
 -	size_t size = xdp->data_end - xdp->data;
 +	size_t size = xdp_frame->len;
  	struct tun_pcpu_stats *stats;
  	size_t ret;
  
@@@ -2027,7 -2021,7 +2038,7 @@@
  		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
  	}
  
 -	ret = copy_to_iter(xdp->data, size, iter) + vnet_hdr_sz;
 +	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
  
  	stats = get_cpu_ptr(tun->pcpu_stats);
  	u64_stats_update_begin(&stats->syncp);
@@@ -2195,11 -2189,11 +2206,11 @@@ static ssize_t tun_do_read(struct tun_s
  			return err;
  	}
  
 -	if (tun_is_xdp_buff(ptr)) {
 -		struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
 +	if (tun_is_xdp_frame(ptr)) {
 +		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
  
 -		ret = tun_put_user_xdp(tun, tfile, xdp, to);
 -		put_page(virt_to_head_page(xdp->data));
 +		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
 +		xdp_return_frame(xdpf);
  	} else {
  		struct sk_buff *skb = ptr;
  
@@@ -2438,10 -2432,10 +2449,10 @@@ out_free
  static int tun_ptr_peek_len(void *ptr)
  {
  	if (likely(ptr)) {
 -		if (tun_is_xdp_buff(ptr)) {
 -			struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
 +		if (tun_is_xdp_frame(ptr)) {
 +			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
  
 -			return xdp->data_end - xdp->data;
 +			return xdpf->len;
  		}
  		return __skb_array_len_with_tag(ptr);
  	} else {
@@@ -2855,10 -2849,10 +2866,10 @@@ static long __tun_chr_ioctl(struct fil
  			    unsigned long arg, int ifreq_len)
  {
  	struct tun_file *tfile = file->private_data;
 +	struct net *net = sock_net(&tfile->sk);
  	struct tun_struct *tun;
  	void __user* argp = (void __user*)arg;
  	struct ifreq ifr;
 -	struct net *net;
  	kuid_t owner;
  	kgid_t group;
  	int sndbuf;
@@@ -2882,18 -2876,14 +2893,18 @@@
  		 */
  		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
  				(unsigned int __user*)argp);
 -	} else if (cmd == TUNSETQUEUE)
 +	} else if (cmd == TUNSETQUEUE) {
  		return tun_set_queue(file, &ifr);
 +	} else if (cmd == SIOCGSKNS) {
 +		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 +			return -EPERM;
 +		return open_related_ns(&net->ns, get_net_ns);
 +	}
  
  	ret = 0;
  	rtnl_lock();
  
  	tun = tun_get(tfile);
 -	net = sock_net(&tfile->sk);
  	if (cmd == TUNSETIFF) {
  		ret = -EEXIST;
  		if (tun)
@@@ -2923,6 -2913,14 +2934,6 @@@
  		tfile->ifindex = ifindex;
  		goto unlock;
  	}
 -	if (cmd == SIOCGSKNS) {
 -		ret = -EPERM;
 -		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
 -			goto unlock;
 -
 -		ret = open_related_ns(&net->ns, get_net_ns);
 -		goto unlock;
 -	}
  
  	ret = -EBADFD;
  	if (!tun)
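
Background on the tun_is_xdp_frame()/tun_xdp_to_ptr() helpers touched
above: the tx_ring carries both sk_buffs and XDP frames, multiplexed by
tagging the pointer's low bit. A minimal sketch of the scheme, assuming
TUN_XDP_FLAG is bit 0 and ring entries are at least 2-byte aligned
(helper names here are illustrative, not the driver's):

	static void *xdp_tag(struct xdp_frame *frame)
	{
		return (void *)((unsigned long)frame | TUN_XDP_FLAG);
	}

	static bool is_xdp(void *ptr)
	{
		return (unsigned long)ptr & TUN_XDP_FLAG;
	}

	static struct xdp_frame *xdp_untag(void *ptr)
	{
		return (struct xdp_frame *)((unsigned long)ptr & ~TUN_XDP_FLAG);
	}
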
diff --combined drivers/net/virtio_net.c
index 39a0783d1cde,032e1ac10a30..b2647dd5d302
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@@ -419,70 -419,46 +419,70 @@@ static void virtnet_xdp_flush(struct ne
  	virtqueue_kick(sq->vq);
  }
  
 -static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
 -			       struct xdp_buff *xdp)
 +static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
 +				   struct send_queue *sq,
 +				   struct xdp_frame *xdpf)
  {
  	struct virtio_net_hdr_mrg_rxbuf *hdr;
 -	unsigned int len;
 -	struct send_queue *sq;
 -	unsigned int qp;
 -	void *xdp_sent;
  	int err;
  
 -	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
 -	sq = &vi->sq[qp];
 +	/* virtqueue wants to use the data area in front of the packet */
 +	if (unlikely(xdpf->metasize > 0))
 +		return -EOPNOTSUPP;
  
 -	/* Free up any pending old buffers before queueing new ones. */
 -	while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
 -		struct page *sent_page = virt_to_head_page(xdp_sent);
 +	if (unlikely(xdpf->headroom < vi->hdr_len))
 +		return -EOVERFLOW;
  
 -		put_page(sent_page);
 -	}
 -
 -	xdp->data -= vi->hdr_len;
 +	/* Make room for virtqueue hdr (also change xdpf->headroom?) */
 +	xdpf->data -= vi->hdr_len;
  	/* Zero header and leave csum up to XDP layers */
 -	hdr = xdp->data;
 +	hdr = xdpf->data;
  	memset(hdr, 0, vi->hdr_len);
 +	xdpf->len   += vi->hdr_len;
  
 -	sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
 +	sg_init_one(sq->sg, xdpf->data, xdpf->len);
  
 -	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC);
 +	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC);
  	if (unlikely(err))
 -		return false; /* Caller handle free/refcnt */
 +		return -ENOSPC; /* Caller handle free/refcnt */
 +
 +	return 0;
 +}
 +
 +static int __virtnet_xdp_tx_xmit(struct virtnet_info *vi,
 +				   struct xdp_frame *xdpf)
 +{
 +	struct xdp_frame *xdpf_sent;
 +	struct send_queue *sq;
 +	unsigned int len;
 +	unsigned int qp;
 +
 +	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
 +	sq = &vi->sq[qp];
 +
 +	/* Free up any pending old buffers before queueing new ones. */
 +	while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
 +		xdp_return_frame(xdpf_sent);
  
 -	return true;
 +	return __virtnet_xdp_xmit_one(vi, sq, xdpf);
  }
  
 -static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
 +static int virtnet_xdp_xmit(struct net_device *dev,
 +			    int n, struct xdp_frame **frames)
  {
  	struct virtnet_info *vi = netdev_priv(dev);
  	struct receive_queue *rq = vi->rq;
 +	struct xdp_frame *xdpf_sent;
  	struct bpf_prog *xdp_prog;
 -	bool sent;
 +	struct send_queue *sq;
 +	unsigned int len;
 +	unsigned int qp;
 +	int drops = 0;
 +	int err;
 +	int i;
 +
 +	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
 +	sq = &vi->sq[qp];
  
  	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
  	 * indicate XDP resources have been successfully allocated.
@@@ -491,20 -467,10 +491,20 @@@
  	if (!xdp_prog)
  		return -ENXIO;
  
 -	sent = __virtnet_xdp_xmit(vi, xdp);
 -	if (!sent)
 -		return -ENOSPC;
 -	return 0;
 +	/* Free up any pending old buffers before queueing new ones. */
 +	while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
 +		xdp_return_frame(xdpf_sent);
 +
 +	for (i = 0; i < n; i++) {
 +		struct xdp_frame *xdpf = frames[i];
 +
 +		err = __virtnet_xdp_xmit_one(vi, sq, xdpf);
 +		if (err) {
 +			xdp_return_frame_rx_napi(xdpf);
 +			drops++;
 +		}
 +	}
 +	return n - drops;
  }
  
  static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
@@@ -593,6 -559,7 +593,6 @@@ static struct sk_buff *receive_small(st
  	struct page *page = virt_to_head_page(buf);
  	unsigned int delta = 0;
  	struct page *xdp_page;
 -	bool sent;
  	int err;
  
  	len -= vi->hdr_len;
@@@ -601,7 -568,6 +601,7 @@@
  	xdp_prog = rcu_dereference(rq->xdp_prog);
  	if (xdp_prog) {
  		struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
 +		struct xdp_frame *xdpf;
  		struct xdp_buff xdp;
  		void *orig_data;
  		u32 act;
@@@ -642,14 -608,10 +642,14 @@@
  		case XDP_PASS:
  			/* Recalculate length in case bpf program changed it */
  			delta = orig_data - xdp.data;
 +			len = xdp.data_end - xdp.data;
  			break;
  		case XDP_TX:
 -			sent = __virtnet_xdp_xmit(vi, &xdp);
 -			if (unlikely(!sent)) {
 +			xdpf = convert_to_xdp_frame(&xdp);
 +			if (unlikely(!xdpf))
 +				goto err_xdp;
 +			err = __virtnet_xdp_tx_xmit(vi, xdpf);
 +			if (unlikely(err)) {
  				trace_xdp_exception(vi->dev, xdp_prog, act);
  				goto err_xdp;
  			}
@@@ -679,7 -641,7 +679,7 @@@
  		goto err;
  	}
  	skb_reserve(skb, headroom - delta);
 -	skb_put(skb, len + delta);
 +	skb_put(skb, len);
  	if (!delta) {
  		buf += header_offset;
  		memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
@@@ -732,6 -694,7 +732,6 @@@ static struct sk_buff *receive_mergeabl
  	struct bpf_prog *xdp_prog;
  	unsigned int truesize;
  	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
 -	bool sent;
  	int err;
  
  	head_skb = NULL;
@@@ -739,12 -702,18 +739,19 @@@
  	rcu_read_lock();
  	xdp_prog = rcu_dereference(rq->xdp_prog);
  	if (xdp_prog) {
 +		struct xdp_frame *xdpf;
  		struct page *xdp_page;
  		struct xdp_buff xdp;
  		void *data;
  		u32 act;
  
+ 		/* Transient failure which in theory could occur if
+ 		 * in-flight packets from before XDP was enabled reach
+ 		 * the receive path after XDP is loaded.
+ 		 */
+ 		if (unlikely(hdr->hdr.gso_type))
+ 			goto err_xdp;
+ 
  		/* This happens when rx buffer size is underestimated
  		 * or headroom is not enough because the buffer
  		 * was refilled before XDP is set. This should only
@@@ -765,14 -734,6 +772,6 @@@
  			xdp_page = page;
  		}
  
- 		/* Transient failure which in theory could occur if
- 		 * in-flight packets from before XDP was enabled reach
- 		 * the receive path after XDP is loaded. In practice I
- 		 * was not able to create this condition.
- 		 */
- 		if (unlikely(hdr->hdr.gso_type))
- 			goto err_xdp;
- 
  		/* Allow consuming headroom but reserve enough space to push
  		 * the descriptor on if we get an XDP_TX return code.
  		 */
@@@ -794,10 -755,6 +793,10 @@@
  			offset = xdp.data -
  					page_address(xdp_page) - vi->hdr_len;
  
 +			/* recalculate len if xdp.data or xdp.data_end were
 +			 * adjusted
 +			 */
 +			len = xdp.data_end - xdp.data + vi->hdr_len;
  			/* We can only create skb based on xdp_page. */
  			if (unlikely(xdp_page != page)) {
  				rcu_read_unlock();
@@@ -808,11 -765,8 +807,11 @@@
  			}
  			break;
  		case XDP_TX:
 -			sent = __virtnet_xdp_xmit(vi, &xdp);
 -			if (unlikely(!sent)) {
 +			xdpf = convert_to_xdp_frame(&xdp);
 +			if (unlikely(!xdpf))
 +				goto err_xdp;
 +			err = __virtnet_xdp_tx_xmit(vi, xdpf);
 +			if (unlikely(err)) {
  				trace_xdp_exception(vi->dev, xdp_prog, act);
  				if (unlikely(xdp_page != page))
  					put_page(xdp_page);
@@@ -820,7 -774,7 +819,7 @@@
  			}
  			*xdp_xmit = true;
  			if (unlikely(xdp_page != page))
- 				goto err_xdp;
+ 				put_page(page);
  			rcu_read_unlock();
  			goto xdp_xmit;
  		case XDP_REDIRECT:
@@@ -832,7 -786,7 +831,7 @@@
  			}
  			*xdp_xmit = true;
  			if (unlikely(xdp_page != page))
- 				goto err_xdp;
+ 				put_page(page);
  			rcu_read_unlock();
  			goto xdp_xmit;
  		default:
@@@ -920,7 -874,7 +919,7 @@@ err_xdp
  	rcu_read_unlock();
  err_skb:
  	put_page(page);
- 	while (--num_buf) {
+ 	while (num_buf-- > 1) {
  		buf = virtqueue_get_buf(rq->vq, &len);
  		if (unlikely(!buf)) {
  			pr_debug("%s: rx error: %d buffers missing\n",
@@@ -1357,13 -1311,6 +1356,13 @@@ static int virtnet_open(struct net_devi
  		if (err < 0)
  			return err;
  
 +		err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
 +						 MEM_TYPE_PAGE_SHARED, NULL);
 +		if (err < 0) {
 +			xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
 +			return err;
 +		}
 +
  		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
  		virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
  	}
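
The XDP_TX changes above follow the pattern introduced with struct xdp_frame:
the driver first converts its xdp_buff into an xdp_frame, then hands that
frame to the transmit path, which reports failure via an errno rather than a
bool. A minimal sketch of that pattern; my_dev and my_xmit_xdp_frame() are
hypothetical stand-ins for the driver's own types, only convert_to_xdp_frame()
is the real kernel helper:

	static int xdp_tx_one(struct my_dev *dev, struct xdp_buff *xdp)
	{
		struct xdp_frame *xdpf;

		/* fails (returns NULL) e.g. when the headroom is too
		 * small to hold the frame metadata
		 */
		xdpf = convert_to_xdp_frame(xdp);
		if (unlikely(!xdpf))
			return -EOVERFLOW;

		return my_xmit_xdp_frame(dev, xdpf);	/* 0 or -errno */
	}
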
diff --combined drivers/net/wireless/mac80211_hwsim.c
index c26469b54ac9,920c23e542a5..89fc22520d40
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@@ -2650,7 -2650,6 +2650,7 @@@ static int mac80211_hwsim_new_radio(str
  	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
  	ieee80211_hw_set(hw, MFP_CAPABLE);
  	ieee80211_hw_set(hw, SIGNAL_DBM);
 +	ieee80211_hw_set(hw, SUPPORTS_PS);
  	ieee80211_hw_set(hw, TDLS_WIDER_BW);
  	if (rctbl)
  		ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
@@@ -3341,7 -3340,7 +3341,7 @@@ out_err
  static int hwsim_dump_radio_nl(struct sk_buff *skb,
  			       struct netlink_callback *cb)
  {
- 	int last_idx = cb->args[0];
+ 	int last_idx = cb->args[0] - 1;
  	struct mac80211_hwsim_data *data = NULL;
  	int res = 0;
  	void *hdr;
@@@ -3369,7 -3368,7 +3369,7 @@@
  		last_idx = data->idx;
  	}
  
- 	cb->args[0] = last_idx;
+ 	cb->args[0] = last_idx + 1;
  
  	/* list changed, but no new element sent, set interrupted flag */
  	if (skb->len == 0 && cb->prev_seq && cb->seq != cb->prev_seq) {
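
The two hunks above fix a subtle bug in the netlink dump callback:
cb->args[0] == 0 conventionally means "dump not started", so storing a radio
index directly would make a radio with idx 0 indistinguishable from a fresh
dump. A self-contained model of the +1/-1 cookie encoding, with illustrative
names:

	#include <stdio.h>

	static long resume_after(long cookie)  { return cookie - 1; }
	static long save_cookie(long last_idx) { return last_idx + 1; }

	int main(void)
	{
		long cookie = 0;	/* fresh dump */
		printf("resume after idx %ld\n", resume_after(cookie)); /* -1 */

		cookie = save_cookie(0);	/* dumped radio with idx 0 */
		printf("resume after idx %ld\n", resume_after(cookie)); /*  0 */
		return 0;
	}
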
diff --combined include/linux/bpf_verifier.h
index c286813deaeb,df36b1b08af0..38b04f559ad3
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@@ -142,10 -142,11 +142,11 @@@ struct bpf_verifier_state_list 
  struct bpf_insn_aux_data {
  	union {
  		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
- 		struct bpf_map *map_ptr;	/* pointer for call insn into lookup_elem */
+ 		unsigned long map_state;	/* pointer/poison value for maps */
  		s32 call_imm;			/* saved imm field of call insn */
  	};
  	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
+ 	int sanitize_stack_off; /* stack slot to be cleared */
  	bool seen; /* this insn was processed by the verifier */
  };
  
@@@ -173,11 -174,6 +174,11 @@@ static inline bool bpf_verifier_log_nee
  
  #define BPF_MAX_SUBPROGS 256
  
 +struct bpf_subprog_info {
 +	u32 start; /* insn idx of function entry point */
 +	u16 stack_depth; /* max. stack depth used by this function */
 +};
 +
  /* single container for all structs
   * one verifier_env per bpf_check() call
   */
@@@ -196,12 -192,14 +197,12 @@@ struct bpf_verifier_env 
  	bool seen_direct_write;
  	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
  	struct bpf_verifier_log log;
 -	u32 subprog_starts[BPF_MAX_SUBPROGS];
 -	/* computes the stack depth of each bpf function */
 -	u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1];
 +	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
  	u32 subprog_cnt;
  };
  
 -void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
 -		       va_list args);
 +__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
 +				      const char *fmt, va_list args);
  __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
  					   const char *fmt, ...);
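
The verifier state above replaces two parallel arrays (subprog_starts[] and
subprog_stack_depth[]) with one array of struct bpf_subprog_info, sized one
past BPF_MAX_SUBPROGS so a fake "exit" entry can serve as a sentinel. A
compilable model of why the sentinel helps, with illustrative names:

	#include <stdio.h>

	struct subprog_info {
		unsigned int start;		/* insn idx of entry point */
		unsigned short stack_depth;
	};

	int main(void)
	{
		struct subprog_info info[] = {
			{ .start = 0 },		/* entry function */
			{ .start = 10 },	/* one callee */
			{ .start = 25 },	/* sentinel: insn_cnt */
		};
		unsigned int cnt = 2;		/* sentinel not counted */

		/* subprog i always spans [start, next start) -- no
		 * special case for the last one
		 */
		for (unsigned int i = 0; i < cnt; i++)
			printf("func#%u: insns [%u, %u)\n",
			       i, info[i].start, info[i + 1].start);
		return 0;
	}
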
  
diff --combined include/net/sctp/sctp.h
index f66d44350007,35498e613ff5..8c2caa370e0f
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@@ -103,6 -103,8 +103,8 @@@ void sctp_addr_wq_mgmt(struct net *, st
  /*
   * sctp/socket.c
   */
+ int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
+ 		      int addr_len, int flags);
  int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
  int sctp_inet_listen(struct socket *sock, int backlog);
  void sctp_write_space(struct sock *sk);
@@@ -428,6 -430,32 +430,6 @@@ static inline int sctp_list_single_entr
  	return (head->next != head) && (head->next == head->prev);
  }
  
 -/* Break down data chunks at this point.  */
 -static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
 -{
 -	struct sctp_sock *sp = sctp_sk(asoc->base.sk);
 -	struct sctp_af *af = sp->pf->af;
 -	int frag = pmtu;
 -
 -	frag -= af->ip_options_len(asoc->base.sk);
 -	frag -= af->net_header_len;
 -	frag -= sizeof(struct sctphdr) + sctp_datachk_len(&asoc->stream);
 -
 -	if (asoc->user_frag)
 -		frag = min_t(int, frag, asoc->user_frag);
 -
 -	frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN -
 -					    sctp_datachk_len(&asoc->stream)));
 -
 -	return frag;
 -}
 -
 -static inline void sctp_assoc_pending_pmtu(struct sctp_association *asoc)
 -{
 -	sctp_assoc_sync_pmtu(asoc);
 -	asoc->pmtu_pending = 0;
 -}
 -
  static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk)
  {
  	return !list_empty(&chunk->list);
@@@ -581,29 -609,17 +583,29 @@@ static inline struct dst_entry *sctp_tr
  	return t->dst;
  }
  
 -static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
 +/* Calculate the max payload size given an MTU, or the total overhead if
 + * the given MTU is zero.
 + */
 +static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp,
 +				     __u32 mtu, __u32 extra)
  {
 -	__u32 pmtu = max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)),
 -			   SCTP_DEFAULT_MINSEGMENT);
 +	__u32 overhead = sizeof(struct sctphdr) + extra;
  
 -	if (t->pathmtu == pmtu)
 -		return true;
 +	if (sp)
 +		overhead += sp->pf->af->net_header_len;
 +	else
 +		overhead += sizeof(struct ipv6hdr);
  
 -	t->pathmtu = pmtu;
 +	if (WARN_ON_ONCE(mtu && mtu <= overhead))
 +		mtu = overhead;
  
 -	return false;
 +	return mtu ? mtu - overhead : overhead;
 +}
 +
 +static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
 +{
 +	return SCTP_TRUNC4(max_t(__u32, dst_mtu(dst),
 +				 SCTP_DEFAULT_MINSEGMENT));
  }
  
  #endif /* __net_sctp_h__ */
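
The new sctp_mtu_payload() above folds the address-family and SCTP header
overheads into one place, so callers derive the usable payload from an MTU
(or, with mtu == 0, learn the total overhead). A hedged sketch of how a
caller might compute a DATA chunk frag point with it; example_frag_point()
is illustrative, the helpers it calls are the kernel's:

	static __u32 example_frag_point(struct sctp_sock *sp,
					struct sctp_association *asoc,
					__u32 pmtu)
	{
		/* payload left after IP + SCTP headers and the DATA
		 * chunk header for this stream scheme
		 */
		return SCTP_TRUNC4(sctp_mtu_payload(sp, pmtu,
				   sctp_datachk_len(&asoc->stream)));
	}
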
diff --combined include/uapi/linux/nl80211.h
index 06f9af23156b,271b93783d28..28b36545de24
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@@ -11,7 -11,6 +11,7 @@@
   * Copyright 2008 Jouni Malinen <jouni.malinen at atheros.com>
   * Copyright 2008 Colin McCabe <colin at cozybit.com>
   * Copyright 2015-2017	Intel Deutschland GmbH
 + * Copyright (C) 2018 Intel Corporation
   *
   * Permission to use, copy, modify, and/or distribute this software for any
   * purpose with or without fee is hereby granted, provided that the above
@@@ -204,8 -203,7 +204,8 @@@
   * FILS shared key authentication offload should be able to construct the
   * authentication and association frames for FILS shared key authentication and
   * eventually do a key derivation as per IEEE 802.11ai. The below additional
 - * parameters should be given to driver in %NL80211_CMD_CONNECT.
 + * parameters should be given to driver in %NL80211_CMD_CONNECT and/or in
 + * %NL80211_CMD_UPDATE_CONNECT_PARAMS.
   *	%NL80211_ATTR_FILS_ERP_USERNAME - used to construct keyname_nai
   *	%NL80211_ATTR_FILS_ERP_REALM - used to construct keyname_nai
   *	%NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM - used to construct erp message
@@@ -216,8 -214,7 +216,8 @@@
   * as specified in IETF RFC 6696.
   *
   * When FILS shared key authentication is completed, driver needs to provide the
 - * below additional parameters to userspace.
 + * below additional parameters to userspace, either after setting up a
 + * connection or after roaming.
   *	%NL80211_ATTR_FILS_KEK - used for key renewal
   *	%NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM - used in further EAP-RP exchanges
   *	%NL80211_ATTR_PMKID - used to identify the PMKSA used/generated
@@@ -2228,16 -2225,6 +2228,16 @@@ enum nl80211_commands 
   * @NL80211_ATTR_NSS: Station's new/updated RX_NSS value notified using this
   *	u8 attribute. This is used with %NL80211_CMD_STA_OPMODE_CHANGED.
   *
 + * @NL80211_ATTR_TXQ_STATS: TXQ statistics (nested attribute, see &enum
 + *      nl80211_txq_stats)
 + * @NL80211_ATTR_TXQ_LIMIT: Total packet limit for the TXQ queues for this phy.
 + *      The smaller of this and the memory limit is enforced.
 + * @NL80211_ATTR_TXQ_MEMORY_LIMIT: Total memory limit (in bytes) for the
 + *      TXQ queues for this phy. The smaller of this and the packet limit is
 + *      enforced.
 + * @NL80211_ATTR_TXQ_QUANTUM: TXQ scheduler quantum (bytes). Number of bytes
 + *      a flow is assigned on each round of the DRR scheduler.
 + *
   * @NUM_NL80211_ATTR: total number of nl80211_attrs available
   * @NL80211_ATTR_MAX: highest attribute number currently defined
   * @__NL80211_ATTR_AFTER_LAST: internal use
@@@ -2672,11 -2659,6 +2672,11 @@@ enum nl80211_attrs 
  
  	NL80211_ATTR_CONTROL_PORT_OVER_NL80211,
  
 +	NL80211_ATTR_TXQ_STATS,
 +	NL80211_ATTR_TXQ_LIMIT,
 +	NL80211_ATTR_TXQ_MEMORY_LIMIT,
 +	NL80211_ATTR_TXQ_QUANTUM,
 +
  	/* add attributes here, update the policy in nl80211.c */
  
  	__NL80211_ATTR_AFTER_LAST,
@@@ -2716,7 -2698,7 +2716,7 @@@
  #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
  #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
  
- #define NL80211_WIPHY_NAME_MAXLEN		128
+ #define NL80211_WIPHY_NAME_MAXLEN		64
  
  #define NL80211_MAX_SUPP_RATES			32
  #define NL80211_MAX_SUPP_HT_RATES		77
@@@ -3000,8 -2982,6 +3000,8 @@@ enum nl80211_sta_bss_param 
   *	received from the station (u64, usec)
   * @NL80211_STA_INFO_PAD: attribute used for padding for 64-bit alignment
   * @NL80211_STA_INFO_ACK_SIGNAL: signal strength of the last ACK frame(u8, dBm)
 + * @NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG: avg signal strength of (data)
 + *	ACK frame (s8, dBm)
   * @__NL80211_STA_INFO_AFTER_LAST: internal
   * @NL80211_STA_INFO_MAX: highest possible station info attribute
   */
@@@ -3041,7 -3021,6 +3041,7 @@@ enum nl80211_sta_info 
  	NL80211_STA_INFO_RX_DURATION,
  	NL80211_STA_INFO_PAD,
  	NL80211_STA_INFO_ACK_SIGNAL,
 +	NL80211_STA_INFO_DATA_ACK_SIGNAL_AVG,
  
  	/* keep last */
  	__NL80211_STA_INFO_AFTER_LAST,
@@@ -3059,7 -3038,6 +3059,7 @@@
   * @NL80211_TID_STATS_TX_MSDU_FAILED: number of failed transmitted
   *	MSDUs (u64)
   * @NL80211_TID_STATS_PAD: attribute used for padding for 64-bit alignment
 + * @NL80211_TID_STATS_TXQ_STATS: TXQ stats (nested attribute)
   * @NUM_NL80211_TID_STATS: number of attributes here
   * @NL80211_TID_STATS_MAX: highest numbered attribute here
   */
@@@ -3070,7 -3048,6 +3070,7 @@@ enum nl80211_tid_stats 
  	NL80211_TID_STATS_TX_MSDU_RETRIES,
  	NL80211_TID_STATS_TX_MSDU_FAILED,
  	NL80211_TID_STATS_PAD,
 +	NL80211_TID_STATS_TXQ_STATS,
  
  	/* keep last */
  	NUM_NL80211_TID_STATS,
@@@ -3078,44 -3055,6 +3078,44 @@@
  };
  
  /**
 + * enum nl80211_txq_stats - per TXQ statistics attributes
 + * @__NL80211_TXQ_STATS_INVALID: attribute number 0 is reserved
 + * @NUM_NL80211_TXQ_STATS: number of attributes here
 + * @NL80211_TXQ_STATS_BACKLOG_BYTES: number of bytes currently backlogged
 + * @NL80211_TXQ_STATS_BACKLOG_PACKETS: number of packets currently
 + *      backlogged
 + * @NL80211_TXQ_STATS_FLOWS: total number of new flows seen
 + * @NL80211_TXQ_STATS_DROPS: total number of packet drops
 + * @NL80211_TXQ_STATS_ECN_MARKS: total number of packet ECN marks
 + * @NL80211_TXQ_STATS_OVERLIMIT: number of drops due to queue space overflow
 + * @NL80211_TXQ_STATS_OVERMEMORY: number of drops due to memory limit overflow
 + *      (only for per-phy stats)
 + * @NL80211_TXQ_STATS_COLLISIONS: number of hash collisions
 + * @NL80211_TXQ_STATS_TX_BYTES: total number of bytes dequeued from TXQ
 + * @NL80211_TXQ_STATS_TX_PACKETS: total number of packets dequeued from TXQ
 + * @NL80211_TXQ_STATS_MAX_FLOWS: number of flow buckets for PHY
 + * @NL80211_TXQ_STATS_MAX: highest numbered attribute here
 + */
 +enum nl80211_txq_stats {
 +	__NL80211_TXQ_STATS_INVALID,
 +	NL80211_TXQ_STATS_BACKLOG_BYTES,
 +	NL80211_TXQ_STATS_BACKLOG_PACKETS,
 +	NL80211_TXQ_STATS_FLOWS,
 +	NL80211_TXQ_STATS_DROPS,
 +	NL80211_TXQ_STATS_ECN_MARKS,
 +	NL80211_TXQ_STATS_OVERLIMIT,
 +	NL80211_TXQ_STATS_OVERMEMORY,
 +	NL80211_TXQ_STATS_COLLISIONS,
 +	NL80211_TXQ_STATS_TX_BYTES,
 +	NL80211_TXQ_STATS_TX_PACKETS,
 +	NL80211_TXQ_STATS_MAX_FLOWS,
 +
 +	/* keep last */
 +	NUM_NL80211_TXQ_STATS,
 +	NL80211_TXQ_STATS_MAX = NUM_NL80211_TXQ_STATS - 1
 +};
 +
 +/**
   * enum nl80211_mpath_flags - nl80211 mesh path flags
   *
   * @NL80211_MPATH_FLAG_ACTIVE: the mesh path is active
@@@ -3205,29 -3144,6 +3205,29 @@@ enum nl80211_band_attr 
  #define NL80211_BAND_ATTR_HT_CAPA NL80211_BAND_ATTR_HT_CAPA
  
  /**
 + * enum nl80211_wmm_rule - regulatory wmm rule
 + *
 + * @__NL80211_WMMR_INVALID: attribute number 0 is reserved
 + * @NL80211_WMMR_CW_MIN: Minimum contention window slot.
 + * @NL80211_WMMR_CW_MAX: Maximum contention window slot.
 + * @NL80211_WMMR_AIFSN: Arbitration Inter Frame Space.
 + * @NL80211_WMMR_TXOP: Maximum allowed tx operation time.
 + * @NL80211_WMMR_MAX: highest possible wmm rule.
 + * @__NL80211_WMMR_LAST: Internal use.
 + */
 +enum nl80211_wmm_rule {
 +	__NL80211_WMMR_INVALID,
 +	NL80211_WMMR_CW_MIN,
 +	NL80211_WMMR_CW_MAX,
 +	NL80211_WMMR_AIFSN,
 +	NL80211_WMMR_TXOP,
 +
 +	/* keep last */
 +	__NL80211_WMMR_LAST,
 +	NL80211_WMMR_MAX = __NL80211_WMMR_LAST - 1
 +};
 +
 +/**
   * enum nl80211_frequency_attr - frequency attributes
   * @__NL80211_FREQUENCY_ATTR_INVALID: attribute number 0 is reserved
   * @NL80211_FREQUENCY_ATTR_FREQ: Frequency in MHz
@@@ -3276,9 -3192,6 +3276,9 @@@
   *	on this channel in current regulatory domain.
   * @NL80211_FREQUENCY_ATTR_NO_10MHZ: 10 MHz operation is not allowed
   *	on this channel in current regulatory domain.
 + * @NL80211_FREQUENCY_ATTR_WMM: this channel has wmm limitations.
 + *	This is a nested attribute that contains the wmm limitation per AC.
 + *	(see &enum nl80211_wmm_rule)
   * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
   *	currently defined
   * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
@@@ -3307,7 -3220,6 +3307,7 @@@ enum nl80211_frequency_attr 
  	NL80211_FREQUENCY_ATTR_IR_CONCURRENT,
  	NL80211_FREQUENCY_ATTR_NO_20MHZ,
  	NL80211_FREQUENCY_ATTR_NO_10MHZ,
 +	NL80211_FREQUENCY_ATTR_WMM,
  
  	/* keep last */
  	__NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@@ -5128,11 -5040,6 +5128,11 @@@ enum nl80211_feature_flags 
   *	"radar detected" event.
   * @NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211: Driver supports sending and
   *	receiving control port frames over nl80211 instead of the netdevice.
 + * @NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT: The driver supports data ACK
 + *	RSSI if the firmware does; this flag advertises ACK RSSI support to
 + *	nl80211.
 + * @NL80211_EXT_FEATURE_TXQS: Driver supports FQ-CoDel-enabled intermediate
 + *      TXQs.
   *
   * @NUM_NL80211_EXT_FEATURES: number of extended features.
   * @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@@ -5165,8 -5072,6 +5165,8 @@@ enum nl80211_ext_feature_index 
  	NL80211_EXT_FEATURE_HIGH_ACCURACY_SCAN,
  	NL80211_EXT_FEATURE_DFS_OFFLOAD,
  	NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211,
 +	NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT,
 +	NL80211_EXT_FEATURE_TXQS,
  
  	/* add new features before the definition below */
  	NUM_NL80211_EXT_FEATURES,
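
The nested NL80211_ATTR_TXQ_STATS attribute introduced above carries the
per-TXQ counters from enum nl80211_txq_stats. A hedged sketch of how a driver
or cfg80211 might emit a few of them; put_txq_stats() and its parameters are
illustrative, the nla_* helpers and attribute names are real:

	static int put_txq_stats(struct sk_buff *msg, u32 backlog_bytes,
				 u32 backlog_packets, u32 flows)
	{
		struct nlattr *txqattr;

		txqattr = nla_nest_start(msg, NL80211_ATTR_TXQ_STATS);
		if (!txqattr)
			return -ENOBUFS;

		if (nla_put_u32(msg, NL80211_TXQ_STATS_BACKLOG_BYTES,
				backlog_bytes) ||
		    nla_put_u32(msg, NL80211_TXQ_STATS_BACKLOG_PACKETS,
				backlog_packets) ||
		    nla_put_u32(msg, NL80211_TXQ_STATS_FLOWS, flows)) {
			nla_nest_cancel(msg, txqattr);
			return -ENOBUFS;
		}

		nla_nest_end(msg, txqattr);
		return 0;
	}
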
diff --combined kernel/bpf/verifier.c
index 967cacf286ea,1904e814f282..1fd9667b29f1
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@@ -22,7 -22,6 +22,7 @@@
  #include <linux/stringify.h>
  #include <linux/bsearch.h>
  #include <linux/sort.h>
 +#include <linux/perf_event.h>
  
  #include "disasm.h"
  
@@@ -157,7 -156,29 +157,29 @@@ struct bpf_verifier_stack_elem 
  #define BPF_COMPLEXITY_LIMIT_INSNS	131072
  #define BPF_COMPLEXITY_LIMIT_STACK	1024
  
- #define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA)
+ #define BPF_MAP_PTR_UNPRIV	1UL
+ #define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
+ 					  POISON_POINTER_DELTA))
+ #define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
+ 
+ static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
+ {
+ 	return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON;
+ }
+ 
+ static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
+ {
+ 	return aux->map_state & BPF_MAP_PTR_UNPRIV;
+ }
+ 
+ static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
+ 			      const struct bpf_map *map, bool unpriv)
+ {
+ 	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
+ 	unpriv |= bpf_map_ptr_unpriv(aux);
+ 	aux->map_state = (unsigned long)map |
+ 			 (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
+ }
  
  struct bpf_call_arg_meta {
  	struct bpf_map *map_ptr;
@@@ -165,8 -186,6 +187,8 @@@
  	bool pkt_access;
  	int regno;
  	int access_size;
 +	s64 msize_smax_value;
 +	u64 msize_umax_value;
  };
  
  static DEFINE_MUTEX(bpf_verifier_lock);
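
The map_state encoding above packs a struct bpf_map pointer and an "unpriv"
flag into a single unsigned long: map pointers are at least 2-byte aligned, so
bit 0 is free, and the poison value is constructed so it never collides with
the flag bit (the BUILD_BUG_ON checks that). A compilable userspace model of
the trick, with stand-in names:

	#include <assert.h>
	#include <stdio.h>

	#define MAP_PTR_UNPRIV 1UL

	struct map_model { int unpriv_array; };

	static unsigned long pack(const struct map_model *map, int unpriv)
	{
		/* bit 0 of an aligned pointer is always zero */
		return (unsigned long)map | (unpriv ? MAP_PTR_UNPRIV : 0UL);
	}

	static const struct map_model *unpack(unsigned long state)
	{
		return (const struct map_model *)(state & ~MAP_PTR_UNPRIV);
	}

	int main(void)
	{
		struct map_model m = { .unpriv_array = 1 };
		unsigned long state = pack(&m, m.unpriv_array);

		assert(unpack(state) == &m);
		printf("unpriv bit: %lu\n", state & MAP_PTR_UNPRIV);
		return 0;
	}
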
@@@ -741,19 -760,18 +763,19 @@@ enum reg_arg_type 
  
  static int cmp_subprogs(const void *a, const void *b)
  {
 -	return *(int *)a - *(int *)b;
 +	return ((struct bpf_subprog_info *)a)->start -
 +	       ((struct bpf_subprog_info *)b)->start;
  }
  
  static int find_subprog(struct bpf_verifier_env *env, int off)
  {
 -	u32 *p;
 +	struct bpf_subprog_info *p;
  
 -	p = bsearch(&off, env->subprog_starts, env->subprog_cnt,
 -		    sizeof(env->subprog_starts[0]), cmp_subprogs);
 +	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
 +		    sizeof(env->subprog_info[0]), cmp_subprogs);
  	if (!p)
  		return -ENOENT;
 -	return p - env->subprog_starts;
 +	return p - env->subprog_info;
  
  }
  
@@@ -773,24 -791,18 +795,24 @@@ static int add_subprog(struct bpf_verif
  		verbose(env, "too many subprograms\n");
  		return -E2BIG;
  	}
 -	env->subprog_starts[env->subprog_cnt++] = off;
 -	sort(env->subprog_starts, env->subprog_cnt,
 -	     sizeof(env->subprog_starts[0]), cmp_subprogs, NULL);
 +	env->subprog_info[env->subprog_cnt++].start = off;
 +	sort(env->subprog_info, env->subprog_cnt,
 +	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
  	return 0;
  }
  
  static int check_subprogs(struct bpf_verifier_env *env)
  {
  	int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
 +	struct bpf_subprog_info *subprog = env->subprog_info;
  	struct bpf_insn *insn = env->prog->insnsi;
  	int insn_cnt = env->prog->len;
  
 +	/* Add entry function. */
 +	ret = add_subprog(env, 0);
 +	if (ret < 0)
 +		return ret;
 +
  	/* determine subprog starts. The end is one before the next starts */
  	for (i = 0; i < insn_cnt; i++) {
  		if (insn[i].code != (BPF_JMP | BPF_CALL))
@@@ -810,18 -822,16 +832,18 @@@
  			return ret;
  	}
  
 +	/* Add a fake 'exit' subprog to simplify subprog iteration
 +	 * logic; 'subprog_cnt' does not count it.
 +	 */
 +	subprog[env->subprog_cnt].start = insn_cnt;
 +
  	if (env->log.level > 1)
  		for (i = 0; i < env->subprog_cnt; i++)
 -			verbose(env, "func#%d @%d\n", i, env->subprog_starts[i]);
 +			verbose(env, "func#%d @%d\n", i, subprog[i].start);
  
  	/* now check that all jumps are within the same subprog */
 -	subprog_start = 0;
 -	if (env->subprog_cnt == cur_subprog)
 -		subprog_end = insn_cnt;
 -	else
 -		subprog_end = env->subprog_starts[cur_subprog++];
 +	subprog_start = subprog[cur_subprog].start;
 +	subprog_end = subprog[cur_subprog + 1].start;
  	for (i = 0; i < insn_cnt; i++) {
  		u8 code = insn[i].code;
  
@@@ -846,9 -856,10 +868,9 @@@ next
  				return -EINVAL;
  			}
  			subprog_start = subprog_end;
 -			if (env->subprog_cnt == cur_subprog)
 -				subprog_end = insn_cnt;
 -			else
 -				subprog_end = env->subprog_starts[cur_subprog++];
 +			cur_subprog++;
 +			if (cur_subprog < env->subprog_cnt)
 +				subprog_end = subprog[cur_subprog + 1].start;
  		}
  	}
  	return 0;
@@@ -989,7 -1000,7 +1011,7 @@@ static bool register_is_null(struct bpf
   */
  static int check_stack_write(struct bpf_verifier_env *env,
  			     struct bpf_func_state *state, /* func where register points to */
- 			     int off, int size, int value_regno)
+ 			     int off, int size, int value_regno, int insn_idx)
  {
  	struct bpf_func_state *cur; /* state of the current function */
  	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
@@@ -1028,8 -1039,33 +1050,33 @@@
  		state->stack[spi].spilled_ptr = cur->regs[value_regno];
  		state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
  
- 		for (i = 0; i < BPF_REG_SIZE; i++)
+ 		for (i = 0; i < BPF_REG_SIZE; i++) {
+ 			if (state->stack[spi].slot_type[i] == STACK_MISC &&
+ 			    !env->allow_ptr_leaks) {
+ 				int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
+ 				int soff = (-spi - 1) * BPF_REG_SIZE;
+ 
+ 			/* Detected reuse of an integer stack slot with a
+ 			 * pointer, which means either LLVM is reusing the
+ 			 * stack slot or an attacker is trying to exploit
+ 			 * CVE-2018-3639 (speculative store bypass).
+ 			 * Sanitize that slot with a preemptive store of zero.
+ 			 */
+ 				if (*poff && *poff != soff) {
+ 				/* disallow programs where a single insn stores
+ 				 * into two different stack slots, since the
+ 				 * verifier cannot sanitize them
+ 					 */
+ 					verbose(env,
+ 						"insn %d cannot access two stack slots fp%d and fp%d",
+ 						insn_idx, *poff, soff);
+ 					return -EINVAL;
+ 				}
+ 				*poff = soff;
+ 			}
  			state->stack[spi].slot_type[i] = STACK_SPILL;
+ 		}
  	} else {
  		u8 type = STACK_MISC;
  
@@@ -1262,7 -1298,6 +1309,7 @@@ static bool may_access_direct_pkt_data(
  	switch (env->prog->type) {
  	case BPF_PROG_TYPE_LWT_IN:
  	case BPF_PROG_TYPE_LWT_OUT:
 +	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
  		/* dst_input() and dst_output() can't write for now */
  		if (t == BPF_WRITE)
  			return false;
@@@ -1482,13 -1517,13 +1529,13 @@@ static int update_stack_depth(struct bp
  			      const struct bpf_func_state *func,
  			      int off)
  {
 -	u16 stack = env->subprog_stack_depth[func->subprogno];
 +	u16 stack = env->subprog_info[func->subprogno].stack_depth;
  
  	if (stack >= -off)
  		return 0;
  
  	/* update known max for given subprogram */
 -	env->subprog_stack_depth[func->subprogno] = -off;
 +	env->subprog_info[func->subprogno].stack_depth = -off;
  	return 0;
  }
  
@@@ -1500,9 -1535,9 +1547,9 @@@
   */
  static int check_max_stack_depth(struct bpf_verifier_env *env)
  {
 -	int depth = 0, frame = 0, subprog = 0, i = 0, subprog_end;
 +	int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
 +	struct bpf_subprog_info *subprog = env->subprog_info;
  	struct bpf_insn *insn = env->prog->insnsi;
 -	int insn_cnt = env->prog->len;
  	int ret_insn[MAX_CALL_FRAMES];
  	int ret_prog[MAX_CALL_FRAMES];
  
@@@ -1510,14 -1545,17 +1557,14 @@@ process_func
  	/* round up to 32-bytes, since this is granularity
  	 * of interpreter stack size
  	 */
 -	depth += round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
 +	depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
  	if (depth > MAX_BPF_STACK) {
  		verbose(env, "combined stack size of %d calls is %d. Too large\n",
  			frame + 1, depth);
  		return -EACCES;
  	}
  continue_func:
 -	if (env->subprog_cnt == subprog)
 -		subprog_end = insn_cnt;
 -	else
 -		subprog_end = env->subprog_starts[subprog];
 +	subprog_end = subprog[idx + 1].start;
  	for (; i < subprog_end; i++) {
  		if (insn[i].code != (BPF_JMP | BPF_CALL))
  			continue;
@@@ -1525,16 -1563,17 +1572,16 @@@
  			continue;
  		/* remember insn and function to return to */
  		ret_insn[frame] = i + 1;
 -		ret_prog[frame] = subprog;
 +		ret_prog[frame] = idx;
  
  		/* find the callee */
  		i = i + insn[i].imm + 1;
 -		subprog = find_subprog(env, i);
 -		if (subprog < 0) {
 +		idx = find_subprog(env, i);
 +		if (idx < 0) {
  			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
  				  i);
  			return -EFAULT;
  		}
 -		subprog++;
  		frame++;
  		if (frame >= MAX_CALL_FRAMES) {
  			WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
@@@ -1547,10 -1586,10 +1594,10 @@@
  	 */
  	if (frame == 0)
  		return 0;
 -	depth -= round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
 +	depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
  	frame--;
  	i = ret_insn[frame];
 -	subprog = ret_prog[frame];
 +	idx = ret_prog[frame];
  	goto continue_func;
  }
  
@@@ -1566,7 -1605,8 +1613,7 @@@ static int get_callee_stack_depth(struc
  			  start);
  		return -EFAULT;
  	}
 -	subprog++;
 -	return env->subprog_stack_depth[subprog];
 +	return env->subprog_info[subprog].stack_depth;
  }
  #endif
  
@@@ -1701,7 -1741,7 +1748,7 @@@ static int check_mem_access(struct bpf_
  
  		if (t == BPF_WRITE)
  			err = check_stack_write(env, state, off, size,
- 						value_regno);
+ 						value_regno, insn_idx);
  		else
  			err = check_stack_read(env, state, off, size,
  					       value_regno);
@@@ -1921,7 -1961,7 +1968,7 @@@ static int check_func_arg(struct bpf_ve
  	if (arg_type == ARG_PTR_TO_MAP_KEY ||
  	    arg_type == ARG_PTR_TO_MAP_VALUE) {
  		expected_type = PTR_TO_STACK;
 -		if (!type_is_pkt_pointer(type) &&
 +		if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE &&
  		    type != expected_type)
  			goto err_type;
  	} else if (arg_type == ARG_CONST_SIZE ||
@@@ -1973,9 -2013,14 +2020,9 @@@
  			verbose(env, "invalid map_ptr to access map->key\n");
  			return -EACCES;
  		}
 -		if (type_is_pkt_pointer(type))
 -			err = check_packet_access(env, regno, reg->off,
 -						  meta->map_ptr->key_size,
 -						  false);
 -		else
 -			err = check_stack_boundary(env, regno,
 -						   meta->map_ptr->key_size,
 -						   false, NULL);
 +		err = check_helper_mem_access(env, regno,
 +					      meta->map_ptr->key_size, false,
 +					      NULL);
  	} else if (arg_type == ARG_PTR_TO_MAP_VALUE) {
  		/* bpf_map_xxx(..., map_ptr, ..., value) call:
  		 * check [value, value + map->value_size) validity
@@@ -1985,18 -2030,17 +2032,18 @@@
  			verbose(env, "invalid map_ptr to access map->value\n");
  			return -EACCES;
  		}
 -		if (type_is_pkt_pointer(type))
 -			err = check_packet_access(env, regno, reg->off,
 -						  meta->map_ptr->value_size,
 -						  false);
 -		else
 -			err = check_stack_boundary(env, regno,
 -						   meta->map_ptr->value_size,
 -						   false, NULL);
 +		err = check_helper_mem_access(env, regno,
 +					      meta->map_ptr->value_size, false,
 +					      NULL);
  	} else if (arg_type_is_mem_size(arg_type)) {
  		bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
  
 +		/* remember the mem_size which may be used later
 +		 * to refine return values.
 +		 */
 +		meta->msize_smax_value = reg->smax_value;
 +		meta->msize_umax_value = reg->umax_value;
 +
  		/* The register is SCALAR_VALUE; the access check
  		 * happens using its boundaries.
  		 */
@@@ -2074,11 -2118,8 +2121,11 @@@ static int check_map_func_compatibility
  		if (func_id != BPF_FUNC_redirect_map)
  			goto error;
  		break;
 -	/* Restrict bpf side of cpumap, open when use-cases appear */
 +	/* Restrict bpf side of cpumap and xskmap, open when use-cases
 +	 * appear.
 +	 */
  	case BPF_MAP_TYPE_CPUMAP:
 +	case BPF_MAP_TYPE_XSKMAP:
  		if (func_id != BPF_FUNC_redirect_map)
  			goto error;
  		break;
@@@ -2094,13 -2135,6 +2141,13 @@@
  		    func_id != BPF_FUNC_msg_redirect_map)
  			goto error;
  		break;
 +	case BPF_MAP_TYPE_SOCKHASH:
 +		if (func_id != BPF_FUNC_sk_redirect_hash &&
 +		    func_id != BPF_FUNC_sock_hash_update &&
 +		    func_id != BPF_FUNC_map_delete_elem &&
 +		    func_id != BPF_FUNC_msg_redirect_hash)
 +			goto error;
 +		break;
  	default:
  		break;
  	}
@@@ -2110,7 -2144,7 +2157,7 @@@
  	case BPF_FUNC_tail_call:
  		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
  			goto error;
 -		if (env->subprog_cnt) {
 +		if (env->subprog_cnt > 1) {
  			verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
  			return -EINVAL;
  		}
@@@ -2132,20 -2166,16 +2179,20 @@@
  		break;
  	case BPF_FUNC_redirect_map:
  		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
 -		    map->map_type != BPF_MAP_TYPE_CPUMAP)
 +		    map->map_type != BPF_MAP_TYPE_CPUMAP &&
 +		    map->map_type != BPF_MAP_TYPE_XSKMAP)
  			goto error;
  		break;
  	case BPF_FUNC_sk_redirect_map:
  	case BPF_FUNC_msg_redirect_map:
 +	case BPF_FUNC_sock_map_update:
  		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
  			goto error;
  		break;
 -	case BPF_FUNC_sock_map_update:
 -		if (map->map_type != BPF_MAP_TYPE_SOCKMAP)
 +	case BPF_FUNC_sk_redirect_hash:
 +	case BPF_FUNC_msg_redirect_hash:
 +	case BPF_FUNC_sock_hash_update:
 +		if (map->map_type != BPF_MAP_TYPE_SOCKHASH)
  			goto error;
  		break;
  	default:
@@@ -2286,7 -2316,7 +2333,7 @@@ static int check_func_call(struct bpf_v
  			/* remember the callsite, it will be used by bpf_exit */
  			*insn_idx /* callsite */,
  			state->curframe + 1 /* frameno within this callchain */,
 -			subprog + 1 /* subprog number within this prog */);
 +			subprog /* subprog number within this prog */);
  
  	/* copy r1 - r5 args that callee can access */
  	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
@@@ -2350,23 -2380,29 +2397,46 @@@ static int prepare_func_exit(struct bpf
  	return 0;
  }
  
 +static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
 +				   int func_id,
 +				   struct bpf_call_arg_meta *meta)
 +{
 +	struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
 +
 +	if (ret_type != RET_INTEGER ||
 +	    (func_id != BPF_FUNC_get_stack &&
 +	     func_id != BPF_FUNC_probe_read_str))
 +		return;
 +
 +	ret_reg->smax_value = meta->msize_smax_value;
 +	ret_reg->umax_value = meta->msize_umax_value;
 +	__reg_deduce_bounds(ret_reg);
 +	__reg_bound_offset(ret_reg);
 +}
 +
+ static int
+ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
+ 		int func_id, int insn_idx)
+ {
+ 	struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
+ 
+ 	if (func_id != BPF_FUNC_tail_call &&
+ 	    func_id != BPF_FUNC_map_lookup_elem)
+ 		return 0;
+ 	if (meta->map_ptr == NULL) {
+ 		verbose(env, "kernel subsystem misconfigured verifier\n");
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (!BPF_MAP_PTR(aux->map_state))
+ 		bpf_map_ptr_store(aux, meta->map_ptr,
+ 				  meta->map_ptr->unpriv_array);
+ 	else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr)
+ 		bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON,
+ 				  meta->map_ptr->unpriv_array);
+ 	return 0;
+ }
+ 
  static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
  {
  	const struct bpf_func_proto *fn = NULL;
@@@ -2421,13 -2457,6 +2491,6 @@@
  	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
  	if (err)
  		return err;
- 	if (func_id == BPF_FUNC_tail_call) {
- 		if (meta.map_ptr == NULL) {
- 			verbose(env, "verifier bug\n");
- 			return -EINVAL;
- 		}
- 		env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr;
- 	}
  	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
  	if (err)
  		return err;
@@@ -2438,6 -2467,10 +2501,10 @@@
  	if (err)
  		return err;
  
+ 	err = record_func_map(env, &meta, func_id, insn_idx);
+ 	if (err)
+ 		return err;
+ 
  	/* Mark slots with STACK_MISC in case of raw mode, stack offset
  	 * is inferred from register state.
  	 */
@@@ -2462,8 -2495,6 +2529,6 @@@
  	} else if (fn->ret_type == RET_VOID) {
  		regs[BPF_REG_0].type = NOT_INIT;
  	} else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) {
- 		struct bpf_insn_aux_data *insn_aux;
- 
  		regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
  		/* There is no offset yet applied, variable or fixed */
  		mark_reg_known_zero(env, regs, BPF_REG_0);
@@@ -2479,41 -2510,16 +2544,36 @@@
  		}
  		regs[BPF_REG_0].map_ptr = meta.map_ptr;
  		regs[BPF_REG_0].id = ++env->id_gen;
- 		insn_aux = &env->insn_aux_data[insn_idx];
- 		if (!insn_aux->map_ptr)
- 			insn_aux->map_ptr = meta.map_ptr;
- 		else if (insn_aux->map_ptr != meta.map_ptr)
- 			insn_aux->map_ptr = BPF_MAP_PTR_POISON;
  	} else {
  		verbose(env, "unknown return type %d of func %s#%d\n",
  			fn->ret_type, func_id_name(func_id), func_id);
  		return -EINVAL;
  	}
  
 +	do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
 +
  	err = check_map_func_compatibility(env, meta.map_ptr, func_id);
  	if (err)
  		return err;
  
 +	if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) {
 +		const char *err_str;
 +
 +#ifdef CONFIG_PERF_EVENTS
 +		err = get_callchain_buffers(sysctl_perf_event_max_stack);
 +		err_str = "cannot get callchain buffer for func %s#%d\n";
 +#else
 +		err = -ENOTSUPP;
 +		err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
 +#endif
 +		if (err) {
 +			verbose(env, err_str, func_id_name(func_id), func_id);
 +			return err;
 +		}
 +
 +		env->prog->has_callchain_buf = true;
 +	}
 +
  	if (changes_data)
  		clear_all_pkt_pointers(env);
  	return 0;
@@@ -2958,7 -2964,10 +3018,7 @@@ static int adjust_scalar_min_max_vals(s
  			dst_reg->umin_value <<= umin_val;
  			dst_reg->umax_value <<= umax_val;
  		}
 -		if (src_known)
 -			dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
 -		else
 -			dst_reg->var_off = tnum_lshift(tnum_unknown, umin_val);
 +		dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
  		/* We may learn something more from the var_off */
  		__update_reg_bounds(dst_reg);
  		break;
@@@ -2986,35 -2995,16 +3046,35 @@@
  		 */
  		dst_reg->smin_value = S64_MIN;
  		dst_reg->smax_value = S64_MAX;
 -		if (src_known)
 -			dst_reg->var_off = tnum_rshift(dst_reg->var_off,
 -						       umin_val);
 -		else
 -			dst_reg->var_off = tnum_rshift(tnum_unknown, umin_val);
 +		dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
  		dst_reg->umin_value >>= umax_val;
  		dst_reg->umax_value >>= umin_val;
  		/* We may learn something more from the var_off */
  		__update_reg_bounds(dst_reg);
  		break;
 +	case BPF_ARSH:
 +		if (umax_val >= insn_bitness) {
 +			/* Shifts greater than 31 or 63 are undefined.
 +			 * This includes shifts by a negative number.
 +			 */
 +			mark_reg_unknown(env, regs, insn->dst_reg);
 +			break;
 +		}
 +
 +		/* Upon reaching here, src_known is true and
 +		 * umax_val is equal to umin_val.
 +		 */
 +		dst_reg->smin_value >>= umin_val;
 +		dst_reg->smax_value >>= umin_val;
 +		dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
 +
 +		/* blow away the dst_reg umin_value/umax_value and rely on
 +		 * dst_reg var_off to refine the result.
 +		 */
 +		dst_reg->umin_value = 0;
 +		dst_reg->umax_value = U64_MAX;
 +		__update_reg_bounds(dst_reg);
 +		break;
  	default:
  		mark_reg_unknown(env, regs, insn->dst_reg);
  		break;
@@@ -3898,12 -3888,7 +3958,12 @@@ static int check_ld_abs(struct bpf_veri
  		return -EINVAL;
  	}
  
 -	if (env->subprog_cnt) {
 +	if (!env->ops->gen_ld_abs) {
 +		verbose(env, "bpf verifier is misconfigured\n");
 +		return -EINVAL;
 +	}
 +
 +	if (env->subprog_cnt > 1) {
  		/* when program has LD_ABS insn JITs and interpreter assume
  		 * that r1 == ctx == skb which is not the case for callees
  		 * that can have arbitrary arguments. It's problematic
@@@ -4934,15 -4919,15 +4994,15 @@@ process_bpf_exit
  
  	verbose(env, "processed %d insns (limit %d), stack depth ",
  		insn_processed, BPF_COMPLEXITY_LIMIT_INSNS);
 -	for (i = 0; i < env->subprog_cnt + 1; i++) {
 -		u32 depth = env->subprog_stack_depth[i];
 +	for (i = 0; i < env->subprog_cnt; i++) {
 +		u32 depth = env->subprog_info[i].stack_depth;
  
  		verbose(env, "%d", depth);
 -		if (i + 1 < env->subprog_cnt + 1)
 +		if (i + 1 < env->subprog_cnt)
  			verbose(env, "+");
  	}
  	verbose(env, "\n");
 -	env->prog->aux->stack_depth = env->subprog_stack_depth[0];
 +	env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
  	return 0;
  }
  
@@@ -5066,7 -5051,7 +5126,7 @@@ static int replace_map_fd_with_map_ptr(
  			/* hold the map. If the program is rejected by verifier,
  			 * the map will be released by release_maps() or it
  			 * will be used by the valid program until it's unloaded
 -			 * and all maps are released in free_bpf_prog_info()
 +			 * and all maps are released in free_used_maps()
  			 */
  			map = bpf_map_inc(map, false);
  			if (IS_ERR(map)) {
@@@ -5148,11 -5133,10 +5208,11 @@@ static void adjust_subprog_starts(struc
  
  	if (len == 1)
  		return;
 -	for (i = 0; i < env->subprog_cnt; i++) {
 -		if (env->subprog_starts[i] < off)
 +	/* NOTE: fake 'exit' subprog should be updated as well. */
 +	for (i = 0; i <= env->subprog_cnt; i++) {
 +		if (env->subprog_info[i].start < off)
  			continue;
 -		env->subprog_starts[i] += len - 1;
 +		env->subprog_info[i].start += len - 1;
  	}
  }
  
@@@ -5226,7 -5210,7 +5286,7 @@@ static int convert_ctx_accesses(struct 
  		}
  	}
  
 -	if (!ops->convert_ctx_access)
 +	if (!ops->convert_ctx_access || bpf_prog_is_dev_bound(env->prog->aux))
  		return 0;
  
  	insn = env->prog->insnsi + delta;
@@@ -5245,6 -5229,34 +5305,34 @@@
  		else
  			continue;
  
+ 		if (type == BPF_WRITE &&
+ 		    env->insn_aux_data[i + delta].sanitize_stack_off) {
+ 			struct bpf_insn patch[] = {
+ 				/* Sanitize the suspicious stack slot with zero.
+ 				 * There are no memory dependencies for this store,
+ 				 * since it only uses the frame pointer and an
+ 				 * immediate constant of zero.
+ 				 */
+ 				BPF_ST_MEM(BPF_DW, BPF_REG_FP,
+ 					   env->insn_aux_data[i + delta].sanitize_stack_off,
+ 					   0),
+ 				/* the original STX instruction will immediately
+ 				 * overwrite the same stack slot with appropriate value
+ 				 */
+ 				*insn,
+ 			};
+ 
+ 			cnt = ARRAY_SIZE(patch);
+ 			new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
+ 			if (!new_prog)
+ 				return -ENOMEM;
+ 
+ 			delta    += cnt - 1;
+ 			env->prog = new_prog;
+ 			insn      = new_prog->insnsi + i + delta;
+ 			continue;
+ 		}
+ 
  		if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
  			continue;
  
@@@ -5316,7 -5328,7 +5404,7 @@@ static int jit_subprogs(struct bpf_veri
  	void *old_bpf_func;
  	int err = -ENOMEM;
  
 -	if (env->subprog_cnt == 0)
 +	if (env->subprog_cnt <= 1)
  		return 0;
  
  	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
@@@ -5332,7 -5344,7 +5420,7 @@@
  		/* temporarily remember subprog id inside insn instead of
  		 * aux_data, since next loop will split up all insns into funcs
  		 */
 -		insn->off = subprog + 1;
 +		insn->off = subprog;
  		/* remember original imm in case JIT fails and fallback
  		 * to interpreter will be needed
  		 */
@@@ -5341,13 -5353,16 +5429,13 @@@
  		insn->imm = 1;
  	}
  
 -	func = kzalloc(sizeof(prog) * (env->subprog_cnt + 1), GFP_KERNEL);
 +	func = kzalloc(sizeof(prog) * env->subprog_cnt, GFP_KERNEL);
  	if (!func)
  		return -ENOMEM;
  
 -	for (i = 0; i <= env->subprog_cnt; i++) {
 +	for (i = 0; i < env->subprog_cnt; i++) {
  		subprog_start = subprog_end;
 -		if (env->subprog_cnt == i)
 -			subprog_end = prog->len;
 -		else
 -			subprog_end = env->subprog_starts[i];
 +		subprog_end = env->subprog_info[i + 1].start;
  
  		len = subprog_end - subprog_start;
  		func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
@@@ -5364,7 -5379,7 +5452,7 @@@
  		 * Long term would need debug info to populate names
  		 */
  		func[i]->aux->name[0] = 'F';
 -		func[i]->aux->stack_depth = env->subprog_stack_depth[i];
 +		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
  		func[i]->jit_requested = 1;
  		func[i] = bpf_int_jit_compile(func[i]);
  		if (!func[i]->jited) {
@@@ -5377,33 -5392,20 +5465,33 @@@
  	 * now populate all bpf_calls with correct addresses and
  	 * run last pass of JIT
  	 */
 -	for (i = 0; i <= env->subprog_cnt; i++) {
 +	for (i = 0; i < env->subprog_cnt; i++) {
  		insn = func[i]->insnsi;
  		for (j = 0; j < func[i]->len; j++, insn++) {
  			if (insn->code != (BPF_JMP | BPF_CALL) ||
  			    insn->src_reg != BPF_PSEUDO_CALL)
  				continue;
  			subprog = insn->off;
 -			insn->off = 0;
  			insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
  				func[subprog]->bpf_func -
  				__bpf_call_base;
  		}
 +
 +		/* we use the aux data to keep a list of the start addresses
 +		 * of the JITed images for each function in the program
 +		 *
 +		 * for some architectures, such as powerpc64, the imm field
 +		 * might not be large enough to hold the offset of the start
 +		 * address of the callee's JITed image from __bpf_call_base
 +		 *
 +		 * in such cases, we can lookup the start address of a callee
 +		 * by using its subprog id, available from the off field of
 +		 * the call instruction, as an index for this list
 +		 */
 +		func[i]->aux->func = func;
 +		func[i]->aux->func_cnt = env->subprog_cnt;
  	}
 -	for (i = 0; i <= env->subprog_cnt; i++) {
 +	for (i = 0; i < env->subprog_cnt; i++) {
  		old_bpf_func = func[i]->bpf_func;
  		tmp = bpf_int_jit_compile(func[i]);
  		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
@@@ -5417,7 -5419,7 +5505,7 @@@
  	/* finally lock prog and jit images for all functions and
  	 * populate kallsysm
  	 */
 -	for (i = 0; i <= env->subprog_cnt; i++) {
 +	for (i = 0; i < env->subprog_cnt; i++) {
  		bpf_prog_lock_ro(func[i]);
  		bpf_prog_kallsyms_add(func[i]);
  	}
@@@ -5427,21 -5429,26 +5515,21 @@@
  	 * later look the same as if they were interpreted only.
  	 */
  	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
 -		unsigned long addr;
 -
  		if (insn->code != (BPF_JMP | BPF_CALL) ||
  		    insn->src_reg != BPF_PSEUDO_CALL)
  			continue;
  		insn->off = env->insn_aux_data[i].call_imm;
  		subprog = find_subprog(env, i + insn->off + 1);
 -		addr  = (unsigned long)func[subprog + 1]->bpf_func;
 -		addr &= PAGE_MASK;
 -		insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
 -			    addr - __bpf_call_base;
 +		insn->imm = subprog;
  	}
  
  	prog->jited = 1;
  	prog->bpf_func = func[0]->bpf_func;
  	prog->aux->func = func;
 -	prog->aux->func_cnt = env->subprog_cnt + 1;
 +	prog->aux->func_cnt = env->subprog_cnt;
  	return 0;
  out_free:
 -	for (i = 0; i <= env->subprog_cnt; i++)
 +	for (i = 0; i < env->subprog_cnt; i++)
  		if (func[i])
  			bpf_jit_free(func[i]);
  	kfree(func);
@@@ -5498,6 -5505,7 +5586,7 @@@ static int fixup_bpf_calls(struct bpf_v
  	struct bpf_insn *insn = prog->insnsi;
  	const struct bpf_func_proto *fn;
  	const int insn_cnt = prog->len;
+ 	struct bpf_insn_aux_data *aux;
  	struct bpf_insn insn_buf[16];
  	struct bpf_prog *new_prog;
  	struct bpf_map *map_ptr;
@@@ -5544,25 -5552,6 +5633,25 @@@
  			continue;
  		}
  
 +		if (BPF_CLASS(insn->code) == BPF_LD &&
 +		    (BPF_MODE(insn->code) == BPF_ABS ||
 +		     BPF_MODE(insn->code) == BPF_IND)) {
 +			cnt = env->ops->gen_ld_abs(insn, insn_buf);
 +			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
 +				verbose(env, "bpf verifier is misconfigured\n");
 +				return -EINVAL;
 +			}
 +
 +			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
 +			if (!new_prog)
 +				return -ENOMEM;
 +
 +			delta    += cnt - 1;
 +			env->prog = prog = new_prog;
 +			insn      = new_prog->insnsi + i + delta;
 +			continue;
 +		}
 +
  		if (insn->code != (BPF_JMP | BPF_CALL))
  			continue;
  		if (insn->src_reg == BPF_PSEUDO_CALL)
@@@ -5591,19 -5580,22 +5680,22 @@@
  			insn->imm = 0;
  			insn->code = BPF_JMP | BPF_TAIL_CALL;
  
+ 			aux = &env->insn_aux_data[i + delta];
+ 			if (!bpf_map_ptr_unpriv(aux))
+ 				continue;
+ 
  			/* instead of changing every JIT dealing with tail_call
  			 * emit two extra insns:
  			 * if (index >= max_entries) goto out;
  			 * index &= array->index_mask;
  			 * to avoid out-of-bounds cpu speculation
  			 */
- 			map_ptr = env->insn_aux_data[i + delta].map_ptr;
- 			if (map_ptr == BPF_MAP_PTR_POISON) {
+ 			if (bpf_map_ptr_poisoned(aux)) {
  				verbose(env, "tail_call abusing map_ptr\n");
  				return -EINVAL;
  			}
- 			if (!map_ptr->unpriv_array)
- 				continue;
+ 
+ 			map_ptr = BPF_MAP_PTR(aux->map_state);
  			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
  						  map_ptr->max_entries, 2);
  			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
@@@ -5627,9 -5619,12 +5719,12 @@@
  		 */
  		if (prog->jit_requested && BITS_PER_LONG == 64 &&
  		    insn->imm == BPF_FUNC_map_lookup_elem) {
- 			map_ptr = env->insn_aux_data[i + delta].map_ptr;
- 			if (map_ptr == BPF_MAP_PTR_POISON ||
- 			    !map_ptr->ops->map_gen_lookup)
+ 			aux = &env->insn_aux_data[i + delta];
+ 			if (bpf_map_ptr_poisoned(aux))
+ 				goto patch_call_imm;
+ 
+ 			map_ptr = BPF_MAP_PTR(aux->map_state);
+ 			if (!map_ptr->ops->map_gen_lookup)
  				goto patch_call_imm;
  
  			cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
@@@ -5760,16 -5755,16 +5855,16 @@@ int bpf_check(struct bpf_prog **prog, u
  	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
  		env->strict_alignment = true;
  
 +	ret = replace_map_fd_with_map_ptr(env);
 +	if (ret < 0)
 +		goto skip_full_check;
 +
  	if (bpf_prog_is_dev_bound(env->prog->aux)) {
  		ret = bpf_prog_offload_verifier_prep(env);
  		if (ret)
 -			goto err_unlock;
 +			goto skip_full_check;
  	}
  
 -	ret = replace_map_fd_with_map_ptr(env);
 -	if (ret < 0)
 -		goto skip_full_check;
 -
  	env->explored_states = kcalloc(env->prog->len,
  				       sizeof(struct bpf_verifier_state_list *),
  				       GFP_USER);
@@@ -5840,7 -5835,7 +5935,7 @@@ skip_full_check
  err_release_maps:
  	if (!env->prog->aux->used_maps)
  		/* if we didn't copy map pointers into bpf_prog_info, release
 -		 * them now. Otherwise free_bpf_prog_info() will release them.
 +		 * them now. Otherwise free_used_maps() will release them.
  		 */
  		release_maps(env);
  	*prog = env->prog;
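
For unprivileged programs, the rewritten tail_call path above still emits the
same two extra instructions as before (a bounds check plus an AND with
index_mask); only the bookkeeping moved into map_state. A compilable model of
why the mask defeats out-of-bounds speculation, with illustrative names:

	#include <stdio.h>

	static unsigned int safe_index(unsigned int index,
				       unsigned int max_entries,
				       unsigned int index_mask)
	{
		if (index >= max_entries)
			return max_entries;	/* "goto out" */
		/* even if the branch above is mispredicted, the AND
		 * keeps any speculatively used index inside the array
		 */
		return index & index_mask;
	}

	int main(void)
	{
		/* 4 entries rounded up to a power of two -> mask 0x3 */
		printf("%u\n", safe_index(2, 4, 0x3));	/* 2 */
		printf("%u\n", safe_index(9, 4, 0x3));	/* 4: rejected */
		return 0;
	}
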
diff --combined net/batman-adv/multicast.c
index 36fd7b06c7cc,a35f597e8c8b..86725d792e15
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@@ -815,6 -815,9 +815,6 @@@ static int batadv_mcast_forw_mode_check
  	if (!atomic_read(&bat_priv->multicast_mode))
  		return -EINVAL;
  
 -	if (atomic_read(&bat_priv->mcast.num_disabled))
 -		return -EINVAL;
 -
  	switch (ntohs(ethhdr->h_proto)) {
  	case ETH_P_IP:
  		return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb,
@@@ -1180,23 -1183,33 +1180,23 @@@ static void batadv_mcast_tvlv_ogm_handl
  {
  	bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
  	u8 mcast_flags = BATADV_NO_FLAGS;
 -	bool orig_initialized;
  
  	if (orig_mcast_enabled && tvlv_value &&
  	    tvlv_value_len >= sizeof(mcast_flags))
  		mcast_flags = *(u8 *)tvlv_value;
  
 +	if (!orig_mcast_enabled) {
 +		mcast_flags |= BATADV_MCAST_WANT_ALL_IPV4;
 +		mcast_flags |= BATADV_MCAST_WANT_ALL_IPV6;
 +	}
 +
  	spin_lock_bh(&orig->mcast_handler_lock);
 -	orig_initialized = test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
 -				    &orig->capa_initialized);
  
 -	/* If mcast support is turned on decrease the disabled mcast node
 -	 * counter only if we had increased it for this node before. If this
 -	 * is a completely new orig_node no need to decrease the counter.
 -	 */
  	if (orig_mcast_enabled &&
  	    !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
 -		if (orig_initialized)
 -			atomic_dec(&bat_priv->mcast.num_disabled);
  		set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
 -	/* If mcast support is being switched off or if this is an initial
 -	 * OGM without mcast support then increase the disabled mcast
 -	 * node counter.
 -	 */
  	} else if (!orig_mcast_enabled &&
 -		   (test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities) ||
 -		    !orig_initialized)) {
 -		atomic_inc(&bat_priv->mcast.num_disabled);
 +		   test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
  		clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
  	}
  
@@@ -1523,7 -1536,7 +1523,7 @@@ out
  
  	if (!ret && primary_if)
  		*primary_if = hard_iface;
- 	else
+ 	else if (hard_iface)
  		batadv_hardif_put(hard_iface);
  
  	return ret;
@@@ -1582,6 -1595,10 +1582,6 @@@ void batadv_mcast_purge_orig(struct bat
  
  	spin_lock_bh(&orig->mcast_handler_lock);
  
 -	if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities) &&
 -	    test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized))
 -		atomic_dec(&bat_priv->mcast.num_disabled);
 -
  	batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
  	batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
  	batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
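
The batman-adv change above drops the global num_disabled counter: instead of
counting nodes without multicast support, a node whose OGM lacks the mcast
TVLV is simply treated as wanting all IPv4 and IPv6 multicast traffic. A
compilable model of that defaulting; the flag values are stand-ins for the
real BATADV_MCAST_* ones:

	#include <stdio.h>

	#define WANT_ALL_IPV4 0x01
	#define WANT_ALL_IPV6 0x02

	static unsigned char effective_flags(int orig_mcast_enabled,
					     unsigned char flags)
	{
		/* no TVLV announced: deliver everything to this node */
		if (!orig_mcast_enabled)
			flags |= WANT_ALL_IPV4 | WANT_ALL_IPV6;
		return flags;
	}

	int main(void)
	{
		printf("0x%02x\n", effective_flags(0, 0x00)); /* 0x03 */
		printf("0x%02x\n", effective_flags(1, 0x00)); /* 0x00 */
		return 0;
	}
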
diff --combined net/ipv4/fib_frontend.c
index 045c43a27c12,e66172aaf241..b69e2824c761
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@@ -354,6 -354,8 +354,6 @@@ static int __fib_validate_source(struc
  		fl4.fl4_dport = 0;
  	}
  
 -	trace_fib_validate_source(dev, &fl4);
 -
  	if (fib_lookup(net, &fl4, &res, 0))
  		goto last_resort;
  	if (res.type != RTN_UNICAST &&
@@@ -647,9 -649,7 +647,10 @@@ const struct nla_policy rtm_ipv4_policy
  	[RTA_ENCAP]		= { .type = NLA_NESTED },
  	[RTA_UID]		= { .type = NLA_U32 },
  	[RTA_MARK]		= { .type = NLA_U32 },
+ 	[RTA_TABLE]		= { .type = NLA_U32 },
 +	[RTA_IP_PROTO]		= { .type = NLA_U8 },
 +	[RTA_SPORT]		= { .type = NLA_U16 },
 +	[RTA_DPORT]		= { .type = NLA_U16 },
  };
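
The policy entries added above let userspace pass an L4 protocol and ports to
RTM_GETROUTE, so route lookups can take flow keys into account. A hedged
sketch of reading them after nla_parse(); example_parse_ports() is
illustrative, the attributes and nla_get_* helpers are real (ports travel in
network byte order):

	static void example_parse_ports(struct nlattr *tb[])
	{
		__be16 sport = 0, dport = 0;
		u8 ip_proto = 0;

		if (tb[RTA_IP_PROTO])
			ip_proto = nla_get_u8(tb[RTA_IP_PROTO]);
		if (tb[RTA_SPORT])
			sport = nla_get_be16(tb[RTA_SPORT]);
		if (tb[RTA_DPORT])
			dport = nla_get_be16(tb[RTA_DPORT]);

		pr_debug("proto %u sport %u dport %u\n",
			 ip_proto, ntohs(sport), ntohs(dport));
	}
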
  
  static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
diff --combined net/ipv4/ip_sockglue.c
index e0791faacb24,57bbb060faaf..fc32fdbeefa6
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@@ -47,8 -47,6 +47,8 @@@
  #include <linux/errqueue.h>
  #include <linux/uaccess.h>
  
 +#include <linux/bpfilter.h>
 +
  /*
   *	SOL_IP control messages.
   */
@@@ -507,8 -505,6 +507,6 @@@ int ip_recv_error(struct sock *sk, stru
  	int err;
  	int copied;
  
- 	WARN_ON_ONCE(sk->sk_family == AF_INET6);
- 
  	err = -EAGAIN;
  	skb = sock_dequeue_err_skb(sk);
  	if (!skb)
@@@ -1246,11 -1242,6 +1244,11 @@@ int ip_setsockopt(struct sock *sk, int 
  		return -ENOPROTOOPT;
  
  	err = do_ip_setsockopt(sk, level, optname, optval, optlen);
 +#ifdef CONFIG_BPFILTER
 +	if (optname >= BPFILTER_IPT_SO_SET_REPLACE &&
 +	    optname < BPFILTER_IPT_SET_MAX)
 +		err = bpfilter_ip_set_sockopt(sk, optname, optval, optlen);
 +#endif
  #ifdef CONFIG_NETFILTER
  	/* we need to exclude all possible ENOPROTOOPTs except default case */
  	if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
@@@ -1559,11 -1550,6 +1557,11 @@@ int ip_getsockopt(struct sock *sk, int 
  	int err;
  
  	err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
 +#ifdef CONFIG_BPFILTER
 +	if (optname >= BPFILTER_IPT_SO_GET_INFO &&
 +	    optname < BPFILTER_IPT_GET_MAX)
 +		err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
 +#endif
  #ifdef CONFIG_NETFILTER
  	/* we need to exclude all possible ENOPROTOOPTs except default case */
  	if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
@@@ -1596,11 -1582,6 +1594,11 @@@ int compat_ip_getsockopt(struct sock *s
  	err = do_ip_getsockopt(sk, level, optname, optval, optlen,
  		MSG_CMSG_COMPAT);
  
 +#ifdef CONFIG_BPFILTER
 +	if (optname >= BPFILTER_IPT_SO_GET_INFO &&
 +	    optname < BPFILTER_IPT_GET_MAX)
 +		err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
 +#endif
  #ifdef CONFIG_NETFILTER
  	/* we need to exclude all possible ENOPROTOOPTs except default case */
  	if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
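
The three hunks above route a contiguous range of IPT_SO_* option numbers to
bpfilter after the normal handler has run, mirroring the existing NETFILTER
fallthrough. A compilable model of range-based sockopt dispatch; the numeric
values are stand-ins for the BPFILTER_IPT_* constants:

	#include <stdio.h>

	#define SET_REPLACE	64	/* first bpfilter set option */
	#define SET_MAX		68	/* one past the last */

	static int ip_setsockopt_model(int optname)
	{
		int err = -92;	/* do_ip_setsockopt() said ENOPROTOOPT */

		if (optname >= SET_REPLACE && optname < SET_MAX)
			err = 0;	/* bpfilter handler would run here */
		return err;
	}

	int main(void)
	{
		printf("%d\n", ip_setsockopt_model(65));	/* handled: 0 */
		printf("%d\n", ip_setsockopt_model(1));		/* untouched */
		return 0;
	}
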
diff --combined net/packet/af_packet.c
index 2cc98c763003,acb7b86574cd..b00aa959727d
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@@ -209,7 -209,7 +209,7 @@@ static void prb_clear_rxhash(struct tpa
  static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
  		struct tpacket3_hdr *);
  static void packet_flush_mclist(struct sock *sk);
 -static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb);
 +static u16 packet_pick_tx_queue(struct sk_buff *skb);
  
  struct packet_skb_cb {
  	union {
@@@ -243,7 -243,40 +243,7 @@@ static void __fanout_link(struct sock *
  
  static int packet_direct_xmit(struct sk_buff *skb)
  {
 -	struct net_device *dev = skb->dev;
 -	struct sk_buff *orig_skb = skb;
 -	struct netdev_queue *txq;
 -	int ret = NETDEV_TX_BUSY;
 -	bool again = false;
 -
 -	if (unlikely(!netif_running(dev) ||
 -		     !netif_carrier_ok(dev)))
 -		goto drop;
 -
 -	skb = validate_xmit_skb_list(skb, dev, &again);
 -	if (skb != orig_skb)
 -		goto drop;
 -
 -	packet_pick_tx_queue(dev, skb);
 -	txq = skb_get_tx_queue(dev, skb);
 -
 -	local_bh_disable();
 -
 -	HARD_TX_LOCK(dev, txq, smp_processor_id());
 -	if (!netif_xmit_frozen_or_drv_stopped(txq))
 -		ret = netdev_start_xmit(skb, dev, txq, false);
 -	HARD_TX_UNLOCK(dev, txq);
 -
 -	local_bh_enable();
 -
 -	if (!dev_xmit_complete(ret))
 -		kfree_skb(skb);
 -
 -	return ret;
 -drop:
 -	atomic_long_inc(&dev->tx_dropped);
 -	kfree_skb_list(skb);
 -	return NET_XMIT_DROP;
 +	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
  }
  
  static struct net_device *packet_cached_dev_get(struct packet_sock *po)
@@@ -280,9 -313,8 +280,9 @@@ static u16 __packet_pick_tx_queue(struc
  	return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
  }
  
 -static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
 +static u16 packet_pick_tx_queue(struct sk_buff *skb)
  {
 +	struct net_device *dev = skb->dev;
  	const struct net_device_ops *ops = dev->netdev_ops;
  	u16 queue_index;
  
@@@ -294,7 -326,7 +294,7 @@@
  		queue_index = __packet_pick_tx_queue(dev, skb);
  	}
  
 -	skb_set_queue_mapping(skb, queue_index);
 +	return queue_index;
  }
  
  /* __register_prot_hook must be invoked through register_prot_hook
@@@ -2879,7 -2911,7 +2879,7 @@@ static int packet_snd(struct socket *so
  		if (unlikely(offset < 0))
  			goto out_free;
  	} else if (reserve) {
- 		skb_push(skb, reserve);
+ 		skb_reserve(skb, -reserve);
  	}
  
  	/* Returns -EFAULT on error */
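
The af_packet rewrite above deletes the driver-locking boilerplate from
packet_direct_xmit() in favour of the new core helper, with the queue picked
once up front. A hedged sketch of the resulting shape; dev_direct_xmit() is
the real helper added in this cycle, the wrapper name is illustrative:

	static int direct_xmit_sketch(struct sk_buff *skb)
	{
		u16 queue = packet_pick_tx_queue(skb);	/* ndo hook or
							 * cpu fallback */

		/* validates the skb, takes the txq lock, transmits,
		 * and frees on failure -- everything the old code
		 * open-coded
		 */
		return dev_direct_xmit(skb, queue);
	}
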
diff --combined net/sched/cls_api.c
index a4a5ace834c3,a57e112d9b3e..76303c45db19
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@@ -103,10 -103,9 +103,10 @@@ int unregister_tcf_proto_ops(struct tcf
  }
  EXPORT_SYMBOL(unregister_tcf_proto_ops);
  
 -bool tcf_queue_work(struct work_struct *work)
 +bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
  {
 -	return queue_work(tc_filter_wq, work);
 +	INIT_RCU_WORK(rwork, func);
 +	return queue_rcu_work(tc_filter_wq, rwork);
  }
  EXPORT_SYMBOL(tcf_queue_work);
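
tcf_queue_work() above now wraps the rcu_work machinery: INIT_RCU_WORK() plus
queue_rcu_work() run the function only after an RCU grace period has elapsed,
replacing the classifiers' open-coded call_rcu()-then-schedule-work pairs. A
hedged sketch of a caller; my_filter is illustrative, to_rcu_work() and the
rest are the real API:

	struct my_filter {
		struct rcu_work rwork;
		/* ... filter state ... */
	};

	static void my_filter_free_work(struct work_struct *work)
	{
		struct my_filter *f = container_of(to_rcu_work(work),
						   struct my_filter, rwork);

		kfree(f);	/* safe: a grace period has elapsed */
	}

	/* on delete: tcf_queue_work(&f->rwork, my_filter_free_work); */
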
  
@@@ -1589,7 -1588,7 +1589,7 @@@ int tc_setup_cb_call(struct tcf_block *
  		return ret;
  	ok_count = ret;
  
- 	if (!exts)
+ 	if (!exts || ok_count)
  		return ok_count;
  	ret = tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop);
  	if (ret < 0)
diff --combined net/sctp/socket.c
index 1b4593b842b0,ae7e7c606f72..ce620e878538
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@@ -644,15 -644,16 +644,15 @@@ static int sctp_send_asconf_add_ip(stru
  
  			list_for_each_entry(trans,
  			    &asoc->peer.transport_addr_list, transports) {
 -				/* Clear the source and route cache */
 -				sctp_transport_dst_release(trans);
  				trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
  				    2*asoc->pathmtu, 4380));
  				trans->ssthresh = asoc->peer.i.a_rwnd;
  				trans->rto = asoc->rto_initial;
  				sctp_max_rto(asoc, trans);
  				trans->rtt = trans->srtt = trans->rttvar = 0;
 +				/* Clear the source and route cache */
  				sctp_transport_route(trans, NULL,
 -				    sctp_sk(asoc->base.sk));
 +						     sctp_sk(asoc->base.sk));
  			}
  		}
  		retval = sctp_send_asconf(asoc, chunk);
@@@ -895,6 -896,7 +895,6 @@@ skip_mkasconf
  		 */
  		list_for_each_entry(transport, &asoc->peer.transport_addr_list,
  					transports) {
 -			sctp_transport_dst_release(transport);
  			sctp_transport_route(transport, NULL,
  					     sctp_sk(asoc->base.sk));
  		}
@@@ -1084,7 -1086,7 +1084,7 @@@ out
   */
  static int __sctp_connect(struct sock *sk,
  			  struct sockaddr *kaddrs,
- 			  int addrs_size,
+ 			  int addrs_size, int flags,
  			  sctp_assoc_t *assoc_id)
  {
  	struct net *net = sock_net(sk);
@@@ -1102,7 -1104,6 +1102,6 @@@
  	union sctp_addr *sa_addr = NULL;
  	void *addr_buf;
  	unsigned short port;
- 	unsigned int f_flags = 0;
  
  	sp = sctp_sk(sk);
  	ep = sp->ep;
@@@ -1252,13 -1253,7 +1251,7 @@@
  	sp->pf->to_sk_daddr(sa_addr, sk);
  	sk->sk_err = 0;
  
- 	/* in-kernel sockets don't generally have a file allocated to them
- 	 * if all they do is call sock_create_kern().
- 	 */
- 	if (sk->sk_socket->file)
- 		f_flags = sk->sk_socket->file->f_flags;
- 
- 	timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
+ 	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
  
  	if (assoc_id)
  		*assoc_id = asoc->assoc_id;
@@@ -1346,7 -1341,7 +1339,7 @@@ static int __sctp_setsockopt_connectx(s
  				      sctp_assoc_t *assoc_id)
  {
  	struct sockaddr *kaddrs;
- 	int err = 0;
+ 	int err = 0, flags = 0;
  
  	pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
  		 __func__, sk, addrs, addrs_size);
@@@ -1365,7 -1360,13 +1358,13 @@@
  	if (err)
  		goto out_free;
  
- 	err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
+ 	/* in-kernel sockets don't generally have a file allocated to them
+ 	 * if all they do is call sock_create_kern().
+ 	 */
+ 	if (sk->sk_socket->file)
+ 		flags = sk->sk_socket->file->f_flags;
+ 
+ 	err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);
  
  out_free:
  	kvfree(kaddrs);
@@@ -1893,7 -1894,6 +1892,7 @@@ static int sctp_sendmsg_to_asoc(struct 
  				struct sctp_sndrcvinfo *sinfo)
  {
  	struct sock *sk = asoc->base.sk;
 +	struct sctp_sock *sp = sctp_sk(sk);
  	struct net *net = sock_net(sk);
  	struct sctp_datamsg *datamsg;
  	bool wait_connect = false;
@@@ -1912,16 -1912,13 +1911,16 @@@
  			goto err;
  	}
  
 -	if (sctp_sk(sk)->disable_fragments && msg_len > asoc->frag_point) {
 +	if (sp->disable_fragments && msg_len > asoc->frag_point) {
  		err = -EMSGSIZE;
  		goto err;
  	}
  
 -	if (asoc->pmtu_pending)
 -		sctp_assoc_pending_pmtu(asoc);
 +	if (asoc->pmtu_pending) {
 +		if (sp->param_flags & SPP_PMTUD_ENABLE)
 +			sctp_assoc_sync_pmtu(asoc);
 +		asoc->pmtu_pending = 0;
 +	}
  
  	if (sctp_wspace(asoc) < msg_len)
  		sctp_prsctp_prune(asoc, sinfo, msg_len - sctp_wspace(asoc));
@@@ -1938,7 -1935,7 +1937,7 @@@
  		if (err)
  			goto err;
  
 -		if (sctp_sk(sk)->strm_interleave) {
 +		if (sp->strm_interleave) {
  			timeo = sock_sndtimeo(sk, 0);
  			err = sctp_wait_for_connect(asoc, &timeo);
  			if (err)
@@@ -2541,7 -2538,7 +2540,7 @@@ static int sctp_apply_peer_addr_params(
  			trans->pathmtu = params->spp_pathmtu;
  			sctp_assoc_sync_pmtu(asoc);
  		} else if (asoc) {
 -			asoc->pathmtu = params->spp_pathmtu;
 +			sctp_assoc_set_pmtu(asoc, params->spp_pathmtu);
  		} else {
  			sp->pathmtu = params->spp_pathmtu;
  		}
@@@ -3211,6 -3208,7 +3210,6 @@@ static int sctp_setsockopt_mappedv4(str
  static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
  {
  	struct sctp_sock *sp = sctp_sk(sk);
 -	struct sctp_af *af = sp->pf->af;
  	struct sctp_assoc_value params;
  	struct sctp_association *asoc;
  	int val;
@@@ -3232,24 -3230,30 +3231,24 @@@
  		return -EINVAL;
  	}
  
 +	asoc = sctp_id2assoc(sk, params.assoc_id);
 +
  	if (val) {
  		int min_len, max_len;
 +		__u16 datasize = asoc ? sctp_datachk_len(&asoc->stream) :
 +				 sizeof(struct sctp_data_chunk);
  
 -		min_len = SCTP_DEFAULT_MINSEGMENT - af->net_header_len;
 -		min_len -= af->ip_options_len(sk);
 -		min_len -= sizeof(struct sctphdr) +
 -			   sizeof(struct sctp_data_chunk);
 -
 -		max_len = SCTP_MAX_CHUNK_LEN - sizeof(struct sctp_data_chunk);
 +		min_len = sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT,
 +					   datasize);
 +		max_len = SCTP_MAX_CHUNK_LEN - datasize;
  
  		if (val < min_len || val > max_len)
  			return -EINVAL;
  	}
  
 -	asoc = sctp_id2assoc(sk, params.assoc_id);
  	if (asoc) {
 -		if (val == 0) {
 -			val = asoc->pathmtu - af->net_header_len;
 -			val -= af->ip_options_len(sk);
 -			val -= sizeof(struct sctphdr) +
 -			       sctp_datachk_len(&asoc->stream);
 -		}
  		asoc->user_frag = val;
 -		asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
 +		sctp_assoc_update_frag_point(asoc);
  	} else {
  		if (params.assoc_id && sctp_style(sk, UDP))
  			return -EINVAL;
@@@ -4392,16 -4396,26 +4391,26 @@@ out_nounlock
   * len: the size of the address.
   */
  static int sctp_connect(struct sock *sk, struct sockaddr *addr,
- 			int addr_len)
+ 			int addr_len, int flags)
  {
- 	int err = 0;
+ 	struct inet_sock *inet = inet_sk(sk);
  	struct sctp_af *af;
+ 	int err = 0;
  
  	lock_sock(sk);
  
  	pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
  		 addr, addr_len);
  
+ 	/* We may need to bind the socket. */
+ 	if (!inet->inet_num) {
+ 		if (sk->sk_prot->get_port(sk, 0)) {
+ 			release_sock(sk);
+ 			return -EAGAIN;
+ 		}
+ 		inet->inet_sport = htons(inet->inet_num);
+ 	}
+ 
  	/* Validate addr_len before calling common connect/connectx routine. */
  	af = sctp_get_af_specific(addr->sa_family);
  	if (!af || addr_len < af->sockaddr_len) {
@@@ -4410,13 -4424,25 +4419,25 @@@
  		/* Pass correct addr len to common routine (so it knows there
  		 * is only one address being passed.
  		 */
- 		err = __sctp_connect(sk, addr, af->sockaddr_len, NULL);
+ 		err = __sctp_connect(sk, addr, af->sockaddr_len, flags, NULL);
  	}
  
  	release_sock(sk);
  	return err;
  }
  
+ int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
+ 		      int addr_len, int flags)
+ {
+ 	if (addr_len < sizeof(uaddr->sa_family))
+ 		return -EINVAL;
+ 
+ 	if (uaddr->sa_family == AF_UNSPEC)
+ 		return -EOPNOTSUPP;
+ 
+ 	return sctp_connect(sock->sk, uaddr, addr_len, flags);
+ }
+ 
  /* FIXME: Write comments. */
  static int sctp_disconnect(struct sock *sk, int flags)
  {
@@@ -8719,7 -8745,6 +8740,6 @@@ struct proto sctp_prot = 
  	.name        =	"SCTP",
  	.owner       =	THIS_MODULE,
  	.close       =	sctp_close,
- 	.connect     =	sctp_connect,
  	.disconnect  =	sctp_disconnect,
  	.accept      =	sctp_accept,
  	.ioctl       =	sctp_ioctl,
@@@ -8762,7 -8787,6 +8782,6 @@@ struct proto sctpv6_prot = 
  	.name		= "SCTPv6",
  	.owner		= THIS_MODULE,
  	.close		= sctp_close,
- 	.connect	= sctp_connect,
  	.disconnect	= sctp_disconnect,
  	.accept		= sctp_accept,
  	.ioctl		= sctp_ioctl,
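
The sctp/socket.c changes read O_NONBLOCK once at the entry points (the
connectx setsockopt path and the new sctp_inet_connect() wrapper that
replaces the .connect members dropped from sctp_prot and sctpv6_prot)
and plumb it down to __sctp_connect() as 'flags', rather than
dereferencing sk->sk_socket->file deep in the connect path.  A hedged
sketch of that boundary pattern (the helper name is invented):

#include <linux/fs.h>
#include <net/sock.h>

static long my_connect_timeo(struct sock *sk)
{
	int flags = 0;

	/* in-kernel sockets made by sock_create_kern() have no file */
	if (sk->sk_socket->file)
		flags = sk->sk_socket->file->f_flags;

	/* 0 for O_NONBLOCK callers, else the socket's send timeout */
	return sock_sndtimeo(sk, flags & O_NONBLOCK);
}
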
diff --combined net/wireless/nl80211.c
index bc40a783cb27,7c5135a92d76..07514ca011b2
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@@ -4,7 -4,6 +4,7 @@@
   * Copyright 2006-2010	Johannes Berg <johannes at sipsolutions.net>
   * Copyright 2013-2014  Intel Mobile Communications GmbH
   * Copyright 2015-2017	Intel Deutschland GmbH
 + * Copyright (C) 2018 Intel Corporation
   */
  
  #include <linux/if.h>
@@@ -424,10 -423,6 +424,10 @@@ static const struct nla_policy nl80211_
  	[NL80211_ATTR_PMK] = { .type = NLA_BINARY, .len = PMK_MAX_LEN },
  	[NL80211_ATTR_SCHED_SCAN_MULTI] = { .type = NLA_FLAG },
  	[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT] = { .type = NLA_FLAG },
 +
 +	[NL80211_ATTR_TXQ_LIMIT] = { .type = NLA_U32 },
 +	[NL80211_ATTR_TXQ_MEMORY_LIMIT] = { .type = NLA_U32 },
 +	[NL80211_ATTR_TXQ_QUANTUM] = { .type = NLA_U32 },
  };
  
  /* policy for the key attributes */
@@@ -650,43 -645,7 +650,43 @@@ static inline void *nl80211hdr_put(stru
  	return genlmsg_put(skb, portid, seq, &nl80211_fam, flags, cmd);
  }
  
 -static int nl80211_msg_put_channel(struct sk_buff *msg,
 +static int nl80211_msg_put_wmm_rules(struct sk_buff *msg,
 +				     const struct ieee80211_reg_rule *rule)
 +{
 +	int j;
 +	struct nlattr *nl_wmm_rules =
 +		nla_nest_start(msg, NL80211_FREQUENCY_ATTR_WMM);
 +
 +	if (!nl_wmm_rules)
 +		goto nla_put_failure;
 +
 +	for (j = 0; j < IEEE80211_NUM_ACS; j++) {
 +		struct nlattr *nl_wmm_rule = nla_nest_start(msg, j);
 +
 +		if (!nl_wmm_rule)
 +			goto nla_put_failure;
 +
 +		if (nla_put_u16(msg, NL80211_WMMR_CW_MIN,
 +				rule->wmm_rule->client[j].cw_min) ||
 +		    nla_put_u16(msg, NL80211_WMMR_CW_MAX,
 +				rule->wmm_rule->client[j].cw_max) ||
 +		    nla_put_u8(msg, NL80211_WMMR_AIFSN,
 +			       rule->wmm_rule->client[j].aifsn) ||
 +		    nla_put_u8(msg, NL80211_WMMR_TXOP,
 +			       rule->wmm_rule->client[j].cot))
 +			goto nla_put_failure;
 +
 +		nla_nest_end(msg, nl_wmm_rule);
 +	}
 +	nla_nest_end(msg, nl_wmm_rules);
 +
 +	return 0;
 +
 +nla_put_failure:
 +	return -ENOBUFS;
 +}
 +
 +static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy,
  				   struct ieee80211_channel *chan,
  				   bool large)
  {
@@@ -762,55 -721,12 +762,55 @@@
  			DBM_TO_MBM(chan->max_power)))
  		goto nla_put_failure;
  
 +	if (large) {
 +		const struct ieee80211_reg_rule *rule =
 +			freq_reg_info(wiphy, chan->center_freq);
 +
 +		if (!IS_ERR(rule) && rule->wmm_rule) {
 +			if (nl80211_msg_put_wmm_rules(msg, rule))
 +				goto nla_put_failure;
 +		}
 +	}
 +
  	return 0;
  
   nla_put_failure:
  	return -ENOBUFS;
  }
  
 +static bool nl80211_put_txq_stats(struct sk_buff *msg,
 +				  struct cfg80211_txq_stats *txqstats,
 +				  int attrtype)
 +{
 +	struct nlattr *txqattr;
 +
 +#define PUT_TXQVAL_U32(attr, memb) do {					  \
 +	if (txqstats->filled & BIT(NL80211_TXQ_STATS_ ## attr) &&	  \
 +	    nla_put_u32(msg, NL80211_TXQ_STATS_ ## attr, txqstats->memb)) \
 +		return false;						  \
 +	} while (0)
 +
 +	txqattr = nla_nest_start(msg, attrtype);
 +	if (!txqattr)
 +		return false;
 +
 +	PUT_TXQVAL_U32(BACKLOG_BYTES, backlog_bytes);
 +	PUT_TXQVAL_U32(BACKLOG_PACKETS, backlog_packets);
 +	PUT_TXQVAL_U32(FLOWS, flows);
 +	PUT_TXQVAL_U32(DROPS, drops);
 +	PUT_TXQVAL_U32(ECN_MARKS, ecn_marks);
 +	PUT_TXQVAL_U32(OVERLIMIT, overlimit);
 +	PUT_TXQVAL_U32(OVERMEMORY, overmemory);
 +	PUT_TXQVAL_U32(COLLISIONS, collisions);
 +	PUT_TXQVAL_U32(TX_BYTES, tx_bytes);
 +	PUT_TXQVAL_U32(TX_PACKETS, tx_packets);
 +	PUT_TXQVAL_U32(MAX_FLOWS, max_flows);
 +	nla_nest_end(msg, txqattr);
 +
 +#undef PUT_TXQVAL_U32
 +	return true;
 +}
 +
  /* netlink command implementations */
  
  struct key_parse {
@@@ -1715,7 -1631,7 +1715,7 @@@ static int nl80211_send_wiphy(struct cf
  					chan = &sband->channels[i];
  
  					if (nl80211_msg_put_channel(
 -							msg, chan,
 +							msg, &rdev->wiphy, chan,
  							state->split))
  						goto nla_put_failure;
  
@@@ -2010,28 -1926,6 +2010,28 @@@
  				rdev->wiphy.nan_supported_bands))
  			goto nla_put_failure;
  
 +		if (wiphy_ext_feature_isset(&rdev->wiphy,
 +					    NL80211_EXT_FEATURE_TXQS)) {
 +			struct cfg80211_txq_stats txqstats = {};
 +			int res;
 +
 +			res = rdev_get_txq_stats(rdev, NULL, &txqstats);
 +			if (!res &&
 +			    !nl80211_put_txq_stats(msg, &txqstats,
 +						   NL80211_ATTR_TXQ_STATS))
 +				goto nla_put_failure;
 +
 +			if (nla_put_u32(msg, NL80211_ATTR_TXQ_LIMIT,
 +					rdev->wiphy.txq_limit))
 +				goto nla_put_failure;
 +			if (nla_put_u32(msg, NL80211_ATTR_TXQ_MEMORY_LIMIT,
 +					rdev->wiphy.txq_memory_limit))
 +				goto nla_put_failure;
 +			if (nla_put_u32(msg, NL80211_ATTR_TXQ_QUANTUM,
 +					rdev->wiphy.txq_quantum))
 +				goto nla_put_failure;
 +		}
 +
  		/* done */
  		state->split_start = 0;
  		break;
@@@ -2409,7 -2303,6 +2409,7 @@@ static int nl80211_set_wiphy(struct sk_
  	u8 retry_short = 0, retry_long = 0;
  	u32 frag_threshold = 0, rts_threshold = 0;
  	u8 coverage_class = 0;
 +	u32 txq_limit = 0, txq_memory_limit = 0, txq_quantum = 0;
  
  	ASSERT_RTNL();
  
@@@ -2616,38 -2509,10 +2616,38 @@@
  		changed |= WIPHY_PARAM_DYN_ACK;
  	}
  
 +	if (info->attrs[NL80211_ATTR_TXQ_LIMIT]) {
 +		if (!wiphy_ext_feature_isset(&rdev->wiphy,
 +					     NL80211_EXT_FEATURE_TXQS))
 +			return -EOPNOTSUPP;
 +		txq_limit = nla_get_u32(
 +			info->attrs[NL80211_ATTR_TXQ_LIMIT]);
 +		changed |= WIPHY_PARAM_TXQ_LIMIT;
 +	}
 +
 +	if (info->attrs[NL80211_ATTR_TXQ_MEMORY_LIMIT]) {
 +		if (!wiphy_ext_feature_isset(&rdev->wiphy,
 +					     NL80211_EXT_FEATURE_TXQS))
 +			return -EOPNOTSUPP;
 +		txq_memory_limit = nla_get_u32(
 +			info->attrs[NL80211_ATTR_TXQ_MEMORY_LIMIT]);
 +		changed |= WIPHY_PARAM_TXQ_MEMORY_LIMIT;
 +	}
 +
 +	if (info->attrs[NL80211_ATTR_TXQ_QUANTUM]) {
 +		if (!wiphy_ext_feature_isset(&rdev->wiphy,
 +					     NL80211_EXT_FEATURE_TXQS))
 +			return -EOPNOTSUPP;
 +		txq_quantum = nla_get_u32(
 +			info->attrs[NL80211_ATTR_TXQ_QUANTUM]);
 +		changed |= WIPHY_PARAM_TXQ_QUANTUM;
 +	}
 +
  	if (changed) {
  		u8 old_retry_short, old_retry_long;
  		u32 old_frag_threshold, old_rts_threshold;
  		u8 old_coverage_class;
 +		u32 old_txq_limit, old_txq_memory_limit, old_txq_quantum;
  
  		if (!rdev->ops->set_wiphy_params)
  			return -EOPNOTSUPP;
@@@ -2657,9 -2522,6 +2657,9 @@@
  		old_frag_threshold = rdev->wiphy.frag_threshold;
  		old_rts_threshold = rdev->wiphy.rts_threshold;
  		old_coverage_class = rdev->wiphy.coverage_class;
 +		old_txq_limit = rdev->wiphy.txq_limit;
 +		old_txq_memory_limit = rdev->wiphy.txq_memory_limit;
 +		old_txq_quantum = rdev->wiphy.txq_quantum;
  
  		if (changed & WIPHY_PARAM_RETRY_SHORT)
  			rdev->wiphy.retry_short = retry_short;
@@@ -2671,12 -2533,6 +2671,12 @@@
  			rdev->wiphy.rts_threshold = rts_threshold;
  		if (changed & WIPHY_PARAM_COVERAGE_CLASS)
  			rdev->wiphy.coverage_class = coverage_class;
 +		if (changed & WIPHY_PARAM_TXQ_LIMIT)
 +			rdev->wiphy.txq_limit = txq_limit;
 +		if (changed & WIPHY_PARAM_TXQ_MEMORY_LIMIT)
 +			rdev->wiphy.txq_memory_limit = txq_memory_limit;
 +		if (changed & WIPHY_PARAM_TXQ_QUANTUM)
 +			rdev->wiphy.txq_quantum = txq_quantum;
  
  		result = rdev_set_wiphy_params(rdev, changed);
  		if (result) {
@@@ -2685,9 -2541,6 +2685,9 @@@
  			rdev->wiphy.frag_threshold = old_frag_threshold;
  			rdev->wiphy.rts_threshold = old_rts_threshold;
  			rdev->wiphy.coverage_class = old_coverage_class;
 +			rdev->wiphy.txq_limit = old_txq_limit;
 +			rdev->wiphy.txq_memory_limit = old_txq_memory_limit;
 +			rdev->wiphy.txq_quantum = old_txq_quantum;
  			return result;
  		}
  	}
@@@ -2809,16 -2662,6 +2809,16 @@@ static int nl80211_send_iface(struct sk
  	}
  	wdev_unlock(wdev);
  
 +	if (rdev->ops->get_txq_stats) {
 +		struct cfg80211_txq_stats txqstats = {};
 +		int ret = rdev_get_txq_stats(rdev, wdev, &txqstats);
 +
 +		if (ret == 0 &&
 +		    !nl80211_put_txq_stats(msg, &txqstats,
 +					   NL80211_ATTR_TXQ_STATS))
 +			goto nla_put_failure;
 +	}
 +
  	genlmsg_end(msg, hdr);
  	return 0;
  
@@@ -4651,14 -4494,11 +4651,14 @@@ static int nl80211_send_station(struct 
  	PUT_SINFO_U64(BEACON_RX, rx_beacon);
  	PUT_SINFO(BEACON_SIGNAL_AVG, rx_beacon_signal_avg, u8);
  	PUT_SINFO(ACK_SIGNAL, ack_signal, u8);
 +	if (wiphy_ext_feature_isset(&rdev->wiphy,
 +				    NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT))
 +		PUT_SINFO(DATA_ACK_SIGNAL_AVG, avg_ack_signal, s8);
  
  #undef PUT_SINFO
  #undef PUT_SINFO_U64
  
 -	if (sinfo->filled & BIT(NL80211_STA_INFO_TID_STATS)) {
 +	if (sinfo->pertid) {
  		struct nlattr *tidsattr;
  		int tid;
  
@@@ -4692,12 -4532,6 +4692,12 @@@
  			PUT_TIDVAL_U64(TX_MSDU_FAILED, tx_msdu_failed);
  
  #undef PUT_TIDVAL_U64
 +			if ((tidstats->filled &
 +			     BIT(NL80211_TID_STATS_TXQ_STATS)) &&
 +			    !nl80211_put_txq_stats(msg, &tidstats->txq_stats,
 +						   NL80211_TID_STATS_TXQ_STATS))
 +				goto nla_put_failure;
 +
  			nla_nest_end(msg, tidattr);
  		}
  
@@@ -4711,12 -4545,10 +4711,12 @@@
  		    sinfo->assoc_req_ies))
  		goto nla_put_failure;
  
 +	cfg80211_sinfo_release_content(sinfo);
  	genlmsg_end(msg, hdr);
  	return 0;
  
   nla_put_failure:
 +	cfg80211_sinfo_release_content(sinfo);
  	genlmsg_cancel(msg, hdr);
  	return -EMSGSIZE;
  }
@@@ -4798,10 -4630,8 +4798,10 @@@ static int nl80211_get_station(struct s
  		return err;
  
  	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 -	if (!msg)
 +	if (!msg) {
 +		cfg80211_sinfo_release_content(&sinfo);
  		return -ENOMEM;
 +	}
  
  	if (nl80211_send_station(msg, NL80211_CMD_NEW_STATION,
  				 info->snd_portid, info->snd_seq, 0,
@@@ -8100,15 -7930,7 +8100,15 @@@ static int nl80211_dump_scan(struct sk_
  
  	wdev_lock(wdev);
  	spin_lock_bh(&rdev->bss_lock);
 -	cfg80211_bss_expire(rdev);
 +
 +	/*
 +	 * dump_scan will be called multiple times to break up the scan results
 +	 * into multiple messages.  It is unlikely that any more bss-es will be
 +	 * expired after the first call, so only call this on the
 +	 * first dump_scan invocation.
 +	 */
 +	if (start == 0)
 +		cfg80211_bss_expire(rdev);
  
  	cb->seq = rdev->bss_generation;
  
@@@ -8514,10 -8336,6 +8514,10 @@@ static int nl80211_associate(struct sk_
  	const u8 *bssid, *ssid;
  	int err, ssid_len = 0;
  
 +	if (dev->ieee80211_ptr->conn_owner_nlportid &&
 +	    dev->ieee80211_ptr->conn_owner_nlportid != info->snd_portid)
 +		return -EPERM;
 +
  	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
  		return -EINVAL;
  
@@@ -8640,10 -8458,6 +8640,10 @@@ static int nl80211_deauthenticate(struc
  	u16 reason_code;
  	bool local_state_change;
  
 +	if (dev->ieee80211_ptr->conn_owner_nlportid &&
 +	    dev->ieee80211_ptr->conn_owner_nlportid != info->snd_portid)
 +		return -EPERM;
 +
  	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
  		return -EINVAL;
  
@@@ -8691,10 -8505,6 +8691,10 @@@ static int nl80211_disassociate(struct 
  	u16 reason_code;
  	bool local_state_change;
  
 +	if (dev->ieee80211_ptr->conn_owner_nlportid &&
 +	    dev->ieee80211_ptr->conn_owner_nlportid != info->snd_portid)
 +		return -EPERM;
 +
  	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
  		return -EINVAL;
  
@@@ -9441,8 -9251,6 +9441,8 @@@ static int nl80211_update_connect_param
  	struct cfg80211_registered_device *rdev = info->user_ptr[0];
  	struct net_device *dev = info->user_ptr[1];
  	struct wireless_dev *wdev = dev->ieee80211_ptr;
 +	bool fils_sk_offload;
 +	u32 auth_type;
  	u32 changed = 0;
  	int ret;
  
@@@ -9457,56 -9265,6 +9457,56 @@@
  		changed |= UPDATE_ASSOC_IES;
  	}
  
 +	fils_sk_offload = wiphy_ext_feature_isset(&rdev->wiphy,
 +						  NL80211_EXT_FEATURE_FILS_SK_OFFLOAD);
 +
 +	/*
 +	 * When the driver supports FILS-SK offload, all attributes must
 +	 * be provided; the else branch then covers "fils-sk-not-all"
 +	 * and "no-fils-sk-any".
 +	 */
 +	if (fils_sk_offload &&
 +	    info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] &&
 +	    info->attrs[NL80211_ATTR_FILS_ERP_REALM] &&
 +	    info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] &&
 +	    info->attrs[NL80211_ATTR_FILS_ERP_RRK]) {
 +		connect.fils_erp_username =
 +			nla_data(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]);
 +		connect.fils_erp_username_len =
 +			nla_len(info->attrs[NL80211_ATTR_FILS_ERP_USERNAME]);
 +		connect.fils_erp_realm =
 +			nla_data(info->attrs[NL80211_ATTR_FILS_ERP_REALM]);
 +		connect.fils_erp_realm_len =
 +			nla_len(info->attrs[NL80211_ATTR_FILS_ERP_REALM]);
 +		connect.fils_erp_next_seq_num =
 +			nla_get_u16(
 +			   info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM]);
 +		connect.fils_erp_rrk =
 +			nla_data(info->attrs[NL80211_ATTR_FILS_ERP_RRK]);
 +		connect.fils_erp_rrk_len =
 +			nla_len(info->attrs[NL80211_ATTR_FILS_ERP_RRK]);
 +		changed |= UPDATE_FILS_ERP_INFO;
 +	} else if (info->attrs[NL80211_ATTR_FILS_ERP_USERNAME] ||
 +		   info->attrs[NL80211_ATTR_FILS_ERP_REALM] ||
 +		   info->attrs[NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM] ||
 +		   info->attrs[NL80211_ATTR_FILS_ERP_RRK]) {
 +		return -EINVAL;
 +	}
 +
 +	if (info->attrs[NL80211_ATTR_AUTH_TYPE]) {
 +		auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
 +		if (!nl80211_valid_auth_type(rdev, auth_type,
 +					     NL80211_CMD_CONNECT))
 +			return -EINVAL;
 +
 +		if (auth_type == NL80211_AUTHTYPE_FILS_SK &&
 +		    fils_sk_offload && !(changed & UPDATE_FILS_ERP_INFO))
 +			return -EINVAL;
 +
 +		connect.auth_type = auth_type;
 +		changed |= UPDATE_AUTH_TYPE;
 +	}
 +
  	wdev_lock(dev->ieee80211_ptr);
  	if (!wdev->current_bss)
  		ret = -ENOLINK;
@@@ -9524,10 -9282,6 +9524,10 @@@ static int nl80211_disconnect(struct sk
  	u16 reason;
  	int ret;
  
 +	if (dev->ieee80211_ptr->conn_owner_nlportid &&
 +	    dev->ieee80211_ptr->conn_owner_nlportid != info->snd_portid)
 +		return -EPERM;
 +
  	if (!info->attrs[NL80211_ATTR_REASON_CODE])
  		reason = WLAN_REASON_DEAUTH_LEAVING;
  	else
@@@ -14274,8 -14028,8 +14274,8 @@@ void nl80211_send_connect_result(struc
  	void *hdr;
  
  	msg = nlmsg_new(100 + cr->req_ie_len + cr->resp_ie_len +
 -			cr->fils_kek_len + cr->pmk_len +
 -			(cr->pmkid ? WLAN_PMKID_LEN : 0), gfp);
 +			cr->fils.kek_len + cr->fils.pmk_len +
 +			(cr->fils.pmkid ? WLAN_PMKID_LEN : 0), gfp);
  	if (!msg)
  		return;
  
@@@ -14301,17 -14055,17 +14301,17 @@@
  	    (cr->resp_ie &&
  	     nla_put(msg, NL80211_ATTR_RESP_IE, cr->resp_ie_len,
  		     cr->resp_ie)) ||
 -	    (cr->update_erp_next_seq_num &&
 +	    (cr->fils.update_erp_next_seq_num &&
  	     nla_put_u16(msg, NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM,
 -			 cr->fils_erp_next_seq_num)) ||
 +			 cr->fils.erp_next_seq_num)) ||
  	    (cr->status == WLAN_STATUS_SUCCESS &&
 -	     ((cr->fils_kek &&
 -	       nla_put(msg, NL80211_ATTR_FILS_KEK, cr->fils_kek_len,
 -		       cr->fils_kek)) ||
 -	      (cr->pmk &&
 -	       nla_put(msg, NL80211_ATTR_PMK, cr->pmk_len, cr->pmk)) ||
 -	      (cr->pmkid &&
 -	       nla_put(msg, NL80211_ATTR_PMKID, WLAN_PMKID_LEN, cr->pmkid)))))
 +	     ((cr->fils.kek &&
 +	       nla_put(msg, NL80211_ATTR_FILS_KEK, cr->fils.kek_len,
 +		       cr->fils.kek)) ||
 +	      (cr->fils.pmk &&
 +	       nla_put(msg, NL80211_ATTR_PMK, cr->fils.pmk_len, cr->fils.pmk)) ||
 +	      (cr->fils.pmkid &&
 +	       nla_put(msg, NL80211_ATTR_PMKID, WLAN_PMKID_LEN, cr->fils.pmkid)))))
  		goto nla_put_failure;
  
  	genlmsg_end(msg, hdr);
@@@ -14332,9 -14086,7 +14332,9 @@@ void nl80211_send_roamed(struct cfg8021
  	void *hdr;
  	const u8 *bssid = info->bss ? info->bss->bssid : info->bssid;
  
 -	msg = nlmsg_new(100 + info->req_ie_len + info->resp_ie_len, gfp);
 +	msg = nlmsg_new(100 + info->req_ie_len + info->resp_ie_len +
 +			info->fils.kek_len + info->fils.pmk_len +
 +			(info->fils.pmkid ? WLAN_PMKID_LEN : 0), gfp);
  	if (!msg)
  		return;
  
@@@ -14352,17 -14104,7 +14352,17 @@@
  		     info->req_ie)) ||
  	    (info->resp_ie &&
  	     nla_put(msg, NL80211_ATTR_RESP_IE, info->resp_ie_len,
 -		     info->resp_ie)))
 +		     info->resp_ie)) ||
 +	    (info->fils.update_erp_next_seq_num &&
 +	     nla_put_u16(msg, NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM,
 +			 info->fils.erp_next_seq_num)) ||
 +	    (info->fils.kek &&
 +	     nla_put(msg, NL80211_ATTR_FILS_KEK, info->fils.kek_len,
 +		     info->fils.kek)) ||
 +	    (info->fils.pmk &&
 +	     nla_put(msg, NL80211_ATTR_PMK, info->fils.pmk_len, info->fils.pmk)) ||
 +	    (info->fils.pmkid &&
 +	     nla_put(msg, NL80211_ATTR_PMKID, WLAN_PMKID_LEN, info->fils.pmkid)))
  		goto nla_put_failure;
  
  	genlmsg_end(msg, hdr);
@@@ -14579,8 -14321,7 +14579,8 @@@ void nl80211_send_beacon_hint_event(str
  	nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_BEFORE);
  	if (!nl_freq)
  		goto nla_put_failure;
 -	if (nl80211_msg_put_channel(msg, channel_before, false))
 +
 +	if (nl80211_msg_put_channel(msg, wiphy, channel_before, false))
  		goto nla_put_failure;
  	nla_nest_end(msg, nl_freq);
  
@@@ -14588,8 -14329,7 +14588,8 @@@
  	nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_AFTER);
  	if (!nl_freq)
  		goto nla_put_failure;
 -	if (nl80211_msg_put_channel(msg, channel_after, false))
 +
 +	if (nl80211_msg_put_channel(msg, wiphy, channel_after, false))
  		goto nla_put_failure;
  	nla_nest_end(msg, nl_freq);
  
@@@ -14716,10 -14456,8 +14716,10 @@@ void cfg80211_del_sta_sinfo(struct net_
  	trace_cfg80211_del_sta(dev, mac_addr);
  
  	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
 -	if (!msg)
 +	if (!msg) {
 +		cfg80211_sinfo_release_content(sinfo);
  		return;
 +	}
  
  	if (nl80211_send_station(msg, NL80211_CMD_DEL_STATION, 0, 0, 0,
  				 rdev, dev, mac_addr, sinfo) < 0) {
@@@ -15817,7 -15555,8 +15817,8 @@@ void cfg80211_ft_event(struct net_devic
  	if (!ft_event->target_ap)
  		return;
  
- 	msg = nlmsg_new(100 + ft_event->ric_ies_len, GFP_KERNEL);
+ 	msg = nlmsg_new(100 + ft_event->ies_len + ft_event->ric_ies_len,
+ 			GFP_KERNEL);
  	if (!msg)
  		return;
  
diff --combined net/wireless/reg.c
index e55099b1785d,5fcec5c94eb7..bbe6298e4bb9
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@@ -916,6 -916,9 +916,9 @@@ int reg_query_regdb_wmm(char *alpha2, i
  	const struct fwdb_header *hdr = regdb;
  	const struct fwdb_country *country;
  
+ 	if (!regdb)
+ 		return -ENODATA;
+ 
  	if (IS_ERR(regdb))
  		return PTR_ERR(regdb);
  
@@@ -1653,7 -1656,7 +1656,7 @@@ const char *reg_initiator_name(enum nl8
  	case NL80211_REGDOM_SET_BY_DRIVER:
  		return "driver";
  	case NL80211_REGDOM_SET_BY_COUNTRY_IE:
 -		return "country IE";
 +		return "country element";
  	default:
  		WARN_ON(1);
  		return "bug";
@@@ -2619,7 -2622,7 +2622,7 @@@ reg_process_hint_country_ie(struct wiph
  		 * This doesn't happen yet, not sure we
  		 * ever want to support it for this case.
  		 */
 -		WARN_ONCE(1, "Unexpected intersection for country IEs");
 +		WARN_ONCE(1, "Unexpected intersection for country elements");
  		return REG_REQ_IGNORE;
  	}
  
@@@ -2769,21 -2772,6 +2772,21 @@@ out_free
  	reg_free_request(reg_request);
  }
  
 +static void notify_self_managed_wiphys(struct regulatory_request *request)
 +{
 +	struct cfg80211_registered_device *rdev;
 +	struct wiphy *wiphy;
 +
 +	list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
 +		wiphy = &rdev->wiphy;
 +		if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED &&
 +		    request->initiator == NL80211_REGDOM_SET_BY_USER &&
 +		    request->user_reg_hint_type ==
 +				NL80211_USER_REG_HINT_CELL_BASE)
 +			reg_call_notifier(wiphy, request);
 +	}
 +}
 +
  static bool reg_only_self_managed_wiphys(void)
  {
  	struct cfg80211_registered_device *rdev;
@@@ -2835,7 -2823,6 +2838,7 @@@ static void reg_process_pending_hints(v
  
  	spin_unlock(&reg_requests_lock);
  
 +	notify_self_managed_wiphys(reg_request);
  	if (reg_only_self_managed_wiphys()) {
  		reg_free_request(reg_request);
  		return;
@@@ -3400,7 -3387,7 +3403,7 @@@ bool reg_supported_dfs_region(enum nl80
  	case NL80211_DFS_JP:
  		return true;
  	default:
 -		pr_debug("Ignoring uknown DFS master region: %d\n", dfs_region);
 +		pr_debug("Ignoring unknown DFS master region: %d\n", dfs_region);
  		return false;
  	}
  }
@@@ -3715,26 -3702,17 +3718,26 @@@ EXPORT_SYMBOL(regulatory_set_wiphy_regd
  
  void wiphy_regulatory_register(struct wiphy *wiphy)
  {
 -	struct regulatory_request *lr;
 +	struct regulatory_request *lr = get_last_request();
  
 -	/* self-managed devices ignore external hints */
 -	if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
 +	/* self-managed devices ignore beacon hints and country IE */
 +	if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) {
  		wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS |
  					   REGULATORY_COUNTRY_IE_IGNORE;
  
 +		/*
 +		 * The last request may have been received before this
 +		 * registration call. Call the driver notifier if
 +		 * initiator is USER and user type is CELL_BASE.
 +		 */
 +		if (lr->initiator == NL80211_REGDOM_SET_BY_USER &&
 +		    lr->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE)
 +			reg_call_notifier(wiphy, lr);
 +	}
 +
  	if (!reg_dev_ignore_cell_hint(wiphy))
  		reg_num_devs_support_basehint++;
  
 -	lr = get_last_request();
  	wiphy_update_regulatory(wiphy, lr->initiator);
  	wiphy_all_share_dfs_chan_state(wiphy);
  }
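
In reg.c, self-managed wiphys now receive the regulatory notifier for
user cell-base hints both when a pending hint is processed
(notify_self_managed_wiphys()) and at registration time, in case the
hint arrived before the wiphy did.  Both call sites gate on the same
predicate, which could be factored as below (the helper itself is
hypothetical; the fields are those of struct regulatory_request):

#include <net/regulatory.h>

static bool reg_is_user_cell_base_hint(const struct regulatory_request *req)
{
	return req->initiator == NL80211_REGDOM_SET_BY_USER &&
	       req->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE;
}
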
diff --combined security/selinux/hooks.c
index 6bd9358e5e62,179dd20bec0a..ae8672482e10
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@@ -1471,9 -1471,7 +1471,9 @@@ static inline u16 socket_type_to_securi
  			return SECCLASS_QIPCRTR_SOCKET;
  		case PF_SMC:
  			return SECCLASS_SMC_SOCKET;
 -#if PF_MAX > 44
 +		case PF_XDP:
 +			return SECCLASS_XDP_SOCKET;
 +#if PF_MAX > 45
  #error New address family defined, please update this function.
  #endif
  		}
@@@ -1570,8 -1568,15 +1570,15 @@@ static int inode_doinit_with_dentry(str
  			/* Called from d_instantiate or d_splice_alias. */
  			dentry = dget(opt_dentry);
  		} else {
- 			/* Called from selinux_complete_init, try to find a dentry. */
+ 			/*
+ 			 * Called from selinux_complete_init, try to find a dentry.
+ 			 * Some filesystems really want a connected one, so try
+ 			 * that first.  We could split SECURITY_FS_USE_XATTR in
+ 			 * two, depending upon that...
+ 			 */
  			dentry = d_find_alias(inode);
+ 			if (!dentry)
+ 				dentry = d_find_any_alias(inode);
  		}
  		if (!dentry) {
  			/*
@@@ -1676,14 -1681,19 +1683,19 @@@
  		if ((sbsec->flags & SE_SBGENFS) && !S_ISLNK(inode->i_mode)) {
  			/* We must have a dentry to determine the label on
  			 * procfs inodes */
- 			if (opt_dentry)
+ 			if (opt_dentry) {
  				/* Called from d_instantiate or
  				 * d_splice_alias. */
  				dentry = dget(opt_dentry);
- 			else
+ 			} else {
  				/* Called from selinux_complete_init, try to
- 				 * find a dentry. */
+ 				 * find a dentry.  Some filesystems really want
+ 				 * a connected one, so try that first.
+ 				 */
  				dentry = d_find_alias(inode);
+ 				if (!dentry)
+ 					dentry = d_find_any_alias(inode);
+ 			}
  			/*
  			 * This can be hit on boot when a file is accessed
  			 * before the policy is loaded.  When we load policy we
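
Both hooks.c hunks that touch inode_doinit_with_dentry() apply the same
dentry-lookup fallback, and the new comment explains why: some
filesystems insist on a connected dentry, so try that first and settle
for any alias only if none exists.  Isolated as a sketch (the wrapper is
hypothetical; d_find_alias() and d_find_any_alias() are real dcache
API):

#include <linux/dcache.h>
#include <linux/fs.h>

static struct dentry *find_labeling_dentry(struct inode *inode)
{
	struct dentry *dentry = d_find_alias(inode);	/* connected first */

	if (!dentry)
		dentry = d_find_any_alias(inode);	/* any alias will do */

	return dentry;	/* caller must dput() a non-NULL result */
}
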

-- 
LinuxNextTracking

