[linux-next] LinuxNextTracking branch, master, updated. next-20140519

batman at open-mesh.org
Tue May 20 00:19:40 CEST 2014


The following commit has been merged into the master branch:
commit eeaf5807bc1de5a52bc99e71e0ef92d17f3dd45e
Merge: 11cef2c8c00a6f1a9472413e707135a8b56c2231 dab531b4305bc2852ce6f934dc283464d46871a5
Author: Stephen Rothwell <sfr at canb.auug.org.au>
Date:   Mon May 19 13:37:02 2014 +1000

    next-20140516/net-next
    
    Conflicts:
    	drivers/net/ethernet/altera/altera_msgdma.c
    	drivers/net/ethernet/altera/altera_sgdma.c
    	net/ipv6/xfrm6_output.c

diff --combined MAINTAINERS
index 1cb6ac9,bde15ff..ce306e0
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -355,7 -355,7 +355,7 @@@ F:	Documentation/hwmon/adm102
  F:	drivers/hwmon/adm1025.c
  
  ADM1029 HARDWARE MONITOR DRIVER
 -M:	Corentin Labbe <corentin.labbe at geomatys.fr>
 +M:	Corentin Labbe <clabbe.montjoie at gmail.com>
  L:	lm-sensors at lm-sensors.org
  S:	Maintained
  F:	drivers/hwmon/adm1029.c
@@@ -1617,6 -1617,12 +1617,6 @@@ S:	Supporte
  F:	drivers/misc/atmel_tclib.c
  F:	drivers/clocksource/tcb_clksrc.c
  
 -ATMEL TSADCC DRIVER
 -M:	Josh Wu <josh.wu at atmel.com>
 -L:	linux-input at vger.kernel.org
 -S:	Supported
 -F:	drivers/input/touchscreen/atmel_tsadcc.c
 -
  ATMEL USBA UDC DRIVER
  M:	Nicolas Ferre <nicolas.ferre at atmel.com>
  L:	linux-arm-kernel at lists.infradead.org (moderated for non-subscribers)
@@@ -1887,15 -1893,14 +1887,15 @@@ L:	netdev at vger.kernel.or
  S:	Supported
  F:	drivers/net/ethernet/broadcom/bnx2x/
  
 -BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
 +BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
  M:	Christian Daudt <bcm at fixthebug.org>
  M:	Matt Porter <mporter at linaro.org>
  L:	bcm-kernel-feedback-list at broadcom.com
 -T:	git git://git.github.com/broadcom/bcm11351
 +T:	git git://github.com/broadcom/mach-bcm
  S:	Maintained
  F:	arch/arm/mach-bcm/
  F:	arch/arm/boot/dts/bcm113*
 +F:	arch/arm/boot/dts/bcm216*
  F:	arch/arm/boot/dts/bcm281*
  F:	arch/arm/configs/bcm_defconfig
  F:	drivers/mmc/host/sdhci_bcm_kona.c
@@@ -1962,6 -1967,12 +1962,12 @@@ S:	Maintaine
  F:	drivers/bcma/
  F:	include/linux/bcma/
  
+ BROADCOM SYSTEMPORT ETHERNET DRIVER
+ M:	Florian Fainelli <f.fainelli at gmail.com>
+ L:	netdev at vger.kernel.org
+ S:	Supported
+ F:	drivers/net/ethernet/broadcom/bcmsysport.*
+ 
  BROCADE BFA FC SCSI DRIVER
  M:	Anil Gurumurthy <anil.gurumurthy at qlogic.com>
  M:	Sudarsana Kalluru <sudarsana.kalluru at qlogic.com>
@@@ -2240,6 -2251,12 +2246,6 @@@ L:	linux-usb at vger.kernel.or
  S:	Maintained
  F:	drivers/usb/host/ohci-ep93xx.c
  
 -CIRRUS LOGIC CS4270 SOUND DRIVER
 -M:	Timur Tabi <timur at tabi.org>
 -L:	alsa-devel at alsa-project.org (moderated for non-subscribers)
 -S:	Odd Fixes
 -F:	sound/soc/codecs/cs4270*
 -
  CIRRUS LOGIC AUDIO CODEC DRIVERS
  M:	Brian Austin <brian.austin at cirrus.com>
  M:	Paul Handrigan <Paul.Handrigan at cirrus.com>
@@@ -2404,6 -2421,7 +2410,6 @@@ F:	drivers/net/ethernet/ti/cpmac.
  CPU FREQUENCY DRIVERS
  M:	Rafael J. Wysocki <rjw at rjwysocki.net>
  M:	Viresh Kumar <viresh.kumar at linaro.org>
 -L:	cpufreq at vger.kernel.org
  L:	linux-pm at vger.kernel.org
  S:	Maintained
  T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
@@@ -2414,6 -2432,7 +2420,6 @@@ F:	include/linux/cpufreq.
  CPU FREQUENCY DRIVERS - ARM BIG LITTLE
  M:	Viresh Kumar <viresh.kumar at linaro.org>
  M:	Sudeep Holla <sudeep.holla at arm.com>
 -L:	cpufreq at vger.kernel.org
  L:	linux-pm at vger.kernel.org
  W:	http://www.arm.com/products/processors/technologies/biglittleprocessing.php
  S:	Maintained
@@@ -5477,15 -5496,15 +5483,15 @@@ F:	Documentation/hwmon/ltc426
  F:	drivers/hwmon/ltc4261.c
  
  LTP (Linux Test Project)
 -M:	Shubham Goyal <shubham at linux.vnet.ibm.com>
  M:	Mike Frysinger <vapier at gentoo.org>
  M:	Cyril Hrubis <chrubis at suse.cz>
 -M:	Caspar Zhang <caspar at casparzhang.com>
  M:	Wanlong Gao <gaowanlong at cn.fujitsu.com>
 +M:	Jan Stancek <jstancek at redhat.com>
 +M:	Stanislav Kholmanskikh <stanislav.kholmanskikh at oracle.com>
 +M:	Alexey Kodanev <alexey.kodanev at oracle.com>
  L:	ltp-list at lists.sourceforge.net (subscribers-only)
 -W:	http://ltp.sourceforge.net/
 +W:	http://linux-test-project.github.io/
  T:	git git://github.com/linux-test-project/ltp.git
 -T:	git git://ltp.git.sourceforge.net/gitroot/ltp/ltp-dev
  S:	Maintained
  
  M32R ARCHITECTURE
@@@ -6691,7 -6710,6 +6697,7 @@@ F:	Documentation/PCI
  F:	drivers/pci/
  F:	include/linux/pci*
  F:	arch/x86/pci/
 +F:	arch/x86/kernel/quirks.c
  
  PCI DRIVER FOR IMX6
  M:	Richard Zhu <r65037 at freescale.com>
@@@ -7942,26 -7960,6 +7948,26 @@@ M:	Robin Holt <robinmholt at gmail.com
  S:	Maintained
  F:	drivers/misc/sgi-xp/
  
 +SI2157 MEDIA DRIVER
 +M:	Antti Palosaari <crope at iki.fi>
 +L:	linux-media at vger.kernel.org
 +W:	http://linuxtv.org/
 +W:	http://palosaari.fi/linux/
 +Q:	http://patchwork.linuxtv.org/project/linux-media/list/
 +T:	git git://linuxtv.org/anttip/media_tree.git
 +S:	Maintained
 +F:	drivers/media/tuners/si2157*
 +
 +SI2168 MEDIA DRIVER
 +M:	Antti Palosaari <crope at iki.fi>
 +L:	linux-media at vger.kernel.org
 +W:	http://linuxtv.org/
 +W:	http://palosaari.fi/linux/
 +Q:	http://patchwork.linuxtv.org/project/linux-media/list/
 +T:	git git://linuxtv.org/anttip/media_tree.git
 +S:	Maintained
 +F:	drivers/media/dvb-frontends/si2168*
 +
  SI470X FM RADIO RECEIVER I2C DRIVER
  M:	Hans Verkuil <hverkuil at xs4all.nl>
  L:	linux-media at vger.kernel.org
@@@ -9115,9 -9113,6 +9121,9 @@@ F:	arch/um/os-Linux/drivers
  
  TURBOCHANNEL SUBSYSTEM
  M:	"Maciej W. Rozycki" <macro at linux-mips.org>
 +M:	Ralf Baechle <ralf at linux-mips.org>
 +L:	linux-mips at linux-mips.org
 +Q:	http://patchwork.linux-mips.org/project/linux-mips/list/
  S:	Maintained
  F:	drivers/tc/
  F:	include/linux/tc.h
@@@ -9971,7 -9966,7 +9977,7 @@@ F:	drivers/net/hamradio/*scc.
  F:	drivers/net/hamradio/z8530.h
  
  ZBUD COMPRESSED PAGE ALLOCATOR
 -M:	Seth Jennings <sjenning at linux.vnet.ibm.com>
 +M:	Seth Jennings <sjennings at variantweb.net>
  L:	linux-mm at kvack.org
  S:	Maintained
  F:	mm/zbud.c
@@@ -10016,7 -10011,7 +10022,7 @@@ F:	mm/zsmalloc.
  F:	include/linux/zsmalloc.h
  
  ZSWAP COMPRESSED SWAP CACHING
 -M:	Seth Jennings <sjenning at linux.vnet.ibm.com>
 +M:	Seth Jennings <sjennings at variantweb.net>
  L:	linux-mm at kvack.org
  S:	Maintained
  F:	mm/zswap.c
diff --combined arch/arm/boot/dts/am33xx.dtsi
index 7ad75b4,baf56cc..f1eea4a
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@@ -144,7 -144,7 +144,7 @@@
  			compatible = "ti,edma3";
  			ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
  			reg =	<0x49000000 0x10000>,
 -				<0x44e10f90 0x10>;
 +				<0x44e10f90 0x40>;
  			interrupts = <12 13 14>;
  			#dma-cells = <1>;
  			dma-channels = <64>;
@@@ -665,6 -665,8 +665,8 @@@
  		mac: ethernet at 4a100000 {
  			compatible = "ti,cpsw";
  			ti,hwmods = "cpgmac0";
+ 			clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>;
+ 			clock-names = "fck", "cpts";
  			cpdma_channels = <8>;
  			ale_entries = <1024>;
  			bd_ram_size = <0x2000>;
diff --combined arch/x86/net/bpf_jit_comp.c
index 6d5663a,92aef8fd..080f3f0
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@@ -1,6 -1,7 +1,7 @@@
  /* bpf_jit_comp.c : BPF JIT compiler
   *
   * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet at gmail.com)
+  * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of the GNU General Public License
@@@ -14,28 -15,16 +15,16 @@@
  #include <linux/if_vlan.h>
  #include <linux/random.h>
  
- /*
-  * Conventions :
-  *  EAX : BPF A accumulator
-  *  EBX : BPF X accumulator
-  *  RDI : pointer to skb   (first argument given to JIT function)
-  *  RBP : frame pointer (even if CONFIG_FRAME_POINTER=n)
-  *  ECX,EDX,ESI : scratch registers
-  *  r9d : skb->len - skb->data_len (headlen)
-  *  r8  : skb->data
-  * -8(RBP) : saved RBX value
-  * -16(RBP)..-80(RBP) : BPF_MEMWORDS values
-  */
  int bpf_jit_enable __read_mostly;
  
  /*
   * assembly code in arch/x86/net/bpf_jit.S
   */
- extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
+ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
  extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
- extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[];
+ extern u8 sk_load_byte_positive_offset[];
  extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
- extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[];
+ extern u8 sk_load_byte_negative_offset[];
  
  static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
  {
@@@ -56,30 -45,44 +45,44 @@@
  #define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
  #define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
  #define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
- #define EMIT1_off32(b1, off)	do { EMIT1(b1); EMIT(off, 4);} while (0)
- 
- #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
- #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
+ #define EMIT1_off32(b1, off) \
+ 	do {EMIT1(b1); EMIT(off, 4); } while (0)
+ #define EMIT2_off32(b1, b2, off) \
+ 	do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
+ #define EMIT3_off32(b1, b2, b3, off) \
+ 	do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
+ #define EMIT4_off32(b1, b2, b3, b4, off) \
+ 	do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
  
  static inline bool is_imm8(int value)
  {
  	return value <= 127 && value >= -128;
  }
  
- static inline bool is_near(int offset)
+ static inline bool is_simm32(s64 value)
  {
- 	return offset <= 127 && offset >= -128;
+ 	return value == (s64) (s32) value;
  }
  
- #define EMIT_JMP(offset)						\
- do {									\
- 	if (offset) {							\
- 		if (is_near(offset))					\
- 			EMIT2(0xeb, offset); /* jmp .+off8 */		\
- 		else							\
- 			EMIT1_off32(0xe9, offset); /* jmp .+off32 */	\
- 	}								\
- } while (0)
+ /* mov A, X */
+ #define EMIT_mov(A, X) \
+ 	do {if (A != X) \
+ 		EMIT3(add_2mod(0x48, A, X), 0x89, add_2reg(0xC0, A, X)); \
+ 	} while (0)
+ 
+ static int bpf_size_to_x86_bytes(int bpf_size)
+ {
+ 	if (bpf_size == BPF_W)
+ 		return 4;
+ 	else if (bpf_size == BPF_H)
+ 		return 2;
+ 	else if (bpf_size == BPF_B)
+ 		return 1;
+ 	else if (bpf_size == BPF_DW)
+ 		return 4; /* imm32 */
+ 	else
+ 		return 0;
+ }
  
  /* list of x86 cond jump opcodes (. + s8)
   * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
@@@ -90,27 -93,8 +93,8 @@@
  #define X86_JNE 0x75
  #define X86_JBE 0x76
  #define X86_JA  0x77
- 
- #define EMIT_COND_JMP(op, offset)				\
- do {								\
- 	if (is_near(offset))					\
- 		EMIT2(op, offset); /* jxx .+off8 */		\
- 	else {							\
- 		EMIT2(0x0f, op + 0x10);				\
- 		EMIT(offset, 4); /* jxx .+off32 */		\
- 	}							\
- } while (0)
- 
- #define COND_SEL(CODE, TOP, FOP)	\
- 	case CODE:			\
- 		t_op = TOP;		\
- 		f_op = FOP;		\
- 		goto cond_branch
- 
- 
- #define SEEN_DATAREF 1 /* might call external helpers */
- #define SEEN_XREG    2 /* ebx is used */
- #define SEEN_MEM     4 /* use mem[] for temporary storage */
+ #define X86_JGE 0x7D
+ #define X86_JG  0x7F
  
  static inline void bpf_flush_icache(void *start, void *end)
  {
@@@ -125,26 -109,6 +109,6 @@@
  #define CHOOSE_LOAD_FUNC(K, func) \
  	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
  
- /* Helper to find the offset of pkt_type in sk_buff
-  * We want to make sure its still a 3bit field starting at a byte boundary.
-  */
- #define PKT_TYPE_MAX 7
- static int pkt_type_offset(void)
- {
- 	struct sk_buff skb_probe = {
- 		.pkt_type = ~0,
- 	};
- 	char *ct = (char *)&skb_probe;
- 	unsigned int off;
- 
- 	for (off = 0; off < sizeof(struct sk_buff); off++) {
- 		if (ct[off] == PKT_TYPE_MAX)
- 			return off;
- 	}
- 	pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n");
- 	return -1;
- }
- 
  struct bpf_binary_header {
  	unsigned int	pages;
  	/* Note : for security reasons, bpf code will follow a randomly
@@@ -171,590 -135,778 +135,778 @@@ static struct bpf_binary_header *bpf_al
  	memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
  
  	header->pages = sz / PAGE_SIZE;
 -	hole = sz - (proglen + sizeof(*header));
 +	hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
  
  	/* insert a random number of int3 instructions before BPF code */
  	*image_ptr = &header->image[prandom_u32() % hole];
  	return header;
  }
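
A quick worked example of the hole computation above (illustrative numbers
only, assuming PAGE_SIZE == 4096 and sizeof(*header) == 8):

    /* sz   = 2 * PAGE_SIZE = 8192, proglen = 100
     * hole = min(8192 - (100 + 8), 4096 - 8) = min(8084, 4088) = 4088
     * *image_ptr = &header->image[prandom_u32() % 4088]
     *
     * Capping the hole at one page bounds the random start offset, so the
     * int3 padding in front of the program never exceeds a page.
     */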
  
- void bpf_jit_compile(struct sk_filter *fp)
+ /* pick a register outside of BPF range for JIT internal work */
+ #define AUX_REG (MAX_BPF_REG + 1)
+ 
+ /* The following table maps BPF registers to x64 registers.
+  * x64 register r12 is unused, since when used as a base address
+  * register in load/store instructions it always needs an extra byte
+  * of encoding.
+  */
+ static const int reg2hex[] = {
+ 	[BPF_REG_0] = 0,  /* rax */
+ 	[BPF_REG_1] = 7,  /* rdi */
+ 	[BPF_REG_2] = 6,  /* rsi */
+ 	[BPF_REG_3] = 2,  /* rdx */
+ 	[BPF_REG_4] = 1,  /* rcx */
+ 	[BPF_REG_5] = 0,  /* r8 */
+ 	[BPF_REG_6] = 3,  /* rbx callee saved */
+ 	[BPF_REG_7] = 5,  /* r13 callee saved */
+ 	[BPF_REG_8] = 6,  /* r14 callee saved */
+ 	[BPF_REG_9] = 7,  /* r15 callee saved */
+ 	[BPF_REG_FP] = 5, /* rbp readonly */
+ 	[AUX_REG] = 3,    /* r11 temp register */
+ };
+ 
+ /* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
+  * which need extra byte of encoding.
+  * rax,rcx,...,rbp have simpler encoding
+  */
+ static inline bool is_ereg(u32 reg)
  {
- 	u8 temp[64];
- 	u8 *prog;
- 	unsigned int proglen, oldproglen = 0;
- 	int ilen, i;
- 	int t_offset, f_offset;
- 	u8 t_op, f_op, seen = 0, pass;
- 	u8 *image = NULL;
- 	struct bpf_binary_header *header = NULL;
- 	u8 *func;
- 	int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
- 	unsigned int cleanup_addr; /* epilogue code offset */
- 	unsigned int *addrs;
- 	const struct sock_filter *filter = fp->insns;
- 	int flen = fp->len;
+ 	if (reg == BPF_REG_5 || reg == AUX_REG ||
+ 	    (reg >= BPF_REG_7 && reg <= BPF_REG_9))
+ 		return true;
+ 	else
+ 		return false;
+ }
  
- 	if (!bpf_jit_enable)
- 		return;
+ /* add modifiers if 'reg' maps to x64 registers r8..r15 */
+ static inline u8 add_1mod(u8 byte, u32 reg)
+ {
+ 	if (is_ereg(reg))
+ 		byte |= 1;
+ 	return byte;
+ }
  
- 	addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
- 	if (addrs == NULL)
- 		return;
+ static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
+ {
+ 	if (is_ereg(r1))
+ 		byte |= 1;
+ 	if (is_ereg(r2))
+ 		byte |= 4;
+ 	return byte;
+ }
  
- 	/* Before first pass, make a rough estimation of addrs[]
- 	 * each bpf instruction is translated to less than 64 bytes
+ /* encode dest register 'a_reg' into x64 opcode 'byte' */
+ static inline u8 add_1reg(u8 byte, u32 a_reg)
+ {
+ 	return byte + reg2hex[a_reg];
+ }
+ 
+ /* encode dest 'a_reg' and src 'x_reg' registers into x64 opcode 'byte' */
+ static inline u8 add_2reg(u8 byte, u32 a_reg, u32 x_reg)
+ {
+ 	return byte + reg2hex[a_reg] + (reg2hex[x_reg] << 3);
+ }
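
Worked byte encodings for the helpers above (not part of the patch; values
follow the reg2hex table):

    /* EMIT_mov(BPF_REG_1, BPF_REG_0):
     *   add_2mod(0x48, R1, R0) = 0x48   (no extended regs, plain REX.W)
     *   add_2reg(0xC0, R1, R0) = 0xC0 + 7 + (0 << 3) = 0xC7
     *   bytes 48 89 c7 = mov rdi, rax
     *
     * EMIT_mov(BPF_REG_7, BPF_REG_0):
     *   add_2mod(0x48, R7, R0) = 0x49   (R7 is r13, so REX.B is set)
     *   add_2reg(0xC0, R7, R0) = 0xC0 + 5 + (0 << 3) = 0xC5
     *   bytes 49 89 c5 = mov r13, rax
     */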
+ 
+ struct jit_context {
+ 	unsigned int cleanup_addr; /* epilogue code offset */
+ 	bool seen_ld_abs;
+ };
+ 
+ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
+ 		  int oldproglen, struct jit_context *ctx)
+ {
+ 	struct sock_filter_int *insn = bpf_prog->insnsi;
+ 	int insn_cnt = bpf_prog->len;
+ 	u8 temp[64];
+ 	int i;
+ 	int proglen = 0;
+ 	u8 *prog = temp;
+ 	int stacksize = MAX_BPF_STACK +
+ 		32 /* space for rbx, r13, r14, r15 */ +
+ 		8 /* space for skb_copy_bits() buffer */;
+ 
+ 	EMIT1(0x55); /* push rbp */
+ 	EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */
+ 
+ 	/* sub rsp, stacksize */
+ 	EMIT3_off32(0x48, 0x81, 0xEC, stacksize);
+ 
+ 	/* all classic BPF filters use R6(rbx), so save it */
+ 
+ 	/* mov qword ptr [rbp-X],rbx */
+ 	EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);
+ 
+ 	/* sk_convert_filter() maps classic BPF register X to R7 and uses R8
+ 	 * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
+ 	 * R8(r14). R9(r15) spill could be made conditional, but there is only
+ 	 * one 'bpf_error' return path out of the helper functions inside
+ 	 * bpf_jit.S. The overhead of the extra spill is negligible for any
+ 	 * filter other than synthetic ones, so it is not worth adding
+ 	 * complexity.
  	 */
- 	for (proglen = 0, i = 0; i < flen; i++) {
- 		proglen += 64;
- 		addrs[i] = proglen;
+ 
+ 	/* mov qword ptr [rbp-X],r13 */
+ 	EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8);
+ 	/* mov qword ptr [rbp-X],r14 */
+ 	EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16);
+ 	/* mov qword ptr [rbp-X],r15 */
+ 	EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24);
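
The offsets above imply the following frame layout (a sketch; the
skb_copy_bits() buffer position is inferred from the stacksize arithmetic,
stacksize = MAX_BPF_STACK + 40):

    /* rbp - 0 .. rbp - MAX_BPF_STACK : BPF program stack (FP-relative)
     * rbp - stacksize + 32           : 8-byte skb_copy_bits() buffer
     * rbp - stacksize + 24           : saved r15
     * rbp - stacksize + 16           : saved r14
     * rbp - stacksize + 8            : saved r13
     * rbp - stacksize                : saved rbx
     */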
+ 
+ 	/* clear A and X registers */
+ 	EMIT2(0x31, 0xc0); /* xor eax, eax */
+ 	EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */
+ 
+ 	if (ctx->seen_ld_abs) {
+ 		/* r9d : skb->len - skb->data_len (headlen)
+ 		 * r10 : skb->data
+ 		 */
+ 		if (is_imm8(offsetof(struct sk_buff, len)))
+ 			/* mov %r9d, off8(%rdi) */
+ 			EMIT4(0x44, 0x8b, 0x4f,
+ 			      offsetof(struct sk_buff, len));
+ 		else
+ 			/* mov %r9d, off32(%rdi) */
+ 			EMIT3_off32(0x44, 0x8b, 0x8f,
+ 				    offsetof(struct sk_buff, len));
+ 
+ 		if (is_imm8(offsetof(struct sk_buff, data_len)))
+ 			/* sub %r9d, off8(%rdi) */
+ 			EMIT4(0x44, 0x2b, 0x4f,
+ 			      offsetof(struct sk_buff, data_len));
+ 		else
+ 			EMIT3_off32(0x44, 0x2b, 0x8f,
+ 				    offsetof(struct sk_buff, data_len));
+ 
+ 		if (is_imm8(offsetof(struct sk_buff, data)))
+ 			/* mov %r10, off8(%rdi) */
+ 			EMIT4(0x4c, 0x8b, 0x57,
+ 			      offsetof(struct sk_buff, data));
+ 		else
+ 			/* mov %r10, off32(%rdi) */
+ 			EMIT3_off32(0x4c, 0x8b, 0x97,
+ 				    offsetof(struct sk_buff, data));
  	}
- 	cleanup_addr = proglen; /* epilogue address */
  
- 	for (pass = 0; pass < 10; pass++) {
- 		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
- 		/* no prologue/epilogue for trivial filters (RET something) */
- 		proglen = 0;
- 		prog = temp;
+ 	for (i = 0; i < insn_cnt; i++, insn++) {
+ 		const s32 K = insn->imm;
+ 		u32 a_reg = insn->a_reg;
+ 		u32 x_reg = insn->x_reg;
+ 		u8 b1 = 0, b2 = 0, b3 = 0;
+ 		s64 jmp_offset;
+ 		u8 jmp_cond;
+ 		int ilen;
+ 		u8 *func;
+ 
+ 		switch (insn->code) {
+ 			/* ALU */
+ 		case BPF_ALU | BPF_ADD | BPF_X:
+ 		case BPF_ALU | BPF_SUB | BPF_X:
+ 		case BPF_ALU | BPF_AND | BPF_X:
+ 		case BPF_ALU | BPF_OR | BPF_X:
+ 		case BPF_ALU | BPF_XOR | BPF_X:
+ 		case BPF_ALU64 | BPF_ADD | BPF_X:
+ 		case BPF_ALU64 | BPF_SUB | BPF_X:
+ 		case BPF_ALU64 | BPF_AND | BPF_X:
+ 		case BPF_ALU64 | BPF_OR | BPF_X:
+ 		case BPF_ALU64 | BPF_XOR | BPF_X:
+ 			switch (BPF_OP(insn->code)) {
+ 			case BPF_ADD: b2 = 0x01; break;
+ 			case BPF_SUB: b2 = 0x29; break;
+ 			case BPF_AND: b2 = 0x21; break;
+ 			case BPF_OR: b2 = 0x09; break;
+ 			case BPF_XOR: b2 = 0x31; break;
+ 			}
+ 			if (BPF_CLASS(insn->code) == BPF_ALU64)
+ 				EMIT1(add_2mod(0x48, a_reg, x_reg));
+ 			else if (is_ereg(a_reg) || is_ereg(x_reg))
+ 				EMIT1(add_2mod(0x40, a_reg, x_reg));
+ 			EMIT2(b2, add_2reg(0xC0, a_reg, x_reg));
+ 			break;
  
- 		if (seen_or_pass0) {
- 			EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
- 			EMIT4(0x48, 0x83, 0xec, 96);	/* subq  $96,%rsp	*/
- 			/* note : must save %rbx in case bpf_error is hit */
- 			if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
- 				EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
- 			if (seen_or_pass0 & SEEN_XREG)
- 				CLEAR_X(); /* make sure we dont leek kernel memory */
- 
- 			/*
- 			 * If this filter needs to access skb data,
- 			 * loads r9 and r8 with :
- 			 *  r9 = skb->len - skb->data_len
- 			 *  r8 = skb->data
+ 			/* mov A, X */
+ 		case BPF_ALU64 | BPF_MOV | BPF_X:
+ 			EMIT_mov(a_reg, x_reg);
+ 			break;
+ 
+ 			/* mov32 A, X */
+ 		case BPF_ALU | BPF_MOV | BPF_X:
+ 			if (is_ereg(a_reg) || is_ereg(x_reg))
+ 				EMIT1(add_2mod(0x40, a_reg, x_reg));
+ 			EMIT2(0x89, add_2reg(0xC0, a_reg, x_reg));
+ 			break;
+ 
+ 			/* neg A */
+ 		case BPF_ALU | BPF_NEG:
+ 		case BPF_ALU64 | BPF_NEG:
+ 			if (BPF_CLASS(insn->code) == BPF_ALU64)
+ 				EMIT1(add_1mod(0x48, a_reg));
+ 			else if (is_ereg(a_reg))
+ 				EMIT1(add_1mod(0x40, a_reg));
+ 			EMIT2(0xF7, add_1reg(0xD8, a_reg));
+ 			break;
+ 
+ 		case BPF_ALU | BPF_ADD | BPF_K:
+ 		case BPF_ALU | BPF_SUB | BPF_K:
+ 		case BPF_ALU | BPF_AND | BPF_K:
+ 		case BPF_ALU | BPF_OR | BPF_K:
+ 		case BPF_ALU | BPF_XOR | BPF_K:
+ 		case BPF_ALU64 | BPF_ADD | BPF_K:
+ 		case BPF_ALU64 | BPF_SUB | BPF_K:
+ 		case BPF_ALU64 | BPF_AND | BPF_K:
+ 		case BPF_ALU64 | BPF_OR | BPF_K:
+ 		case BPF_ALU64 | BPF_XOR | BPF_K:
+ 			if (BPF_CLASS(insn->code) == BPF_ALU64)
+ 				EMIT1(add_1mod(0x48, a_reg));
+ 			else if (is_ereg(a_reg))
+ 				EMIT1(add_1mod(0x40, a_reg));
+ 
+ 			switch (BPF_OP(insn->code)) {
+ 			case BPF_ADD: b3 = 0xC0; break;
+ 			case BPF_SUB: b3 = 0xE8; break;
+ 			case BPF_AND: b3 = 0xE0; break;
+ 			case BPF_OR: b3 = 0xC8; break;
+ 			case BPF_XOR: b3 = 0xF0; break;
+ 			}
+ 
+ 			if (is_imm8(K))
+ 				EMIT3(0x83, add_1reg(b3, a_reg), K);
+ 			else
+ 				EMIT2_off32(0x81, add_1reg(b3, a_reg), K);
+ 			break;
+ 
+ 		case BPF_ALU64 | BPF_MOV | BPF_K:
+ 			/* optimization: if imm32 is positive,
+ 			 * use 'mov eax, imm32' (which zero-extends imm32)
+ 			 * to save 2 bytes
  			 */
- 			if (seen_or_pass0 & SEEN_DATAREF) {
- 				if (offsetof(struct sk_buff, len) <= 127)
- 					/* mov    off8(%rdi),%r9d */
- 					EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
- 				else {
- 					/* mov    off32(%rdi),%r9d */
- 					EMIT3(0x44, 0x8b, 0x8f);
- 					EMIT(offsetof(struct sk_buff, len), 4);
- 				}
- 				if (is_imm8(offsetof(struct sk_buff, data_len)))
- 					/* sub    off8(%rdi),%r9d */
- 					EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len));
- 				else {
- 					EMIT3(0x44, 0x2b, 0x8f);
- 					EMIT(offsetof(struct sk_buff, data_len), 4);
- 				}
+ 			if (K < 0) {
+ 				/* 'mov rax, imm32' sign extends imm32 */
+ 				b1 = add_1mod(0x48, a_reg);
+ 				b2 = 0xC7;
+ 				b3 = 0xC0;
+ 				EMIT3_off32(b1, b2, add_1reg(b3, a_reg), K);
+ 				break;
+ 			}
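
Byte counts behind the "save 2 bytes" note above (illustrative):

    /* mov eax, imm32 : B8 <imm32>        (5 bytes; the CPU zeroes the
     *                                     upper 32 bits of rax)
     * mov rax, imm32 : 48 C7 C0 <imm32>  (7 bytes; imm32 sign-extended)
     *
     * For non-negative K both load the same 64-bit value, so the shorter
     * form is used and this case falls through to the 32-bit mov below.
     */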
  
- 				if (is_imm8(offsetof(struct sk_buff, data)))
- 					/* mov off8(%rdi),%r8 */
- 					EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data));
- 				else {
- 					/* mov off32(%rdi),%r8 */
- 					EMIT3(0x4c, 0x8b, 0x87);
- 					EMIT(offsetof(struct sk_buff, data), 4);
- 				}
+ 		case BPF_ALU | BPF_MOV | BPF_K:
+ 			/* mov %eax, imm32 */
+ 			if (is_ereg(a_reg))
+ 				EMIT1(add_1mod(0x40, a_reg));
+ 			EMIT1_off32(add_1reg(0xB8, a_reg), K);
+ 			break;
+ 
+ 			/* A %= X, A /= X, A %= K, A /= K */
+ 		case BPF_ALU | BPF_MOD | BPF_X:
+ 		case BPF_ALU | BPF_DIV | BPF_X:
+ 		case BPF_ALU | BPF_MOD | BPF_K:
+ 		case BPF_ALU | BPF_DIV | BPF_K:
+ 		case BPF_ALU64 | BPF_MOD | BPF_X:
+ 		case BPF_ALU64 | BPF_DIV | BPF_X:
+ 		case BPF_ALU64 | BPF_MOD | BPF_K:
+ 		case BPF_ALU64 | BPF_DIV | BPF_K:
+ 			EMIT1(0x50); /* push rax */
+ 			EMIT1(0x52); /* push rdx */
+ 
+ 			if (BPF_SRC(insn->code) == BPF_X)
+ 				/* mov r11, X */
+ 				EMIT_mov(AUX_REG, x_reg);
+ 			else
+ 				/* mov r11, K */
+ 				EMIT3_off32(0x49, 0xC7, 0xC3, K);
+ 
+ 			/* mov rax, A */
+ 			EMIT_mov(BPF_REG_0, a_reg);
+ 
+ 			/* xor edx, edx
+ 			 * equivalent to 'xor rdx, rdx', but one byte less
+ 			 */
+ 			EMIT2(0x31, 0xd2);
+ 
+ 			if (BPF_SRC(insn->code) == BPF_X) {
+ 				/* if (X == 0) return 0 */
+ 
+ 				/* cmp r11, 0 */
+ 				EMIT4(0x49, 0x83, 0xFB, 0x00);
+ 
+ 				/* jne .+9 (skip over pop, pop, xor and jmp) */
+ 				EMIT2(X86_JNE, 1 + 1 + 2 + 5);
+ 				EMIT1(0x5A); /* pop rdx */
+ 				EMIT1(0x58); /* pop rax */
+ 				EMIT2(0x31, 0xc0); /* xor eax, eax */
+ 
+ 				/* jmp cleanup_addr
+ 				 * addrs[i] - 11, because there are 11 bytes
+ 				 * after this insn: div, mov, pop, pop, mov
+ 				 */
+ 				jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
+ 				EMIT1_off32(0xE9, jmp_offset);
  			}
- 		}
  
- 		switch (filter[0].code) {
- 		case BPF_S_RET_K:
- 		case BPF_S_LD_W_LEN:
- 		case BPF_S_ANC_PROTOCOL:
- 		case BPF_S_ANC_IFINDEX:
- 		case BPF_S_ANC_MARK:
- 		case BPF_S_ANC_RXHASH:
- 		case BPF_S_ANC_CPU:
- 		case BPF_S_ANC_VLAN_TAG:
- 		case BPF_S_ANC_VLAN_TAG_PRESENT:
- 		case BPF_S_ANC_QUEUE:
- 		case BPF_S_ANC_PKTTYPE:
- 		case BPF_S_LD_W_ABS:
- 		case BPF_S_LD_H_ABS:
- 		case BPF_S_LD_B_ABS:
- 			/* first instruction sets A register (or is RET 'constant') */
+ 			if (BPF_CLASS(insn->code) == BPF_ALU64)
+ 				/* div r11 */
+ 				EMIT3(0x49, 0xF7, 0xF3);
+ 			else
+ 				/* div r11d */
+ 				EMIT3(0x41, 0xF7, 0xF3);
+ 
+ 			if (BPF_OP(insn->code) == BPF_MOD)
+ 				/* mov r11, rdx */
+ 				EMIT3(0x49, 0x89, 0xD3);
+ 			else
+ 				/* mov r11, rax */
+ 				EMIT3(0x49, 0x89, 0xC3);
+ 
+ 			EMIT1(0x5A); /* pop rdx */
+ 			EMIT1(0x58); /* pop rax */
+ 
+ 			/* mov A, r11 */
+ 			EMIT_mov(a_reg, AUX_REG);
  			break;
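
The "11 bytes" in the cleanup-jump comment above check out against the
emitted code (illustrative):

    /* div r11 (or r11d)  : 3 bytes   (49/41 F7 F3)
     * mov r11, rdx/rax   : 3 bytes   (49 89 D3 / 49 89 C3)
     * pop rdx            : 1 byte    (5A)
     * pop rax            : 1 byte    (58)
     * mov A, r11         : 3 bytes   (EMIT_mov, e.g. 4C 89 D8)
     *                      --------
     *                      11 bytes, so the 5-byte jmp ends at
     *                      addrs[i] - 11 and its rel32 is
     *                      cleanup_addr - (addrs[i] - 11).
     */
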
- 		default:
- 			/* make sure we dont leak kernel information to user */
- 			CLEAR_A(); /* A = 0 */
- 		}
  
- 		for (i = 0; i < flen; i++) {
- 			unsigned int K = filter[i].k;
+ 		case BPF_ALU | BPF_MUL | BPF_K:
+ 		case BPF_ALU | BPF_MUL | BPF_X:
+ 		case BPF_ALU64 | BPF_MUL | BPF_K:
+ 		case BPF_ALU64 | BPF_MUL | BPF_X:
+ 			EMIT1(0x50); /* push rax */
+ 			EMIT1(0x52); /* push rdx */
+ 
+ 			/* mov r11, A */
+ 			EMIT_mov(AUX_REG, a_reg);
+ 
+ 			if (BPF_SRC(insn->code) == BPF_X)
+ 				/* mov rax, X */
+ 				EMIT_mov(BPF_REG_0, x_reg);
+ 			else
+ 				/* mov rax, K */
+ 				EMIT3_off32(0x48, 0xC7, 0xC0, K);
+ 
+ 			if (BPF_CLASS(insn->code) == BPF_ALU64)
+ 				EMIT1(add_1mod(0x48, AUX_REG));
+ 			else if (is_ereg(AUX_REG))
+ 				EMIT1(add_1mod(0x40, AUX_REG));
+ 			/* mul(q) r11 */
+ 			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
+ 
+ 			/* mov r11, rax */
+ 			EMIT_mov(AUX_REG, BPF_REG_0);
+ 
+ 			EMIT1(0x5A); /* pop rdx */
+ 			EMIT1(0x58); /* pop rax */
+ 
+ 			/* mov A, r11 */
+ 			EMIT_mov(a_reg, AUX_REG);
+ 			break;
  
- 			switch (filter[i].code) {
- 			case BPF_S_ALU_ADD_X: /* A += X; */
- 				seen |= SEEN_XREG;
- 				EMIT2(0x01, 0xd8);		/* add %ebx,%eax */
- 				break;
- 			case BPF_S_ALU_ADD_K: /* A += K; */
- 				if (!K)
- 					break;
- 				if (is_imm8(K))
- 					EMIT3(0x83, 0xc0, K);	/* add imm8,%eax */
- 				else
- 					EMIT1_off32(0x05, K);	/* add imm32,%eax */
- 				break;
- 			case BPF_S_ALU_SUB_X: /* A -= X; */
- 				seen |= SEEN_XREG;
- 				EMIT2(0x29, 0xd8);		/* sub    %ebx,%eax */
- 				break;
- 			case BPF_S_ALU_SUB_K: /* A -= K */
- 				if (!K)
- 					break;
- 				if (is_imm8(K))
- 					EMIT3(0x83, 0xe8, K); /* sub imm8,%eax */
- 				else
- 					EMIT1_off32(0x2d, K); /* sub imm32,%eax */
- 				break;
- 			case BPF_S_ALU_MUL_X: /* A *= X; */
- 				seen |= SEEN_XREG;
- 				EMIT3(0x0f, 0xaf, 0xc3);	/* imul %ebx,%eax */
- 				break;
- 			case BPF_S_ALU_MUL_K: /* A *= K */
- 				if (is_imm8(K))
- 					EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
- 				else {
- 					EMIT2(0x69, 0xc0);		/* imul imm32,%eax */
- 					EMIT(K, 4);
- 				}
- 				break;
- 			case BPF_S_ALU_DIV_X: /* A /= X; */
- 				seen |= SEEN_XREG;
- 				EMIT2(0x85, 0xdb);	/* test %ebx,%ebx */
- 				if (pc_ret0 > 0) {
- 					/* addrs[pc_ret0 - 1] is start address of target
- 					 * (addrs[i] - 4) is the address following this jmp
- 					 * ("xor %edx,%edx; div %ebx" being 4 bytes long)
- 					 */
- 					EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
- 								(addrs[i] - 4));
- 				} else {
- 					EMIT_COND_JMP(X86_JNE, 2 + 5);
- 					CLEAR_A();
- 					EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
- 				}
- 				EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
- 				break;
- 			case BPF_S_ALU_MOD_X: /* A %= X; */
- 				seen |= SEEN_XREG;
- 				EMIT2(0x85, 0xdb);	/* test %ebx,%ebx */
- 				if (pc_ret0 > 0) {
- 					/* addrs[pc_ret0 - 1] is start address of target
- 					 * (addrs[i] - 6) is the address following this jmp
- 					 * ("xor %edx,%edx; div %ebx;mov %edx,%eax" being 6 bytes long)
- 					 */
- 					EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
- 								(addrs[i] - 6));
- 				} else {
- 					EMIT_COND_JMP(X86_JNE, 2 + 5);
- 					CLEAR_A();
- 					EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 6)); /* jmp .+off32 */
- 				}
- 				EMIT2(0x31, 0xd2);	/* xor %edx,%edx */
- 				EMIT2(0xf7, 0xf3);	/* div %ebx */
- 				EMIT2(0x89, 0xd0);	/* mov %edx,%eax */
- 				break;
- 			case BPF_S_ALU_MOD_K: /* A %= K; */
- 				if (K == 1) {
- 					CLEAR_A();
- 					break;
- 				}
- 				EMIT2(0x31, 0xd2);	/* xor %edx,%edx */
- 				EMIT1(0xb9);EMIT(K, 4);	/* mov imm32,%ecx */
- 				EMIT2(0xf7, 0xf1);	/* div %ecx */
- 				EMIT2(0x89, 0xd0);	/* mov %edx,%eax */
- 				break;
- 			case BPF_S_ALU_DIV_K: /* A /= K */
- 				if (K == 1)
- 					break;
- 				EMIT2(0x31, 0xd2);	/* xor %edx,%edx */
- 				EMIT1(0xb9);EMIT(K, 4);	/* mov imm32,%ecx */
- 				EMIT2(0xf7, 0xf1);	/* div %ecx */
- 				break;
- 			case BPF_S_ALU_AND_X:
- 				seen |= SEEN_XREG;
- 				EMIT2(0x21, 0xd8);		/* and %ebx,%eax */
- 				break;
- 			case BPF_S_ALU_AND_K:
- 				if (K >= 0xFFFFFF00) {
- 					EMIT2(0x24, K & 0xFF); /* and imm8,%al */
- 				} else if (K >= 0xFFFF0000) {
- 					EMIT2(0x66, 0x25);	/* and imm16,%ax */
- 					EMIT(K, 2);
- 				} else {
- 					EMIT1_off32(0x25, K);	/* and imm32,%eax */
- 				}
- 				break;
- 			case BPF_S_ALU_OR_X:
- 				seen |= SEEN_XREG;
- 				EMIT2(0x09, 0xd8);		/* or %ebx,%eax */
- 				break;
- 			case BPF_S_ALU_OR_K:
- 				if (is_imm8(K))
- 					EMIT3(0x83, 0xc8, K); /* or imm8,%eax */
- 				else
- 					EMIT1_off32(0x0d, K);	/* or imm32,%eax */
- 				break;
- 			case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
- 			case BPF_S_ALU_XOR_X:
- 				seen |= SEEN_XREG;
- 				EMIT2(0x31, 0xd8);		/* xor %ebx,%eax */
- 				break;
- 			case BPF_S_ALU_XOR_K: /* A ^= K; */
- 				if (K == 0)
- 					break;
- 				if (is_imm8(K))
- 					EMIT3(0x83, 0xf0, K);	/* xor imm8,%eax */
- 				else
- 					EMIT1_off32(0x35, K);	/* xor imm32,%eax */
- 				break;
- 			case BPF_S_ALU_LSH_X: /* A <<= X; */
- 				seen |= SEEN_XREG;
- 				EMIT4(0x89, 0xd9, 0xd3, 0xe0);	/* mov %ebx,%ecx; shl %cl,%eax */
- 				break;
- 			case BPF_S_ALU_LSH_K:
- 				if (K == 0)
- 					break;
- 				else if (K == 1)
- 					EMIT2(0xd1, 0xe0); /* shl %eax */
- 				else
- 					EMIT3(0xc1, 0xe0, K);
- 				break;
- 			case BPF_S_ALU_RSH_X: /* A >>= X; */
- 				seen |= SEEN_XREG;
- 				EMIT4(0x89, 0xd9, 0xd3, 0xe8);	/* mov %ebx,%ecx; shr %cl,%eax */
- 				break;
- 			case BPF_S_ALU_RSH_K: /* A >>= K; */
- 				if (K == 0)
- 					break;
- 				else if (K == 1)
- 					EMIT2(0xd1, 0xe8); /* shr %eax */
- 				else
- 					EMIT3(0xc1, 0xe8, K);
- 				break;
- 			case BPF_S_ALU_NEG:
- 				EMIT2(0xf7, 0xd8);		/* neg %eax */
- 				break;
- 			case BPF_S_RET_K:
- 				if (!K) {
- 					if (pc_ret0 == -1)
- 						pc_ret0 = i;
- 					CLEAR_A();
- 				} else {
- 					EMIT1_off32(0xb8, K);	/* mov $imm32,%eax */
- 				}
- 				/* fallinto */
- 			case BPF_S_RET_A:
- 				if (seen_or_pass0) {
- 					if (i != flen - 1) {
- 						EMIT_JMP(cleanup_addr - addrs[i]);
- 						break;
- 					}
- 					if (seen_or_pass0 & SEEN_XREG)
- 						EMIT4(0x48, 0x8b, 0x5d, 0xf8);  /* mov  -8(%rbp),%rbx */
- 					EMIT1(0xc9);		/* leaveq */
- 				}
- 				EMIT1(0xc3);		/* ret */
- 				break;
- 			case BPF_S_MISC_TAX: /* X = A */
- 				seen |= SEEN_XREG;
- 				EMIT2(0x89, 0xc3);	/* mov    %eax,%ebx */
- 				break;
- 			case BPF_S_MISC_TXA: /* A = X */
- 				seen |= SEEN_XREG;
- 				EMIT2(0x89, 0xd8);	/* mov    %ebx,%eax */
- 				break;
- 			case BPF_S_LD_IMM: /* A = K */
- 				if (!K)
- 					CLEAR_A();
- 				else
- 					EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
- 				break;
- 			case BPF_S_LDX_IMM: /* X = K */
- 				seen |= SEEN_XREG;
- 				if (!K)
- 					CLEAR_X();
+ 			/* shifts */
+ 		case BPF_ALU | BPF_LSH | BPF_K:
+ 		case BPF_ALU | BPF_RSH | BPF_K:
+ 		case BPF_ALU | BPF_ARSH | BPF_K:
+ 		case BPF_ALU64 | BPF_LSH | BPF_K:
+ 		case BPF_ALU64 | BPF_RSH | BPF_K:
+ 		case BPF_ALU64 | BPF_ARSH | BPF_K:
+ 			if (BPF_CLASS(insn->code) == BPF_ALU64)
+ 				EMIT1(add_1mod(0x48, a_reg));
+ 			else if (is_ereg(a_reg))
+ 				EMIT1(add_1mod(0x40, a_reg));
+ 
+ 			switch (BPF_OP(insn->code)) {
+ 			case BPF_LSH: b3 = 0xE0; break;
+ 			case BPF_RSH: b3 = 0xE8; break;
+ 			case BPF_ARSH: b3 = 0xF8; break;
+ 			}
+ 			EMIT3(0xC1, add_1reg(b3, a_reg), K);
+ 			break;
+ 
+ 		case BPF_ALU | BPF_END | BPF_FROM_BE:
+ 			switch (K) {
+ 			case 16:
+ 				/* emit 'ror %ax, 8' to swap lower 2 bytes */
+ 				EMIT1(0x66);
+ 				if (is_ereg(a_reg))
+ 					EMIT1(0x41);
+ 				EMIT3(0xC1, add_1reg(0xC8, a_reg), 8);
+ 				break;
+ 			case 32:
+ 				/* emit 'bswap eax' to swap lower 4 bytes */
+ 				if (is_ereg(a_reg))
+ 					EMIT2(0x41, 0x0F);
  				else
- 					EMIT1_off32(0xbb, K); /* mov $imm32,%ebx */
- 				break;
- 			case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */
- 				seen |= SEEN_MEM;
- 				EMIT3(0x8b, 0x45, 0xf0 - K*4);
- 				break;
- 			case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */
- 				seen |= SEEN_XREG | SEEN_MEM;
- 				EMIT3(0x8b, 0x5d, 0xf0 - K*4);
- 				break;
- 			case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */
- 				seen |= SEEN_MEM;
- 				EMIT3(0x89, 0x45, 0xf0 - K*4);
- 				break;
- 			case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
- 				seen |= SEEN_XREG | SEEN_MEM;
- 				EMIT3(0x89, 0x5d, 0xf0 - K*4);
- 				break;
- 			case BPF_S_LD_W_LEN: /*	A = skb->len; */
- 				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
- 				if (is_imm8(offsetof(struct sk_buff, len)))
- 					/* mov    off8(%rdi),%eax */
- 					EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len));
- 				else {
- 					EMIT2(0x8b, 0x87);
- 					EMIT(offsetof(struct sk_buff, len), 4);
- 				}
- 				break;
- 			case BPF_S_LDX_W_LEN: /* X = skb->len; */
- 				seen |= SEEN_XREG;
- 				if (is_imm8(offsetof(struct sk_buff, len)))
- 					/* mov off8(%rdi),%ebx */
- 					EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len));
- 				else {
- 					EMIT2(0x8b, 0x9f);
- 					EMIT(offsetof(struct sk_buff, len), 4);
- 				}
- 				break;
- 			case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
- 				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
- 				if (is_imm8(offsetof(struct sk_buff, protocol))) {
- 					/* movzwl off8(%rdi),%eax */
- 					EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol));
- 				} else {
- 					EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
- 					EMIT(offsetof(struct sk_buff, protocol), 4);
- 				}
- 				EMIT2(0x86, 0xc4); /* ntohs() : xchg   %al,%ah */
- 				break;
- 			case BPF_S_ANC_IFINDEX:
- 				if (is_imm8(offsetof(struct sk_buff, dev))) {
- 					/* movq off8(%rdi),%rax */
- 					EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev));
- 				} else {
- 					EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */
- 					EMIT(offsetof(struct sk_buff, dev), 4);
- 				}
- 				EMIT3(0x48, 0x85, 0xc0);	/* test %rax,%rax */
- 				EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6));
- 				BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
- 				EMIT2(0x8b, 0x80);	/* mov off32(%rax),%eax */
- 				EMIT(offsetof(struct net_device, ifindex), 4);
+ 					EMIT1(0x0F);
+ 				EMIT1(add_1reg(0xC8, a_reg));
  				break;
- 			case BPF_S_ANC_MARK:
- 				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
- 				if (is_imm8(offsetof(struct sk_buff, mark))) {
- 					/* mov off8(%rdi),%eax */
- 					EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark));
- 				} else {
- 					EMIT2(0x8b, 0x87);
- 					EMIT(offsetof(struct sk_buff, mark), 4);
- 				}
- 				break;
- 			case BPF_S_ANC_RXHASH:
- 				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
- 				if (is_imm8(offsetof(struct sk_buff, hash))) {
- 					/* mov off8(%rdi),%eax */
- 					EMIT3(0x8b, 0x47, offsetof(struct sk_buff, hash));
- 				} else {
- 					EMIT2(0x8b, 0x87);
- 					EMIT(offsetof(struct sk_buff, hash), 4);
- 				}
- 				break;
- 			case BPF_S_ANC_QUEUE:
- 				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
- 				if (is_imm8(offsetof(struct sk_buff, queue_mapping))) {
- 					/* movzwl off8(%rdi),%eax */
- 					EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping));
- 				} else {
- 					EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
- 					EMIT(offsetof(struct sk_buff, queue_mapping), 4);
- 				}
- 				break;
- 			case BPF_S_ANC_CPU:
- #ifdef CONFIG_SMP
- 				EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */
- 				EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */
- #else
- 				CLEAR_A();
- #endif
- 				break;
- 			case BPF_S_ANC_VLAN_TAG:
- 			case BPF_S_ANC_VLAN_TAG_PRESENT:
- 				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
- 				if (is_imm8(offsetof(struct sk_buff, vlan_tci))) {
- 					/* movzwl off8(%rdi),%eax */
- 					EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, vlan_tci));
- 				} else {
- 					EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
- 					EMIT(offsetof(struct sk_buff, vlan_tci), 4);
- 				}
- 				BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
- 				if (filter[i].code == BPF_S_ANC_VLAN_TAG) {
- 					EMIT3(0x80, 0xe4, 0xef); /* and    $0xef,%ah */
- 				} else {
- 					EMIT3(0xc1, 0xe8, 0x0c); /* shr    $0xc,%eax */
- 					EMIT3(0x83, 0xe0, 0x01); /* and    $0x1,%eax */
- 				}
- 				break;
- 			case BPF_S_ANC_PKTTYPE:
- 			{
- 				int off = pkt_type_offset();
- 
- 				if (off < 0)
- 					goto out;
- 				if (is_imm8(off)) {
- 					/* movzbl off8(%rdi),%eax */
- 					EMIT4(0x0f, 0xb6, 0x47, off);
- 				} else {
- 					/* movbl off32(%rdi),%eax */
- 					EMIT3(0x0f, 0xb6, 0x87);
- 					EMIT(off, 4);
- 				}
- 				EMIT3(0x83, 0xe0, PKT_TYPE_MAX); /* and    $0x7,%eax */
+ 			case 64:
+ 				/* emit 'bswap rax' to swap 8 bytes */
+ 				EMIT3(add_1mod(0x48, a_reg), 0x0F,
+ 				      add_1reg(0xC8, a_reg));
  				break;
  			}
- 			case BPF_S_LD_W_ABS:
- 				func = CHOOSE_LOAD_FUNC(K, sk_load_word);
- common_load:			seen |= SEEN_DATAREF;
- 				t_offset = func - (image + addrs[i]);
- 				EMIT1_off32(0xbe, K); /* mov imm32,%esi */
- 				EMIT1_off32(0xe8, t_offset); /* call */
- 				break;
- 			case BPF_S_LD_H_ABS:
- 				func = CHOOSE_LOAD_FUNC(K, sk_load_half);
- 				goto common_load;
- 			case BPF_S_LD_B_ABS:
- 				func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
- 				goto common_load;
- 			case BPF_S_LDX_B_MSH:
- 				func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
- 				seen |= SEEN_DATAREF | SEEN_XREG;
- 				t_offset = func - (image + addrs[i]);
- 				EMIT1_off32(0xbe, K);	/* mov imm32,%esi */
- 				EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
- 				break;
- 			case BPF_S_LD_W_IND:
- 				func = sk_load_word;
- common_load_ind:		seen |= SEEN_DATAREF | SEEN_XREG;
- 				t_offset = func - (image + addrs[i]);
- 				if (K) {
- 					if (is_imm8(K)) {
- 						EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
- 					} else {
- 						EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
- 						EMIT(K, 4);
- 					}
- 				} else {
- 					EMIT2(0x89,0xde); /* mov %ebx,%esi */
- 				}
- 				EMIT1_off32(0xe8, t_offset);	/* call sk_load_xxx_ind */
- 				break;
- 			case BPF_S_LD_H_IND:
- 				func = sk_load_half;
- 				goto common_load_ind;
- 			case BPF_S_LD_B_IND:
- 				func = sk_load_byte;
- 				goto common_load_ind;
- 			case BPF_S_JMP_JA:
- 				t_offset = addrs[i + K] - addrs[i];
- 				EMIT_JMP(t_offset);
- 				break;
- 			COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE);
- 			COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB);
- 			COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE);
- 			COND_SEL(BPF_S_JMP_JSET_K,X86_JNE, X86_JE);
- 			COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE);
- 			COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB);
- 			COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE);
- 			COND_SEL(BPF_S_JMP_JSET_X,X86_JNE, X86_JE);
- 
- cond_branch:			f_offset = addrs[i + filter[i].jf] - addrs[i];
- 				t_offset = addrs[i + filter[i].jt] - addrs[i];
- 
- 				/* same targets, can avoid doing the test :) */
- 				if (filter[i].jt == filter[i].jf) {
- 					EMIT_JMP(t_offset);
- 					break;
- 				}
+ 			break;
+ 
+ 		case BPF_ALU | BPF_END | BPF_FROM_LE:
+ 			break;
  
- 				switch (filter[i].code) {
- 				case BPF_S_JMP_JGT_X:
- 				case BPF_S_JMP_JGE_X:
- 				case BPF_S_JMP_JEQ_X:
- 					seen |= SEEN_XREG;
- 					EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */
- 					break;
- 				case BPF_S_JMP_JSET_X:
- 					seen |= SEEN_XREG;
- 					EMIT2(0x85, 0xd8); /* test %ebx,%eax */
- 					break;
- 				case BPF_S_JMP_JEQ_K:
- 					if (K == 0) {
- 						EMIT2(0x85, 0xc0); /* test   %eax,%eax */
- 						break;
- 					}
- 				case BPF_S_JMP_JGT_K:
- 				case BPF_S_JMP_JGE_K:
- 					if (K <= 127)
- 						EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */
+ 			/* ST: *(u8*)(a_reg + off) = imm */
+ 		case BPF_ST | BPF_MEM | BPF_B:
+ 			if (is_ereg(a_reg))
+ 				EMIT2(0x41, 0xC6);
+ 			else
+ 				EMIT1(0xC6);
+ 			goto st;
+ 		case BPF_ST | BPF_MEM | BPF_H:
+ 			if (is_ereg(a_reg))
+ 				EMIT3(0x66, 0x41, 0xC7);
+ 			else
+ 				EMIT2(0x66, 0xC7);
+ 			goto st;
+ 		case BPF_ST | BPF_MEM | BPF_W:
+ 			if (is_ereg(a_reg))
+ 				EMIT2(0x41, 0xC7);
+ 			else
+ 				EMIT1(0xC7);
+ 			goto st;
+ 		case BPF_ST | BPF_MEM | BPF_DW:
+ 			EMIT2(add_1mod(0x48, a_reg), 0xC7);
+ 
+ st:			if (is_imm8(insn->off))
+ 				EMIT2(add_1reg(0x40, a_reg), insn->off);
+ 			else
+ 				EMIT1_off32(add_1reg(0x80, a_reg), insn->off);
+ 
+ 			EMIT(K, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
+ 			break;
+ 
+ 			/* STX: *(u8*)(a_reg + off) = x_reg */
+ 		case BPF_STX | BPF_MEM | BPF_B:
+ 			/* emit 'mov byte ptr [rax + off], al' */
+ 			if (is_ereg(a_reg) || is_ereg(x_reg) ||
+ 			    /* have to add extra byte for x86 SIL, DIL regs */
+ 			    x_reg == BPF_REG_1 || x_reg == BPF_REG_2)
+ 				EMIT2(add_2mod(0x40, a_reg, x_reg), 0x88);
+ 			else
+ 				EMIT1(0x88);
+ 			goto stx;
+ 		case BPF_STX | BPF_MEM | BPF_H:
+ 			if (is_ereg(a_reg) || is_ereg(x_reg))
+ 				EMIT3(0x66, add_2mod(0x40, a_reg, x_reg), 0x89);
+ 			else
+ 				EMIT2(0x66, 0x89);
+ 			goto stx;
+ 		case BPF_STX | BPF_MEM | BPF_W:
+ 			if (is_ereg(a_reg) || is_ereg(x_reg))
+ 				EMIT2(add_2mod(0x40, a_reg, x_reg), 0x89);
+ 			else
+ 				EMIT1(0x89);
+ 			goto stx;
+ 		case BPF_STX | BPF_MEM | BPF_DW:
+ 			EMIT2(add_2mod(0x48, a_reg, x_reg), 0x89);
+ stx:			if (is_imm8(insn->off))
+ 				EMIT2(add_2reg(0x40, a_reg, x_reg), insn->off);
+ 			else
+ 				EMIT1_off32(add_2reg(0x80, a_reg, x_reg),
+ 					    insn->off);
+ 			break;
+ 
+ 			/* LDX: a_reg = *(u8*)(x_reg + off) */
+ 		case BPF_LDX | BPF_MEM | BPF_B:
+ 			/* emit 'movzx rax, byte ptr [rax + off]' */
+ 			EMIT3(add_2mod(0x48, x_reg, a_reg), 0x0F, 0xB6);
+ 			goto ldx;
+ 		case BPF_LDX | BPF_MEM | BPF_H:
+ 			/* emit 'movzx rax, word ptr [rax + off]' */
+ 			EMIT3(add_2mod(0x48, x_reg, a_reg), 0x0F, 0xB7);
+ 			goto ldx;
+ 		case BPF_LDX | BPF_MEM | BPF_W:
+ 			/* emit 'mov eax, dword ptr [rax+0x14]' */
+ 			if (is_ereg(a_reg) || is_ereg(x_reg))
+ 				EMIT2(add_2mod(0x40, x_reg, a_reg), 0x8B);
+ 			else
+ 				EMIT1(0x8B);
+ 			goto ldx;
+ 		case BPF_LDX | BPF_MEM | BPF_DW:
+ 			/* emit 'mov rax, qword ptr [rax+0x14]' */
+ 			EMIT2(add_2mod(0x48, x_reg, a_reg), 0x8B);
+ ldx:			/* if insn->off == 0 we can save one extra byte, but
+ 			 * the special case of x86 r13, which always needs an
+ 			 * offset, is not worth the hassle
+ 			 */
+ 			if (is_imm8(insn->off))
+ 				EMIT2(add_2reg(0x40, x_reg, a_reg), insn->off);
+ 			else
+ 				EMIT1_off32(add_2reg(0x80, x_reg, a_reg),
+ 					    insn->off);
+ 			break;
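
Background on the r13 special case mentioned above (an x86 encoding fact,
not part of the patch):

    /* With mod == 00, rm == 101 does not mean [rbp] (or [r13] with
     * REX.B); it selects RIP-relative/disp32 addressing instead.  So
     * [r13 + 0] must be encoded as mod == 01 with an explicit zero
     * disp8 -- there is no displacement-free form, which is why an
     * off == 0 shortcut is not attempted here.
     */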
+ 
+ 			/* STX XADD: lock *(u32*)(a_reg + off) += x_reg */
+ 		case BPF_STX | BPF_XADD | BPF_W:
+ 			/* emit 'lock add dword ptr [rax + off], eax' */
+ 			if (is_ereg(a_reg) || is_ereg(x_reg))
+ 				EMIT3(0xF0, add_2mod(0x40, a_reg, x_reg), 0x01);
+ 			else
+ 				EMIT2(0xF0, 0x01);
+ 			goto xadd;
+ 		case BPF_STX | BPF_XADD | BPF_DW:
+ 			EMIT3(0xF0, add_2mod(0x48, a_reg, x_reg), 0x01);
+ xadd:			if (is_imm8(insn->off))
+ 				EMIT2(add_2reg(0x40, a_reg, x_reg), insn->off);
+ 			else
+ 				EMIT1_off32(add_2reg(0x80, a_reg, x_reg),
+ 					    insn->off);
+ 			break;
+ 
+ 			/* call */
+ 		case BPF_JMP | BPF_CALL:
+ 			func = (u8 *) __bpf_call_base + K;
+ 			jmp_offset = func - (image + addrs[i]);
+ 			if (ctx->seen_ld_abs) {
+ 				EMIT2(0x41, 0x52); /* push %r10 */
+ 				EMIT2(0x41, 0x51); /* push %r9 */
+ 				/* need to adjust jmp offset, since
+ 				 * pop %r9, pop %r10 take 4 bytes after call insn
+ 				 */
+ 				jmp_offset += 4;
+ 			}
+ 			if (!K || !is_simm32(jmp_offset)) {
+ 				pr_err("unsupported bpf func %d addr %p image %p\n",
+ 				       K, func, image);
+ 				return -EINVAL;
+ 			}
+ 			EMIT1_off32(0xE8, jmp_offset);
+ 			if (ctx->seen_ld_abs) {
+ 				EMIT2(0x41, 0x59); /* pop %r9 */
+ 				EMIT2(0x41, 0x5A); /* pop %r10 */
+ 			}
+ 			break;
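
Worked through, the "+= 4" adjustment above accounts for the two 2-byte
pops (illustrative):

    /* addrs[i] is the offset of the end of this instruction's code, and
     * a call rel32 is relative to the byte after the call.  With
     * pop %r9 (41 59) and pop %r10 (41 5A) following the call, the call
     * itself ends at addrs[i] - 4, so
     *
     *   rel32 = func - (image + addrs[i] - 4) = jmp_offset + 4
     */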
+ 
+ 			/* cond jump */
+ 		case BPF_JMP | BPF_JEQ | BPF_X:
+ 		case BPF_JMP | BPF_JNE | BPF_X:
+ 		case BPF_JMP | BPF_JGT | BPF_X:
+ 		case BPF_JMP | BPF_JGE | BPF_X:
+ 		case BPF_JMP | BPF_JSGT | BPF_X:
+ 		case BPF_JMP | BPF_JSGE | BPF_X:
+ 			/* cmp a_reg, x_reg */
+ 			EMIT3(add_2mod(0x48, a_reg, x_reg), 0x39,
+ 			      add_2reg(0xC0, a_reg, x_reg));
+ 			goto emit_cond_jmp;
+ 
+ 		case BPF_JMP | BPF_JSET | BPF_X:
+ 			/* test a_reg, x_reg */
+ 			EMIT3(add_2mod(0x48, a_reg, x_reg), 0x85,
+ 			      add_2reg(0xC0, a_reg, x_reg));
+ 			goto emit_cond_jmp;
+ 
+ 		case BPF_JMP | BPF_JSET | BPF_K:
+ 			/* test a_reg, imm32 */
+ 			EMIT1(add_1mod(0x48, a_reg));
+ 			EMIT2_off32(0xF7, add_1reg(0xC0, a_reg), K);
+ 			goto emit_cond_jmp;
+ 
+ 		case BPF_JMP | BPF_JEQ | BPF_K:
+ 		case BPF_JMP | BPF_JNE | BPF_K:
+ 		case BPF_JMP | BPF_JGT | BPF_K:
+ 		case BPF_JMP | BPF_JGE | BPF_K:
+ 		case BPF_JMP | BPF_JSGT | BPF_K:
+ 		case BPF_JMP | BPF_JSGE | BPF_K:
+ 			/* cmp a_reg, imm8/32 */
+ 			EMIT1(add_1mod(0x48, a_reg));
+ 
+ 			if (is_imm8(K))
+ 				EMIT3(0x83, add_1reg(0xF8, a_reg), K);
+ 			else
+ 				EMIT2_off32(0x81, add_1reg(0xF8, a_reg), K);
+ 
+ emit_cond_jmp:		/* convert BPF opcode to x86 */
+ 			switch (BPF_OP(insn->code)) {
+ 			case BPF_JEQ:
+ 				jmp_cond = X86_JE;
+ 				break;
+ 			case BPF_JSET:
+ 			case BPF_JNE:
+ 				jmp_cond = X86_JNE;
+ 				break;
+ 			case BPF_JGT:
+ 				/* GT is unsigned '>', JA in x86 */
+ 				jmp_cond = X86_JA;
+ 				break;
+ 			case BPF_JGE:
+ 				/* GE is unsigned '>=', JAE in x86 */
+ 				jmp_cond = X86_JAE;
+ 				break;
+ 			case BPF_JSGT:
+ 				/* signed '>', GT in x86 */
+ 				jmp_cond = X86_JG;
+ 				break;
+ 			case BPF_JSGE:
+ 				/* signed '>=', GE in x86 */
+ 				jmp_cond = X86_JGE;
+ 				break;
+ 			default: /* to silence gcc warning */
+ 				return -EFAULT;
+ 			}
+ 			jmp_offset = addrs[i + insn->off] - addrs[i];
+ 			if (is_imm8(jmp_offset)) {
+ 				EMIT2(jmp_cond, jmp_offset);
+ 			} else if (is_simm32(jmp_offset)) {
+ 				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
+ 			} else {
+ 				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
+ 				return -EFAULT;
+ 			}
+ 
+ 			break;
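
For reference, the near/far selection above works out as follows
(illustrative; X86_JE is 0x74):

    /* BPF_JEQ -> jmp_cond = X86_JE = 0x74:
     *   near: 74 <disp8>       (2 bytes, offset fits in s8)
     *   far:  0F 84 <disp32>   (6 bytes; 0x74 + 0x10 = 0x84)
     * The same +0x10 applies to every short jcc opcode to get its
     * 0F 8x long form.
     */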
+ 
+ 		case BPF_JMP | BPF_JA:
+ 			jmp_offset = addrs[i + insn->off] - addrs[i];
+ 			if (!jmp_offset)
+ 				/* optimize out nop jumps */
+ 				break;
+ emit_jmp:
+ 			if (is_imm8(jmp_offset)) {
+ 				EMIT2(0xEB, jmp_offset);
+ 			} else if (is_simm32(jmp_offset)) {
+ 				EMIT1_off32(0xE9, jmp_offset);
+ 			} else {
+ 				pr_err("jmp gen bug %llx\n", jmp_offset);
+ 				return -EFAULT;
+ 			}
+ 			break;
+ 
+ 		case BPF_LD | BPF_IND | BPF_W:
+ 			func = sk_load_word;
+ 			goto common_load;
+ 		case BPF_LD | BPF_ABS | BPF_W:
+ 			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
+ common_load:		ctx->seen_ld_abs = true;
+ 			jmp_offset = func - (image + addrs[i]);
+ 			if (!func || !is_simm32(jmp_offset)) {
+ 				pr_err("unsupported bpf func %d addr %p image %p\n",
+ 				       K, func, image);
+ 				return -EINVAL;
+ 			}
+ 			if (BPF_MODE(insn->code) == BPF_ABS) {
+ 				/* mov %esi, imm32 */
+ 				EMIT1_off32(0xBE, K);
+ 			} else {
+ 				/* mov %rsi, x_reg */
+ 				EMIT_mov(BPF_REG_2, x_reg);
+ 				if (K) {
+ 					if (is_imm8(K))
+ 						/* add %esi, imm8 */
+ 						EMIT3(0x83, 0xC6, K);
  					else
- 						EMIT1_off32(0x3d, K); /* cmp imm32,%eax */
- 					break;
- 				case BPF_S_JMP_JSET_K:
- 					if (K <= 0xFF)
- 						EMIT2(0xa8, K); /* test imm8,%al */
- 					else if (!(K & 0xFFFF00FF))
- 						EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */
- 					else if (K <= 0xFFFF) {
- 						EMIT2(0x66, 0xa9); /* test imm16,%ax */
- 						EMIT(K, 2);
- 					} else {
- 						EMIT1_off32(0xa9, K); /* test imm32,%eax */
- 					}
- 					break;
+ 						/* add %esi, imm32 */
+ 						EMIT2_off32(0x81, 0xC6, K);
  				}
- 				if (filter[i].jt != 0) {
- 					if (filter[i].jf && f_offset)
- 						t_offset += is_near(f_offset) ? 2 : 5;
- 					EMIT_COND_JMP(t_op, t_offset);
- 					if (filter[i].jf)
- 						EMIT_JMP(f_offset);
- 					break;
- 				}
- 				EMIT_COND_JMP(f_op, f_offset);
- 				break;
- 			default:
- 				/* hmm, too complex filter, give up with jit compiler */
- 				goto out;
  			}
- 			ilen = prog - temp;
- 			if (image) {
- 				if (unlikely(proglen + ilen > oldproglen)) {
- 					pr_err("bpb_jit_compile fatal error\n");
- 					kfree(addrs);
- 					module_free(NULL, header);
- 					return;
- 				}
- 				memcpy(image + proglen, temp, ilen);
+ 			/* skb pointer is in R6 (%rbx); it will be copied into
+ 			 * %rdi if a skb_copy_bits() call is necessary.
+ 			 * sk_load_* helpers also use %r10 and %r9d.
+ 			 * See bpf_jit.S
+ 			 */
+ 			EMIT1_off32(0xE8, jmp_offset); /* call */
+ 			break;
+ 
+ 		case BPF_LD | BPF_IND | BPF_H:
+ 			func = sk_load_half;
+ 			goto common_load;
+ 		case BPF_LD | BPF_ABS | BPF_H:
+ 			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
+ 			goto common_load;
+ 		case BPF_LD | BPF_IND | BPF_B:
+ 			func = sk_load_byte;
+ 			goto common_load;
+ 		case BPF_LD | BPF_ABS | BPF_B:
+ 			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
+ 			goto common_load;
+ 
+ 		case BPF_JMP | BPF_EXIT:
+ 			if (i != insn_cnt - 1) {
+ 				jmp_offset = ctx->cleanup_addr - addrs[i];
+ 				goto emit_jmp;
  			}
- 			proglen += ilen;
- 			addrs[i] = proglen;
- 			prog = temp;
+ 			/* update cleanup_addr */
+ 			ctx->cleanup_addr = proglen;
+ 			/* mov rbx, qword ptr [rbp-X] */
+ 			EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize);
+ 			/* mov r13, qword ptr [rbp-X] */
+ 			EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8);
+ 			/* mov r14, qword ptr [rbp-X] */
+ 			EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16);
+ 			/* mov r15, qword ptr [rbp-X] */
+ 			EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24);
+ 
+ 			EMIT1(0xC9); /* leave */
+ 			EMIT1(0xC3); /* ret */
+ 			break;
+ 
+ 		default:
+ 			/* By design the x64 JIT should support all BPF
+ 			 * instructions. This error will be seen if a new
+ 			 * instruction was added to the interpreter but not
+ 			 * to the JIT, or if there is junk in sk_filter.
+ 			 */
+ 			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
+ 			return -EINVAL;
  		}
- 		/* last bpf instruction is always a RET :
- 		 * use it to give the cleanup instruction(s) addr
- 		 */
- 		cleanup_addr = proglen - 1; /* ret */
- 		if (seen_or_pass0)
- 			cleanup_addr -= 1; /* leaveq */
- 		if (seen_or_pass0 & SEEN_XREG)
- 			cleanup_addr -= 4; /* mov  -8(%rbp),%rbx */
  
+ 		ilen = prog - temp;
+ 		if (image) {
+ 			if (unlikely(proglen + ilen > oldproglen)) {
+ 				pr_err("bpf_jit_compile fatal error\n");
+ 				return -EFAULT;
+ 			}
+ 			memcpy(image + proglen, temp, ilen);
+ 		}
+ 		proglen += ilen;
+ 		addrs[i] = proglen;
+ 		prog = temp;
+ 	}
+ 	return proglen;
+ }
+ 
+ void bpf_jit_compile(struct sk_filter *prog)
+ {
+ }
+ 
+ void bpf_int_jit_compile(struct sk_filter *prog)
+ {
+ 	struct bpf_binary_header *header = NULL;
+ 	int proglen, oldproglen = 0;
+ 	struct jit_context ctx = {};
+ 	u8 *image = NULL;
+ 	int *addrs;
+ 	int pass;
+ 	int i;
+ 
+ 	if (!bpf_jit_enable)
+ 		return;
+ 
+ 	if (!prog || !prog->len)
+ 		return;
+ 
+ 	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
+ 	if (!addrs)
+ 		return;
+ 
+ 	/* Before the first pass, make a rough estimate of addrs[]:
+ 	 * each BPF instruction is translated to less than 64 bytes.
+ 	 */
+ 	for (proglen = 0, i = 0; i < prog->len; i++) {
+ 		proglen += 64;
+ 		addrs[i] = proglen;
+ 	}
+ 	ctx.cleanup_addr = proglen;
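
The loop below iterates because jump encodings depend on offsets that are
only known after encoding; a sketch of the convergence (not from the
patch):

    /* pass 0 : addrs[] assumes 64 bytes per instruction, so jumps
     *          conservatively take imm32 forms;
     * pass N : do_jit() re-emits using the previous pass's addrs[];
     *          offsets that now fit in imm8 shrink proglen, which can
     *          let further jumps shrink too;
     * done   : when proglen == oldproglen the layout is a fixed point;
     *          only then is the image allocated and filled in.
     */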
+ 
+ 	for (pass = 0; pass < 10; pass++) {
+ 		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
+ 		if (proglen <= 0) {
+ 			image = NULL;
+ 			if (header)
+ 				module_free(NULL, header);
+ 			goto out;
+ 		}
  		if (image) {
  			if (proglen != oldproglen)
- 				pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
+ 				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
+ 				       proglen, oldproglen);
  			break;
  		}
  		if (proglen == oldproglen) {
@@@ -766,17 -918,16 +918,16 @@@
  	}
  
  	if (bpf_jit_enable > 1)
- 		bpf_jit_dump(flen, proglen, pass, image);
+ 		bpf_jit_dump(prog->len, proglen, 0, image);
  
  	if (image) {
  		bpf_flush_icache(header, image + proglen);
  		set_memory_ro((unsigned long)header, header->pages);
- 		fp->bpf_func = (void *)image;
- 		fp->jited = 1;
+ 		prog->bpf_func = (void *)image;
+ 		prog->jited = 1;
  	}
  out:
  	kfree(addrs);
- 	return;
  }
  
  static void bpf_jit_free_deferred(struct work_struct *work)
diff --combined drivers/net/bonding/bond_options.c
index 8320702,6dc49da..03bea52
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@@ -70,6 -70,8 +70,8 @@@ static int bond_option_mode_set(struct 
  				const struct bond_opt_value *newval);
  static int bond_option_slaves_set(struct bonding *bond,
  				  const struct bond_opt_value *newval);
+ static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
+ 				  const struct bond_opt_value *newval);
  
  
  static const struct bond_opt_value bond_mode_tbl[] = {
@@@ -125,7 -127,6 +127,7 @@@ static const struct bond_opt_value bond
  static const struct bond_opt_value bond_intmax_tbl[] = {
  	{ "off",     0,       BOND_VALFLAG_DEFAULT},
  	{ "maxval",  INT_MAX, BOND_VALFLAG_MAX},
 +	{ NULL,      -1,      0}
  };
  
  static const struct bond_opt_value bond_lacp_rate_tbl[] = {
@@@ -180,6 -181,12 +182,12 @@@ static const struct bond_opt_value bond
  	{ NULL,      -1,      0},
  };
  
+ static const struct bond_opt_value bond_tlb_dynamic_lb_tbl[] = {
+ 	{ "off", 0,  0},
+ 	{ "on",  1,  BOND_VALFLAG_DEFAULT},
+ 	{ NULL,  -1, 0}
+ };
+ 
  static const struct bond_option bond_opts[] = {
  	[BOND_OPT_MODE] = {
  		.id = BOND_OPT_MODE,
@@@ -200,7 -207,7 +208,7 @@@
  	[BOND_OPT_XMIT_HASH] = {
  		.id = BOND_OPT_XMIT_HASH,
  		.name = "xmit_hash_policy",
- 		.desc = "balance-xor and 802.3ad hashing method",
+ 		.desc = "balance-xor, 802.3ad, and tlb hashing method",
  		.values = bond_xmit_hashtype_tbl,
  		.set = bond_option_xmit_hash_policy_set
  	},
@@@ -365,9 -372,33 +373,33 @@@
  		.flags = BOND_OPTFLAG_RAWVAL,
  		.set = bond_option_slaves_set
  	},
+ 	[BOND_OPT_TLB_DYNAMIC_LB] = {
+ 		.id = BOND_OPT_TLB_DYNAMIC_LB,
+ 		.name = "tlb_dynamic_lb",
+ 		.desc = "Enable dynamic flow shuffling",
+ 		.unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_TLB)),
+ 		.values = bond_tlb_dynamic_lb_tbl,
+ 		.flags = BOND_OPTFLAG_IFDOWN,
+ 		.set = bond_option_tlb_dynamic_lb_set,
+ 	},
  	{ }
  };
  
+ /* Searches for an option by name */
+ const struct bond_option *bond_opt_get_by_name(const char *name)
+ {
+ 	const struct bond_option *opt;
+ 	int option;
+ 
+ 	for (option = 0; option < BOND_OPT_LAST; option++) {
+ 		opt = bond_opt_get(option);
+ 		if (opt && !strcmp(opt->name, name))
+ 			return opt;
+ 	}
+ 
+ 	return NULL;
+ }
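
A minimal usage sketch for the new lookup helper (hypothetical caller, not
from the patch):

    const struct bond_option *opt = bond_opt_get_by_name("tlb_dynamic_lb");

    if (opt)
        pr_info("found option %d (%s)\n", opt->id, opt->name);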
+ 
  /* Searches for a value in opt's values[] table */
  const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val)
  {
@@@ -746,6 -777,10 +778,10 @@@ static int bond_option_active_slave_set
  	return ret;
  }
  
+ /* There are two tricky bits here.  First, if MII monitoring is activated, then
+  * we must disable ARP monitoring.  Second, if the timer isn't running, we must
+  * start it.
+  */
  static int bond_option_miimon_set(struct bonding *bond,
  				  const struct bond_opt_value *newval)
  {
@@@ -784,6 -819,10 +820,10 @@@
  	return 0;
  }
  
+ /* Set up and down delays. These must be multiples of the
+  * MII monitoring value, and are stored internally as the multiplier.
+  * Thus, we must translate to ms for the real world.
+  */
  static int bond_option_updelay_set(struct bonding *bond,
  				   const struct bond_opt_value *newval)
  {
@@@ -842,6 -881,10 +882,10 @@@ static int bond_option_use_carrier_set(
  	return 0;
  }
  
+ /* There are two tricky bits here.  First, if ARP monitoring is activated, then
+  * we must disable MII monitoring.  Second, if the ARP timer isn't running,
+  * we must start it.
+  */
  static int bond_option_arp_interval_set(struct bonding *bond,
  					const struct bond_opt_value *newval)
  {
@@@ -1338,3 -1381,13 +1382,13 @@@ err_no_cmd
  	ret = -EPERM;
  	goto out;
  }
+ 
+ static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
+ 					  const struct bond_opt_value *newval)
+ {
+ 	pr_info("%s: Setting dynamic-lb to %s (%llu)\n",
+ 		bond->dev->name, newval->string, newval->value);
+ 	bond->params.tlb_dynamic_lb = newval->value;
+ 
+ 	return 0;
+ }
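
A note on the two bonding additions above: bond_opt_get_by_name() lets callers
resolve an option without knowing its BOND_OPT_* id, and every values[] table,
including the new bond_tlb_dynamic_lb_tbl, ends with a NULL-string entry. A
minimal lookup sketch under those assumptions (bond_opt_dump() is hypothetical,
not part of the patch):

	static void bond_opt_dump(const char *name)
	{
		const struct bond_option *opt = bond_opt_get_by_name(name);
		const struct bond_opt_value *val;

		if (!opt || !opt->values)	/* RAWVAL options have no table */
			return;
		/* walk the NULL-string-terminated values[] table */
		for (val = opt->values; val->string; val++)
			pr_info("%s: %s = %llu\n",
				opt->name, val->string, val->value);
	}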
diff --combined drivers/net/ethernet/altera/altera_sgdma.c
index 99cc56f,dbd40e1..5d2d911
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@@ -20,8 -20,8 +20,8 @@@
  #include "altera_sgdmahw.h"
  #include "altera_sgdma.h"
  
 -static void sgdma_setup_descrip(struct sgdma_descrip *desc,
 -				struct sgdma_descrip *ndesc,
 +static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
 +				struct sgdma_descrip __iomem *ndesc,
  				dma_addr_t ndesc_phys,
  				dma_addr_t raddr,
  				dma_addr_t waddr,
@@@ -31,17 -31,17 +31,17 @@@
  				int wfixed);
  
  static int sgdma_async_write(struct altera_tse_private *priv,
 -			      struct sgdma_descrip *desc);
 +			      struct sgdma_descrip __iomem *desc);
  
  static int sgdma_async_read(struct altera_tse_private *priv);
  
  static dma_addr_t
  sgdma_txphysaddr(struct altera_tse_private *priv,
 -		 struct sgdma_descrip *desc);
 +		 struct sgdma_descrip __iomem *desc);
  
  static dma_addr_t
  sgdma_rxphysaddr(struct altera_tse_private *priv,
 -		 struct sgdma_descrip *desc);
 +		 struct sgdma_descrip __iomem *desc);
  
  static int sgdma_txbusy(struct altera_tse_private *priv);
  
@@@ -79,8 -79,7 +79,8 @@@ int sgdma_initialize(struct altera_tse_
  	priv->rxdescphys = (dma_addr_t) 0;
  	priv->txdescphys = (dma_addr_t) 0;
  
 -	priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
 +	priv->rxdescphys = dma_map_single(priv->device,
 +					  (void __force *)priv->rx_dma_desc,
  					  priv->rxdescmem, DMA_BIDIRECTIONAL);
  
  	if (dma_mapping_error(priv->device, priv->rxdescphys)) {
@@@ -89,8 -88,7 +89,8 @@@
  		return -EINVAL;
  	}
  
 -	priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
 +	priv->txdescphys = dma_map_single(priv->device,
 +					  (void __force *)priv->tx_dma_desc,
  					  priv->txdescmem, DMA_TO_DEVICE);
  
  	if (dma_mapping_error(priv->device, priv->txdescphys)) {
@@@ -100,8 -98,8 +100,8 @@@
  	}
  
  	/* Initialize descriptor memory to all 0's, sync memory to cache */
 -	memset(priv->tx_dma_desc, 0, priv->txdescmem);
 -	memset(priv->rx_dma_desc, 0, priv->rxdescmem);
 +	memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
 +	memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
  
  	dma_sync_single_for_device(priv->device, priv->txdescphys,
  				   priv->txdescmem, DMA_TO_DEVICE);
@@@ -128,15 -126,22 +128,15 @@@ void sgdma_uninitialize(struct altera_t
   */
  void sgdma_reset(struct altera_tse_private *priv)
  {
 -	u32 *ptxdescripmem = priv->tx_dma_desc;
 -	u32 txdescriplen   = priv->txdescmem;
 -	u32 *prxdescripmem = priv->rx_dma_desc;
 -	u32 rxdescriplen   = priv->rxdescmem;
 -	struct sgdma_csr *ptxsgdma = priv->tx_dma_csr;
 -	struct sgdma_csr *prxsgdma = priv->rx_dma_csr;
 -
  	/* Initialize descriptor memory to 0 */
 -	memset(ptxdescripmem, 0, txdescriplen);
 -	memset(prxdescripmem, 0, rxdescriplen);
 +	memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
 +	memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
  
 -	iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
 -	iowrite32(0, &ptxsgdma->control);
 +	csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
 +	csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
  
 -	iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
 -	iowrite32(0, &prxsgdma->control);
 +	csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
 +	csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
  }
  
  /* For SGDMA, interrupts remain enabled after initially enabling,
@@@ -162,14 -167,14 +162,14 @@@ void sgdma_disable_txirq(struct altera_
  
  void sgdma_clear_rxirq(struct altera_tse_private *priv)
  {
 -	struct sgdma_csr *csr = priv->rx_dma_csr;
 -	tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
 +	tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
 +		    SGDMA_CTRLREG_CLRINT);
  }
  
  void sgdma_clear_txirq(struct altera_tse_private *priv)
  {
 -	struct sgdma_csr *csr = priv->tx_dma_csr;
 -	tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
 +	tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
 +		    SGDMA_CTRLREG_CLRINT);
  }
  
  /* transmits buffer through SGDMA. Returns number of buffers
@@@ -179,11 -184,11 +179,10 @@@
   */
  int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
  {
- 	struct sgdma_descrip __iomem *descbase =
- 		(struct sgdma_descrip __iomem *)priv->tx_dma_desc;
 -	int pktstx = 0;
 -	struct sgdma_descrip *descbase = priv->tx_dma_desc;
++	struct sgdma_descrip __iomem *descbase = priv->tx_dma_desc;
  
 -	struct sgdma_descrip *cdesc = &descbase[0];
 -	struct sgdma_descrip *ndesc = &descbase[1];
 +	struct sgdma_descrip __iomem *cdesc = &descbase[0];
 +	struct sgdma_descrip __iomem *ndesc = &descbase[1];
  
  	/* wait 'til the tx sgdma is ready for the next transmit request */
  	if (sgdma_txbusy(priv))
@@@ -199,7 -204,7 +198,7 @@@
  			    0,				/* read fixed */
  			    SGDMA_CONTROL_WR_FIXED);	/* Generate SOP */
  
 -	pktstx = sgdma_async_write(priv, cdesc);
 +	sgdma_async_write(priv, cdesc);
  
  	/* enqueue the request to the pending transmit queue */
  	queue_tx(priv, buffer);
@@@ -213,10 -218,10 +212,10 @@@
  u32 sgdma_tx_completions(struct altera_tse_private *priv)
  {
  	u32 ready = 0;
 -	struct sgdma_descrip *desc = priv->tx_dma_desc;
  
  	if (!sgdma_txbusy(priv) &&
 -	    ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
 +	    ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
 +	     & SGDMA_CONTROL_HW_OWNED) == 0) &&
  	    (dequeue_tx(priv))) {
  		ready = 1;
  	}
@@@ -240,31 -245,32 +239,30 @@@ void sgdma_add_rx_desc(struct altera_ts
   */
  u32 sgdma_rx_status(struct altera_tse_private *priv)
  {
- 	struct sgdma_descrip __iomem *base =
- 		(struct sgdma_descrip __iomem *)priv->rx_dma_desc;
 -	struct sgdma_csr *csr = priv->rx_dma_csr;
 -	struct sgdma_descrip *base = priv->rx_dma_desc;
 -	struct sgdma_descrip *desc = NULL;
 -	int pktsrx;
 -	unsigned int rxstatus = 0;
 -	unsigned int pktlength = 0;
 -	unsigned int pktstatus = 0;
++	struct sgdma_descrip __iomem *base = priv->rx_dma_desc;
 +	struct sgdma_descrip __iomem *desc = NULL;
  	struct tse_buffer *rxbuffer = NULL;
 +	unsigned int rxstatus = 0;
  
 -	u32 sts = ioread32(&csr->status);
 +	u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));
  
  	desc = &base[0];
  	if (sts & SGDMA_STSREG_EOP) {
 +		unsigned int pktlength = 0;
 +		unsigned int pktstatus = 0;
  		dma_sync_single_for_cpu(priv->device,
  					priv->rxdescphys,
  					priv->sgdmadesclen,
  					DMA_FROM_DEVICE);
  
 -		pktlength = desc->bytes_xferred;
 -		pktstatus = desc->status & 0x3f;
 -		rxstatus = pktstatus;
 +		pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
 +		pktstatus = csrrd8(desc, sgdma_descroffs(status));
 +		rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
  		rxstatus = rxstatus << 16;
  		rxstatus |= (pktlength & 0xffff);
  
  		if (rxstatus) {
 -			desc->status = 0;
 +			csrwr8(0, desc, sgdma_descroffs(status));
  
  			rxbuffer = dequeue_rx(priv);
  			if (rxbuffer == NULL)
@@@ -272,12 -278,12 +270,12 @@@
  					    "sgdma rx and rx queue empty!\n");
  
  			/* Clear control */
 -			iowrite32(0, &csr->control);
 +			csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
  			/* clear status */
 -			iowrite32(0xf, &csr->status);
 +			csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));
  
  			/* kick the rx sgdma after reaping this descriptor */
 -			pktsrx = sgdma_async_read(priv);
 +			sgdma_async_read(priv);
  
  		} else {
  			/* If the SGDMA indicated an end of packet on recv,
@@@ -291,11 -297,10 +289,11 @@@
  			 */
  			netdev_err(priv->dev,
  				   "SGDMA RX Error Info: %x, %x, %x\n",
 -				   sts, desc->status, rxstatus);
 +				   sts, csrrd8(desc, sgdma_descroffs(status)),
 +				   rxstatus);
  		}
  	} else if (sts == 0) {
 -		pktsrx = sgdma_async_read(priv);
 +		sgdma_async_read(priv);
  	}
  
  	return rxstatus;
@@@ -303,8 -308,8 +301,8 @@@
  
  
  /* Private functions */
 -static void sgdma_setup_descrip(struct sgdma_descrip *desc,
 -				struct sgdma_descrip *ndesc,
 +static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
 +				struct sgdma_descrip __iomem *ndesc,
  				dma_addr_t ndesc_phys,
  				dma_addr_t raddr,
  				dma_addr_t waddr,
@@@ -314,30 -319,27 +312,30 @@@
  				int wfixed)
  {
  	/* Clear the next descriptor as not owned by hardware */
 -	u32 ctrl = ndesc->control;
 +
 +	u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
  	ctrl &= ~SGDMA_CONTROL_HW_OWNED;
 -	ndesc->control = ctrl;
 +	csrwr8(ctrl, ndesc, sgdma_descroffs(control));
  
 -	ctrl = 0;
  	ctrl = SGDMA_CONTROL_HW_OWNED;
  	ctrl |= generate_eop;
  	ctrl |= rfixed;
  	ctrl |= wfixed;
  
  	/* Channel is implicitly zero, initialized to 0 by default */
 -
 -	desc->raddr = raddr;
 -	desc->waddr = waddr;
 -	desc->next = lower_32_bits(ndesc_phys);
 -	desc->control = ctrl;
 -	desc->status = 0;
 -	desc->rburst = 0;
 -	desc->wburst = 0;
 -	desc->bytes = length;
 -	desc->bytes_xferred = 0;
 +	csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
 +	csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));
 +
 +	csrwr32(0, desc, sgdma_descroffs(pad1));
 +	csrwr32(0, desc, sgdma_descroffs(pad2));
 +	csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));
 +
 +	csrwr8(ctrl, desc, sgdma_descroffs(control));
 +	csrwr8(0, desc, sgdma_descroffs(status));
 +	csrwr8(0, desc, sgdma_descroffs(wburst));
 +	csrwr8(0, desc, sgdma_descroffs(rburst));
 +	csrwr16(length, desc, sgdma_descroffs(bytes));
 +	csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
  }
  
  /* If hardware is busy, don't restart async read.
@@@ -348,11 -350,10 +346,10 @@@
   */
  static int sgdma_async_read(struct altera_tse_private *priv)
  {
- 	struct sgdma_descrip __iomem *descbase =
- 		(struct sgdma_descrip __iomem *)priv->rx_dma_desc;
 -	struct sgdma_csr *csr = priv->rx_dma_csr;
 -	struct sgdma_descrip *descbase = priv->rx_dma_desc;
 -	struct sgdma_descrip *cdesc = &descbase[0];
 -	struct sgdma_descrip *ndesc = &descbase[1];
++	struct sgdma_descrip __iomem *descbase = priv->rx_dma_desc;
 +
 +	struct sgdma_descrip __iomem *cdesc = &descbase[0];
 +	struct sgdma_descrip __iomem *ndesc = &descbase[1];
  
  	struct tse_buffer *rxbuffer = NULL;
  
@@@ -378,13 -379,11 +375,13 @@@
  					   priv->sgdmadesclen,
  					   DMA_TO_DEVICE);
  
 -		iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
 -			  &csr->next_descrip);
 +		csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
 +			priv->rx_dma_csr,
 +			sgdma_csroffs(next_descrip));
  
 -		iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
 -			  &csr->control);
 +		csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
 +			priv->rx_dma_csr,
 +			sgdma_csroffs(control));
  
  		return 1;
  	}
@@@ -393,32 -392,32 +390,32 @@@
  }
  
  static int sgdma_async_write(struct altera_tse_private *priv,
 -			     struct sgdma_descrip *desc)
 +			     struct sgdma_descrip __iomem *desc)
  {
 -	struct sgdma_csr *csr = priv->tx_dma_csr;
 -
  	if (sgdma_txbusy(priv))
  		return 0;
  
  	/* clear control and status */
 -	iowrite32(0, &csr->control);
 -	iowrite32(0x1f, &csr->status);
 +	csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
 +	csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
  
  	dma_sync_single_for_device(priv->device, priv->txdescphys,
  				   priv->sgdmadesclen, DMA_TO_DEVICE);
  
 -	iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
 -		  &csr->next_descrip);
 +	csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
 +		priv->tx_dma_csr,
 +		sgdma_csroffs(next_descrip));
  
 -	iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
 -		  &csr->control);
 +	csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
 +		priv->tx_dma_csr,
 +		sgdma_csroffs(control));
  
  	return 1;
  }
  
  static dma_addr_t
  sgdma_txphysaddr(struct altera_tse_private *priv,
 -		 struct sgdma_descrip *desc)
 +		 struct sgdma_descrip __iomem *desc)
  {
  	dma_addr_t paddr = priv->txdescmem_busaddr;
  	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
@@@ -427,7 -426,7 +424,7 @@@
  
  static dma_addr_t
  sgdma_rxphysaddr(struct altera_tse_private *priv,
 -		 struct sgdma_descrip *desc)
 +		 struct sgdma_descrip __iomem *desc)
  {
  	dma_addr_t paddr = priv->rxdescmem_busaddr;
  	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
@@@ -516,8 -515,8 +513,8 @@@ queue_rx_peekhead(struct altera_tse_pri
   */
  static int sgdma_rxbusy(struct altera_tse_private *priv)
  {
 -	struct sgdma_csr *csr = priv->rx_dma_csr;
 -	return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
 +	return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
 +		       & SGDMA_STSREG_BUSY;
  }
  
  /* waits for the tx sgdma to finish its current operation, returns 0
@@@ -526,14 -525,13 +523,14 @@@
  static int sgdma_txbusy(struct altera_tse_private *priv)
  {
  	int delay = 0;
 -	struct sgdma_csr *csr = priv->tx_dma_csr;
  
  	/* if DMA is busy, wait for current transaction to finish */
 -	while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
 +	while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
 +		& SGDMA_STSREG_BUSY) && (delay++ < 100))
  		udelay(1);
  
 -	if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
 +	if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
 +	    & SGDMA_STSREG_BUSY) {
  		netdev_err(priv->dev, "timeout waiting for tx dma\n");
  		return 1;
  	}
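
The altera_sgdma.c changes in this merge convert every descriptor and CSR
access from pointer dereferences on struct sgdma_csr/sgdma_descrip to
offset-based csrrd/csrwr helpers, so __iomem memory is only ever touched
through MMIO accessors. The helpers are defined elsewhere in the driver; a
sketch of their likely shape (an assumption, not quoted from the patch):

	/* field offsets within the CSR and descriptor layouts */
	#define sgdma_csroffs(a)	offsetof(struct sgdma_csr, a)
	#define sgdma_descroffs(a)	offsetof(struct sgdma_descrip, a)

	/* typed MMIO accessors over a void __iomem * base */
	#define csrrd32(csr, offs)	readl((char __iomem *)(csr) + (offs))
	#define csrwr32(val, csr, offs)	writel(val, (char __iomem *)(csr) + (offs))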
diff --combined drivers/net/ethernet/altera/altera_tse_ethtool.c
index 54c25ef,d817e28..be72e1e
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@@ -96,89 -96,54 +96,89 @@@ static void tse_fill_stats(struct net_d
  			   u64 *buf)
  {
  	struct altera_tse_private *priv = netdev_priv(dev);
 -	struct altera_tse_mac *mac = priv->mac_dev;
  	u64 ext;
  
 -	buf[0] = ioread32(&mac->frames_transmitted_ok);
 -	buf[1] = ioread32(&mac->frames_received_ok);
 -	buf[2] = ioread32(&mac->frames_check_sequence_errors);
 -	buf[3] = ioread32(&mac->alignment_errors);
 +	buf[0] = csrrd32(priv->mac_dev,
 +			 tse_csroffs(frames_transmitted_ok));
 +	buf[1] = csrrd32(priv->mac_dev,
 +			 tse_csroffs(frames_received_ok));
 +	buf[2] = csrrd32(priv->mac_dev,
 +			 tse_csroffs(frames_check_sequence_errors));
 +	buf[3] = csrrd32(priv->mac_dev,
 +			 tse_csroffs(alignment_errors));
  
  	/* Extended aOctetsTransmittedOK counter */
 -	ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32;
 -	ext |= ioread32(&mac->octets_transmitted_ok);
 +	ext = (u64) csrrd32(priv->mac_dev,
 +			    tse_csroffs(msb_octets_transmitted_ok)) << 32;
 +
 +	ext |= csrrd32(priv->mac_dev,
 +		       tse_csroffs(octets_transmitted_ok));
  	buf[4] = ext;
  
  	/* Extended aOctetsReceivedOK counter */
 -	ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32;
 -	ext |= ioread32(&mac->octets_received_ok);
 +	ext = (u64) csrrd32(priv->mac_dev,
 +			    tse_csroffs(msb_octets_received_ok)) << 32;
 +
 +	ext |= csrrd32(priv->mac_dev,
 +		       tse_csroffs(octets_received_ok));
  	buf[5] = ext;
  
 -	buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames);
 -	buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames);
 -	buf[8] = ioread32(&mac->if_in_errors);
 -	buf[9] = ioread32(&mac->if_out_errors);
 -	buf[10] = ioread32(&mac->if_in_ucast_pkts);
 -	buf[11] = ioread32(&mac->if_in_multicast_pkts);
 -	buf[12] = ioread32(&mac->if_in_broadcast_pkts);
 -	buf[13] = ioread32(&mac->if_out_discards);
 -	buf[14] = ioread32(&mac->if_out_ucast_pkts);
 -	buf[15] = ioread32(&mac->if_out_multicast_pkts);
 -	buf[16] = ioread32(&mac->if_out_broadcast_pkts);
 -	buf[17] = ioread32(&mac->ether_stats_drop_events);
 +	buf[6] = csrrd32(priv->mac_dev,
 +			 tse_csroffs(tx_pause_mac_ctrl_frames));
 +	buf[7] = csrrd32(priv->mac_dev,
 +			 tse_csroffs(rx_pause_mac_ctrl_frames));
 +	buf[8] = csrrd32(priv->mac_dev,
 +			 tse_csroffs(if_in_errors));
 +	buf[9] = csrrd32(priv->mac_dev,
 +			 tse_csroffs(if_out_errors));
 +	buf[10] = csrrd32(priv->mac_dev,
 +			  tse_csroffs(if_in_ucast_pkts));
 +	buf[11] = csrrd32(priv->mac_dev,
 +			  tse_csroffs(if_in_multicast_pkts));
 +	buf[12] = csrrd32(priv->mac_dev,
 +			  tse_csroffs(if_in_broadcast_pkts));
 +	buf[13] = csrrd32(priv->mac_dev,
 +			  tse_csroffs(if_out_discards));
 +	buf[14] = csrrd32(priv->mac_dev,
 +			  tse_csroffs(if_out_ucast_pkts));
 +	buf[15] = csrrd32(priv->mac_dev,
 +			  tse_csroffs(if_out_multicast_pkts));
 +	buf[16] = csrrd32(priv->mac_dev,
 +			  tse_csroffs(if_out_broadcast_pkts));
 +	buf[17] = csrrd32(priv->mac_dev,
 +			  tse_csroffs(ether_stats_drop_events));
  
  	/* Extended etherStatsOctets counter */
 -	ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32;
 -	ext |= ioread32(&mac->ether_stats_octets);
 +	ext = (u64) csrrd32(priv->mac_dev,
 +			    tse_csroffs(msb_ether_stats_octets)) << 32;
 +	ext |= csrrd32(priv->mac_dev,
 +		       tse_csroffs(ether_stats_octets));
  	buf[18] = ext;
  
 -	buf[19] = ioread32(&mac->ether_stats_pkts);
 -	buf[20] = ioread32(&mac->ether_stats_undersize_pkts);
 -	buf[21] = ioread32(&mac->ether_stats_oversize_pkts);
 -	buf[22] = ioread32(&mac->ether_stats_pkts_64_octets);
 -	buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets);
 -	buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets);
 -	buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets);
 -	buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets);
 -	buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets);
 -	buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets);
 -	buf[29] = ioread32(&mac->ether_stats_jabbers);
 -	buf[30] = ioread32(&mac->ether_stats_fragments);
 +	buf[19] = csrrd32(priv->mac_dev,
 +			  tse_csroffs(ether_stats_pkts));
 +	buf[20] = csrrd32(priv->mac_dev,
 +			  tse_csroffs(ether_stats_undersize_pkts));
 +	buf[21] = csrrd32(priv->mac_dev,
 +			  tse_csroffs(ether_stats_oversize_pkts));
 +	buf[22] = csrrd32(priv->mac_dev,
 +			  tse_csroffs(ether_stats_pkts_64_octets));
 +	buf[23] = csrrd32(priv->mac_dev,
 +			  tse_csroffs(ether_stats_pkts_65to127_octets));
 +	buf[24] = csrrd32(priv->mac_dev,
 +			  tse_csroffs(ether_stats_pkts_128to255_octets));
 +	buf[25] = csrrd32(priv->mac_dev,
 +			  tse_csroffs(ether_stats_pkts_256to511_octets));
 +	buf[26] = csrrd32(priv->mac_dev,
 +			  tse_csroffs(ether_stats_pkts_512to1023_octets));
 +	buf[27] = csrrd32(priv->mac_dev,
 +			  tse_csroffs(ether_stats_pkts_1024to1518_octets));
 +	buf[28] = csrrd32(priv->mac_dev,
 +			  tse_csroffs(ether_stats_pkts_1519tox_octets));
 +	buf[29] = csrrd32(priv->mac_dev,
 +			  tse_csroffs(ether_stats_jabbers));
 +	buf[30] = csrrd32(priv->mac_dev,
 +			  tse_csroffs(ether_stats_fragments));
  }
  
  static int tse_sset_count(struct net_device *dev, int sset)
@@@ -213,6 -178,7 +213,6 @@@ static void tse_get_regs(struct net_dev
  {
  	int i;
  	struct altera_tse_private *priv = netdev_priv(dev);
 -	u32 *tse_mac_regs = (u32 *)priv->mac_dev;
  	u32 *buf = regbuf;
  
  	/* Set version to a known value, so ethtool knows
@@@ -230,7 -196,7 +230,7 @@@
  	regs->version = 1;
  
  	for (i = 0; i < TSE_NUM_REGS; i++)
 -		buf[i] = ioread32(&tse_mac_regs[i]);
 +		buf[i] = csrrd32(priv->mac_dev, i * 4);
  }
  
  static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@@ -271,5 -237,5 +271,5 @@@ static const struct ethtool_ops tse_eth
  
  void altera_tse_set_ethtool_ops(struct net_device *netdev)
  {
- 	SET_ETHTOOL_OPS(netdev, &tse_ethtool_ops);
+ 	netdev->ethtool_ops = &tse_ethtool_ops;
  }
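
The final hunk above replaces SET_ETHTOOL_OPS(), which was only a thin wrapper
around a single assignment, roughly:

	#define SET_ETHTOOL_OPS(netdev, ops) \
		((netdev)->ethtool_ops = (ops))

so open-coding the assignment, here and in be_main.c below, drops the macro
indirection with no behavioural change.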
diff --combined drivers/net/ethernet/emulex/benet/be_main.c
index dc19bc5,dcc5e5c..0640f12
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@@ -134,7 -134,7 +134,7 @@@ static void be_queue_free(struct be_ada
  }
  
  static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
- 		u16 len, u16 entry_size)
+ 			  u16 len, u16 entry_size)
  {
  	struct be_dma_mem *mem = &q->dma_mem;
  
@@@ -154,7 -154,7 +154,7 @@@ static void be_reg_intr_set(struct be_a
  	u32 reg, enabled;
  
  	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
- 				&reg);
+ 			      &reg);
  	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
  
  	if (!enabled && enable)
@@@ -165,7 -165,7 +165,7 @@@
  		return;
  
  	pci_write_config_dword(adapter->pdev,
- 			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
+ 			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
  }
  
  static void be_intr_set(struct be_adapter *adapter, bool enable)
@@@ -206,12 -206,11 +206,11 @@@ static void be_txq_notify(struct be_ada
  }
  
  static void be_eq_notify(struct be_adapter *adapter, u16 qid,
- 		bool arm, bool clear_int, u16 num_popped)
+ 			 bool arm, bool clear_int, u16 num_popped)
  {
  	u32 val = 0;
  	val |= qid & DB_EQ_RING_ID_MASK;
- 	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
- 			DB_EQ_RING_ID_EXT_MASK_SHIFT);
+ 	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
  
  	if (adapter->eeh_error)
  		return;
@@@ -477,7 -476,7 +476,7 @@@ static void populate_be_v2_stats(struc
  	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
  	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
  	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
- 	if (be_roce_supported(adapter))  {
+ 	if (be_roce_supported(adapter)) {
  		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
  		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
  		drvs->rx_roce_frames = port_stats->roce_frames_received;
@@@ -491,8 -490,7 +490,7 @@@ static void populate_lancer_stats(struc
  {
  
  	struct be_drv_stats *drvs = &adapter->drv_stats;
- 	struct lancer_pport_stats *pport_stats =
- 					pport_stats_from_cmd(adapter);
+ 	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
  
  	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
  	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
@@@ -539,8 -537,7 +537,7 @@@ static void accumulate_16bit_val(u32 *a
  }
  
  static void populate_erx_stats(struct be_adapter *adapter,
- 			struct be_rx_obj *rxo,
- 			u32 erx_stat)
+ 			       struct be_rx_obj *rxo, u32 erx_stat)
  {
  	if (!BEx_chip(adapter))
  		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
@@@ -579,7 -576,7 +576,7 @@@ void be_parse_stats(struct be_adapter *
  }
  
  static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
- 					struct rtnl_link_stats64 *stats)
+ 						struct rtnl_link_stats64 *stats)
  {
  	struct be_adapter *adapter = netdev_priv(netdev);
  	struct be_drv_stats *drvs = &adapter->drv_stats;
@@@ -660,7 -657,8 +657,8 @@@ void be_link_status_update(struct be_ad
  }
  
  static void be_tx_stats_update(struct be_tx_obj *txo,
- 			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
+ 			       u32 wrb_cnt, u32 copied, u32 gso_segs,
+ 			       bool stopped)
  {
  	struct be_tx_stats *stats = tx_stats(txo);
  
@@@ -676,7 -674,7 +674,7 @@@
  
  /* Determine number of WRB entries needed to xmit data in an skb */
  static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
- 								bool *dummy)
+ 			   bool *dummy)
  {
  	int cnt = (skb->len > skb->data_len);
  
@@@ -704,7 -702,7 +702,7 @@@ static inline void wrb_fill(struct be_e
  }
  
  static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
- 					struct sk_buff *skb)
+ 				     struct sk_buff *skb)
  {
  	u8 vlan_prio;
  	u16 vlan_tag;
@@@ -733,7 -731,8 +731,8 @@@ static u16 skb_ip_proto(struct sk_buff 
  }
  
  static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
- 		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
+ 			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
+ 			 bool skip_hw_vlan)
  {
  	u16 vlan_tag, proto;
  
@@@ -774,7 -773,7 +773,7 @@@
  }
  
  static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
- 		bool unmap_single)
+ 			  bool unmap_single)
  {
  	dma_addr_t dma;
  
@@@ -791,8 -790,8 +790,8 @@@
  }
  
  static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
- 		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
- 		bool skip_hw_vlan)
+ 			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
+ 			bool skip_hw_vlan)
  {
  	dma_addr_t busaddr;
  	int i, copied = 0;
@@@ -821,8 -820,7 +820,7 @@@
  	}
  
  	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- 		const struct skb_frag_struct *frag =
- 			&skb_shinfo(skb)->frags[i];
+ 		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
  		busaddr = skb_frag_dma_map(dev, frag, 0,
  					   skb_frag_size(frag), DMA_TO_DEVICE);
  		if (dma_mapping_error(dev, busaddr))
@@@ -927,8 -925,7 +925,7 @@@ static int be_vlan_tag_tx_chk(struct be
  	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
  }
  
- static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
- 				struct sk_buff *skb)
+ static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
  {
  	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
  }
@@@ -959,7 -956,7 +956,7 @@@ static struct sk_buff *be_lancer_xmit_w
  	 */
  	if (be_pvid_tagging_enabled(adapter) &&
  	    veh->h_vlan_proto == htons(ETH_P_8021Q))
- 			*skip_hw_vlan = true;
+ 		*skip_hw_vlan = true;
  
  	/* HW has a bug wherein it will calculate CSUM for VLAN
  	 * pkts even though it is disabled.
@@@ -1077,16 -1074,15 +1074,15 @@@ static int be_change_mtu(struct net_dev
  {
  	struct be_adapter *adapter = netdev_priv(netdev);
  	if (new_mtu < BE_MIN_MTU ||
- 			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
- 					(ETH_HLEN + ETH_FCS_LEN))) {
+ 	    new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
  		dev_info(&adapter->pdev->dev,
- 			"MTU must be between %d and %d bytes\n",
- 			BE_MIN_MTU,
- 			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
+ 			 "MTU must be between %d and %d bytes\n",
+ 			 BE_MIN_MTU,
+ 			 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
  		return -EINVAL;
  	}
  	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
- 			netdev->mtu, new_mtu);
+ 		 netdev->mtu, new_mtu);
  	netdev->mtu = new_mtu;
  	return 0;
  }
@@@ -1098,7 -1094,7 +1094,7 @@@
  static int be_vid_config(struct be_adapter *adapter)
  {
  	u16 vids[BE_NUM_VLANS_SUPPORTED];
- 	u16 num = 0, i;
+ 	u16 num = 0, i = 0;
  	int status = 0;
  
  	/* No need to further configure vids if in promiscuous mode */
@@@ -1109,13 -1105,10 +1105,10 @@@
  		goto set_vlan_promisc;
  
  	/* Construct VLAN Table to give to HW */
- 	for (i = 0; i < VLAN_N_VID; i++)
- 		if (adapter->vlan_tag[i])
- 			vids[num++] = cpu_to_le16(i);
- 
- 	status = be_cmd_vlan_config(adapter, adapter->if_handle,
- 				    vids, num, 0);
+ 	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
+ 		vids[num++] = cpu_to_le16(i);
  
+ 	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
  	if (status) {
  		/* Set to VLAN promisc mode as setting VLAN filter failed */
  		if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
@@@ -1160,16 -1153,16 +1153,16 @@@ static int be_vlan_add_vid(struct net_d
  	if (lancer_chip(adapter) && vid == 0)
  		return status;
  
- 	if (adapter->vlan_tag[vid])
+ 	if (test_bit(vid, adapter->vids))
  		return status;
  
- 	adapter->vlan_tag[vid] = 1;
+ 	set_bit(vid, adapter->vids);
  	adapter->vlans_added++;
  
  	status = be_vid_config(adapter);
  	if (status) {
  		adapter->vlans_added--;
- 		adapter->vlan_tag[vid] = 0;
+ 		clear_bit(vid, adapter->vids);
  	}
  
  	return status;
@@@ -1184,12 -1177,12 +1177,12 @@@ static int be_vlan_rem_vid(struct net_d
  	if (lancer_chip(adapter) && vid == 0)
  		goto ret;
  
- 	adapter->vlan_tag[vid] = 0;
+ 	clear_bit(vid, adapter->vids);
  	status = be_vid_config(adapter);
  	if (!status)
  		adapter->vlans_added--;
  	else
- 		adapter->vlan_tag[vid] = 1;
+ 		set_bit(vid, adapter->vids);
  ret:
  	return status;
  }
@@@ -1254,8 -1247,10 +1247,10 @@@ static void be_set_rx_mode(struct net_d
  
  	/* Set to MCAST promisc mode if setting MULTICAST address fails */
  	if (status) {
- 		dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
- 		dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
+ 		dev_info(&adapter->pdev->dev,
+ 			 "Exhausted multicast HW filters.\n");
+ 		dev_info(&adapter->pdev->dev,
+ 			 "Disabling HW multicast filtering.\n");
  		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
  	}
  done:
@@@ -1287,7 -1282,7 +1282,7 @@@ static int be_set_vf_mac(struct net_dev
  
  	if (status)
  		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
- 				mac, vf);
+ 			mac, vf);
  	else
  		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
  
@@@ -1295,7 -1290,7 +1290,7 @@@
  }
  
  static int be_get_vf_config(struct net_device *netdev, int vf,
- 			struct ifla_vf_info *vi)
+ 			    struct ifla_vf_info *vi)
  {
  	struct be_adapter *adapter = netdev_priv(netdev);
  	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@@ -1316,8 -1311,7 +1311,7 @@@
  	return 0;
  }
  
- static int be_set_vf_vlan(struct net_device *netdev,
- 			int vf, u16 vlan, u8 qos)
+ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
  {
  	struct be_adapter *adapter = netdev_priv(netdev);
  	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@@ -1348,8 -1342,7 +1342,7 @@@
  	return status;
  }
  
- static int be_set_vf_tx_rate(struct net_device *netdev,
- 			int vf, int rate)
+ static int be_set_vf_tx_rate(struct net_device *netdev, int vf, int rate)
  {
  	struct be_adapter *adapter = netdev_priv(netdev);
  	int status = 0;
@@@ -1369,7 -1362,7 +1362,7 @@@
  	status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
  	if (status)
  		dev_err(&adapter->pdev->dev,
- 				"tx rate %d on VF %d failed\n", rate, vf);
+ 			"tx rate %d on VF %d failed\n", rate, vf);
  	else
  		adapter->vf_cfg[vf].tx_rate = rate;
  	return status;
@@@ -1469,7 -1462,7 +1462,7 @@@ modify_eqd
  }
  
  static void be_rx_stats_update(struct be_rx_obj *rxo,
- 		struct be_rx_compl_info *rxcp)
+ 			       struct be_rx_compl_info *rxcp)
  {
  	struct be_rx_stats *stats = rx_stats(rxo);
  
@@@ -1566,7 -1559,8 +1559,8 @@@ static void skb_fill_rx_data(struct be_
  		skb_frag_set_page(skb, 0, page_info->page);
  		skb_shinfo(skb)->frags[0].page_offset =
  					page_info->page_offset + hdr_len;
- 		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
+ 		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
+ 				  curr_frag_len - hdr_len);
  		skb->data_len = curr_frag_len - hdr_len;
  		skb->truesize += rx_frag_size;
  		skb->tail += hdr_len;
@@@ -1725,8 -1719,8 +1719,8 @@@ static void be_parse_rx_compl_v1(struc
  	if (rxcp->vlanf) {
  		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
  					  compl);
- 		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
- 					       compl);
+ 		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
+ 					       vlan_tag, compl);
  	}
  	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
  	rxcp->tunneled =
@@@ -1757,8 -1751,8 +1751,8 @@@ static void be_parse_rx_compl_v0(struc
  	if (rxcp->vlanf) {
  		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
  					  compl);
- 		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
- 					       compl);
+ 		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
+ 					       vlan_tag, compl);
  	}
  	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
  	rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
@@@ -1799,7 -1793,7 +1793,7 @@@ static struct be_rx_compl_info *be_rx_c
  			rxcp->vlan_tag = swab16(rxcp->vlan_tag);
  
  		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
- 		    !adapter->vlan_tag[rxcp->vlan_tag])
+ 		    !test_bit(rxcp->vlan_tag, adapter->vids))
  			rxcp->vlanf = 0;
  	}
  
@@@ -1915,7 -1909,7 +1909,7 @@@ static struct be_eth_tx_compl *be_tx_co
  }
  
  static u16 be_tx_compl_process(struct be_adapter *adapter,
- 		struct be_tx_obj *txo, u16 last_index)
+ 			       struct be_tx_obj *txo, u16 last_index)
  {
  	struct be_queue_info *txq = &txo->q;
  	struct be_eth_wrb *wrb;
@@@ -2122,7 -2116,7 +2116,7 @@@ static int be_evt_queues_create(struct 
  
  		eq = &eqo->q;
  		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
- 					sizeof(struct be_eq_entry));
+ 				    sizeof(struct be_eq_entry));
  		if (rc)
  			return rc;
  
@@@ -2155,7 -2149,7 +2149,7 @@@ static int be_mcc_queues_create(struct 
  
  	cq = &adapter->mcc_obj.cq;
  	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
- 			sizeof(struct be_mcc_compl)))
+ 			   sizeof(struct be_mcc_compl)))
  		goto err;
  
  	/* Use the default EQ for MCC completions */
@@@ -2275,7 -2269,7 +2269,7 @@@ static int be_rx_cqs_create(struct be_a
  		rxo->adapter = adapter;
  		cq = &rxo->cq;
  		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
- 				sizeof(struct be_eth_rx_compl));
+ 				    sizeof(struct be_eth_rx_compl));
  		if (rc)
  			return rc;
  
@@@ -2339,7 -2333,7 +2333,7 @@@ static inline bool do_gro(struct be_rx_
  }
  
  static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
- 			int budget, int polling)
+ 			 int budget, int polling)
  {
  	struct be_adapter *adapter = rxo->adapter;
  	struct be_queue_info *rx_cq = &rxo->cq;
@@@ -2365,7 -2359,7 +2359,7 @@@
  		 * promiscuous mode on some skews
  		 */
  		if (unlikely(rxcp->port != adapter->port_num &&
- 				!lancer_chip(adapter))) {
+ 			     !lancer_chip(adapter))) {
  			be_rx_compl_discard(rxo, rxcp);
  			goto loop_continue;
  		}
@@@ -2405,8 -2399,9 +2399,9 @@@ static bool be_process_tx(struct be_ada
  		if (!txcp)
  			break;
  		num_wrbs += be_tx_compl_process(adapter, txo,
- 				AMAP_GET_BITS(struct amap_eth_tx_compl,
- 					wrb_index, txcp));
+ 						AMAP_GET_BITS(struct
+ 							      amap_eth_tx_compl,
+ 							      wrb_index, txcp));
  	}
  
  	if (work_done) {
@@@ -2416,7 -2411,7 +2411,7 @@@
  		/* As Tx wrbs have been freed up, wake up netdev queue
  		 * if it was stopped due to lack of tx wrbs.  */
  		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
- 			atomic_read(&txo->q.used) < txo->q.len / 2) {
+ 		    atomic_read(&txo->q.used) < txo->q.len / 2) {
  			netif_wake_subqueue(adapter->netdev, idx);
  		}
  
@@@ -2510,9 -2505,9 +2505,9 @@@ void be_detect_error(struct be_adapter 
  		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
  		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
  			sliport_err1 = ioread32(adapter->db +
- 					SLIPORT_ERROR1_OFFSET);
+ 						SLIPORT_ERROR1_OFFSET);
  			sliport_err2 = ioread32(adapter->db +
- 					SLIPORT_ERROR2_OFFSET);
+ 						SLIPORT_ERROR2_OFFSET);
  			adapter->hw_error = true;
  			/* Do not log error messages if it's a FW reset */
  			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
@@@ -2531,13 -2526,13 +2526,13 @@@
  		}
  	} else {
  		pci_read_config_dword(adapter->pdev,
- 				PCICFG_UE_STATUS_LOW, &ue_lo);
+ 				      PCICFG_UE_STATUS_LOW, &ue_lo);
  		pci_read_config_dword(adapter->pdev,
- 				PCICFG_UE_STATUS_HIGH, &ue_hi);
+ 				      PCICFG_UE_STATUS_HIGH, &ue_hi);
  		pci_read_config_dword(adapter->pdev,
- 				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
+ 				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
  		pci_read_config_dword(adapter->pdev,
- 				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
+ 				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
  
  		ue_lo = (ue_lo & ~ue_lo_mask);
  		ue_hi = (ue_hi & ~ue_hi_mask);
@@@ -2624,7 -2619,7 +2619,7 @@@ fail
  }
  
  static inline int be_msix_vec_get(struct be_adapter *adapter,
- 				struct be_eq_obj *eqo)
+ 				  struct be_eq_obj *eqo)
  {
  	return adapter->msix_entries[eqo->msix_idx].vector;
  }
@@@ -2648,7 -2643,7 +2643,7 @@@ err_msix
  	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
  		free_irq(be_msix_vec_get(adapter, eqo), eqo);
  	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
- 		status);
+ 		 status);
  	be_msix_disable(adapter);
  	return status;
  }
@@@ -2774,7 -2769,8 +2769,8 @@@ static int be_rx_qs_create(struct be_ad
  {
  	struct be_rx_obj *rxo;
  	int rc, i, j;
- 	u8 rsstable[128];
+ 	u8 rss_hkey[RSS_HASH_KEY_LEN];
+ 	struct rss_info *rss = &adapter->rss_info;
  
  	for_all_rx_queues(adapter, rxo, i) {
  		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
@@@ -2799,31 -2795,36 +2795,36 @@@
  	}
  
  	if (be_multi_rxq(adapter)) {
- 		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
+ 		for (j = 0; j < RSS_INDIR_TABLE_LEN;
+ 			j += adapter->num_rx_qs - 1) {
  			for_all_rss_queues(adapter, rxo, i) {
- 				if ((j + i) >= 128)
+ 				if ((j + i) >= RSS_INDIR_TABLE_LEN)
  					break;
- 				rsstable[j + i] = rxo->rss_id;
+ 				rss->rsstable[j + i] = rxo->rss_id;
+ 				rss->rss_queue[j + i] = i;
  			}
  		}
- 		adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
- 					RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
+ 		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
+ 			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
  
  		if (!BEx_chip(adapter))
- 			adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
- 						RSS_ENABLE_UDP_IPV6;
+ 			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
+ 				RSS_ENABLE_UDP_IPV6;
  	} else {
  		/* Disable RSS, if only default RX Q is created */
- 		adapter->rss_flags = RSS_ENABLE_NONE;
+ 		rss->rss_flags = RSS_ENABLE_NONE;
  	}
  
- 	rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
- 			       128);
+ 	get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
+ 	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
+ 			       128, rss_hkey);
  	if (rc) {
- 		adapter->rss_flags = RSS_ENABLE_NONE;
+ 		rss->rss_flags = RSS_ENABLE_NONE;
  		return rc;
  	}
  
+ 	memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
+ 
  	/* First time posting */
  	for_all_rx_queues(adapter, rxo, i)
  		be_post_rx_frags(rxo, GFP_KERNEL);
@@@ -2896,7 -2897,8 +2897,8 @@@ static int be_setup_wol(struct be_adapt
  
  	if (enable) {
  		status = pci_write_config_dword(adapter->pdev,
- 			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
+ 						PCICFG_PM_CONTROL_OFFSET,
+ 						PCICFG_PM_CONTROL_MASK);
  		if (status) {
  			dev_err(&adapter->pdev->dev,
  				"Could not enable Wake-on-lan\n");
@@@ -2905,7 -2907,8 +2907,8 @@@
  			return status;
  		}
  		status = be_cmd_enable_magic_wol(adapter,
- 				adapter->netdev->dev_addr, &cmd);
+ 						 adapter->netdev->dev_addr,
+ 						 &cmd);
  		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
  		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
  	} else {
@@@ -2944,7 -2947,8 +2947,8 @@@ static int be_vf_eth_addr_config(struc
  
  		if (status)
  			dev_err(&adapter->pdev->dev,
- 			"Mac address assignment failed for VF %d\n", vf);
+ 				"Mac address assignment failed for VF %d\n",
+ 				vf);
  		else
  			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
  
@@@ -3086,9 -3090,11 +3090,11 @@@ static int be_vfs_if_create(struct be_a
  
  		/* If a FW profile exists, then cap_flags are updated */
  		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
- 			   BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
- 		status = be_cmd_if_create(adapter, cap_flags, en_flags,
- 					  &vf_cfg->if_handle, vf + 1);
+ 					BE_IF_FLAGS_BROADCAST |
+ 					BE_IF_FLAGS_MULTICAST);
+ 		status =
+ 		    be_cmd_if_create(adapter, cap_flags, en_flags,
+ 				     &vf_cfg->if_handle, vf + 1);
  		if (status)
  			goto err;
  	}
@@@ -3594,8 -3600,8 +3600,8 @@@ static void be_netpoll(struct net_devic
  static char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
  
  static bool be_flash_redboot(struct be_adapter *adapter,
- 			const u8 *p, u32 img_start, int image_size,
- 			int hdr_size)
+ 			     const u8 *p, u32 img_start, int image_size,
+ 			     int hdr_size)
  {
  	u32 crc_offset;
  	u8 flashed_crc[4];
@@@ -3605,11 -3611,10 +3611,10 @@@
  
  	p += crc_offset;
  
- 	status = be_cmd_get_flash_crc(adapter, flashed_crc,
- 			(image_size - 4));
+ 	status = be_cmd_get_flash_crc(adapter, flashed_crc, (image_size - 4));
  	if (status) {
  		dev_err(&adapter->pdev->dev,
- 		"could not get crc from flash, not flashing redboot\n");
+ 			"could not get crc from flash, not flashing redboot\n");
  		return false;
  	}
  
@@@ -3649,8 -3654,8 +3654,8 @@@ static bool is_comp_in_ufi(struct be_ad
  }
  
  static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
- 					 int header_size,
- 					 const struct firmware *fw)
+ 						int header_size,
+ 						const struct firmware *fw)
  {
  	struct flash_section_info *fsec = NULL;
  	const u8 *p = fw->data;
@@@ -3666,7 -3671,7 +3671,7 @@@
  }
  
  static int be_flash(struct be_adapter *adapter, const u8 *img,
- 		struct be_dma_mem *flash_cmd, int optype, int img_size)
+ 		    struct be_dma_mem *flash_cmd, int optype, int img_size)
  {
  	u32 total_bytes = 0, flash_op, num_bytes = 0;
  	int status = 0;
@@@ -3693,7 -3698,7 +3698,7 @@@
  		memcpy(req->data_buf, img, num_bytes);
  		img += num_bytes;
  		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
- 						flash_op, num_bytes);
+ 					       flash_op, num_bytes);
  		if (status) {
  			if (status == ILLEGAL_IOCTL_REQ &&
  			    optype == OPTYPE_PHY_FW)
@@@ -3708,10 -3713,8 +3713,8 @@@
  
  /* For BE2, BE3 and BE3-R */
  static int be_flash_BEx(struct be_adapter *adapter,
- 			 const struct firmware *fw,
- 			 struct be_dma_mem *flash_cmd,
- 			 int num_of_images)
- 
+ 			const struct firmware *fw,
+ 			struct be_dma_mem *flash_cmd, int num_of_images)
  {
  	int status = 0, i, filehdr_size = 0;
  	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
@@@ -3793,8 -3796,10 +3796,10 @@@
  
  		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
  			redboot = be_flash_redboot(adapter, fw->data,
- 				pflashcomp[i].offset, pflashcomp[i].size,
- 				filehdr_size + img_hdrs_size);
+ 						   pflashcomp[i].offset,
+ 						   pflashcomp[i].size,
+ 						   filehdr_size +
+ 						   img_hdrs_size);
  			if (!redboot)
  				continue;
  		}
@@@ -3805,7 -3810,7 +3810,7 @@@
  			return -1;
  
  		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
- 					pflashcomp[i].size);
+ 				  pflashcomp[i].size);
  		if (status) {
  			dev_err(&adapter->pdev->dev,
  				"Flashing section type %d failed.\n",
@@@ -3817,8 -3822,8 +3822,8 @@@
  }
  
  static int be_flash_skyhawk(struct be_adapter *adapter,
- 		const struct firmware *fw,
- 		struct be_dma_mem *flash_cmd, int num_of_images)
+ 			    const struct firmware *fw,
+ 			    struct be_dma_mem *flash_cmd, int num_of_images)
  {
  	int status = 0, i, filehdr_size = 0;
  	int img_offset, img_size, img_optype, redboot;
@@@ -3866,8 -3871,9 +3871,9 @@@
  
  		if (img_optype == OPTYPE_REDBOOT) {
  			redboot = be_flash_redboot(adapter, fw->data,
- 					img_offset, img_size,
- 					filehdr_size + img_hdrs_size);
+ 						   img_offset, img_size,
+ 						   filehdr_size +
+ 						   img_hdrs_size);
  			if (!redboot)
  				continue;
  		}
@@@ -3889,7 -3895,7 +3895,7 @@@
  }
  
  static int lancer_fw_download(struct be_adapter *adapter,
- 				const struct firmware *fw)
+ 			      const struct firmware *fw)
  {
  #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
  #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
@@@ -3955,7 -3961,7 +3961,7 @@@
  	}
  
  	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
- 				flash_cmd.dma);
+ 			  flash_cmd.dma);
  	if (status) {
  		dev_err(&adapter->pdev->dev,
  			"Firmware load error. "
@@@ -3976,9 -3982,8 +3982,8 @@@
  			goto lancer_fw_exit;
  		}
  	} else if (change_status != LANCER_NO_RESET_NEEDED) {
- 			dev_err(&adapter->pdev->dev,
- 				"System reboot required for new FW"
- 				" to be active\n");
+ 		dev_err(&adapter->pdev->dev,
+ 			"System reboot required for new FW to be active\n");
  	}
  
  	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
@@@ -4042,7 -4047,7 +4047,7 @@@ static int be_fw_download(struct be_ada
  			switch (ufi_type) {
  			case UFI_TYPE4:
  				status = be_flash_skyhawk(adapter, fw,
- 							&flash_cmd, num_imgs);
+ 							  &flash_cmd, num_imgs);
  				break;
  			case UFI_TYPE3R:
  				status = be_flash_BEx(adapter, fw, &flash_cmd,
@@@ -4112,8 -4117,7 +4117,7 @@@ fw_exit
  	return status;
  }
  
- static int be_ndo_bridge_setlink(struct net_device *dev,
- 				    struct nlmsghdr *nlh)
+ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
  {
  	struct be_adapter *adapter = netdev_priv(dev);
  	struct nlattr *attr, *br_spec;
@@@ -4155,8 -4159,7 +4159,7 @@@ err
  }
  
  static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- 				    struct net_device *dev,
- 				    u32 filter_mask)
+ 				 struct net_device *dev, u32 filter_mask)
  {
  	struct be_adapter *adapter = netdev_priv(dev);
  	int status = 0;
@@@ -4301,7 -4304,7 +4304,7 @@@ static void be_netdev_init(struct net_d
  
  	netdev->netdev_ops = &be_netdev_ops;
  
- 	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
+ 	netdev->ethtool_ops = &be_ethtool_ops;
  }
  
  static void be_unmap_pci_bars(struct be_adapter *adapter)
@@@ -4870,7 -4873,7 +4873,7 @@@ static void be_shutdown(struct pci_dev 
  }
  
  static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
- 				pci_channel_state_t state)
+ 					    pci_channel_state_t state)
  {
  	struct be_adapter *adapter = pci_get_drvdata(pdev);
  	struct net_device *netdev =  adapter->netdev;
@@@ -4949,12 -4952,6 +4952,12 @@@ static void be_eeh_resume(struct pci_de
  	if (status)
  		goto err;
  
 +	/* On some BE3 FW versions, after a HW reset,
 +	 * interrupts will remain disabled for each function.
 +	 * So, explicitly enable interrupts
 +	 */
 +	be_intr_set(adapter, true);
 +
  	/* tell fw we're ready to fire cmds */
  	status = be_cmd_fw_init(adapter);
  	if (status)
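
The VLAN bookkeeping changes in be_main.c above replace the byte-per-VID
vlan_tag[] array with a standard kernel bitmap, shrinking the adapter struct
and letting the table-build loop use for_each_set_bit(). The pattern, with the
field declaration (which lives in the header, so it is assumed here):

	/* in struct be_adapter (assumed):
	 *	DECLARE_BITMAP(vids, VLAN_N_VID);  (was: u8 vlan_tag[VLAN_N_VID])
	 */
	set_bit(vid, adapter->vids);		/* was vlan_tag[vid] = 1; */
	clear_bit(vid, adapter->vids);		/* was vlan_tag[vid] = 0; */

	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);	/* replaces the open-coded scan */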
diff --combined drivers/net/ethernet/mellanox/mlx4/cmd.c
index 92d3249,357dcb0..161bbc8
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@@ -212,8 -212,7 +212,7 @@@ static int mlx4_comm_cmd_poll(struct ml
  
  	/* First, verify that the master reports correct status */
  	if (comm_pending(dev)) {
- 		mlx4_warn(dev, "Communication channel is not idle."
- 			  "my toggle is %d (cmd:0x%x)\n",
+ 		mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
  			  priv->cmd.comm_toggle, cmd);
  		return -EAGAIN;
  	}
@@@ -422,9 -421,8 +421,8 @@@ static int mlx4_slave_cmd(struct mlx4_d
  					*out_param =
  						be64_to_cpu(vhcr->out_param);
  				else {
- 					mlx4_err(dev, "response expected while"
- 						 "output mailbox is NULL for "
- 						 "command 0x%x\n", op);
+ 					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+ 						 op);
  					vhcr->status = CMD_STAT_BAD_PARAM;
  				}
  			}
@@@ -439,16 -437,15 +437,15 @@@
  					*out_param =
  						be64_to_cpu(vhcr->out_param);
  				else {
- 					mlx4_err(dev, "response expected while"
- 						 "output mailbox is NULL for "
- 						 "command 0x%x\n", op);
+ 					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+ 						 op);
  					vhcr->status = CMD_STAT_BAD_PARAM;
  				}
  			}
  			ret = mlx4_status_to_errno(vhcr->status);
  		} else
- 			mlx4_err(dev, "failed execution of VHCR_POST command"
- 				 "opcode 0x%x\n", op);
+ 			mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n",
+ 				 op);
  	}
  
  	mutex_unlock(&priv->cmd.slave_cmd_mutex);
@@@ -476,6 -473,13 +473,13 @@@ static int mlx4_cmd_poll(struct mlx4_de
  		goto out;
  	}
  
+ 	if (out_is_imm && !out_param) {
+ 		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+ 			 op);
+ 		err = -EINVAL;
+ 		goto out;
+ 	}
+ 
  	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
  			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
  	if (err)
@@@ -554,6 -558,13 +558,13 @@@ static int mlx4_cmd_wait(struct mlx4_de
  	cmd->free_head = context->next;
  	spin_unlock(&cmd->context_lock);
  
+ 	if (out_is_imm && !out_param) {
+ 		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+ 			 op);
+ 		err = -EINVAL;
+ 		goto out;
+ 	}
+ 
  	init_completion(&context->done);
  
  	mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
@@@ -625,9 -636,8 +636,8 @@@ static int mlx4_ACCESS_MEM(struct mlx4_
  
  	if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
  	    (slave & ~0x7f) | (size & 0xff)) {
- 		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
- 			      "master_addr:0x%llx slave_id:%d size:%d\n",
- 			      slave_addr, master_addr, slave, size);
+ 		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
+ 			 slave_addr, master_addr, slave, size);
  		return -EINVAL;
  	}
  
@@@ -788,8 -798,7 +798,7 @@@ static int mlx4_MAD_IFC_wrapper(struct 
  	    ((smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) ||
  	     (smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
  	      smp->method == IB_MGMT_METHOD_SET))) {
- 		mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, "
- 			 "class 0x%x, method 0x%x for attr 0x%x. Rejecting\n",
+ 		mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x for attr 0x%x - Rejecting\n",
  			 slave, smp->method, smp->mgmt_class,
  			 be16_to_cpu(smp->attr_id));
  		return -EPERM;
@@@ -1253,12 -1262,12 +1262,12 @@@ static struct mlx4_cmd_info cmd_info[] 
  	},
  	{
  		.opcode = MLX4_CMD_UPDATE_QP,
 -		.has_inbox = false,
 +		.has_inbox = true,
  		.has_outbox = false,
  		.out_is_imm = false,
  		.encode_slave_id = false,
  		.verify = NULL,
 -		.wrapper = mlx4_CMD_EPERM_wrapper
 +		.wrapper = mlx4_UPDATE_QP_wrapper
  	},
  	{
  		.opcode = MLX4_CMD_GET_OP_REQ,
@@@ -1409,8 -1418,8 +1418,8 @@@ static int mlx4_master_process_vhcr(str
  				      ALIGN(sizeof(struct mlx4_vhcr_cmd),
  					    MLX4_ACCESS_MEM_ALIGN), 1);
  		if (ret) {
- 			mlx4_err(dev, "%s:Failed reading vhcr"
- 				 "ret: 0x%x\n", __func__, ret);
+ 			mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
+ 				 __func__, ret);
  			kfree(vhcr);
  			return ret;
  		}
@@@ -1461,9 -1470,8 +1470,8 @@@
  
  	/* Apply permission and bound checks if applicable */
  	if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
- 		mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
- 			  "checks for resource_id:%d\n", vhcr->op, slave,
- 			  vhcr->in_modifier);
+ 		mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
+ 			  vhcr->op, slave, vhcr->in_modifier);
  		vhcr_cmd->status = CMD_STAT_BAD_OP;
  		goto out_status;
  	}
@@@ -1502,8 -1510,7 +1510,7 @@@
  	}
  
  	if (err) {
- 		mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
- 			  " error:%d, status %d\n",
+ 		mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
  			  vhcr->op, slave, vhcr->errno, err);
  		vhcr_cmd->status = mlx4_errno_to_status(err);
  		goto out_status;
@@@ -1537,8 -1544,8 +1544,8 @@@ out_status
  				 __func__);
  		else if (vhcr->e_bit &&
  			 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
- 				mlx4_warn(dev, "Failed to generate command completion "
- 					  "eqe for slave %d\n", slave);
+ 				mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
+ 					  slave);
  	}
  
  out:
@@@ -1577,8 -1584,9 +1584,9 @@@ static int mlx4_master_immediate_activa
  
  	mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
  		 slave, port);
- 	mlx4_dbg(dev, "vlan %d QoS %d link down %d\n", vp_admin->default_vlan,
- 		 vp_admin->default_qos, vp_admin->link_state);
+ 	mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
+ 		 vp_admin->default_vlan, vp_admin->default_qos,
+ 		 vp_admin->link_state);
  
  	work = kzalloc(sizeof(*work), GFP_KERNEL);
  	if (!work)
@@@ -1591,7 -1599,7 +1599,7 @@@
  						   &admin_vlan_ix);
  			if (err) {
  				kfree(work);
- 				mlx4_warn((&priv->dev),
+ 				mlx4_warn(&priv->dev,
  					  "No vlan resources slave %d, port %d\n",
  					  slave, port);
  				return err;
@@@ -1600,7 -1608,7 +1608,7 @@@
  			admin_vlan_ix = NO_INDX;
  		}
  		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
- 		mlx4_dbg((&(priv->dev)),
+ 		mlx4_dbg(&priv->dev,
  			 "alloc vlan %d idx  %d slave %d port %d\n",
  			 (int)(vp_admin->default_vlan),
  			 admin_vlan_ix, slave, port);
@@@ -1661,12 -1669,12 +1669,12 @@@ static int mlx4_master_activate_admin_s
  						   vp_admin->default_vlan, &(vp_oper->vlan_idx));
  			if (err) {
  				vp_oper->vlan_idx = NO_INDX;
- 				mlx4_warn((&priv->dev),
+ 				mlx4_warn(&priv->dev,
  					  "No vlan resorces slave %d, port %d\n",
  					  slave, port);
  				return err;
  			}
- 			mlx4_dbg((&(priv->dev)), "alloc vlan %d idx  %d slave %d port %d\n",
+ 			mlx4_dbg(&priv->dev, "alloc vlan %d idx  %d slave %d port %d\n",
  				 (int)(vp_oper->state.default_vlan),
  				 vp_oper->vlan_idx, slave, port);
  		}
@@@ -1677,12 -1685,12 +1685,12 @@@
  			if (0 > vp_oper->mac_idx) {
  				err = vp_oper->mac_idx;
  				vp_oper->mac_idx = NO_INDX;
- 				mlx4_warn((&priv->dev),
+ 				mlx4_warn(&priv->dev,
  					  "No mac resorces slave %d, port %d\n",
  					  slave, port);
  				return err;
  			}
- 			mlx4_dbg((&(priv->dev)), "alloc mac %llx idx  %d slave %d port %d\n",
+ 			mlx4_dbg(&priv->dev, "alloc mac %llx idx  %d slave %d port %d\n",
  				 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
  		}
  	}
@@@ -1731,8 -1739,8 +1739,8 @@@ static void mlx4_master_do_cmd(struct m
  	slave_state[slave].comm_toggle ^= 1;
  	reply = (u32) slave_state[slave].comm_toggle << 31;
  	if (toggle != slave_state[slave].comm_toggle) {
- 		mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER"
- 			  "STATE COMPROMISIED ***\n", toggle, slave);
+ 		mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
+ 			  toggle, slave);
  		goto reset_slave;
  	}
  	if (cmd == MLX4_COMM_CMD_RESET) {
@@@ -1759,8 -1767,8 +1767,8 @@@
  	/*command from slave in the middle of FLR*/
  	if (cmd != MLX4_COMM_CMD_RESET &&
  	    MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
- 		mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) "
- 			  "in the middle of FLR\n", slave, cmd);
+ 		mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n",
+ 			  slave, cmd);
  		return;
  	}
  
@@@ -1798,8 -1806,8 +1806,8 @@@
  
  		mutex_lock(&priv->cmd.slave_cmd_mutex);
  		if (mlx4_master_process_vhcr(dev, slave, NULL)) {
- 			mlx4_err(dev, "Failed processing vhcr for slave:%d,"
- 				 " resetting slave.\n", slave);
+ 			mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
+ 				 slave);
  			mutex_unlock(&priv->cmd.slave_cmd_mutex);
  			goto reset_slave;
  		}
@@@ -1816,8 -1824,7 +1824,7 @@@
  		is_going_down = 1;
  	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
  	if (is_going_down) {
- 		mlx4_warn(dev, "Slave is going down aborting command(%d)"
- 			  " executing from slave:%d\n",
+ 		mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
  			  cmd, slave);
  		return;
  	}
@@@ -1880,9 -1887,8 +1887,8 @@@ void mlx4_master_comm_channel(struct wo
  			if (toggle != slt) {
  				if (master->slave_state[slave].comm_toggle
  				    != slt) {
- 					printk(KERN_INFO "slave %d out of sync."
- 					       " read toggle %d, state toggle %d. "
- 					       "Resynching.\n", slave, slt,
+ 					printk(KERN_INFO "slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
+ 					       slave, slt,
  					       master->slave_state[slave].comm_toggle);
  					master->slave_state[slave].comm_toggle =
  						slt;
@@@ -1896,8 -1902,7 +1902,7 @@@
  	}
  
  	if (reported && reported != served)
- 		mlx4_warn(dev, "Got command event with bitmask from %d slaves"
- 			  " but %d were served\n",
+ 		mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
  			  reported, served);
  
  	if (mlx4_ARM_COMM_CHANNEL(dev))
@@@ -1953,7 -1958,7 +1958,7 @@@ int mlx4_multi_func_init(struct mlx4_de
  		ioremap(pci_resource_start(dev->pdev, 2) +
  			MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
  	if (!priv->mfunc.comm) {
- 		mlx4_err(dev, "Couldn't map communication vector.\n");
+ 		mlx4_err(dev, "Couldn't map communication vector\n");
  		goto err_vhcr;
  	}
  
@@@ -2080,7 -2085,7 +2085,7 @@@ int mlx4_cmd_init(struct mlx4_dev *dev
  		priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
  					MLX4_HCR_BASE, MLX4_HCR_SIZE);
  		if (!priv->cmd.hcr) {
- 			mlx4_err(dev, "Couldn't map command register.\n");
+ 			mlx4_err(dev, "Couldn't map command register\n");
  			return -ENOMEM;
  		}
  	}
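
Most of the mlx4/cmd.c churn above is unsplitting user-visible log strings.
The split form silently loses the separator between the fragments and cannot
be found by grepping for the printed message; taking the first warning in the
file as the example:

	/* before: prints "...is not idle.my toggle is 1 (cmd:0x...)" */
	mlx4_warn(dev, "Communication channel is not idle."
		  "my toggle is %d (cmd:0x%x)\n",
		  priv->cmd.comm_toggle, cmd);

	/* after: one greppable line, even though it exceeds 80 columns */
	mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
		  priv->cmd.comm_toggle, cmd);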
diff --combined drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 212cea4,52c1e7d..9dd1b30
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@@ -216,18 -216,19 +216,19 @@@ extern int mlx4_debug_level
  #define mlx4_debug_level	(0)
  #endif /* CONFIG_MLX4_DEBUG */
  
- #define mlx4_dbg(mdev, format, arg...)					\
+ #define mlx4_dbg(mdev, format, ...)					\
  do {									\
  	if (mlx4_debug_level)						\
- 		dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \
+ 		dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format,	\
+ 			   ##__VA_ARGS__);				\
  } while (0)
  
- #define mlx4_err(mdev, format, arg...) \
- 	dev_err(&mdev->pdev->dev, format, ##arg)
- #define mlx4_info(mdev, format, arg...) \
- 	dev_info(&mdev->pdev->dev, format, ##arg)
- #define mlx4_warn(mdev, format, arg...) \
- 	dev_warn(&mdev->pdev->dev, format, ##arg)
+ #define mlx4_err(mdev, format, ...)					\
+ 	dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+ #define mlx4_info(mdev, format, ...)					\
+ 	dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+ #define mlx4_warn(mdev, format, ...)					\
+ 	dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
  
  extern int mlx4_log_num_mgm_entry_size;
  extern int log_mtts_per_seg;
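
A quick aside on the macro rework above: switching from the GNU-only `arg...' parameter to C99 `...' with ##__VA_ARGS__ lets the macros take zero variadic arguments, and the added parentheses around mdev keep expression arguments from mis-parsing. A minimal standalone sketch of the same pattern - the my_dev type and my_err() macro are invented for illustration, not part of the patch:

    #include <stdio.h>

    struct my_dev { const char *name; };

    /* ##__VA_ARGS__ (a GNU extension the kernel relies on) swallows the
     * trailing comma when no extra arguments are passed; (mdev) keeps an
     * argument such as "devs + 1" from mis-binding around ->name. */
    #define my_err(mdev, fmt, ...) \
            fprintf(stderr, "%s: " fmt, (mdev)->name, ##__VA_ARGS__)

    int main(void)
    {
            struct my_dev d = { "mlx4" };
            struct my_dev devs[2] = { { "a" }, { "b" } };

            my_err(&d, "plain message\n");          /* no variadic args */
            my_err(devs + 1, "code %d\n", 42);      /* expression argument */
            return 0;
    }
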
@@@ -1195,12 -1196,6 +1196,12 @@@ int mlx4_QP_ATTACH_wrapper(struct mlx4_
  			   struct mlx4_cmd_mailbox *outbox,
  			   struct mlx4_cmd_info *cmd);
  
 +int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
 +			   struct mlx4_vhcr *vhcr,
 +			   struct mlx4_cmd_mailbox *inbox,
 +			   struct mlx4_cmd_mailbox *outbox,
 +			   struct mlx4_cmd_info *cmd);
 +
  int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
  			 struct mlx4_vhcr *vhcr,
  			 struct mlx4_cmd_mailbox *inbox,
diff --combined drivers/net/ethernet/mellanox/mlx4/qp.c
index fbd32af,9bdb6ae..1d3234a
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@@ -264,8 -264,8 +264,8 @@@ void mlx4_qp_release_range(struct mlx4_
  			       MLX4_CMD_FREE_RES,
  			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
  		if (err) {
- 			mlx4_warn(dev, "Failed to release qp range"
- 				  " base:%d cnt:%d\n", base_qpn, cnt);
+ 			mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
+ 				  base_qpn, cnt);
  		}
  	} else
  		 __mlx4_qp_release_range(dev, base_qpn, cnt);
@@@ -389,41 -389,6 +389,41 @@@ err_icm
  
  EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
  
 +#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
 +int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
 +		   enum mlx4_update_qp_attr attr,
 +		   struct mlx4_update_qp_params *params)
 +{
 +	struct mlx4_cmd_mailbox *mailbox;
 +	struct mlx4_update_qp_context *cmd;
 +	u64 pri_addr_path_mask = 0;
 +	int err = 0;
 +
 +	if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
 +		return -EINVAL;
 +
 +	mailbox = mlx4_alloc_cmd_mailbox(dev);
 +	if (IS_ERR(mailbox))
 +		return PTR_ERR(mailbox);
 +
 +	cmd = (struct mlx4_update_qp_context *)mailbox->buf;
 +
 +	if (attr & MLX4_UPDATE_QP_SMAC) {
 +		pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
 +		cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
 +	}
 +
 +	cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
 +
 +	err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
 +		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
 +		       MLX4_CMD_NATIVE);
 +
 +	mlx4_free_cmd_mailbox(dev, mailbox);
 +	return err;
 +}
 +EXPORT_SYMBOL_GPL(mlx4_update_qp);
 +
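
A rough sketch of how a caller might drive the new mlx4_update_qp() export; new_smac_ix and the warning text are invented for illustration, only the function, MLX4_UPDATE_QP_SMAC and the smac_index field come from the patch:

    /* Illustrative caller: point an existing QP at a different
     * source-MAC table entry via the new UPDATE_QP path. */
    struct mlx4_update_qp_params params = {
            .smac_index = new_smac_ix,      /* hypothetical index */
    };
    int err;

    err = mlx4_update_qp(dev, qp, MLX4_UPDATE_QP_SMAC, &params);
    if (err)
            mlx4_warn(dev, "UPDATE_QP for qpn 0x%x failed (%d)\n",
                      qp->qpn, err);
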
  void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
  {
  	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
@@@ -612,8 -577,7 +612,7 @@@ int mlx4_qp_to_ready(struct mlx4_dev *d
  		err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
  				     context, 0, 0, qp);
  		if (err) {
- 			mlx4_err(dev, "Failed to bring QP to state: "
- 				 "%d with error: %d\n",
+ 			mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
  				 states[i + 1], err);
  			return err;
  		}
diff --combined drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 8f1254a,a95df9d..ce0e249
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@@ -3880,7 -3880,7 +3880,7 @@@ static int add_eth_header(struct mlx4_d
  		}
  	}
  	if (!be_mac) {
- 		pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n",
+ 		pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
  		       port);
  		return -EINVAL;
  	}
@@@ -3895,60 -3895,6 +3895,60 @@@
  
  }
  
 +#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
 +int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
 +			   struct mlx4_vhcr *vhcr,
 +			   struct mlx4_cmd_mailbox *inbox,
 +			   struct mlx4_cmd_mailbox *outbox,
 +			   struct mlx4_cmd_info *cmd_info)
 +{
 +	int err;
 +	u32 qpn = vhcr->in_modifier & 0xffffff;
 +	struct res_qp *rqp;
 +	u64 mac;
 +	unsigned port;
 +	u64 pri_addr_path_mask;
 +	struct mlx4_update_qp_context *cmd;
 +	int smac_index;
 +
 +	cmd = (struct mlx4_update_qp_context *)inbox->buf;
 +
 +	pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
 +	if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
 +	    (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
 +		return -EPERM;
 +
 +	/* Just change the smac for the QP */
 +	err = get_res(dev, slave, qpn, RES_QP, &rqp);
 +	if (err) {
 +		mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
 +		return err;
 +	}
 +
 +	port = (rqp->sched_queue >> 6 & 1) + 1;
 +	smac_index = cmd->qp_context.pri_path.grh_mylmc;
 +	err = mac_find_smac_ix_in_slave(dev, slave, port,
 +					smac_index, &mac);
 +	if (err) {
 +		mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
 +			 qpn, smac_index);
 +		goto err_mac;
 +	}
 +
 +	err = mlx4_cmd(dev, inbox->dma,
 +		       vhcr->in_modifier, 0,
 +		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
 +		       MLX4_CMD_NATIVE);
 +	if (err) {
 +		mlx4_err(dev, "Failed to update qpn 0x%x, command failed\n", qpn);
 +		goto err_mac;
 +	}
 +
 +err_mac:
 +	put_res(dev, slave, qpn, RES_QP);
 +	return err;
 +}
 +
  int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
  					 struct mlx4_vhcr *vhcr,
  					 struct mlx4_cmd_mailbox *inbox,
@@@ -3977,7 -3923,7 +3977,7 @@@
  	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
  	err = get_res(dev, slave, qpn, RES_QP, &rqp);
  	if (err) {
- 		pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
+ 		pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
  		return err;
  	}
  	rule_header = (struct _rule_hw *)(ctrl + 1);
@@@ -3995,7 -3941,7 +3995,7 @@@
  	case MLX4_NET_TRANS_RULE_ID_IPV4:
  	case MLX4_NET_TRANS_RULE_ID_TCP:
  	case MLX4_NET_TRANS_RULE_ID_UDP:
- 		pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
+ 		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
  		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
  			err = -EINVAL;
  			goto err_put;
@@@ -4004,7 -3950,7 +4004,7 @@@
  			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
  		break;
  	default:
- 		pr_err("Corrupted mailbox.\n");
+ 		pr_err("Corrupted mailbox\n");
  		err = -EINVAL;
  		goto err_put;
  	}
@@@ -4018,7 -3964,7 +4018,7 @@@
  
  	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
  	if (err) {
- 		mlx4_err(dev, "Fail to add flow steering resources.\n ");
+ 		mlx4_err(dev, "Failed to add flow steering resources\n");
  		/* detach rule*/
  		mlx4_cmd(dev, vhcr->out_param, 0, 0,
  			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
@@@ -4056,7 -4002,7 +4056,7 @@@ int mlx4_QP_FLOW_STEERING_DETACH_wrappe
  
  	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
  	if (err) {
- 		mlx4_err(dev, "Fail to remove flow steering resources.\n ");
+ 		mlx4_err(dev, "Failed to remove flow steering resources\n");
  		goto out;
  	}
  
@@@ -4185,8 -4131,8 +4185,8 @@@ static void rem_slave_qps(struct mlx4_d
  
  	err = move_all_busy(dev, slave, RES_QP);
  	if (err)
- 		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy"
- 			  "for slave %d\n", slave);
+ 		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
+ 			  slave);
  
  	spin_lock_irq(mlx4_tlock(dev));
  	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
@@@ -4224,10 -4170,8 +4224,8 @@@
  						       MLX4_CMD_TIME_CLASS_A,
  						       MLX4_CMD_NATIVE);
  					if (err)
- 						mlx4_dbg(dev, "rem_slave_qps: failed"
- 							 " to move slave %d qpn %d to"
- 							 " reset\n", slave,
- 							 qp->local_qpn);
+ 						mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
+ 							 slave, qp->local_qpn);
  					atomic_dec(&qp->rcq->ref_count);
  					atomic_dec(&qp->scq->ref_count);
  					atomic_dec(&qp->mtt->ref_count);
@@@ -4261,8 -4205,8 +4259,8 @@@ static void rem_slave_srqs(struct mlx4_
  
  	err = move_all_busy(dev, slave, RES_SRQ);
  	if (err)
- 		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
- 			  "busy for slave %d\n", slave);
+ 		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
+ 			  slave);
  
  	spin_lock_irq(mlx4_tlock(dev));
  	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
@@@ -4292,9 -4236,7 +4290,7 @@@
  						       MLX4_CMD_TIME_CLASS_A,
  						       MLX4_CMD_NATIVE);
  					if (err)
- 						mlx4_dbg(dev, "rem_slave_srqs: failed"
- 							 " to move slave %d srq %d to"
- 							 " SW ownership\n",
+ 						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
  							 slave, srqn);
  
  					atomic_dec(&srq->mtt->ref_count);
@@@ -4329,8 -4271,8 +4325,8 @@@ static void rem_slave_cqs(struct mlx4_d
  
  	err = move_all_busy(dev, slave, RES_CQ);
  	if (err)
- 		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
- 			  "busy for slave %d\n", slave);
+ 		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
+ 			  slave);
  
  	spin_lock_irq(mlx4_tlock(dev));
  	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
@@@ -4360,9 -4302,7 +4356,7 @@@
  						       MLX4_CMD_TIME_CLASS_A,
  						       MLX4_CMD_NATIVE);
  					if (err)
- 						mlx4_dbg(dev, "rem_slave_cqs: failed"
- 							 " to move slave %d cq %d to"
- 							 " SW ownership\n",
+ 						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
  							 slave, cqn);
  					atomic_dec(&cq->mtt->ref_count);
  					state = RES_CQ_ALLOCATED;
@@@ -4394,8 -4334,8 +4388,8 @@@ static void rem_slave_mrs(struct mlx4_d
  
  	err = move_all_busy(dev, slave, RES_MPT);
  	if (err)
- 		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
- 			  "busy for slave %d\n", slave);
+ 		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
+ 			  slave);
  
  	spin_lock_irq(mlx4_tlock(dev));
  	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
@@@ -4430,9 -4370,7 +4424,7 @@@
  						     MLX4_CMD_TIME_CLASS_A,
  						     MLX4_CMD_NATIVE);
  					if (err)
- 						mlx4_dbg(dev, "rem_slave_mrs: failed"
- 							 " to move slave %d mpt %d to"
- 							 " SW ownership\n",
+ 						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
  							 slave, mptn);
  					if (mpt->mtt)
  						atomic_dec(&mpt->mtt->ref_count);
@@@ -4464,8 -4402,8 +4456,8 @@@ static void rem_slave_mtts(struct mlx4_
  
  	err = move_all_busy(dev, slave, RES_MTT);
  	if (err)
- 		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
- 			  "busy for slave %d\n", slave);
+ 		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
+ 			  slave);
  
  	spin_lock_irq(mlx4_tlock(dev));
  	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
@@@ -4567,8 -4505,8 +4559,8 @@@ static void rem_slave_eqs(struct mlx4_d
  
  	err = move_all_busy(dev, slave, RES_EQ);
  	if (err)
- 		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
- 			  "busy for slave %d\n", slave);
+ 		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
+ 			  slave);
  
  	spin_lock_irq(mlx4_tlock(dev));
  	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
@@@ -4600,9 -4538,8 +4592,8 @@@
  							   MLX4_CMD_TIME_CLASS_A,
  							   MLX4_CMD_NATIVE);
  					if (err)
- 						mlx4_dbg(dev, "rem_slave_eqs: failed"
- 							 " to move slave %d eqs %d to"
- 							 " SW ownership\n", slave, eqn);
+ 						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
+ 							 slave, eqn);
  					mlx4_free_cmd_mailbox(dev, mailbox);
  					atomic_dec(&eq->mtt->ref_count);
  					state = RES_EQ_RESERVED;
@@@ -4631,8 -4568,8 +4622,8 @@@ static void rem_slave_counters(struct m
  
  	err = move_all_busy(dev, slave, RES_COUNTER);
  	if (err)
- 		mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
- 			  "busy for slave %d\n", slave);
+ 		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
+ 			  slave);
  
  	spin_lock_irq(mlx4_tlock(dev));
  	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
@@@ -4662,8 -4599,8 +4653,8 @@@ static void rem_slave_xrcdns(struct mlx
  
  	err = move_all_busy(dev, slave, RES_XRCD);
  	if (err)
- 		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
- 			  "busy for slave %d\n", slave);
+ 		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
+ 			  slave);
  
  	spin_lock_irq(mlx4_tlock(dev));
  	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
@@@ -4808,10 -4745,8 +4799,8 @@@ void mlx4_vf_immed_vlan_work_handler(st
  				       0, MLX4_CMD_UPDATE_QP,
  				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
  			if (err) {
- 				mlx4_info(dev, "UPDATE_QP failed for slave %d, "
- 					  "port %d, qpn %d (%d)\n",
- 					  work->slave, port, qp->local_qpn,
- 					  err);
+ 				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
+ 					  work->slave, port, qp->local_qpn, err);
  				errors++;
  			}
  		}
diff --combined drivers/net/macvlan.c
index d53e299,e03707d..b4a569a
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@@ -30,6 -30,7 +30,7 @@@
  #include <linux/if_link.h>
  #include <linux/if_macvlan.h>
  #include <linux/hash.h>
+ #include <linux/workqueue.h>
  #include <net/rtnetlink.h>
  #include <net/xfrm.h>
  
@@@ -40,10 -41,18 +41,18 @@@ struct macvlan_port 
  	struct hlist_head	vlan_hash[MACVLAN_HASH_SIZE];
  	struct list_head	vlans;
  	struct rcu_head		rcu;
+ 	struct sk_buff_head	bc_queue;
+ 	struct work_struct	bc_work;
  	bool 			passthru;
  	int			count;
  };
  
+ struct macvlan_skb_cb {
+ 	const struct macvlan_dev *src;
+ };
+ 
+ #define MACVLAN_SKB_CB(__skb) ((struct macvlan_skb_cb *)&((__skb)->cb[0]))
+ 
  static void macvlan_port_destroy(struct net_device *dev);
  
  static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev)
@@@ -120,7 -129,7 +129,7 @@@ static int macvlan_broadcast_one(struc
  	struct net_device *dev = vlan->dev;
  
  	if (local)
- 		return dev_forward_skb(dev, skb);
+ 		return __dev_forward_skb(dev, skb);
  
  	skb->dev = dev;
  	if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
@@@ -128,7 -137,7 +137,7 @@@
  	else
  		skb->pkt_type = PACKET_MULTICAST;
  
- 	return netif_rx(skb);
+ 	return 0;
  }
  
  static u32 macvlan_hash_mix(const struct macvlan_dev *vlan)
@@@ -175,32 -184,32 +184,32 @@@ static void macvlan_broadcast(struct sk
  			if (likely(nskb))
  				err = macvlan_broadcast_one(
  					nskb, vlan, eth,
- 					mode == MACVLAN_MODE_BRIDGE);
+ 					mode == MACVLAN_MODE_BRIDGE) ?:
+ 				      netif_rx_ni(nskb);
  			macvlan_count_rx(vlan, skb->len + ETH_HLEN,
  					 err == NET_RX_SUCCESS, 1);
  		}
  	}
  }
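
The `err = macvlan_broadcast_one(...) ?: netif_rx_ni(nskb);' construction above uses the GNU x ?: y extension: x is evaluated once and becomes the result if it is non-zero, otherwise y is evaluated, so netif_rx_ni() only runs when the forward step succeeded. A self-contained sketch of the idiom, with stub functions invented for illustration:

    #include <stdio.h>

    static int prepare(void) { return 0; }  /* 0 means success, kernel-style */
    static int deliver(void) { return 7; }

    int main(void)
    {
            /* deliver() runs only because prepare() returned 0; a non-zero
             * error from prepare() would short-circuit and become err. */
            int err = prepare() ?: deliver();

            printf("err=%d\n", err);        /* prints 7 */
            return 0;
    }
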
  
- /* called under rcu_read_lock() from netif_receive_skb */
- static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
+ static void macvlan_process_broadcast(struct work_struct *w)
  {
- 	struct macvlan_port *port;
- 	struct sk_buff *skb = *pskb;
- 	const struct ethhdr *eth = eth_hdr(skb);
- 	const struct macvlan_dev *vlan;
- 	const struct macvlan_dev *src;
- 	struct net_device *dev;
- 	unsigned int len = 0;
- 	int ret = NET_RX_DROP;
+ 	struct macvlan_port *port = container_of(w, struct macvlan_port,
+ 						 bc_work);
+ 	struct sk_buff *skb;
+ 	struct sk_buff_head list;
+ 
+ 	skb_queue_head_init(&list);
+ 
+ 	spin_lock_bh(&port->bc_queue.lock);
+ 	skb_queue_splice_tail_init(&port->bc_queue, &list);
+ 	spin_unlock_bh(&port->bc_queue.lock);
+ 
+ 	while ((skb = __skb_dequeue(&list))) {
+ 		const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
+ 
+ 		rcu_read_lock();
  
- 	port = macvlan_port_get_rcu(skb->dev);
- 	if (is_multicast_ether_addr(eth->h_dest)) {
- 		skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
- 		if (!skb)
- 			return RX_HANDLER_CONSUMED;
- 		eth = eth_hdr(skb);
- 		src = macvlan_hash_lookup(port, eth->h_source);
  		if (!src)
  			/* frame comes from an external address */
  			macvlan_broadcast(skb, port, NULL,
@@@ -213,20 -222,80 +222,80 @@@
  			macvlan_broadcast(skb, port, src->dev,
  					  MACVLAN_MODE_VEPA |
  					  MACVLAN_MODE_BRIDGE);
- 		else if (src->mode == MACVLAN_MODE_BRIDGE)
+ 		else
  			/*
  			 * flood only to VEPA ports, bridge ports
  			 * already saw the frame on the way out.
  			 */
  			macvlan_broadcast(skb, port, src->dev,
  					  MACVLAN_MODE_VEPA);
- 		else {
+ 
+ 		rcu_read_unlock();
+ 
+ 		kfree_skb(skb);
+ 	}
+ }
+ 
+ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
+ 				      struct sk_buff *skb)
+ {
+ 	struct sk_buff *nskb;
+ 	int err = -ENOMEM;
+ 
+ 	nskb = skb_clone(skb, GFP_ATOMIC);
+ 	if (!nskb)
+ 		goto err;
+ 
+ 	spin_lock(&port->bc_queue.lock);
+ 	if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) {
+ 		__skb_queue_tail(&port->bc_queue, nskb);
+ 		err = 0;
+ 	}
+ 	spin_unlock(&port->bc_queue.lock);
+ 
+ 	if (err)
+ 		goto free_nskb;
+ 
+ 	schedule_work(&port->bc_work);
+ 	return;
+ 
+ free_nskb:
+ 	kfree_skb(nskb);
+ err:
+ 	atomic_long_inc(&skb->dev->rx_dropped);
+ }
+ 
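
Taken together, macvlan_broadcast_enqueue() and macvlan_process_broadcast() form a bounded producer/consumer: the RX hot path only clones the skb, appends it under the queue lock (dropping it once the backlog reaches tx_queue_len) and kicks a work item, while the worker splices the whole backlog out and replicates packets without holding the lock. A userspace analog of that shape, with made-up names and a plain FIFO standing in for sk_buff_head:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define BACKLOG_LIMIT 10                /* stands in for dev->tx_queue_len */

    /* Minimal FIFO guarded by a lock; struct work stands in for sk_buff. */
    struct work { struct work *next; int id; };

    static struct work *head, *tail;
    static int backlog;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static int enqueue(int id)
    {
            struct work *w = malloc(sizeof(*w));

            if (!w)
                    return -1;
            w->next = NULL;
            w->id = id;

            pthread_mutex_lock(&lock);
            if (backlog >= BACKLOG_LIMIT) { /* mirrors the tx_queue_len bound */
                    pthread_mutex_unlock(&lock);
                    free(w);                /* counted as a drop, like rx_dropped */
                    return -1;
            }
            if (tail)
                    tail->next = w;
            else
                    head = w;
            tail = w;
            backlog++;
            pthread_mutex_unlock(&lock);
            return 0;                       /* the driver schedules the work here */
    }

    static void process_all(void)
    {
            pthread_mutex_lock(&lock);
            struct work *list = head;       /* splice the whole backlog out */
            head = tail = NULL;
            backlog = 0;
            pthread_mutex_unlock(&lock);

            while (list) {                  /* deliver without holding the lock */
                    struct work *w = list;

                    list = list->next;
                    printf("broadcasting item %d\n", w->id);
                    free(w);
            }
    }

    int main(void)
    {
            enqueue(1);
            enqueue(2);
            process_all();
            return 0;
    }
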
+ /* called under rcu_read_lock() from netif_receive_skb */
+ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
+ {
+ 	struct macvlan_port *port;
+ 	struct sk_buff *skb = *pskb;
+ 	const struct ethhdr *eth = eth_hdr(skb);
+ 	const struct macvlan_dev *vlan;
+ 	const struct macvlan_dev *src;
+ 	struct net_device *dev;
+ 	unsigned int len = 0;
+ 	int ret = NET_RX_DROP;
+ 
+ 	port = macvlan_port_get_rcu(skb->dev);
+ 	if (is_multicast_ether_addr(eth->h_dest)) {
+ 		skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
+ 		if (!skb)
+ 			return RX_HANDLER_CONSUMED;
+ 		eth = eth_hdr(skb);
+ 		src = macvlan_hash_lookup(port, eth->h_source);
+ 		if (src && src->mode != MACVLAN_MODE_VEPA &&
+ 		    src->mode != MACVLAN_MODE_BRIDGE) {
  			/* forward to original port. */
  			vlan = src;
- 			ret = macvlan_broadcast_one(skb, vlan, eth, 0);
+ 			ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?:
+ 			      netif_rx(skb);
  			goto out;
  		}
  
+ 		MACVLAN_SKB_CB(skb)->src = src;
+ 		macvlan_broadcast_enqueue(port, skb);
+ 
  		return RX_HANDLER_PASS;
  	}
  
@@@ -458,10 -527,8 +527,10 @@@ static void macvlan_change_rx_flags(str
  	struct macvlan_dev *vlan = netdev_priv(dev);
  	struct net_device *lowerdev = vlan->lowerdev;
  
 -	if (change & IFF_ALLMULTI)
 -		dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
 +	if (dev->flags & IFF_UP) {
 +		if (change & IFF_ALLMULTI)
 +			dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
 +	}
  }
  
  static void macvlan_set_mac_lists(struct net_device *dev)
@@@ -517,11 -584,6 +586,11 @@@ static struct lock_class_key macvlan_ne
  #define MACVLAN_STATE_MASK \
  	((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
  
 +static int macvlan_get_nest_level(struct net_device *dev)
 +{
 +	return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
 +}
 +
  static void macvlan_set_lockdep_class_one(struct net_device *dev,
  					  struct netdev_queue *txq,
  					  void *_unused)
@@@ -532,9 -594,8 +601,9 @@@
  
  static void macvlan_set_lockdep_class(struct net_device *dev)
  {
 -	lockdep_set_class(&dev->addr_list_lock,
 -			  &macvlan_netdev_addr_lock_key);
 +	lockdep_set_class_and_subclass(&dev->addr_list_lock,
 +				       &macvlan_netdev_addr_lock_key,
 +				       macvlan_get_nest_level(dev));
  	netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
  }
  
@@@ -729,7 -790,6 +798,7 @@@ static const struct net_device_ops macv
  	.ndo_fdb_add		= macvlan_fdb_add,
  	.ndo_fdb_del		= macvlan_fdb_del,
  	.ndo_fdb_dump		= ndo_dflt_fdb_dump,
 +	.ndo_get_lock_subclass  = macvlan_get_nest_level,
  };
  
  void macvlan_common_setup(struct net_device *dev)
@@@ -770,6 -830,9 +839,9 @@@ static int macvlan_port_create(struct n
  	for (i = 0; i < MACVLAN_HASH_SIZE; i++)
  		INIT_HLIST_HEAD(&port->vlan_hash[i]);
  
+ 	skb_queue_head_init(&port->bc_queue);
+ 	INIT_WORK(&port->bc_work, macvlan_process_broadcast);
+ 
  	err = netdev_rx_handler_register(dev, macvlan_handle_frame, port);
  	if (err)
  		kfree(port);
@@@ -782,6 -845,7 +854,7 @@@ static void macvlan_port_destroy(struc
  {
  	struct macvlan_port *port = macvlan_port_get_rtnl(dev);
  
+ 	cancel_work_sync(&port->bc_work);
  	dev->priv_flags &= ~IFF_MACVLAN_PORT;
  	netdev_rx_handler_unregister(dev);
  	kfree_rcu(port, rcu);
@@@ -858,7 -922,6 +931,7 @@@ int macvlan_common_newlink(struct net *
  	vlan->dev      = dev;
  	vlan->port     = port;
  	vlan->set_features = MACVLAN_FEATURES;
 +	vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
  
  	vlan->mode     = MACVLAN_MODE_VEPA;
  	if (data && data[IFLA_MACVLAN_MODE])
@@@ -1028,6 -1091,13 +1101,13 @@@ static int macvlan_device_event(struct 
  			netdev_update_features(vlan->dev);
  		}
  		break;
+ 	case NETDEV_CHANGEMTU:
+ 		list_for_each_entry(vlan, &port->vlans, list) {
+ 			if (vlan->dev->mtu <= dev->mtu)
+ 				continue;
+ 			dev_set_mtu(vlan->dev, dev->mtu);
+ 		}
+ 		break;
  	case NETDEV_UNREGISTER:
  		/* twiddle thumbs on netns device moves */
  		if (dev->reg_state != NETREG_UNREGISTERING)
diff --combined drivers/net/phy/phy_device.c
index 4987a1c,466ae3e..4383434
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@@ -614,8 -614,8 +614,8 @@@ int phy_attach_direct(struct net_devic
  	err = phy_init_hw(phydev);
  	if (err)
  		phy_detach(phydev);
 -
 -	phy_resume(phydev);
 +	else
 +		phy_resume(phydev);
  
  	return err;
  }
@@@ -1067,7 -1067,7 +1067,7 @@@ int genphy_soft_reset(struct phy_devic
  }
  EXPORT_SYMBOL(genphy_soft_reset);
  
- static int genphy_config_init(struct phy_device *phydev)
+ int genphy_config_init(struct phy_device *phydev)
  {
  	int val;
  	u32 features;
@@@ -1118,6 -1118,7 +1118,7 @@@ static int gen10g_soft_reset(struct phy
  	/* Do nothing for now */
  	return 0;
  }
+ EXPORT_SYMBOL(genphy_config_init);
  
  static int gen10g_config_init(struct phy_device *phydev)
  {
diff --combined drivers/net/wireless/iwlwifi/mvm/coex.c
index 0489314,8f4b03d..4284672
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@@ -104,11 -104,8 +104,8 @@@ static const u8 iwl_bt_prio_tbl[BT_COEX
  #define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD	(-65)
  #define BT_ANTENNA_COUPLING_THRESHOLD		(30)
  
- int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
+ static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
  {
- 	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
- 		return 0;
- 
  	return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
  				    sizeof(struct iwl_bt_coex_prio_tbl_cmd),
  				    &iwl_bt_prio_tbl);
@@@ -573,8 -570,9 +570,9 @@@ int iwl_send_bt_init_conf(struct iwl_mv
  	int ret;
  	u32 flags;
  
- 	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
- 		return 0;
+ 	ret = iwl_send_bt_prio_tbl(mvm);
+ 	if (ret)
+ 		return ret;
  
  	bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
  	if (!bt_cmd)
@@@ -582,10 -580,12 +580,12 @@@
  	cmd.data[0] = bt_cmd;
  
  	bt_cmd->max_kill = 5;
- 	bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD,
- 	bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling,
- 	bt_cmd->bt4_tx_tx_delta_freq_thr = 15,
- 	bt_cmd->bt4_tx_rx_max_freq0 = 15,
+ 	bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD;
+ 	bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
+ 	bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
+ 	bt_cmd->bt4_tx_rx_max_freq0 = 15;
+ 	bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT;
+ 	bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;
  
  	flags = iwlwifi_mod_params.bt_coex_active ?
  			BT_COEX_NW : BT_COEX_DISABLE;
@@@ -611,14 -611,14 +611,14 @@@
  		bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
  
  	if (IWL_MVM_BT_COEX_CORUNNING) {
 -		bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 |
 -						    BT_VALID_CORUN_LUT_40);
 +		bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
 +						     BT_VALID_CORUN_LUT_40);
  		bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
  	}
  
  	if (IWL_MVM_BT_COEX_MPLUT) {
  		bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
 -		bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
 +		bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
  	}
  
  	if (mvm->cfg->bt_shared_single_ant)
@@@ -1215,6 -1215,17 +1215,17 @@@ bool iwl_mvm_bt_coex_is_mimo_allowed(st
  	return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
  }
  
+ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
+ 				    enum ieee80211_band band)
+ {
+ 	u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
+ 
+ 	if (band != IEEE80211_BAND_2GHZ)
+ 		return false;
+ 
+ 	return bt_activity >= BT_LOW_TRAFFIC;
+ }
+ 
  u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
  			   struct ieee80211_tx_info *info, u8 ac)
  {
@@@ -1249,9 -1260,6 +1260,6 @@@
  
  void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
  {
- 	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
- 		return;
- 
  	iwl_mvm_bt_coex_notif_handle(mvm);
  }
  
diff --combined drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index d73a89e,6174c02..6959fda
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@@ -169,8 -169,12 +169,12 @@@ enum iwl_scan_type 
  	SCAN_TYPE_DISCOVERY_FORCED	= 6,
  }; /* SCAN_ACTIVITY_TYPE_E_VER_1 */
  
- /* Maximal number of channels to scan */
- #define MAX_NUM_SCAN_CHANNELS 0x24
+ /**
+  * Maximal number of channels to scan;
+  * it should be equal to:
+  * max(IWL_NUM_CHANNELS, IWL_NUM_CHANNELS_FAMILY_8000)
+  */
+ #define MAX_NUM_SCAN_CHANNELS 50
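
Since MAX_NUM_SCAN_CHANNELS now has to track max(IWL_NUM_CHANNELS, IWL_NUM_CHANNELS_FAMILY_8000) by hand, a compile-time assertion would keep the comment honest; BUILD_BUG_ON() has to live inside a function, and the placement below is only an illustration, not part of the patch:

    /* Illustrative only: fail the build if the define falls behind. */
    BUILD_BUG_ON(MAX_NUM_SCAN_CHANNELS < IWL_NUM_CHANNELS);
    BUILD_BUG_ON(MAX_NUM_SCAN_CHANNELS < IWL_NUM_CHANNELS_FAMILY_8000);
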
  
  /**
   * struct iwl_scan_cmd - scan request command
@@@ -183,9 -187,9 +187,9 @@@
   *	this number of packets were received (typically 1)
   * @passive2active: is auto switching from passive to active during scan allowed
   * @rxchain_sel_flags: RXON_RX_CHAIN_*
 - * @max_out_time: in usecs, max out of serving channel time
 + * @max_out_time: in TUs, max out of serving channel time
   * @suspend_time: how long to pause scan when returning to service channel:
 - *	bits 0-19: beacon interal in usecs (suspend before executing)
 + *	bits 0-19: beacon interval in TUs (suspend before executing)
   *	bits 20-23: reserved
   *	bits 24-31: number of beacons (suspend between channels)
   * @rxon_flags: RXON_FLG_*
@@@ -383,8 -387,8 +387,8 @@@ enum scan_framework_client 
   * @quiet_plcp_th:	quiet channel num of packets threshold
   * @good_CRC_th:	passive to active promotion threshold
   * @rx_chain:		RXON rx chain.
 - * @max_out_time:	max uSec to be out of assoceated channel
 - * @suspend_time:	pause scan this long when returning to service channel
 + * @max_out_time:	max TUs to be out of the associated channel
 + * @suspend_time:	pause scan for this many TUs when returning to the service channel
   * @flags:		RXON flags
  * @filter_flags:	RXON filter
   * @tx_cmd:		tx command for active scan; for 2GHz and for 5GHz.
@@@ -534,13 -538,16 +538,16 @@@ struct iwl_scan_offload_schedule 
   *
   * IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering.
   * IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan.
-  * IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN: use energy based scan before partial scan
-  *	on A band.
+  * IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE: EBS duration is 100ms - a typical
+  *	beacon period. Finding channel activity in this mode is not guaranteed.
+  * IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE: EBS duration is 200ms.
+  *	Assuming a beacon period of 100ms, finding channel activity is guaranteed.
   */
  enum iwl_scan_offload_flags {
  	IWL_SCAN_OFFLOAD_FLAG_PASS_ALL		= BIT(0),
  	IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL	= BIT(2),
- 	IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN	= BIT(3),
+ 	IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE	= BIT(5),
+ 	IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE	= BIT(6),
  };
  
  /**
@@@ -563,17 -570,24 +570,24 @@@ enum iwl_scan_offload_compleate_status 
  	IWL_SCAN_OFFLOAD_ABORTED	= 2,
  };
  
+ enum iwl_scan_ebs_status {
+ 	IWL_SCAN_EBS_SUCCESS,
+ 	IWL_SCAN_EBS_FAILED,
+ 	IWL_SCAN_EBS_CHAN_NOT_FOUND,
+ };
+ 
  /**
   * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
   * @last_schedule_line:		last schedule line executed (fast or regular)
   * @last_schedule_iteration:	last scan iteration executed before scan abort
   * @status:			enum iwl_scan_offload_compleate_status
+  * @ebs_status: last EBS status, see IWL_SCAN_EBS_*
   */
  struct iwl_scan_offload_complete {
  	u8 last_schedule_line;
  	u8 last_schedule_iteration;
  	u8 status;
- 	u8 reserved;
+ 	u8 ebs_status;
  } __packed;
  
  /**
diff --combined drivers/net/wireless/iwlwifi/mvm/mac80211.c
index b41dc84,97c3dea..32682ed
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@@ -276,6 -276,7 +276,7 @@@ int iwl_mvm_mac_setup_register(struct i
  		    IEEE80211_HW_AMPDU_AGGREGATION |
  		    IEEE80211_HW_TIMING_BEACON_ONLY |
  		    IEEE80211_HW_CONNECTION_MONITOR |
+ 		    IEEE80211_HW_SUPPORTS_UAPSD |
  		    IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
  		    IEEE80211_HW_SUPPORTS_STATIC_SMPS;
  
@@@ -285,6 -286,8 +286,8 @@@
  				    IEEE80211_RADIOTAP_MCS_HAVE_STBC;
  	hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC;
  	hw->rate_control_algorithm = "iwl-mvm-rs";
+ 	hw->uapsd_queues = IWL_UAPSD_AC_INFO;
+ 	hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
  
  	/*
  	 * Enable 11w if advertised by firmware and software crypto
@@@ -295,11 -298,9 +298,9 @@@
  	    !iwlwifi_mod_params.sw_crypto)
  		hw->flags |= IEEE80211_HW_MFP_CAPABLE;
  
- 	if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
- 		hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
- 		hw->uapsd_queues = IWL_UAPSD_AC_INFO;
- 		hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
- 	}
+ 	/* Disable uAPSD due to firmware issues */
+ 	if (true)
+ 		hw->flags &= ~IEEE80211_HW_SUPPORTS_UAPSD;
  
  	hw->sta_data_size = sizeof(struct iwl_mvm_sta);
  	hw->vif_data_size = sizeof(struct iwl_mvm_vif);
@@@ -309,11 -310,8 +310,8 @@@
  		BIT(NL80211_IFTYPE_P2P_CLIENT) |
  		BIT(NL80211_IFTYPE_AP) |
  		BIT(NL80211_IFTYPE_P2P_GO) |
- 		BIT(NL80211_IFTYPE_P2P_DEVICE);
- 
- 	/* IBSS has bugs in older versions */
- 	if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
- 		hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+ 		BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ 		BIT(NL80211_IFTYPE_ADHOC);
  
  	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
  	hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
@@@ -365,14 -363,11 +363,11 @@@
  	else
  		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
  
- 	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
- 		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
- 		hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
- 		hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
- 		/* we create the 802.11 header and zero length SSID IE. */
- 		hw->wiphy->max_sched_scan_ie_len =
- 					SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
- 	}
+ 	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+ 	hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+ 	hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+ 	/* we create the 802.11 header and zero length SSID IE. */
+ 	hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
  
  	hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
  			       NL80211_FEATURE_P2P_GO_OPPPS;
@@@ -386,7 -381,11 +381,11 @@@
  	}
  
  #ifdef CONFIG_PM_SLEEP
- 	if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
+ 	if (iwl_mvm_is_d0i3_supported(mvm) &&
+ 	    device_can_wakeup(mvm->trans->dev)) {
+ 		mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
+ 		hw->wiphy->wowlan = &mvm->wowlan;
+ 	} else if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
  	    mvm->trans->ops->d3_suspend &&
  	    mvm->trans->ops->d3_resume &&
  	    device_can_wakeup(mvm->trans->dev)) {
@@@ -827,8 -826,7 +826,7 @@@ static int iwl_mvm_mac_add_interface(st
  		goto out_remove_mac;
  
  	if (!mvm->bf_allowed_vif &&
- 	    vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
- 	    mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
+ 	    vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
  		mvm->bf_allowed_vif = mvmvif;
  		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
  				     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
@@@ -1007,7 -1005,7 +1005,7 @@@ static void iwl_mvm_mc_iface_iterator(v
  	memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
  	len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
  
 -	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd);
 +	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
  	if (ret)
  		IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
  }
@@@ -1023,7 -1021,7 +1021,7 @@@ static void iwl_mvm_recalc_multicast(st
  	if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
  		return;
  
 -	ieee80211_iterate_active_interfaces(
 +	ieee80211_iterate_active_interfaces_atomic(
  		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
  		iwl_mvm_mc_iface_iterator, &iter_data);
  }
@@@ -1223,6 -1221,10 +1221,10 @@@ static int iwl_mvm_configure_bcast_filt
  	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
  		return 0;
  
+ 	/* bcast filtering isn't supported for P2P client */
+ 	if (vif->p2p)
+ 		return 0;
+ 
  	if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
  		return 0;
  
@@@ -1697,6 -1699,11 +1699,11 @@@ static int iwl_mvm_mac_sta_state(struc
  		ret = iwl_mvm_add_sta(mvm, vif, sta);
  	} else if (old_state == IEEE80211_STA_NONE &&
  		   new_state == IEEE80211_STA_AUTH) {
+ 		/*
+ 		 * EBS may be disabled due to previous failures reported by FW.
+ 		 * Reset EBS status here assuming environment has been changed.
+ 		 */
+ 		mvm->last_ebs_successful = true;
  		ret = 0;
  	} else if (old_state == IEEE80211_STA_AUTH &&
  		   new_state == IEEE80211_STA_ASSOC) {
@@@ -1807,11 -1814,6 +1814,11 @@@ static int iwl_mvm_mac_sched_scan_start
  
  	mutex_lock(&mvm->mutex);
  
 +	if (!iwl_mvm_is_idle(mvm)) {
 +		ret = -EBUSY;
 +		goto out;
 +	}
 +
  	switch (mvm->scan_status) {
  	case IWL_MVM_SCAN_OS:
  		IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
diff --combined drivers/net/wireless/iwlwifi/mvm/mvm.h
index f1ec098,17c42da..107d864
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@@ -164,7 -164,6 +164,6 @@@ enum iwl_dbgfs_pm_mask 
  	MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2),
  	MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
  	MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
- 	MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5),
  	MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
  	MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
  	MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
@@@ -177,7 -176,6 +176,6 @@@ struct iwl_dbgfs_pm 
  	u32 tx_data_timeout;
  	bool skip_over_dtim;
  	u8 skip_dtim_periods;
- 	bool disable_power_off;
  	bool lprx_ena;
  	u32 lprx_rssi_threshold;
  	bool snooze_ena;
@@@ -232,6 -230,7 +230,7 @@@ enum iwl_mvm_ref_type 
  	IWL_MVM_REF_USER,
  	IWL_MVM_REF_TX,
  	IWL_MVM_REF_TX_AGG,
+ 	IWL_MVM_REF_EXIT_WORK,
  
  	IWL_MVM_REF_COUNT,
  };
@@@ -265,6 -264,7 +264,7 @@@ struct iwl_mvm_vif_bf_data 
   * @uploaded: indicates the MAC context has been added to the device
   * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
   *	should get quota etc.
+  * @pm_enabled: indicates if MAC power management is allowed
   * @monitor_active: indicates that monitor context is configured, and that the
   *	interface should get quota etc.
   * @low_latency: indicates that this interface is in low-latency mode
@@@ -283,6 -283,7 +283,7 @@@ struct iwl_mvm_vif 
  
  	bool uploaded;
  	bool ap_ibss_active;
+ 	bool pm_enabled;
  	bool monitor_active;
  	bool low_latency;
  	struct iwl_mvm_vif_bf_data bf_data;
@@@ -451,6 -452,11 +452,11 @@@ struct iwl_mvm_frame_stats 
  	int last_frame_idx;
  };
  
+ enum {
+ 	D0I3_DEFER_WAKEUP,
+ 	D0I3_PENDING_WAKEUP,
+ };
+ 
  struct iwl_mvm {
  	/* for logger access */
  	struct device *dev;
@@@ -535,6 -541,8 +541,8 @@@
  	/* Internal station */
  	struct iwl_mvm_int_sta aux_sta;
  
+ 	bool last_ebs_successful;
+ 
  	u8 scan_last_antenna_idx; /* to toggle TX between antennas */
  	u8 mgmt_last_antenna_idx;
  
@@@ -578,6 -586,8 +586,8 @@@
  	void *fw_error_dump;
  	void *fw_error_sram;
  	u32 fw_error_sram_len;
+ 	u32 *fw_error_rxf;
+ 	u32 fw_error_rxf_len;
  
  	struct led_classdev led;
  
@@@ -601,6 -611,9 +611,9 @@@
  	bool d0i3_offloading;
  	struct work_struct d0i3_exit_work;
  	struct sk_buff_head d0i3_tx;
+ 	/* protect d0i3_suspend_flags */
+ 	struct mutex d0i3_suspend_mutex;
+ 	unsigned long d0i3_suspend_flags;
  	/* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
  	spinlock_t d0i3_tx_lock;
  	wait_queue_head_t d0i3_exit_waitq;
@@@ -629,8 -642,6 +642,6 @@@
  
  	/* Indicate if device power save is allowed */
  	bool ps_disabled;
- 	/* Indicate if device power management is allowed */
- 	bool pm_disabled;
  };
  
  /* Extract MVM priv from op_mode and _hw */
@@@ -705,6 -716,7 +716,7 @@@ void iwl_mvm_dump_nic_error_log(struct 
  #ifdef CONFIG_IWLWIFI_DEBUGFS
  void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
  void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm);
+ void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm);
  #endif
  u8 first_antenna(u8 mask);
  u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
@@@ -874,8 -886,6 +886,6 @@@ void iwl_mvm_update_frame_stats(struct 
  int rs_pretty_print_rate(char *buf, const u32 rate);
  
  /* power management */
- int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm);
- 
  int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
  int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
  int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@@ -922,9 -932,9 +932,9 @@@ int iwl_mvm_send_proto_offload(struct i
  void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
  void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
  void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
+ int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
  
  /* BT Coex */
- int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
  int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
  int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
  			     struct iwl_rx_cmd_buffer *rxb,
@@@ -936,6 -946,8 +946,8 @@@ u16 iwl_mvm_coex_agg_time_limit(struct 
  				struct ieee80211_sta *sta);
  bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
  				     struct ieee80211_sta *sta);
+ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
+ 				    enum ieee80211_band band);
  u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
  			   struct ieee80211_tx_info *info, u8 ac);
  int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable);
@@@ -1003,9 -1015,6 +1015,9 @@@ static inline bool iwl_mvm_vif_low_late
  	return mvmvif->low_latency;
  }
  
 +/* Assoc status */
 +bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
 +
  /* Thermal management and CT-kill */
  void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
  void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
diff --combined drivers/net/wireless/iwlwifi/mvm/rs.c
index e1c8388,d44b2b3..857ddaf
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@@ -527,6 -527,9 +527,9 @@@ static void rs_rate_scale_clear_tbl_win
  	IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
  	for (i = 0; i < IWL_RATE_COUNT; i++)
  		rs_rate_scale_clear_window(&tbl->win[i]);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++)
+ 		rs_rate_scale_clear_window(&tbl->tpc_win[i]);
  }
  
  static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
@@@ -656,17 -659,34 +659,34 @@@ static int _rs_collect_tx_data(struct i
  	return 0;
  }
  
- static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
- 			      int scale_index, int attempts, int successes)
+ static int rs_collect_tx_data(struct iwl_lq_sta *lq_sta,
+ 			      struct iwl_scale_tbl_info *tbl,
+ 			      int scale_index, int attempts, int successes,
+ 			      u8 reduced_txp)
  {
  	struct iwl_rate_scale_data *window = NULL;
+ 	int ret;
  
  	if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
  		return -EINVAL;
  
+ 	if (tbl->column != RS_COLUMN_INVALID) {
+ 		lq_sta->tx_stats[tbl->column][scale_index].total += attempts;
+ 		lq_sta->tx_stats[tbl->column][scale_index].success += successes;
+ 	}
+ 
  	/* Select window for current tx bit rate */
  	window = &(tbl->win[scale_index]);
  
+ 	ret = _rs_collect_tx_data(tbl, scale_index, attempts, successes,
+ 				  window);
+ 	if (ret)
+ 		return ret;
+ 
+ 	if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
+ 		return -EINVAL;
+ 
+ 	window = &tbl->tpc_win[reduced_txp];
  	return _rs_collect_tx_data(tbl, scale_index, attempts, successes,
  				   window);
  }
@@@ -1000,6 -1020,7 +1020,7 @@@ static void rs_tx_status(void *mvm_r, s
  	u32 ucode_rate;
  	struct rs_rate rate;
  	struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+ 	u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
  
  	/* Treat uninitialized rate scaling data same as non-existing. */
  	if (!lq_sta) {
@@@ -1010,7 -1031,7 +1031,7 @@@
  		return;
  	}
  
 -#ifdef CPTCFG_MAC80211_DEBUGFS
 +#ifdef CONFIG_MAC80211_DEBUGFS
  	/* Disable last tx check if we are debugging with fixed rate */
  	if (lq_sta->dbg_fixed_rate) {
  		IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
@@@ -1141,9 -1162,10 +1162,10 @@@
  	if (info->flags & IEEE80211_TX_STAT_AMPDU) {
  		ucode_rate = le32_to_cpu(table->rs_table[0]);
  		rs_rate_from_ucode_rate(ucode_rate, info->band, &rate);
- 		rs_collect_tx_data(curr_tbl, rate.index,
+ 		rs_collect_tx_data(lq_sta, curr_tbl, rate.index,
  				   info->status.ampdu_len,
- 				   info->status.ampdu_ack_len);
+ 				   info->status.ampdu_ack_len,
+ 				   reduced_txp);
  
  		/* Update success/fail counts if not searching for new mode */
  		if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
@@@ -1176,8 -1198,9 +1198,9 @@@
  			else
  				continue;
  
- 			rs_collect_tx_data(tmp_tbl, rate.index, 1,
- 					   i < retries ? 0 : legacy_success);
+ 			rs_collect_tx_data(lq_sta, tmp_tbl, rate.index, 1,
+ 					   i < retries ? 0 : legacy_success,
+ 					   reduced_txp);
  		}
  
  		/* Update success/fail counts if not searching for new mode */
@@@ -1188,6 -1211,7 +1211,7 @@@
  	}
  	/* The last TX rate is cached in lq_sta; it's set in if/else above */
  	lq_sta->last_rate_n_flags = ucode_rate;
+ 	IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
  done:
  	/* See if there's a better rate or modulation mode to try. */
  	if (sta && sta->supp_rates[sband->band])
@@@ -1769,6 -1793,198 +1793,198 @@@ out
  	return action;
  }
  
+ static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
+ 				int *weaker, int *stronger)
+ {
+ 	*weaker = index + TPC_TX_POWER_STEP;
+ 	if (*weaker > TPC_MAX_REDUCTION)
+ 		*weaker = TPC_INVALID;
+ 
+ 	*stronger = index - TPC_TX_POWER_STEP;
+ 	if (*stronger < 0)
+ 		*stronger = TPC_INVALID;
+ }
+ 
+ static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct rs_rate *rate,
+ 			   enum ieee80211_band band)
+ {
+ 	int index = rate->index;
+ 
+ 	/*
+ 	 * allow tpc only if power management is enabled, or bt coex
+ 	 * activity grade allows it and we are on 2.4GHz.
+ 	 */
+ 	if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM &&
+ 	    !iwl_mvm_bt_coex_is_tpc_allowed(mvm, band))
+ 		return false;
+ 
+ 	IWL_DEBUG_RATE(mvm, "check rate, table type: %d\n", rate->type);
+ 	if (is_legacy(rate))
+ 		return index == IWL_RATE_54M_INDEX;
+ 	if (is_ht(rate))
+ 		return index == IWL_RATE_MCS_7_INDEX;
+ 	if (is_vht(rate))
+ 		return index == IWL_RATE_MCS_7_INDEX ||
+ 		       index == IWL_RATE_MCS_8_INDEX ||
+ 		       index == IWL_RATE_MCS_9_INDEX;
+ 
+ 	WARN_ON_ONCE(1);
+ 	return false;
+ }
+ 
+ enum tpc_action {
+ 	TPC_ACTION_STAY,
+ 	TPC_ACTION_DECREASE,
+ 	TPC_ACTION_INCREASE,
+ 	TPC_ACTION_NO_RESTRICTION,
+ };
+ 
+ static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
+ 					 s32 sr, int weak, int strong,
+ 					 int current_tpt,
+ 					 int weak_tpt, int strong_tpt)
+ {
+ 	/* stay until we have valid tpt */
+ 	if (current_tpt == IWL_INVALID_VALUE) {
+ 		IWL_DEBUG_RATE(mvm, "no current tpt. stay.\n");
+ 		return TPC_ACTION_STAY;
+ 	}
+ 
+ 	/* Too many failures, increase txp */
+ 	if (sr <= TPC_SR_FORCE_INCREASE || current_tpt == 0) {
+ 		IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n");
+ 		return TPC_ACTION_NO_RESTRICTION;
+ 	}
+ 
+ 	/* try decreasing first if applicable */
+ 	if (weak != TPC_INVALID) {
+ 		if (weak_tpt == IWL_INVALID_VALUE &&
+ 		    (strong_tpt == IWL_INVALID_VALUE ||
+ 		     current_tpt >= strong_tpt)) {
+ 			IWL_DEBUG_RATE(mvm,
+ 				       "no weak txp measurement. decrease txp\n");
+ 			return TPC_ACTION_DECREASE;
+ 		}
+ 
+ 		if (weak_tpt > current_tpt) {
+ 			IWL_DEBUG_RATE(mvm,
+ 				       "lower txp has better tpt. decrease txp\n");
+ 			return TPC_ACTION_DECREASE;
+ 		}
+ 	}
+ 
+ 	/* next, increase if needed */
+ 	if (sr < TPC_SR_NO_INCREASE && strong != TPC_INVALID) {
+ 		if (weak_tpt == IWL_INVALID_VALUE &&
+ 		    strong_tpt != IWL_INVALID_VALUE &&
+ 		    current_tpt < strong_tpt) {
+ 			IWL_DEBUG_RATE(mvm,
+ 				       "higher txp has better tpt. increase txp\n");
+ 			return TPC_ACTION_INCREASE;
+ 		}
+ 
+ 		if (weak_tpt < current_tpt &&
+ 		    (strong_tpt == IWL_INVALID_VALUE ||
+ 		     strong_tpt > current_tpt)) {
+ 			IWL_DEBUG_RATE(mvm,
+ 				       "lower txp has worse tpt. increase txp\n");
+ 			return TPC_ACTION_INCREASE;
+ 		}
+ 	}
+ 
+ 	IWL_DEBUG_RATE(mvm, "no need to increase or decrease txp - stay\n");
+ 	return TPC_ACTION_STAY;
+ }
+ 
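
To make the branch order in rs_get_tpc_action() easier to follow, here is a standalone restatement of the decision with stand-in constants; the real thresholds (TPC_SR_FORCE_INCREASE, TPC_SR_NO_INCREASE, IWL_INVALID_VALUE) live in the driver headers, and the values below are assumptions for illustration:

    #include <stdio.h>

    /* Stand-in constants; the driver's real values may differ. */
    enum { INVALID = -1, SR_FORCE_INCREASE = 25, SR_NO_INCREASE = 85 };

    static const char *tpc_action(int sr, int weak, int strong,
                                  int cur_tpt, int weak_tpt, int strong_tpt)
    {
            if (cur_tpt == INVALID)
                    return "stay";                  /* no data yet */
            if (sr <= SR_FORCE_INCREASE || cur_tpt == 0)
                    return "no restriction";        /* too many failures */
            if (weak != INVALID) {                  /* try decreasing first */
                    if (weak_tpt == INVALID &&
                        (strong_tpt == INVALID || cur_tpt >= strong_tpt))
                            return "decrease";
                    if (weak_tpt > cur_tpt)
                            return "decrease";
            }
            if (sr < SR_NO_INCREASE && strong != INVALID) {
                    if (weak_tpt == INVALID && strong_tpt != INVALID &&
                        cur_tpt < strong_tpt)
                            return "increase";
                    if (weak_tpt < cur_tpt &&
                        (strong_tpt == INVALID || strong_tpt > cur_tpt))
                            return "increase";
            }
            return "stay";
    }

    int main(void)
    {
            /* lower power already measured better throughput: step down */
            printf("%s\n", tpc_action(90, 3, INVALID, 25, 30, INVALID));
            return 0;
    }
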
+ static bool rs_tpc_perform(struct iwl_mvm *mvm,
+ 			   struct ieee80211_sta *sta,
+ 			   struct iwl_lq_sta *lq_sta,
+ 			   struct iwl_scale_tbl_info *tbl)
+ {
+ 	struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
+ 	struct ieee80211_vif *vif = mvm_sta->vif;
+ 	struct ieee80211_chanctx_conf *chanctx_conf;
+ 	enum ieee80211_band band;
+ 	struct iwl_rate_scale_data *window;
+ 	struct rs_rate *rate = &tbl->rate;
+ 	enum tpc_action action;
+ 	s32 sr;
+ 	u8 cur = lq_sta->lq.reduced_tpc;
+ 	int current_tpt;
+ 	int weak, strong;
+ 	int weak_tpt = IWL_INVALID_VALUE, strong_tpt = IWL_INVALID_VALUE;
+ 
+ #ifdef CONFIG_MAC80211_DEBUGFS
+ 	if (lq_sta->dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) {
+ 		IWL_DEBUG_RATE(mvm, "fixed tpc: %d\n",
+ 			       lq_sta->dbg_fixed_txp_reduction);
+ 		lq_sta->lq.reduced_tpc = lq_sta->dbg_fixed_txp_reduction;
+ 		return cur != lq_sta->dbg_fixed_txp_reduction;
+ 	}
+ #endif
+ 
+ 	rcu_read_lock();
+ 	chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ 	if (WARN_ON(!chanctx_conf))
+ 		band = IEEE80211_NUM_BANDS;
+ 	else
+ 		band = chanctx_conf->def.chan->band;
+ 	rcu_read_unlock();
+ 
+ 	if (!rs_tpc_allowed(mvm, rate, band)) {
+ 		IWL_DEBUG_RATE(mvm,
+ 			       "tpc is not allowed. remove txp restrictions\n");
+ 		lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
+ 		return cur != TPC_NO_REDUCTION;
+ 	}
+ 
+ 	rs_get_adjacent_txp(mvm, cur, &weak, &strong);
+ 
+ 	/* Collect measured throughputs for current and adjacent rates */
+ 	window = tbl->tpc_win;
+ 	sr = window[cur].success_ratio;
+ 	current_tpt = window[cur].average_tpt;
+ 	if (weak != TPC_INVALID)
+ 		weak_tpt = window[weak].average_tpt;
+ 	if (strong != TPC_INVALID)
+ 		strong_tpt = window[strong].average_tpt;
+ 
+ 	IWL_DEBUG_RATE(mvm,
+ 		       "(TPC: %d): cur_tpt %d SR %d weak %d strong %d weak_tpt %d strong_tpt %d\n",
+ 		       cur, current_tpt, sr, weak, strong,
+ 		       weak_tpt, strong_tpt);
+ 
+ 	action = rs_get_tpc_action(mvm, sr, weak, strong,
+ 				   current_tpt, weak_tpt, strong_tpt);
+ 
+ 	/* override actions if we are on the edge */
+ 	if (weak == TPC_INVALID && action == TPC_ACTION_DECREASE) {
+ 		IWL_DEBUG_RATE(mvm, "already in lowest txp, stay\n");
+ 		action = TPC_ACTION_STAY;
+ 	} else if (strong == TPC_INVALID &&
+ 		   (action == TPC_ACTION_INCREASE ||
+ 		    action == TPC_ACTION_NO_RESTRICTION)) {
+ 		IWL_DEBUG_RATE(mvm, "already in highest txp, stay\n");
+ 		action = TPC_ACTION_STAY;
+ 	}
+ 
+ 	switch (action) {
+ 	case TPC_ACTION_DECREASE:
+ 		lq_sta->lq.reduced_tpc = weak;
+ 		return true;
+ 	case TPC_ACTION_INCREASE:
+ 		lq_sta->lq.reduced_tpc = strong;
+ 		return true;
+ 	case TPC_ACTION_NO_RESTRICTION:
+ 		lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
+ 		return true;
+ 	case TPC_ACTION_STAY:
+ 		/* do nothing */
+ 		break;
+ 	}
+ 	return false;
+ }
+ 
  /*
   * Do rate scaling and search for new modulation mode.
   */
@@@ -2019,6 -2235,8 +2235,8 @@@ static void rs_rate_scale_perform(struc
  		break;
  	case RS_ACTION_STAY:
  		/* No change */
+ 		update_lq = rs_tpc_perform(mvm, sta, lq_sta, tbl);
+ 		break;
  	default:
  		break;
  	}
@@@ -2478,6 -2696,7 +2696,7 @@@ void iwl_mvm_rs_rate_init(struct iwl_mv
  	lq_sta->is_agg = 0;
  #ifdef CONFIG_MAC80211_DEBUGFS
  	lq_sta->dbg_fixed_rate = 0;
+ 	lq_sta->dbg_fixed_txp_reduction = TPC_INVALID;
  #endif
  #ifdef CONFIG_IWLWIFI_DEBUGFS
  	iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats);
@@@ -2653,6 -2872,7 +2872,7 @@@ static void rs_fill_lq_cmd(struct iwl_m
  		rs_build_rates_table_from_fixed(mvm, lq_cmd,
  						lq_sta->band,
  						lq_sta->dbg_fixed_rate);
+ 		lq_cmd->reduced_tpc = 0;
  		ant = (lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
  			RATE_MCS_ANT_POS;
  	} else
@@@ -2783,7 -3003,6 +3003,6 @@@ static ssize_t rs_sta_dbgfs_scale_table
  	size_t buf_size;
  	u32 parsed_rate;
  
- 
  	mvm = lq_sta->drv;
  	memset(buf, 0, sizeof(buf));
  	buf_size = min(count, sizeof(buf) -  1);
@@@ -2856,6 -3075,7 +3075,7 @@@ static ssize_t rs_sta_dbgfs_scale_table
  			lq_sta->lq.agg_disable_start_th,
  			lq_sta->lq.agg_frame_cnt_limit);
  
+ 	desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc);
  	desc += sprintf(buff+desc,
  			"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
  			lq_sta->lq.initial_rate_index[0],
@@@ -2928,6 -3148,94 +3148,94 @@@ static const struct file_operations rs_
  	.llseek = default_llseek,
  };
  
+ static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
+ 					      char __user *user_buf,
+ 					      size_t count, loff_t *ppos)
+ {
+ 	static const char * const column_name[] = {
+ 		[RS_COLUMN_LEGACY_ANT_A] = "LEGACY_ANT_A",
+ 		[RS_COLUMN_LEGACY_ANT_B] = "LEGACY_ANT_B",
+ 		[RS_COLUMN_SISO_ANT_A] = "SISO_ANT_A",
+ 		[RS_COLUMN_SISO_ANT_B] = "SISO_ANT_B",
+ 		[RS_COLUMN_SISO_ANT_A_SGI] = "SISO_ANT_A_SGI",
+ 		[RS_COLUMN_SISO_ANT_B_SGI] = "SISO_ANT_B_SGI",
+ 		[RS_COLUMN_MIMO2] = "MIMO2",
+ 		[RS_COLUMN_MIMO2_SGI] = "MIMO2_SGI",
+ 	};
+ 
+ 	static const char * const rate_name[] = {
+ 		[IWL_RATE_1M_INDEX] = "1M",
+ 		[IWL_RATE_2M_INDEX] = "2M",
+ 		[IWL_RATE_5M_INDEX] = "5.5M",
+ 		[IWL_RATE_11M_INDEX] = "11M",
+ 		[IWL_RATE_6M_INDEX] = "6M|MCS0",
+ 		[IWL_RATE_9M_INDEX] = "9M",
+ 		[IWL_RATE_12M_INDEX] = "12M|MCS1",
+ 		[IWL_RATE_18M_INDEX] = "18M|MCS2",
+ 		[IWL_RATE_24M_INDEX] = "24M|MCS3",
+ 		[IWL_RATE_36M_INDEX] = "36M|MCS4",
+ 		[IWL_RATE_48M_INDEX] = "48M|MCS5",
+ 		[IWL_RATE_54M_INDEX] = "54M|MCS6",
+ 		[IWL_RATE_MCS_7_INDEX] = "MCS7",
+ 		[IWL_RATE_MCS_8_INDEX] = "MCS8",
+ 		[IWL_RATE_MCS_9_INDEX] = "MCS9",
+ 	};
+ 
+ 	char *buff, *pos, *endpos;
+ 	int col, rate;
+ 	ssize_t ret;
+ 	struct iwl_lq_sta *lq_sta = file->private_data;
+ 	struct rs_rate_stats *stats;
+ 	static const size_t bufsz = 1024;
+ 
+ 	buff = kmalloc(bufsz, GFP_KERNEL);
+ 	if (!buff)
+ 		return -ENOMEM;
+ 
+ 	pos = buff;
+ 	endpos = pos + bufsz;
+ 
+ 	pos += scnprintf(pos, endpos - pos, "COLUMN,");
+ 	for (rate = 0; rate < IWL_RATE_COUNT; rate++)
+ 		pos += scnprintf(pos, endpos - pos, "%s,", rate_name[rate]);
+ 	pos += scnprintf(pos, endpos - pos, "\n");
+ 
+ 	for (col = 0; col < RS_COLUMN_COUNT; col++) {
+ 		pos += scnprintf(pos, endpos - pos,
+ 				 "%s,", column_name[col]);
+ 
+ 		for (rate = 0; rate < IWL_RATE_COUNT; rate++) {
+ 			stats = &(lq_sta->tx_stats[col][rate]);
+ 			pos += scnprintf(pos, endpos - pos,
+ 					 "%llu/%llu,",
+ 					 stats->success,
+ 					 stats->total);
+ 		}
+ 		pos += scnprintf(pos, endpos - pos, "\n");
+ 	}
+ 
+ 	ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+ 	kfree(buff);
+ 	return ret;
+ }
+ 
+ static ssize_t rs_sta_dbgfs_drv_tx_stats_write(struct file *file,
+ 					       const char __user *user_buf,
+ 					       size_t count, loff_t *ppos)
+ {
+ 	struct iwl_lq_sta *lq_sta = file->private_data;
+ 
+ 	memset(lq_sta->tx_stats, 0, sizeof(lq_sta->tx_stats));
+ 
+ 	return count;
+ }
+ 
+ static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = {
+ 	.read = rs_sta_dbgfs_drv_tx_stats_read,
+ 	.write = rs_sta_dbgfs_drv_tx_stats_write,
+ 	.open = simple_open,
+ 	.llseek = default_llseek,
+ };
+ 
  static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
  {
  	struct iwl_lq_sta *lq_sta = mvm_sta;
@@@ -2937,9 -3245,15 +3245,15 @@@
  	lq_sta->rs_sta_dbgfs_stats_table_file =
  		debugfs_create_file("rate_stats_table", S_IRUSR, dir,
  				    lq_sta, &rs_sta_dbgfs_stats_table_ops);
+ 	lq_sta->rs_sta_dbgfs_drv_tx_stats_file =
+ 		debugfs_create_file("drv_tx_stats", S_IRUSR | S_IWUSR, dir,
+ 				    lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops);
  	lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
  		debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
  				  &lq_sta->tx_agg_tid_en);
+ 	lq_sta->rs_sta_dbgfs_reduced_txp_file =
+ 		debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir,
+ 				  &lq_sta->dbg_fixed_txp_reduction);
  }
  
  static void rs_remove_debugfs(void *mvm, void *mvm_sta)
@@@ -2947,7 -3261,9 +3261,9 @@@
  	struct iwl_lq_sta *lq_sta = mvm_sta;
  	debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
  	debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+ 	debugfs_remove(lq_sta->rs_sta_dbgfs_drv_tx_stats_file);
  	debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+ 	debugfs_remove(lq_sta->rs_sta_dbgfs_reduced_txp_file);
  }
  #endif
  
diff --combined drivers/net/wireless/iwlwifi/mvm/scan.c
index c28de54,63e7b16..36ae01a
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@@ -277,22 -277,51 +277,22 @@@ static void iwl_mvm_scan_calc_params(st
  					    IEEE80211_IFACE_ITER_NORMAL,
  					    iwl_mvm_scan_condition_iterator,
  					    &global_bound);
 -	/*
 -	 * Under low latency traffic passive scan is fragmented meaning
 -	 * that dwell on a particular channel will be fragmented. Each fragment
 -	 * dwell time is 20ms and fragments period is 105ms. Skipping to next
 -	 * channel will be delayed by the same period - 105ms. So suspend_time
 -	 * parameter describing both fragments and channels skipping periods is
 -	 * set to 105ms. This value is chosen so that overall passive scan
 -	 * duration will not be too long. Max_out_time in this case is set to
 -	 * 70ms, so for active scanning operating channel will be left for 70ms
 -	 * while for passive still for 20ms (fragment dwell).
 -	 */
 -	if (global_bound) {
 -		if (!iwl_mvm_low_latency(mvm)) {
 -			params->suspend_time = ieee80211_tu_to_usec(100);
 -			params->max_out_time = ieee80211_tu_to_usec(600);
 -		} else {
 -			params->suspend_time = ieee80211_tu_to_usec(105);
 -			/* P2P doesn't support fragmented passive scan, so
 -			 * configure max_out_time to be at least longest dwell
 -			 * time for passive scan.
 -			 */
 -			if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
 -				params->max_out_time = ieee80211_tu_to_usec(70);
 -				params->passive_fragmented = true;
 -			} else {
 -				u32 passive_dwell;
  
 -				/*
 -				 * Use band G so that passive channel dwell time
 -				 * will be assigned with maximum value.
 -				 */
 -				band = IEEE80211_BAND_2GHZ;
 -				passive_dwell = iwl_mvm_get_passive_dwell(band);
 -				params->max_out_time =
 -					ieee80211_tu_to_usec(passive_dwell);
 -			}
 -		}
 +	if (!global_bound)
 +		goto not_bound;
 +
 +	params->suspend_time = 100;
 +	params->max_out_time = 600;
 +
 +	if (iwl_mvm_low_latency(mvm)) {
 +		params->suspend_time = 250;
 +		params->max_out_time = 250;
  	}
  
 +not_bound:
 +
  	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
 -		if (params->passive_fragmented)
 -			params->dwell[band].passive = 20;
 -		else
 -			params->dwell[band].passive =
 -				iwl_mvm_get_passive_dwell(band);
 +		params->dwell[band].passive = iwl_mvm_get_passive_dwell(band);
  		params->dwell[band].active = iwl_mvm_get_active_dwell(band,
  								      n_ssids);
  	}
@@@ -319,7 -348,10 +319,10 @@@ int iwl_mvm_scan_request(struct iwl_mv
  	struct iwl_mvm_scan_params params = {};
  
  	lockdep_assert_held(&mvm->mutex);
- 	BUG_ON(mvm->scan_cmd == NULL);
+ 
+ 	/* we should have failed registration if scan_cmd was NULL */
+ 	if (WARN_ON(mvm->scan_cmd == NULL))
+ 		return -ENOMEM;
  
  	IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n");
  	mvm->scan_status = IWL_MVM_SCAN_OS;
@@@ -538,9 -570,13 +541,13 @@@ int iwl_mvm_rx_scan_offload_complete_no
  	/* scan status must be locked for proper checking */
  	lockdep_assert_held(&mvm->mutex);
  
- 	IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n",
+ 	IWL_DEBUG_SCAN(mvm,
+ 		       "Scheduled scan completed, status %s EBS status %s:%d\n",
  		       scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
- 		       "completed" : "aborted");
+ 		       "completed" : "aborted", scan_notif->ebs_status ==
+ 		       IWL_SCAN_EBS_SUCCESS ? "success" : "failed",
+ 		       scan_notif->ebs_status);
+ 
  
  	/* only call mac80211 completion if the stop was initiated by FW */
  	if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
@@@ -548,6 -584,8 +555,8 @@@
  		ieee80211_sched_scan_stopped(mvm->hw);
  	}
  
+ 	mvm->last_ebs_successful = !scan_notif->ebs_status;
+ 
  	return 0;
  }
  
@@@ -732,7 -770,7 +741,7 @@@ int iwl_mvm_config_sched_scan(struct iw
  	int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
  	int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
  	int head = 0;
 -	int tail = band_2ghz + band_5ghz;
 +	int tail = band_2ghz + band_5ghz - 1;
  	u32 ssid_bitmap;
  	int cmd_len;
  	int ret;
@@@ -884,6 -922,11 +893,11 @@@ int iwl_mvm_sched_scan_start(struct iwl
  		scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL);
  	}
  
+ 	if (mvm->last_ebs_successful &&
+ 	    mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT)
+ 		scan_req.flags |=
+ 			cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE);
+ 
  	return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, CMD_SYNC,
  				    sizeof(scan_req), &scan_req);
  }
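
Note the BUG_ON() -> WARN_ON() conversion in iwl_mvm_scan_request() above: instead of
panicking the machine on a driver-internal inconsistency, the code logs a stack trace
and fails just the one operation. The shape of that defensive idiom, as a sketch:

#include <linux/bug.h>
#include <linux/errno.h>

static int setup_checked(void *cmd_buf)
{
	/* WARN_ON() evaluates its argument and logs a backtrace if true */
	if (WARN_ON(cmd_buf == NULL))
		return -ENOMEM;	/* fail this request, keep the box alive */
	/* ... proceed using cmd_buf ... */
	return 0;
}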
diff --combined drivers/net/wireless/iwlwifi/mvm/utils.c
index 2180902,c5f4532..eb2ca64
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@@ -64,6 -64,7 +64,7 @@@
  
  #include "iwl-debug.h"
  #include "iwl-io.h"
+ #include "iwl-prph.h"
  
  #include "mvm.h"
  #include "fw-api-rs.h"
@@@ -469,6 -470,8 +470,8 @@@ void iwl_mvm_dump_nic_error_log(struct 
  			mvm->status, table.valid);
  	}
  
+ 	/* Do not change this output - scripts rely on it */
+ 
  	IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
  
  	trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
@@@ -522,7 -525,7 +525,7 @@@ void iwl_mvm_fw_error_sram_dump(struct 
  	u32 ofs, sram_len;
  	void *sram;
  
- 	if (!mvm->ucode_loaded || mvm->fw_error_sram)
+ 	if (!mvm->ucode_loaded || mvm->fw_error_sram || mvm->fw_error_dump)
  		return;
  
  	img = &mvm->fw->img[mvm->cur_ucode];
@@@ -538,6 -541,47 +541,47 @@@
  	mvm->fw_error_sram_len = sram_len;
  }
  
+ void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm)
+ {
+ 	int i, reg_val;
+ 	unsigned long flags;
+ 
+ 	if (!mvm->ucode_loaded || mvm->fw_error_rxf || mvm->fw_error_dump)
+ 		return;
+ 
+ 	/* reading buffer size */
+ 	reg_val = iwl_trans_read_prph(mvm->trans, RXF_SIZE_ADDR);
+ 	mvm->fw_error_rxf_len =
+ 		(reg_val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS;
+ 
+ 	/* the register holds the value divided by 128 */
+ 	mvm->fw_error_rxf_len = mvm->fw_error_rxf_len << 7;
+ 
+ 	if (!mvm->fw_error_rxf_len)
+ 		return;
+ 
+ 	mvm->fw_error_rxf =  kzalloc(mvm->fw_error_rxf_len, GFP_ATOMIC);
+ 	if (!mvm->fw_error_rxf) {
+ 		mvm->fw_error_rxf_len = 0;
+ 		return;
+ 	}
+ 
+ 	if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags)) {
+ 		kfree(mvm->fw_error_rxf);
+ 		mvm->fw_error_rxf = NULL;
+ 		mvm->fw_error_rxf_len = 0;
+ 		return;
+ 	}
+ 
+ 	for (i = 0; i < (mvm->fw_error_rxf_len / sizeof(u32)); i++) {
+ 		iwl_trans_write_prph(mvm->trans, RXF_LD_FENCE_OFFSET_ADDR,
+ 				     i * sizeof(u32));
+ 		mvm->fw_error_rxf[i] =
+ 			iwl_trans_read_prph(mvm->trans, RXF_FIFO_RD_FENCE_ADDR);
+ 	}
+ 	iwl_trans_release_nic_access(mvm->trans, &flags);
+ }
+ 
  /**
   * iwl_mvm_send_lq_cmd() - Send link quality command
   * @init: This command is sent as part of station initialization right
@@@ -644,22 -688,3 +688,22 @@@ bool iwl_mvm_low_latency(struct iwl_mv
  
  	return result;
  }
 +
 +static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
 +{
 +	bool *idle = _data;
 +
 +	if (!vif->bss_conf.idle)
 +		*idle = false;
 +}
 +
 +bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
 +{
 +	bool idle = true;
 +
 +	ieee80211_iterate_active_interfaces_atomic(
 +			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
 +			iwl_mvm_idle_iter, &idle);
 +
 +	return idle;
 +}
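
In the new iwl_mvm_fw_error_rxf_dump() above, the FIFO length comes out of a
peripheral register that stores the byte count in 128-byte units, hence the shift by 7
after masking. A worked sketch of the decode; the DEMO_* mask/position values are
placeholders, not the real iwl-prph.h definitions:

#include <linux/types.h>

#define DEMO_BYTE_CNT_MSK	0x00000fff
#define DEMO_BYTE_CNT_POS	0

static u32 demo_rxf_len(u32 reg_val)
{
	u32 len = (reg_val & DEMO_BYTE_CNT_MSK) >> DEMO_BYTE_CNT_POS;

	return len << 7;	/* register counts 128-byte units; 128 == 1 << 7 */
}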
diff --combined drivers/net/wireless/iwlwifi/pcie/trans.c
index 2365553,f98ef1e..c76b148
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@@ -103,7 -103,6 +103,6 @@@ static void iwl_pcie_set_pwr(struct iwl
  
  /* PCI registers */
  #define PCI_CFG_RETRY_TIMEOUT	0x041
- #define CPU1_CPU2_SEPARATOR_SECTION	0xFFFFCCCC
  
  static void iwl_pcie_apm_config(struct iwl_trans *trans)
  {
@@@ -1053,6 -1052,12 +1052,12 @@@ static void iwl_trans_pcie_write_prph(s
  	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
  }
  
+ static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+ {
+ 	WARN_ON(1);
+ 	return 0;
+ }
+ 
  static void iwl_trans_pcie_configure(struct iwl_trans *trans,
  				     const struct iwl_trans_config *trans_cfg)
  {
@@@ -1079,6 -1084,18 +1084,18 @@@
  
  	trans_pcie->command_names = trans_cfg->command_names;
  	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
+ 
+ 	/* Initialize NAPI here - it should be before registering to mac80211
+ 	 * in the opmode but after the HW struct is allocated.
+ 	 * As this function may be called again in some corner cases, don't
+ 	 * do anything if NAPI was already initialized.
+ 	 */
+ 	if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
+ 		init_dummy_netdev(&trans_pcie->napi_dev);
+ 		iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
+ 				     &trans_pcie->napi_dev,
+ 				     iwl_pcie_dummy_napi_poll, 64);
+ 	}
  }
  
  void iwl_trans_pcie_free(struct iwl_trans *trans)
@@@ -1099,6 -1116,9 +1116,9 @@@
  	pci_disable_device(trans_pcie->pci_dev);
  	kmem_cache_destroy(trans->dev_cmd_pool);
  
+ 	if (trans_pcie->napi.poll)
+ 		netif_napi_del(&trans_pcie->napi);
+ 
  	kfree(trans);
  }
  
@@@ -1237,7 -1257,7 +1257,7 @@@ static int iwl_trans_pcie_write_mem(str
  
  #define IWL_FLUSH_WAIT_MS	2000
  
- static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
+ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
  {
  	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  	struct iwl_txq *txq;
@@@ -1250,13 -1270,31 +1270,31 @@@
  
  	/* waiting for all the tx frames complete might take a while */
  	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+ 		u8 wr_ptr;
+ 
  		if (cnt == trans_pcie->cmd_queue)
  			continue;
+ 		if (!test_bit(cnt, trans_pcie->queue_used))
+ 			continue;
+ 		if (!(BIT(cnt) & txq_bm))
+ 			continue;
+ 
+ 		IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
  		txq = &trans_pcie->txq[cnt];
  		q = &txq->q;
- 		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
- 		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
+ 		wr_ptr = ACCESS_ONCE(q->write_ptr);
+ 
+ 		while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
+ 		       !time_after(jiffies,
+ 				   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
+ 			u8 write_ptr = ACCESS_ONCE(q->write_ptr);
+ 
+ 			if (WARN_ONCE(wr_ptr != write_ptr,
+ 				      "WR pointer moved while flushing %d -> %d\n",
+ 				      wr_ptr, write_ptr))
+ 				return -ETIMEDOUT;
  			msleep(1);
+ 		}
  
  		if (q->read_ptr != q->write_ptr) {
  			IWL_ERR(trans,
@@@ -1264,6 -1302,7 +1302,7 @@@
  			ret = -ETIMEDOUT;
  			break;
  		}
+ 		IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
  	}
  
  	if (!ret)
@@@ -1749,10 -1788,6 +1788,10 @@@ struct iwl_trans *iwl_trans_pcie_alloc(
  	 * PCI Tx retries from interfering with C3 CPU state */
  	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
  
 +	trans->dev = &pdev->dev;
 +	trans_pcie->pci_dev = pdev;
 +	iwl_disable_interrupts(trans);
 +
  	err = pci_enable_msi(pdev);
  	if (err) {
  		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
@@@ -1764,6 -1799,8 +1803,6 @@@
  		}
  	}
  
 -	trans->dev = &pdev->dev;
 -	trans_pcie->pci_dev = pdev;
  	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
  	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
  	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
@@@ -1789,6 -1826,8 +1828,6 @@@
  		goto out_pci_disable_msi;
  	}
  
 -	trans_pcie->inta_mask = CSR_INI_SET_MASK;
 -
  	if (iwl_pcie_alloc_ict(trans))
  		goto out_free_cmd_pool;
  
@@@ -1800,8 -1839,6 +1839,8 @@@
  		goto out_free_ict;
  	}
  
 +	trans_pcie->inta_mask = CSR_INI_SET_MASK;
 +
  	return trans;
  
  out_free_ict:
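
The reworked iwl_trans_pcie_wait_txq_empty() above bounds its wait with jiffies
arithmetic and re-reads the write pointer through ACCESS_ONCE() so the compiler cannot
cache it across iterations. The underlying poll-with-deadline idiom, sketched with a
placeholder queue_empty() predicate:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static int wait_drained(bool (*queue_empty)(void), unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	while (!queue_empty()) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		msleep(1);	/* sleep, don't spin, while waiting */
	}
	return 0;
}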
diff --combined drivers/net/xen-netback/interface.c
index 20e9def,a755733..53cdcdf
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@@ -75,8 -75,32 +75,8 @@@ static int xenvif_poll(struct napi_stru
  	work_done = xenvif_tx_action(vif, budget);
  
  	if (work_done < budget) {
 -		int more_to_do = 0;
 -		unsigned long flags;
 -
 -		/* It is necessary to disable IRQ before calling
 -		 * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
 -		 * lose event from the frontend.
 -		 *
 -		 * Consider:
 -		 *   RING_HAS_UNCONSUMED_REQUESTS
 -		 *   <frontend generates event to trigger napi_schedule>
 -		 *   __napi_complete
 -		 *
 -		 * This handler is still in scheduled state so the
 -		 * event has no effect at all. After __napi_complete
 -		 * this handler is descheduled and cannot get
 -		 * scheduled again. We lose event in this case and the ring
 -		 * will be completely stalled.
 -		 */
 -
 -		local_irq_save(flags);
 -
 -		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
 -		if (!more_to_do)
 -			__napi_complete(napi);
 -
 -		local_irq_restore(flags);
 +		napi_complete(napi);
 +		xenvif_napi_schedule_or_enable_events(vif);
  	}
  
  	return work_done;
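
The xenvif_poll() rewrite above replaces the open-coded IRQ-disable plus
__napi_complete() dance with the standard "complete, then let a helper re-check for
late events" shape. A generic sketch of that shape; my_tx_action() and
my_recheck_events() stand in for the driver-specific work and re-arm helpers:

#include <linux/netdevice.h>

static int my_tx_action(struct napi_struct *napi, int budget);
static void my_recheck_events(struct napi_struct *napi);

static int my_poll(struct napi_struct *napi, int budget)
{
	int work_done = my_tx_action(napi, budget);

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable events; reschedules if work arrived meanwhile */
		my_recheck_events(napi);
	}
	return work_done;
}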
@@@ -170,7 -194,7 +170,7 @@@ static void xenvif_up(struct xenvif *vi
  	enable_irq(vif->tx_irq);
  	if (vif->tx_irq != vif->rx_irq)
  		enable_irq(vif->rx_irq);
 -	xenvif_check_rx_xenvif(vif);
 +	xenvif_napi_schedule_or_enable_events(vif);
  }
  
  static void xenvif_down(struct xenvif *vif)
@@@ -362,7 -386,7 +362,7 @@@ struct xenvif *xenvif_alloc(struct devi
  		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
  		NETIF_F_TSO | NETIF_F_TSO6;
  	dev->features = dev->hw_features | NETIF_F_RXCSUM;
- 	SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
+ 	dev->ethtool_ops = &xenvif_ethtool_ops;
  
  	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
  
diff --combined include/linux/if_vlan.h
index 724bde8,8c0fb7f..f63d730
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@@ -73,7 -73,7 +73,7 @@@ static inline struct vlan_ethhdr *vlan_
  /* found in socket.c */
  extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
  
 -static inline int is_vlan_dev(struct net_device *dev)
 +static inline bool is_vlan_dev(struct net_device *dev)
  {
          return dev->priv_flags & IFF_802_1Q_VLAN;
  }
@@@ -106,7 -106,7 +106,7 @@@ struct vlan_pcpu_stats 
  
  #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
  
- extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
+ extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
  					       __be16 vlan_proto, u16 vlan_id);
  extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
  extern u16 vlan_dev_vlan_id(const struct net_device *dev);
@@@ -159,7 -159,6 +159,7 @@@ struct vlan_dev_priv 
  #ifdef CONFIG_NET_POLL_CONTROLLER
  	struct netpoll				*netpoll;
  #endif
 +	unsigned int				nest_level;
  };
  
  static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
@@@ -200,7 -199,7 +200,7 @@@ extern void vlan_vids_del_by_dev(struc
  extern bool vlan_uses_dev(const struct net_device *dev);
  #else
  static inline struct net_device *
- __vlan_find_dev_deep(struct net_device *real_dev,
+ __vlan_find_dev_deep_rcu(struct net_device *real_dev,
  		     __be16 vlan_proto, u16 vlan_id)
  {
  	return NULL;
diff --combined include/linux/netdevice.h
index 9d4b1f1,2dea98c..845dc1e
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@@ -56,9 -56,6 +56,6 @@@ struct device
  struct phy_device;
  /* 802.11 specific */
  struct wireless_dev;
- 					/* source back-compat hooks */
- #define SET_ETHTOOL_OPS(netdev,ops) \
- 	( (netdev)->ethtool_ops = (ops) )
  
  void netdev_set_default_ethtool_ops(struct net_device *dev,
  				    const struct ethtool_ops *ops);
@@@ -1144,7 -1141,6 +1141,7 @@@ struct net_device_ops 
  	netdev_tx_t		(*ndo_dfwd_start_xmit) (struct sk_buff *skb,
  							struct net_device *dev,
  							void *priv);
 +	int			(*ndo_get_lock_subclass)(struct net_device *dev);
  };
  
  /**
@@@ -2634,6 -2630,7 +2631,7 @@@ int dev_get_phys_port_id(struct net_dev
  			 struct netdev_phys_port_id *ppid);
  int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
  			struct netdev_queue *txq);
+ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
  int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
  bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
  
@@@ -2951,12 -2948,7 +2949,12 @@@ static inline void netif_addr_lock(stru
  
  static inline void netif_addr_lock_nested(struct net_device *dev)
  {
 -	spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
 +	int subclass = SINGLE_DEPTH_NESTING;
 +
 +	if (dev->netdev_ops->ndo_get_lock_subclass)
 +		subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
 +
 +	spin_lock_nested(&dev->addr_list_lock, subclass);
  }
  
  static inline void netif_addr_lock_bh(struct net_device *dev)
@@@ -3083,14 -3075,6 +3081,14 @@@ void *netdev_lower_get_next_private_rcu
  	     priv; \
  	     priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
  
 +void *netdev_lower_get_next(struct net_device *dev,
 +				struct list_head **iter);
 +#define netdev_for_each_lower_dev(dev, ldev, iter) \
 +	for (iter = &(dev)->adj_list.lower, \
 +	     ldev = netdev_lower_get_next(dev, &(iter)); \
 +	     ldev; \
 +	     ldev = netdev_lower_get_next(dev, &(iter)))
 +
  void *netdev_adjacent_get_private(struct list_head *adj_list);
  void *netdev_lower_get_first_private_rcu(struct net_device *dev);
  struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
@@@ -3106,8 -3090,6 +3104,8 @@@ void netdev_upper_dev_unlink(struct net
  void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
  void *netdev_lower_dev_get_private(struct net_device *dev,
  				   struct net_device *lower_dev);
 +int dev_get_nest_level(struct net_device *dev,
 +		       bool (*type_check)(struct net_device *dev));
  int skb_checksum_help(struct sk_buff *skb);
  struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
  				  netdev_features_t features, bool tx_path);
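
netdev_for_each_lower_dev(), added above, walks dev->adj_list.lower one hop down; per
the kerneldoc that accompanies netdev_lower_get_next() (see the net/core/dev.c hunk
later in this diff), the caller must hold RTNL or otherwise keep the list stable.
Minimal sketch:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int demo_count_lower(struct net_device *dev)
{
	struct net_device *lower;
	struct list_head *iter;
	int n = 0;

	ASSERT_RTNL();	/* the lower list must not change under us */

	netdev_for_each_lower_dev(dev, lower, iter)
		n++;
	return n;
}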
diff --combined include/net/ip6_route.h
index 216cecc,38e41e4..1d09b46
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@@ -127,7 -127,6 +127,7 @@@ int rt6_dump_route(struct rt6_info *rt
  void rt6_ifdown(struct net *net, struct net_device *dev);
  void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
  void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
 +void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
  
  
  /*
@@@ -186,7 -185,7 +186,7 @@@ static inline bool ip6_sk_accept_pmtu(c
  	       inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_OMIT;
  }
  
- static inline bool ip6_sk_local_df(const struct sock *sk)
+ static inline bool ip6_sk_ignore_df(const struct sock *sk)
  {
  	return inet6_sk(sk)->pmtudisc < IPV6_PMTUDISC_DO ||
  	       inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT;
diff --combined include/uapi/linux/audit.h
index 1b1efdd,dfa4c86..b21ea45
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@@ -331,17 -331,9 +331,17 @@@ enum 
  #define AUDIT_FAIL_PRINTK	1
  #define AUDIT_FAIL_PANIC	2
  
 +/*
 + * These bits disambiguate different calling conventions that share an
 + * ELF machine type, bitness, and endianness
 + */
 +#define __AUDIT_ARCH_CONVENTION_MASK 0x30000000
 +#define __AUDIT_ARCH_CONVENTION_MIPS64_N32 0x20000000
 +
  /* distinguish syscall tables */
  #define __AUDIT_ARCH_64BIT 0x80000000
  #define __AUDIT_ARCH_LE	   0x40000000
 +
  #define AUDIT_ARCH_ALPHA	(EM_ALPHA|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
  #define AUDIT_ARCH_ARM		(EM_ARM|__AUDIT_ARCH_LE)
  #define AUDIT_ARCH_ARMEB	(EM_ARM)
@@@ -354,11 -346,7 +354,11 @@@
  #define AUDIT_ARCH_MIPS		(EM_MIPS)
  #define AUDIT_ARCH_MIPSEL	(EM_MIPS|__AUDIT_ARCH_LE)
  #define AUDIT_ARCH_MIPS64	(EM_MIPS|__AUDIT_ARCH_64BIT)
 +#define AUDIT_ARCH_MIPS64N32	(EM_MIPS|__AUDIT_ARCH_64BIT|\
 +				 __AUDIT_ARCH_CONVENTION_MIPS64_N32)
  #define AUDIT_ARCH_MIPSEL64	(EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 +#define AUDIT_ARCH_MIPSEL64N32	(EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE|\
 +				 __AUDIT_ARCH_CONVENTION_MIPS64_N32)
  #define AUDIT_ARCH_OPENRISC	(EM_OPENRISC)
  #define AUDIT_ARCH_PARISC	(EM_PARISC)
  #define AUDIT_ARCH_PARISC64	(EM_PARISC|__AUDIT_ARCH_64BIT)
@@@ -385,6 -373,14 +385,14 @@@
   */
  #define AUDIT_MESSAGE_TEXT_MAX	8560
  
+ /* Multicast Netlink socket groups (default up to 32) */
+ enum audit_nlgrps {
+ 	AUDIT_NLGRP_NONE,	/* Group 0 not used */
+ 	AUDIT_NLGRP_READLOG,	/* "best effort" read only socket */
+ 	__AUDIT_NLGRP_MAX
+ };
+ #define AUDIT_NLGRP_MAX                (__AUDIT_NLGRP_MAX - 1)
+ 
  struct audit_status {
  	__u32		mask;		/* Bit mask for valid entries */
  	__u32		enabled;	/* 1 = enabled, 0 = disabled */
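
AUDIT_NLGRP_READLOG introduces the first audit netlink multicast group; userspace opts
in with the generic NETLINK_ADD_MEMBERSHIP socket option. A hypothetical userspace
sketch (error handling elided):

#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/audit.h>

int demo_audit_readlog_socket(void)
{
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_AUDIT);
	int grp = AUDIT_NLGRP_READLOG;

	/* join the "best effort" read-only log group */
	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		   &grp, sizeof(grp));
	return fd;
}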
diff --combined include/uapi/linux/nl80211.h
index 194c1ea,406010d..9922b9b
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@@ -1579,6 -1579,10 +1579,10 @@@ enum nl80211_commands 
   * @NL80211_ATTR_TDLS_PEER_CAPABILITY: flags for TDLS peer capabilities, u32.
   *	As specified in the &enum nl80211_tdls_peer_capability.
   *
+  * @NL80211_ATTR_IFACE_SOCKET_OWNER: flag attribute, if set during interface
+  *	creation, the new interface will be owned by the netlink socket
+  *	that created it and will be destroyed when the socket is closed
+  *
   * @NL80211_ATTR_MAX: highest attribute number currently defined
   * @__NL80211_ATTR_AFTER_LAST: internal use
   */
@@@ -1914,6 -1918,8 +1918,8 @@@ enum nl80211_attrs 
  
  	NL80211_ATTR_TDLS_PEER_CAPABILITY,
  
+ 	NL80211_ATTR_IFACE_SOCKET_OWNER,
+ 
  	/* add attributes here, update the policy in nl80211.c */
  
  	__NL80211_ATTR_AFTER_LAST,
@@@ -2336,9 -2342,34 +2342,34 @@@ enum nl80211_band_attr 
   *	using this channel as the primary or any of the secondary channels
   *	isn't possible
   * @NL80211_FREQUENCY_ATTR_DFS_CAC_TIME: DFS CAC time in milliseconds.
+  * @NL80211_FREQUENCY_ATTR_INDOOR_ONLY: Only indoor use is permitted on this
+  *	channel. A channel that has the INDOOR_ONLY attribute can only be
+  *	used when there is a clear assessment that the device is operating in
+  *	indoor surroundings, i.e., it is connected to AC power (and not
+  *	through portable DC inverters) or is under the control of a master
+  *	that is acting as an AP and is connected to AC power.
+  * @NL80211_FREQUENCY_ATTR_GO_CONCURRENT: GO operation is allowed on this
+  *	channel if it's connected concurrently to a BSS on the same channel on
+  *	the 2 GHz band or to a channel in the same UNII band (on the 5 GHz
+  *	band), and IEEE80211_CHAN_RADAR is not set. Instantiating a GO on a
+  *	channel that has the GO_CONCURRENT attribute set can be done when there
+  *	is a clear assessment that the device is operating under the guidance of
+  *	an authorized master, i.e., setting up a GO while the device is also
+  *	connected to an AP with DFS and radar detection on the UNII band (it is
+  *	up to user-space, i.e., wpa_supplicant to perform the required
+  *	verifications)
+  * @NL80211_FREQUENCY_ATTR_NO_20MHZ: 20 MHz operation is not allowed
+  *	on this channel in current regulatory domain.
+  * @NL80211_FREQUENCY_ATTR_NO_10MHZ: 10 MHz operation is not allowed
+  *	on this channel in current regulatory domain.
   * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
   *	currently defined
   * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
+  *
+  * See https://apps.fcc.gov/eas/comments/GetPublishedDocument.html?id=327&tn=528122
+  * for more information on the FCC description of the relaxations allowed
+  * by NL80211_FREQUENCY_ATTR_INDOOR_ONLY and
+  * NL80211_FREQUENCY_ATTR_GO_CONCURRENT.
   */
  enum nl80211_frequency_attr {
  	__NL80211_FREQUENCY_ATTR_INVALID,
@@@ -2355,6 -2386,10 +2386,10 @@@
  	NL80211_FREQUENCY_ATTR_NO_80MHZ,
  	NL80211_FREQUENCY_ATTR_NO_160MHZ,
  	NL80211_FREQUENCY_ATTR_DFS_CAC_TIME,
+ 	NL80211_FREQUENCY_ATTR_INDOOR_ONLY,
+ 	NL80211_FREQUENCY_ATTR_GO_CONCURRENT,
+ 	NL80211_FREQUENCY_ATTR_NO_20MHZ,
+ 	NL80211_FREQUENCY_ATTR_NO_10MHZ,
  
  	/* keep last */
  	__NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@@ -2573,10 -2608,13 +2608,13 @@@ enum nl80211_dfs_regions 
   *	present has been registered with the wireless core that
   *	has listed NL80211_FEATURE_CELL_BASE_REG_HINTS as a
   *	supported feature.
+  * @NL80211_USER_REG_HINT_INDOOR: a user sent a hint indicating that the
+  *	platform is operating in an indoor environment.
   */
  enum nl80211_user_reg_hint_type {
  	NL80211_USER_REG_HINT_USER	= 0,
  	NL80211_USER_REG_HINT_CELL_BASE = 1,
+ 	NL80211_USER_REG_HINT_INDOOR    = 2,
  };
  
  /**
@@@ -3856,8 -3894,6 +3894,8 @@@ enum nl80211_ap_sme_features 
   * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested
  *	to work properly to support receiving regulatory hints from
   *	cellular base stations.
 + * @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: (no longer available, only
 + *	here to reserve the value for API/ABI compatibility)
   * @NL80211_FEATURE_SAE: This driver supports simultaneous authentication of
   *	equals (SAE) with user space SME (NL80211_CMD_AUTHENTICATE) in station
   *	mode
@@@ -3893,13 -3929,16 +3931,16 @@@
   *	interface. An active monitor interface behaves like a normal monitor
   *	interface, but gets added to the driver. It ensures that incoming
   *	unicast packets directed at the configured interface address get ACKed.
+  * @NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE: This driver supports dynamic
+  *	channel bandwidth change (e.g., HT 20 <-> 40 MHz channel) during the
+  *	lifetime of a BSS.
   */
  enum nl80211_feature_flags {
  	NL80211_FEATURE_SK_TX_STATUS			= 1 << 0,
  	NL80211_FEATURE_HT_IBSS				= 1 << 1,
  	NL80211_FEATURE_INACTIVITY_TIMER		= 1 << 2,
  	NL80211_FEATURE_CELL_BASE_REG_HINTS		= 1 << 3,
 -	/* bit 4 is reserved - don't use */
 +	NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL	= 1 << 4,
  	NL80211_FEATURE_SAE				= 1 << 5,
  	NL80211_FEATURE_LOW_PRIORITY_SCAN		= 1 << 6,
  	NL80211_FEATURE_SCAN_FLUSH			= 1 << 7,
@@@ -3913,6 -3952,7 +3954,7 @@@
  	NL80211_FEATURE_FULL_AP_CLIENT_STATE		= 1 << 15,
  	NL80211_FEATURE_USERSPACE_MPM			= 1 << 16,
  	NL80211_FEATURE_ACTIVE_MONITOR			= 1 << 17,
+ 	NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE	= 1 << 18,
  };
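
enum nl80211_feature_flags is a plain u32 bitmask mirrored in wiphy->features, so a
driver advertises the new AP_MODE_CHAN_WIDTH_CHANGE bit the same way as any other
capability. A sketch; which bits a particular driver sets is of course driver policy:

#include <net/cfg80211.h>

static void demo_advertise_features(struct wiphy *wiphy)
{
	/* feature flags are a simple bitmask; OR in what we support */
	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR |
			   NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
}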
  
  /**
diff --combined lib/Kconfig.debug
index e548aa0,d1b7bdf..6da2c25
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@@ -180,7 -180,7 +180,7 @@@ config STRIP_ASM_SYM
  
  config READABLE_ASM
          bool "Generate readable assembler code"
 -        depends on DEBUG_KERNEL
 +        depends on DEBUG_KERNEL && !LTO
          help
            Disable some compiler optimizations that tend to generate human unreadable
            assembler output. This may make the kernel slightly slower, but it helps
@@@ -1620,6 -1620,19 +1620,19 @@@ config TEST_USER_COP
  
  	  If unsure, say N.
  
+ config TEST_BPF
+ 	tristate "Test BPF filter functionality"
+ 	default n
+ 	depends on m && NET
+ 	help
+ 	  This builds the "test_bpf" module that runs various test vectors
+ 	  against the BPF interpreter or BPF JIT compiler depending on the
+ 	  current setting. This is particularly useful for BPF JIT compiler
+ 	  development, but also to run regression tests against changes in
+ 	  the interpreter code.
+ 
+ 	  If unsure, say N.
+ 
  source "samples/Kconfig"
  
  source "lib/Kconfig.kgdb"
diff --combined net/8021q/vlan_dev.c
index 019efb7,8f025af..4aef04e
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@@ -493,10 -493,48 +493,10 @@@ static void vlan_dev_change_rx_flags(st
  	}
  }
  
 -static int vlan_calculate_locking_subclass(struct net_device *real_dev)
 -{
 -	int subclass = 0;
 -
 -	while (is_vlan_dev(real_dev)) {
 -		subclass++;
 -		real_dev = vlan_dev_priv(real_dev)->real_dev;
 -	}
 -
 -	return subclass;
 -}
 -
 -static void vlan_dev_mc_sync(struct net_device *to, struct net_device *from)
 -{
 -	int err = 0, subclass;
 -
 -	subclass = vlan_calculate_locking_subclass(to);
 -
 -	spin_lock_nested(&to->addr_list_lock, subclass);
 -	err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
 -	if (!err)
 -		__dev_set_rx_mode(to);
 -	spin_unlock(&to->addr_list_lock);
 -}
 -
 -static void vlan_dev_uc_sync(struct net_device *to, struct net_device *from)
 -{
 -	int err = 0, subclass;
 -
 -	subclass = vlan_calculate_locking_subclass(to);
 -
 -	spin_lock_nested(&to->addr_list_lock, subclass);
 -	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
 -	if (!err)
 -		__dev_set_rx_mode(to);
 -	spin_unlock(&to->addr_list_lock);
 -}
 -
  static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
  {
 -	vlan_dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
 -	vlan_dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
 +	dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
 +	dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
  }
  
  /*
@@@ -524,11 -562,6 +524,11 @@@ static void vlan_dev_set_lockdep_class(
  	netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
  }
  
 +static int vlan_dev_get_lock_subclass(struct net_device *dev)
 +{
 +	return vlan_dev_priv(dev)->nest_level;
 +}
 +
  static const struct header_ops vlan_header_ops = {
  	.create	 = vlan_dev_hard_header,
  	.rebuild = vlan_dev_rebuild_header,
@@@ -564,6 -597,7 +564,6 @@@ static const struct net_device_ops vlan
  static int vlan_dev_init(struct net_device *dev)
  {
  	struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
 -	int subclass = 0;
  
  	netif_carrier_off(dev);
  
@@@ -612,7 -646,8 +612,7 @@@
  
  	SET_NETDEV_DEVTYPE(dev, &vlan_type);
  
 -	subclass = vlan_calculate_locking_subclass(dev);
 -	vlan_dev_set_lockdep_class(dev, subclass);
 +	vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
  
  	vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
  	if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
@@@ -671,38 -706,36 +671,36 @@@ static void vlan_ethtool_get_drvinfo(st
  
  static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
  {
+ 	struct vlan_pcpu_stats *p;
+ 	u32 rx_errors = 0, tx_dropped = 0;
+ 	int i;
  
- 	if (vlan_dev_priv(dev)->vlan_pcpu_stats) {
- 		struct vlan_pcpu_stats *p;
- 		u32 rx_errors = 0, tx_dropped = 0;
- 		int i;
- 
- 		for_each_possible_cpu(i) {
- 			u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
- 			unsigned int start;
- 
- 			p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
- 			do {
- 				start = u64_stats_fetch_begin_irq(&p->syncp);
- 				rxpackets	= p->rx_packets;
- 				rxbytes		= p->rx_bytes;
- 				rxmulticast	= p->rx_multicast;
- 				txpackets	= p->tx_packets;
- 				txbytes		= p->tx_bytes;
- 			} while (u64_stats_fetch_retry_irq(&p->syncp, start));
- 
- 			stats->rx_packets	+= rxpackets;
- 			stats->rx_bytes		+= rxbytes;
- 			stats->multicast	+= rxmulticast;
- 			stats->tx_packets	+= txpackets;
- 			stats->tx_bytes		+= txbytes;
- 			/* rx_errors & tx_dropped are u32 */
- 			rx_errors	+= p->rx_errors;
- 			tx_dropped	+= p->tx_dropped;
- 		}
- 		stats->rx_errors  = rx_errors;
- 		stats->tx_dropped = tx_dropped;
+ 	for_each_possible_cpu(i) {
+ 		u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
+ 		unsigned int start;
+ 
+ 		p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
+ 		do {
+ 			start = u64_stats_fetch_begin_irq(&p->syncp);
+ 			rxpackets	= p->rx_packets;
+ 			rxbytes		= p->rx_bytes;
+ 			rxmulticast	= p->rx_multicast;
+ 			txpackets	= p->tx_packets;
+ 			txbytes		= p->tx_bytes;
+ 		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ 
+ 		stats->rx_packets	+= rxpackets;
+ 		stats->rx_bytes		+= rxbytes;
+ 		stats->multicast	+= rxmulticast;
+ 		stats->tx_packets	+= txpackets;
+ 		stats->tx_bytes		+= txbytes;
+ 		/* rx_errors & tx_dropped are u32 */
+ 		rx_errors	+= p->rx_errors;
+ 		tx_dropped	+= p->tx_dropped;
  	}
+ 	stats->rx_errors  = rx_errors;
+ 	stats->tx_dropped = tx_dropped;
+ 
  	return stats;
  }
  
@@@ -784,7 -817,6 +782,7 @@@ static const struct net_device_ops vlan
  	.ndo_netpoll_cleanup	= vlan_dev_netpoll_cleanup,
  #endif
  	.ndo_fix_features	= vlan_dev_fix_features,
 +	.ndo_get_lock_subclass  = vlan_dev_get_lock_subclass,
  };
  
  void vlan_setup(struct net_device *dev)
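
Tying the vlan_dev.c changes together: the per-device nest_level added to struct
vlan_dev_priv feeds both the lockdep class setup and the new ndo_get_lock_subclass
hook, replacing the removed vlan_calculate_locking_subclass() walk. A hedged sketch of
how the level could be filled in at setup time using the new dev_get_nest_level()
helper; the "+ 1" counting the device itself is an assumption here, not something shown
in this diff:

#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void demo_set_nest_level(struct net_device *vlan_dev,
				struct net_device *real_dev)
{
	ASSERT_RTNL();	/* dev_get_nest_level() walks lower devs under RTNL */
	vlan_dev_priv(vlan_dev)->nest_level =
		dev_get_nest_level(real_dev, is_vlan_dev) + 1;
}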
diff --combined net/core/dev.c
index 2b872bf,867adb2..330e012
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@@ -1661,6 -1661,29 +1661,29 @@@ bool is_skb_forwardable(struct net_devi
  }
  EXPORT_SYMBOL_GPL(is_skb_forwardable);
  
+ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
+ {
+ 	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+ 		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
+ 			atomic_long_inc(&dev->rx_dropped);
+ 			kfree_skb(skb);
+ 			return NET_RX_DROP;
+ 		}
+ 	}
+ 
+ 	if (unlikely(!is_skb_forwardable(dev, skb))) {
+ 		atomic_long_inc(&dev->rx_dropped);
+ 		kfree_skb(skb);
+ 		return NET_RX_DROP;
+ 	}
+ 
+ 	skb_scrub_packet(skb, true);
+ 	skb->protocol = eth_type_trans(skb, dev);
+ 
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(__dev_forward_skb);
+ 
  /**
   * dev_forward_skb - loopback an skb to another netif
   *
@@@ -1681,24 -1704,7 +1704,7 @@@
   */
  int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
  {
- 	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
- 		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
- 			atomic_long_inc(&dev->rx_dropped);
- 			kfree_skb(skb);
- 			return NET_RX_DROP;
- 		}
- 	}
- 
- 	if (unlikely(!is_skb_forwardable(dev, skb))) {
- 		atomic_long_inc(&dev->rx_dropped);
- 		kfree_skb(skb);
- 		return NET_RX_DROP;
- 	}
- 
- 	skb_scrub_packet(skb, true);
- 	skb->protocol = eth_type_trans(skb, dev);
- 
- 	return netif_rx_internal(skb);
+ 	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
  }
  EXPORT_SYMBOL_GPL(dev_forward_skb);
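
dev_forward_skb() now reduces to a one-liner built on GCC's "a ?: b" extension: if
__dev_forward_skb() returns non-zero (NET_RX_DROP), that value is propagated; otherwise
the packet continues into netif_rx_internal(). Spelled out without the extension, the
return is equivalent to:

	int ret = __dev_forward_skb(dev, skb);

	return ret ? ret : netif_rx_internal(skb);	/* same as ret ?: ... */

with the added guarantee that __dev_forward_skb() is evaluated only once.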
  
@@@ -3951,7 -3957,6 +3957,7 @@@ static enum gro_result dev_gro_receive(
  	}
  	NAPI_GRO_CB(skb)->count = 1;
  	NAPI_GRO_CB(skb)->age = jiffies;
 +	NAPI_GRO_CB(skb)->last = skb;
  	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
  	skb->next = napi->gro_list;
  	napi->gro_list = skb;
@@@ -4623,32 -4628,6 +4629,32 @@@ void *netdev_lower_get_next_private_rcu
  EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
  
  /**
 + * netdev_lower_get_next - Get the next device from the lower neighbour
 + *                         list
 + * @dev: device
 + * @iter: list_head ** of the current position
 + *
 + * Gets the next netdev_adjacent from the dev's lower neighbour
 + * list, starting from iter position. The caller must hold RTNL lock or
 + * its own locking that guarantees that the neighbour lower
 + * list will remain unchanged.
 + */
 +void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
 +{
 +	struct netdev_adjacent *lower;
 +
 +	lower = list_entry((*iter)->next, struct netdev_adjacent, list);
 +
 +	if (&lower->list == &dev->adj_list.lower)
 +		return NULL;
 +
 +	*iter = &lower->list;
 +
 +	return lower->dev;
 +}
 +EXPORT_SYMBOL(netdev_lower_get_next);
 +
 +/**
   * netdev_lower_get_first_private_rcu - Get the first ->private from the
   *				       lower neighbour list, RCU
   *				       variant
@@@ -5098,30 -5077,6 +5104,30 @@@ void *netdev_lower_dev_get_private(stru
  }
  EXPORT_SYMBOL(netdev_lower_dev_get_private);
  
 +
 +int dev_get_nest_level(struct net_device *dev,
 +		       bool (*type_check)(struct net_device *dev))
 +{
 +	struct net_device *lower = NULL;
 +	struct list_head *iter;
 +	int max_nest = -1;
 +	int nest;
 +
 +	ASSERT_RTNL();
 +
 +	netdev_for_each_lower_dev(dev, lower, iter) {
 +		nest = dev_get_nest_level(lower, type_check);
 +		if (max_nest < nest)
 +			max_nest = nest;
 +	}
 +
 +	if (type_check(dev))
 +		max_nest++;
 +
 +	return max_nest;
 +}
 +EXPORT_SYMBOL(dev_get_nest_level);
 +
  static void dev_change_rx_flags(struct net_device *dev, int flags)
  {
  	const struct net_device_ops *ops = dev->netdev_ops;
@@@ -5287,6 -5242,7 +5293,6 @@@ void __dev_set_rx_mode(struct net_devic
  	if (ops->ndo_set_rx_mode)
  		ops->ndo_set_rx_mode(dev);
  }
 -EXPORT_SYMBOL(__dev_set_rx_mode);
  
  void dev_set_rx_mode(struct net_device *dev)
  {
@@@ -5591,7 -5547,7 +5597,7 @@@ static int dev_new_index(struct net *ne
  
  /* Delayed registration/unregisteration */
  static LIST_HEAD(net_todo_list);
 -static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
 +DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
  
  static void net_set_todo(struct net_device *dev)
  {
@@@ -5648,10 -5604,6 +5654,6 @@@ static void rollback_registered_many(st
  		*/
  		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
  
- 		if (!dev->rtnl_link_ops ||
- 		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
- 			rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
- 
  		/*
  		 *	Flush the unicast and multicast chains
  		 */
@@@ -5661,6 -5613,10 +5663,10 @@@
  		if (dev->netdev_ops->ndo_uninit)
  			dev->netdev_ops->ndo_uninit(dev);
  
+ 		if (!dev->rtnl_link_ops ||
+ 		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
+ 			rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
+ 
  		/* Notifier chain MUST detach us all upper devices. */
  		WARN_ON(netdev_has_any_upper_dev(dev));
  
diff --combined net/core/net_namespace.c
index 7c8ffd9,05e949d..85b6269
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@@ -24,7 -24,7 +24,7 @@@
  
  static LIST_HEAD(pernet_list);
  static struct list_head *first_device = &pernet_list;
 -static DEFINE_MUTEX(net_mutex);
 +DEFINE_MUTEX(net_mutex);
  
  LIST_HEAD(net_namespace_list);
  EXPORT_SYMBOL_GPL(net_namespace_list);
@@@ -273,7 -273,7 +273,7 @@@ static void cleanup_net(struct work_str
  {
  	const struct pernet_operations *ops;
  	struct net *net, *tmp;
- 	LIST_HEAD(net_kill_list);
+ 	struct list_head net_kill_list;
  	LIST_HEAD(net_exit_list);
  
  	/* Atomically snapshot the list of namespaces to cleanup */
diff --combined net/core/skbuff.c
index 8383b2b,3d74530..3f6c7e8
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@@ -694,7 -694,7 +694,7 @@@ static void __copy_skb_header(struct sk
  #endif
  	memcpy(new->cb, old->cb, sizeof(old->cb));
  	new->csum		= old->csum;
- 	new->local_df		= old->local_df;
+ 	new->ignore_df		= old->ignore_df;
  	new->pkt_type		= old->pkt_type;
  	new->ip_summed		= old->ip_summed;
  	skb_copy_queue_mapping(new, old);
@@@ -3076,7 -3076,7 +3076,7 @@@ int skb_gro_receive(struct sk_buff **he
  	if (unlikely(p->len + len >= 65536))
  		return -E2BIG;
  
 -	lp = NAPI_GRO_CB(p)->last ?: p;
 +	lp = NAPI_GRO_CB(p)->last;
  	pinfo = skb_shinfo(lp);
  
  	if (headlen <= offset) {
@@@ -3192,7 -3192,7 +3192,7 @@@ merge
  
  	__skb_pull(skb, offset);
  
 -	if (!NAPI_GRO_CB(p)->last)
 +	if (NAPI_GRO_CB(p)->last == p)
  		skb_shinfo(p)->frag_list = skb;
  	else
  		NAPI_GRO_CB(p)->last->next = skb;
@@@ -3913,7 -3913,7 +3913,7 @@@ void skb_scrub_packet(struct sk_buff *s
  	skb->tstamp.tv64 = 0;
  	skb->pkt_type = PACKET_HOST;
  	skb->skb_iif = 0;
- 	skb->local_df = 0;
+ 	skb->ignore_df = 0;
  	skb_dst_drop(skb);
  	skb->mark = 0;
  	secpath_reset(skb);
diff --combined net/ipv4/xfrm4_output.c
index 186a8ec,8e8c018..d5f6bd9
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@@ -25,7 -25,7 +25,7 @@@ static int xfrm4_tunnel_check_size(stru
  	if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
  		goto out;
  
- 	if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df)
+ 	if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df)
  		goto out;
  
  	mtu = dst_mtu(skb_dst(skb));
@@@ -62,7 -62,10 +62,7 @@@ int xfrm4_prepare_output(struct xfrm_st
  	if (err)
  		return err;
  
 -	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
 -	IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED;
 -
 -	skb->protocol = htons(ETH_P_IP);
 +	IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
  
  	return x->outer_mode->output2(x, skb);
  }
@@@ -70,34 -73,27 +70,34 @@@ EXPORT_SYMBOL(xfrm4_prepare_output)
  
  int xfrm4_output_finish(struct sk_buff *skb)
  {
 +	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
 +	skb->protocol = htons(ETH_P_IP);
 +
 +#ifdef CONFIG_NETFILTER
 +	IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
 +#endif
 +
 +	return xfrm_output(skb);
 +}
 +
 +static int __xfrm4_output(struct sk_buff *skb)
 +{
 +	struct xfrm_state *x = skb_dst(skb)->xfrm;
 +
  #ifdef CONFIG_NETFILTER
 -	if (!skb_dst(skb)->xfrm) {
 +	if (!x) {
  		IPCB(skb)->flags |= IPSKB_REROUTED;
  		return dst_output(skb);
  	}
 -
 -	IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
  #endif
  
 -	skb->protocol = htons(ETH_P_IP);
 -	return xfrm_output(skb);
 +	return x->outer_mode->afinfo->output_finish(skb);
  }
  
  int xfrm4_output(struct sock *sk, struct sk_buff *skb)
  {
 -	struct dst_entry *dst = skb_dst(skb);
 -	struct xfrm_state *x = dst->xfrm;
 -
  	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
 -			    NULL, dst->dev,
 -			    x->outer_mode->afinfo->output_finish,
 +			    NULL, skb_dst(skb)->dev, __xfrm4_output,
  			    !(IPCB(skb)->flags & IPSKB_REROUTED));
  }
  
diff --combined net/ipv6/ip6_output.c
index fbf1156,ab0cc57..85aaeca
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@@ -219,7 -219,7 +219,7 @@@ int ip6_xmit(struct sock *sk, struct sk
  	skb->mark = sk->sk_mark;
  
  	mtu = dst_mtu(dst);
- 	if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
+ 	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
  		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
  			      IPSTATS_MIB_OUT, skb->len);
  		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
@@@ -347,11 -347,11 +347,11 @@@ static bool ip6_pkt_too_big(const struc
  	if (skb->len <= mtu)
  		return false;
  
- 	/* ipv6 conntrack defrag sets max_frag_size + local_df */
+ 	/* ipv6 conntrack defrag sets max_frag_size + ignore_df */
  	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
  		return true;
  
- 	if (skb->local_df)
+ 	if (skb->ignore_df)
  		return false;
  
  	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
@@@ -559,7 -559,7 +559,7 @@@ int ip6_fragment(struct sk_buff *skb, i
  	/* We must not fragment if the socket is set to force MTU discovery
  	 * or if the skb is not generated by a local socket.
  	 */
- 	if (unlikely(!skb->local_df && skb->len > mtu) ||
+ 	if (unlikely(!skb->ignore_df && skb->len > mtu) ||
  		     (IP6CB(skb)->frag_max_size &&
  		      IP6CB(skb)->frag_max_size > mtu)) {
  		if (skb->sk && dst_allfrag(skb_dst(skb)))
@@@ -1229,12 -1229,12 +1229,12 @@@ int ip6_append_data(struct sock *sk, in
  		unsigned int maxnonfragsize, headersize;
  
  		headersize = sizeof(struct ipv6hdr) +
 -			     (opt ? opt->tot_len : 0) +
 +			     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
  			     (dst_allfrag(&rt->dst) ?
  			      sizeof(struct frag_hdr) : 0) +
  			     rt->rt6i_nfheader_len;
  
- 		if (ip6_sk_local_df(sk))
+ 		if (ip6_sk_ignore_df(sk))
  			maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
  		else
  			maxnonfragsize = mtu;
@@@ -1544,7 -1544,7 +1544,7 @@@ int ip6_push_pending_frames(struct soc
  	}
  
  	/* Allow local fragmentation. */
- 	skb->local_df = ip6_sk_local_df(sk);
+ 	skb->ignore_df = ip6_sk_ignore_df(sk);
  
  	*final_dst = fl6->daddr;
  	__skb_pull(skb, skb_network_header_len(skb));
diff --combined net/ipv6/route.c
index 6ebdb7b6,f0a8ff9..70e6502
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@@ -1176,7 -1176,7 +1176,7 @@@ void ip6_update_pmtu(struct sk_buff *sk
  
  	memset(&fl6, 0, sizeof(fl6));
  	fl6.flowi6_oif = oif;
- 	fl6.flowi6_mark = mark;
+ 	fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
  	fl6.daddr = iph->daddr;
  	fl6.saddr = iph->saddr;
  	fl6.flowlabel = ip6_flowinfo(iph);
@@@ -2234,27 -2234,6 +2234,27 @@@ void rt6_remove_prefsrc(struct inet6_if
  	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
  }
  
 +#define RTF_RA_ROUTER		(RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
 +#define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)
 +
 +/* Remove routers and update dst entries when a gateway turns into a host. */
 +static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
 +{
 +	struct in6_addr *gateway = (struct in6_addr *)arg;
 +
 +	if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
 +	     ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
 +	     ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
 +		return -1;
 +	}
 +	return 0;
 +}
 +
 +void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
 +{
 +	fib6_clean_all(net, fib6_clean_tohost, gateway);
 +}
 +
  struct arg_dev_net {
  	struct net_device *dev;
  	struct net *net;
@@@ -2730,9 -2709,6 +2730,9 @@@ static int inet6_rtm_getroute(struct sk
  	if (tb[RTA_OIF])
  		oif = nla_get_u32(tb[RTA_OIF]);
  
 +	if (tb[RTA_MARK])
 +		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
 +
  	if (iif) {
  		struct net_device *dev;
  		int flags = 0;
diff --combined net/ipv6/xfrm6_output.c
index b930d08,f47c8b1..433672d
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@@ -78,7 -78,7 +78,7 @@@ static int xfrm6_tunnel_check_size(stru
  	if (mtu < IPV6_MIN_MTU)
  		mtu = IPV6_MIN_MTU;
  
- 	if (!skb->local_df && skb->len > mtu) {
+ 	if (!skb->ignore_df && skb->len > mtu) {
  		skb->dev = dst->dev;
  
  		if (xfrm6_local_dontfrag(skb))
@@@ -114,7 -114,13 +114,7 @@@ int xfrm6_prepare_output(struct xfrm_st
  	if (err)
  		return err;
  
- 	skb->local_df = 1;
 -	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
 -#ifdef CONFIG_NETFILTER
 -	IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
 -#endif
 -
 -	skb->protocol = htons(ETH_P_IPV6);
+ 	skb->ignore_df = 1;
  
  	return x->outer_mode->output2(x, skb);
  }
@@@ -122,13 -128,11 +122,13 @@@ EXPORT_SYMBOL(xfrm6_prepare_output)
  
  int xfrm6_output_finish(struct sk_buff *skb)
  {
 +	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
 +	skb->protocol = htons(ETH_P_IPV6);
 +
  #ifdef CONFIG_NETFILTER
  	IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
  #endif
  
 -	skb->protocol = htons(ETH_P_IPV6);
  	return xfrm_output(skb);
  }
  
@@@ -138,13 -142,6 +138,13 @@@ static int __xfrm6_output(struct sk_buf
  	struct xfrm_state *x = dst->xfrm;
  	int mtu;
  
 +#ifdef CONFIG_NETFILTER
 +	if (!x) {
 +		IP6CB(skb)->flags |= IP6SKB_REROUTED;
 +		return dst_output(skb);
 +	}
 +#endif
 +
  	if (skb->protocol == htons(ETH_P_IPV6))
  		mtu = ip6_skb_dst_mtu(skb);
  	else
@@@ -153,7 -150,7 +153,7 @@@
  	if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
  		xfrm6_local_rxpmtu(skb, mtu);
  		return -EMSGSIZE;
- 	} else if (!skb->local_df && skb->len > mtu && skb->sk) {
+ 	} else if (!skb->ignore_df && skb->len > mtu && skb->sk) {
  		xfrm_local_error(skb, mtu);
  		return -EMSGSIZE;
  	}
@@@ -168,7 -165,6 +168,7 @@@
  
  int xfrm6_output(struct sock *sk, struct sk_buff *skb)
  {
 -	return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL,
 -		       skb_dst(skb)->dev, __xfrm6_output);
 +	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb,
 +			    NULL, skb_dst(skb)->dev, __xfrm6_output,
 +			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
  }
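
Both the IPv4 and IPv6 xfrm output paths now go through NF_HOOK_COND(), whose contract
is: if the condition is false, skip the netfilter chain and call the output function
directly; here the REROUTED flag is what short-circuits POST_ROUTING. A simplified
conceptual sketch only (the real macro in linux/netfilter.h also handles hook
registration and queueing); run_hooks_then() is a placeholder:

#include <linux/skbuff.h>
#include <linux/types.h>

static int run_hooks_then(struct sk_buff *skb,
			  int (*okfn)(struct sk_buff *));	/* placeholder */

static inline int demo_hook_cond(struct sk_buff *skb,
				 int (*okfn)(struct sk_buff *), bool cond)
{
	if (!cond)
		return okfn(skb);	/* bypass the netfilter chain entirely */
	return run_hooks_then(skb, okfn);
}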
diff --combined net/mac80211/ieee80211_i.h
index f169b6e,b455f62..487c2ef
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@@ -260,7 -260,7 +260,7 @@@ struct ieee80211_if_ap 
  
  	/* to be used after channel switch. */
  	struct cfg80211_beacon_data *next_beacon;
- 	struct list_head vlans;
+ 	struct list_head vlans; /* write-protected with RTNL and local->mtx */
  
  	struct ps_data ps;
  	atomic_t num_mcast_sta; /* number of stations receiving multicast */
@@@ -276,7 -276,7 +276,7 @@@ struct ieee80211_if_wds 
  };
  
  struct ieee80211_if_vlan {
- 	struct list_head list;
+ 	struct list_head list; /* write-protected with RTNL and local->mtx */
  
  	/* used for all tx if the VLAN is configured to 4-addr mode */
  	struct sta_info __rcu *sta;
@@@ -317,7 -317,6 +317,7 @@@ struct ieee80211_roc_work 
  
  	bool started, abort, hw_begun, notified;
  	bool to_be_freed;
 +	bool on_channel;
  
  	unsigned long hw_start_time;
  
@@@ -692,8 -691,10 +692,10 @@@ struct ieee80211_chanctx 
  	struct list_head list;
  	struct rcu_head rcu_head;
  
+ 	struct list_head assigned_vifs;
+ 	struct list_head reserved_vifs;
+ 
  	enum ieee80211_chanctx_mode mode;
- 	int refcount;
  	bool driver_present;
  
  	struct ieee80211_chanctx_conf conf;
@@@ -757,6 -758,14 +759,14 @@@ struct ieee80211_sub_if_data 
  	bool csa_radar_required;
  	struct cfg80211_chan_def csa_chandef;
  
+ 	struct list_head assigned_chanctx_list; /* protected by chanctx_mtx */
+ 	struct list_head reserved_chanctx_list; /* protected by chanctx_mtx */
+ 
+ 	/* context reservation -- protected with chanctx_mtx */
+ 	struct ieee80211_chanctx *reserved_chanctx;
+ 	struct cfg80211_chan_def reserved_chandef;
+ 	bool reserved_radar_required;
+ 
  	/* used to reconfigure hardware SM PS */
  	struct work_struct recalc_smps;
  
@@@ -1772,6 -1781,16 +1782,16 @@@ ieee80211_vif_use_channel(struct ieee80
  			  const struct cfg80211_chan_def *chandef,
  			  enum ieee80211_chanctx_mode mode);
  int __must_check
+ ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata,
+ 			      const struct cfg80211_chan_def *chandef,
+ 			      enum ieee80211_chanctx_mode mode,
+ 			      bool radar_required);
+ int __must_check
+ ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata,
+ 				   u32 *changed);
+ int ieee80211_vif_unreserve_chanctx(struct ieee80211_sub_if_data *sdata);
+ 
+ int __must_check
  ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
  			       const struct cfg80211_chan_def *chandef,
  			       u32 *changed);
@@@ -1783,6 -1802,8 +1803,8 @@@ void ieee80211_vif_release_channel(stru
  void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata);
  void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
  					 bool clear);
+ int ieee80211_chanctx_refcount(struct ieee80211_local *local,
+ 			       struct ieee80211_chanctx *ctx);
  
  void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
  				   struct ieee80211_chanctx *chanctx);
@@@ -1806,6 -1827,11 +1828,11 @@@ int ieee80211_cs_headroom(struct ieee80
  			  enum nl80211_iftype iftype);
  void ieee80211_recalc_dtim(struct ieee80211_local *local,
  			   struct ieee80211_sub_if_data *sdata);
+ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
+ 				 const struct cfg80211_chan_def *chandef,
+ 				 enum ieee80211_chanctx_mode chanmode,
+ 				 u8 radar_detect);
+ int ieee80211_max_num_channels(struct ieee80211_local *local);
  
  #ifdef CONFIG_MAC80211_NOINLINE
  #define debug_noinline noinline
diff --combined net/mac80211/mlme.c
index 27600a9,488826f..bfb5e20
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@@ -1089,7 -1089,7 +1089,7 @@@ ieee80211_sta_process_chanswitch(struc
  	}
  	chanctx = container_of(rcu_access_pointer(sdata->vif.chanctx_conf),
  			       struct ieee80211_chanctx, conf);
- 	if (chanctx->refcount > 1) {
+ 	if (ieee80211_chanctx_refcount(local, chanctx) > 1) {
  		sdata_info(sdata,
  			   "channel switch with multiple interfaces on the same channel, disconnecting\n");
  		ieee80211_queue_work(&local->hw,
@@@ -3598,24 -3598,18 +3598,24 @@@ void ieee80211_mgd_quiesce(struct ieee8
  
  	sdata_lock(sdata);
  
 -	if (ifmgd->auth_data) {
 +	if (ifmgd->auth_data || ifmgd->assoc_data) {
 +		const u8 *bssid = ifmgd->auth_data ?
 +				ifmgd->auth_data->bss->bssid :
 +				ifmgd->assoc_data->bss->bssid;
 +
  		/*
 -		 * If we are trying to authenticate while suspending, cfg80211
 -		 * won't know and won't actually abort those attempts, thus we
 -		 * need to do that ourselves.
 +		 * If we are trying to authenticate / associate while suspending,
 +		 * cfg80211 won't know and won't actually abort those attempts,
 +		 * thus we need to do that ourselves.
  		 */
 -		ieee80211_send_deauth_disassoc(sdata,
 -					       ifmgd->auth_data->bss->bssid,
 +		ieee80211_send_deauth_disassoc(sdata, bssid,
  					       IEEE80211_STYPE_DEAUTH,
  					       WLAN_REASON_DEAUTH_LEAVING,
  					       false, frame_buf);
 -		ieee80211_destroy_auth_data(sdata, false);
 +		if (ifmgd->assoc_data)
 +			ieee80211_destroy_assoc_data(sdata, false);
 +		if (ifmgd->auth_data)
 +			ieee80211_destroy_auth_data(sdata, false);
  		cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf,
  				      IEEE80211_DEAUTH_FRAME_LEN);
  	}
@@@ -3707,7 -3701,7 +3707,7 @@@ int ieee80211_max_network_latency(struc
  	ieee80211_recalc_ps(local, latency_usec);
  	mutex_unlock(&local->iflist_mtx);
  
- 	return 0;
+ 	return NOTIFY_OK;
  }
  
  static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata,
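
The last mlme.c hunk is subtle: ieee80211_max_network_latency() is a PM-QoS notifier
callback, so it must return notifier codes rather than 0/-errno. Returning 0 is
NOTIFY_DONE ("don't care"); NOTIFY_OK positively acknowledges the event. For reference,
the relevant constants as defined in include/linux/notifier.h at this point:

#define NOTIFY_DONE		0x0000		/* Don't care */
#define NOTIFY_OK		0x0001		/* Suits me */
#define NOTIFY_STOP_MASK	0x8000		/* Don't call further */
#define NOTIFY_BAD		(NOTIFY_STOP_MASK|0x0002)	/* Bad/Veto action */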

-- 
LinuxNextTracking

