The following commit has been merged in the master branch:
commit 89c4e73b6eeeb7b1f54fac5f4c6e0319b4b64461
Merge: f442d0a87e3cd087ad56d8945acaf40c924aa668 b6052af61a9e0dee236bcf4c69843126c0d28e4f
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Wed May 21 11:50:52 2014 +1000

    Merge remote-tracking branch 'net-next/master'
    Conflicts:
    	drivers/net/ethernet/altera/altera_msgdma.c
    	drivers/net/ethernet/altera/altera_sgdma.c
    	net/ipv6/xfrm6_output.c
diff --combined MAINTAINERS
index c519569,bde15ff..c6c47a6
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -355,7 -355,7 +355,7 @@@ F:	Documentation/hwmon/adm102
 F:	drivers/hwmon/adm1025.c
 ADM1029 HARDWARE MONITOR DRIVER
-M:	Corentin Labbe <corentin.labbe@geomatys.fr>
+M:	Corentin Labbe <clabbe.montjoie@gmail.com>
 L:	lm-sensors@lm-sensors.org
 S:	Maintained
 F:	drivers/hwmon/adm1029.c
@@@ -1617,6 -1617,12 +1617,6 @@@ S:	Supporte
 F:	drivers/misc/atmel_tclib.c
 F:	drivers/clocksource/tcb_clksrc.c
-ATMEL TSADCC DRIVER
-M:	Josh Wu <josh.wu@atmel.com>
-L:	linux-input@vger.kernel.org
-S:	Supported
-F:	drivers/input/touchscreen/atmel_tsadcc.c
-
 ATMEL USBA UDC DRIVER
 M:	Nicolas Ferre <nicolas.ferre@atmel.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@@ -1887,15 -1893,14 +1887,15 @@@ L:	netdev@vger.kernel.or
 S:	Supported
 F:	drivers/net/ethernet/broadcom/bnx2x/
-BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
+BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
 M:	Christian Daudt <bcm@fixthebug.org>
 M:	Matt Porter <mporter@linaro.org>
 L:	bcm-kernel-feedback-list@broadcom.com
-T:	git git://git.github.com/broadcom/bcm11351
+T:	git git://github.com/broadcom/mach-bcm
 S:	Maintained
 F:	arch/arm/mach-bcm/
 F:	arch/arm/boot/dts/bcm113*
+F:	arch/arm/boot/dts/bcm216*
 F:	arch/arm/boot/dts/bcm281*
 F:	arch/arm/configs/bcm_defconfig
 F:	drivers/mmc/host/sdhci_bcm_kona.c
@@@ -1962,6 -1967,12 +1962,12 @@@ S:	Maintaine
 F:	drivers/bcma/
 F:	include/linux/bcma/
+ BROADCOM SYSTEMPORT ETHERNET DRIVER
+ M:	Florian Fainelli <f.fainelli@gmail.com>
+ L:	netdev@vger.kernel.org
+ S:	Supported
+ F:	drivers/net/ethernet/broadcom/bcmsysport.*
+ 
 BROCADE BFA FC SCSI DRIVER
 M:	Anil Gurumurthy <anil.gurumurthy@qlogic.com>
 M:	Sudarsana Kalluru <sudarsana.kalluru@qlogic.com>
@@@ -2240,6 -2251,12 +2246,6 @@@ L:	linux-usb@vger.kernel.or
 S:	Maintained
 F:	drivers/usb/host/ohci-ep93xx.c
-CIRRUS LOGIC CS4270 SOUND DRIVER
-M:	Timur Tabi <timur@tabi.org>
-L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
-S:	Odd Fixes
-F:	sound/soc/codecs/cs4270*
-
 CIRRUS LOGIC AUDIO CODEC DRIVERS
 M:	Brian Austin <brian.austin@cirrus.com>
 M:	Paul Handrigan <Paul.Handrigan@cirrus.com>
@@@ -2404,6 -2421,7 +2410,6 @@@ F:	drivers/net/ethernet/ti/cpmac.
 CPU FREQUENCY DRIVERS
 M:	Rafael J. Wysocki <rjw@rjwysocki.net>
 M:	Viresh Kumar <viresh.kumar@linaro.org>
-L:	cpufreq@vger.kernel.org
 L:	linux-pm@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
@@@ -2414,6 -2432,7 +2420,6 @@@ F:	include/linux/cpufreq.
 CPU FREQUENCY DRIVERS - ARM BIG LITTLE
 M:	Viresh Kumar <viresh.kumar@linaro.org>
 M:	Sudeep Holla <sudeep.holla@arm.com>
-L:	cpufreq@vger.kernel.org
 L:	linux-pm@vger.kernel.org
 W:	http://www.arm.com/products/processors/technologies/biglittleprocessing.php
 S:	Maintained
@@@ -4805,14 -4824,6 +4811,14 @@@ L:	linux-kernel@vger.kernel.or
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
 F:	kernel/irq/
+ 
+IRQCHIP DRIVERS
+M:	Thomas Gleixner <tglx@linutronix.de>
+M:	Jason Cooper <jason@lakedaemon.net>
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
+T:	git git://git.infradead.org/users/jcooper/linux.git irqchip/core
 F:	drivers/irqchip/
 IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
@@@ -5485,15 -5496,15 +5491,15 @@@ F:	Documentation/hwmon/ltc426
 F:	drivers/hwmon/ltc4261.c
 LTP (Linux Test Project)
-M:	Shubham Goyal <shubham@linux.vnet.ibm.com>
 M:	Mike Frysinger <vapier@gentoo.org>
 M:	Cyril Hrubis <chrubis@suse.cz>
-M:	Caspar Zhang <caspar@casparzhang.com>
 M:	Wanlong Gao <gaowanlong@cn.fujitsu.com>
+M:	Jan Stancek <jstancek@redhat.com>
+M:	Stanislav Kholmanskikh <stanislav.kholmanskikh@oracle.com>
+M:	Alexey Kodanev <alexey.kodanev@oracle.com>
 L:	ltp-list@lists.sourceforge.net (subscribers-only)
-W:	http://ltp.sourceforge.net/
+W:	http://linux-test-project.github.io/
 T:	git git://github.com/linux-test-project/ltp.git
-T:	git git://ltp.git.sourceforge.net/gitroot/ltp/ltp-dev
 S:	Maintained
 M32R ARCHITECTURE
@@@ -6506,10 -6517,10 +6512,10 @@@ T:	git git://openrisc.net/~jonas/linu
 F:	arch/openrisc/
 OPENVSWITCH
-M:	Jesse Gross <jesse@nicira.com>
+M:	Pravin Shelar <pshelar@nicira.com>
 L:	dev@openvswitch.org
 W:	http://openvswitch.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jesse/openvswitch.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/pshelar/openvswitch.git
 S:	Maintained
 F:	net/openvswitch/
@@@ -6699,7 -6710,6 +6705,7 @@@ F:	Documentation/PCI
 F:	drivers/pci/
 F:	include/linux/pci*
 F:	arch/x86/pci/
+F:	arch/x86/kernel/quirks.c
 PCI DRIVER FOR IMX6
 M:	Richard Zhu <r65037@freescale.com>
@@@ -7950,26 -7960,6 +7956,26 @@@ M:	Robin Holt <robinmholt@gmail.com
 S:	Maintained
 F:	drivers/misc/sgi-xp/
+SI2157 MEDIA DRIVER
+M:	Antti Palosaari <crope@iki.fi>
+L:	linux-media@vger.kernel.org
+W:	http://linuxtv.org/
+W:	http://palosaari.fi/linux/
+Q:	http://patchwork.linuxtv.org/project/linux-media/list/
+T:	git git://linuxtv.org/anttip/media_tree.git
+S:	Maintained
+F:	drivers/media/tuners/si2157*
+
+SI2168 MEDIA DRIVER
+M:	Antti Palosaari <crope@iki.fi>
+L:	linux-media@vger.kernel.org
+W:	http://linuxtv.org/
+W:	http://palosaari.fi/linux/
+Q:	http://patchwork.linuxtv.org/project/linux-media/list/
+T:	git git://linuxtv.org/anttip/media_tree.git
+S:	Maintained
+F:	drivers/media/dvb-frontends/si2168*
+
 SI470X FM RADIO RECEIVER I2C DRIVER
 M:	Hans Verkuil <hverkuil@xs4all.nl>
 L:	linux-media@vger.kernel.org
@@@ -9123,9 -9113,6 +9129,9 @@@ F:	arch/um/os-Linux/drivers
 TURBOCHANNEL SUBSYSTEM
 M:	"Maciej W. Rozycki" <macro@linux-mips.org>
+M:	Ralf Baechle <ralf@linux-mips.org>
+L:	linux-mips@linux-mips.org
+Q:	http://patchwork.linux-mips.org/project/linux-mips/list/
 S:	Maintained
 F:	drivers/tc/
 F:	include/linux/tc.h
@@@ -9979,7 -9966,7 +9985,7 @@@ F:	drivers/net/hamradio/*scc.
 F:	drivers/net/hamradio/z8530.h
 ZBUD COMPRESSED PAGE ALLOCATOR
-M:	Seth Jennings <sjenning@linux.vnet.ibm.com>
+M:	Seth Jennings <sjennings@variantweb.net>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	mm/zbud.c
@@@ -10024,7 -10011,7 +10030,7 @@@ F:	mm/zsmalloc.
 F:	include/linux/zsmalloc.h
 ZSWAP COMPRESSED SWAP CACHING
-M:	Seth Jennings <sjenning@linux.vnet.ibm.com>
+M:	Seth Jennings <sjennings@variantweb.net>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	mm/zswap.c
diff --combined arch/arm/boot/dts/am33xx.dtsi
index 7ad75b4,baf56cc..f1eea4a
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@@ -144,7 -144,7 +144,7 @@@
 			compatible = "ti,edma3";
 			ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
 			reg = <0x49000000 0x10000>,
-				<0x44e10f90 0x10>;
+				<0x44e10f90 0x40>;
 			interrupts = <12 13 14>;
 			#dma-cells = <1>;
 			dma-channels = <64>;
@@@ -665,6 -665,8 +665,8 @@@
 		mac: ethernet@4a100000 {
 			compatible = "ti,cpsw";
 			ti,hwmods = "cpgmac0";
+ 			clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>;
+ 			clock-names = "fck", "cpts";
 			cpdma_channels = <8>;
 			ale_entries = <1024>;
 			bd_ram_size = <0x2000>;
diff --combined arch/arm/boot/dts/armada-xp-matrix.dts
index 25674fe,3bb8c00..7e291e2
--- a/arch/arm/boot/dts/armada-xp-matrix.dts
+++ b/arch/arm/boot/dts/armada-xp-matrix.dts
@@@ -37,15 -37,19 +37,15 @@@
 	internal-regs {
 		serial@12000 {
 			status = "okay";
 		};
 		serial@12100 {
 			status = "okay";
 		};
 		serial@12200 {
 			status = "okay";
 		};
 		serial@12300 {
-			clock-frequency = <250000000>;
 			status = "okay";
 		};
@@@ -57,6 -61,10 +57,10 @@@
 		ethernet@30000 {
 			status = "okay";
 			phy-mode = "sgmii";
+ 			fixed-link {
+ 				speed = <1000>;
+ 				full-duplex;
+ 			};
 		};
 		pcie-controller {
diff --combined arch/sparc/include/asm/checksum_32.h
index c80cf02,04471dc..426b238
--- a/arch/sparc/include/asm/checksum_32.h
+++ b/arch/sparc/include/asm/checksum_32.h
@@@ -29,7 -29,7 +29,7 @@@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
 /* the same as csum_partial, but copies from fs:src while it
  * checksums
@@@ -38,7 -38,7 +38,7 @@@
  * better 64-bit) boundary
  */
-extern unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
+unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
 static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst,
 					       int len, __wsum sum)
@@@ -238,4 -238,16 +238,16 @@@ static inline __sum16 ip_compute_csum(c
 	return csum_fold(csum_partial(buff, len, 0));
 }
+ #define HAVE_ARCH_CSUM_ADD
+ static inline __wsum csum_add(__wsum csum, __wsum addend)
+ {
+ 	__asm__ __volatile__(
+ 		"addcc   %0, %1, %0\n"
+ 		"addx    %0, %%g0, %0"
+ 		: "=r" (csum)
+ 		: "r" (addend), "0" (csum));
+ 
+ 	return csum;
+ }
+ 
 #endif /* !(__SPARC_CHECKSUM_H) */
diff --combined arch/sparc/include/asm/checksum_64.h
index b3c8b9d,2ff81ae..b8779a6
--- a/arch/sparc/include/asm/checksum_64.h
+++ b/arch/sparc/include/asm/checksum_64.h
@@@ -29,7 -29,7 +29,7 @@@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-extern __wsum csum_partial(const void * buff, int len, __wsum sum);
+__wsum csum_partial(const void * buff, int len, __wsum sum);
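The csum_add() helper added above (the same addition lands in checksum_32.h and checksum_64.h) performs a one's-complement 32-bit add: addcc adds the two words and addx folds the resulting carry flag back into the sum via %g0. A minimal portable sketch of the same operation, written as a standalone userspace program rather than the kernel's sparc asm:

#include <stdint.h>
#include <stdio.h>

/* One's-complement add, equivalent to the addcc/addx pair above:
 * add the two words in a wider type, then fold the carry back in. */
static uint32_t csum_add(uint32_t csum, uint32_t addend)
{
	uint64_t sum = (uint64_t)csum + addend;

	return (uint32_t)(sum + (sum >> 32)); /* end-around carry */
}

int main(void)
{
	/* 0xffffffff + 1 wraps and carries: the result is 1, not 0 */
	printf("%#x\n", csum_add(0xffffffffu, 1));
	return 0;
}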
 /* the same as csum_partial, but copies from user space while it
  * checksums
@@@ -37,12 -37,12 +37,12 @@@
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
-					int len, __wsum sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+				 int len, __wsum sum);
-extern long __csum_partial_copy_from_user(const void __user *src,
-					  void *dst, int len,
-					  __wsum sum);
+long __csum_partial_copy_from_user(const void __user *src,
+				   void *dst, int len,
+				   __wsum sum);
 static inline __wsum csum_partial_copy_from_user(const void __user *src,
@@@ -59,9 -59,9 +59,9 @@@
  *	Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-extern long __csum_partial_copy_to_user(const void *src,
-					void __user *dst, int len,
-					__wsum sum);
+long __csum_partial_copy_to_user(const void *src,
+				 void __user *dst, int len,
+				 __wsum sum);
 static inline __wsum csum_and_copy_to_user(const void *src,
@@@ -77,7 -77,7 +77,7 @@@
 /* ihl is always 5 or greater, almost always is 5, and iph is word aligned
  * the majority of the time.
  */
-extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 /* Fold a partial checksum without adding pseudo headers. */
 static inline __sum16 csum_fold(__wsum sum)
@@@ -96,9 -96,9 +96,9 @@@
 }
 static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
-					unsigned int len,
-					unsigned short proto,
-					__wsum sum)
+					 unsigned int len,
+					 unsigned short proto,
+					 __wsum sum)
 {
 	__asm__ __volatile__(
 "	addcc		%1, %0, %0\n"
@@@ -116,9 -116,9 +116,9 @@@
  * returns a 16-bit checksum, already complemented
  */
 static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
-					unsigned short len,
-					unsigned short proto,
-					__wsum sum)
+					 unsigned short len,
+					 unsigned short proto,
+					 __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@@ -164,4 -164,16 +164,16 @@@ static inline __sum16 ip_compute_csum(c
 	return csum_fold(csum_partial(buff, len, 0));
 }
+ #define HAVE_ARCH_CSUM_ADD
+ static inline __wsum csum_add(__wsum csum, __wsum addend)
+ {
+ 	__asm__ __volatile__(
+ 		"addcc   %0, %1, %0\n"
+ 		"addx    %0, %%g0, %0"
+ 		: "=r" (csum)
+ 		: "r" (addend), "0" (csum));
+ 
+ 	return csum;
+ }
+ 
 #endif /* !(__SPARC64_CHECKSUM_H) */
diff --combined arch/x86/net/bpf_jit_comp.c
index 6d5663a,92aef8fd..080f3f0
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@@ -1,6 -1,7 +1,7 @@@
 /* bpf_jit_comp.c : BPF JIT compiler
  *
  * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
+ * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@@ -14,28 -15,16 +15,16 @@@
 #include <linux/if_vlan.h>
 #include <linux/random.h>
- /*
-  * Conventions :
-  *  EAX : BPF A accumulator
-  *  EBX : BPF X accumulator
-  *  RDI : pointer to skb   (first argument given to JIT function)
-  *  RBP : frame pointer (even if CONFIG_FRAME_POINTER=n)
-  *  ECX,EDX,ESI : scratch registers
-  *  r9d : skb->len - skb->data_len (headlen)
-  *  r8  : skb->data
-  * -8(RBP) : saved RBX value
-  * -16(RBP)..-80(RBP) : BPF_MEMWORDS values
-  */
 int bpf_jit_enable __read_mostly;
 /*
  * assembly code in arch/x86/net/bpf_jit.S
  */
- extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
+ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
 extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
- extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[];
+ extern u8 sk_load_byte_positive_offset[];
 extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
- extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[];
+ extern u8 sk_load_byte_negative_offset[];
 static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
 {
@@@ -56,30 -45,44 +45,44 @@@
 #define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
 #define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
 #define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
- #define EMIT1_off32(b1, off)	do { EMIT1(b1); EMIT(off, 4);} while (0)
- 
- #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
- #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
+ #define EMIT1_off32(b1, off) \
+ 	do {EMIT1(b1); EMIT(off, 4); } while (0)
+ #define EMIT2_off32(b1, b2, off) \
+ 	do {EMIT2(b1, b2); EMIT(off, 4); } while (0)
+ #define EMIT3_off32(b1, b2, b3, off) \
+ 	do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
+ #define EMIT4_off32(b1, b2, b3, b4, off) \
+ 	do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
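The EMIT*() macros above pack up to four opcode bytes into one little-endian word and append it to the instruction stream through emit_code(). A small userspace sketch of the same technique (the buffer and macro names mirror the kernel's, but this is an illustration, not the kernel code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t image[64];
static uint8_t *prog = image;

/* Append the 'len' low-order bytes of 'bytes' to the stream, least
 * significant byte first - the order EMIT1..EMIT4 packed them in. */
static void emit(uint32_t bytes, unsigned int len)
{
	memcpy(prog, &bytes, len);   /* assumes a little-endian host, as x86 is */
	prog += len;
}

#define EMIT1(b1)     emit((b1), 1)
#define EMIT2(b1, b2) emit((b1) + ((b2) << 8), 2)

int main(void)
{
	EMIT1(0x55);        /* push %rbp */
	EMIT2(0x31, 0xc0);  /* xor %eax,%eax */

	for (uint8_t *p = image; p < prog; p++)
		printf("%02x ", *p);
	printf("\n");
	return 0;
}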
 static inline bool is_imm8(int value)
 {
 	return value <= 127 && value >= -128;
 }
- static inline bool is_near(int offset)
+ static inline bool is_simm32(s64 value)
 {
- 	return offset <= 127 && offset >= -128;
+ 	return value == (s64) (s32) value;
 }
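is_imm8() and the new is_simm32() decide which x86 encoding a displacement or immediate can use: values in [-128, 127] fit the short disp8/imm8 forms, values representable as a sign-extended 32-bit quantity fit the disp32/imm32 forms, and anything wider cannot be encoded in a single instruction. A self-contained illustration of the two predicates:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(int64_t value)
{
	/* true iff sign-extending the low 32 bits reproduces the value */
	return value == (int64_t)(int32_t)value;
}

int main(void)
{
	int disps[] = { 8, 127, 128, -129, 70000 };

	for (unsigned int i = 0; i < sizeof(disps) / sizeof(disps[0]); i++)
		printf("%6d -> %s\n", disps[i],
		       is_imm8(disps[i]) ? "disp8 form" :
		       is_simm32(disps[i]) ? "disp32 form" : "unencodable");
	return 0;
}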
- #define EMIT_JMP(offset)						\
- do {									\
- 	if (offset) {							\
- 		if (is_near(offset))					\
- 			EMIT2(0xeb, offset); /* jmp .+off8 */		\
- 		else							\
- 			EMIT1_off32(0xe9, offset); /* jmp .+off32 */	\
- 	}								\
- } while (0)
+ /* mov A, X */
+ #define EMIT_mov(A, X) \
+ 	do {if (A != X) \
+ 		EMIT3(add_2mod(0x48, A, X), 0x89, add_2reg(0xC0, A, X)); \
+ 	} while (0)
+ 
+ static int bpf_size_to_x86_bytes(int bpf_size)
+ {
+ 	if (bpf_size == BPF_W)
+ 		return 4;
+ 	else if (bpf_size == BPF_H)
+ 		return 2;
+ 	else if (bpf_size == BPF_B)
+ 		return 1;
+ 	else if (bpf_size == BPF_DW)
+ 		return 4; /* imm32 */
+ 	else
+ 		return 0;
+ }
 /* list of x86 cond jumps opcodes (. + s8)
  * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
@@@ -90,27 -93,8 +93,8 @@@
 #define X86_JNE 0x75
 #define X86_JBE 0x76
 #define X86_JA  0x77
- 
- #define EMIT_COND_JMP(op, offset)				\
- do {								\
- 	if (is_near(offset))					\
- 		EMIT2(op, offset); /* jxx .+off8 */		\
- 	else {							\
- 		EMIT2(0x0f, op + 0x10);				\
- 		EMIT(offset, 4); /* jxx .+off32 */		\
- 	}							\
- } while (0)
- 
- #define COND_SEL(CODE, TOP, FOP)	\
- 	case CODE:			\
- 		t_op = TOP;		\
- 		f_op = FOP;		\
- 		goto cond_branch
- 
- 
- #define SEEN_DATAREF 1 /* might call external helpers */
- #define SEEN_XREG    2 /* ebx is used */
- #define SEEN_MEM     4 /* use mem[] for temporary storage */
+ #define X86_JGE 0x7D
+ #define X86_JG  0x7F
 static inline void bpf_flush_icache(void *start, void *end)
 {
@@@ -125,26 -109,6 +109,6 @@@
 #define CHOOSE_LOAD_FUNC(K, func) \
 	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
- /* Helper to find the offset of pkt_type in sk_buff
-  * We want to make sure its still a 3bit field starting at a byte boundary.
-  */
- #define PKT_TYPE_MAX 7
- static int pkt_type_offset(void)
- {
- 	struct sk_buff skb_probe = {
- 		.pkt_type = ~0,
- 	};
- 	char *ct = (char *)&skb_probe;
- 	unsigned int off;
- 
- 	for (off = 0; off < sizeof(struct sk_buff); off++) {
- 		if (ct[off] == PKT_TYPE_MAX)
- 			return off;
- 	}
- 	pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n");
- 	return -1;
- }
- 
 struct bpf_binary_header {
 	unsigned int	pages;
 	/* Note : for security reasons, bpf code will follow a randomly
@@@ -171,590 -135,778 +135,778 @@@ static struct bpf_binary_header *bpf_al
 	memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
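The deleted pkt_type_offset() relied on a probing trick worth noting: C has no offsetof() for bitfields, so it set the 3-bit pkt_type field to all ones in an otherwise zeroed struct and scanned the raw bytes for that pattern. A standalone sketch of the same trick on a made-up struct (probe_t and its fields are illustrative, not kernel types):

#include <stdio.h>
#include <string.h>

struct probe_t {
	unsigned long a;
	unsigned short b;
	unsigned char type:3;   /* the bitfield whose byte offset we want */
	unsigned char other:5;
};

#define TYPE_MAX 7		/* all ones in a 3-bit field */

static int type_offset(void)
{
	struct probe_t probe;
	const char *ct = (const char *)&probe;
	unsigned int off;

	memset(&probe, 0, sizeof(probe));
	probe.type = ~0;	/* sets the field to TYPE_MAX */

	for (off = 0; off < sizeof(probe); off++)
		if (ct[off] == TYPE_MAX)
			return off;
	return -1;		/* field doesn't start on a byte boundary */
}

int main(void)
{
	printf("type starts at byte %d\n", type_offset());
	return 0;
}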
 	header->pages = sz / PAGE_SIZE;
- 	hole = sz - (proglen + sizeof(*header));
+ 	hole = min(sz - (proglen + sizeof(*header)), PAGE_SIZE - sizeof(*header));
 	/* insert a random number of int3 instructions before BPF code */
 	*image_ptr = &header->image[prandom_u32() % hole];
 	return header;
 }
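bpf_alloc_binary_header() hardens the JIT by filling the whole allocation with int3 (0xcc) and starting the real code at a random offset inside the unused slack, so the location of the emitted instructions is not predictable. A userspace sketch of that idea, with rand() standing in for prandom_u32() and an arbitrary image size:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#define IMAGE_SZ 4096

int main(void)
{
	uint8_t *image = malloc(IMAGE_SZ);
	size_t proglen = 700;		   /* pretend the JIT emitted 700 bytes */
	size_t hole = IMAGE_SZ - proglen;  /* unused slack in the buffer */
	uint8_t *start;

	if (!image)
		return 1;
	srand(time(NULL));
	memset(image, 0xcc, IMAGE_SZ);	   /* fill everything with int3 traps */
	start = &image[rand() % hole];	   /* random start for the real code */

	printf("code would be copied to offset %td\n", start - image);
	free(image);
	return 0;
}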
- void bpf_jit_compile(struct sk_filter *fp)
+ /* pick a register outside of BPF range for JIT internal work */
+ #define AUX_REG (MAX_BPF_REG + 1)
+ 
+ /* the following table maps BPF registers to x64 registers.
+  * x64 register r12 is unused, since if used as base address register
+  * in load/store instructions, it always needs an extra byte of encoding
+  */
+ static const int reg2hex[] = {
+ 	[BPF_REG_0] = 0,  /* rax */
+ 	[BPF_REG_1] = 7,  /* rdi */
+ 	[BPF_REG_2] = 6,  /* rsi */
+ 	[BPF_REG_3] = 2,  /* rdx */
+ 	[BPF_REG_4] = 1,  /* rcx */
+ 	[BPF_REG_5] = 0,  /* r8 */
+ 	[BPF_REG_6] = 3,  /* rbx callee saved */
+ 	[BPF_REG_7] = 5,  /* r13 callee saved */
+ 	[BPF_REG_8] = 6,  /* r14 callee saved */
+ 	[BPF_REG_9] = 7,  /* r15 callee saved */
+ 	[BPF_REG_FP] = 5, /* rbp readonly */
+ 	[AUX_REG] = 3,    /* r11 temp register */
+ };
+ 
+ /* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
+  * which need extra byte of encoding.
+  * rax,rcx,...,rbp have simpler encoding
+  */
+ static inline bool is_ereg(u32 reg)
 {
- 	u8 temp[64];
- 	u8 *prog;
- 	unsigned int proglen, oldproglen = 0;
- 	int ilen, i;
- 	int t_offset, f_offset;
- 	u8 t_op, f_op, seen = 0, pass;
- 	u8 *image = NULL;
- 	struct bpf_binary_header *header = NULL;
- 	u8 *func;
- 	int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
- 	unsigned int cleanup_addr; /* epilogue code offset */
- 	unsigned int *addrs;
- 	const struct sock_filter *filter = fp->insns;
- 	int flen = fp->len;
+ 	if (reg == BPF_REG_5 || reg == AUX_REG ||
+ 	    (reg >= BPF_REG_7 && reg <= BPF_REG_9))
+ 		return true;
+ 	else
+ 		return false;
+ }
- 	if (!bpf_jit_enable)
- 		return;
+ /* add modifiers if 'reg' maps to x64 registers r8..r15 */
+ static inline u8 add_1mod(u8 byte, u32 reg)
+ {
+ 	if (is_ereg(reg))
+ 		byte |= 1;
+ 	return byte;
+ }
- 	addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
- 	if (addrs == NULL)
- 		return;
+ static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
+ {
+ 	if (is_ereg(r1))
+ 		byte |= 1;
+ 	if (is_ereg(r2))
+ 		byte |= 4;
+ 	return byte;
+ }
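add_1mod()/add_2mod() fold the REX.B and REX.R bits into a base prefix whenever an operand maps to r8..r15; together with reg2hex[] and the add_*reg() helpers just below, that is all it takes to encode a register-to-register instruction. A standalone sketch that assembles a mov between two 64-bit registers the same way (only rax/rbx/r11 are modeled, purely for illustration):

#include <stdint.h>
#include <stdio.h>

enum { RAX, RBX, R11, NREGS };

/* low 3 bits of each register number, as in the JIT's reg2hex[] */
static const int reg2hex[NREGS] = { [RAX] = 0, [RBX] = 3, [R11] = 3 };
/* registers r8..r15 need a REX.B/REX.R bit, as in is_ereg() */
static const int is_ereg[NREGS] = { [R11] = 1 };

static uint8_t add_2mod(uint8_t byte, int r1, int r2)
{
	if (is_ereg[r1])
		byte |= 1;	/* REX.B: extends the ModRM r/m field */
	if (is_ereg[r2])
		byte |= 4;	/* REX.R: extends the ModRM reg field */
	return byte;
}

static uint8_t add_2reg(uint8_t byte, int dst, int src)
{
	return byte + reg2hex[dst] + (reg2hex[src] << 3);
}

int main(void)
{
	int dst = RAX, src = R11;

	/* mov rax, r11  ->  4c 89 d8 */
	printf("%02x 89 %02x\n", add_2mod(0x48, dst, src),
	       add_2reg(0xC0, dst, src));
	return 0;
}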
- /* Before first pass, make a rough estimation of addrs[] - * each bpf instruction is translated to less than 64 bytes + /* encode dest register 'a_reg' into x64 opcode 'byte' */ + static inline u8 add_1reg(u8 byte, u32 a_reg) + { + return byte + reg2hex[a_reg]; + } + + /* encode dest 'a_reg' and src 'x_reg' registers into x64 opcode 'byte' */ + static inline u8 add_2reg(u8 byte, u32 a_reg, u32 x_reg) + { + return byte + reg2hex[a_reg] + (reg2hex[x_reg] << 3); + } + + struct jit_context { + unsigned int cleanup_addr; /* epilogue code offset */ + bool seen_ld_abs; + }; + + static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, + int oldproglen, struct jit_context *ctx) + { + struct sock_filter_int *insn = bpf_prog->insnsi; + int insn_cnt = bpf_prog->len; + u8 temp[64]; + int i; + int proglen = 0; + u8 *prog = temp; + int stacksize = MAX_BPF_STACK + + 32 /* space for rbx, r13, r14, r15 */ + + 8 /* space for skb_copy_bits() buffer */; + + EMIT1(0x55); /* push rbp */ + EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */ + + /* sub rsp, stacksize */ + EMIT3_off32(0x48, 0x81, 0xEC, stacksize); + + /* all classic BPF filters use R6(rbx) save it */ + + /* mov qword ptr [rbp-X],rbx */ + EMIT3_off32(0x48, 0x89, 0x9D, -stacksize); + + /* sk_convert_filter() maps classic BPF register X to R7 and uses R8 + * as temporary, so all tcpdump filters need to spill/fill R7(r13) and + * R8(r14). R9(r15) spill could be made conditional, but there is only + * one 'bpf_error' return path out of helper functions inside bpf_jit.S + * The overhead of extra spill is negligible for any filter other + * than synthetic ones. Therefore not worth adding complexity. */ - for (proglen = 0, i = 0; i < flen; i++) { - proglen += 64; - addrs[i] = proglen; + + /* mov qword ptr [rbp-X],r13 */ + EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8); + /* mov qword ptr [rbp-X],r14 */ + EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16); + /* mov qword ptr [rbp-X],r15 */ + EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24); + + /* clear A and X registers */ + EMIT2(0x31, 0xc0); /* xor eax, eax */ + EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */ + + if (ctx->seen_ld_abs) { + /* r9d : skb->len - skb->data_len (headlen) + * r10 : skb->data + */ + if (is_imm8(offsetof(struct sk_buff, len))) + /* mov %r9d, off8(%rdi) */ + EMIT4(0x44, 0x8b, 0x4f, + offsetof(struct sk_buff, len)); + else + /* mov %r9d, off32(%rdi) */ + EMIT3_off32(0x44, 0x8b, 0x8f, + offsetof(struct sk_buff, len)); + + if (is_imm8(offsetof(struct sk_buff, data_len))) + /* sub %r9d, off8(%rdi) */ + EMIT4(0x44, 0x2b, 0x4f, + offsetof(struct sk_buff, data_len)); + else + EMIT3_off32(0x44, 0x2b, 0x8f, + offsetof(struct sk_buff, data_len)); + + if (is_imm8(offsetof(struct sk_buff, data))) + /* mov %r10, off8(%rdi) */ + EMIT4(0x4c, 0x8b, 0x57, + offsetof(struct sk_buff, data)); + else + /* mov %r10, off32(%rdi) */ + EMIT3_off32(0x4c, 0x8b, 0x97, + offsetof(struct sk_buff, data)); } - cleanup_addr = proglen; /* epilogue address */
- for (pass = 0; pass < 10; pass++) { - u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen; - /* no prologue/epilogue for trivial filters (RET something) */ - proglen = 0; - prog = temp; + for (i = 0; i < insn_cnt; i++, insn++) { + const s32 K = insn->imm; + u32 a_reg = insn->a_reg; + u32 x_reg = insn->x_reg; + u8 b1 = 0, b2 = 0, b3 = 0; + s64 jmp_offset; + u8 jmp_cond; + int ilen; + u8 *func; + + switch (insn->code) { + /* ALU */ + case BPF_ALU | BPF_ADD | BPF_X: + case BPF_ALU | BPF_SUB | BPF_X: + case BPF_ALU | BPF_AND | BPF_X: + case BPF_ALU | BPF_OR | BPF_X: + case BPF_ALU | BPF_XOR | BPF_X: + case BPF_ALU64 | BPF_ADD | BPF_X: + case BPF_ALU64 | BPF_SUB | BPF_X: + case BPF_ALU64 | BPF_AND | BPF_X: + case BPF_ALU64 | BPF_OR | BPF_X: + case BPF_ALU64 | BPF_XOR | BPF_X: + switch (BPF_OP(insn->code)) { + case BPF_ADD: b2 = 0x01; break; + case BPF_SUB: b2 = 0x29; break; + case BPF_AND: b2 = 0x21; break; + case BPF_OR: b2 = 0x09; break; + case BPF_XOR: b2 = 0x31; break; + } + if (BPF_CLASS(insn->code) == BPF_ALU64) + EMIT1(add_2mod(0x48, a_reg, x_reg)); + else if (is_ereg(a_reg) || is_ereg(x_reg)) + EMIT1(add_2mod(0x40, a_reg, x_reg)); + EMIT2(b2, add_2reg(0xC0, a_reg, x_reg)); + break;
- if (seen_or_pass0) { - EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */ - EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */ - /* note : must save %rbx in case bpf_error is hit */ - if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF)) - EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */ - if (seen_or_pass0 & SEEN_XREG) - CLEAR_X(); /* make sure we dont leek kernel memory */ - - /* - * If this filter needs to access skb data, - * loads r9 and r8 with : - * r9 = skb->len - skb->data_len - * r8 = skb->data + /* mov A, X */ + case BPF_ALU64 | BPF_MOV | BPF_X: + EMIT_mov(a_reg, x_reg); + break; + + /* mov32 A, X */ + case BPF_ALU | BPF_MOV | BPF_X: + if (is_ereg(a_reg) || is_ereg(x_reg)) + EMIT1(add_2mod(0x40, a_reg, x_reg)); + EMIT2(0x89, add_2reg(0xC0, a_reg, x_reg)); + break; + + /* neg A */ + case BPF_ALU | BPF_NEG: + case BPF_ALU64 | BPF_NEG: + if (BPF_CLASS(insn->code) == BPF_ALU64) + EMIT1(add_1mod(0x48, a_reg)); + else if (is_ereg(a_reg)) + EMIT1(add_1mod(0x40, a_reg)); + EMIT2(0xF7, add_1reg(0xD8, a_reg)); + break; + + case BPF_ALU | BPF_ADD | BPF_K: + case BPF_ALU | BPF_SUB | BPF_K: + case BPF_ALU | BPF_AND | BPF_K: + case BPF_ALU | BPF_OR | BPF_K: + case BPF_ALU | BPF_XOR | BPF_K: + case BPF_ALU64 | BPF_ADD | BPF_K: + case BPF_ALU64 | BPF_SUB | BPF_K: + case BPF_ALU64 | BPF_AND | BPF_K: + case BPF_ALU64 | BPF_OR | BPF_K: + case BPF_ALU64 | BPF_XOR | BPF_K: + if (BPF_CLASS(insn->code) == BPF_ALU64) + EMIT1(add_1mod(0x48, a_reg)); + else if (is_ereg(a_reg)) + EMIT1(add_1mod(0x40, a_reg)); + + switch (BPF_OP(insn->code)) { + case BPF_ADD: b3 = 0xC0; break; + case BPF_SUB: b3 = 0xE8; break; + case BPF_AND: b3 = 0xE0; break; + case BPF_OR: b3 = 0xC8; break; + case BPF_XOR: b3 = 0xF0; break; + } + + if (is_imm8(K)) + EMIT3(0x83, add_1reg(b3, a_reg), K); + else + EMIT2_off32(0x81, add_1reg(b3, a_reg), K); + break; + + case BPF_ALU64 | BPF_MOV | BPF_K: + /* optimization: if imm32 is positive, + * use 'mov eax, imm32' (which zero-extends imm32) + * to save 2 bytes */ - if (seen_or_pass0 & SEEN_DATAREF) { - if (offsetof(struct sk_buff, len) <= 127) - /* mov off8(%rdi),%r9d */ - EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len)); - else { - /* mov off32(%rdi),%r9d */ - EMIT3(0x44, 0x8b, 0x8f); - EMIT(offsetof(struct sk_buff, len), 4); - } - if (is_imm8(offsetof(struct sk_buff, data_len))) - /* sub off8(%rdi),%r9d */ - EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len)); - else { - EMIT3(0x44, 0x2b, 0x8f); - EMIT(offsetof(struct sk_buff, data_len), 4); - } + if (K < 0) { + /* 'mov rax, imm32' sign extends imm32 */ + b1 = add_1mod(0x48, a_reg); + b2 = 0xC7; + b3 = 0xC0; + EMIT3_off32(b1, b2, add_1reg(b3, a_reg), K); + break; + }
- if (is_imm8(offsetof(struct sk_buff, data))) - /* mov off8(%rdi),%r8 */ - EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data)); - else { - /* mov off32(%rdi),%r8 */ - EMIT3(0x4c, 0x8b, 0x87); - EMIT(offsetof(struct sk_buff, data), 4); - } + case BPF_ALU | BPF_MOV | BPF_K: + /* mov %eax, imm32 */ + if (is_ereg(a_reg)) + EMIT1(add_1mod(0x40, a_reg)); + EMIT1_off32(add_1reg(0xB8, a_reg), K); + break; + + /* A %= X, A /= X, A %= K, A /= K */ + case BPF_ALU | BPF_MOD | BPF_X: + case BPF_ALU | BPF_DIV | BPF_X: + case BPF_ALU | BPF_MOD | BPF_K: + case BPF_ALU | BPF_DIV | BPF_K: + case BPF_ALU64 | BPF_MOD | BPF_X: + case BPF_ALU64 | BPF_DIV | BPF_X: + case BPF_ALU64 | BPF_MOD | BPF_K: + case BPF_ALU64 | BPF_DIV | BPF_K: + EMIT1(0x50); /* push rax */ + EMIT1(0x52); /* push rdx */ + + if (BPF_SRC(insn->code) == BPF_X) + /* mov r11, X */ + EMIT_mov(AUX_REG, x_reg); + else + /* mov r11, K */ + EMIT3_off32(0x49, 0xC7, 0xC3, K); + + /* mov rax, A */ + EMIT_mov(BPF_REG_0, a_reg); + + /* xor edx, edx + * equivalent to 'xor rdx, rdx', but one byte less + */ + EMIT2(0x31, 0xd2); + + if (BPF_SRC(insn->code) == BPF_X) { + /* if (X == 0) return 0 */ + + /* cmp r11, 0 */ + EMIT4(0x49, 0x83, 0xFB, 0x00); + + /* jne .+9 (skip over pop, pop, xor and jmp) */ + EMIT2(X86_JNE, 1 + 1 + 2 + 5); + EMIT1(0x5A); /* pop rdx */ + EMIT1(0x58); /* pop rax */ + EMIT2(0x31, 0xc0); /* xor eax, eax */ + + /* jmp cleanup_addr + * addrs[i] - 11, because there are 11 bytes + * after this insn: div, mov, pop, pop, mov + */ + jmp_offset = ctx->cleanup_addr - (addrs[i] - 11); + EMIT1_off32(0xE9, jmp_offset); } - }
- switch (filter[0].code) { - case BPF_S_RET_K: - case BPF_S_LD_W_LEN: - case BPF_S_ANC_PROTOCOL: - case BPF_S_ANC_IFINDEX: - case BPF_S_ANC_MARK: - case BPF_S_ANC_RXHASH: - case BPF_S_ANC_CPU: - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: - case BPF_S_ANC_QUEUE: - case BPF_S_ANC_PKTTYPE: - case BPF_S_LD_W_ABS: - case BPF_S_LD_H_ABS: - case BPF_S_LD_B_ABS: - /* first instruction sets A register (or is RET 'constant') */ + if (BPF_CLASS(insn->code) == BPF_ALU64) + /* div r11 */ + EMIT3(0x49, 0xF7, 0xF3); + else + /* div r11d */ + EMIT3(0x41, 0xF7, 0xF3); + + if (BPF_OP(insn->code) == BPF_MOD) + /* mov r11, rdx */ + EMIT3(0x49, 0x89, 0xD3); + else + /* mov r11, rax */ + EMIT3(0x49, 0x89, 0xC3); + + EMIT1(0x5A); /* pop rdx */ + EMIT1(0x58); /* pop rax */ + + /* mov A, r11 */ + EMIT_mov(a_reg, AUX_REG); break; - default: - /* make sure we dont leak kernel information to user */ - CLEAR_A(); /* A = 0 */ - }
- for (i = 0; i < flen; i++) { - unsigned int K = filter[i].k; + case BPF_ALU | BPF_MUL | BPF_K: + case BPF_ALU | BPF_MUL | BPF_X: + case BPF_ALU64 | BPF_MUL | BPF_K: + case BPF_ALU64 | BPF_MUL | BPF_X: + EMIT1(0x50); /* push rax */ + EMIT1(0x52); /* push rdx */ + + /* mov r11, A */ + EMIT_mov(AUX_REG, a_reg); + + if (BPF_SRC(insn->code) == BPF_X) + /* mov rax, X */ + EMIT_mov(BPF_REG_0, x_reg); + else + /* mov rax, K */ + EMIT3_off32(0x48, 0xC7, 0xC0, K); + + if (BPF_CLASS(insn->code) == BPF_ALU64) + EMIT1(add_1mod(0x48, AUX_REG)); + else if (is_ereg(AUX_REG)) + EMIT1(add_1mod(0x40, AUX_REG)); + /* mul(q) r11 */ + EMIT2(0xF7, add_1reg(0xE0, AUX_REG)); + + /* mov r11, rax */ + EMIT_mov(AUX_REG, BPF_REG_0); + + EMIT1(0x5A); /* pop rdx */ + EMIT1(0x58); /* pop rax */ + + /* mov A, r11 */ + EMIT_mov(a_reg, AUX_REG); + break;
- switch (filter[i].code) { - case BPF_S_ALU_ADD_X: /* A += X; */ - seen |= SEEN_XREG; - EMIT2(0x01, 0xd8); /* add %ebx,%eax */ - break; - case BPF_S_ALU_ADD_K: /* A += K; */ - if (!K) - break; - if (is_imm8(K)) - EMIT3(0x83, 0xc0, K); /* add imm8,%eax */ - else - EMIT1_off32(0x05, K); /* add imm32,%eax */ - break; - case BPF_S_ALU_SUB_X: /* A -= X; */ - seen |= SEEN_XREG; - EMIT2(0x29, 0xd8); /* sub %ebx,%eax */ - break; - case BPF_S_ALU_SUB_K: /* A -= K */ - if (!K) - break; - if (is_imm8(K)) - EMIT3(0x83, 0xe8, K); /* sub imm8,%eax */ - else - EMIT1_off32(0x2d, K); /* sub imm32,%eax */ - break; - case BPF_S_ALU_MUL_X: /* A *= X; */ - seen |= SEEN_XREG; - EMIT3(0x0f, 0xaf, 0xc3); /* imul %ebx,%eax */ - break; - case BPF_S_ALU_MUL_K: /* A *= K */ - if (is_imm8(K)) - EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */ - else { - EMIT2(0x69, 0xc0); /* imul imm32,%eax */ - EMIT(K, 4); - } - break; - case BPF_S_ALU_DIV_X: /* A /= X; */ - seen |= SEEN_XREG; - EMIT2(0x85, 0xdb); /* test %ebx,%ebx */ - if (pc_ret0 > 0) { - /* addrs[pc_ret0 - 1] is start address of target - * (addrs[i] - 4) is the address following this jmp - * ("xor %edx,%edx; div %ebx" being 4 bytes long) - */ - EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] - - (addrs[i] - 4)); - } else { - EMIT_COND_JMP(X86_JNE, 2 + 5); - CLEAR_A(); - EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */ - } - EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */ - break; - case BPF_S_ALU_MOD_X: /* A %= X; */ - seen |= SEEN_XREG; - EMIT2(0x85, 0xdb); /* test %ebx,%ebx */ - if (pc_ret0 > 0) { - /* addrs[pc_ret0 - 1] is start address of target - * (addrs[i] - 6) is the address following this jmp - * ("xor %edx,%edx; div %ebx;mov %edx,%eax" being 6 bytes long) - */ - EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] - - (addrs[i] - 6)); - } else { - EMIT_COND_JMP(X86_JNE, 2 + 5); - CLEAR_A(); - EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 6)); /* jmp .+off32 */ - } - EMIT2(0x31, 0xd2); /* xor %edx,%edx */ - EMIT2(0xf7, 0xf3); /* div %ebx */ - EMIT2(0x89, 0xd0); /* mov %edx,%eax */ - break; - case BPF_S_ALU_MOD_K: /* A %= K; */ - if (K == 1) { - CLEAR_A(); - break; - } - EMIT2(0x31, 0xd2); /* xor %edx,%edx */ - EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */ - EMIT2(0xf7, 0xf1); /* div %ecx */ - EMIT2(0x89, 0xd0); /* mov %edx,%eax */ - break; - case BPF_S_ALU_DIV_K: /* A /= K */ - if (K == 1) - break; - EMIT2(0x31, 0xd2); /* xor %edx,%edx */ - EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */ - EMIT2(0xf7, 0xf1); /* div %ecx */ - break; - case BPF_S_ALU_AND_X: - seen |= SEEN_XREG; - EMIT2(0x21, 0xd8); /* and %ebx,%eax */ - break; - case BPF_S_ALU_AND_K: - if (K >= 0xFFFFFF00) { - EMIT2(0x24, K & 0xFF); /* and imm8,%al */ - } else if (K >= 0xFFFF0000) { - EMIT2(0x66, 0x25); /* and imm16,%ax */ - EMIT(K, 2); - } else { - EMIT1_off32(0x25, K); /* and imm32,%eax */ - } - break; - case BPF_S_ALU_OR_X: - seen |= SEEN_XREG; - EMIT2(0x09, 0xd8); /* or %ebx,%eax */ - break; - case BPF_S_ALU_OR_K: - if (is_imm8(K)) - EMIT3(0x83, 0xc8, K); /* or imm8,%eax */ - else - EMIT1_off32(0x0d, K); /* or imm32,%eax */ - break; - case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */ - case BPF_S_ALU_XOR_X: - seen |= SEEN_XREG; - EMIT2(0x31, 0xd8); /* xor %ebx,%eax */ - break; - case BPF_S_ALU_XOR_K: /* A ^= K; */ - if (K == 0) - break; - if (is_imm8(K)) - EMIT3(0x83, 0xf0, K); /* xor imm8,%eax */ - else - EMIT1_off32(0x35, K); /* xor imm32,%eax */ - break; - case BPF_S_ALU_LSH_X: /* A <<= X; */ - seen |= SEEN_XREG; - EMIT4(0x89, 0xd9, 0xd3, 0xe0); /* mov %ebx,%ecx; shl %cl,%eax */ - 
break; - case BPF_S_ALU_LSH_K: - if (K == 0) - break; - else if (K == 1) - EMIT2(0xd1, 0xe0); /* shl %eax */ - else - EMIT3(0xc1, 0xe0, K); - break; - case BPF_S_ALU_RSH_X: /* A >>= X; */ - seen |= SEEN_XREG; - EMIT4(0x89, 0xd9, 0xd3, 0xe8); /* mov %ebx,%ecx; shr %cl,%eax */ - break; - case BPF_S_ALU_RSH_K: /* A >>= K; */ - if (K == 0) - break; - else if (K == 1) - EMIT2(0xd1, 0xe8); /* shr %eax */ - else - EMIT3(0xc1, 0xe8, K); - break; - case BPF_S_ALU_NEG: - EMIT2(0xf7, 0xd8); /* neg %eax */ - break; - case BPF_S_RET_K: - if (!K) { - if (pc_ret0 == -1) - pc_ret0 = i; - CLEAR_A(); - } else { - EMIT1_off32(0xb8, K); /* mov $imm32,%eax */ - } - /* fallinto */ - case BPF_S_RET_A: - if (seen_or_pass0) { - if (i != flen - 1) { - EMIT_JMP(cleanup_addr - addrs[i]); - break; - } - if (seen_or_pass0 & SEEN_XREG) - EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */ - EMIT1(0xc9); /* leaveq */ - } - EMIT1(0xc3); /* ret */ - break; - case BPF_S_MISC_TAX: /* X = A */ - seen |= SEEN_XREG; - EMIT2(0x89, 0xc3); /* mov %eax,%ebx */ - break; - case BPF_S_MISC_TXA: /* A = X */ - seen |= SEEN_XREG; - EMIT2(0x89, 0xd8); /* mov %ebx,%eax */ - break; - case BPF_S_LD_IMM: /* A = K */ - if (!K) - CLEAR_A(); - else - EMIT1_off32(0xb8, K); /* mov $imm32,%eax */ - break; - case BPF_S_LDX_IMM: /* X = K */ - seen |= SEEN_XREG; - if (!K) - CLEAR_X(); + /* shifts */ + case BPF_ALU | BPF_LSH | BPF_K: + case BPF_ALU | BPF_RSH | BPF_K: + case BPF_ALU | BPF_ARSH | BPF_K: + case BPF_ALU64 | BPF_LSH | BPF_K: + case BPF_ALU64 | BPF_RSH | BPF_K: + case BPF_ALU64 | BPF_ARSH | BPF_K: + if (BPF_CLASS(insn->code) == BPF_ALU64) + EMIT1(add_1mod(0x48, a_reg)); + else if (is_ereg(a_reg)) + EMIT1(add_1mod(0x40, a_reg)); + + switch (BPF_OP(insn->code)) { + case BPF_LSH: b3 = 0xE0; break; + case BPF_RSH: b3 = 0xE8; break; + case BPF_ARSH: b3 = 0xF8; break; + } + EMIT3(0xC1, add_1reg(b3, a_reg), K); + break; + + case BPF_ALU | BPF_END | BPF_FROM_BE: + switch (K) { + case 16: + /* emit 'ror %ax, 8' to swap lower 2 bytes */ + EMIT1(0x66); + if (is_ereg(a_reg)) + EMIT1(0x41); + EMIT3(0xC1, add_1reg(0xC8, a_reg), 8); + break; + case 32: + /* emit 'bswap eax' to swap lower 4 bytes */ + if (is_ereg(a_reg)) + EMIT2(0x41, 0x0F); else - EMIT1_off32(0xbb, K); /* mov $imm32,%ebx */ - break; - case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */ - seen |= SEEN_MEM; - EMIT3(0x8b, 0x45, 0xf0 - K*4); - break; - case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */ - seen |= SEEN_XREG | SEEN_MEM; - EMIT3(0x8b, 0x5d, 0xf0 - K*4); - break; - case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */ - seen |= SEEN_MEM; - EMIT3(0x89, 0x45, 0xf0 - K*4); - break; - case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */ - seen |= SEEN_XREG | SEEN_MEM; - EMIT3(0x89, 0x5d, 0xf0 - K*4); - break; - case BPF_S_LD_W_LEN: /* A = skb->len; */ - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); - if (is_imm8(offsetof(struct sk_buff, len))) - /* mov off8(%rdi),%eax */ - EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len)); - else { - EMIT2(0x8b, 0x87); - EMIT(offsetof(struct sk_buff, len), 4); - } - break; - case BPF_S_LDX_W_LEN: /* X = skb->len; */ - seen |= SEEN_XREG; - if (is_imm8(offsetof(struct sk_buff, len))) - /* mov off8(%rdi),%ebx */ - EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len)); - else { - EMIT2(0x8b, 0x9f); - EMIT(offsetof(struct sk_buff, len), 4); - } - break; - case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */ - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); - if (is_imm8(offsetof(struct sk_buff, protocol))) { - /* movzwl 
off8(%rdi),%eax */ - EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol)); - } else { - EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */ - EMIT(offsetof(struct sk_buff, protocol), 4); - } - EMIT2(0x86, 0xc4); /* ntohs() : xchg %al,%ah */ - break; - case BPF_S_ANC_IFINDEX: - if (is_imm8(offsetof(struct sk_buff, dev))) { - /* movq off8(%rdi),%rax */ - EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev)); - } else { - EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */ - EMIT(offsetof(struct sk_buff, dev), 4); - } - EMIT3(0x48, 0x85, 0xc0); /* test %rax,%rax */ - EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6)); - BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); - EMIT2(0x8b, 0x80); /* mov off32(%rax),%eax */ - EMIT(offsetof(struct net_device, ifindex), 4); + EMIT1(0x0F); + EMIT1(add_1reg(0xC8, a_reg)); break; - case BPF_S_ANC_MARK: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); - if (is_imm8(offsetof(struct sk_buff, mark))) { - /* mov off8(%rdi),%eax */ - EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark)); - } else { - EMIT2(0x8b, 0x87); - EMIT(offsetof(struct sk_buff, mark), 4); - } - break; - case BPF_S_ANC_RXHASH: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); - if (is_imm8(offsetof(struct sk_buff, hash))) { - /* mov off8(%rdi),%eax */ - EMIT3(0x8b, 0x47, offsetof(struct sk_buff, hash)); - } else { - EMIT2(0x8b, 0x87); - EMIT(offsetof(struct sk_buff, hash), 4); - } - break; - case BPF_S_ANC_QUEUE: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); - if (is_imm8(offsetof(struct sk_buff, queue_mapping))) { - /* movzwl off8(%rdi),%eax */ - EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping)); - } else { - EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */ - EMIT(offsetof(struct sk_buff, queue_mapping), 4); - } - break; - case BPF_S_ANC_CPU: - #ifdef CONFIG_SMP - EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */ - EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */ - #else - CLEAR_A(); - #endif - break; - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); - if (is_imm8(offsetof(struct sk_buff, vlan_tci))) { - /* movzwl off8(%rdi),%eax */ - EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, vlan_tci)); - } else { - EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */ - EMIT(offsetof(struct sk_buff, vlan_tci), 4); - } - BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); - if (filter[i].code == BPF_S_ANC_VLAN_TAG) { - EMIT3(0x80, 0xe4, 0xef); /* and $0xef,%ah */ - } else { - EMIT3(0xc1, 0xe8, 0x0c); /* shr $0xc,%eax */ - EMIT3(0x83, 0xe0, 0x01); /* and $0x1,%eax */ - } - break; - case BPF_S_ANC_PKTTYPE: - { - int off = pkt_type_offset(); - - if (off < 0) - goto out; - if (is_imm8(off)) { - /* movzbl off8(%rdi),%eax */ - EMIT4(0x0f, 0xb6, 0x47, off); - } else { - /* movbl off32(%rdi),%eax */ - EMIT3(0x0f, 0xb6, 0x87); - EMIT(off, 4); - } - EMIT3(0x83, 0xe0, PKT_TYPE_MAX); /* and $0x7,%eax */ + case 64: + /* emit 'bswap rax' to swap 8 bytes */ + EMIT3(add_1mod(0x48, a_reg), 0x0F, + add_1reg(0xC8, a_reg)); break; } - case BPF_S_LD_W_ABS: - func = CHOOSE_LOAD_FUNC(K, sk_load_word); - common_load: seen |= SEEN_DATAREF; - t_offset = func - (image + addrs[i]); - EMIT1_off32(0xbe, K); /* mov imm32,%esi */ - EMIT1_off32(0xe8, t_offset); /* call */ - break; - case BPF_S_LD_H_ABS: - func = CHOOSE_LOAD_FUNC(K, sk_load_half); - goto common_load; - case BPF_S_LD_B_ABS: - func = CHOOSE_LOAD_FUNC(K, sk_load_byte); - goto common_load; - case 
BPF_S_LDX_B_MSH: - func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh); - seen |= SEEN_DATAREF | SEEN_XREG; - t_offset = func - (image + addrs[i]); - EMIT1_off32(0xbe, K); /* mov imm32,%esi */ - EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */ - break; - case BPF_S_LD_W_IND: - func = sk_load_word; - common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; - t_offset = func - (image + addrs[i]); - if (K) { - if (is_imm8(K)) { - EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */ - } else { - EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */ - EMIT(K, 4); - } - } else { - EMIT2(0x89,0xde); /* mov %ebx,%esi */ - } - EMIT1_off32(0xe8, t_offset); /* call sk_load_xxx_ind */ - break; - case BPF_S_LD_H_IND: - func = sk_load_half; - goto common_load_ind; - case BPF_S_LD_B_IND: - func = sk_load_byte; - goto common_load_ind; - case BPF_S_JMP_JA: - t_offset = addrs[i + K] - addrs[i]; - EMIT_JMP(t_offset); - break; - COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE); - COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB); - COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE); - COND_SEL(BPF_S_JMP_JSET_K,X86_JNE, X86_JE); - COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE); - COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB); - COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE); - COND_SEL(BPF_S_JMP_JSET_X,X86_JNE, X86_JE); - - cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; - t_offset = addrs[i + filter[i].jt] - addrs[i]; - - /* same targets, can avoid doing the test :) */ - if (filter[i].jt == filter[i].jf) { - EMIT_JMP(t_offset); - break; - } + break; + + case BPF_ALU | BPF_END | BPF_FROM_LE: + break;
- switch (filter[i].code) { - case BPF_S_JMP_JGT_X: - case BPF_S_JMP_JGE_X: - case BPF_S_JMP_JEQ_X: - seen |= SEEN_XREG; - EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */ - break; - case BPF_S_JMP_JSET_X: - seen |= SEEN_XREG; - EMIT2(0x85, 0xd8); /* test %ebx,%eax */ - break; - case BPF_S_JMP_JEQ_K: - if (K == 0) { - EMIT2(0x85, 0xc0); /* test %eax,%eax */ - break; - } - case BPF_S_JMP_JGT_K: - case BPF_S_JMP_JGE_K: - if (K <= 127) - EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */ + /* ST: *(u8*)(a_reg + off) = imm */ + case BPF_ST | BPF_MEM | BPF_B: + if (is_ereg(a_reg)) + EMIT2(0x41, 0xC6); + else + EMIT1(0xC6); + goto st; + case BPF_ST | BPF_MEM | BPF_H: + if (is_ereg(a_reg)) + EMIT3(0x66, 0x41, 0xC7); + else + EMIT2(0x66, 0xC7); + goto st; + case BPF_ST | BPF_MEM | BPF_W: + if (is_ereg(a_reg)) + EMIT2(0x41, 0xC7); + else + EMIT1(0xC7); + goto st; + case BPF_ST | BPF_MEM | BPF_DW: + EMIT2(add_1mod(0x48, a_reg), 0xC7); + + st: if (is_imm8(insn->off)) + EMIT2(add_1reg(0x40, a_reg), insn->off); + else + EMIT1_off32(add_1reg(0x80, a_reg), insn->off); + + EMIT(K, bpf_size_to_x86_bytes(BPF_SIZE(insn->code))); + break; + + /* STX: *(u8*)(a_reg + off) = x_reg */ + case BPF_STX | BPF_MEM | BPF_B: + /* emit 'mov byte ptr [rax + off], al' */ + if (is_ereg(a_reg) || is_ereg(x_reg) || + /* have to add extra byte for x86 SIL, DIL regs */ + x_reg == BPF_REG_1 || x_reg == BPF_REG_2) + EMIT2(add_2mod(0x40, a_reg, x_reg), 0x88); + else + EMIT1(0x88); + goto stx; + case BPF_STX | BPF_MEM | BPF_H: + if (is_ereg(a_reg) || is_ereg(x_reg)) + EMIT3(0x66, add_2mod(0x40, a_reg, x_reg), 0x89); + else + EMIT2(0x66, 0x89); + goto stx; + case BPF_STX | BPF_MEM | BPF_W: + if (is_ereg(a_reg) || is_ereg(x_reg)) + EMIT2(add_2mod(0x40, a_reg, x_reg), 0x89); + else + EMIT1(0x89); + goto stx; + case BPF_STX | BPF_MEM | BPF_DW: + EMIT2(add_2mod(0x48, a_reg, x_reg), 0x89); + stx: if (is_imm8(insn->off)) + EMIT2(add_2reg(0x40, a_reg, x_reg), insn->off); + else + EMIT1_off32(add_2reg(0x80, a_reg, x_reg), + insn->off); + break; + + /* LDX: a_reg = *(u8*)(x_reg + off) */ + case BPF_LDX | BPF_MEM | BPF_B: + /* emit 'movzx rax, byte ptr [rax + off]' */ + EMIT3(add_2mod(0x48, x_reg, a_reg), 0x0F, 0xB6); + goto ldx; + case BPF_LDX | BPF_MEM | BPF_H: + /* emit 'movzx rax, word ptr [rax + off]' */ + EMIT3(add_2mod(0x48, x_reg, a_reg), 0x0F, 0xB7); + goto ldx; + case BPF_LDX | BPF_MEM | BPF_W: + /* emit 'mov eax, dword ptr [rax+0x14]' */ + if (is_ereg(a_reg) || is_ereg(x_reg)) + EMIT2(add_2mod(0x40, x_reg, a_reg), 0x8B); + else + EMIT1(0x8B); + goto ldx; + case BPF_LDX | BPF_MEM | BPF_DW: + /* emit 'mov rax, qword ptr [rax+0x14]' */ + EMIT2(add_2mod(0x48, x_reg, a_reg), 0x8B); + ldx: /* if insn->off == 0 we can save one extra byte, but + * special case of x86 r13 which always needs an offset + * is not worth the hassle + */ + if (is_imm8(insn->off)) + EMIT2(add_2reg(0x40, x_reg, a_reg), insn->off); + else + EMIT1_off32(add_2reg(0x80, x_reg, a_reg), + insn->off); + break; + + /* STX XADD: lock *(u32*)(a_reg + off) += x_reg */ + case BPF_STX | BPF_XADD | BPF_W: + /* emit 'lock add dword ptr [rax + off], eax' */ + if (is_ereg(a_reg) || is_ereg(x_reg)) + EMIT3(0xF0, add_2mod(0x40, a_reg, x_reg), 0x01); + else + EMIT2(0xF0, 0x01); + goto xadd; + case BPF_STX | BPF_XADD | BPF_DW: + EMIT3(0xF0, add_2mod(0x48, a_reg, x_reg), 0x01); + xadd: if (is_imm8(insn->off)) + EMIT2(add_2reg(0x40, a_reg, x_reg), insn->off); + else + EMIT1_off32(add_2reg(0x80, a_reg, x_reg), + insn->off); + break; + + /* call */ + case BPF_JMP | BPF_CALL: + func = (u8 *) __bpf_call_base 
+ K; + jmp_offset = func - (image + addrs[i]); + if (ctx->seen_ld_abs) { + EMIT2(0x41, 0x52); /* push %r10 */ + EMIT2(0x41, 0x51); /* push %r9 */ + /* need to adjust jmp offset, since + * pop %r9, pop %r10 take 4 bytes after call insn + */ + jmp_offset += 4; + } + if (!K || !is_simm32(jmp_offset)) { + pr_err("unsupported bpf func %d addr %p image %p\n", + K, func, image); + return -EINVAL; + } + EMIT1_off32(0xE8, jmp_offset); + if (ctx->seen_ld_abs) { + EMIT2(0x41, 0x59); /* pop %r9 */ + EMIT2(0x41, 0x5A); /* pop %r10 */ + } + break; + + /* cond jump */ + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_X: + /* cmp a_reg, x_reg */ + EMIT3(add_2mod(0x48, a_reg, x_reg), 0x39, + add_2reg(0xC0, a_reg, x_reg)); + goto emit_cond_jmp; + + case BPF_JMP | BPF_JSET | BPF_X: + /* test a_reg, x_reg */ + EMIT3(add_2mod(0x48, a_reg, x_reg), 0x85, + add_2reg(0xC0, a_reg, x_reg)); + goto emit_cond_jmp; + + case BPF_JMP | BPF_JSET | BPF_K: + /* test a_reg, imm32 */ + EMIT1(add_1mod(0x48, a_reg)); + EMIT2_off32(0xF7, add_1reg(0xC0, a_reg), K); + goto emit_cond_jmp; + + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_K: + /* cmp a_reg, imm8/32 */ + EMIT1(add_1mod(0x48, a_reg)); + + if (is_imm8(K)) + EMIT3(0x83, add_1reg(0xF8, a_reg), K); + else + EMIT2_off32(0x81, add_1reg(0xF8, a_reg), K); + + emit_cond_jmp: /* convert BPF opcode to x86 */ + switch (BPF_OP(insn->code)) { + case BPF_JEQ: + jmp_cond = X86_JE; + break; + case BPF_JSET: + case BPF_JNE: + jmp_cond = X86_JNE; + break; + case BPF_JGT: + /* GT is unsigned '>', JA in x86 */ + jmp_cond = X86_JA; + break; + case BPF_JGE: + /* GE is unsigned '>=', JAE in x86 */ + jmp_cond = X86_JAE; + break; + case BPF_JSGT: + /* signed '>', GT in x86 */ + jmp_cond = X86_JG; + break; + case BPF_JSGE: + /* signed '>=', GE in x86 */ + jmp_cond = X86_JGE; + break; + default: /* to silence gcc warning */ + return -EFAULT; + } + jmp_offset = addrs[i + insn->off] - addrs[i]; + if (is_imm8(jmp_offset)) { + EMIT2(jmp_cond, jmp_offset); + } else if (is_simm32(jmp_offset)) { + EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset); + } else { + pr_err("cond_jmp gen bug %llx\n", jmp_offset); + return -EFAULT; + } + + break; + + case BPF_JMP | BPF_JA: + jmp_offset = addrs[i + insn->off] - addrs[i]; + if (!jmp_offset) + /* optimize out nop jumps */ + break; + emit_jmp: + if (is_imm8(jmp_offset)) { + EMIT2(0xEB, jmp_offset); + } else if (is_simm32(jmp_offset)) { + EMIT1_off32(0xE9, jmp_offset); + } else { + pr_err("jmp gen bug %llx\n", jmp_offset); + return -EFAULT; + } + break; + + case BPF_LD | BPF_IND | BPF_W: + func = sk_load_word; + goto common_load; + case BPF_LD | BPF_ABS | BPF_W: + func = CHOOSE_LOAD_FUNC(K, sk_load_word); + common_load: ctx->seen_ld_abs = true; + jmp_offset = func - (image + addrs[i]); + if (!func || !is_simm32(jmp_offset)) { + pr_err("unsupported bpf func %d addr %p image %p\n", + K, func, image); + return -EINVAL; + } + if (BPF_MODE(insn->code) == BPF_ABS) { + /* mov %esi, imm32 */ + EMIT1_off32(0xBE, K); + } else { + /* mov %rsi, x_reg */ + EMIT_mov(BPF_REG_2, x_reg); + if (K) { + if (is_imm8(K)) + /* add %esi, imm8 */ + EMIT3(0x83, 0xC6, K); else - EMIT1_off32(0x3d, K); /* cmp imm32,%eax */ - break; - case BPF_S_JMP_JSET_K: - if (K <= 0xFF) - EMIT2(0xa8, K); /* 
test imm8,%al */ - else if (!(K & 0xFFFF00FF)) - EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */ - else if (K <= 0xFFFF) { - EMIT2(0x66, 0xa9); /* test imm16,%ax */ - EMIT(K, 2); - } else { - EMIT1_off32(0xa9, K); /* test imm32,%eax */ - } - break; + /* add %esi, imm32 */ + EMIT2_off32(0x81, 0xC6, K); } - if (filter[i].jt != 0) { - if (filter[i].jf && f_offset) - t_offset += is_near(f_offset) ? 2 : 5; - EMIT_COND_JMP(t_op, t_offset); - if (filter[i].jf) - EMIT_JMP(f_offset); - break; - } - EMIT_COND_JMP(f_op, f_offset); - break; - default: - /* hmm, too complex filter, give up with jit compiler */ - goto out; } - ilen = prog - temp; - if (image) { - if (unlikely(proglen + ilen > oldproglen)) { - pr_err("bpb_jit_compile fatal error\n"); - kfree(addrs); - module_free(NULL, header); - return; - } - memcpy(image + proglen, temp, ilen); + /* skb pointer is in R6 (%rbx), it will be copied into + * %rdi if skb_copy_bits() call is necessary. + * sk_load_* helpers also use %r10 and %r9d. + * See bpf_jit.S + */ + EMIT1_off32(0xE8, jmp_offset); /* call */ + break; + + case BPF_LD | BPF_IND | BPF_H: + func = sk_load_half; + goto common_load; + case BPF_LD | BPF_ABS | BPF_H: + func = CHOOSE_LOAD_FUNC(K, sk_load_half); + goto common_load; + case BPF_LD | BPF_IND | BPF_B: + func = sk_load_byte; + goto common_load; + case BPF_LD | BPF_ABS | BPF_B: + func = CHOOSE_LOAD_FUNC(K, sk_load_byte); + goto common_load; + + case BPF_JMP | BPF_EXIT: + if (i != insn_cnt - 1) { + jmp_offset = ctx->cleanup_addr - addrs[i]; + goto emit_jmp; } - proglen += ilen; - addrs[i] = proglen; - prog = temp; + /* update cleanup_addr */ + ctx->cleanup_addr = proglen; + /* mov rbx, qword ptr [rbp-X] */ + EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize); + /* mov r13, qword ptr [rbp-X] */ + EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8); + /* mov r14, qword ptr [rbp-X] */ + EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16); + /* mov r15, qword ptr [rbp-X] */ + EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24); + + EMIT1(0xC9); /* leave */ + EMIT1(0xC3); /* ret */ + break; + + default: + /* By design x64 JIT should support all BPF instructions + * This error will be seen if new instruction was added + * to interpreter, but not to JIT + * or if there is junk in sk_filter + */ + pr_err("bpf_jit: unknown opcode %02x\n", insn->code); + return -EINVAL; } - /* last bpf instruction is always a RET : - * use it to give the cleanup instruction(s) addr - */ - cleanup_addr = proglen - 1; /* ret */ - if (seen_or_pass0) - cleanup_addr -= 1; /* leaveq */ - if (seen_or_pass0 & SEEN_XREG) - cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
+ ilen = prog - temp; + if (image) { + if (unlikely(proglen + ilen > oldproglen)) { + pr_err("bpf_jit_compile fatal error\n"); + return -EFAULT; + } + memcpy(image + proglen, temp, ilen); + } + proglen += ilen; + addrs[i] = proglen; + prog = temp; + } + return proglen; + } + + void bpf_jit_compile(struct sk_filter *prog) + { + } + + void bpf_int_jit_compile(struct sk_filter *prog) + { + struct bpf_binary_header *header = NULL; + int proglen, oldproglen = 0; + struct jit_context ctx = {}; + u8 *image = NULL; + int *addrs; + int pass; + int i; + + if (!bpf_jit_enable) + return; + + if (!prog || !prog->len) + return; + + addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL); + if (!addrs) + return; + + /* Before first pass, make a rough estimation of addrs[] + * each bpf instruction is translated to less than 64 bytes + */ + for (proglen = 0, i = 0; i < prog->len; i++) { + proglen += 64; + addrs[i] = proglen; + } + ctx.cleanup_addr = proglen; + + for (pass = 0; pass < 10; pass++) { + proglen = do_jit(prog, addrs, image, oldproglen, &ctx); + if (proglen <= 0) { + image = NULL; + if (header) + module_free(NULL, header); + goto out; + } if (image) { if (proglen != oldproglen) - pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen); + pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", + proglen, oldproglen); break; } if (proglen == oldproglen) { @@@ -766,17 -918,16 +918,16 @@@ }
 	if (bpf_jit_enable > 1)
- 		bpf_jit_dump(flen, proglen, pass, image);
+ 		bpf_jit_dump(prog->len, proglen, 0, image);
 	if (image) {
 		bpf_flush_icache(header, image + proglen);
 		set_memory_ro((unsigned long)header, header->pages);
-		fp->bpf_func = (void *)image;
-		fp->jited = 1;
+		prog->bpf_func = (void *)image;
+		prog->jited = 1;
 	}
 out:
 	kfree(addrs);
- 	return;
 }
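bpf_int_jit_compile() above sizes the image by fixed-point iteration: pass 0 assumes a worst case of 64 bytes per instruction, then do_jit() is re-run until the computed proglen stops changing, at which point all jump offsets are stable and the final image can be allocated. A compact sketch of that convergence loop, with a toy emit_all() standing in for do_jit():

#include <stdio.h>

#define NINSNS 4

/* Stand-in for do_jit(): pretend instruction i shrinks to 2 bytes when
 * its offset in addrs[] is small enough for a short encoding. */
static int emit_all(int *addrs)
{
	int proglen = 0;

	for (int i = 0; i < NINSNS; i++) {
		proglen += (addrs[i] < 128) ? 2 : 5;
		addrs[i] = proglen;	/* refine the offset table */
	}
	return proglen;
}

int main(void)
{
	int addrs[NINSNS], proglen, oldproglen = 0;

	/* pass 0: assume the worst case of 64 bytes per instruction */
	for (int i = 0, p = 0; i < NINSNS; i++)
		addrs[i] = (p += 64);

	for (int pass = 0; pass < 10; pass++) {
		proglen = emit_all(addrs);
		printf("pass %d: %d bytes\n", pass, proglen);
		if (proglen == oldproglen)
			break;		/* sizes converged; safe to allocate */
		oldproglen = proglen;
	}
	return 0;
}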
 static void bpf_jit_free_deferred(struct work_struct *work)
diff --combined drivers/net/bonding/bond_alb.c
index e538478,03e0bca..9d42c61
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@@ -228,7 -228,7 +228,7 @@@ static struct slave *tlb_get_least_load
 	/* Find the slave with the largest gap */
 	bond_for_each_slave_rcu(bond, slave, iter) {
-		if (SLAVE_IS_OK(slave)) {
+		if (bond_slave_can_tx(slave)) {
 			long long gap = compute_gap(slave);
 			if (max_gap < gap) {
@@@ -383,7 -383,7 +383,7 @@@ static struct slave *rlb_next_rx_slave(
 	bool found = false;
 	bond_for_each_slave(bond, slave, iter) {
-		if (!SLAVE_IS_OK(slave))
+		if (!bond_slave_can_tx(slave))
 			continue;
 		if (!found) {
 			if (!before || before->speed < slave->speed)
@@@ -416,7 -416,7 +416,7 @@@ static struct slave *__rlb_next_rx_slav
 	bool found = false;
 	bond_for_each_slave_rcu(bond, slave, iter) {
-		if (!SLAVE_IS_OK(slave))
+		if (!bond_slave_can_tx(slave))
 			continue;
 		if (!found) {
 			if (!before || before->speed < slave->speed)
@@@ -1045,7 -1045,7 +1045,7 @@@ static void alb_send_learning_packets(s
 	/* loop through vlans and send one packet for each */
 	rcu_read_lock();
 	netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
-		if (upper->priv_flags & IFF_802_1Q_VLAN)
+		if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0)
 			alb_send_lp_vid(slave, mac_addr,
 					vlan_dev_vlan_id(upper));
 	}
@@@ -1057,7 -1057,7 +1057,7 @@@ static int alb_set_slave_mac_addr(struc
 	struct net_device *dev = slave->dev;
 	struct sockaddr s_addr;
-	if (slave->bond->params.mode == BOND_MODE_TLB) {
+	if (BOND_MODE(slave->bond) == BOND_MODE_TLB) {
 		memcpy(dev->dev_addr, addr, dev->addr_len);
 		return 0;
 	}
@@@ -1100,13 -1100,13 +1100,13 @@@ static void alb_swap_mac_addr(struct sl
 static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
 				struct slave *slave2)
 {
-	int slaves_state_differ = (SLAVE_IS_OK(slave1) != SLAVE_IS_OK(slave2));
+	int slaves_state_differ = (bond_slave_can_tx(slave1) != bond_slave_can_tx(slave2));
 	struct slave *disabled_slave = NULL;
ASSERT_RTNL();
 	/* fasten the change in the switch */
-	if (SLAVE_IS_OK(slave1)) {
+	if (bond_slave_can_tx(slave1)) {
 		alb_send_learning_packets(slave1, slave1->dev->dev_addr);
 		if (bond->alb_info.rlb_enabled) {
 			/* inform the clients that the mac address
@@@ -1118,7 -1118,7 +1118,7 @@@
 			disabled_slave = slave1;
 	}
-	if (SLAVE_IS_OK(slave2)) {
+	if (bond_slave_can_tx(slave2)) {
 		alb_send_learning_packets(slave2, slave2->dev->dev_addr);
 		if (bond->alb_info.rlb_enabled) {
 			/* inform the clients that the mac address
@@@ -1347,6 -1347,77 +1347,77 @@@ void bond_alb_deinitialize(struct bondi
 	rlb_deinitialize(bond);
 }
+ static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
+ 			    struct slave *tx_slave)
+ {
+ 	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+ 	struct ethhdr *eth_data = eth_hdr(skb);
+ 
+ 	if (!tx_slave) {
+ 		/* unbalanced or unassigned, send through primary */
+ 		tx_slave = rcu_dereference(bond->curr_active_slave);
+ 		if (bond->params.tlb_dynamic_lb)
+ 			bond_info->unbalanced_load += skb->len;
+ 	}
+ 
+ 	if (tx_slave && bond_slave_can_tx(tx_slave)) {
+ 		if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
+ 			ether_addr_copy(eth_data->h_source,
+ 					tx_slave->dev->dev_addr);
+ 		}
+ 
+ 		bond_dev_queue_xmit(bond, skb, tx_slave->dev);
+ 		goto out;
+ 	}
+ 
+ 	if (tx_slave && bond->params.tlb_dynamic_lb) {
+ 		_lock_tx_hashtbl(bond);
+ 		__tlb_clear_slave(bond, tx_slave, 0);
+ 		_unlock_tx_hashtbl(bond);
+ 	}
+ 
+ 	/* no suitable interface, frame not sent */
+ 	dev_kfree_skb_any(skb);
+ out:
+ 	return NETDEV_TX_OK;
+ }
+ 
+ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
+ {
+ 	struct bonding *bond = netdev_priv(bond_dev);
+ 	struct ethhdr *eth_data;
+ 	struct slave *tx_slave = NULL;
+ 	u32 hash_index;
+ 
+ 	skb_reset_mac_header(skb);
+ 	eth_data = eth_hdr(skb);
+ 
+ 	/* Do not TX balance any multicast or broadcast */
+ 	if (!is_multicast_ether_addr(eth_data->h_dest)) {
+ 		switch (skb->protocol) {
+ 		case htons(ETH_P_IP):
+ 		case htons(ETH_P_IPX):
+ 		    /* In case of IPX, it will falback to L2 hash */
+ 		case htons(ETH_P_IPV6):
+ 			hash_index = bond_xmit_hash(bond, skb);
+ 			if (bond->params.tlb_dynamic_lb) {
+ 				tx_slave = tlb_choose_channel(bond,
+ 							      hash_index & 0xFF,
+ 							      skb->len);
+ 			} else {
+ 				struct list_head *iter;
+ 				int idx = hash_index % bond->slave_cnt;
+ 
+ 				bond_for_each_slave_rcu(bond, tx_slave, iter)
+ 					if (--idx < 0)
+ 						break;
+ 			}
+ 			break;
+ 		}
+ 	}
+ 	return bond_do_alb_xmit(skb, bond, tx_slave);
+ }
+ 
 int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
@@@ -1355,7 -1426,7 +1426,7 @@@
 	struct slave *tx_slave = NULL;
 	static const __be32 ip_bcast = htonl(0xffffffff);
 	int hash_size = 0;
-	int do_tx_balance = 1;
+	bool do_tx_balance = true;
 	u32 hash_index = 0;
 	const u8 *hash_start = NULL;
 	struct ipv6hdr *ip6hdr;
@@@ -1370,7 -1441,7 +1441,7 @@@
 		if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) ||
 		    (iph->daddr == ip_bcast) ||
 		    (iph->protocol == IPPROTO_IGMP)) {
-			do_tx_balance = 0;
+			do_tx_balance = false;
 			break;
 		}
 		hash_start = (char *)&(iph->daddr);
@@@ -1382,7 -1453,7 +1453,7 @@@
 		 * that here just in case.
 		 */
 		if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast)) {
-			do_tx_balance = 0;
+			do_tx_balance = false;
 			break;
 		}
@@@ -1390,7 -1461,7 +1461,7 @@@ * broadcasts in IPv4. */ if (ether_addr_equal_64bits(eth_data->h_dest, mac_v6_allmcast)) { - do_tx_balance = 0; + do_tx_balance = false; break; }
@@@ -1400,7 -1471,7 +1471,7 @@@ */ ip6hdr = ipv6_hdr(skb); if (ipv6_addr_any(&ip6hdr->saddr)) { - do_tx_balance = 0; + do_tx_balance = false; break; }
@@@ -1410,7 -1481,7 +1481,7 @@@ case ETH_P_IPX: if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) { /* something is wrong with this packet */ - do_tx_balance = 0; + do_tx_balance = false; break; }
@@@ -1419,7 -1490,7 +1490,7 @@@ * this family since it has an "ARP" like * mechanism */ - do_tx_balance = 0; + do_tx_balance = false; break; }
@@@ -1427,12 -1498,12 +1498,12 @@@ hash_size = ETH_ALEN; break; case ETH_P_ARP: - do_tx_balance = 0; + do_tx_balance = false; if (bond_info->rlb_enabled) tx_slave = rlb_arp_xmit(skb, bond); break; default: - do_tx_balance = 0; + do_tx_balance = false; break; }
@@@ -1441,32 -1512,7 +1512,7 @@@ tx_slave = tlb_choose_channel(bond, hash_index, skb->len); }
- if (!tx_slave) { - /* unbalanced or unassigned, send through primary */ - tx_slave = rcu_dereference(bond->curr_active_slave); - bond_info->unbalanced_load += skb->len; - } - - if (tx_slave && SLAVE_IS_OK(tx_slave)) { - if (tx_slave != rcu_dereference(bond->curr_active_slave)) { - ether_addr_copy(eth_data->h_source, - tx_slave->dev->dev_addr); - } - - bond_dev_queue_xmit(bond, skb, tx_slave->dev); - goto out; - } - - if (tx_slave) { - _lock_tx_hashtbl(bond); - __tlb_clear_slave(bond, tx_slave, 0); - _unlock_tx_hashtbl(bond); - } - - /* no suitable interface, frame not sent */ - dev_kfree_skb_any(skb); - out: - return NETDEV_TX_OK; + return bond_do_alb_xmit(skb, bond, tx_slave); }
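The refactor above splits the common tail of bond_alb_xmit() into bond_do_alb_xmit(), which the new bond_tlb_xmit() shares. In balance-tlb with tlb_dynamic_lb disabled, each flow is pinned to a slave by reducing the packet hash modulo the slave count rather than consulting the load-tracking hash table. A minimal user-space sketch of that static pinning follows; struct slave and pick_tx_slave() here are illustrative stand-ins, not the kernel's types:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the non-dynamic TLB selection: the flow hash is reduced
 * modulo the slave count, so a given flow always lands on the same
 * slave as long as the slave set is stable. */
struct slave { const char *name; };

static const struct slave *pick_tx_slave(const struct slave *slaves,
					 unsigned int slave_cnt,
					 uint32_t hash_index)
{
	/* mirrors "hash_index % bond->slave_cnt" above */
	return &slaves[hash_index % slave_cnt];
}

int main(void)
{
	const struct slave slaves[] = { { "eth0" }, { "eth1" }, { "eth2" } };

	printf("%s\n", pick_tx_slave(slaves, 3, 0x1234abcdu)->name); /* flow A */
	printf("%s\n", pick_tx_slave(slaves, 3, 0x00000005u)->name); /* flow B */
	return 0;
}

Pinning by hash keeps per-flow ordering and avoids the periodic rebalancing work, at the cost of ignoring the measured per-slave load.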
void bond_alb_monitor(struct work_struct *work) @@@ -1699,7 -1745,7 +1745,7 @@@ void bond_alb_handle_active_change(stru /* in TLB mode, the slave might flip down/up with the old dev_addr, * and thus filter bond->dev_addr's packets, so force bond's mac */ - if (bond->params.mode == BOND_MODE_TLB) { + if (BOND_MODE(bond) == BOND_MODE_TLB) { struct sockaddr sa; u8 tmp_addr[ETH_ALEN];
diff --combined drivers/net/bonding/bond_main.c index d3a6789,7123205..da9afee --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@@ -343,7 -343,7 +343,7 @@@ static int bond_set_carrier(struct bond if (!bond_has_slaves(bond)) goto down;
- if (bond->params.mode == BOND_MODE_8023AD) + if (BOND_MODE(bond) == BOND_MODE_8023AD) return bond_3ad_set_carrier(bond);
bond_for_each_slave(bond, slave, iter) { @@@ -497,7 -497,7 +497,7 @@@ static int bond_set_promiscuity(struct struct list_head *iter; int err = 0;
- if (USES_PRIMARY(bond->params.mode)) { + if (bond_uses_primary(bond)) { /* write lock already acquired */ if (bond->curr_active_slave) { err = dev_set_promiscuity(bond->curr_active_slave->dev, @@@ -523,7 -523,7 +523,7 @@@ static int bond_set_allmulti(struct bon struct list_head *iter; int err = 0;
- if (USES_PRIMARY(bond->params.mode)) { + if (bond_uses_primary(bond)) { /* write lock already acquired */ if (bond->curr_active_slave) { err = dev_set_allmulti(bond->curr_active_slave->dev, @@@ -574,7 -574,7 +574,7 @@@ static void bond_hw_addr_flush(struct n dev_uc_unsync(slave_dev, bond_dev); dev_mc_unsync(slave_dev, bond_dev);
- if (bond->params.mode == BOND_MODE_8023AD) { + if (BOND_MODE(bond) == BOND_MODE_8023AD) { /* del lacpdu mc addr from mc list */ u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
@@@ -585,8 -585,8 +585,8 @@@ /*--------------------------- Active slave change ---------------------------*/
/* Update the hardware address list and promisc/allmulti for the new and - * old active slaves (if any). Modes that are !USES_PRIMARY keep all - * slaves up date at all times; only the USES_PRIMARY modes need to call + * old active slaves (if any). Modes that are not using primary keep all + * slaves up to date at all times; only the modes that use primary need to call * this function to swap these settings during a failover. */ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, @@@ -747,7 -747,7 +747,7 @@@ static struct slave *bond_find_best_sla bond_for_each_slave(bond, slave, iter) { if (slave->link == BOND_LINK_UP) return slave; - if (slave->link == BOND_LINK_BACK && IS_UP(slave->dev) && + if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) && slave->delay < mintime) { mintime = slave->delay; bestslave = slave; @@@ -801,7 -801,7 +801,7 @@@ void bond_change_active_slave(struct bo new_active->last_link_up = jiffies;
if (new_active->link == BOND_LINK_BACK) { - if (USES_PRIMARY(bond->params.mode)) { + if (bond_uses_primary(bond)) { pr_info("%s: making interface %s the new active one %d ms earlier\n", bond->dev->name, new_active->dev->name, (bond->params.updelay - new_active->delay) * bond->params.miimon); @@@ -810,20 -810,20 +810,20 @@@ new_active->delay = 0; new_active->link = BOND_LINK_UP;
- if (bond->params.mode == BOND_MODE_8023AD) + if (BOND_MODE(bond) == BOND_MODE_8023AD) bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
if (bond_is_lb(bond)) bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP); } else { - if (USES_PRIMARY(bond->params.mode)) { + if (bond_uses_primary(bond)) { pr_info("%s: making interface %s the new active one\n", bond->dev->name, new_active->dev->name); } } }
- if (USES_PRIMARY(bond->params.mode)) + if (bond_uses_primary(bond)) bond_hw_addr_swap(bond, new_active, old_active);
if (bond_is_lb(bond)) { @@@ -838,7 -838,7 +838,7 @@@ rcu_assign_pointer(bond->curr_active_slave, new_active); }
- if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) { + if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) { if (old_active) bond_set_slave_inactive_flags(old_active, BOND_SLAVE_NOTIFY_NOW); @@@ -876,8 -876,8 +876,8 @@@ * resend only if bond is brought up with the affected * bonding modes and the retransmission is enabled */ if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) && - ((USES_PRIMARY(bond->params.mode) && new_active) || - bond->params.mode == BOND_MODE_ROUNDROBIN)) { + ((bond_uses_primary(bond) && new_active) || + BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) { bond->igmp_retrans = bond->params.resend_igmp; queue_delayed_work(bond->wq, &bond->mcast_work, 1); } @@@ -958,7 -958,7 +958,7 @@@ static void bond_netpoll_cleanup(struc struct slave *slave;
bond_for_each_slave(bond, slave, iter) - if (IS_UP(slave->dev)) + if (bond_slave_is_up(slave)) slave_disable_netpoll(slave); }
@@@ -1084,7 -1084,7 +1084,7 @@@ static bool bond_should_deliver_exact_m struct bonding *bond) { if (bond_is_slave_inactive(slave)) { - if (bond->params.mode == BOND_MODE_ALB && + if (BOND_MODE(bond) == BOND_MODE_ALB && skb->pkt_type != PACKET_BROADCAST && skb->pkt_type != PACKET_MULTICAST) return false; @@@ -1126,7 -1126,7 +1126,7 @@@ static rx_handler_result_t bond_handle_
skb->dev = bond->dev;
- if (bond->params.mode == BOND_MODE_ALB && + if (BOND_MODE(bond) == BOND_MODE_ALB && bond->dev->priv_flags & IFF_BRIDGE_PORT && skb->pkt_type == PACKET_HOST) {
@@@ -1163,6 -1163,35 +1163,35 @@@ static void bond_upper_dev_unlink(struc rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL); }
+ static struct slave *bond_alloc_slave(struct bonding *bond) + { + struct slave *slave = NULL; + + slave = kzalloc(sizeof(struct slave), GFP_KERNEL); + if (!slave) + return NULL; + + if (BOND_MODE(bond) == BOND_MODE_8023AD) { + SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info), + GFP_KERNEL); + if (!SLAVE_AD_INFO(slave)) { + kfree(slave); + return NULL; + } + } + return slave; + } + + static void bond_free_slave(struct slave *slave) + { + struct bonding *bond = bond_get_bond_by_slave(slave); + + if (BOND_MODE(bond) == BOND_MODE_8023AD) + kfree(SLAVE_AD_INFO(slave)); + + kfree(slave); + } + /* enslave device <slave> to bond device <master> */ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) { @@@ -1269,7 -1298,7 +1298,7 @@@ if (!bond_has_slaves(bond)) { pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address\n", bond_dev->name); - if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) { + if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) { bond->params.fail_over_mac = BOND_FOM_ACTIVE; pr_warn("%s: Setting fail_over_mac to active for active-backup mode\n", bond_dev->name); @@@ -1290,11 -1319,12 +1319,12 @@@ bond->dev->addr_assign_type == NET_ADDR_RANDOM) bond_set_dev_addr(bond->dev, slave_dev);
- new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL); + new_slave = bond_alloc_slave(bond); if (!new_slave) { res = -ENOMEM; goto err_undo_flags; } + /* * Set the new_slave's queue_id to be zero. Queue ID mapping * is set via sysfs or module option if desired. @@@ -1317,7 -1347,7 +1347,7 @@@ ether_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr);
if (!bond->params.fail_over_mac || - bond->params.mode != BOND_MODE_ACTIVEBACKUP) { + BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { /* * Set slave to master's mac address. The application already * set the master's mac address to that of the first slave @@@ -1351,10 -1381,10 +1381,10 @@@ goto err_close; }
- /* If the mode USES_PRIMARY, then the following is handled by + /* If the mode uses primary, then the following is handled by * bond_change_active_slave(). */ - if (!USES_PRIMARY(bond->params.mode)) { + if (!bond_uses_primary(bond)) { /* set promiscuity level to new slave */ if (bond_dev->flags & IFF_PROMISC) { res = dev_set_promiscuity(slave_dev, 1); @@@ -1377,7 -1407,7 +1407,7 @@@ netif_addr_unlock_bh(bond_dev); }
- if (bond->params.mode == BOND_MODE_8023AD) { + if (BOND_MODE(bond) == BOND_MODE_8023AD) { /* add lacpdu mc addr to mc list */ u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
@@@ -1450,7 -1480,7 +1480,7 @@@ new_slave->link == BOND_LINK_DOWN ? "DOWN" : (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
- if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) { + if (bond_uses_primary(bond) && bond->params.primary[0]) { /* if there is a primary slave, remember it */ if (strcmp(bond->params.primary, new_slave->dev->name) == 0) { bond->primary_slave = new_slave; @@@ -1458,7 -1488,7 +1488,7 @@@ } }
- switch (bond->params.mode) { + switch (BOND_MODE(bond)) { case BOND_MODE_ACTIVEBACKUP: bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW); @@@ -1471,14 -1501,14 +1501,14 @@@ bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW); /* if this is the first slave */ if (!prev_slave) { - SLAVE_AD_INFO(new_slave).id = 1; + SLAVE_AD_INFO(new_slave)->id = 1; /* Initialize AD with the number of times that the AD timer is called in 1 second * can be called only after the mac address of the bond is set */ bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL); } else { - SLAVE_AD_INFO(new_slave).id = - SLAVE_AD_INFO(prev_slave).id + 1; + SLAVE_AD_INFO(new_slave)->id = + SLAVE_AD_INFO(prev_slave)->id + 1; }
bond_3ad_bind_slave(new_slave); @@@ -1539,7 -1569,7 +1569,7 @@@ bond_compute_features(bond); bond_set_carrier(bond);
- if (USES_PRIMARY(bond->params.mode)) { + if (bond_uses_primary(bond)) { block_netpoll_tx(); write_lock_bh(&bond->curr_slave_lock); bond_select_active_slave(bond); @@@ -1563,7 -1593,7 +1593,7 @@@ err_unregister netdev_rx_handler_unregister(slave_dev);
err_detach: - if (!USES_PRIMARY(bond->params.mode)) + if (!bond_uses_primary(bond)) bond_hw_addr_flush(bond_dev, slave_dev);
vlan_vids_del_by_dev(slave_dev, bond_dev); @@@ -1585,7 -1615,7 +1615,7 @@@ err_close
err_restore_mac: if (!bond->params.fail_over_mac || - bond->params.mode != BOND_MODE_ACTIVEBACKUP) { + BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { /* XXX TODO - fom follow mode needs to change master's * MAC if this slave's MAC is in use by the bond, or at * least print a warning. @@@ -1599,7 -1629,7 +1629,7 @@@ err_restore_mtu dev_set_mtu(slave_dev, new_slave->original_mtu);
err_free: - kfree(new_slave); + bond_free_slave(new_slave);
err_undo_flags: /* Enslave of first slave has failed and we need to fix master's mac */ @@@ -1661,7 -1691,7 +1691,7 @@@ static int __bond_release_one(struct ne write_lock_bh(&bond->lock);
/* Inform AD package of unbinding of slave. */ - if (bond->params.mode == BOND_MODE_8023AD) + if (BOND_MODE(bond) == BOND_MODE_8023AD) bond_3ad_unbind_slave(slave);
write_unlock_bh(&bond->lock); @@@ -1676,7 -1706,7 +1706,7 @@@ bond->current_arp_slave = NULL;
if (!all && (!bond->params.fail_over_mac || - bond->params.mode != BOND_MODE_ACTIVEBACKUP)) { + BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) { if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) && bond_has_slaves(bond)) pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n", @@@ -1748,10 -1778,10 +1778,10 @@@ /* must do this from outside any spinlocks */ vlan_vids_del_by_dev(slave_dev, bond_dev);
- /* If the mode USES_PRIMARY, then this cases was handled above by + /* If the mode uses primary, then this case was handled above by * bond_change_active_slave(..., NULL) */ - if (!USES_PRIMARY(bond->params.mode)) { + if (!bond_uses_primary(bond)) { /* unset promiscuity level from slave * NOTE: The NETDEV_CHANGEADDR call above may change the value * of the IFF_PROMISC flag in the bond_dev, but we need the @@@ -1775,7 -1805,7 +1805,7 @@@ dev_close(slave_dev);
if (bond->params.fail_over_mac != BOND_FOM_ACTIVE || - bond->params.mode != BOND_MODE_ACTIVEBACKUP) { + BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { /* restore original ("permanent") mac address */ ether_addr_copy(addr.sa_data, slave->perm_hwaddr); addr.sa_family = slave_dev->type; @@@ -1786,7 -1816,7 +1816,7 @@@
slave_dev->priv_flags &= ~IFF_BONDING;
- kfree(slave); + bond_free_slave(slave);
return 0; /* deletion OK */ } @@@ -1821,7 -1851,7 +1851,7 @@@ static int bond_info_query(struct net_d { struct bonding *bond = netdev_priv(bond_dev);
- info->bond_mode = bond->params.mode; + info->bond_mode = BOND_MODE(bond); info->miimon = bond->params.miimon;
info->num_slaves = bond->slave_cnt; @@@ -1877,7 -1907,7 +1907,7 @@@ static int bond_miimon_inspect(struct b if (slave->delay) { pr_info("%s: link status down for %sinterface %s, disabling it in %d ms\n", bond->dev->name, - (bond->params.mode == + (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) ? (bond_is_active_slave(slave) ? "active " : "backup ") : "", @@@ -1968,10 -1998,10 +1998,10 @@@ static void bond_miimon_commit(struct b slave->link = BOND_LINK_UP; slave->last_link_up = jiffies;
- if (bond->params.mode == BOND_MODE_8023AD) { + if (BOND_MODE(bond) == BOND_MODE_8023AD) { /* prevent it from being the active one */ bond_set_backup_slave(slave); - } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) { + } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { /* make it immediately active */ bond_set_active_slave(slave); } else if (slave != bond->primary_slave) { @@@ -1985,7 -2015,7 +2015,7 @@@ slave->duplex ? "full" : "half");
/* notify ad that the link status has changed */ - if (bond->params.mode == BOND_MODE_8023AD) + if (BOND_MODE(bond) == BOND_MODE_8023AD) bond_3ad_handle_link_change(slave, BOND_LINK_UP);
if (bond_is_lb(bond)) @@@ -2004,15 -2034,15 +2034,15 @@@
slave->link = BOND_LINK_DOWN;
- if (bond->params.mode == BOND_MODE_ACTIVEBACKUP || - bond->params.mode == BOND_MODE_8023AD) + if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP || + BOND_MODE(bond) == BOND_MODE_8023AD) bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);
pr_info("%s: link status definitely down for interface %s, disabling it\n", bond->dev->name, slave->dev->name);
- if (bond->params.mode == BOND_MODE_8023AD) + if (BOND_MODE(bond) == BOND_MODE_8023AD) bond_3ad_handle_link_change(slave, BOND_LINK_DOWN);
@@@ -2126,10 -2156,10 +2156,10 @@@ static bool bond_has_this_ip(struct bon */ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_ip, __be32 src_ip, - struct bond_vlan_tag *inner, - struct bond_vlan_tag *outer) + struct bond_vlan_tag *tags) { struct sk_buff *skb; + int i;
pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n", arp_op, slave_dev->name, &dest_ip, &src_ip); @@@ -2141,26 -2171,21 +2171,26 @@@ net_err_ratelimited("ARP packet allocation failed\n"); return; } - if (outer->vlan_id) { - if (inner->vlan_id) { - pr_debug("inner tag: proto %X vid %X\n", - ntohs(inner->vlan_proto), inner->vlan_id); - skb = __vlan_put_tag(skb, inner->vlan_proto, - inner->vlan_id); - if (!skb) { - net_err_ratelimited("failed to insert inner VLAN tag\n"); - return; - } - }
- pr_debug("outer reg: proto %X vid %X\n", - ntohs(outer->vlan_proto), outer->vlan_id); - skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id); + /* Go through all the tags backwards and add them to the packet */ + for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) { + if (!tags[i].vlan_id) + continue; + + pr_debug("inner tag: proto %X vid %X\n", + ntohs(tags[i].vlan_proto), tags[i].vlan_id); + skb = __vlan_put_tag(skb, tags[i].vlan_proto, + tags[i].vlan_id); + if (!skb) { + net_err_ratelimited("failed to insert inner VLAN tag\n"); + return; + } + } + /* Set the outer tag */ + if (tags[0].vlan_id) { + pr_debug("outer tag: proto %X vid %X\n", + ntohs(tags[0].vlan_proto), tags[0].vlan_id); + skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id); if (!skb) { net_err_ratelimited("failed to insert outer VLAN tag\n"); return; @@@ -2169,52 -2194,22 +2199,52 @@@ arp_xmit(skb); }
+/* Validate the device path between the @start_dev and the @end_dev. + * The path is valid if the @end_dev is reachable through device + * stacking. + * When the path is validated, collect any vlan information in the + * path. + */ +static bool bond_verify_device_path(struct net_device *start_dev, + struct net_device *end_dev, + struct bond_vlan_tag *tags) +{ + struct net_device *upper; + struct list_head *iter; + int idx; + + if (start_dev == end_dev) + return true; + + netdev_for_each_upper_dev_rcu(start_dev, upper, iter) { + if (bond_verify_device_path(upper, end_dev, tags)) { + if (is_vlan_dev(upper)) { + idx = vlan_get_encap_level(upper); + if (idx >= BOND_MAX_VLAN_ENCAP) + return false; + + tags[idx].vlan_proto = + vlan_dev_vlan_proto(upper); + tags[idx].vlan_id = vlan_dev_vlan_id(upper); + } + return true; + } + } + + return false; +}
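bond_verify_device_path() replaces the old two-pass vlan search: it recurses up the upper-device lists until it reaches the route's device, then records each vlan tag on the way back down, indexed by vlan_get_encap_level(), so the outermost tag ends up in tags[0] where bond_arp_send() expects it. A toy user-space sketch of the record-on-unwind idea, with a single upper pointer instead of the kernel's adjacency list (all names illustrative):

#include <stdbool.h>

#define MAX_ENCAP 2

struct toydev {
	struct toydev *upper;
	int vlan_id;		/* 0 means "not a vlan device" */
	int encap_level;	/* 0 = outermost vlan */
};

/* Walk upward until "end" is found; record vlan tags while unwinding,
 * indexed by nesting level so the outermost tag lands in slot 0. */
static bool verify_path(struct toydev *start, struct toydev *end,
			int tags[MAX_ENCAP])
{
	if (start == end)
		return true;
	if (!start->upper || !verify_path(start->upper, end, tags))
		return false;
	if (start->upper->vlan_id) {
		if (start->upper->encap_level >= MAX_ENCAP)
			return false;
		tags[start->upper->encap_level] = start->upper->vlan_id;
	}
	return true;
}

Because the slot is chosen by encapsulation level rather than recursion depth, the ordering holds no matter how many non-vlan devices sit in the path.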
static void bond_arp_send_all(struct bonding *bond, struct slave *slave) { - struct net_device *upper, *vlan_upper; - struct list_head *iter, *vlan_iter; struct rtable *rt; - struct bond_vlan_tag inner, outer; + struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP]; __be32 *targets = bond->params.arp_targets, addr; int i; + bool ret;
for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) { pr_debug("basa: target %pI4\n", &targets[i]); - inner.vlan_proto = 0; - inner.vlan_id = 0; - outer.vlan_proto = 0; - outer.vlan_id = 0; + memset(tags, 0, sizeof(tags));
/* Find out through which dev should the packet go */ rt = ip_route_output(dev_net(bond->dev), targets[i], 0, @@@ -2227,8 -2222,7 +2257,8 @@@ net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n", bond->dev->name, &targets[i]); - bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer); + bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], + 0, tags); continue; }
@@@ -2237,12 -2231,52 +2267,12 @@@ goto found;
rcu_read_lock(); - /* first we search only for vlan devices. for every vlan - * found we verify its upper dev list, searching for the - * rt->dst.dev. If found we save the tag of the vlan and - * proceed to send the packet. - */ - netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper, - vlan_iter) { - if (!is_vlan_dev(vlan_upper)) - continue; - - if (vlan_upper == rt->dst.dev) { - outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper); - outer.vlan_id = vlan_dev_vlan_id(vlan_upper); - rcu_read_unlock(); - goto found; - } - netdev_for_each_all_upper_dev_rcu(vlan_upper, upper, - iter) { - if (upper == rt->dst.dev) { - /* If the upper dev is a vlan dev too, - * set the vlan tag to inner tag. - */ - if (is_vlan_dev(upper)) { - inner.vlan_proto = vlan_dev_vlan_proto(upper); - inner.vlan_id = vlan_dev_vlan_id(upper); - } - outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper); - outer.vlan_id = vlan_dev_vlan_id(vlan_upper); - rcu_read_unlock(); - goto found; - } - } - } - - /* if the device we're looking for is not on top of any of - * our upper vlans, then just search for any dev that - * matches, and in case it's a vlan - save the id - */ - netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) { - if (upper == rt->dst.dev) { - rcu_read_unlock(); - goto found; - } - } + ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags); rcu_read_unlock();
+ if (ret) + goto found; + /* Not our device - skip */ pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", bond->dev->name, &targets[i], @@@ -2255,7 -2289,7 +2285,7 @@@ found addr = bond_confirm_addr(rt->dst.dev, targets[i], 0); ip_rt_put(rt); bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], - addr, &inner, &outer); + addr, tags); } }
@@@ -2287,8 -2321,8 +2317,8 @@@ int bond_arp_rcv(const struct sk_buff * int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
if (!slave_do_arp_validate(bond, slave)) { - if ((slave_do_arp_validate_only(bond, slave) && is_arp) || - !slave_do_arp_validate_only(bond, slave)) + if ((slave_do_arp_validate_only(bond) && is_arp) || + !slave_do_arp_validate_only(bond)) slave->last_rx = jiffies; return RX_HANDLER_ANOTHER; } else if (!is_arp) { @@@ -2456,7 -2490,7 +2486,7 @@@ static void bond_loadbalance_arp_mon(st * do - all replies will be rx'ed on same link causing slaves * to be unstable during low/no traffic periods */ - if (IS_UP(slave->dev)) + if (bond_slave_is_up(slave)) bond_arp_send_all(bond, slave); }
@@@ -2678,10 -2712,10 +2708,10 @@@ static bool bond_ab_arp_probe(struct bo bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
bond_for_each_slave_rcu(bond, slave, iter) { - if (!found && !before && IS_UP(slave->dev)) + if (!found && !before && bond_slave_is_up(slave)) before = slave;
- if (found && !new_slave && IS_UP(slave->dev)) + if (found && !new_slave && bond_slave_is_up(slave)) new_slave = slave; /* if the link state is up at this point, we * mark it down - this can happen if we have @@@ -2690,7 -2724,7 +2720,7 @@@ * one the current slave so it is still marked * up when it is actually down */ - if (!IS_UP(slave->dev) && slave->link == BOND_LINK_UP) { + if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) { slave->link = BOND_LINK_DOWN; if (slave->link_failure_count < UINT_MAX) slave->link_failure_count++; @@@ -2853,7 -2887,7 +2883,7 @@@ static int bond_slave_netdev_event(unsi
bond_update_speed_duplex(slave);
- if (bond->params.mode == BOND_MODE_8023AD) { + if (BOND_MODE(bond) == BOND_MODE_8023AD) { if (old_speed != slave->speed) bond_3ad_adapter_speed_changed(slave); if (old_duplex != slave->duplex) @@@ -2881,7 -2915,7 +2911,7 @@@ break; case NETDEV_CHANGENAME: /* we don't care if we don't have primary set */ - if (!USES_PRIMARY(bond->params.mode) || + if (!bond_uses_primary(bond) || !bond->params.primary[0]) break;
@@@ -3011,20 -3045,18 +3041,18 @@@ static bool bond_flow_dissect(struct bo * bond_xmit_hash - generate a hash value based on the xmit policy * @bond: bonding device * @skb: buffer to use for headers * * This function will extract the necessary headers from the skb buffer and use * them to generate a hash based on the xmit_policy set in the bonding device - * which will be reduced modulo count before returning. */ - int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count) + u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb) { struct flow_keys flow; u32 hash;
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 || !bond_flow_dissect(bond, skb, &flow)) - return bond_eth_hash(skb) % count; + return bond_eth_hash(skb);
if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 || bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) @@@ -3035,7 -3067,7 +3063,7 @@@ hash ^= (hash >> 16); hash ^= (hash >> 8);
- return hash % count; + return hash; }
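bond_xmit_hash() now returns the raw folded 32-bit hash instead of reducing it modulo a caller-supplied count; each caller applies its own reduction (bond_xmit_xor() takes it modulo the slave count further below, while bond_tlb_xmit() masks with 0xFF to index the 256-entry tx hash table). A small sketch of the fold-then-reduce split, assuming nothing beyond what the hunk shows:

#include <stdint.h>

/* The fold from bond_xmit_hash() (mixing high bits downward), with the
 * reduction now left to the caller. */
static uint32_t fold32(uint32_t hash)
{
	hash ^= hash >> 16;
	hash ^= hash >> 8;
	return hash;
}

/* caller-side reductions:
 *   balance-xor:  fold32(h) % slave_cnt
 *   balance-tlb:  fold32(h) & 0xFF   (256 hash-table buckets)
 */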
/*-------------------------- Device entry points ----------------------------*/ @@@ -3046,7 -3078,7 +3074,7 @@@ static void bond_work_init_all(struct b bond_resend_igmp_join_requests_delayed); INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor); INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor); - if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) + if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon); else INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon); @@@ -3073,7 -3105,7 +3101,7 @@@ static int bond_open(struct net_device if (bond_has_slaves(bond)) { read_lock(&bond->curr_slave_lock); bond_for_each_slave(bond, slave, iter) { - if (USES_PRIMARY(bond->params.mode) + if (bond_uses_primary(bond) && (slave != bond->curr_active_slave)) { bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW); @@@ -3092,9 -3124,10 +3120,10 @@@ /* bond_alb_initialize must be called before the timer * is started. */ - if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB))) + if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB))) return -ENOMEM; - queue_delayed_work(bond->wq, &bond->alb_work, 0); + if (bond->params.tlb_dynamic_lb) + queue_delayed_work(bond->wq, &bond->alb_work, 0); }
if (bond->params.miimon) /* link check interval, in milliseconds. */ @@@ -3105,7 -3138,7 +3134,7 @@@ bond->recv_probe = bond_arp_rcv; }
- if (bond->params.mode == BOND_MODE_8023AD) { + if (BOND_MODE(bond) == BOND_MODE_8023AD) { queue_delayed_work(bond->wq, &bond->ad_work, 0); /* register to receive LACPDUs */ bond->recv_probe = bond_3ad_lacpdu_recv; @@@ -3310,7 -3343,7 +3339,7 @@@ static void bond_set_rx_mode(struct net
rcu_read_lock(); - if (USES_PRIMARY(bond->params.mode)) { + if (bond_uses_primary(bond)) { slave = rcu_dereference(bond->curr_active_slave); if (slave) { dev_uc_sync(slave->dev, bond_dev); @@@ -3464,7 -3497,7 +3493,7 @@@ static int bond_set_mac_address(struct struct list_head *iter; int res = 0;
- if (bond->params.mode == BOND_MODE_ALB) + if (BOND_MODE(bond) == BOND_MODE_ALB) return bond_alb_set_mac_address(bond_dev, addr);
@@@ -3475,7 -3508,7 +3504,7 @@@ * Returning an error causes ifenslave to fail. */ if (bond->params.fail_over_mac && - bond->params.mode == BOND_MODE_ACTIVEBACKUP) + BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) return 0;
if (!is_valid_ether_addr(sa->sa_data)) @@@ -3555,7 -3588,7 +3584,7 @@@ static void bond_xmit_slave_id(struct b /* Here we start from the slave with slave_id */ bond_for_each_slave_rcu(bond, slave, iter) { if (--i < 0) { - if (slave_can_tx(slave)) { + if (bond_slave_can_tx(slave)) { bond_dev_queue_xmit(bond, skb, slave->dev); return; } @@@ -3567,7 -3600,7 +3596,7 @@@ bond_for_each_slave_rcu(bond, slave, iter) { if (--i < 0) break; - if (slave_can_tx(slave)) { + if (bond_slave_can_tx(slave)) { bond_dev_queue_xmit(bond, skb, slave->dev); return; } @@@ -3624,7 -3657,7 +3653,7 @@@ static int bond_xmit_roundrobin(struct */ if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) { slave = rcu_dereference(bond->curr_active_slave); - if (slave && slave_can_tx(slave)) + if (slave && bond_slave_can_tx(slave)) bond_dev_queue_xmit(bond, skb, slave->dev); else bond_xmit_slave_id(bond, skb, 0); @@@ -3662,7 -3695,7 +3691,7 @@@ static int bond_xmit_xor(struct sk_buf { struct bonding *bond = netdev_priv(bond_dev);
- bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb, bond->slave_cnt)); + bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb) % bond->slave_cnt);
return NETDEV_TX_OK; } @@@ -3677,7 -3710,7 +3706,7 @@@ static int bond_xmit_broadcast(struct s bond_for_each_slave_rcu(bond, slave, iter) { if (bond_is_last_slave(bond, slave)) break; - if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) { + if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2) { @@@ -3689,7 -3722,7 +3718,7 @@@ bond_dev_queue_xmit(bond, skb2, slave->dev); } } - if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP) + if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) bond_dev_queue_xmit(bond, skb, slave->dev); else dev_kfree_skb_any(skb); @@@ -3714,7 -3747,7 +3743,7 @@@ static inline int bond_slave_override(s /* Find out if any slaves have the same mapping as this skb. */ bond_for_each_slave_rcu(bond, slave, iter) { if (slave->queue_id == skb->queue_mapping) { - if (slave_can_tx(slave)) { + if (bond_slave_can_tx(slave)) { bond_dev_queue_xmit(bond, skb, slave->dev); return 0; } @@@ -3755,12 -3788,11 +3784,11 @@@ static netdev_tx_t __bond_start_xmit(st { struct bonding *bond = netdev_priv(dev);
- if (TX_QUEUE_OVERRIDE(bond->params.mode)) { - if (!bond_slave_override(bond, skb)) - return NETDEV_TX_OK; - } + if (bond_should_override_tx_queue(bond) && + !bond_slave_override(bond, skb)) + return NETDEV_TX_OK;
- switch (bond->params.mode) { + switch (BOND_MODE(bond)) { case BOND_MODE_ROUNDROBIN: return bond_xmit_roundrobin(skb, dev); case BOND_MODE_ACTIVEBACKUP: @@@ -3772,12 -3804,13 +3800,13 @@@ case BOND_MODE_8023AD: return bond_3ad_xmit_xor(skb, dev); case BOND_MODE_ALB: - case BOND_MODE_TLB: return bond_alb_xmit(skb, dev); + case BOND_MODE_TLB: + return bond_tlb_xmit(skb, dev); default: /* Should never happen, mode already checked */ pr_err("%s: Error: Unknown bonding mode %d\n", - dev->name, bond->params.mode); + dev->name, BOND_MODE(bond)); WARN_ON_ONCE(1); dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@@ -3817,14 -3850,14 +3846,14 @@@ static int bond_ethtool_get_settings(st ecmd->duplex = DUPLEX_UNKNOWN; ecmd->port = PORT_OTHER;
- /* Since SLAVE_IS_OK returns false for all inactive or down slaves, we + /* Since bond_slave_can_tx returns false for all inactive or down slaves, we * do not need to check mode. Though link speed might not represent * the true receive or transmit bandwidth (not all modes are symmetric) * this is an accurate maximum. */ read_lock(&bond->lock); bond_for_each_slave(bond, slave, iter) { - if (SLAVE_IS_OK(slave)) { + if (bond_slave_can_tx(slave)) { if (slave->speed != SPEED_UNKNOWN) speed += slave->speed; if (ecmd->duplex == DUPLEX_UNKNOWN && @@@ -3994,7 -4027,8 +4023,8 @@@ static int bond_check_params(struct bon
if (xmit_hash_policy) { if ((bond_mode != BOND_MODE_XOR) && - (bond_mode != BOND_MODE_8023AD)) { + (bond_mode != BOND_MODE_8023AD) && + (bond_mode != BOND_MODE_TLB)) { pr_info("xmit_hash_policy param is irrelevant in mode %s\n", bond_mode_name(bond_mode)); } else { @@@ -4079,7 -4113,7 +4109,7 @@@ }
/* reset values for 802.3ad/TLB/ALB */ - if (BOND_NO_USES_ARP(bond_mode)) { + if (!bond_mode_uses_arp(bond_mode)) { if (!miimon) { pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n"); pr_warn("Forcing miimon to 100msec\n"); @@@ -4161,7 -4195,7 +4191,7 @@@ catch mistakes */ __be32 ip; if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) || - IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) { + !bond_is_ip_target_ok(ip)) { pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n", arp_ip_target[i]); arp_interval = 0; @@@ -4234,7 -4268,7 +4264,7 @@@ pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n"); }
- if (primary && !USES_PRIMARY(bond_mode)) { + if (primary && !bond_mode_uses_primary(bond_mode)) { /* currently, using a primary only makes sense * in active backup, TLB or ALB modes */ @@@ -4300,6 -4334,7 +4330,7 @@@ params->min_links = min_links; params->lp_interval = lp_interval; params->packets_per_slave = packets_per_slave; + params->tlb_dynamic_lb = 1; /* Default value */ if (packets_per_slave > 0) { params->reciprocal_packets_per_slave = reciprocal_value(packets_per_slave); diff --combined drivers/net/bonding/bond_options.c index 8320702,94094b3..540e016 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@@ -70,6 -70,8 +70,8 @@@ static int bond_option_mode_set(struct const struct bond_opt_value *newval); static int bond_option_slaves_set(struct bonding *bond, const struct bond_opt_value *newval); + static int bond_option_tlb_dynamic_lb_set(struct bonding *bond, + const struct bond_opt_value *newval);
static const struct bond_opt_value bond_mode_tbl[] = { @@@ -125,7 -127,6 +127,7 @@@ static const struct bond_opt_value bond static const struct bond_opt_value bond_intmax_tbl[] = { { "off", 0, BOND_VALFLAG_DEFAULT}, { "maxval", INT_MAX, BOND_VALFLAG_MAX}, + { NULL, -1, 0} };
static const struct bond_opt_value bond_lacp_rate_tbl[] = { @@@ -180,6 -181,12 +182,12 @@@ static const struct bond_opt_value bond { NULL, -1, 0}, };
+ static const struct bond_opt_value bond_tlb_dynamic_lb_tbl[] = { + { "off", 0, 0}, + { "on", 1, BOND_VALFLAG_DEFAULT}, + { NULL, -1, 0} + }; + static const struct bond_option bond_opts[] = { [BOND_OPT_MODE] = { .id = BOND_OPT_MODE, @@@ -200,7 -207,7 +208,7 @@@ [BOND_OPT_XMIT_HASH] = { .id = BOND_OPT_XMIT_HASH, .name = "xmit_hash_policy", - .desc = "balance-xor and 802.3ad hashing method", + .desc = "balance-xor, 802.3ad, and tlb hashing method", .values = bond_xmit_hashtype_tbl, .set = bond_option_xmit_hash_policy_set }, @@@ -365,9 -372,33 +373,33 @@@ .flags = BOND_OPTFLAG_RAWVAL, .set = bond_option_slaves_set }, + [BOND_OPT_TLB_DYNAMIC_LB] = { + .id = BOND_OPT_TLB_DYNAMIC_LB, + .name = "tlb_dynamic_lb", + .desc = "Enable dynamic flow shuffling", + .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_TLB)), + .values = bond_tlb_dynamic_lb_tbl, + .flags = BOND_OPTFLAG_IFDOWN, + .set = bond_option_tlb_dynamic_lb_set, + }, { } };
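The new tlb_dynamic_lb option is declared valid only for balance-tlb (unsuppmodes masks out every other mode) and, because of BOND_OPTFLAG_IFDOWN, may only be changed while the bond is down; its setter simply stores the flag that gates the rebalancing paths above. Set programmatically, it would presumably go through the same option API used elsewhere in this file, roughly (a hedged sketch; bond is assumed in scope and RTNL held, as for the other setters):

	struct bond_opt_value newval;
	int err;

	/* turn off dynamic flow shuffling on this bond */
	bond_opt_initstr(&newval, "off");
	err = __bond_opt_set(bond, BOND_OPT_TLB_DYNAMIC_LB, &newval);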
+ /* Searches for an option by name */ + const struct bond_option *bond_opt_get_by_name(const char *name) + { + const struct bond_option *opt; + int option; + + for (option = 0; option < BOND_OPT_LAST; option++) { + opt = bond_opt_get(option); + if (opt && !strcmp(opt->name, name)) + return opt; + } + + return NULL; + } + /* Searches for a value in opt's values[] table */ const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val) { @@@ -641,7 -672,7 +673,7 @@@ const struct bond_option *bond_opt_get(
int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newval) { - if (BOND_NO_USES_ARP(newval->value) && bond->params.arp_interval) { + if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) { pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n", bond->dev->name, newval->string); /* disable arp monitoring */ @@@ -662,7 -693,7 +694,7 @@@ static struct net_device *__bond_option_active_slave_get(struct bonding *bond, struct slave *slave) { - return USES_PRIMARY(bond->params.mode) && slave ? slave->dev : NULL; + return bond_uses_primary(bond) && slave ? slave->dev : NULL; }
struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond) @@@ -727,7 -758,7 +759,7 @@@ static int bond_option_active_slave_set bond->dev->name, new_active->dev->name); } else { if (old_active && (new_active->link == BOND_LINK_UP) && - IS_UP(new_active->dev)) { + bond_slave_is_up(new_active)) { pr_info("%s: Setting %s as active slave\n", bond->dev->name, new_active->dev->name); bond_change_active_slave(bond, new_active); @@@ -746,6 -777,10 +778,10 @@@ return ret; }
+ /* There are two tricky bits here. First, if MII monitoring is activated, then + * we must disable ARP monitoring. Second, if the timer isn't running, we must + * start it. + */ static int bond_option_miimon_set(struct bonding *bond, const struct bond_opt_value *newval) { @@@ -784,6 -819,10 +820,10 @@@ return 0; }
+ /* Set up and down delays. These must be multiples of the + * MII monitoring value, and are stored internally as the multiplier. + * Thus, we must translate to MS for the real world. + */ static int bond_option_updelay_set(struct bonding *bond, const struct bond_opt_value *newval) { @@@ -842,6 -881,10 +882,10 @@@ static int bond_option_use_carrier_set( return 0; }
+ /* There are two tricky bits here. First, if ARP monitoring is activated, then + * we must disable MII monitoring. Second, if the ARP timer isn't running, + * we must start it. + */ static int bond_option_arp_interval_set(struct bonding *bond, const struct bond_opt_value *newval) { @@@ -899,7 -942,7 +943,7 @@@ static int _bond_option_arp_ip_target_a __be32 *targets = bond->params.arp_targets; int ind;
- if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) { + if (!bond_is_ip_target_ok(target)) { pr_err("%s: invalid ARP target %pI4 specified for addition\n", bond->dev->name, &target); return -EINVAL; @@@ -944,7 -987,7 +988,7 @@@ static int bond_option_arp_ip_target_re unsigned long *targets_rx; int ind, i;
- if (IS_IP_TARGET_UNUSABLE_ADDRESS(target)) { + if (!bond_is_ip_target_ok(target)) { pr_err("%s: invalid ARP target %pI4 specified for removal\n", bond->dev->name, &target); return -EINVAL; @@@ -1338,3 -1381,13 +1382,13 @@@ err_no_cmd ret = -EPERM; goto out; } + + static int bond_option_tlb_dynamic_lb_set(struct bonding *bond, + const struct bond_opt_value *newval) + { + pr_info("%s: Setting dynamic-lb to %s (%llu)\n", + bond->dev->name, newval->string, newval->value); + bond->params.tlb_dynamic_lb = newval->value; + + return 0; + } diff --combined drivers/net/bonding/bonding.h index 00bea32,44334b3d..7d54744 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@@ -36,47 -36,10 +36,11 @@@
#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
+#define BOND_MAX_VLAN_ENCAP 2 #define BOND_MAX_ARP_TARGETS 16
#define BOND_DEFAULT_MIIMON 100
- #define IS_UP(dev) \ - ((((dev)->flags & IFF_UP) == IFF_UP) && \ - netif_running(dev) && \ - netif_carrier_ok(dev)) - - /* - * Checks whether slave is ready for transmit. - */ - #define SLAVE_IS_OK(slave) \ - (((slave)->dev->flags & IFF_UP) && \ - netif_running((slave)->dev) && \ - ((slave)->link == BOND_LINK_UP) && \ - bond_is_active_slave(slave)) - - - #define USES_PRIMARY(mode) \ - (((mode) == BOND_MODE_ACTIVEBACKUP) || \ - ((mode) == BOND_MODE_TLB) || \ - ((mode) == BOND_MODE_ALB)) - - #define BOND_NO_USES_ARP(mode) \ - (((mode) == BOND_MODE_8023AD) || \ - ((mode) == BOND_MODE_TLB) || \ - ((mode) == BOND_MODE_ALB)) - - #define TX_QUEUE_OVERRIDE(mode) \ - (((mode) == BOND_MODE_ACTIVEBACKUP) || \ - ((mode) == BOND_MODE_ROUNDROBIN)) - - #define BOND_MODE_IS_LB(mode) \ - (((mode) == BOND_MODE_TLB) || \ - ((mode) == BOND_MODE_ALB)) - - #define IS_IP_TARGET_UNUSABLE_ADDRESS(a) \ - ((htonl(INADDR_BROADCAST) == a) || \ - ipv4_is_zeronet(a)) /* * Less bad way to call ioctl from within the kernel; this needs to be * done some other way to get the call out of interrupt context. @@@ -90,6 -53,8 +54,8 @@@ set_fs(fs); \ res; })
+ #define BOND_MODE(bond) ((bond)->params.mode) + /* slave list primitives */ #define bond_slave_list(bond) (&(bond)->dev->adj_list.lower)
@@@ -175,6 -140,7 +141,7 @@@ struct bond_params int resend_igmp; int lp_interval; int packets_per_slave; + int tlb_dynamic_lb; struct reciprocal_value reciprocal_packets_per_slave; };
@@@ -183,8 -149,6 +150,6 @@@ struct bond_parm_tbl int mode; };
- #define BOND_MAX_MODENAME_LEN 20 - struct slave { struct net_device *dev; /* first - useful for panic debug */ struct bonding *bond; /* our master */ @@@ -205,7 -169,7 +170,7 @@@ u32 speed; u16 queue_id; u8 perm_hwaddr[ETH_ALEN]; - struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */ + struct ad_slave_info *ad_info; struct tlb_slave_info tlb_info; #ifdef CONFIG_NET_POLL_CONTROLLER struct netpoll *np; @@@ -290,9 -254,38 +255,38 @@@ static inline struct bonding *bond_get_ return slave->bond; }
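The hunk above also turns the slave's ad_info into a pointer (the old comment already flagged the embedded copy as "HUGE - better to dynamically alloc"); bond_alloc_slave() earlier in this series fills it only in 802.3ad mode, which is why every SLAVE_AD_INFO() user switched from '.' to '->'. A compact user-space sketch of the same allocate-on-demand pattern, with stand-in types:

#include <stdlib.h>

/* Mode-specific state is only paid for when the mode needs it, as in
 * bond_alloc_slave()/bond_free_slave(). Types here are illustrative. */
struct ad_state { int aggregator_id; };
struct member  { struct ad_state *ad; };

static struct member *member_alloc(int wants_ad)
{
	struct member *m = calloc(1, sizeof(*m));

	if (!m)
		return NULL;
	if (wants_ad) {
		m->ad = calloc(1, sizeof(*m->ad));
		if (!m->ad) {		/* unwind the partial allocation */
			free(m);
			return NULL;
		}
	}
	return m;
}

static void member_free(struct member *m)
{
	if (!m)
		return;
	free(m->ad);			/* free(NULL) is a no-op */
	free(m);
}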
+ static inline bool bond_should_override_tx_queue(struct bonding *bond) + { + return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP || + BOND_MODE(bond) == BOND_MODE_ROUNDROBIN; + } + static inline bool bond_is_lb(const struct bonding *bond) { - return BOND_MODE_IS_LB(bond->params.mode); + return BOND_MODE(bond) == BOND_MODE_TLB || + BOND_MODE(bond) == BOND_MODE_ALB; + } + + static inline bool bond_mode_uses_arp(int mode) + { + return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB && + mode != BOND_MODE_ALB; + } + + static inline bool bond_mode_uses_primary(int mode) + { + return mode == BOND_MODE_ACTIVEBACKUP || mode == BOND_MODE_TLB || + mode == BOND_MODE_ALB; + } + + static inline bool bond_uses_primary(struct bonding *bond) + { + return bond_mode_uses_primary(BOND_MODE(bond)); + } + + static inline bool bond_slave_is_up(struct slave *slave) + { + return netif_running(slave->dev) && netif_carrier_ok(slave->dev); }
static inline void bond_set_active_slave(struct slave *slave) @@@ -365,6 -358,12 +359,12 @@@ static inline bool bond_is_active_slave return !bond_slave_state(slave); }
+ static inline bool bond_slave_can_tx(struct slave *slave) + { + return bond_slave_is_up(slave) && slave->link == BOND_LINK_UP && + bond_is_active_slave(slave); + } + #define BOND_PRI_RESELECT_ALWAYS 0 #define BOND_PRI_RESELECT_BETTER 1 #define BOND_PRI_RESELECT_FAILURE 2 @@@ -396,12 -395,16 +396,16 @@@ static inline int slave_do_arp_validate return bond->params.arp_validate & (1 << bond_slave_state(slave)); }
- static inline int slave_do_arp_validate_only(struct bonding *bond, - struct slave *slave) + static inline int slave_do_arp_validate_only(struct bonding *bond) { return bond->params.arp_validate & BOND_ARP_FILTER; }
+ static inline int bond_is_ip_target_ok(__be32 addr) + { + return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr); + } + /* Get the oldest arp which we've received on this slave for bond's * arp_targets. */ @@@ -479,16 -482,14 +483,14 @@@ static inline __be32 bond_confirm_addr( return addr; }
- static inline bool slave_can_tx(struct slave *slave) - { - if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP && - bond_is_active_slave(slave)) - return true; - else - return false; - } - - struct bond_net; + struct bond_net { + struct net *net; /* Associated network namespace */ + struct list_head dev_list; + #ifdef CONFIG_PROC_FS + struct proc_dir_entry *proc_dir; + #endif + struct class_attribute class_attr_bonding_masters; + };
int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave); void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); @@@ -500,7 -501,7 +502,7 @@@ int bond_sysfs_slave_add(struct slave * void bond_sysfs_slave_del(struct slave *slave); int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev); int bond_release(struct net_device *bond_dev, struct net_device *slave_dev); - int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count); + u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb); void bond_select_active_slave(struct bonding *bond); void bond_change_active_slave(struct bonding *bond, struct slave *new_active); void bond_create_debugfs(void); @@@ -517,15 -518,6 +519,6 @@@ struct net_device *bond_option_active_s struct net_device *bond_option_active_slave_get(struct bonding *bond); const char *bond_slave_link_status(s8 link);
- struct bond_net { - struct net * net; /* Associated network namespace */ - struct list_head dev_list; - #ifdef CONFIG_PROC_FS - struct proc_dir_entry * proc_dir; - #endif - struct class_attribute class_attr_bonding_masters; - }; - #ifdef CONFIG_PROC_FS void bond_create_proc_entry(struct bonding *bond); void bond_remove_proc_entry(struct bonding *bond); diff --combined drivers/net/ethernet/altera/altera_sgdma.c index 99cc56f,dbd40e1..5d2d911 --- a/drivers/net/ethernet/altera/altera_sgdma.c +++ b/drivers/net/ethernet/altera/altera_sgdma.c @@@ -20,8 -20,8 +20,8 @@@ #include "altera_sgdmahw.h" #include "altera_sgdma.h"
-static void sgdma_setup_descrip(struct sgdma_descrip *desc, - struct sgdma_descrip *ndesc, +static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc, + struct sgdma_descrip __iomem *ndesc, dma_addr_t ndesc_phys, dma_addr_t raddr, dma_addr_t waddr, @@@ -31,17 -31,17 +31,17 @@@ int wfixed);
static int sgdma_async_write(struct altera_tse_private *priv, - struct sgdma_descrip *desc); + struct sgdma_descrip __iomem *desc);
static int sgdma_async_read(struct altera_tse_private *priv);
static dma_addr_t sgdma_txphysaddr(struct altera_tse_private *priv, - struct sgdma_descrip *desc); + struct sgdma_descrip __iomem *desc);
static dma_addr_t sgdma_rxphysaddr(struct altera_tse_private *priv, - struct sgdma_descrip *desc); + struct sgdma_descrip __iomem *desc);
static int sgdma_txbusy(struct altera_tse_private *priv);
@@@ -79,8 -79,7 +79,8 @@@ int sgdma_initialize(struct altera_tse_ priv->rxdescphys = (dma_addr_t) 0; priv->txdescphys = (dma_addr_t) 0;
- priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc, + priv->rxdescphys = dma_map_single(priv->device, + (void __force *)priv->rx_dma_desc, priv->rxdescmem, DMA_BIDIRECTIONAL);
if (dma_mapping_error(priv->device, priv->rxdescphys)) { @@@ -89,8 -88,7 +89,8 @@@ return -EINVAL; }
- priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc, + priv->txdescphys = dma_map_single(priv->device, + (void __force *)priv->tx_dma_desc, priv->txdescmem, DMA_TO_DEVICE);
if (dma_mapping_error(priv->device, priv->txdescphys)) { @@@ -100,8 -98,8 +100,8 @@@ }
/* Initialize descriptor memory to all 0's, sync memory to cache */ - memset(priv->tx_dma_desc, 0, priv->txdescmem); - memset(priv->rx_dma_desc, 0, priv->rxdescmem); + memset_io(priv->tx_dma_desc, 0, priv->txdescmem); + memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
dma_sync_single_for_device(priv->device, priv->txdescphys, priv->txdescmem, DMA_TO_DEVICE); @@@ -128,15 -126,22 +128,15 @@@ void sgdma_uninitialize(struct altera_t */ void sgdma_reset(struct altera_tse_private *priv) { - u32 *ptxdescripmem = priv->tx_dma_desc; - u32 txdescriplen = priv->txdescmem; - u32 *prxdescripmem = priv->rx_dma_desc; - u32 rxdescriplen = priv->rxdescmem; - struct sgdma_csr *ptxsgdma = priv->tx_dma_csr; - struct sgdma_csr *prxsgdma = priv->rx_dma_csr; - /* Initialize descriptor memory to 0 */ - memset(ptxdescripmem, 0, txdescriplen); - memset(prxdescripmem, 0, rxdescriplen); + memset_io(priv->tx_dma_desc, 0, priv->txdescmem); + memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
- iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control); - iowrite32(0, &ptxsgdma->control); + csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control)); + csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
- iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control); - iowrite32(0, &prxsgdma->control); + csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control)); + csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control)); }
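Throughout the sgdma conversion, direct struct dereferences of descriptor and CSR memory (e.g. iowrite32(x, &csr->control)) give way to csrrd/csrwr helpers that take a base pointer plus an offsetof()-style offset, so every access stays an ioread/iowrite on __iomem memory and sparse can check the annotations. The helpers themselves are not in this diff; from the call sites they presumably look something like this sketch (the real definitions live in the Altera TSE headers):

#define sgdma_csroffs(a)	offsetof(struct sgdma_csr, a)
#define sgdma_descroffs(a)	offsetof(struct sgdma_descrip, a)

/* All MMIO goes through ioread/iowrite at base + offset rather than
 * through a struct dereference of __iomem memory. */
static inline u32 csrrd32(void __iomem *base, size_t offs)
{
	return ioread32((char __iomem *)base + offs);
}

static inline void csrwr32(u32 val, void __iomem *base, size_t offs)
{
	iowrite32(val, (char __iomem *)base + offs);
}

/* csrrd8/csrrd16 and csrwr8/csrwr16 would follow the same pattern with
 * the byte- and word-sized ioread/iowrite variants. */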
/* For SGDMA, interrupts remain enabled after initially enabling, @@@ -162,14 -167,14 +162,14 @@@ void sgdma_disable_txirq(struct altera_
void sgdma_clear_rxirq(struct altera_tse_private *priv) { - struct sgdma_csr *csr = priv->rx_dma_csr; - tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT); + tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control), + SGDMA_CTRLREG_CLRINT); }
void sgdma_clear_txirq(struct altera_tse_private *priv) { - struct sgdma_csr *csr = priv->tx_dma_csr; - tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT); + tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control), + SGDMA_CTRLREG_CLRINT); }
/* transmits buffer through SGDMA. Returns number of buffers @@@ -179,11 -184,11 +179,10 @@@ */ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) { - struct sgdma_descrip __iomem *descbase = - (struct sgdma_descrip __iomem *)priv->tx_dma_desc; - int pktstx = 0; - struct sgdma_descrip *descbase = priv->tx_dma_desc; ++ struct sgdma_descrip __iomem *descbase = priv->tx_dma_desc;
- struct sgdma_descrip *cdesc = &descbase[0]; - struct sgdma_descrip *ndesc = &descbase[1]; + struct sgdma_descrip __iomem *cdesc = &descbase[0]; + struct sgdma_descrip __iomem *ndesc = &descbase[1];
/* wait 'til the tx sgdma is ready for the next transmit request */ if (sgdma_txbusy(priv)) @@@ -199,7 -204,7 +198,7 @@@ 0, /* read fixed */ SGDMA_CONTROL_WR_FIXED); /* Generate SOP */
- pktstx = sgdma_async_write(priv, cdesc); + sgdma_async_write(priv, cdesc);
/* enqueue the request to the pending transmit queue */ queue_tx(priv, buffer); @@@ -213,10 -218,10 +212,10 @@@ u32 sgdma_tx_completions(struct altera_tse_private *priv) { u32 ready = 0; - struct sgdma_descrip *desc = priv->tx_dma_desc;
if (!sgdma_txbusy(priv) && - ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) && + ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control)) + & SGDMA_CONTROL_HW_OWNED) == 0) && (dequeue_tx(priv))) { ready = 1; } @@@ -240,31 -245,32 +239,30 @@@ void sgdma_add_rx_desc(struct altera_ts */ u32 sgdma_rx_status(struct altera_tse_private *priv) { - struct sgdma_descrip __iomem *base = - (struct sgdma_descrip __iomem *)priv->rx_dma_desc; - struct sgdma_csr *csr = priv->rx_dma_csr; - struct sgdma_descrip *base = priv->rx_dma_desc; - struct sgdma_descrip *desc = NULL; - int pktsrx; - unsigned int rxstatus = 0; - unsigned int pktlength = 0; - unsigned int pktstatus = 0; ++ struct sgdma_descrip __iomem *base = priv->rx_dma_desc; + struct sgdma_descrip __iomem *desc = NULL; struct tse_buffer *rxbuffer = NULL; + unsigned int rxstatus = 0;
- u32 sts = ioread32(&csr->status); + u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));
desc = &base[0]; if (sts & SGDMA_STSREG_EOP) { + unsigned int pktlength = 0; + unsigned int pktstatus = 0; dma_sync_single_for_cpu(priv->device, priv->rxdescphys, priv->sgdmadesclen, DMA_FROM_DEVICE);
- pktlength = desc->bytes_xferred; - pktstatus = desc->status & 0x3f; - rxstatus = pktstatus; + pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred)); + pktstatus = csrrd8(desc, sgdma_descroffs(status)); + rxstatus = pktstatus & ~SGDMA_STATUS_EOP; rxstatus = rxstatus << 16; rxstatus |= (pktlength & 0xffff);
if (rxstatus) { - desc->status = 0; + csrwr8(0, desc, sgdma_descroffs(status));
rxbuffer = dequeue_rx(priv); if (rxbuffer == NULL) @@@ -272,12 -278,12 +270,12 @@@ "sgdma rx and rx queue empty!\n");
/* Clear control */ - iowrite32(0, &csr->control); + csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control)); /* clear status */ - iowrite32(0xf, &csr->status); + csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));
/* kick the rx sgdma after reaping this descriptor */ - pktsrx = sgdma_async_read(priv); + sgdma_async_read(priv);
} else { /* If the SGDMA indicated an end of packet on recv, @@@ -291,11 -297,10 +289,11 @@@ */ netdev_err(priv->dev, "SGDMA RX Error Info: %x, %x, %x\n", - sts, desc->status, rxstatus); + sts, csrrd8(desc, sgdma_descroffs(status)), + rxstatus); } } else if (sts == 0) { - pktsrx = sgdma_async_read(priv); + sgdma_async_read(priv); }
return rxstatus; @@@ -303,8 -308,8 +301,8 @@@
/* Private functions */ -static void sgdma_setup_descrip(struct sgdma_descrip *desc, - struct sgdma_descrip *ndesc, +static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc, + struct sgdma_descrip __iomem *ndesc, dma_addr_t ndesc_phys, dma_addr_t raddr, dma_addr_t waddr, @@@ -314,30 -319,27 +312,30 @@@ int wfixed) { /* Clear the next descriptor as not owned by hardware */ - u32 ctrl = ndesc->control; + + u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control)); ctrl &= ~SGDMA_CONTROL_HW_OWNED; - ndesc->control = ctrl; + csrwr8(ctrl, ndesc, sgdma_descroffs(control));
- ctrl = 0; ctrl = SGDMA_CONTROL_HW_OWNED; ctrl |= generate_eop; ctrl |= rfixed; ctrl |= wfixed;
/* Channel is implicitly zero, initialized to 0 by default */ - - desc->raddr = raddr; - desc->waddr = waddr; - desc->next = lower_32_bits(ndesc_phys); - desc->control = ctrl; - desc->status = 0; - desc->rburst = 0; - desc->wburst = 0; - desc->bytes = length; - desc->bytes_xferred = 0; + csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr)); + csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr)); + + csrwr32(0, desc, sgdma_descroffs(pad1)); + csrwr32(0, desc, sgdma_descroffs(pad2)); + csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next)); + + csrwr8(ctrl, desc, sgdma_descroffs(control)); + csrwr8(0, desc, sgdma_descroffs(status)); + csrwr8(0, desc, sgdma_descroffs(wburst)); + csrwr8(0, desc, sgdma_descroffs(rburst)); + csrwr16(length, desc, sgdma_descroffs(bytes)); + csrwr16(0, desc, sgdma_descroffs(bytes_xferred)); }
/* If hardware is busy, don't restart async read. @@@ -348,11 -350,10 +346,10 @@@ */ static int sgdma_async_read(struct altera_tse_private *priv) { - struct sgdma_descrip __iomem *descbase = - (struct sgdma_descrip __iomem *)priv->rx_dma_desc; - struct sgdma_csr *csr = priv->rx_dma_csr; - struct sgdma_descrip *descbase = priv->rx_dma_desc; - struct sgdma_descrip *cdesc = &descbase[0]; - struct sgdma_descrip *ndesc = &descbase[1]; ++ struct sgdma_descrip __iomem *descbase = priv->rx_dma_desc; + + struct sgdma_descrip __iomem *cdesc = &descbase[0]; + struct sgdma_descrip __iomem *ndesc = &descbase[1];
struct tse_buffer *rxbuffer = NULL;
@@@ -378,13 -379,11 +375,13 @@@ priv->sgdmadesclen, DMA_TO_DEVICE);
- iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)), - &csr->next_descrip); + csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)), + priv->rx_dma_csr, + sgdma_csroffs(next_descrip));
- iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START), - &csr->control); + csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START), + priv->rx_dma_csr, + sgdma_csroffs(control));
return 1; } @@@ -393,32 -392,32 +390,32 @@@ }
static int sgdma_async_write(struct altera_tse_private *priv, - struct sgdma_descrip *desc) + struct sgdma_descrip __iomem *desc) { - struct sgdma_csr *csr = priv->tx_dma_csr; - if (sgdma_txbusy(priv)) return 0;
/* clear control and status */ - iowrite32(0, &csr->control); - iowrite32(0x1f, &csr->status); + csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control)); + csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
dma_sync_single_for_device(priv->device, priv->txdescphys, priv->sgdmadesclen, DMA_TO_DEVICE);
- iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)), - &csr->next_descrip); + csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)), + priv->tx_dma_csr, + sgdma_csroffs(next_descrip));
- iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START), - &csr->control); + csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START), + priv->tx_dma_csr, + sgdma_csroffs(control));
return 1; }
static dma_addr_t sgdma_txphysaddr(struct altera_tse_private *priv, - struct sgdma_descrip *desc) + struct sgdma_descrip __iomem *desc) { dma_addr_t paddr = priv->txdescmem_busaddr; uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc; @@@ -427,7 -426,7 +424,7 @@@
static dma_addr_t sgdma_rxphysaddr(struct altera_tse_private *priv, - struct sgdma_descrip *desc) + struct sgdma_descrip __iomem *desc) { dma_addr_t paddr = priv->rxdescmem_busaddr; uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc; @@@ -516,8 -515,8 +513,8 @@@ queue_rx_peekhead(struct altera_tse_pri */ static int sgdma_rxbusy(struct altera_tse_private *priv) { - struct sgdma_csr *csr = priv->rx_dma_csr; - return ioread32(&csr->status) & SGDMA_STSREG_BUSY; + return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status)) + & SGDMA_STSREG_BUSY; }
/* waits for the tx sgdma to finish its current operation, returns 0 @@@ -526,14 -525,13 +523,14 @@@ static int sgdma_txbusy(struct altera_tse_private *priv) { int delay = 0; - struct sgdma_csr *csr = priv->tx_dma_csr;
/* if DMA is busy, wait for current transaction to finish */ - while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100)) + while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status)) + & SGDMA_STSREG_BUSY) && (delay++ < 100)) udelay(1);
- if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) { + if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status)) + & SGDMA_STSREG_BUSY) { netdev_err(priv->dev, "timeout waiting for tx dma\n"); return 1; } diff --combined drivers/net/ethernet/altera/altera_tse_ethtool.c index 54c25ef,d817e28..be72e1e --- a/drivers/net/ethernet/altera/altera_tse_ethtool.c +++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c @@@ -96,89 -96,54 +96,89 @@@ static void tse_fill_stats(struct net_d u64 *buf) { struct altera_tse_private *priv = netdev_priv(dev); - struct altera_tse_mac *mac = priv->mac_dev; u64 ext;
- buf[0] = ioread32(&mac->frames_transmitted_ok); - buf[1] = ioread32(&mac->frames_received_ok); - buf[2] = ioread32(&mac->frames_check_sequence_errors); - buf[3] = ioread32(&mac->alignment_errors); + buf[0] = csrrd32(priv->mac_dev, + tse_csroffs(frames_transmitted_ok)); + buf[1] = csrrd32(priv->mac_dev, + tse_csroffs(frames_received_ok)); + buf[2] = csrrd32(priv->mac_dev, + tse_csroffs(frames_check_sequence_errors)); + buf[3] = csrrd32(priv->mac_dev, + tse_csroffs(alignment_errors));
/* Extended aOctetsTransmittedOK counter */ - ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32; - ext |= ioread32(&mac->octets_transmitted_ok); + ext = (u64) csrrd32(priv->mac_dev, + tse_csroffs(msb_octets_transmitted_ok)) << 32; + + ext |= csrrd32(priv->mac_dev, + tse_csroffs(octets_transmitted_ok)); buf[4] = ext;
/* Extended aOctetsReceivedOK counter */ - ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32; - ext |= ioread32(&mac->octets_received_ok); + ext = (u64) csrrd32(priv->mac_dev, + tse_csroffs(msb_octets_received_ok)) << 32; + + ext |= csrrd32(priv->mac_dev, + tse_csroffs(octets_received_ok)); buf[5] = ext;
- buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames); - buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames); - buf[8] = ioread32(&mac->if_in_errors); - buf[9] = ioread32(&mac->if_out_errors); - buf[10] = ioread32(&mac->if_in_ucast_pkts); - buf[11] = ioread32(&mac->if_in_multicast_pkts); - buf[12] = ioread32(&mac->if_in_broadcast_pkts); - buf[13] = ioread32(&mac->if_out_discards); - buf[14] = ioread32(&mac->if_out_ucast_pkts); - buf[15] = ioread32(&mac->if_out_multicast_pkts); - buf[16] = ioread32(&mac->if_out_broadcast_pkts); - buf[17] = ioread32(&mac->ether_stats_drop_events); + buf[6] = csrrd32(priv->mac_dev, + tse_csroffs(tx_pause_mac_ctrl_frames)); + buf[7] = csrrd32(priv->mac_dev, + tse_csroffs(rx_pause_mac_ctrl_frames)); + buf[8] = csrrd32(priv->mac_dev, + tse_csroffs(if_in_errors)); + buf[9] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_errors)); + buf[10] = csrrd32(priv->mac_dev, + tse_csroffs(if_in_ucast_pkts)); + buf[11] = csrrd32(priv->mac_dev, + tse_csroffs(if_in_multicast_pkts)); + buf[12] = csrrd32(priv->mac_dev, + tse_csroffs(if_in_broadcast_pkts)); + buf[13] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_discards)); + buf[14] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_ucast_pkts)); + buf[15] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_multicast_pkts)); + buf[16] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_broadcast_pkts)); + buf[17] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_drop_events));
/* Extended etherStatsOctets counter */ - ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32; - ext |= ioread32(&mac->ether_stats_octets); + ext = (u64) csrrd32(priv->mac_dev, + tse_csroffs(msb_ether_stats_octets)) << 32; + ext |= csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_octets)); buf[18] = ext;
- buf[19] = ioread32(&mac->ether_stats_pkts); - buf[20] = ioread32(&mac->ether_stats_undersize_pkts); - buf[21] = ioread32(&mac->ether_stats_oversize_pkts); - buf[22] = ioread32(&mac->ether_stats_pkts_64_octets); - buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets); - buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets); - buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets); - buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets); - buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets); - buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets); - buf[29] = ioread32(&mac->ether_stats_jabbers); - buf[30] = ioread32(&mac->ether_stats_fragments); + buf[19] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts)); + buf[20] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_undersize_pkts)); + buf[21] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_oversize_pkts)); + buf[22] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_64_octets)); + buf[23] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_65to127_octets)); + buf[24] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_128to255_octets)); + buf[25] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_256to511_octets)); + buf[26] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_512to1023_octets)); + buf[27] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_1024to1518_octets)); + buf[28] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_1519tox_octets)); + buf[29] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_jabbers)); + buf[30] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_fragments)); }
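Every 64-bit statistic above is read as an MSB/LSB register pair. A hypothetical helper (not in this patch; name and placement are assumptions) that captures the pattern:

/* Combine a 32-bit MSB/LSB register pair into one 64-bit counter.
 * Both arguments are byte offsets into the MAC CSR space, e.g.
 * tse_csroffs(msb_octets_transmitted_ok) and
 * tse_csroffs(octets_transmitted_ok).
 */
static u64 tse_read_ext_counter(struct altera_tse_private *priv,
				size_t msb_offs, size_t lsb_offs)
{
	u64 val = (u64)csrrd32(priv->mac_dev, msb_offs) << 32;

	return val | csrrd32(priv->mac_dev, lsb_offs);
}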
static int tse_sset_count(struct net_device *dev, int sset) @@@ -213,6 -178,7 +213,6 @@@ static void tse_get_regs(struct net_dev { int i; struct altera_tse_private *priv = netdev_priv(dev); - u32 *tse_mac_regs = (u32 *)priv->mac_dev; u32 *buf = regbuf;
/* Set version to a known value, so ethtool knows @@@ -230,7 -196,7 +230,7 @@@ regs->version = 1;
for (i = 0; i < TSE_NUM_REGS; i++) - buf[i] = ioread32(&tse_mac_regs[i]); + buf[i] = csrrd32(priv->mac_dev, i * 4); }
static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) @@@ -271,5 -237,5 +271,5 @@@ static const struct ethtool_ops tse_eth
void altera_tse_set_ethtool_ops(struct net_device *netdev) { - SET_ETHTOOL_OPS(netdev, &tse_ethtool_ops); + netdev->ethtool_ops = &tse_ethtool_ops; } diff --combined drivers/net/ethernet/emulex/benet/be_main.c index dc19bc5,dcc5e5c..0640f12 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@@ -134,7 -134,7 +134,7 @@@ static void be_queue_free(struct be_ada }
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, - u16 len, u16 entry_size) + u16 len, u16 entry_size) { struct be_dma_mem *mem = &q->dma_mem;
@@@ -154,7 -154,7 +154,7 @@@ static void be_reg_intr_set(struct be_a u32 reg, enabled;
pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, - ®); + ®); enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
if (!enabled && enable) @@@ -165,7 -165,7 +165,7 @@@ return;
pci_write_config_dword(adapter->pdev, - PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg); + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg); }
static void be_intr_set(struct be_adapter *adapter, bool enable) @@@ -206,12 -206,11 +206,11 @@@ static void be_txq_notify(struct be_ada }
static void be_eq_notify(struct be_adapter *adapter, u16 qid, - bool arm, bool clear_int, u16 num_popped) + bool arm, bool clear_int, u16 num_popped) { u32 val = 0; val |= qid & DB_EQ_RING_ID_MASK; - val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << - DB_EQ_RING_ID_EXT_MASK_SHIFT); + val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
if (adapter->eeh_error) return; @@@ -477,7 -476,7 +476,7 @@@ static void populate_be_v2_stats(struc drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr; drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags; adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops; - if (be_roce_supported(adapter)) { + if (be_roce_supported(adapter)) { drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd; drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd; drvs->rx_roce_frames = port_stats->roce_frames_received; @@@ -491,8 -490,7 +490,7 @@@ static void populate_lancer_stats(struc {
struct be_drv_stats *drvs = &adapter->drv_stats; - struct lancer_pport_stats *pport_stats = - pport_stats_from_cmd(adapter); + struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats)); drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo; @@@ -539,8 -537,7 +537,7 @@@ static void accumulate_16bit_val(u32 *a }
static void populate_erx_stats(struct be_adapter *adapter, - struct be_rx_obj *rxo, - u32 erx_stat) + struct be_rx_obj *rxo, u32 erx_stat) { if (!BEx_chip(adapter)) rx_stats(rxo)->rx_drops_no_frags = erx_stat; @@@ -579,7 -576,7 +576,7 @@@ void be_parse_stats(struct be_adapter * }
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *stats) + struct rtnl_link_stats64 *stats) { struct be_adapter *adapter = netdev_priv(netdev); struct be_drv_stats *drvs = &adapter->drv_stats; @@@ -660,7 -657,8 +657,8 @@@ void be_link_status_update(struct be_ad }
static void be_tx_stats_update(struct be_tx_obj *txo, - u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped) + u32 wrb_cnt, u32 copied, u32 gso_segs, + bool stopped) { struct be_tx_stats *stats = tx_stats(txo);
@@@ -676,7 -674,7 +674,7 @@@
/* Determine number of WRB entries needed to xmit data in an skb */ static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb, - bool *dummy) + bool *dummy) { int cnt = (skb->len > skb->data_len);
@@@ -704,7 -702,7 +702,7 @@@ static inline void wrb_fill(struct be_e }
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter, - struct sk_buff *skb) + struct sk_buff *skb) { u8 vlan_prio; u16 vlan_tag; @@@ -733,7 -731,8 +731,8 @@@ static u16 skb_ip_proto(struct sk_buff }
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, - struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan) + struct sk_buff *skb, u32 wrb_cnt, u32 len, + bool skip_hw_vlan) { u16 vlan_tag, proto;
@@@ -774,7 -773,7 +773,7 @@@ }
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, - bool unmap_single) + bool unmap_single) { dma_addr_t dma;
@@@ -791,8 -790,8 +790,8 @@@ }
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq, - struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb, - bool skip_hw_vlan) + struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb, + bool skip_hw_vlan) { dma_addr_t busaddr; int i, copied = 0; @@@ -821,8 -820,7 +820,7 @@@ }
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - const struct skb_frag_struct *frag = - &skb_shinfo(skb)->frags[i]; + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; busaddr = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (dma_mapping_error(dev, busaddr)) @@@ -927,8 -925,7 +925,7 @@@ static int be_vlan_tag_tx_chk(struct be return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid; }
- static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, - struct sk_buff *skb) + static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb) { return BE3_chip(adapter) && be_ipv6_exthdr_check(skb); } @@@ -959,7 -956,7 +956,7 @@@ static struct sk_buff *be_lancer_xmit_w */ if (be_pvid_tagging_enabled(adapter) && veh->h_vlan_proto == htons(ETH_P_8021Q)) - *skip_hw_vlan = true; + *skip_hw_vlan = true;
/* HW has a bug wherein it will calculate CSUM for VLAN * pkts even though it is disabled. @@@ -1077,16 -1074,15 +1074,15 @@@ static int be_change_mtu(struct net_dev { struct be_adapter *adapter = netdev_priv(netdev); if (new_mtu < BE_MIN_MTU || - new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - - (ETH_HLEN + ETH_FCS_LEN))) { + new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) { dev_info(&adapter->pdev->dev, - "MTU must be between %d and %d bytes\n", - BE_MIN_MTU, - (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))); + "MTU must be between %d and %d bytes\n", + BE_MIN_MTU, + (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))); return -EINVAL; } dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n", - netdev->mtu, new_mtu); + netdev->mtu, new_mtu); netdev->mtu = new_mtu; return 0; } @@@ -1098,7 -1094,7 +1094,7 @@@ static int be_vid_config(struct be_adapter *adapter) { u16 vids[BE_NUM_VLANS_SUPPORTED]; - u16 num = 0, i; + u16 num = 0, i = 0; int status = 0;
/* No need to further configure vids if in promiscuous mode */ @@@ -1109,13 -1105,10 +1105,10 @@@ goto set_vlan_promisc;
/* Construct VLAN Table to give to HW */ - for (i = 0; i < VLAN_N_VID; i++) - if (adapter->vlan_tag[i]) - vids[num++] = cpu_to_le16(i); - - status = be_cmd_vlan_config(adapter, adapter->if_handle, - vids, num, 0); + for_each_set_bit(i, adapter->vids, VLAN_N_VID) + vids[num++] = cpu_to_le16(i);
+ status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num); if (status) { /* Set to VLAN promisc mode as setting VLAN filter failed */ if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES) @@@ -1160,16 -1153,16 +1153,16 @@@ static int be_vlan_add_vid(struct net_d if (lancer_chip(adapter) && vid == 0) return status;
- if (adapter->vlan_tag[vid]) + if (test_bit(vid, adapter->vids)) return status;
- adapter->vlan_tag[vid] = 1; + set_bit(vid, adapter->vids); adapter->vlans_added++;
status = be_vid_config(adapter); if (status) { adapter->vlans_added--; - adapter->vlan_tag[vid] = 0; + clear_bit(vid, adapter->vids); }
return status; @@@ -1184,12 -1177,12 +1177,12 @@@ static int be_vlan_rem_vid(struct net_d if (lancer_chip(adapter) && vid == 0) goto ret;
- adapter->vlan_tag[vid] = 0; + clear_bit(vid, adapter->vids); status = be_vid_config(adapter); if (!status) adapter->vlans_added--; else - adapter->vlan_tag[vid] = 1; + set_bit(vid, adapter->vids); ret: return status; } @@@ -1254,8 -1247,10 +1247,10 @@@ static void be_set_rx_mode(struct net_d
/* Set to MCAST promisc mode if setting MULTICAST address fails */ if (status) { - dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n"); - dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n"); + dev_info(&adapter->pdev->dev, + "Exhausted multicast HW filters.\n"); + dev_info(&adapter->pdev->dev, + "Disabling HW multicast filtering.\n"); be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON); } done: @@@ -1287,7 -1282,7 +1282,7 @@@ static int be_set_vf_mac(struct net_dev
if (status) dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n", - mac, vf); + mac, vf); else memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
@@@ -1295,7 -1290,7 +1290,7 @@@ }
static int be_get_vf_config(struct net_device *netdev, int vf, - struct ifla_vf_info *vi) + struct ifla_vf_info *vi) { struct be_adapter *adapter = netdev_priv(netdev); struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; @@@ -1316,8 -1311,7 +1311,7 @@@ return 0; }
- static int be_set_vf_vlan(struct net_device *netdev, - int vf, u16 vlan, u8 qos) + static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) { struct be_adapter *adapter = netdev_priv(netdev); struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; @@@ -1348,8 -1342,7 +1342,7 @@@ return status; }
- static int be_set_vf_tx_rate(struct net_device *netdev, - int vf, int rate) + static int be_set_vf_tx_rate(struct net_device *netdev, int vf, int rate) { struct be_adapter *adapter = netdev_priv(netdev); int status = 0; @@@ -1369,7 -1362,7 +1362,7 @@@ status = be_cmd_config_qos(adapter, rate / 10, vf + 1); if (status) dev_err(&adapter->pdev->dev, - "tx rate %d on VF %d failed\n", rate, vf); + "tx rate %d on VF %d failed\n", rate, vf); else adapter->vf_cfg[vf].tx_rate = rate; return status; @@@ -1469,7 -1462,7 +1462,7 @@@ modify_eqd }
static void be_rx_stats_update(struct be_rx_obj *rxo, - struct be_rx_compl_info *rxcp) + struct be_rx_compl_info *rxcp) { struct be_rx_stats *stats = rx_stats(rxo);
@@@ -1566,7 -1559,8 +1559,8 @@@ static void skb_fill_rx_data(struct be_ skb_frag_set_page(skb, 0, page_info->page); skb_shinfo(skb)->frags[0].page_offset = page_info->page_offset + hdr_len; - skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len); + skb_frag_size_set(&skb_shinfo(skb)->frags[0], + curr_frag_len - hdr_len); skb->data_len = curr_frag_len - hdr_len; skb->truesize += rx_frag_size; skb->tail += hdr_len; @@@ -1725,8 -1719,8 +1719,8 @@@ static void be_parse_rx_compl_v1(struc if (rxcp->vlanf) { rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq, compl); - rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, - compl); + rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, + vlan_tag, compl); } rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl); rxcp->tunneled = @@@ -1757,8 -1751,8 +1751,8 @@@ static void be_parse_rx_compl_v0(struc if (rxcp->vlanf) { rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq, compl); - rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, - compl); + rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, + vlan_tag, compl); } rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl); rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, @@@ -1799,7 -1793,7 +1793,7 @@@ static struct be_rx_compl_info *be_rx_c rxcp->vlan_tag = swab16(rxcp->vlan_tag);
if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) && - !adapter->vlan_tag[rxcp->vlan_tag]) + !test_bit(rxcp->vlan_tag, adapter->vids)) rxcp->vlanf = 0; }
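All the vlan_tag[] conversions in this file follow from one data-structure change: the old u8 vlan_tag[VLAN_N_VID] array (4096 bytes) becomes a bitmap (512 bytes), and be_vid_config() can then walk only the configured VIDs. A minimal sketch of the pattern, assuming adapter->vids is declared with DECLARE_BITMAP(vids, VLAN_N_VID):

#include <linux/bitops.h>
#include <linux/if_vlan.h>

DECLARE_BITMAP(vids, VLAN_N_VID);	/* 4096 bits == 512 bytes */
__le16 table[64];			/* stand-in for BE_NUM_VLANS_SUPPORTED */
int vid, num = 0;

set_bit(100, vids);			/* .ndo_vlan_rx_add_vid  */
for_each_set_bit(vid, vids, VLAN_N_VID)	/* build the table for FW */
	table[num++] = cpu_to_le16(vid);
clear_bit(100, vids);			/* .ndo_vlan_rx_kill_vid */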
@@@ -1915,7 -1909,7 +1909,7 @@@ static struct be_eth_tx_compl *be_tx_co }
static u16 be_tx_compl_process(struct be_adapter *adapter, - struct be_tx_obj *txo, u16 last_index) + struct be_tx_obj *txo, u16 last_index) { struct be_queue_info *txq = &txo->q; struct be_eth_wrb *wrb; @@@ -2122,7 -2116,7 +2116,7 @@@ static int be_evt_queues_create(struct
eq = &eqo->q; rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN, - sizeof(struct be_eq_entry)); + sizeof(struct be_eq_entry)); if (rc) return rc;
@@@ -2155,7 -2149,7 +2149,7 @@@ static int be_mcc_queues_create(struct
cq = &adapter->mcc_obj.cq; if (be_queue_alloc(adapter, cq, MCC_CQ_LEN, - sizeof(struct be_mcc_compl))) + sizeof(struct be_mcc_compl))) goto err;
/* Use the default EQ for MCC completions */ @@@ -2275,7 -2269,7 +2269,7 @@@ static int be_rx_cqs_create(struct be_a rxo->adapter = adapter; cq = &rxo->cq; rc = be_queue_alloc(adapter, cq, RX_CQ_LEN, - sizeof(struct be_eth_rx_compl)); + sizeof(struct be_eth_rx_compl)); if (rc) return rc;
@@@ -2339,7 -2333,7 +2333,7 @@@ static inline bool do_gro(struct be_rx_ }
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, - int budget, int polling) + int budget, int polling) { struct be_adapter *adapter = rxo->adapter; struct be_queue_info *rx_cq = &rxo->cq; @@@ -2365,7 -2359,7 +2359,7 @@@ * promiscuous mode on some skews */ if (unlikely(rxcp->port != adapter->port_num && - !lancer_chip(adapter))) { + !lancer_chip(adapter))) { be_rx_compl_discard(rxo, rxcp); goto loop_continue; } @@@ -2405,8 -2399,9 +2399,9 @@@ static bool be_process_tx(struct be_ada if (!txcp) break; num_wrbs += be_tx_compl_process(adapter, txo, - AMAP_GET_BITS(struct amap_eth_tx_compl, - wrb_index, txcp)); + AMAP_GET_BITS(struct + amap_eth_tx_compl, + wrb_index, txcp)); }
if (work_done) { @@@ -2416,7 -2411,7 +2411,7 @@@ /* As Tx wrbs have been freed up, wake up netdev queue * if it was stopped due to lack of tx wrbs. */ if (__netif_subqueue_stopped(adapter->netdev, idx) && - atomic_read(&txo->q.used) < txo->q.len / 2) { + atomic_read(&txo->q.used) < txo->q.len / 2) { netif_wake_subqueue(adapter->netdev, idx); }
@@@ -2510,9 -2505,9 +2505,9 @@@ void be_detect_error(struct be_adapter sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); if (sliport_status & SLIPORT_STATUS_ERR_MASK) { sliport_err1 = ioread32(adapter->db + - SLIPORT_ERROR1_OFFSET); + SLIPORT_ERROR1_OFFSET); sliport_err2 = ioread32(adapter->db + - SLIPORT_ERROR2_OFFSET); + SLIPORT_ERROR2_OFFSET); adapter->hw_error = true; /* Do not log error messages if its a FW reset */ if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 && @@@ -2531,13 -2526,13 +2526,13 @@@ } } else { pci_read_config_dword(adapter->pdev, - PCICFG_UE_STATUS_LOW, &ue_lo); + PCICFG_UE_STATUS_LOW, &ue_lo); pci_read_config_dword(adapter->pdev, - PCICFG_UE_STATUS_HIGH, &ue_hi); + PCICFG_UE_STATUS_HIGH, &ue_hi); pci_read_config_dword(adapter->pdev, - PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask); + PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask); pci_read_config_dword(adapter->pdev, - PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask); + PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
ue_lo = (ue_lo & ~ue_lo_mask); ue_hi = (ue_hi & ~ue_hi_mask); @@@ -2624,7 -2619,7 +2619,7 @@@ fail }
static inline int be_msix_vec_get(struct be_adapter *adapter, - struct be_eq_obj *eqo) + struct be_eq_obj *eqo) { return adapter->msix_entries[eqo->msix_idx].vector; } @@@ -2648,7 -2643,7 +2643,7 @@@ err_msix for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--) free_irq(be_msix_vec_get(adapter, eqo), eqo); dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n", - status); + status); be_msix_disable(adapter); return status; } @@@ -2774,7 -2769,8 +2769,8 @@@ static int be_rx_qs_create(struct be_ad { struct be_rx_obj *rxo; int rc, i, j; - u8 rsstable[128]; + u8 rss_hkey[RSS_HASH_KEY_LEN]; + struct rss_info *rss = &adapter->rss_info;
for_all_rx_queues(adapter, rxo, i) { rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN, @@@ -2799,31 -2795,36 +2795,36 @@@ }
if (be_multi_rxq(adapter)) { - for (j = 0; j < 128; j += adapter->num_rx_qs - 1) { + for (j = 0; j < RSS_INDIR_TABLE_LEN; + j += adapter->num_rx_qs - 1) { for_all_rss_queues(adapter, rxo, i) { - if ((j + i) >= 128) + if ((j + i) >= RSS_INDIR_TABLE_LEN) break; - rsstable[j + i] = rxo->rss_id; + rss->rsstable[j + i] = rxo->rss_id; + rss->rss_queue[j + i] = i; } } - adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 | - RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6; + rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 | + RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
if (!BEx_chip(adapter)) - adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 | - RSS_ENABLE_UDP_IPV6; + rss->rss_flags |= RSS_ENABLE_UDP_IPV4 | + RSS_ENABLE_UDP_IPV6; } else { /* Disable RSS, if only default RX Q is created */ - adapter->rss_flags = RSS_ENABLE_NONE; + rss->rss_flags = RSS_ENABLE_NONE; }
- rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags, - 128); + get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN); + rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags, + 128, rss_hkey); if (rc) { - adapter->rss_flags = RSS_ENABLE_NONE; + rss->rss_flags = RSS_ENABLE_NONE; return rc; }
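The RSS rework above gathers the indirection table, queue map, flags, and hash key into struct rss_info, and passes a freshly randomized hash key down to be_cmd_rss_config(). The table-fill logic in isolation: queue 0 stays the default (non-RSS) queue, so the remaining num_rx_qs - 1 RSS queues are spread round-robin across the RSS_INDIR_TABLE_LEN (128) entries. A sketch, with rss_id_of() standing in for the per-queue rss_id lookup (hypothetical helper):

u8 rsstable[RSS_INDIR_TABLE_LEN];
u8 hkey[RSS_HASH_KEY_LEN];
int num_rx_qs = 5;	/* example: 1 default queue + 4 RSS queues */
int i, j;

for (j = 0; j < RSS_INDIR_TABLE_LEN; j += num_rx_qs - 1)
	for (i = 0; i < num_rx_qs - 1 && (j + i) < RSS_INDIR_TABLE_LEN; i++)
		rsstable[j + i] = rss_id_of(i);	/* hypothetical lookup */

get_random_bytes(hkey, RSS_HASH_KEY_LEN);	/* per-boot random Toeplitz key */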
+ memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN); + /* First time posting */ for_all_rx_queues(adapter, rxo, i) be_post_rx_frags(rxo, GFP_KERNEL); @@@ -2896,7 -2897,8 +2897,8 @@@ static int be_setup_wol(struct be_adapt
if (enable) { status = pci_write_config_dword(adapter->pdev, - PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK); + PCICFG_PM_CONTROL_OFFSET, + PCICFG_PM_CONTROL_MASK); if (status) { dev_err(&adapter->pdev->dev, "Could not enable Wake-on-lan\n"); @@@ -2905,7 -2907,8 +2907,8 @@@ return status; } status = be_cmd_enable_magic_wol(adapter, - adapter->netdev->dev_addr, &cmd); + adapter->netdev->dev_addr, + &cmd); pci_enable_wake(adapter->pdev, PCI_D3hot, 1); pci_enable_wake(adapter->pdev, PCI_D3cold, 1); } else { @@@ -2944,7 -2947,8 +2947,8 @@@ static int be_vf_eth_addr_config(struc
if (status) dev_err(&adapter->pdev->dev, - "Mac address assignment failed for VF %d\n", vf); + "Mac address assignment failed for VF %d\n", + vf); else memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
@@@ -3086,9 -3090,11 +3090,11 @@@ static int be_vfs_if_create(struct be_a
/* If a FW profile exists, then cap_flags are updated */ en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED | - BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST); - status = be_cmd_if_create(adapter, cap_flags, en_flags, - &vf_cfg->if_handle, vf + 1); + BE_IF_FLAGS_BROADCAST | + BE_IF_FLAGS_MULTICAST); + status = + be_cmd_if_create(adapter, cap_flags, en_flags, + &vf_cfg->if_handle, vf + 1); if (status) goto err; } @@@ -3594,8 -3600,8 +3600,8 @@@ static void be_netpoll(struct net_devic static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
static bool be_flash_redboot(struct be_adapter *adapter, - const u8 *p, u32 img_start, int image_size, - int hdr_size) + const u8 *p, u32 img_start, int image_size, + int hdr_size) { u32 crc_offset; u8 flashed_crc[4]; @@@ -3605,11 -3611,10 +3611,10 @@@
p += crc_offset;
- status = be_cmd_get_flash_crc(adapter, flashed_crc, - (image_size - 4)); + status = be_cmd_get_flash_crc(adapter, flashed_crc, (image_size - 4)); if (status) { dev_err(&adapter->pdev->dev, - "could not get crc from flash, not flashing redboot\n"); + "could not get crc from flash, not flashing redboot\n"); return false; }
@@@ -3649,8 -3654,8 +3654,8 @@@ static bool is_comp_in_ufi(struct be_ad }
static struct flash_section_info *get_fsec_info(struct be_adapter *adapter, - int header_size, - const struct firmware *fw) + int header_size, + const struct firmware *fw) { struct flash_section_info *fsec = NULL; const u8 *p = fw->data; @@@ -3666,7 -3671,7 +3671,7 @@@ }
static int be_flash(struct be_adapter *adapter, const u8 *img, - struct be_dma_mem *flash_cmd, int optype, int img_size) + struct be_dma_mem *flash_cmd, int optype, int img_size) { u32 total_bytes = 0, flash_op, num_bytes = 0; int status = 0; @@@ -3693,7 -3698,7 +3698,7 @@@ memcpy(req->data_buf, img, num_bytes); img += num_bytes; status = be_cmd_write_flashrom(adapter, flash_cmd, optype, - flash_op, num_bytes); + flash_op, num_bytes); if (status) { if (status == ILLEGAL_IOCTL_REQ && optype == OPTYPE_PHY_FW) @@@ -3708,10 -3713,8 +3713,8 @@@
/* For BE2, BE3 and BE3-R */ static int be_flash_BEx(struct be_adapter *adapter, - const struct firmware *fw, - struct be_dma_mem *flash_cmd, - int num_of_images) - + const struct firmware *fw, + struct be_dma_mem *flash_cmd, int num_of_images) { int status = 0, i, filehdr_size = 0; int img_hdrs_size = (num_of_images * sizeof(struct image_hdr)); @@@ -3793,8 -3796,10 +3796,10 @@@
if (pflashcomp[i].optype == OPTYPE_REDBOOT) { redboot = be_flash_redboot(adapter, fw->data, - pflashcomp[i].offset, pflashcomp[i].size, - filehdr_size + img_hdrs_size); + pflashcomp[i].offset, + pflashcomp[i].size, + filehdr_size + + img_hdrs_size); if (!redboot) continue; } @@@ -3805,7 -3810,7 +3810,7 @@@ return -1;
status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype, - pflashcomp[i].size); + pflashcomp[i].size); if (status) { dev_err(&adapter->pdev->dev, "Flashing section type %d failed.\n", @@@ -3817,8 -3822,8 +3822,8 @@@ }
static int be_flash_skyhawk(struct be_adapter *adapter, - const struct firmware *fw, - struct be_dma_mem *flash_cmd, int num_of_images) + const struct firmware *fw, + struct be_dma_mem *flash_cmd, int num_of_images) { int status = 0, i, filehdr_size = 0; int img_offset, img_size, img_optype, redboot; @@@ -3866,8 -3871,9 +3871,9 @@@
if (img_optype == OPTYPE_REDBOOT) { redboot = be_flash_redboot(adapter, fw->data, - img_offset, img_size, - filehdr_size + img_hdrs_size); + img_offset, img_size, + filehdr_size + + img_hdrs_size); if (!redboot) continue; } @@@ -3889,7 -3895,7 +3895,7 @@@ }
static int lancer_fw_download(struct be_adapter *adapter, - const struct firmware *fw) + const struct firmware *fw) { #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024) #define LANCER_FW_DOWNLOAD_LOCATION "/prg" @@@ -3955,7 -3961,7 +3961,7 @@@ }
dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va, - flash_cmd.dma); + flash_cmd.dma); if (status) { dev_err(&adapter->pdev->dev, "Firmware load error. " @@@ -3976,9 -3982,8 +3982,8 @@@ goto lancer_fw_exit; } } else if (change_status != LANCER_NO_RESET_NEEDED) { - dev_err(&adapter->pdev->dev, - "System reboot required for new FW" - " to be active\n"); + dev_err(&adapter->pdev->dev, + "System reboot required for new FW to be active\n"); }
dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n"); @@@ -4042,7 -4047,7 +4047,7 @@@ static int be_fw_download(struct be_ada switch (ufi_type) { case UFI_TYPE4: status = be_flash_skyhawk(adapter, fw, - &flash_cmd, num_imgs); + &flash_cmd, num_imgs); break; case UFI_TYPE3R: status = be_flash_BEx(adapter, fw, &flash_cmd, @@@ -4112,8 -4117,7 +4117,7 @@@ fw_exit return status; }
- static int be_ndo_bridge_setlink(struct net_device *dev, - struct nlmsghdr *nlh) + static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh) { struct be_adapter *adapter = netdev_priv(dev); struct nlattr *attr, *br_spec; @@@ -4155,8 -4159,7 +4159,7 @@@ err }
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev, - u32 filter_mask) + struct net_device *dev, u32 filter_mask) { struct be_adapter *adapter = netdev_priv(dev); int status = 0; @@@ -4301,7 -4304,7 +4304,7 @@@ static void be_netdev_init(struct net_d
netdev->netdev_ops = &be_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &be_ethtool_ops); + netdev->ethtool_ops = &be_ethtool_ops; }
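This is the same tree-wide conversion as in altera_tse_ethtool.c above: SET_ETHTOOL_OPS() was a trivial wrapper slated for removal, roughly:

/* the macro being retired, approximately as it was defined */
#define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))

so the direct assignment is behavior-identical.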
static void be_unmap_pci_bars(struct be_adapter *adapter) @@@ -4870,7 -4873,7 +4873,7 @@@ static void be_shutdown(struct pci_dev }
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev, - pci_channel_state_t state) + pci_channel_state_t state) { struct be_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; @@@ -4949,12 -4952,6 +4952,12 @@@ static void be_eeh_resume(struct pci_de if (status) goto err;
+ /* On some BE3 FW versions, after a HW reset, + * interrupts will remain disabled for each function. + * So, explicitly enable interrupts + */ + be_intr_set(adapter, true); + /* tell fw we're ready to fire cmds */ status = be_cmd_fw_init(adapter); if (status) diff --combined drivers/net/ethernet/mellanox/mlx4/cmd.c index 92d3249,357dcb0..161bbc8 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@@ -212,8 -212,7 +212,7 @@@ static int mlx4_comm_cmd_poll(struct ml
/* First, verify that the master reports correct status */ if (comm_pending(dev)) { - mlx4_warn(dev, "Communication channel is not idle." - "my toggle is %d (cmd:0x%x)\n", + mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n", priv->cmd.comm_toggle, cmd); return -EAGAIN; } @@@ -422,9 -421,8 +421,8 @@@ static int mlx4_slave_cmd(struct mlx4_d *out_param = be64_to_cpu(vhcr->out_param); else { - mlx4_err(dev, "response expected while" - "output mailbox is NULL for " - "command 0x%x\n", op); + mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n", + op); vhcr->status = CMD_STAT_BAD_PARAM; } } @@@ -439,16 -437,15 +437,15 @@@ *out_param = be64_to_cpu(vhcr->out_param); else { - mlx4_err(dev, "response expected while" - "output mailbox is NULL for " - "command 0x%x\n", op); + mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n", + op); vhcr->status = CMD_STAT_BAD_PARAM; } } ret = mlx4_status_to_errno(vhcr->status); } else - mlx4_err(dev, "failed execution of VHCR_POST command" - "opcode 0x%x\n", op); + mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", + op); }
mutex_unlock(&priv->cmd.slave_cmd_mutex); @@@ -476,6 -473,13 +473,13 @@@ static int mlx4_cmd_poll(struct mlx4_de goto out; }
+ if (out_is_imm && !out_param) { + mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n", + op); + err = -EINVAL; + goto out; + } + err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0); if (err) @@@ -554,6 -558,13 +558,13 @@@ static int mlx4_cmd_wait(struct mlx4_de cmd->free_head = context->next; spin_unlock(&cmd->context_lock);
+ if (out_is_imm && !out_param) { + mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n", + op); + err = -EINVAL; + goto out; + } + init_completion(&context->done);
mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0, @@@ -625,9 -636,8 +636,8 @@@ static int mlx4_ACCESS_MEM(struct mlx4_
if ((slave_addr & 0xfff) | (master_addr & 0xfff) | (slave & ~0x7f) | (size & 0xff)) { - mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx " - "master_addr:0x%llx slave_id:%d size:%d\n", - slave_addr, master_addr, slave, size); + mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n", + slave_addr, master_addr, slave, size); return -EINVAL; }
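The out_is_imm checks added above in mlx4_cmd_poll() and mlx4_cmd_wait() guard commands that return their result in the immediate out_param rather than in a mailbox; for those, a NULL out_param pointer was previously dereferenced on completion. A hedged caller-side sketch of the convention being protected (opcode illustrative only):

u64 out_param = 0;
int err;

/* out_is_imm commands write their result into *out_param, so the
 * pointer must be non-NULL; the new check turns a crash into -EINVAL.
 */
err = mlx4_cmd_imm(dev, 0, &out_param, 0, 0, MLX4_CMD_ALLOC_RES,
		   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (!err)
	pr_info("allocated resource id %llu\n", out_param);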
@@@ -788,8 -798,7 +798,7 @@@ static int mlx4_MAD_IFC_wrapper(struct ((smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) || (smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED && smp->method == IB_MGMT_METHOD_SET))) { - mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, " - "class 0x%x, method 0x%x for attr 0x%x. Rejecting\n", + mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x for attr 0x%x - Rejecting\n", slave, smp->method, smp->mgmt_class, be16_to_cpu(smp->attr_id)); return -EPERM; @@@ -1253,12 -1262,12 +1262,12 @@@ static struct mlx4_cmd_info cmd_info[] }, { .opcode = MLX4_CMD_UPDATE_QP, - .has_inbox = false, + .has_inbox = true, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, - .wrapper = mlx4_CMD_EPERM_wrapper + .wrapper = mlx4_UPDATE_QP_wrapper }, { .opcode = MLX4_CMD_GET_OP_REQ, @@@ -1409,8 -1418,8 +1418,8 @@@ static int mlx4_master_process_vhcr(str ALIGN(sizeof(struct mlx4_vhcr_cmd), MLX4_ACCESS_MEM_ALIGN), 1); if (ret) { - mlx4_err(dev, "%s:Failed reading vhcr" - "ret: 0x%x\n", __func__, ret); + mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n", + __func__, ret); kfree(vhcr); return ret; } @@@ -1461,9 -1470,8 +1470,8 @@@
/* Apply permission and bound checks if applicable */ if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) { - mlx4_warn(dev, "Command:0x%x from slave: %d failed protection " - "checks for resource_id:%d\n", vhcr->op, slave, - vhcr->in_modifier); + mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n", + vhcr->op, slave, vhcr->in_modifier); vhcr_cmd->status = CMD_STAT_BAD_OP; goto out_status; } @@@ -1502,8 -1510,7 +1510,7 @@@ }
if (err) { - mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with" - " error:%d, status %d\n", + mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n", vhcr->op, slave, vhcr->errno, err); vhcr_cmd->status = mlx4_errno_to_status(err); goto out_status; @@@ -1537,8 -1544,8 +1544,8 @@@ out_status __func__); else if (vhcr->e_bit && mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe)) - mlx4_warn(dev, "Failed to generate command completion " - "eqe for slave %d\n", slave); + mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n", + slave); }
out: @@@ -1577,8 -1584,9 +1584,9 @@@ static int mlx4_master_immediate_activa
mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n", slave, port); - mlx4_dbg(dev, "vlan %d QoS %d link down %d\n", vp_admin->default_vlan, - vp_admin->default_qos, vp_admin->link_state); + mlx4_dbg(dev, "vlan %d QoS %d link down %d\n", + vp_admin->default_vlan, vp_admin->default_qos, + vp_admin->link_state);
work = kzalloc(sizeof(*work), GFP_KERNEL); if (!work) @@@ -1591,7 -1599,7 +1599,7 @@@ &admin_vlan_ix); if (err) { kfree(work); - mlx4_warn((&priv->dev), + mlx4_warn(&priv->dev, "No vlan resources slave %d, port %d\n", slave, port); return err; @@@ -1600,7 -1608,7 +1608,7 @@@ admin_vlan_ix = NO_INDX; } work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN; - mlx4_dbg((&(priv->dev)), + mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n", (int)(vp_admin->default_vlan), admin_vlan_ix, slave, port); @@@ -1661,12 -1669,12 +1669,12 @@@ static int mlx4_master_activate_admin_s vp_admin->default_vlan, &(vp_oper->vlan_idx)); if (err) { vp_oper->vlan_idx = NO_INDX; - mlx4_warn((&priv->dev), + mlx4_warn(&priv->dev, "No vlan resources slave %d, port %d\n", slave, port); return err; } - mlx4_dbg((&(priv->dev)), "alloc vlan %d idx %d slave %d port %d\n", + mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n", (int)(vp_oper->state.default_vlan), vp_oper->vlan_idx, slave, port); } @@@ -1677,12 -1685,12 +1685,12 @@@ if (0 > vp_oper->mac_idx) { err = vp_oper->mac_idx; vp_oper->mac_idx = NO_INDX; - mlx4_warn((&priv->dev), + mlx4_warn(&priv->dev, "No mac resources slave %d, port %d\n", slave, port); return err; } - mlx4_dbg((&(priv->dev)), "alloc mac %llx idx %d slave %d port %d\n", + mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n", vp_oper->state.mac, vp_oper->mac_idx, slave, port); } } @@@ -1731,8 -1739,8 +1739,8 @@@ static void mlx4_master_do_cmd(struct m slave_state[slave].comm_toggle ^= 1; reply = (u32) slave_state[slave].comm_toggle << 31; if (toggle != slave_state[slave].comm_toggle) { - mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER" "STATE COMPROMISIED ***\n", toggle, slave); + mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n", + toggle, slave); goto reset_slave; } if (cmd == MLX4_COMM_CMD_RESET) { @@@ -1759,8 -1767,8 +1767,8 @@@ /*command from slave in the middle of FLR*/ if (cmd != MLX4_COMM_CMD_RESET && MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) { - mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) " "in the middle of FLR\n", slave, cmd); + mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n", + slave, cmd); return; }
@@@ -1798,8 -1806,8 +1806,8 @@@
mutex_lock(&priv->cmd.slave_cmd_mutex); if (mlx4_master_process_vhcr(dev, slave, NULL)) { - mlx4_err(dev, "Failed processing vhcr for slave:%d," - " resetting slave.\n", slave); + mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n", + slave); mutex_unlock(&priv->cmd.slave_cmd_mutex); goto reset_slave; } @@@ -1816,8 -1824,7 +1824,7 @@@ is_going_down = 1; spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); if (is_going_down) { - mlx4_warn(dev, "Slave is going down aborting command(%d)" - " executing from slave:%d\n", + mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n", cmd, slave); return; } @@@ -1880,9 -1887,8 +1887,8 @@@ void mlx4_master_comm_channel(struct wo if (toggle != slt) { if (master->slave_state[slave].comm_toggle != slt) { - printk(KERN_INFO "slave %d out of sync." - " read toggle %d, state toggle %d. " - "Resynching.\n", slave, slt, + printk(KERN_INFO "slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n", + slave, slt, master->slave_state[slave].comm_toggle); master->slave_state[slave].comm_toggle = slt; @@@ -1896,8 -1902,7 +1902,7 @@@ }
if (reported && reported != served) - mlx4_warn(dev, "Got command event with bitmask from %d slaves" - " but %d were served\n", + mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n", reported, served);
if (mlx4_ARM_COMM_CHANNEL(dev)) @@@ -1953,7 -1958,7 +1958,7 @@@ int mlx4_multi_func_init(struct mlx4_de ioremap(pci_resource_start(dev->pdev, 2) + MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE); if (!priv->mfunc.comm) { - mlx4_err(dev, "Couldn't map communication vector.\n"); + mlx4_err(dev, "Couldn't map communication vector\n"); goto err_vhcr; }
@@@ -2080,7 -2085,7 +2085,7 @@@ int mlx4_cmd_init(struct mlx4_dev *dev priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE, MLX4_HCR_SIZE); if (!priv->cmd.hcr) { - mlx4_err(dev, "Couldn't map command register.\n"); + mlx4_err(dev, "Couldn't map command register\n"); return -ENOMEM; } } diff --combined drivers/net/ethernet/mellanox/mlx4/mlx4.h index 212cea4,52c1e7d..9dd1b30 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@@ -216,18 -216,19 +216,19 @@@ extern int mlx4_debug_level #define mlx4_debug_level (0) #endif /* CONFIG_MLX4_DEBUG */
- #define mlx4_dbg(mdev, format, arg...) \ + #define mlx4_dbg(mdev, format, ...) \ do { \ if (mlx4_debug_level) \ - dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \ + dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format, \ + ##__VA_ARGS__); \ } while (0)
- #define mlx4_err(mdev, format, arg...) \ - dev_err(&mdev->pdev->dev, format, ##arg) - #define mlx4_info(mdev, format, arg...) \ - dev_info(&mdev->pdev->dev, format, ##arg) - #define mlx4_warn(mdev, format, arg...) \ - dev_warn(&mdev->pdev->dev, format, ##arg) + #define mlx4_err(mdev, format, ...) \ + dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__) + #define mlx4_info(mdev, format, ...) \ + dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__) + #define mlx4_warn(mdev, format, ...) \ + dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
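Two independent cleanups in these logging macros: the GNU-specific named-variadic form (arg..., ##arg) becomes the C99 form (..., ##__VA_ARGS__), and the mdev parameter gains parentheses. The parentheses matter as soon as a caller passes anything other than a plain identifier; a hypothetical call showing the failure mode avoided:

/* struct mlx4_dev *mdev_a, *mdev_b; bool flag; */
mlx4_err(flag ? mdev_a : mdev_b, "oops\n");

/* old body: dev_err(&flag ? mdev_a : mdev_b->pdev->dev, "oops\n");
 *           parses as (&flag) ? ... and is not even type-correct
 * new body: dev_err(&(flag ? mdev_a : mdev_b)->pdev->dev, "oops\n");
 *           a single well-formed expression
 */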
extern int mlx4_log_num_mgm_entry_size; extern int log_mtts_per_seg; @@@ -1195,12 -1196,6 +1196,12 @@@ int mlx4_QP_ATTACH_wrapper(struct mlx4_ struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd);
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); + int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, diff --combined drivers/net/ethernet/mellanox/mlx4/qp.c index fbd32af,9bdb6ae..1d3234a --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@@ -264,8 -264,8 +264,8 @@@ void mlx4_qp_release_range(struct mlx4_ MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (err) { - mlx4_warn(dev, "Failed to release qp range" - " base:%d cnt:%d\n", base_qpn, cnt); + mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n", + base_qpn, cnt); } } else __mlx4_qp_release_range(dev, base_qpn, cnt); @@@ -389,41 -389,6 +389,41 @@@ err_icm
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
+#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC +int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp, + enum mlx4_update_qp_attr attr, + struct mlx4_update_qp_params *params) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_update_qp_context *cmd; + u64 pri_addr_path_mask = 0; + int err = 0; + + if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS)) + return -EINVAL; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + cmd = (struct mlx4_update_qp_context *)mailbox->buf; + + if (attr & MLX4_UPDATE_QP_SMAC) { + pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX; + cmd->qp_context.pri_path.grh_mylmc = params->smac_index; + } + + cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask); + + err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0, + MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_update_qp); + void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) { struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; @@@ -612,8 -577,7 +612,7 @@@ int mlx4_qp_to_ready(struct mlx4_dev *d err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], context, 0, 0, qp); if (err) { - mlx4_err(dev, "Failed to bring QP to state: " "%d with error: %d\n", + mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n", states[i + 1], err); return err; } diff --combined drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 8f1254a,a95df9d..ce0e249 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@@ -3880,7 -3880,7 +3880,7 @@@ static int add_eth_header(struct mlx4_d } } if (!be_mac) { - pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n", + pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n", port); return -EINVAL; } @@@ -3895,60 -3895,6 +3895,60 @@@
}
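For the mlx4_update_qp() API introduced in qp.c above, a caller-side sketch (variable names illustrative), updating a QP's source-MAC table index without tearing the QP down:

struct mlx4_update_qp_params params = {
	.smac_index = new_smac_index,	/* hypothetical index into the port MAC table */
};
int err;

err = mlx4_update_qp(dev, qp, MLX4_UPDATE_QP_SMAC, &params);
if (err)
	mlx4_warn(dev, "UPDATE_QP of qpn 0x%x failed (%d)\n", qp->qpn, err);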
+#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX) +int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd_info) +{ + int err; + u32 qpn = vhcr->in_modifier & 0xffffff; + struct res_qp *rqp; + u64 mac; + unsigned port; + u64 pri_addr_path_mask; + struct mlx4_update_qp_context *cmd; + int smac_index; + + cmd = (struct mlx4_update_qp_context *)inbox->buf; + + pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask); + if (cmd->qp_mask || cmd->secondary_addr_path_mask || + (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED)) + return -EPERM; + + /* Just change the smac for the QP */ + err = get_res(dev, slave, qpn, RES_QP, &rqp); + if (err) { + mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave); + return err; + } + + port = (rqp->sched_queue >> 6 & 1) + 1; + smac_index = cmd->qp_context.pri_path.grh_mylmc; + err = mac_find_smac_ix_in_slave(dev, slave, port, + smac_index, &mac); + if (err) { + mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n", + qpn, smac_index); + goto err_mac; + } + + err = mlx4_cmd(dev, inbox->dma, + vhcr->in_modifier, 0, + MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) { + mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn); + goto err_mac; + } + +err_mac: + put_res(dev, slave, qpn, RES_QP); + return err; +} + int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@@ -3977,7 -3923,7 +3977,7 @@@ qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; err = get_res(dev, slave, qpn, RES_QP, &rqp); if (err) { - pr_err("Steering rule with qpn 0x%x rejected.\n", qpn); + pr_err("Steering rule with qpn 0x%x rejected\n", qpn); return err; } rule_header = (struct _rule_hw *)(ctrl + 1); @@@ -3995,7 -3941,7 +3995,7 @@@ case MLX4_NET_TRANS_RULE_ID_IPV4: case MLX4_NET_TRANS_RULE_ID_TCP: case MLX4_NET_TRANS_RULE_ID_UDP: - pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n"); + pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n"); if (add_eth_header(dev, slave, inbox, rlist, header_id)) { err = -EINVAL; goto err_put; @@@ -4004,7 -3950,7 +4004,7 @@@ sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2; break; default: - pr_err("Corrupted mailbox.\n"); + pr_err("Corrupted mailbox\n"); err = -EINVAL; goto err_put; } @@@ -4018,7 -3964,7 +4018,7 @@@
err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn); if (err) { - mlx4_err(dev, "Fail to add flow steering resources.\n "); + mlx4_err(dev, "Fail to add flow steering resources\n"); /* detach rule*/ mlx4_cmd(dev, vhcr->out_param, 0, 0, MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, @@@ -4056,7 -4002,7 +4056,7 @@@ int mlx4_QP_FLOW_STEERING_DETACH_wrappe
err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0); if (err) { - mlx4_err(dev, "Fail to remove flow steering resources.\n "); + mlx4_err(dev, "Fail to remove flow steering resources\n"); goto out; }
@@@ -4185,8 -4131,8 +4185,8 @@@ static void rem_slave_qps(struct mlx4_d
err = move_all_busy(dev, slave, RES_QP); if (err) - mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy" - "for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(qp, tmp, qp_list, com.list) { @@@ -4224,10 -4170,8 +4224,8 @@@ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) - mlx4_dbg(dev, "rem_slave_qps: failed" - " to move slave %d qpn %d to" - " reset\n", slave, - qp->local_qpn); + mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n", + slave, qp->local_qpn); atomic_dec(&qp->rcq->ref_count); atomic_dec(&qp->scq->ref_count); atomic_dec(&qp->mtt->ref_count); @@@ -4261,8 -4205,8 +4259,8 @@@ static void rem_slave_srqs(struct mlx4_
err = move_all_busy(dev, slave, RES_SRQ); if (err) - mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(srq, tmp, srq_list, com.list) { @@@ -4292,9 -4236,7 +4290,7 @@@ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) - mlx4_dbg(dev, "rem_slave_srqs: failed" - " to move slave %d srq %d to" - " SW ownership\n", + mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n", slave, srqn);
atomic_dec(&srq->mtt->ref_count); @@@ -4329,8 -4271,8 +4325,8 @@@ static void rem_slave_cqs(struct mlx4_d
err = move_all_busy(dev, slave, RES_CQ); if (err) - mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(cq, tmp, cq_list, com.list) { @@@ -4360,9 -4302,7 +4356,7 @@@ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) - mlx4_dbg(dev, "rem_slave_cqs: failed" - " to move slave %d cq %d to" - " SW ownership\n", + mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n", slave, cqn); atomic_dec(&cq->mtt->ref_count); state = RES_CQ_ALLOCATED; @@@ -4394,8 -4334,8 +4388,8 @@@ static void rem_slave_mrs(struct mlx4_d
err = move_all_busy(dev, slave, RES_MPT); if (err) - mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) { @@@ -4430,9 -4370,7 +4424,7 @@@ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) - mlx4_dbg(dev, "rem_slave_mrs: failed" - " to move slave %d mpt %d to" - " SW ownership\n", + mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n", slave, mptn); if (mpt->mtt) atomic_dec(&mpt->mtt->ref_count); @@@ -4464,8 -4402,8 +4456,8 @@@ static void rem_slave_mtts(struct mlx4_
err = move_all_busy(dev, slave, RES_MTT); if (err) - mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) { @@@ -4567,8 -4505,8 +4559,8 @@@ static void rem_slave_eqs(struct mlx4_d
err = move_all_busy(dev, slave, RES_EQ); if (err) - mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(eq, tmp, eq_list, com.list) { @@@ -4600,9 -4538,8 +4592,8 @@@ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); if (err) - mlx4_dbg(dev, "rem_slave_eqs: failed" - " to move slave %d eqs %d to" - " SW ownership\n", slave, eqn); + mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n", + slave, eqn); mlx4_free_cmd_mailbox(dev, mailbox); atomic_dec(&eq->mtt->ref_count); state = RES_EQ_RESERVED; @@@ -4631,8 -4568,8 +4622,8 @@@ static void rem_slave_counters(struct m
err = move_all_busy(dev, slave, RES_COUNTER); if (err) - mlx4_warn(dev, "rem_slave_counters: Could not move all counters to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(counter, tmp, counter_list, com.list) { @@@ -4662,8 -4599,8 +4653,8 @@@ static void rem_slave_xrcdns(struct mlx
err = move_all_busy(dev, slave, RES_XRCD); if (err) - mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to " - "busy for slave %d\n", slave); + mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n", + slave);
spin_lock_irq(mlx4_tlock(dev)); list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) { @@@ -4808,10 -4745,8 +4799,8 @@@ void mlx4_vf_immed_vlan_work_handler(st 0, MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); if (err) { - mlx4_info(dev, "UPDATE_QP failed for slave %d, " - "port %d, qpn %d (%d)\n", - work->slave, port, qp->local_qpn, - err); + mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n", + work->slave, port, qp->local_qpn, err); errors++; } } diff --combined drivers/net/macvlan.c index d53e299,f4701da..a665e90 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@@ -30,6 -30,7 +30,7 @@@ #include <linux/if_link.h> #include <linux/if_macvlan.h> #include <linux/hash.h> + #include <linux/workqueue.h> #include <net/rtnetlink.h> #include <net/xfrm.h>
@@@ -40,10 -41,19 +41,19 @@@ struct macvlan_port struct hlist_head vlan_hash[MACVLAN_HASH_SIZE]; struct list_head vlans; struct rcu_head rcu; + struct sk_buff_head bc_queue; + struct work_struct bc_work; bool passthru; - int count; };
+ #define MACVLAN_PORT_IS_EMPTY(port) list_empty(&port->vlans) + + struct macvlan_skb_cb { + const struct macvlan_dev *src; + }; + + #define MACVLAN_SKB_CB(__skb) ((struct macvlan_skb_cb *)&((__skb)->cb[0])) + static void macvlan_port_destroy(struct net_device *dev);
static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev) @@@ -120,7 -130,7 +130,7 @@@ static int macvlan_broadcast_one(struc struct net_device *dev = vlan->dev;
if (local) - return dev_forward_skb(dev, skb); + return __dev_forward_skb(dev, skb);
skb->dev = dev; if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast)) @@@ -128,7 -138,7 +138,7 @@@ else skb->pkt_type = PACKET_MULTICAST;
- return netif_rx(skb); + return 0; }
static u32 macvlan_hash_mix(const struct macvlan_dev *vlan) @@@ -175,32 -185,32 +185,32 @@@ static void macvlan_broadcast(struct sk if (likely(nskb)) err = macvlan_broadcast_one( nskb, vlan, eth, - mode == MACVLAN_MODE_BRIDGE); + mode == MACVLAN_MODE_BRIDGE) ?: + netif_rx_ni(nskb); macvlan_count_rx(vlan, skb->len + ETH_HLEN, err == NET_RX_SUCCESS, 1); } } }
- /* called under rcu_read_lock() from netif_receive_skb */ - static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) + static void macvlan_process_broadcast(struct work_struct *w) { - struct macvlan_port *port; - struct sk_buff *skb = *pskb; - const struct ethhdr *eth = eth_hdr(skb); - const struct macvlan_dev *vlan; - const struct macvlan_dev *src; - struct net_device *dev; - unsigned int len = 0; - int ret = NET_RX_DROP; + struct macvlan_port *port = container_of(w, struct macvlan_port, + bc_work); + struct sk_buff *skb; + struct sk_buff_head list; + + skb_queue_head_init(&list); + + spin_lock_bh(&port->bc_queue.lock); + skb_queue_splice_tail_init(&port->bc_queue, &list); + spin_unlock_bh(&port->bc_queue.lock); + + while ((skb = __skb_dequeue(&list))) { + const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src; + + rcu_read_lock();
- port = macvlan_port_get_rcu(skb->dev); - if (is_multicast_ether_addr(eth->h_dest)) { - skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN); - if (!skb) - return RX_HANDLER_CONSUMED; - eth = eth_hdr(skb); - src = macvlan_hash_lookup(port, eth->h_source); if (!src) /* frame comes from an external address */ macvlan_broadcast(skb, port, NULL, @@@ -213,20 -223,80 +223,80 @@@ macvlan_broadcast(skb, port, src->dev, MACVLAN_MODE_VEPA | MACVLAN_MODE_BRIDGE); - else if (src->mode == MACVLAN_MODE_BRIDGE) + else /* * flood only to VEPA ports, bridge ports * already saw the frame on the way out. */ macvlan_broadcast(skb, port, src->dev, MACVLAN_MODE_VEPA); - else { + + rcu_read_unlock(); + + kfree_skb(skb); + } + } + + static void macvlan_broadcast_enqueue(struct macvlan_port *port, + struct sk_buff *skb) + { + struct sk_buff *nskb; + int err = -ENOMEM; + + nskb = skb_clone(skb, GFP_ATOMIC); + if (!nskb) + goto err; + + spin_lock(&port->bc_queue.lock); + if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) { + __skb_queue_tail(&port->bc_queue, nskb); + err = 0; + } + spin_unlock(&port->bc_queue.lock); + + if (err) + goto free_nskb; + + schedule_work(&port->bc_work); + return; + + free_nskb: + kfree_skb(nskb); + err: + atomic_long_inc(&skb->dev->rx_dropped); + } + + /* called under rcu_read_lock() from netif_receive_skb */ + static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) + { + struct macvlan_port *port; + struct sk_buff *skb = *pskb; + const struct ethhdr *eth = eth_hdr(skb); + const struct macvlan_dev *vlan; + const struct macvlan_dev *src; + struct net_device *dev; + unsigned int len = 0; + int ret = NET_RX_DROP; + + port = macvlan_port_get_rcu(skb->dev); + if (is_multicast_ether_addr(eth->h_dest)) { + skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN); + if (!skb) + return RX_HANDLER_CONSUMED; + eth = eth_hdr(skb); + src = macvlan_hash_lookup(port, eth->h_source); + if (src && src->mode != MACVLAN_MODE_VEPA && + src->mode != MACVLAN_MODE_BRIDGE) { /* forward to original port. */ vlan = src; - ret = macvlan_broadcast_one(skb, vlan, eth, 0); + ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?: + netif_rx(skb); goto out; }
+ MACVLAN_SKB_CB(skb)->src = src; + macvlan_broadcast_enqueue(port, skb); + return RX_HANDLER_PASS; }
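The restructuring above is the heart of this macvlan patch: broadcast flooding used to run synchronously in the RX handler (softirq context), so a broadcast storm on the lower device could monopolize softirq time across every macvlan. Now the handler clones the skb onto a per-port queue, bounded by tx_queue_len, and a work item floods the copies from process context via netif_rx_ni(). The two halves, in miniature (names as in the patch; surrounding declarations elided):

/* RX handler, softirq: enqueue the clone and return */
spin_lock(&port->bc_queue.lock);
if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len)
	__skb_queue_tail(&port->bc_queue, nskb);
spin_unlock(&port->bc_queue.lock);
schedule_work(&port->bc_work);

/* worker, process context: splice to a private list, then flood */
struct sk_buff_head list;
struct sk_buff *skb;

skb_queue_head_init(&list);
spin_lock_bh(&port->bc_queue.lock);
skb_queue_splice_tail_init(&port->bc_queue, &list);
spin_unlock_bh(&port->bc_queue.lock);
while ((skb = __skb_dequeue(&list))) {
	/* macvlan_broadcast(skb, port, ...) according to source mode */
	kfree_skb(skb);
}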
@@@ -458,10 -528,8 +528,10 @@@ static void macvlan_change_rx_flags(str struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev;
- if (change & IFF_ALLMULTI) - dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); + if (dev->flags & IFF_UP) { + if (change & IFF_ALLMULTI) + dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); + } }
static void macvlan_set_mac_lists(struct net_device *dev) @@@ -517,11 -585,6 +587,11 @@@ static struct lock_class_key macvlan_ne #define MACVLAN_STATE_MASK \ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
+static int macvlan_get_nest_level(struct net_device *dev) +{ + return ((struct macvlan_dev *)netdev_priv(dev))->nest_level; +} + static void macvlan_set_lockdep_class_one(struct net_device *dev, struct netdev_queue *txq, void *_unused) @@@ -532,9 -595,8 +602,9 @@@
static void macvlan_set_lockdep_class(struct net_device *dev) { - lockdep_set_class(&dev->addr_list_lock, - &macvlan_netdev_addr_lock_key); + lockdep_set_class_and_subclass(&dev->addr_list_lock, + &macvlan_netdev_addr_lock_key, + macvlan_get_nest_level(dev)); netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL); }
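The subclass change above exists for stacked macvlans (a macvlan created on top of another macvlan): each stacking depth must use a distinct lockdep subclass for addr_list_lock, or lockdep flags the upper/lower lock pair as recursive locking. The depth itself is recorded at link-creation time later in this patch:

/* eth0 -> 0, macvlan-on-eth0 -> 1, macvlan-on-macvlan -> 2, ... */
vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;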
@@@ -567,8 -629,7 +637,7 @@@ static void macvlan_uninit(struct net_d
free_percpu(vlan->pcpu_stats);
- port->count -= 1; - if (!port->count) + if (MACVLAN_PORT_IS_EMPTY(port)) macvlan_port_destroy(port->dev); }
@@@ -729,7 -790,6 +798,7 @@@ static const struct net_device_ops macv .ndo_fdb_add = macvlan_fdb_add, .ndo_fdb_del = macvlan_fdb_del, .ndo_fdb_dump = ndo_dflt_fdb_dump, + .ndo_get_lock_subclass = macvlan_get_nest_level, };
void macvlan_common_setup(struct net_device *dev) @@@ -770,6 -830,9 +839,9 @@@ static int macvlan_port_create(struct n for (i = 0; i < MACVLAN_HASH_SIZE; i++) INIT_HLIST_HEAD(&port->vlan_hash[i]);
+ skb_queue_head_init(&port->bc_queue); + INIT_WORK(&port->bc_work, macvlan_process_broadcast); + err = netdev_rx_handler_register(dev, macvlan_handle_frame, port); if (err) kfree(port); @@@ -782,6 -845,7 +854,7 @@@ static void macvlan_port_destroy(struc { struct macvlan_port *port = macvlan_port_get_rtnl(dev);
+ cancel_work_sync(&port->bc_work); dev->priv_flags &= ~IFF_MACVLAN_PORT; netdev_rx_handler_unregister(dev); kfree_rcu(port, rcu); @@@ -858,7 -922,6 +931,7 @@@ int macvlan_common_newlink(struct net * vlan->dev = dev; vlan->port = port; vlan->set_features = MACVLAN_FEATURES; + vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
vlan->mode = MACVLAN_MODE_VEPA; if (data && data[IFLA_MACVLAN_MODE]) @@@ -868,13 -931,12 +941,12 @@@ vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
if (vlan->mode == MACVLAN_MODE_PASSTHRU) { - if (port->count) + if (!MACVLAN_PORT_IS_EMPTY(port)) return -EINVAL; port->passthru = true; eth_hw_addr_inherit(dev, lowerdev); }
- port->count += 1; err = register_netdevice(dev); if (err < 0) goto destroy_port; @@@ -892,8 -954,7 +964,7 @@@ unregister_netdev: unregister_netdevice(dev); destroy_port: - port->count -= 1; - if (!port->count) + if (MACVLAN_PORT_IS_EMPTY(port)) macvlan_port_destroy(lowerdev);
return err; @@@ -1028,6 -1089,13 +1099,13 @@@ static int macvlan_device_event(struct netdev_update_features(vlan->dev); } break; + case NETDEV_CHANGEMTU: + list_for_each_entry(vlan, &port->vlans, list) { + if (vlan->dev->mtu <= dev->mtu) + continue; + dev_set_mtu(vlan->dev, dev->mtu); + } + break; case NETDEV_UNREGISTER: /* twiddle thumbs on netns device moves */ if (dev->reg_state != NETREG_UNREGISTERING) diff --combined drivers/net/phy/phy_device.c index 4987a1c,466ae3e..4383434 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@@ -614,8 -614,8 +614,8 @@@ int phy_attach_direct(struct net_devic err = phy_init_hw(phydev); if (err) phy_detach(phydev); - - phy_resume(phydev); + else + phy_resume(phydev);
return err; } @@@ -1067,7 -1067,7 +1067,7 @@@ int genphy_soft_reset(struct phy_devic } EXPORT_SYMBOL(genphy_soft_reset);
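The phy_attach_direct() change above is a small ordering fix: a PHY whose phy_init_hw() failed used to be resumed anyway. A runnable model of the corrected flow, with stubs standing in for the phylib calls:

#include <stdio.h>

static int init_hw_ok;

static int phy_init_hw_stub(void) { return init_hw_ok ? 0 : -5; /* -EIO */ }
static void phy_detach_stub(void) { puts("detach: undo the attach"); }
static void phy_resume_stub(void) { puts("resume: power the PHY up"); }

static int attach_direct(void)
{
	int err = phy_init_hw_stub();

	if (err)
		phy_detach_stub();	/* failed init: tear down, never resume */
	else
		phy_resume_stub();	/* only a successfully attached PHY */
	return err;
}

int main(void)
{
	init_hw_ok = 0;
	printf("err=%d\n", attach_direct());
	init_hw_ok = 1;
	printf("err=%d\n", attach_direct());
	return 0;
}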
- static int genphy_config_init(struct phy_device *phydev) + int genphy_config_init(struct phy_device *phydev) { int val; u32 features; @@@ -1118,6 -1118,7 +1118,7 @@@ static int gen10g_soft_reset(struct phy /* Do nothing for now */ return 0; } + EXPORT_SYMBOL(genphy_config_init);
static int gen10g_config_init(struct phy_device *phydev) { diff --combined drivers/net/usb/cdc_ncm.c index 9a2bd11,2d0caf1..ad2a386 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@@ -65,19 -65,270 +65,270 @@@ static void cdc_ncm_tx_timeout_start(st static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer); static struct usb_driver cdc_ncm_driver;
- static int cdc_ncm_setup(struct usbnet *dev) + struct cdc_ncm_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; + }; + + #define CDC_NCM_STAT(str, m) { \ + .stat_string = str, \ + .sizeof_stat = sizeof(((struct cdc_ncm_ctx *)0)->m), \ + .stat_offset = offsetof(struct cdc_ncm_ctx, m) } + #define CDC_NCM_SIMPLE_STAT(m) CDC_NCM_STAT(__stringify(m), m) + + static const struct cdc_ncm_stats cdc_ncm_gstrings_stats[] = { + CDC_NCM_SIMPLE_STAT(tx_reason_ntb_full), + CDC_NCM_SIMPLE_STAT(tx_reason_ndp_full), + CDC_NCM_SIMPLE_STAT(tx_reason_timeout), + CDC_NCM_SIMPLE_STAT(tx_reason_max_datagram), + CDC_NCM_SIMPLE_STAT(tx_overhead), + CDC_NCM_SIMPLE_STAT(tx_ntbs), + CDC_NCM_SIMPLE_STAT(rx_overhead), + CDC_NCM_SIMPLE_STAT(rx_ntbs), + }; + + static int cdc_ncm_get_sset_count(struct net_device __always_unused *netdev, int sset) + { + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(cdc_ncm_gstrings_stats); + default: + return -EOPNOTSUPP; + } + } + + static void cdc_ncm_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) { + struct usbnet *dev = netdev_priv(netdev); struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; - u32 val; - u8 flags; - u8 iface_no; - int err; - int eth_hlen; - u16 mbim_mtu; - u16 ntb_fmt_supported; - __le16 max_datagram_size; + int i; + char *p = NULL;
- iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; + for (i = 0; i < ARRAY_SIZE(cdc_ncm_gstrings_stats); i++) { + p = (char *)ctx + cdc_ncm_gstrings_stats[i].stat_offset; + data[i] = (cdc_ncm_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + } + + static void cdc_ncm_get_strings(struct net_device __always_unused *netdev, u32 stringset, u8 *data) + { + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(cdc_ncm_gstrings_stats); i++) { + memcpy(p, cdc_ncm_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + } + } + + static int cdc_ncm_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) + { + struct usbnet *dev = netdev_priv(netdev); + struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; + + /* assuming maximum sized dgrams and ignoring NDPs */ + ec->rx_max_coalesced_frames = ctx->rx_max / ctx->max_datagram_size; + ec->tx_max_coalesced_frames = ctx->tx_max / ctx->max_datagram_size; + + /* the timer will fire CDC_NCM_TIMER_PENDING_CNT times in a row */ - ec->tx_coalesce_usecs = (ctx->timer_interval * CDC_NCM_TIMER_PENDING_CNT) / NSEC_PER_USEC; ++ ec->tx_coalesce_usecs = ctx->timer_interval / (NSEC_PER_USEC / CDC_NCM_TIMER_PENDING_CNT); + return 0; + } + + static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx); + + static int cdc_ncm_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) + { + struct usbnet *dev = netdev_priv(netdev); + struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; + u32 new_rx_max = ctx->rx_max; + u32 new_tx_max = ctx->tx_max; + + /* assuming maximum sized dgrams and a single NDP */ + if (ec->rx_max_coalesced_frames) + new_rx_max = ec->rx_max_coalesced_frames * ctx->max_datagram_size; + if (ec->tx_max_coalesced_frames) + new_tx_max = ec->tx_max_coalesced_frames * ctx->max_datagram_size; + + if (ec->tx_coalesce_usecs && + (ec->tx_coalesce_usecs < CDC_NCM_TIMER_INTERVAL_MIN * CDC_NCM_TIMER_PENDING_CNT || + ec->tx_coalesce_usecs > CDC_NCM_TIMER_INTERVAL_MAX * CDC_NCM_TIMER_PENDING_CNT)) + return -EINVAL; + + spin_lock_bh(&ctx->mtx); - ctx->timer_interval = ec->tx_coalesce_usecs * NSEC_PER_USEC / CDC_NCM_TIMER_PENDING_CNT; ++ ctx->timer_interval = ec->tx_coalesce_usecs * (NSEC_PER_USEC / CDC_NCM_TIMER_PENDING_CNT); + if (!ctx->timer_interval) + ctx->tx_timer_pending = 0; + spin_unlock_bh(&ctx->mtx); + + /* inform device of new values */ + if (new_rx_max != ctx->rx_max || new_tx_max != ctx->tx_max) + cdc_ncm_update_rxtx_max(dev, new_rx_max, new_tx_max); + return 0; + } + + static const struct ethtool_ops cdc_ncm_ethtool_ops = { + .get_settings = usbnet_get_settings, + .set_settings = usbnet_set_settings, + .get_link = usbnet_get_link, + .nway_reset = usbnet_nway_reset, + .get_drvinfo = usbnet_get_drvinfo, + .get_msglevel = usbnet_get_msglevel, + .set_msglevel = usbnet_set_msglevel, + .get_ts_info = ethtool_op_get_ts_info, + .get_sset_count = cdc_ncm_get_sset_count, + .get_strings = cdc_ncm_get_strings, + .get_ethtool_stats = cdc_ncm_get_ethtool_stats, + .get_coalesce = cdc_ncm_get_coalesce, + .set_coalesce = cdc_ncm_set_coalesce, + }; + + /* handle rx_max and tx_max changes */ + static void cdc_ncm_update_rxtx_max(struct usbnet *dev, u32 new_rx, u32 new_tx) + { + struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; + u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; + u32 val, max, min; + + /* clamp new_rx to sane values */ + min = USB_CDC_NCM_NTB_MIN_IN_SIZE; 
+ max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_RX, le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)); + + /* dwNtbInMaxSize spec violation? Use MIN size for both limits */ + if (max < min) { + dev_warn(&dev->intf->dev, "dwNtbInMaxSize=%u is too small. Using %u\n", + le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize), min); + max = min; + } + + val = clamp_t(u32, new_rx, min, max); + if (val != new_rx) { + dev_dbg(&dev->intf->dev, "rx_max must be in the [%u, %u] range. Using %u\n", + min, max, val); + } + + /* usbnet use these values for sizing rx queues */ + dev->rx_urb_size = val; + + /* inform device about NTB input size changes */ + if (val != ctx->rx_max) { + __le32 dwNtbInMaxSize = cpu_to_le32(val); + + dev_info(&dev->intf->dev, "setting rx_max = %u\n", val); + + /* need to unlink rx urbs before increasing buffer size */ + if (netif_running(dev->net) && dev->rx_urb_size > ctx->rx_max) + usbnet_unlink_rx_urbs(dev); + + /* tell device to use new size */ + if (usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE, + USB_TYPE_CLASS | USB_DIR_OUT + | USB_RECIP_INTERFACE, + 0, iface_no, &dwNtbInMaxSize, 4) < 0) + dev_dbg(&dev->intf->dev, "Setting NTB Input Size failed\n"); + else + ctx->rx_max = val; + } + + /* clamp new_tx to sane values */ + min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth16); + max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)); + + /* some devices set dwNtbOutMaxSize too low for the above default */ + min = min(min, max); + + val = clamp_t(u32, new_tx, min, max); + if (val != new_tx) { + dev_dbg(&dev->intf->dev, "tx_max must be in the [%u, %u] range. Using %u\n", + min, max, val); + } + if (val != ctx->tx_max) + dev_info(&dev->intf->dev, "setting tx_max = %u\n", val); + + /* Adding a pad byte here if necessary simplifies the handling + * in cdc_ncm_fill_tx_frame, making tx_max always represent + * the real skb max size. 
+ * + * We cannot use dev->maxpacket here because this is called from + * .bind which is called before usbnet sets up dev->maxpacket + */ + if (val != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) && + val % usb_maxpacket(dev->udev, dev->out, 1) == 0) + val++; + + /* we might need to flush any pending tx buffers if running */ + if (netif_running(dev->net) && val > ctx->tx_max) { + netif_tx_lock_bh(dev->net); + usbnet_start_xmit(NULL, dev->net); + ctx->tx_max = val; + netif_tx_unlock_bh(dev->net); + } else { + ctx->tx_max = val; + } + + dev->hard_mtu = ctx->tx_max; + + /* max qlen depend on hard_mtu and rx_urb_size */ + usbnet_update_max_qlen(dev); + + /* never pad more than 3 full USB packets per transfer */ + ctx->min_tx_pkt = clamp_t(u16, ctx->tx_max - 3 * usb_maxpacket(dev->udev, dev->out, 1), + CDC_NCM_MIN_TX_PKT, ctx->tx_max); + } + + /* helpers for NCM and MBIM differences */ + static u8 cdc_ncm_flags(struct usbnet *dev) + { + struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; + + if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting) && ctx->mbim_desc) + return ctx->mbim_desc->bmNetworkCapabilities; + if (ctx->func_desc) + return ctx->func_desc->bmNetworkCapabilities; + return 0; + } + + static int cdc_ncm_eth_hlen(struct usbnet *dev) + { + if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting)) + return 0; + return ETH_HLEN; + } + + static u32 cdc_ncm_min_dgram_size(struct usbnet *dev) + { + if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting)) + return CDC_MBIM_MIN_DATAGRAM_SIZE; + return CDC_NCM_MIN_DATAGRAM_SIZE; + } + + static u32 cdc_ncm_max_dgram_size(struct usbnet *dev) + { + struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; + + if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting) && ctx->mbim_desc) + return le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize); + if (ctx->ether_desc) + return le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); + return CDC_NCM_MAX_DATAGRAM_SIZE; + } + + /* initial one-time device setup. MUST be called with the data interface + * in altsetting 0 + */ + static int cdc_ncm_init(struct usbnet *dev) + { + struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; + u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; + int err;
err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS, USB_TYPE_CLASS | USB_DIR_IN @@@ -89,7 -340,35 +340,35 @@@ return err; /* GET_NTB_PARAMETERS is required */ }
- /* read correct set of parameters according to device mode */ + /* set CRC Mode */ + if (cdc_ncm_flags(dev) & USB_CDC_NCM_NCAP_CRC_MODE) { + dev_dbg(&dev->intf->dev, "Setting CRC mode off\n"); + err = usbnet_write_cmd(dev, USB_CDC_SET_CRC_MODE, + USB_TYPE_CLASS | USB_DIR_OUT + | USB_RECIP_INTERFACE, + USB_CDC_NCM_CRC_NOT_APPENDED, + iface_no, NULL, 0); + if (err < 0) + dev_err(&dev->intf->dev, "SET_CRC_MODE failed\n"); + } + + /* set NTB format, if both formats are supported. + * + * "The host shall only send this command while the NCM Data + * Interface is in alternate setting 0." + */ + if (le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported) & USB_CDC_NCM_NTH32_SIGN) { + dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit\n"); + err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT, + USB_TYPE_CLASS | USB_DIR_OUT + | USB_RECIP_INTERFACE, + USB_CDC_NCM_NTB16_FORMAT, + iface_no, NULL, 0); + if (err < 0) + dev_err(&dev->intf->dev, "SET_NTB_FORMAT failed\n"); + } + + /* set initial device values */ ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize); ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize); ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); @@@ -97,72 -376,79 +376,79 @@@ ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); /* devices prior to NCM Errata shall set this field to zero */ ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams); - ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported); - - /* there are some minor differences in NCM and MBIM defaults */ - if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) { - if (!ctx->mbim_desc) - return -EINVAL; - eth_hlen = 0; - flags = ctx->mbim_desc->bmNetworkCapabilities; - ctx->max_datagram_size = le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize); - if (ctx->max_datagram_size < CDC_MBIM_MIN_DATAGRAM_SIZE) - ctx->max_datagram_size = CDC_MBIM_MIN_DATAGRAM_SIZE; - } else { - if (!ctx->func_desc) - return -EINVAL; - eth_hlen = ETH_HLEN; - flags = ctx->func_desc->bmNetworkCapabilities; - ctx->max_datagram_size = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); - if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE) - ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE; - } - - /* common absolute max for NCM and MBIM */ - if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE) - ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
dev_dbg(&dev->intf->dev, "dwNtbInMaxSize=%u dwNtbOutMaxSize=%u wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n", ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus, - ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags); + ctx->tx_ndp_modulus, ctx->tx_max_datagrams, cdc_ncm_flags(dev));
/* max count of tx datagrams */ if ((ctx->tx_max_datagrams == 0) || (ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX)) ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
- /* verify maximum size of received NTB in bytes */ - if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) { - dev_dbg(&dev->intf->dev, "Using min receive length=%d\n", - USB_CDC_NCM_NTB_MIN_IN_SIZE); - ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE; - } + /* set up maximum NDP size */ + ctx->max_ndp_size = sizeof(struct usb_cdc_ncm_ndp16) + (ctx->tx_max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe16);
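The new max_ndp_size above reserves the NDP16 header plus one datagram pointer entry per datagram and one terminating null entry, as NCM requires. With the usual structure sizes (an 8-byte usb_cdc_ncm_ndp16 header, 4-byte usb_cdc_ncm_dpe16 entries) and a 40-datagram CDC_NCM_DPT_DATAGRAMS_MAX cap, all values assumed here rather than quoted in the hunk, the arithmetic works out as:

#include <stdio.h>

int main(void)
{
	/* assumed on-the-wire sizes of the NCM16 structures */
	const unsigned ndp16_hdr = 8;	/* dwSignature + wLength + wNextNdpIndex */
	const unsigned dpe16     = 4;	/* wDatagramIndex + wDatagramLength */
	const unsigned max_dgram = 40;	/* CDC_NCM_DPT_DATAGRAMS_MAX */

	/* the +1 is the spec's null terminator entry */
	printf("max_ndp_size = %u\n", ndp16_hdr + (max_dgram + 1) * dpe16);
	return 0;
}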
- if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) { - dev_dbg(&dev->intf->dev, "Using default maximum receive length=%d\n", - CDC_NCM_NTB_MAX_SIZE_RX); - ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX; - } + /* initial coalescing timer interval */ + ctx->timer_interval = CDC_NCM_TIMER_INTERVAL_USEC * NSEC_PER_USEC;
- /* inform device about NTB input size changes */ - if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { - __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); + return 0; + }
- err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE, - USB_TYPE_CLASS | USB_DIR_OUT - | USB_RECIP_INTERFACE, - 0, iface_no, &dwNtbInMaxSize, 4); - if (err < 0) - dev_dbg(&dev->intf->dev, "Setting NTB Input Size failed\n"); + /* set a new max datagram size */ + static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size) + { + struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; + u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; + __le16 max_datagram_size; + u16 mbim_mtu; + int err; + + /* set default based on descriptors */ + ctx->max_datagram_size = clamp_t(u32, new_size, + cdc_ncm_min_dgram_size(dev), + CDC_NCM_MAX_DATAGRAM_SIZE); + + /* inform the device about the selected Max Datagram Size? */ + if (!(cdc_ncm_flags(dev) & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE)) + goto out; + + /* read current mtu value from device */ + err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE, + USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE, + 0, iface_no, &max_datagram_size, 2); + if (err < 0) { + dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n"); + goto out; }
- /* verify maximum size of transmitted NTB in bytes */ - if (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX) { - dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n", - CDC_NCM_NTB_MAX_SIZE_TX); - ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX; + if (le16_to_cpu(max_datagram_size) == ctx->max_datagram_size) + goto out; + + max_datagram_size = cpu_to_le16(ctx->max_datagram_size); + err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE, + USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE, + 0, iface_no, &max_datagram_size, 2); + if (err < 0) + dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n"); + + out: + /* set MTU to max supported by the device if necessary */ + dev->net->mtu = min_t(int, dev->net->mtu, ctx->max_datagram_size - cdc_ncm_eth_hlen(dev)); + + /* do not exceed operater preferred MTU */ + if (ctx->mbim_extended_desc) { + mbim_mtu = le16_to_cpu(ctx->mbim_extended_desc->wMTU); + if (mbim_mtu != 0 && mbim_mtu < dev->net->mtu) + dev->net->mtu = mbim_mtu; } + } + + static void cdc_ncm_fix_modulus(struct usbnet *dev) + { + struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; + u32 val;
/* * verify that the structure alignment is: @@@ -199,68 -485,31 +485,31 @@@ }
/* adjust TX-remainder according to NCM specification. */ - ctx->tx_remainder = ((ctx->tx_remainder - eth_hlen) & + ctx->tx_remainder = ((ctx->tx_remainder - cdc_ncm_eth_hlen(dev)) & (ctx->tx_modulus - 1)); + }
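The tx_remainder rewrite above converts the device's payload remainder into a datagram-start remainder: subtracting the Ethernet header length modulo the (power-of-two) modulus means that placing the datagram at the computed offset puts the IP payload exactly on the boundary the device asked for. Worked numbers, assuming modulus 4 and a reported payload remainder of 0:

#include <stdio.h>

int main(void)
{
	const unsigned eth_hlen = 14;	/* ETH_HLEN; 0 in the MBIM case */
	const unsigned modulus  = 4;	/* validated earlier as a power of two */
	unsigned remainder      = 0;	/* wNdpOutPayloadRemainder */

	/* same expression as the diff; & works as cheap modulo */
	remainder = (remainder - eth_hlen) & (modulus - 1);
	printf("datagram-start remainder = %u\n", remainder);

	/* check: (2 + 14) %% 4 == 0, so the payload lands aligned */
	printf("payload offset mod %u = %u\n", modulus,
	       (remainder + eth_hlen) % modulus);
	return 0;
}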
- /* additional configuration */ - - /* set CRC Mode */ - if (flags & USB_CDC_NCM_NCAP_CRC_MODE) { - err = usbnet_write_cmd(dev, USB_CDC_SET_CRC_MODE, - USB_TYPE_CLASS | USB_DIR_OUT - | USB_RECIP_INTERFACE, - USB_CDC_NCM_CRC_NOT_APPENDED, - iface_no, NULL, 0); - if (err < 0) - dev_dbg(&dev->intf->dev, "Setting CRC mode off failed\n"); - } - - /* set NTB format, if both formats are supported */ - if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) { - err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT, - USB_TYPE_CLASS | USB_DIR_OUT - | USB_RECIP_INTERFACE, - USB_CDC_NCM_NTB16_FORMAT, - iface_no, NULL, 0); - if (err < 0) - dev_dbg(&dev->intf->dev, "Setting NTB format to 16-bit failed\n"); - } - - /* inform the device about the selected Max Datagram Size */ - if (!(flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE)) - goto out; - - /* read current mtu value from device */ - err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE, - USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE, - 0, iface_no, &max_datagram_size, 2); - if (err < 0) { - dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n"); - goto out; - } - - if (le16_to_cpu(max_datagram_size) == ctx->max_datagram_size) - goto out; + static int cdc_ncm_setup(struct usbnet *dev) + { + struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; + u32 def_rx, def_tx;
- max_datagram_size = cpu_to_le16(ctx->max_datagram_size); - err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE, - USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE, - 0, iface_no, &max_datagram_size, 2); - if (err < 0) - dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n"); + /* be conservative when selecting initial buffer size to + * increase the number of hosts this will work for + */ + def_rx = min_t(u32, CDC_NCM_NTB_DEF_SIZE_RX, + le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)); + def_tx = min_t(u32, CDC_NCM_NTB_DEF_SIZE_TX, + le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
- out: - /* set MTU to max supported by the device if necessary */ - if (dev->net->mtu > ctx->max_datagram_size - eth_hlen) - dev->net->mtu = ctx->max_datagram_size - eth_hlen; + /* clamp rx_max and tx_max and inform device */ + cdc_ncm_update_rxtx_max(dev, def_rx, def_tx);
- /* do not exceed operater preferred MTU */ - if (ctx->mbim_extended_desc) { - mbim_mtu = le16_to_cpu(ctx->mbim_extended_desc->wMTU); - if (mbim_mtu != 0 && mbim_mtu < dev->net->mtu) - dev->net->mtu = mbim_mtu; - } + /* sanitize the modulus and remainder values */ + cdc_ncm_fix_modulus(dev);
+ /* set max datagram size */ + cdc_ncm_set_dgram_size(dev, cdc_ncm_max_dgram_size(dev)); return 0; }
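The ethtool coalescing glue earlier in this file (cdc_ncm_get_coalesce()/cdc_ncm_set_coalesce()) converts between user-visible frame counts and microseconds on one side and the driver's buffer sizes and hrtimer period on the other, assuming maximum-sized datagrams. A runnable walk through that arithmetic with assumed values: 1514-byte datagrams, a 16 KiB NTB, and CDC_NCM_TIMER_PENDING_CNT taken to be 2, a constant the hunks do not quote:

#include <stdio.h>

#define NSEC_PER_USEC 1000u
#define TIMER_PENDING_CNT 2u	/* assumed; not shown in the quoted hunks */

int main(void)
{
	unsigned max_dgram = 1514, rx_max = 16384;

	/* get_coalesce: buffer size -> frame count, max-sized dgrams assumed */
	printf("rx_max_coalesced_frames = %u\n", rx_max / max_dgram);

	/* set_coalesce: the user asks for 1000 us of total tx latency */
	unsigned usecs = 1000;
	unsigned interval_ns = usecs * (NSEC_PER_USEC / TIMER_PENDING_CNT);
	printf("timer_interval = %u ns\n", interval_ns);

	/* get_coalesce: and back again, losslessly */
	printf("tx_coalesce_usecs = %u\n",
	       interval_ns / (NSEC_PER_USEC / TIMER_PENDING_CNT));
	return 0;
}

Note the merged conversion divides by (NSEC_PER_USEC / CNT) rather than multiplying first; the two forms agree whenever CNT divides 1000, and the chosen one cannot overflow for large intervals.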
@@@ -424,10 -673,21 +673,21 @@@ advance }
/* check if we got everything */ - if (!ctx->data || (!ctx->mbim_desc && !ctx->ether_desc)) { - dev_dbg(&intf->dev, "CDC descriptors missing\n"); + if (!ctx->data) { + dev_dbg(&intf->dev, "CDC Union missing and no IAD found\n"); goto error; } + if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) { + if (!ctx->mbim_desc) { + dev_dbg(&intf->dev, "MBIM functional descriptor missing\n"); + goto error; + } + } else { + if (!ctx->ether_desc || !ctx->func_desc) { + dev_dbg(&intf->dev, "NCM or ECM functional descriptors missing\n"); + goto error; + } + }
/* claim data interface, if different from control */ if (ctx->data != ctx->control) { @@@ -447,8 -707,8 +707,8 @@@ goto error2; }
- /* initialize data interface */ - if (cdc_ncm_setup(dev)) + /* initialize basic device settings */ + if (cdc_ncm_init(dev)) goto error2;
/* configure data interface */ @@@ -477,18 -737,11 +737,11 @@@ dev_info(&intf->dev, "MAC-Address: %pM\n", dev->net->dev_addr); }
- /* usbnet use these values for sizing tx/rx queues */ - dev->hard_mtu = ctx->tx_max; - dev->rx_urb_size = ctx->rx_max; + /* finish setting up the device-specific data */ + cdc_ncm_setup(dev);
- /* cdc_ncm_setup will override dwNtbOutMaxSize if it is - * outside the sane range. Adding a pad byte here if necessary - * simplifies the handling in cdc_ncm_fill_tx_frame, making - * tx_max always represent the real skb max size. - */ - if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) && - ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0) - ctx->tx_max++; + /* override ethtool_ops */ + dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
return 0;
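One subtlety that moved from bind time into cdc_ncm_update_rxtx_max() above: if tx_max is an exact multiple of the bulk-OUT endpoint's wMaxPacketSize, a completely full NTB would have to be followed by a zero-length packet, so the driver bumps the size by one pad byte (skipping the bump when the size equals the device's own dwNtbOutMaxSize). With assumed 512-byte high-speed bulk packets:

#include <stdio.h>

int main(void)
{
	const unsigned maxpacket = 512;	/* usb_maxpacket() for the OUT pipe */
	unsigned tx_max = 16384;	/* candidate NTB size */

	if (tx_max % maxpacket == 0)
		tx_max++;		/* avoid needing a ZLP after full NTBs */

	printf("tx_max = %u (%u mod %u = %u)\n",
	       tx_max, tx_max, maxpacket, tx_max % maxpacket);
	return 0;
}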
@@@ -541,10 -794,10 +794,10 @@@ void cdc_ncm_unbind(struct usbnet *dev } EXPORT_SYMBOL_GPL(cdc_ncm_unbind);
- /* Select the MBIM altsetting iff it is preferred and available, * returning the number of the corresponding data interface altsetting + /* Return the number of the MBIM control interface altsetting iff it + * is preferred and available. */ - u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf) + u8 cdc_ncm_select_altsetting(struct usb_interface *intf) { struct usb_host_interface *alt;
@@@ -563,15 -816,15 +816,15 @@@ * the rules given in section 6 (USB Device Model) of this * specification." */ - if (prefer_mbim && intf->num_altsetting == 2) { + if (intf->num_altsetting < 2) + return intf->cur_altsetting->desc.bAlternateSetting; + + if (prefer_mbim) { alt = usb_altnum_to_altsetting(intf, CDC_NCM_COMM_ALTSETTING_MBIM); - if (alt && cdc_ncm_comm_intf_is_mbim(alt) && - !usb_set_interface(dev->udev, - intf->cur_altsetting->desc.bInterfaceNumber, - CDC_NCM_COMM_ALTSETTING_MBIM)) - return CDC_NCM_DATA_ALTSETTING_MBIM; + if (alt && cdc_ncm_comm_intf_is_mbim(alt)) + return CDC_NCM_COMM_ALTSETTING_MBIM; } - return CDC_NCM_DATA_ALTSETTING_NCM; + return CDC_NCM_COMM_ALTSETTING_NCM; } EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting);
@@@ -580,12 -833,11 +833,11 @@@ static int cdc_ncm_bind(struct usbnet * int ret;
/* MBIM backwards compatible function? */ - cdc_ncm_select_altsetting(dev, intf); - if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) + if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM) return -ENODEV;
- /* NCM data altsetting is always 1 */ - ret = cdc_ncm_bind_common(dev, intf, 1); + /* The NCM data altsetting is fixed */ + ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM);
/* * We should get an event when network connection is "connected" or @@@ -628,7 -880,7 +880,7 @@@ static struct usb_cdc_ncm_ndp16 *cdc_nc cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max);
/* verify that there is room for the NDP and the datagram (reserve) */ - if ((ctx->tx_max - skb->len - reserve) < CDC_NCM_NDP_SIZE) + if ((ctx->tx_max - skb->len - reserve) < ctx->max_ndp_size) return NULL;
/* link to it */ @@@ -638,7 -890,7 +890,7 @@@ nth16->wNdpIndex = cpu_to_le16(skb->len);
/* push a new empty NDP */ - ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, CDC_NCM_NDP_SIZE), 0, CDC_NCM_NDP_SIZE); + ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size); ndp16->dwSignature = sign; ndp16->wLength = cpu_to_le16(sizeof(struct usb_cdc_ncm_ndp16) + sizeof(struct usb_cdc_ncm_dpe16)); return ndp16; @@@ -683,6 -935,9 +935,9 @@@ cdc_ncm_fill_tx_frame(struct usbnet *de
/* count total number of frames in this NTB */ ctx->tx_curr_frame_num = 0; + + /* recent payload counter for this skb_out */ + ctx->tx_curr_frame_payload = 0; }
for (n = ctx->tx_curr_frame_num; n < ctx->tx_max_datagrams; n++) { @@@ -720,6 -975,7 +975,7 @@@ ctx->tx_rem_sign = sign; skb = NULL; ready2send = 1; + ctx->tx_reason_ntb_full++; /* count reason for transmitting */ } break; } @@@ -733,12 -989,14 +989,14 @@@ ndp16->dpe16[index].wDatagramIndex = cpu_to_le16(skb_out->len); ndp16->wLength = cpu_to_le16(ndplen + sizeof(struct usb_cdc_ncm_dpe16)); memcpy(skb_put(skb_out, skb->len), skb->data, skb->len); + ctx->tx_curr_frame_payload += skb->len; /* count real tx payload data */ dev_kfree_skb_any(skb); skb = NULL;
/* send now if this NDP is full */ if (index >= CDC_NCM_DPT_DATAGRAMS_MAX) { ready2send = 1; + ctx->tx_reason_ndp_full++; /* count reason for transmitting */ break; } } @@@ -758,7 -1016,7 +1016,7 @@@ ctx->tx_curr_skb = skb_out; goto exit_no_skb;
- } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) { + } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0) && (ctx->timer_interval > 0)) { /* wait for more frames */ /* push variables */ ctx->tx_curr_skb = skb_out; @@@ -768,11 -1026,13 +1026,13 @@@ goto exit_no_skb;
} else { + if (n == ctx->tx_max_datagrams) + ctx->tx_reason_max_datagram++; /* count reason for transmitting */ /* frame goes out */ /* variables will be reset at next call */ }
- /* If collected data size is less or equal CDC_NCM_MIN_TX_PKT + /* If collected data size is less or equal ctx->min_tx_pkt * bytes, we send buffers as it is. If we get more data, it * would be more efficient for USB HS mobile device with DMA * engine to receive a full size NTB, than canceling DMA @@@ -782,7 -1042,7 +1042,7 @@@ * a ZLP after full sized NTBs. */ if (!(dev->driver_info->flags & FLAG_SEND_ZLP) && - skb_out->len > CDC_NCM_MIN_TX_PKT) + skb_out->len > ctx->min_tx_pkt) memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, ctx->tx_max - skb_out->len); else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0) @@@ -795,11 -1055,22 +1055,22 @@@ /* return skb */ ctx->tx_curr_skb = NULL; dev->net->stats.tx_packets += ctx->tx_curr_frame_num; + + /* keep private stats: framing overhead and number of NTBs */ + ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload; + ctx->tx_ntbs++; + + /* usbnet has already counted all the framing overhead. + * Adjust the stats so that the tx_bytes counter show real + * payload data instead. + */ + dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload; + return skb_out;
exit_no_skb: - /* Start timer, if there is a remaining skb */ - if (ctx->tx_curr_skb != NULL) + /* Start timer, if there is a remaining non-empty skb */ + if (ctx->tx_curr_skb != NULL && n > 0) cdc_ncm_tx_timeout_start(ctx); return NULL; } @@@ -810,7 -1081,7 +1081,7 @@@ static void cdc_ncm_tx_timeout_start(st /* start timer, if not already started */ if (!(hrtimer_active(&ctx->tx_timer) || atomic_read(&ctx->stop))) hrtimer_start(&ctx->tx_timer, - ktime_set(0, CDC_NCM_TIMER_INTERVAL), + ktime_set(0, ctx->timer_interval), HRTIMER_MODE_REL); }
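The new tx_overhead/tx_ntbs counters and the tx_bytes correction above split each transmitted NTB into payload and framing, so ethtool can report the framing cost while tx_bytes keeps counting pure payload. The bookkeeping in miniature, with sizes invented for the demo:

#include <stdio.h>

int main(void)
{
	unsigned long tx_bytes = 0, tx_overhead = 0, tx_ntbs = 0;

	/* one NTB: 16385 bytes on the wire carrying ten 1400-byte frames */
	unsigned ntb_len = 16385, payload = 10 * 1400;

	tx_bytes += ntb_len;			/* what usbnet already counted */
	tx_overhead += ntb_len - payload;	/* NTH/NDP headers plus padding */
	tx_ntbs++;
	tx_bytes -= ntb_len - payload;		/* leave pure payload behind */

	printf("tx_bytes=%lu overhead=%lu ntbs=%lu\n",
	       tx_bytes, tx_overhead, tx_ntbs);
	return 0;
}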
@@@ -835,6 -1106,7 +1106,7 @@@ static void cdc_ncm_txpath_bh(unsigned cdc_ncm_tx_timeout_start(ctx); spin_unlock_bh(&ctx->mtx); } else if (dev->net != NULL) { + ctx->tx_reason_timeout++; /* count reason for transmitting */ spin_unlock_bh(&ctx->mtx); netif_tx_lock_bh(dev->net); usbnet_start_xmit(NULL, dev->net); @@@ -970,6 -1242,7 +1242,7 @@@ int cdc_ncm_rx_fixup(struct usbnet *dev struct usb_cdc_ncm_dpe16 *dpe16; int ndpoffset; int loopcount = 50; /* arbitrary max preventing infinite loop */ + u32 payload = 0;
ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in); if (ndpoffset < 0) @@@ -1022,6 -1295,7 +1295,7 @@@ next_ndp skb->data = ((u8 *)skb_in->data) + offset; skb_set_tail_pointer(skb, len); usbnet_skb_return(dev, skb); + payload += len; /* count payload bytes in this NTB */ } } err_ndp: @@@ -1030,6 -1304,10 +1304,10 @@@ if (ndpoffset && loopcount--) goto next_ndp;
+ /* update stats */ + ctx->rx_overhead += skb_in->len - payload; + ctx->rx_ntbs++; + return 1; error: return 0; @@@ -1049,14 -1327,14 +1327,14 @@@ cdc_ncm_speed_change(struct usbnet *dev */ if ((tx_speed > 1000000) && (rx_speed > 1000000)) { netif_info(dev, link, dev->net, - "%u mbit/s downlink %u mbit/s uplink\n", - (unsigned int)(rx_speed / 1000000U), - (unsigned int)(tx_speed / 1000000U)); + "%u mbit/s downlink %u mbit/s uplink\n", + (unsigned int)(rx_speed / 1000000U), + (unsigned int)(tx_speed / 1000000U)); } else { netif_info(dev, link, dev->net, - "%u kbit/s downlink %u kbit/s uplink\n", - (unsigned int)(rx_speed / 1000U), - (unsigned int)(tx_speed / 1000U)); + "%u kbit/s downlink %u kbit/s uplink\n", + (unsigned int)(rx_speed / 1000U), + (unsigned int)(tx_speed / 1000U)); } }
@@@ -1086,11 -1364,10 +1364,10 @@@ static void cdc_ncm_status(struct usbne * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE. */ - ctx->connected = le16_to_cpu(event->wValue); netif_info(dev, link, dev->net, "network connection: %sconnected\n", - ctx->connected ? "" : "dis"); - usbnet_link_change(dev, ctx->connected, 0); + !!event->wValue ? "" : "dis"); + usbnet_link_change(dev, !!event->wValue, 0); break;
case USB_CDC_NOTIFY_SPEED_CHANGE: @@@ -1110,23 -1387,11 +1387,11 @@@ } }
- static int cdc_ncm_check_connect(struct usbnet *dev) - { - struct cdc_ncm_ctx *ctx; - - ctx = (struct cdc_ncm_ctx *)dev->data[0]; - if (ctx == NULL) - return 1; /* disconnected */ - - return !ctx->connected; - } - static const struct driver_info cdc_ncm_info = { .description = "CDC NCM", .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET, .bind = cdc_ncm_bind, .unbind = cdc_ncm_unbind, - .check_connect = cdc_ncm_check_connect, .manage_power = usbnet_manage_power, .status = cdc_ncm_status, .rx_fixup = cdc_ncm_rx_fixup, @@@ -1140,7 -1405,6 +1405,6 @@@ static const struct driver_info wwan_in | FLAG_WWAN, .bind = cdc_ncm_bind, .unbind = cdc_ncm_unbind, - .check_connect = cdc_ncm_check_connect, .manage_power = usbnet_manage_power, .status = cdc_ncm_status, .rx_fixup = cdc_ncm_rx_fixup, @@@ -1154,7 -1418,6 +1418,6 @@@ static const struct driver_info wwan_no | FLAG_WWAN | FLAG_NOARP, .bind = cdc_ncm_bind, .unbind = cdc_ncm_unbind, - .check_connect = cdc_ncm_check_connect, .manage_power = usbnet_manage_power, .status = cdc_ncm_status, .rx_fixup = cdc_ncm_rx_fixup, diff --combined drivers/net/wireless/iwlwifi/mvm/coex.c index 0489314,8f4b03d..4284672 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c @@@ -104,11 -104,8 +104,8 @@@ static const u8 iwl_bt_prio_tbl[BT_COEX #define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD (-65) #define BT_ANTENNA_COUPLING_THRESHOLD (30)
- int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm) + static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm) { - if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX)) - return 0; - return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC, sizeof(struct iwl_bt_coex_prio_tbl_cmd), &iwl_bt_prio_tbl); @@@ -573,8 -570,9 +570,9 @@@ int iwl_send_bt_init_conf(struct iwl_mv int ret; u32 flags;
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX)) - return 0; + ret = iwl_send_bt_prio_tbl(mvm); + if (ret) + return ret;
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL); if (!bt_cmd) @@@ -582,10 -580,12 +580,12 @@@ cmd.data[0] = bt_cmd;
bt_cmd->max_kill = 5; - bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD, - bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling, - bt_cmd->bt4_tx_tx_delta_freq_thr = 15, - bt_cmd->bt4_tx_rx_max_freq0 = 15, + bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD; + bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling; + bt_cmd->bt4_tx_tx_delta_freq_thr = 15; + bt_cmd->bt4_tx_rx_max_freq0 = 15; + bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT; + bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;
flags = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE; @@@ -611,14 -611,14 +611,14 @@@ bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
if (IWL_MVM_BT_COEX_CORUNNING) { - bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 | - BT_VALID_CORUN_LUT_40); + bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 | + BT_VALID_CORUN_LUT_40); bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING); }
if (IWL_MVM_BT_COEX_MPLUT) { bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT); - bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT); + bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT); }
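The valid_bit_msk hunks above are a textbook read-modify-write fix: with plain assignment, enabling the multi-priority LUT silently wiped the co-running LUT bits set a few lines earlier. A minimal reproduction, with bit positions invented for the demo rather than taken from the driver:

#include <stdio.h>
#include <stdint.h>

#define VALID_CORUN_LUT      (1u << 4)	/* illustrative positions only */
#define VALID_MULTI_PRIO_LUT (1u << 5)

int main(void)
{
	uint32_t msk;

	msk = VALID_CORUN_LUT;
	msk = VALID_MULTI_PRIO_LUT;	/* old code: '=' drops the CORUN bit */
	printf("assignment: %#x\n", (unsigned)msk);

	msk = VALID_CORUN_LUT;
	msk |= VALID_MULTI_PRIO_LUT;	/* fixed: '|=' accumulates both */
	printf("or-assign:  %#x\n", (unsigned)msk);
	return 0;
}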
if (mvm->cfg->bt_shared_single_ant) @@@ -1215,6 -1215,17 +1215,17 @@@ bool iwl_mvm_bt_coex_is_mimo_allowed(st return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT; }
+ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, + enum ieee80211_band band) + { + u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading); + + if (band != IEEE80211_BAND_2GHZ) + return false; + + return bt_activity >= BT_LOW_TRAFFIC; + } + u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_tx_info *info, u8 ac) { @@@ -1249,9 -1260,6 +1260,6 @@@
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm) { - if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX)) - return; - iwl_mvm_bt_coex_notif_handle(mvm); }
diff --combined drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index d73a89e,6174c02..6959fda --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@@ -169,8 -169,12 +169,12 @@@ enum iwl_scan_type SCAN_TYPE_DISCOVERY_FORCED = 6, }; /* SCAN_ACTIVITY_TYPE_E_VER_1 */
- /* Maximal number of channels to scan */ - #define MAX_NUM_SCAN_CHANNELS 0x24 + /** + * Maximal number of channels to scan + * it should be equal to: + * max(IWL_NUM_CHANNELS, IWL_NUM_CHANNELS_FAMILY_8000) + */ + #define MAX_NUM_SCAN_CHANNELS 50
/** * struct iwl_scan_cmd - scan request command @@@ -183,9 -187,9 +187,9 @@@ * this number of packets were received (typically 1) * @passive2active: is auto switching from passive to active during scan allowed * @rxchain_sel_flags: RXON_RX_CHAIN_* - * @max_out_time: in usecs, max out of serving channel time + * @max_out_time: in TUs, max out of serving channel time * @suspend_time: how long to pause scan when returning to service channel: - * bits 0-19: beacon interal in usecs (suspend before executing) + * bits 0-19: beacon interal in TUs (suspend before executing) * bits 20-23: reserved * bits 24-31: number of beacons (suspend between channels) * @rxon_flags: RXON_FLG_* @@@ -383,8 -387,8 +387,8 @@@ enum scan_framework_client * @quiet_plcp_th: quiet channel num of packets threshold * @good_CRC_th: passive to active promotion threshold * @rx_chain: RXON rx chain. - * @max_out_time: max uSec to be out of assoceated channel - * @suspend_time: pause scan this long when returning to service channel + * @max_out_time: max TUs to be out of assoceated channel + * @suspend_time: pause scan this TUs when returning to service channel * @flags: RXON flags * @filter_flags: RXONfilter * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz. @@@ -534,13 -538,16 +538,16 @@@ struct iwl_scan_offload_schedule * * IWL_SCAN_OFFLOAD_FLAG_PASS_ALL: pass all results - no filtering. * IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL: add cached channels to partial scan. - * IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN: use energy based scan before partial scan - * on A band. + * IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE: EBS duration is 100mSec - typical + * beacon period. Finding channel activity in this mode is not guaranteed. + * IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE: EBS duration is 200mSec. + * Assuming beacon period is 100ms finding channel activity is guaranteed. */ enum iwl_scan_offload_flags { IWL_SCAN_OFFLOAD_FLAG_PASS_ALL = BIT(0), IWL_SCAN_OFFLOAD_FLAG_CACHED_CHANNEL = BIT(2), - IWL_SCAN_OFFLOAD_FLAG_ENERGY_SCAN = BIT(3), + IWL_SCAN_OFFLOAD_FLAG_EBS_QUICK_MODE = BIT(5), + IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE = BIT(6), };
/** @@@ -563,17 -570,24 +570,24 @@@ enum iwl_scan_offload_compleate_status IWL_SCAN_OFFLOAD_ABORTED = 2, };
+ enum iwl_scan_ebs_status { + IWL_SCAN_EBS_SUCCESS, + IWL_SCAN_EBS_FAILED, + IWL_SCAN_EBS_CHAN_NOT_FOUND, + }; + /** * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1 * @last_schedule_line: last schedule line executed (fast or regular) * @last_schedule_iteration: last scan iteration executed before scan abort * @status: enum iwl_scan_offload_compleate_status + * @ebs_status: last EBS status, see IWL_SCAN_EBS_* */ struct iwl_scan_offload_complete { u8 last_schedule_line; u8 last_schedule_iteration; u8 status; - u8 reserved; + u8 ebs_status; } __packed;
/** diff --combined drivers/net/wireless/iwlwifi/mvm/mac80211.c index b41dc84,97c3dea..32682ed --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@@ -276,6 -276,7 +276,7 @@@ int iwl_mvm_mac_setup_register(struct i IEEE80211_HW_AMPDU_AGGREGATION | IEEE80211_HW_TIMING_BEACON_ONLY | IEEE80211_HW_CONNECTION_MONITOR | + IEEE80211_HW_SUPPORTS_UAPSD | IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | IEEE80211_HW_SUPPORTS_STATIC_SMPS;
@@@ -285,6 -286,8 +286,8 @@@ IEEE80211_RADIOTAP_MCS_HAVE_STBC; hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC; hw->rate_control_algorithm = "iwl-mvm-rs"; + hw->uapsd_queues = IWL_UAPSD_AC_INFO; + hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
/* * Enable 11w if advertised by firmware and software crypto @@@ -295,11 -298,9 +298,9 @@@ !iwlwifi_mod_params.sw_crypto) hw->flags |= IEEE80211_HW_MFP_CAPABLE;
- if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) { - hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD; - hw->uapsd_queues = IWL_UAPSD_AC_INFO; - hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; - } + /* Disable uAPSD due to firmware issues */ + if (true) + hw->flags &= ~IEEE80211_HW_SUPPORTS_UAPSD;
hw->sta_data_size = sizeof(struct iwl_mvm_sta); hw->vif_data_size = sizeof(struct iwl_mvm_vif); @@@ -309,11 -310,8 +310,8 @@@ BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_GO) | - BIT(NL80211_IFTYPE_P2P_DEVICE); - - /* IBSS has bugs in older versions */ - if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8) - hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); + BIT(NL80211_IFTYPE_P2P_DEVICE) | + BIT(NL80211_IFTYPE_ADHOC);
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | @@@ -365,14 -363,11 +363,11 @@@ else hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) { - hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; - hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; - hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; - /* we create the 802.11 header and zero length SSID IE. */ - hw->wiphy->max_sched_scan_ie_len = - SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; - } + hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; + hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; + hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES; + /* we create the 802.11 header and zero length SSID IE. */ + hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | NL80211_FEATURE_P2P_GO_OPPPS; @@@ -386,7 -381,11 +381,11 @@@ }
#ifdef CONFIG_PM_SLEEP - if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len && + if (iwl_mvm_is_d0i3_supported(mvm) && + device_can_wakeup(mvm->trans->dev)) { + mvm->wowlan.flags = WIPHY_WOWLAN_ANY; + hw->wiphy->wowlan = &mvm->wowlan; + } else if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len && mvm->trans->ops->d3_suspend && mvm->trans->ops->d3_resume && device_can_wakeup(mvm->trans->dev)) { @@@ -827,8 -826,7 +826,7 @@@ static int iwl_mvm_mac_add_interface(st goto out_remove_mac;
if (!mvm->bf_allowed_vif && - vif->type == NL80211_IFTYPE_STATION && !vif->p2p && - mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){ + vif->type == NL80211_IFTYPE_STATION && !vif->p2p) { mvm->bf_allowed_vif = mvmvif; vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | IEEE80211_VIF_SUPPORTS_CQM_RSSI; @@@ -1007,7 -1005,7 +1005,7 @@@ static void iwl_mvm_mc_iface_iterator(v memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
- ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd); if (ret) IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret); } @@@ -1023,7 -1021,7 +1021,7 @@@ static void iwl_mvm_recalc_multicast(st if (WARN_ON_ONCE(!mvm->mcast_filter_cmd)) return;
- ieee80211_iterate_active_interfaces( + ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_mc_iface_iterator, &iter_data); } @@@ -1223,6 -1221,10 +1221,10 @@@ static int iwl_mvm_configure_bcast_filt if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING)) return 0;
+ /* bcast filtering isn't supported for P2P client */ + if (vif->p2p) + return 0; + if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) return 0;
@@@ -1697,6 -1699,11 +1699,11 @@@ static int iwl_mvm_mac_sta_state(struc ret = iwl_mvm_add_sta(mvm, vif, sta); } else if (old_state == IEEE80211_STA_NONE && new_state == IEEE80211_STA_AUTH) { + /* + * EBS may be disabled due to previous failures reported by FW. + * Reset EBS status here assuming environment has been changed. + */ + mvm->last_ebs_successful = true; ret = 0; } else if (old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC) { @@@ -1807,11 -1814,6 +1814,11 @@@ static int iwl_mvm_mac_sched_scan_start
mutex_lock(&mvm->mutex);
+ if (!iwl_mvm_is_idle(mvm)) { + ret = -EBUSY; + goto out; + } + switch (mvm->scan_status) { case IWL_MVM_SCAN_OS: IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n"); diff --combined drivers/net/wireless/iwlwifi/mvm/mvm.h index f1ec098,17c42da..107d864 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@@ -164,7 -164,6 +164,6 @@@ enum iwl_dbgfs_pm_mask MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2), MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3), MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4), - MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5), MVM_DEBUGFS_PM_LPRX_ENA = BIT(6), MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7), MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8), @@@ -177,7 -176,6 +176,6 @@@ struct iwl_dbgfs_pm u32 tx_data_timeout; bool skip_over_dtim; u8 skip_dtim_periods; - bool disable_power_off; bool lprx_ena; u32 lprx_rssi_threshold; bool snooze_ena; @@@ -232,6 -230,7 +230,7 @@@ enum iwl_mvm_ref_type IWL_MVM_REF_USER, IWL_MVM_REF_TX, IWL_MVM_REF_TX_AGG, + IWL_MVM_REF_EXIT_WORK,
IWL_MVM_REF_COUNT, }; @@@ -265,6 -264,7 +264,7 @@@ struct iwl_mvm_vif_bf_data * @uploaded: indicates the MAC context has been added to the device * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface * should get quota etc. + * @pm_enabled: indicates if MAC power management is allowed * @monitor_active: indicates that monitor context is configured, and that the * interface should get quota etc. * @low_latency: indicates that this interface is in low-latency mode @@@ -283,6 -283,7 +283,7 @@@ struct iwl_mvm_vif
bool uploaded; bool ap_ibss_active; + bool pm_enabled; bool monitor_active; bool low_latency; struct iwl_mvm_vif_bf_data bf_data; @@@ -451,6 -452,11 +452,11 @@@ struct iwl_mvm_frame_stats int last_frame_idx; };
+ enum { + D0I3_DEFER_WAKEUP, + D0I3_PENDING_WAKEUP, + }; + struct iwl_mvm { /* for logger access */ struct device *dev; @@@ -535,6 -541,8 +541,8 @@@ /* Internal station */ struct iwl_mvm_int_sta aux_sta;
+ bool last_ebs_successful; + u8 scan_last_antenna_idx; /* to toggle TX between antennas */ u8 mgmt_last_antenna_idx;
@@@ -578,6 -586,8 +586,8 @@@ void *fw_error_dump; void *fw_error_sram; u32 fw_error_sram_len; + u32 *fw_error_rxf; + u32 fw_error_rxf_len;
struct led_classdev led;
@@@ -601,6 -611,9 +611,9 @@@ bool d0i3_offloading; struct work_struct d0i3_exit_work; struct sk_buff_head d0i3_tx; + /* protect d0i3_suspend_flags */ + struct mutex d0i3_suspend_mutex; + unsigned long d0i3_suspend_flags; /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */ spinlock_t d0i3_tx_lock; wait_queue_head_t d0i3_exit_waitq; @@@ -629,8 -642,6 +642,6 @@@
/* Indicate if device power save is allowed */ bool ps_disabled; - /* Indicate if device power management is allowed */ - bool pm_disabled; };
/* Extract MVM priv from op_mode and _hw */ @@@ -705,6 -716,7 +716,7 @@@ void iwl_mvm_dump_nic_error_log(struct #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm); void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm); + void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm); #endif u8 first_antenna(u8 mask); u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx); @@@ -874,8 -886,6 +886,6 @@@ void iwl_mvm_update_frame_stats(struct int rs_pretty_print_rate(char *buf, const u32 rate);
/* power management */ - int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm); - int iwl_mvm_power_update_device(struct iwl_mvm *mvm); int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@@ -922,9 -932,9 +932,9 @@@ int iwl_mvm_send_proto_offload(struct i void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type); void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq); + int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
/* BT Coex */ - int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm); int iwl_send_bt_init_conf(struct iwl_mvm *mvm); int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, @@@ -936,6 -946,8 +946,8 @@@ u16 iwl_mvm_coex_agg_time_limit(struct struct ieee80211_sta *sta); bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, struct ieee80211_sta *sta); + bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm, + enum ieee80211_band band); u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, struct ieee80211_tx_info *info, u8 ac); int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable); @@@ -1003,9 -1015,6 +1015,9 @@@ static inline bool iwl_mvm_vif_low_late return mvmvif->low_latency; }
+/* Assoc status */ +bool iwl_mvm_is_idle(struct iwl_mvm *mvm); + /* Thermal management and CT-kill */ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); void iwl_mvm_tt_handler(struct iwl_mvm *mvm); diff --combined drivers/net/wireless/iwlwifi/mvm/rs.c index e1c8388,d44b2b3..857ddaf --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@@ -527,6 -527,9 +527,9 @@@ static void rs_rate_scale_clear_tbl_win IWL_DEBUG_RATE(mvm, "Clearing up window stats\n"); for (i = 0; i < IWL_RATE_COUNT; i++) rs_rate_scale_clear_window(&tbl->win[i]); + + for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++) + rs_rate_scale_clear_window(&tbl->tpc_win[i]); }
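Clearing tbl->tpc_win alongside the per-rate windows above matters because the TPC code further down keeps one success window per transmit-power reduction step, shaped exactly like the per-rate windows. A stripped-down model of such a window (the real code also ages old samples out; that part is omitted here):

#include <stdio.h>

struct rate_window {
	int total;		/* frames attempted */
	int success;		/* frames acked */
	int success_ratio;	/* in 1/128 units, like the driver's scaling */
};

static void window_add(struct rate_window *w, int attempts, int successes)
{
	w->total += attempts;
	w->success += successes;
	w->success_ratio = w->total ? (128 * w->success) / w->total : -1;
}

int main(void)
{
	struct rate_window w = { 0, 0, -1 };

	window_add(&w, 64, 48);
	printf("SR = %d/128 (~%d%%)\n", w.success_ratio,
	       (100 * w.success) / w.total);
	return 0;
}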
static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type) @@@ -656,17 -659,34 +659,34 @@@ static int _rs_collect_tx_data(struct i return 0; }
- static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl, - int scale_index, int attempts, int successes) + static int rs_collect_tx_data(struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, + int scale_index, int attempts, int successes, + u8 reduced_txp) { struct iwl_rate_scale_data *window = NULL; + int ret;
if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) return -EINVAL;
+ if (tbl->column != RS_COLUMN_INVALID) { + lq_sta->tx_stats[tbl->column][scale_index].total += attempts; + lq_sta->tx_stats[tbl->column][scale_index].success += successes; + } + /* Select window for current tx bit rate */ window = &(tbl->win[scale_index]);
+ ret = _rs_collect_tx_data(tbl, scale_index, attempts, successes, + window); + if (ret) + return ret; + + if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION)) + return -EINVAL; + + window = &tbl->tpc_win[reduced_txp]; return _rs_collect_tx_data(tbl, scale_index, attempts, successes, window); } @@@ -1000,6 -1020,7 +1020,7 @@@ static void rs_tx_status(void *mvm_r, s u32 ucode_rate; struct rs_rate rate; struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; + u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
/* Treat uninitialized rate scaling data same as non-existing. */ if (!lq_sta) { @@@ -1010,7 -1031,7 +1031,7 @@@ return; }
-#ifdef CPTCFG_MAC80211_DEBUGFS +#ifdef CONFIG_MAC80211_DEBUGFS /* Disable last tx check if we are debugging with fixed rate */ if (lq_sta->dbg_fixed_rate) { IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n"); @@@ -1141,9 -1162,10 +1162,10 @@@ if (info->flags & IEEE80211_TX_STAT_AMPDU) { ucode_rate = le32_to_cpu(table->rs_table[0]); rs_rate_from_ucode_rate(ucode_rate, info->band, &rate); - rs_collect_tx_data(curr_tbl, rate.index, + rs_collect_tx_data(lq_sta, curr_tbl, rate.index, info->status.ampdu_len, - info->status.ampdu_ack_len); + info->status.ampdu_ack_len, + reduced_txp);
/* Update success/fail counts if not searching for new mode */ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) { @@@ -1176,8 -1198,9 +1198,9 @@@ else continue;
- rs_collect_tx_data(tmp_tbl, rate.index, 1, - i < retries ? 0 : legacy_success); + rs_collect_tx_data(lq_sta, tmp_tbl, rate.index, 1, + i < retries ? 0 : legacy_success, + reduced_txp); }
/* Update success/fail counts if not searching for new mode */ @@@ -1188,6 -1211,7 +1211,7 @@@ } /* The last TX rate is cached in lq_sta; it's set in if/else above */ lq_sta->last_rate_n_flags = ucode_rate; + IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp); done: /* See if there's a better rate or modulation mode to try. */ if (sta && sta->supp_rates[sband->band]) @@@ -1769,6 -1793,198 +1793,198 @@@ out return action; }
+ static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index, + int *weaker, int *stronger) + { + *weaker = index + TPC_TX_POWER_STEP; + if (*weaker > TPC_MAX_REDUCTION) + *weaker = TPC_INVALID; + + *stronger = index - TPC_TX_POWER_STEP; + if (*stronger < 0) + *stronger = TPC_INVALID; + } + + static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct rs_rate *rate, + enum ieee80211_band band) + { + int index = rate->index; + + /* + * allow tpc only if power management is enabled, or bt coex + * activity grade allows it and we are on 2.4Ghz. + */ + if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM && + !iwl_mvm_bt_coex_is_tpc_allowed(mvm, band)) + return false; + + IWL_DEBUG_RATE(mvm, "check rate, table type: %d\n", rate->type); + if (is_legacy(rate)) + return index == IWL_RATE_54M_INDEX; + if (is_ht(rate)) + return index == IWL_RATE_MCS_7_INDEX; + if (is_vht(rate)) + return index == IWL_RATE_MCS_7_INDEX || + index == IWL_RATE_MCS_8_INDEX || + index == IWL_RATE_MCS_9_INDEX; + + WARN_ON_ONCE(1); + return false; + } + + enum tpc_action { + TPC_ACTION_STAY, + TPC_ACTION_DECREASE, + TPC_ACTION_INCREASE, + TPC_ACTION_NO_RESTIRCTION, + }; + + static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm, + s32 sr, int weak, int strong, + int current_tpt, + int weak_tpt, int strong_tpt) + { + /* stay until we have valid tpt */ + if (current_tpt == IWL_INVALID_VALUE) { + IWL_DEBUG_RATE(mvm, "no current tpt. stay.\n"); + return TPC_ACTION_STAY; + } + + /* Too many failures, increase txp */ + if (sr <= TPC_SR_FORCE_INCREASE || current_tpt == 0) { + IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n"); + return TPC_ACTION_NO_RESTIRCTION; + } + + /* try decreasing first if applicable */ + if (weak != TPC_INVALID) { + if (weak_tpt == IWL_INVALID_VALUE && + (strong_tpt == IWL_INVALID_VALUE || + current_tpt >= strong_tpt)) { + IWL_DEBUG_RATE(mvm, + "no weak txp measurement. decrease txp\n"); + return TPC_ACTION_DECREASE; + } + + if (weak_tpt > current_tpt) { + IWL_DEBUG_RATE(mvm, + "lower txp has better tpt. decrease txp\n"); + return TPC_ACTION_DECREASE; + } + } + + /* next, increase if needed */ + if (sr < TPC_SR_NO_INCREASE && strong != TPC_INVALID) { + if (weak_tpt == IWL_INVALID_VALUE && + strong_tpt != IWL_INVALID_VALUE && + current_tpt < strong_tpt) { + IWL_DEBUG_RATE(mvm, + "higher txp has better tpt. increase txp\n"); + return TPC_ACTION_INCREASE; + } + + if (weak_tpt < current_tpt && + (strong_tpt == IWL_INVALID_VALUE || + strong_tpt > current_tpt)) { + IWL_DEBUG_RATE(mvm, + "lower txp has worse tpt. 
increase txp\n"); + return TPC_ACTION_INCREASE; + } + } + + IWL_DEBUG_RATE(mvm, "no need to increase or decrease txp - stay\n"); + return TPC_ACTION_STAY; + } + + static bool rs_tpc_perform(struct iwl_mvm *mvm, + struct ieee80211_sta *sta, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl) + { + struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv; + struct ieee80211_vif *vif = mvm_sta->vif; + struct ieee80211_chanctx_conf *chanctx_conf; + enum ieee80211_band band; + struct iwl_rate_scale_data *window; + struct rs_rate *rate = &tbl->rate; + enum tpc_action action; + s32 sr; + u8 cur = lq_sta->lq.reduced_tpc; + int current_tpt; + int weak, strong; + int weak_tpt = IWL_INVALID_VALUE, strong_tpt = IWL_INVALID_VALUE; + + #ifdef CONFIG_MAC80211_DEBUGFS + if (lq_sta->dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) { + IWL_DEBUG_RATE(mvm, "fixed tpc: %d", + lq_sta->dbg_fixed_txp_reduction); + lq_sta->lq.reduced_tpc = lq_sta->dbg_fixed_txp_reduction; + return cur != lq_sta->dbg_fixed_txp_reduction; + } + #endif + + rcu_read_lock(); + chanctx_conf = rcu_dereference(vif->chanctx_conf); + if (WARN_ON(!chanctx_conf)) + band = IEEE80211_NUM_BANDS; + else + band = chanctx_conf->def.chan->band; + rcu_read_unlock(); + + if (!rs_tpc_allowed(mvm, rate, band)) { + IWL_DEBUG_RATE(mvm, + "tpc is not allowed. remove txp restrictions"); + lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION; + return cur != TPC_NO_REDUCTION; + } + + rs_get_adjacent_txp(mvm, cur, &weak, &strong); + + /* Collect measured throughputs for current and adjacent rates */ + window = tbl->tpc_win; + sr = window[cur].success_ratio; + current_tpt = window[cur].average_tpt; + if (weak != TPC_INVALID) + weak_tpt = window[weak].average_tpt; + if (strong != TPC_INVALID) + strong_tpt = window[strong].average_tpt; + + IWL_DEBUG_RATE(mvm, + "(TPC: %d): cur_tpt %d SR %d weak %d strong %d weak_tpt %d strong_tpt %d\n", + cur, current_tpt, sr, weak, strong, + weak_tpt, strong_tpt); + + action = rs_get_tpc_action(mvm, sr, weak, strong, + current_tpt, weak_tpt, strong_tpt); + + /* override actions if we are on the edge */ + if (weak == TPC_INVALID && action == TPC_ACTION_DECREASE) { + IWL_DEBUG_RATE(mvm, "already in lowest txp, stay"); + action = TPC_ACTION_STAY; + } else if (strong == TPC_INVALID && + (action == TPC_ACTION_INCREASE || + action == TPC_ACTION_NO_RESTIRCTION)) { + IWL_DEBUG_RATE(mvm, "already in highest txp, stay"); + action = TPC_ACTION_STAY; + } + + switch (action) { + case TPC_ACTION_DECREASE: + lq_sta->lq.reduced_tpc = weak; + return true; + case TPC_ACTION_INCREASE: + lq_sta->lq.reduced_tpc = strong; + return true; + case TPC_ACTION_NO_RESTIRCTION: + lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION; + return true; + case TPC_ACTION_STAY: + /* do nothing */ + break; + } + return false; + } + /* * Do rate scaling and search for new modulation mode. 
*/ @@@ -2019,6 -2235,8 +2235,8 @@@ static void rs_rate_scale_perform(struc break; case RS_ACTION_STAY: /* No change */ + update_lq = rs_tpc_perform(mvm, sta, lq_sta, tbl); + break; default: break; } @@@ -2478,6 -2696,7 +2696,7 @@@ void iwl_mvm_rs_rate_init(struct iwl_mv lq_sta->is_agg = 0; #ifdef CONFIG_MAC80211_DEBUGFS lq_sta->dbg_fixed_rate = 0; + lq_sta->dbg_fixed_txp_reduction = TPC_INVALID; #endif #ifdef CONFIG_IWLWIFI_DEBUGFS iwl_mvm_reset_frame_stats(mvm, &mvm->drv_rx_stats); @@@ -2653,6 -2872,7 +2872,7 @@@ static void rs_fill_lq_cmd(struct iwl_m rs_build_rates_table_from_fixed(mvm, lq_cmd, lq_sta->band, lq_sta->dbg_fixed_rate); + lq_cmd->reduced_tpc = 0; ant = (lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS; } else @@@ -2783,7 -3003,6 +3003,6 @@@ static ssize_t rs_sta_dbgfs_scale_table size_t buf_size; u32 parsed_rate;
- mvm = lq_sta->drv; memset(buf, 0, sizeof(buf)); buf_size = min(count, sizeof(buf) - 1); @@@ -2856,6 -3075,7 +3075,7 @@@ static ssize_t rs_sta_dbgfs_scale_table lq_sta->lq.agg_disable_start_th, lq_sta->lq.agg_frame_cnt_limit);
+ desc += sprintf(buff+desc, "reduced tpc=%d\n", lq_sta->lq.reduced_tpc); desc += sprintf(buff+desc, "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", lq_sta->lq.initial_rate_index[0], @@@ -2928,6 -3148,94 +3148,94 @@@ static const struct file_operations rs_ .llseek = default_llseek, };
+ static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) + { + static const char * const column_name[] = { + [RS_COLUMN_LEGACY_ANT_A] = "LEGACY_ANT_A", + [RS_COLUMN_LEGACY_ANT_B] = "LEGACY_ANT_B", + [RS_COLUMN_SISO_ANT_A] = "SISO_ANT_A", + [RS_COLUMN_SISO_ANT_B] = "SISO_ANT_B", + [RS_COLUMN_SISO_ANT_A_SGI] = "SISO_ANT_A_SGI", + [RS_COLUMN_SISO_ANT_B_SGI] = "SISO_ANT_B_SGI", + [RS_COLUMN_MIMO2] = "MIMO2", + [RS_COLUMN_MIMO2_SGI] = "MIMO2_SGI", + }; + + static const char * const rate_name[] = { + [IWL_RATE_1M_INDEX] = "1M", + [IWL_RATE_2M_INDEX] = "2M", + [IWL_RATE_5M_INDEX] = "5.5M", + [IWL_RATE_11M_INDEX] = "11M", + [IWL_RATE_6M_INDEX] = "6M|MCS0", + [IWL_RATE_9M_INDEX] = "9M", + [IWL_RATE_12M_INDEX] = "12M|MCS1", + [IWL_RATE_18M_INDEX] = "18M|MCS2", + [IWL_RATE_24M_INDEX] = "24M|MCS3", + [IWL_RATE_36M_INDEX] = "36M|MCS4", + [IWL_RATE_48M_INDEX] = "48M|MCS5", + [IWL_RATE_54M_INDEX] = "54M|MCS6", + [IWL_RATE_MCS_7_INDEX] = "MCS7", + [IWL_RATE_MCS_8_INDEX] = "MCS8", + [IWL_RATE_MCS_9_INDEX] = "MCS9", + }; + + char *buff, *pos, *endpos; + int col, rate; + ssize_t ret; + struct iwl_lq_sta *lq_sta = file->private_data; + struct rs_rate_stats *stats; + static const size_t bufsz = 1024; + + buff = kmalloc(bufsz, GFP_KERNEL); + if (!buff) + return -ENOMEM; + + pos = buff; + endpos = pos + bufsz; + + pos += scnprintf(pos, endpos - pos, "COLUMN,"); + for (rate = 0; rate < IWL_RATE_COUNT; rate++) + pos += scnprintf(pos, endpos - pos, "%s,", rate_name[rate]); + pos += scnprintf(pos, endpos - pos, "\n"); + + for (col = 0; col < RS_COLUMN_COUNT; col++) { + pos += scnprintf(pos, endpos - pos, + "%s,", column_name[col]); + + for (rate = 0; rate < IWL_RATE_COUNT; rate++) { + stats = &(lq_sta->tx_stats[col][rate]); + pos += scnprintf(pos, endpos - pos, + "%llu/%llu,", + stats->success, + stats->total); + } + pos += scnprintf(pos, endpos - pos, "\n"); + } + + ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff); + kfree(buff); + return ret; + } + + static ssize_t rs_sta_dbgfs_drv_tx_stats_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) + { + struct iwl_lq_sta *lq_sta = file->private_data; + memset(lq_sta->tx_stats, 0, sizeof(lq_sta->tx_stats)); + + return count; + } + + static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = { + .read = rs_sta_dbgfs_drv_tx_stats_read, + .write = rs_sta_dbgfs_drv_tx_stats_write, + .open = simple_open, + .llseek = default_llseek, + }; + static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir) { struct iwl_lq_sta *lq_sta = mvm_sta; @@@ -2937,9 -3245,15 +3245,15 @@@ lq_sta->rs_sta_dbgfs_stats_table_file = debugfs_create_file("rate_stats_table", S_IRUSR, dir, lq_sta, &rs_sta_dbgfs_stats_table_ops); + lq_sta->rs_sta_dbgfs_drv_tx_stats_file = + debugfs_create_file("drv_tx_stats", S_IRUSR | S_IWUSR, dir, + lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops); lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file = debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir, &lq_sta->tx_agg_tid_en); + lq_sta->rs_sta_dbgfs_reduced_txp_file = + debugfs_create_u8("reduced_tpc", S_IRUSR | S_IWUSR, dir, + &lq_sta->dbg_fixed_txp_reduction); }
static void rs_remove_debugfs(void *mvm, void *mvm_sta) @@@ -2947,7 -3261,9 +3261,9 @@@ struct iwl_lq_sta *lq_sta = mvm_sta; debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file); debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); + debugfs_remove(lq_sta->rs_sta_dbgfs_drv_tx_stats_file); debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file); + debugfs_remove(lq_sta->rs_sta_dbgfs_reduced_txp_file); } #endif
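The rs.c hunks above add a transmit-power-control loop: rs_tpc_perform() reads the per-reduction-level throughput windows (tbl->tpc_win), asks rs_get_tpc_action() whether the weaker or the stronger adjacent level measured better, clamps the action at the table edges, and then steps lq.reduced_tpc. A minimal standalone sketch of that decision shape; the threshold names and numbers below are invented for illustration and do not match the driver's constants:

#include <stdio.h>

enum tpc_action { TPC_INCREASE, TPC_DECREASE, TPC_STAY };

/* Illustrative thresholds only; the real driver derives its limits
 * from its own success-ratio definitions, not these numbers. */
#define SR_FORCE_DECREASE 15    /* success ratio (%) below which we back off */
#define SR_NO_INCREASE    85    /* don't change power if SR is already this good */

static enum tpc_action tpc_decide(int sr, int cur_tpt,
                                  int weak_tpt, int strong_tpt)
{
        if (sr < SR_FORCE_DECREASE)     /* link is failing: step away */
                return TPC_DECREASE;
        if (weak_tpt > cur_tpt)         /* weaker level measured better */
                return TPC_DECREASE;
        if (sr < SR_NO_INCREASE && strong_tpt > cur_tpt)
                return TPC_INCREASE;    /* stronger level measured better */
        return TPC_STAY;
}

int main(void)
{
        /* current tpt 40, weak level measured 55: step toward weak */
        printf("%d\n", tpc_decide(60, 40, 55, 30));  /* 1 = TPC_DECREASE */
        /* stronger level measured better and SR leaves headroom */
        printf("%d\n", tpc_decide(60, 40, 20, 50));  /* 0 = TPC_INCREASE */
        return 0;
}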
diff --combined drivers/net/wireless/iwlwifi/mvm/scan.c index c28de54,63e7b16..36ae01a --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@@ -277,22 -277,51 +277,22 @@@ static void iwl_mvm_scan_calc_params(st IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_scan_condition_iterator, &global_bound); - /* - * Under low latency traffic passive scan is fragmented meaning - * that dwell on a particular channel will be fragmented. Each fragment - * dwell time is 20ms and fragments period is 105ms. Skipping to next - * channel will be delayed by the same period - 105ms. So suspend_time - * parameter describing both fragments and channels skipping periods is - * set to 105ms. This value is chosen so that overall passive scan - * duration will not be too long. Max_out_time in this case is set to - * 70ms, so for active scanning operating channel will be left for 70ms - * while for passive still for 20ms (fragment dwell). - */ - if (global_bound) { - if (!iwl_mvm_low_latency(mvm)) { - params->suspend_time = ieee80211_tu_to_usec(100); - params->max_out_time = ieee80211_tu_to_usec(600); - } else { - params->suspend_time = ieee80211_tu_to_usec(105); - /* P2P doesn't support fragmented passive scan, so - * configure max_out_time to be at least longest dwell - * time for passive scan. - */ - if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) { - params->max_out_time = ieee80211_tu_to_usec(70); - params->passive_fragmented = true; - } else { - u32 passive_dwell;
- /* - * Use band G so that passive channel dwell time - * will be assigned with maximum value. - */ - band = IEEE80211_BAND_2GHZ; - passive_dwell = iwl_mvm_get_passive_dwell(band); - params->max_out_time = - ieee80211_tu_to_usec(passive_dwell); - } - } + if (!global_bound) + goto not_bound; + + params->suspend_time = 100; + params->max_out_time = 600; + + if (iwl_mvm_low_latency(mvm)) { + params->suspend_time = 250; + params->max_out_time = 250; }
+not_bound: + for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { - if (params->passive_fragmented) - params->dwell[band].passive = 20; - else - params->dwell[band].passive = - iwl_mvm_get_passive_dwell(band); + params->dwell[band].passive = iwl_mvm_get_passive_dwell(band); params->dwell[band].active = iwl_mvm_get_active_dwell(band, n_ssids); } @@@ -319,7 -348,10 +319,10 @@@ int iwl_mvm_scan_request(struct iwl_mv struct iwl_mvm_scan_params params = {};
lockdep_assert_held(&mvm->mutex); - BUG_ON(mvm->scan_cmd == NULL); + + /* we should have failed registration if scan_cmd was NULL */ + if (WARN_ON(mvm->scan_cmd == NULL)) + return -ENOMEM;
IWL_DEBUG_SCAN(mvm, "Handling mac80211 scan request\n"); mvm->scan_status = IWL_MVM_SCAN_OS; @@@ -538,9 -570,13 +541,13 @@@ int iwl_mvm_rx_scan_offload_complete_no /* scan status must be locked for proper checking */ lockdep_assert_held(&mvm->mutex);
- IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n", + IWL_DEBUG_SCAN(mvm, + "Scheduled scan completed, status %s EBS status %s:%d\n", scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ? - "completed" : "aborted"); + "completed" : "aborted", scan_notif->ebs_status == + IWL_SCAN_EBS_SUCCESS ? "success" : "failed", + scan_notif->ebs_status); +
/* only call mac80211 completion if the stop was initiated by FW */ if (mvm->scan_status == IWL_MVM_SCAN_SCHED) { @@@ -548,6 -584,8 +555,8 @@@ ieee80211_sched_scan_stopped(mvm->hw); }
+ mvm->last_ebs_successful = !scan_notif->ebs_status; + return 0; }
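The handler above collapses the firmware's EBS result into one boolean: ebs_status is zero on success, so last_ebs_successful is simply its negation, and a later scan.c hunk requests the accurate EBS mode only when the previous run worked and the uCode advertises support. A standalone sketch of that gating; the bit values are placeholders, not the real IWL_* definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions, for illustration only. */
#define TLV_FLAGS_EBS_SUPPORT   (1u << 9)
#define SCAN_FLAG_EBS_ACCURATE  (1u << 2)

static uint16_t scan_flags(bool last_ebs_ok, uint32_t ucode_flags)
{
        uint16_t flags = 0;

        /* Only ask for the accurate (costlier) EBS mode when the
         * firmware supports it and the previous attempt did not fail. */
        if (last_ebs_ok && (ucode_flags & TLV_FLAGS_EBS_SUPPORT))
                flags |= SCAN_FLAG_EBS_ACCURATE;
        return flags;
}

int main(void)
{
        printf("0x%x\n", scan_flags(true, TLV_FLAGS_EBS_SUPPORT));  /* 0x4 */
        printf("0x%x\n", scan_flags(false, TLV_FLAGS_EBS_SUPPORT)); /* 0x0 */
        return 0;
}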
@@@ -732,7 -770,7 +741,7 @@@ int iwl_mvm_config_sched_scan(struct iw int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels; int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; int head = 0; - int tail = band_2ghz + band_5ghz; + int tail = band_2ghz + band_5ghz - 1; u32 ssid_bitmap; int cmd_len; int ret; @@@ -884,6 -922,11 +893,11 @@@ int iwl_mvm_sched_scan_start(struct iwl scan_req.flags |= cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_PASS_ALL); }
+ if (mvm->last_ebs_successful && + mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) + scan_req.flags |= + cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_EBS_ACCURATE_MODE); + return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, CMD_SYNC, sizeof(scan_req), &scan_req); } diff --combined drivers/net/wireless/iwlwifi/mvm/utils.c index 2180902,c5f4532..eb2ca64 --- a/drivers/net/wireless/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/iwlwifi/mvm/utils.c @@@ -64,6 -64,7 +64,7 @@@
#include "iwl-debug.h" #include "iwl-io.h" + #include "iwl-prph.h"
#include "mvm.h" #include "fw-api-rs.h" @@@ -469,6 -470,8 +470,8 @@@ void iwl_mvm_dump_nic_error_log(struct mvm->status, table.valid); }
+ /* Do not change this output - scripts rely on it */ + IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, @@@ -522,7 -525,7 +525,7 @@@ void iwl_mvm_fw_error_sram_dump(struct u32 ofs, sram_len; void *sram;
- if (!mvm->ucode_loaded || mvm->fw_error_sram) + if (!mvm->ucode_loaded || mvm->fw_error_sram || mvm->fw_error_dump) return;
img = &mvm->fw->img[mvm->cur_ucode]; @@@ -538,6 -541,47 +541,47 @@@ mvm->fw_error_sram_len = sram_len; }
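The RXF dump added in the next hunk reads the FIFO length from RXF_SIZE_ADDR, where the hardware reports the byte count divided by 128, hence the extra left shift by 7 after masking. A worked decode with stand-in mask and position values (the real layout lives in iwl-prph.h):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the RXF_SIZE_* register layout. */
#define RXF_SIZE_BYTE_CNT_MSK 0x00000fff
#define RXF_SIZE_BYTE_CNT_POS 0

static uint32_t rxf_len_bytes(uint32_t reg_val)
{
        uint32_t units = (reg_val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CNT_POS;

        return units << 7;      /* register counts 128-byte units */
}

int main(void)
{
        /* 72 units of 128 bytes -> 9216-byte RX FIFO */
        printf("%u\n", rxf_len_bytes(72));
        return 0;
}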
+ void iwl_mvm_fw_error_rxf_dump(struct iwl_mvm *mvm) + { + int i, reg_val; + unsigned long flags; + + if (!mvm->ucode_loaded || mvm->fw_error_rxf || mvm->fw_error_dump) + return; + + /* reading buffer size */ + reg_val = iwl_trans_read_prph(mvm->trans, RXF_SIZE_ADDR); + mvm->fw_error_rxf_len = + (reg_val & RXF_SIZE_BYTE_CNT_MSK) >> RXF_SIZE_BYTE_CND_POS; + + /* the register holds the value divided by 128 */ + mvm->fw_error_rxf_len = mvm->fw_error_rxf_len << 7; + + if (!mvm->fw_error_rxf_len) + return; + + mvm->fw_error_rxf = kzalloc(mvm->fw_error_rxf_len, GFP_ATOMIC); + if (!mvm->fw_error_rxf) { + mvm->fw_error_rxf_len = 0; + return; + } + + if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags)) { + kfree(mvm->fw_error_rxf); + mvm->fw_error_rxf = NULL; + mvm->fw_error_rxf_len = 0; + return; + } + + for (i = 0; i < (mvm->fw_error_rxf_len / sizeof(u32)); i++) { + iwl_trans_write_prph(mvm->trans, RXF_LD_FENCE_OFFSET_ADDR, + i * sizeof(u32)); + mvm->fw_error_rxf[i] = + iwl_trans_read_prph(mvm->trans, RXF_FIFO_RD_FENCE_ADDR); + } + iwl_trans_release_nic_access(mvm->trans, &flags); + } + /** * iwl_mvm_send_lq_cmd() - Send link quality command * @init: This command is sent as part of station initialization right @@@ -644,22 -688,3 +688,22 @@@ bool iwl_mvm_low_latency(struct iwl_mv
return result; } + +static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) +{ + bool *idle = _data; + + if (!vif->bss_conf.idle) + *idle = false; +} + +bool iwl_mvm_is_idle(struct iwl_mvm *mvm) +{ + bool idle = true; + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_idle_iter, &idle); + + return idle; +} diff --combined drivers/net/wireless/iwlwifi/pcie/trans.c index 2365553,f98ef1e..c76b148 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@@ -103,7 -103,6 +103,6 @@@ static void iwl_pcie_set_pwr(struct iwl
/* PCI registers */ #define PCI_CFG_RETRY_TIMEOUT 0x041 - #define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
static void iwl_pcie_apm_config(struct iwl_trans *trans) { @@@ -1053,6 -1052,12 +1052,12 @@@ static void iwl_trans_pcie_write_prph(s iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); }
+ static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) + { + WARN_ON(1); + return 0; + } + static void iwl_trans_pcie_configure(struct iwl_trans *trans, const struct iwl_trans_config *trans_cfg) { @@@ -1079,6 -1084,18 +1084,18 @@@
trans_pcie->command_names = trans_cfg->command_names; trans_pcie->bc_table_dword = trans_cfg->bc_table_dword; + + /* Initialize NAPI here - it should be before registering to mac80211 + * in the opmode but after the HW struct is allocated. + * As this function may be called again in some corner cases don't + * do anything if NAPI was already initialized. + */ + if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) { + init_dummy_netdev(&trans_pcie->napi_dev); + iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi, + &trans_pcie->napi_dev, + iwl_pcie_dummy_napi_poll, 64); + } }
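As the comment above notes, the configure hook may run more than once, so the NAPI setup keys on napi.poll, which is only non-NULL once netif_napi_add() has run. The same init-once shape, sketched standalone:

#include <stdio.h>

struct ctx {
        int (*poll)(void);      /* NULL until first initialisation */
};

static int real_poll(void) { return 0; }

static void configure(struct ctx *c)
{
        /* Safe to call repeatedly: only the first call initialises. */
        if (!c->poll) {
                c->poll = real_poll;
                puts("initialised");
        }
}

int main(void)
{
        struct ctx c = { 0 };

        configure(&c);          /* prints "initialised" */
        configure(&c);          /* no-op on the second call */
        return 0;
}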
void iwl_trans_pcie_free(struct iwl_trans *trans) @@@ -1099,6 -1116,9 +1116,9 @@@ pci_disable_device(trans_pcie->pci_dev); kmem_cache_destroy(trans->dev_cmd_pool);
+ if (trans_pcie->napi.poll) + netif_napi_del(&trans_pcie->napi); + kfree(trans); }
@@@ -1237,7 -1257,7 +1257,7 @@@ static int iwl_trans_pcie_write_mem(str
#define IWL_FLUSH_WAIT_MS 2000
- static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans) + static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq; @@@ -1250,13 -1270,31 +1270,31 @@@
/* waiting for all the tx frames complete might take a while */ for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { + u8 wr_ptr; + if (cnt == trans_pcie->cmd_queue) continue; + if (!test_bit(cnt, trans_pcie->queue_used)) + continue; + if (!(BIT(cnt) & txq_bm)) + continue; + + IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt); txq = &trans_pcie->txq[cnt]; q = &txq->q; - while (q->read_ptr != q->write_ptr && !time_after(jiffies, - now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) + wr_ptr = ACCESS_ONCE(q->write_ptr); + + while (q->read_ptr != ACCESS_ONCE(q->write_ptr) && + !time_after(jiffies, + now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { + u8 write_ptr = ACCESS_ONCE(q->write_ptr); + + if (WARN_ONCE(wr_ptr != write_ptr, + "WR pointer moved while flushing %d -> %d\n", + wr_ptr, write_ptr)) + return -ETIMEDOUT; msleep(1); + }
if (q->read_ptr != q->write_ptr) { IWL_ERR(trans, @@@ -1264,6 -1302,7 +1302,7 @@@ ret = -ETIMEDOUT; break; } + IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt); }
if (!ret) @@@ -1749,10 -1788,6 +1788,10 @@@ struct iwl_trans *iwl_trans_pcie_alloc( * PCI Tx retries from interfering with C3 CPU state */ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+ trans->dev = &pdev->dev; + trans_pcie->pci_dev = pdev; + iwl_disable_interrupts(trans); + err = pci_enable_msi(pdev); if (err) { dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err); @@@ -1764,6 -1799,8 +1803,6 @@@ } }
- trans->dev = &pdev->dev; - trans_pcie->pci_dev = pdev; trans->hw_rev = iwl_read32(trans, CSR_HW_REV); trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), @@@ -1789,6 -1826,8 +1828,6 @@@ goto out_pci_disable_msi; }
- trans_pcie->inta_mask = CSR_INI_SET_MASK; - if (iwl_pcie_alloc_ict(trans)) goto out_free_cmd_pool;
@@@ -1800,8 -1839,6 +1839,8 @@@ goto out_free_ict; }
+ trans_pcie->inta_mask = CSR_INI_SET_MASK; + return trans;
out_free_ict: diff --combined drivers/net/xen-netback/interface.c index 20e9def,a755733..53cdcdf --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@@ -75,8 -75,32 +75,8 @@@ static int xenvif_poll(struct napi_stru work_done = xenvif_tx_action(vif, budget);
if (work_done < budget) { - int more_to_do = 0; - unsigned long flags; - - /* It is necessary to disable IRQ before calling - * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might - * lose event from the frontend. - * - * Consider: - * RING_HAS_UNCONSUMED_REQUESTS - * <frontend generates event to trigger napi_schedule> - * __napi_complete - * - * This handler is still in scheduled state so the - * event has no effect at all. After __napi_complete - * this handler is descheduled and cannot get - * scheduled again. We lose event in this case and the ring - * will be completely stalled. - */ - - local_irq_save(flags); - - RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do); - if (!more_to_do) - __napi_complete(napi); - - local_irq_restore(flags); + napi_complete(napi); + xenvif_napi_schedule_or_enable_events(vif); }
return work_done; @@@ -170,7 -194,7 +170,7 @@@ static void xenvif_up(struct xenvif *vi enable_irq(vif->tx_irq); if (vif->tx_irq != vif->rx_irq) enable_irq(vif->rx_irq); - xenvif_check_rx_xenvif(vif); + xenvif_napi_schedule_or_enable_events(vif); }
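The xenvif_poll() change above drops the open-coded IRQ-disabled ring recheck in favour of napi_complete() followed by xenvif_napi_schedule_or_enable_events(), which redoes the unconsumed-requests test and reschedules NAPI if the frontend raced with completion, closing the lost-event window the removed comment described. The general shape of that pattern, as a toy:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the shared ring and the NAPI primitives. */
static bool ring_has_unconsumed;
static bool napi_scheduled = true;

static void napi_complete_toy(void) { napi_scheduled = false; }
static void napi_schedule_toy(void) { napi_scheduled = true; }

/* Mirrors the idea behind xenvif_napi_schedule_or_enable_events():
 * after completing, re-check the ring and reschedule if work slipped
 * in while we were finishing the poll. */
static void schedule_or_enable_events(void)
{
        if (ring_has_unconsumed)
                napi_schedule_toy();    /* lost-event case: poll again */
        /* else: leave frontend events enabled and wait */
}

static void poll_done(void)
{
        napi_complete_toy();
        schedule_or_enable_events();
}

int main(void)
{
        ring_has_unconsumed = true;     /* frontend raced with completion */
        poll_done();
        printf("rescheduled: %d\n", napi_scheduled);    /* 1 */
        return 0;
}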
static void xenvif_down(struct xenvif *vif) @@@ -362,7 -386,7 +362,7 @@@ struct xenvif *xenvif_alloc(struct devi NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6; dev->features = dev->hw_features | NETIF_F_RXCSUM; - SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops); + dev->ethtool_ops = &xenvif_ethtool_ops;
dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
diff --combined drivers/s390/net/qeth_core_main.c index e89f38c,34993009..7b7a686 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@@ -20,9 -20,9 +20,10 @@@ #include <linux/kthread.h> #include <linux/slab.h> #include <net/iucv/af_iucv.h> + #include <net/dsfield.h>
#include <asm/ebcdic.h> +#include <asm/chpid.h> #include <asm/io.h> #include <asm/sysinfo.h> #include <asm/compat.h> @@@ -1345,7 -1345,16 +1346,7 @@@ static void qeth_set_multiple_write_que static void qeth_update_from_chp_desc(struct qeth_card *card) { struct ccw_device *ccwdev; - struct channelPath_dsc { - u8 flags; - u8 lsn; - u8 desc; - u8 chpid; - u8 swla; - u8 zeroes; - u8 chla; - u8 chpp; - } *chp_dsc; + struct channel_path_desc *chp_dsc;
QETH_DBF_TEXT(SETUP, 2, "chp_desc");
@@@ -3662,42 -3671,56 +3663,56 @@@ void qeth_qdio_output_handler(struct cc } EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
+ /** + * Note: Function assumes that we have 4 outbound queues. + */ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, int ipv, int cast_type) { - if (!ipv && (card->info.type == QETH_CARD_TYPE_OSD || - card->info.type == QETH_CARD_TYPE_OSX)) - return card->qdio.default_out_queue; - switch (card->qdio.no_out_queues) { - case 4: - if (cast_type && card->info.is_multicast_different) - return card->info.is_multicast_different & - (card->qdio.no_out_queues - 1); - if (card->qdio.do_prio_queueing && (ipv == 4)) { - const u8 tos = ip_hdr(skb)->tos; - - if (card->qdio.do_prio_queueing == - QETH_PRIO_Q_ING_TOS) { - if (tos & IP_TOS_NOTIMPORTANT) - return 3; - if (tos & IP_TOS_HIGHRELIABILITY) - return 2; - if (tos & IP_TOS_HIGHTHROUGHPUT) - return 1; - if (tos & IP_TOS_LOWDELAY) - return 0; - } - if (card->qdio.do_prio_queueing == - QETH_PRIO_Q_ING_PREC) - return 3 - (tos >> 6); - } else if (card->qdio.do_prio_queueing && (ipv == 6)) { - /* TODO: IPv6!!! */ + __be16 *tci; + u8 tos; + + if (cast_type && card->info.is_multicast_different) + return card->info.is_multicast_different & + (card->qdio.no_out_queues - 1); + + switch (card->qdio.do_prio_queueing) { + case QETH_PRIO_Q_ING_TOS: + case QETH_PRIO_Q_ING_PREC: + switch (ipv) { + case 4: + tos = ipv4_get_dsfield(ip_hdr(skb)); + break; + case 6: + tos = ipv6_get_dsfield(ipv6_hdr(skb)); + break; + default: + return card->qdio.default_out_queue; } - return card->qdio.default_out_queue; - case 1: /* fallthrough for single-out-queue 1920-device */ + if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC) + return ~tos >> 6 & 3; + if (tos & IPTOS_MINCOST) + return 3; + if (tos & IPTOS_RELIABILITY) + return 2; + if (tos & IPTOS_THROUGHPUT) + return 1; + if (tos & IPTOS_LOWDELAY) + return 0; + break; + case QETH_PRIO_Q_ING_SKB: + if (skb->priority > 5) + return 0; + return ~skb->priority >> 1 & 3; + case QETH_PRIO_Q_ING_VLAN: + tci = &((struct ethhdr *)skb->data)->h_proto; + if (*tci == ETH_P_8021Q) + return ~*(tci + 1) >> (VLAN_PRIO_SHIFT + 1) & 3; + break; default: - return card->qdio.default_out_queue; + break; } + return card->qdio.default_out_queue; } EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
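The rewritten qeth_get_priority_queue() above replaces per-bit TOS tests with arithmetic on the DS field: for precedence queueing, ~tos >> 6 & 3 inverts the two precedence bits so precedence 0 maps to queue 3 and precedence 3 to queue 0, while the skb-priority variant folds priorities 0..5 pairwise onto queues 3..1. A standalone check of both mappings (this assumes arithmetic right shift of negative ints, which the kernel relies on):

#include <stdio.h>

/* Mirror of the precedence mapping: IP precedence 0..3 -> queue 3..0. */
static int prec_to_queue(unsigned char tos)
{
        return ~tos >> 6 & 3;
}

/* Mirror of the skb-priority mapping: 0,1->3  2,3->2  4,5->1  >5->0. */
static int prio_to_queue(unsigned int priority)
{
        if (priority > 5)
                return 0;
        return ~priority >> 1 & 3;
}

int main(void)
{
        unsigned char prec;
        unsigned int prio;

        for (prec = 0; prec < 4; prec++)
                printf("prec %u -> queue %d\n", prec, prec_to_queue(prec << 6));
        for (prio = 0; prio <= 6; prio++)
                printf("prio %u -> queue %d\n", prio, prio_to_queue(prio));
        return 0;
}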
@@@ -5816,7 -5839,7 +5831,7 @@@ static int __init qeth_core_init(void if (rc) goto out_err; qeth_core_root_dev = root_device_register("qeth"); - rc = PTR_RET(qeth_core_root_dev); + rc = PTR_ERR_OR_ZERO(qeth_core_root_dev); if (rc) goto register_err; qeth_core_header_cache = kmem_cache_create("qeth_hdr", diff --combined include/linux/if_vlan.h index b2acc4a,8c0fb7f..4967916 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@@ -73,7 -73,7 +73,7 @@@ static inline struct vlan_ethhdr *vlan_ /* found in socket.c */ extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
-static inline int is_vlan_dev(struct net_device *dev) +static inline bool is_vlan_dev(struct net_device *dev) { return dev->priv_flags & IFF_802_1Q_VLAN; } @@@ -106,7 -106,7 +106,7 @@@ struct vlan_pcpu_stats
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
- extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev, + extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev, __be16 vlan_proto, u16 vlan_id); extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); extern u16 vlan_dev_vlan_id(const struct net_device *dev); @@@ -159,7 -159,6 +159,7 @@@ struct vlan_dev_priv #ifdef CONFIG_NET_POLL_CONTROLLER struct netpoll *netpoll; #endif + unsigned int nest_level; };
static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev) @@@ -198,15 -197,9 +198,15 @@@ extern void vlan_vids_del_by_dev(struc const struct net_device *by_dev);
extern bool vlan_uses_dev(const struct net_device *dev); + +static inline int vlan_get_encap_level(struct net_device *dev) +{ + BUG_ON(!is_vlan_dev(dev)); + return vlan_dev_priv(dev)->nest_level; +} #else static inline struct net_device * - __vlan_find_dev_deep(struct net_device *real_dev, + __vlan_find_dev_deep_rcu(struct net_device *real_dev, __be16 vlan_proto, u16 vlan_id) { return NULL; @@@ -270,11 -263,6 +270,11 @@@ static inline bool vlan_uses_dev(const { return false; } +static inline int vlan_get_encap_level(struct net_device *dev) +{ + BUG(); + return 0; +} #endif
static inline bool vlan_hw_offload_capable(netdev_features_t features, @@@ -495,5 -483,4 +495,5 @@@ static inline void vlan_set_encap_proto */ skb->protocol = htons(ETH_P_802_2); } + #endif /* !(_LINUX_IF_VLAN_H_) */ diff --combined include/linux/netdevice.h index b42d07b,2dea98c..a7f9ad0 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@@ -56,9 -56,6 +56,6 @@@ struct device struct phy_device; /* 802.11 specific */ struct wireless_dev; - /* source back-compat hooks */ - #define SET_ETHTOOL_OPS(netdev,ops) \ - ( (netdev)->ethtool_ops = (ops) )
void netdev_set_default_ethtool_ops(struct net_device *dev, const struct ethtool_ops *ops); @@@ -1144,7 -1141,6 +1141,7 @@@ struct net_device_ops netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb, struct net_device *dev, void *priv); + int (*ndo_get_lock_subclass)(struct net_device *dev); };
/** @@@ -2634,6 -2630,7 +2631,7 @@@ int dev_get_phys_port_id(struct net_dev struct netdev_phys_port_id *ppid); int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq); + int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
@@@ -2951,12 -2948,7 +2949,12 @@@ static inline void netif_addr_lock(stru
static inline void netif_addr_lock_nested(struct net_device *dev) { - spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING); + int subclass = SINGLE_DEPTH_NESTING; + + if (dev->netdev_ops->ndo_get_lock_subclass) + subclass = dev->netdev_ops->ndo_get_lock_subclass(dev); + + spin_lock_nested(&dev->addr_list_lock, subclass); }
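A fixed SINGLE_DEPTH_NESTING cannot describe chains of stacked devices that each take their addr_list_lock, so the hunk above lets a device report its own lockdep subclass through the new ndo_get_lock_subclass hook (VLAN returns its nest_level, see the vlan_dev.c hunks below). A toy of the plumbing; the nest-level numbers here are arbitrary, since the real values are assigned by VLAN registration code not shown in this diff:

#include <stdio.h>

#define SINGLE_DEPTH_NESTING 1

struct toy_dev {
        int (*get_lock_subclass)(const struct toy_dev *);
        int nest_level; /* stacking depth of this device */
};

static int toy_subclass(const struct toy_dev *d)
{
        return d->nest_level;
}

/* Mirrors the new netif_addr_lock_nested(): ask the device for its
 * subclass, fall back to the old fixed value when there is no hook. */
static int addr_lock_subclass(const struct toy_dev *d)
{
        if (d->get_lock_subclass)
                return d->get_lock_subclass(d);
        return SINGLE_DEPTH_NESTING;
}

int main(void)
{
        struct toy_dev eth   = { NULL, 0 };             /* no hook: fallback */
        struct toy_dev vlan1 = { toy_subclass, 1 };     /* vlan on eth */
        struct toy_dev vlan2 = { toy_subclass, 2 };     /* vlan on vlan1 */

        /* The stacked VLANs report distinct subclasses, so lockdep can
         * tell their chained addr_list_lock acquisitions apart. */
        printf("eth=%d vlan1=%d vlan2=%d\n",
               addr_lock_subclass(&eth),
               addr_lock_subclass(&vlan1),
               addr_lock_subclass(&vlan2));
        return 0;
}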
static inline void netif_addr_lock_bh(struct net_device *dev) @@@ -3056,19 -3048,10 +3054,19 @@@ extern int weight_p extern int bpf_jit_enable;
bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); +struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, + struct list_head **iter); struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, struct list_head **iter);
/* iterate through upper list, must be called under RCU read lock */ +#define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ + for (iter = &(dev)->adj_list.upper, \ + updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \ + updev; \ + updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) + +/* iterate through upper list, must be called under RCU read lock */ #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \ for (iter = &(dev)->all_adj_list.upper, \ updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \ @@@ -3092,14 -3075,6 +3090,14 @@@ void *netdev_lower_get_next_private_rcu priv; \ priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
+void *netdev_lower_get_next(struct net_device *dev, + struct list_head **iter); +#define netdev_for_each_lower_dev(dev, ldev, iter) \ + for (iter = &(dev)->adj_list.lower, \ + ldev = netdev_lower_get_next(dev, &(iter)); \ + ldev; \ + ldev = netdev_lower_get_next(dev, &(iter))) + void *netdev_adjacent_get_private(struct list_head *adj_list); void *netdev_lower_get_first_private_rcu(struct net_device *dev); struct net_device *netdev_master_upper_dev_get(struct net_device *dev); @@@ -3115,8 -3090,6 +3113,8 @@@ void netdev_upper_dev_unlink(struct net void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); void *netdev_lower_dev_get_private(struct net_device *dev, struct net_device *lower_dev); +int dev_get_nest_level(struct net_device *dev, + bool (*type_check)(struct net_device *dev)); int skb_checksum_help(struct sk_buff *skb); struct sk_buff *__skb_gso_segment(struct sk_buff *skb, netdev_features_t features, bool tx_path); diff --combined include/linux/usb/cdc_ncm.h index 44b38b9,8c5e388..7c9b484 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h @@@ -52,6 -52,10 +52,10 @@@ #define CDC_NCM_NTB_MAX_SIZE_TX 32768 /* bytes */ #define CDC_NCM_NTB_MAX_SIZE_RX 32768 /* bytes */
+ /* Initial NTB length */ + #define CDC_NCM_NTB_DEF_SIZE_TX 16384 /* bytes */ + #define CDC_NCM_NTB_DEF_SIZE_RX 16384 /* bytes */ + /* Minimum value for MaxDatagramSize, ch. 6.2.9 */ #define CDC_NCM_MIN_DATAGRAM_SIZE 1514 /* bytes */
@@@ -72,16 -76,9 +76,9 @@@ /* Restart the timer, if amount of datagrams is less than given value */ #define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3 #define CDC_NCM_TIMER_PENDING_CNT 2 - #define CDC_NCM_TIMER_INTERVAL (400UL * NSEC_PER_USEC) - - /* The following macro defines the minimum header space */ - #define CDC_NCM_MIN_HDR_SIZE \ - (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \ - (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16)) - - #define CDC_NCM_NDP_SIZE \ - (sizeof(struct usb_cdc_ncm_ndp16) + \ - (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16)) + #define CDC_NCM_TIMER_INTERVAL_USEC 400UL + #define CDC_NCM_TIMER_INTERVAL_MIN 5UL -#define CDC_NCM_TIMER_INTERVAL_MAX (15UL * USEC_PER_SEC) ++#define CDC_NCM_TIMER_INTERVAL_MAX (U32_MAX / NSEC_PER_USEC)
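The cdc_ncm_ctx hunk below shrinks timer_interval from u64 to u32, and the merged cap U32_MAX / NSEC_PER_USEC is exactly the largest microsecond interval whose nanosecond form still fits in 32 bits, about 4.29 s (tighter than the 15 s bound one parent carried). Checking the arithmetic:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000UL
#define TIMER_INTERVAL_MAX (UINT32_MAX / NSEC_PER_USEC)

int main(void)
{
        /* Largest microsecond interval whose nanosecond equivalent
         * still fits in a u32: 4294967 us, roughly 4.29 seconds. */
        printf("max = %lu us (%.2f s)\n",
               (unsigned long)TIMER_INTERVAL_MAX,
               TIMER_INTERVAL_MAX / 1e6);

        /* The conversion at the cap does not overflow 32 bits. */
        uint32_t ns = (uint32_t)(TIMER_INTERVAL_MAX * NSEC_PER_USEC);
        printf("cap in ns = %u\n", ns); /* 4294967000 < 2^32 */
        return 0;
}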
#define cdc_ncm_comm_intf_is_mbim(x) ((x)->desc.bInterfaceSubClass == USB_CDC_SUBCLASS_MBIM && \ (x)->desc.bInterfaceProtocol == USB_CDC_PROTO_NONE) @@@ -107,6 -104,9 +104,9 @@@ struct cdc_ncm_ctx spinlock_t mtx; atomic_t stop;
- u64 timer_interval; ++ u32 timer_interval; + u32 max_ndp_size; + u32 tx_timer_pending; u32 tx_curr_frame_num; u32 rx_max; @@@ -118,10 -118,21 +118,21 @@@ u16 tx_ndp_modulus; u16 tx_seq; u16 rx_seq; - u16 connected; + u16 min_tx_pkt; + + /* statistics */ + u32 tx_curr_frame_payload; + u32 tx_reason_ntb_full; + u32 tx_reason_ndp_full; + u32 tx_reason_timeout; + u32 tx_reason_max_datagram; + u64 tx_overhead; + u64 tx_ntbs; + u64 rx_overhead; + u64 rx_ntbs; };
- u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf); + u8 cdc_ncm_select_altsetting(struct usb_interface *intf); int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting); void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign); diff --combined include/net/ip6_route.h index 216cecc,38e41e4..1d09b46 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@@ -127,7 -127,6 +127,7 @@@ int rt6_dump_route(struct rt6_info *rt void rt6_ifdown(struct net *net, struct net_device *dev); void rt6_mtu_change(struct net_device *dev, unsigned int mtu); void rt6_remove_prefsrc(struct inet6_ifaddr *ifp); +void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
/* @@@ -186,7 -185,7 +186,7 @@@ static inline bool ip6_sk_accept_pmtu(c inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_OMIT; }
- static inline bool ip6_sk_local_df(const struct sock *sk) + static inline bool ip6_sk_ignore_df(const struct sock *sk) { return inet6_sk(sk)->pmtudisc < IPV6_PMTUDISC_DO || inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT; diff --combined include/uapi/linux/audit.h index 1b1efdd,dfa4c86..b21ea45 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@@ -331,17 -331,9 +331,17 @@@ enum #define AUDIT_FAIL_PRINTK 1 #define AUDIT_FAIL_PANIC 2
+/* + * These bits disambiguate different calling conventions that share an + * ELF machine type, bitness, and endianness + */ +#define __AUDIT_ARCH_CONVENTION_MASK 0x30000000 +#define __AUDIT_ARCH_CONVENTION_MIPS64_N32 0x20000000 + /* distinguish syscall tables */ #define __AUDIT_ARCH_64BIT 0x80000000 #define __AUDIT_ARCH_LE 0x40000000 + #define AUDIT_ARCH_ALPHA (EM_ALPHA|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) #define AUDIT_ARCH_ARM (EM_ARM|__AUDIT_ARCH_LE) #define AUDIT_ARCH_ARMEB (EM_ARM) @@@ -354,11 -346,7 +354,11 @@@ #define AUDIT_ARCH_MIPS (EM_MIPS) #define AUDIT_ARCH_MIPSEL (EM_MIPS|__AUDIT_ARCH_LE) #define AUDIT_ARCH_MIPS64 (EM_MIPS|__AUDIT_ARCH_64BIT) +#define AUDIT_ARCH_MIPS64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|\ + __AUDIT_ARCH_CONVENTION_MIPS64_N32) #define AUDIT_ARCH_MIPSEL64 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) +#define AUDIT_ARCH_MIPSEL64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE|\ + __AUDIT_ARCH_CONVENTION_MIPS64_N32) #define AUDIT_ARCH_OPENRISC (EM_OPENRISC) #define AUDIT_ARCH_PARISC (EM_PARISC) #define AUDIT_ARCH_PARISC64 (EM_PARISC|__AUDIT_ARCH_64BIT) @@@ -385,6 -373,14 +385,14 @@@ */ #define AUDIT_MESSAGE_TEXT_MAX 8560
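With the convention bits above, an audit arch identifier is the ELF machine value OR'd with bitness, endianness, and calling-convention flags, so the n32 entries differ from plain MIPS64 only in the convention field. A standalone decode of the scheme (EM_MIPS is 8 in elf.h):

#include <stdint.h>
#include <stdio.h>

#define EM_MIPS 8
#define __AUDIT_ARCH_CONVENTION_MASK       0x30000000
#define __AUDIT_ARCH_CONVENTION_MIPS64_N32 0x20000000
#define __AUDIT_ARCH_64BIT                 0x80000000
#define __AUDIT_ARCH_LE                    0x40000000

int main(void)
{
        uint32_t arch = EM_MIPS | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE |
                        __AUDIT_ARCH_CONVENTION_MIPS64_N32;

        printf("machine:    %u\n", arch & 0xffff);      /* 8 = EM_MIPS */
        printf("64-bit:     %d\n", !!(arch & __AUDIT_ARCH_64BIT));
        printf("little-end: %d\n", !!(arch & __AUDIT_ARCH_LE));
        printf("n32 ABI:    %d\n",
               (arch & __AUDIT_ARCH_CONVENTION_MASK) ==
               __AUDIT_ARCH_CONVENTION_MIPS64_N32);
        return 0;
}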
+ /* Multicast Netlink socket groups (default up to 32) */ + enum audit_nlgrps { + AUDIT_NLGRP_NONE, /* Group 0 not used */ + AUDIT_NLGRP_READLOG, /* "best effort" read only socket */ + __AUDIT_NLGRP_MAX + }; + #define AUDIT_NLGRP_MAX (__AUDIT_NLGRP_MAX - 1) + struct audit_status { __u32 mask; /* Bit mask for valid entries */ __u32 enabled; /* 1 = enabled, 0 = disabled */ diff --combined include/uapi/linux/nl80211.h index 194c1ea,406010d..9922b9b --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@@ -1579,6 -1579,10 +1579,10 @@@ enum nl80211_commands * @NL80211_ATTR_TDLS_PEER_CAPABILITY: flags for TDLS peer capabilities, u32. * As specified in the &enum nl80211_tdls_peer_capability. * + * @NL80211_ATTR_IFACE_SOCKET_OWNER: flag attribute, if set during interface + * creation then the new interface will be owned by the netlink socket + * that created it and will be destroyed when the socket is closed + * * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use */ @@@ -1914,6 -1918,8 +1918,8 @@@ enum nl80211_attrs
NL80211_ATTR_TDLS_PEER_CAPABILITY,
+ NL80211_ATTR_IFACE_SOCKET_OWNER, + /* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST, @@@ -2336,9 -2342,34 +2342,34 @@@ enum nl80211_band_attr * using this channel as the primary or any of the secondary channels * isn't possible * @NL80211_FREQUENCY_ATTR_DFS_CAC_TIME: DFS CAC time in milliseconds. + * @NL80211_FREQUENCY_ATTR_INDOOR_ONLY: Only indoor use is permitted on this + * channel. A channel that has the INDOOR_ONLY attribute can only be + * used when there is a clear assessment that the device is operating in + * an indoor surroundings, i.e., it is connected to AC power (and not + * through portable DC inverters) or is under the control of a master + * that is acting as an AP and is connected to AC power. + * @NL80211_FREQUENCY_ATTR_GO_CONCURRENT: GO operation is allowed on this + * channel if it's connected concurrently to a BSS on the same channel on + * the 2 GHz band or to a channel in the same UNII band (on the 5 GHz + * band), and IEEE80211_CHAN_RADAR is not set. Instantiating a GO on a + * channel that has the GO_CONCURRENT attribute set can be done when there + * is a clear assessment that the device is operating under the guidance of + * an authorized master, i.e., setting up a GO while the device is also + * connected to an AP with DFS and radar detection on the UNII band (it is + * up to user-space, i.e., wpa_supplicant to perform the required + * verifications) + * @NL80211_FREQUENCY_ATTR_NO_20MHZ: 20 MHz operation is not allowed + * on this channel in current regulatory domain. + * @NL80211_FREQUENCY_ATTR_NO_10MHZ: 10 MHz operation is not allowed + * on this channel in current regulatory domain. * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number * currently defined * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use + * + * See https://apps.fcc.gov/eas/comments/GetPublishedDocument.html?id=327&tn=52... + * for more information on the FCC description of the relaxations allowed + * by NL80211_FREQUENCY_ATTR_INDOOR_ONLY and + * NL80211_FREQUENCY_ATTR_GO_CONCURRENT. */ enum nl80211_frequency_attr { __NL80211_FREQUENCY_ATTR_INVALID, @@@ -2355,6 -2386,10 +2386,10 @@@ NL80211_FREQUENCY_ATTR_NO_80MHZ, NL80211_FREQUENCY_ATTR_NO_160MHZ, NL80211_FREQUENCY_ATTR_DFS_CAC_TIME, + NL80211_FREQUENCY_ATTR_INDOOR_ONLY, + NL80211_FREQUENCY_ATTR_GO_CONCURRENT, + NL80211_FREQUENCY_ATTR_NO_20MHZ, + NL80211_FREQUENCY_ATTR_NO_10MHZ,
/* keep last */ __NL80211_FREQUENCY_ATTR_AFTER_LAST, @@@ -2573,10 -2608,13 +2608,13 @@@ enum nl80211_dfs_regions * present has been registered with the wireless core that * has listed NL80211_FEATURE_CELL_BASE_REG_HINTS as a * supported feature. + * @NL80211_USER_REG_HINT_INDOOR: a user sent a hint indicating that the + * platform is operating in an indoor environment. */ enum nl80211_user_reg_hint_type { NL80211_USER_REG_HINT_USER = 0, NL80211_USER_REG_HINT_CELL_BASE = 1, + NL80211_USER_REG_HINT_INDOOR = 2, };
/** @@@ -3856,8 -3894,6 +3894,8 @@@ enum nl80211_ap_sme_features * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested * to work properly to support receiving regulatory hints from * cellular base stations. + * @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: (no longer available, only + * here to reserve the value for API/ABI compatibility) * @NL80211_FEATURE_SAE: This driver supports simultaneous authentication of * equals (SAE) with user space SME (NL80211_CMD_AUTHENTICATE) in station * mode @@@ -3893,13 -3929,16 +3931,16 @@@ * interface. An active monitor interface behaves like a normal monitor * interface, but gets added to the driver. It ensures that incoming * unicast packets directed at the configured interface address get ACKed. + * @NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE: This driver supports dynamic + * channel bandwidth change (e.g., HT 20 <-> 40 MHz channel) during the + * lifetime of a BSS. */ enum nl80211_feature_flags { NL80211_FEATURE_SK_TX_STATUS = 1 << 0, NL80211_FEATURE_HT_IBSS = 1 << 1, NL80211_FEATURE_INACTIVITY_TIMER = 1 << 2, NL80211_FEATURE_CELL_BASE_REG_HINTS = 1 << 3, - /* bit 4 is reserved - don't use */ + NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL = 1 << 4, NL80211_FEATURE_SAE = 1 << 5, NL80211_FEATURE_LOW_PRIORITY_SCAN = 1 << 6, NL80211_FEATURE_SCAN_FLUSH = 1 << 7, @@@ -3913,6 -3952,7 +3954,7 @@@ NL80211_FEATURE_FULL_AP_CLIENT_STATE = 1 << 15, NL80211_FEATURE_USERSPACE_MPM = 1 << 16, NL80211_FEATURE_ACTIVE_MONITOR = 1 << 17, + NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE = 1 << 18, };
/** diff --combined kernel/sysctl.c index 3c202d0,e36ae4b..5bcaf5c --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@@ -152,6 -152,10 +152,6 @@@ static unsigned long hung_task_timeout_ #ifdef CONFIG_SPARC #endif
-#ifdef CONFIG_SPARC64 -extern int sysctl_tsb_ratio; -#endif - #ifdef __hppa__ extern int pwrsw_enabled; #endif @@@ -2497,11 -2501,11 +2497,11 @@@ int proc_do_large_bitmap(struct ctl_tab bool first = 1; size_t left = *lenp; unsigned long bitmap_len = table->maxlen; - unsigned long *bitmap = (unsigned long *) table->data; + unsigned long *bitmap = *(unsigned long **) table->data; unsigned long *tmp_bitmap = NULL; char tr_a[] = { '-', ',', '\n' }, tr_b[] = { ',', '\n', 0 }, c;
- if (!bitmap_len || !left || (*ppos && !write)) { + if (!bitmap || !bitmap_len || !left || (*ppos && !write)) { *lenp = 0; return 0; } diff --combined lib/Kconfig.debug index e548aa0,d1b7bdf..6da2c25 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@@ -180,7 -180,7 +180,7 @@@ config STRIP_ASM_SYM
config READABLE_ASM bool "Generate readable assembler code" - depends on DEBUG_KERNEL + depends on DEBUG_KERNEL && !LTO help Disable some compiler optimizations that tend to generate human unreadable assembler output. This may make the kernel slightly slower, but it helps @@@ -1620,6 -1620,19 +1620,19 @@@ config TEST_USER_COP
If unsure, say N.
+ config TEST_BPF + tristate "Test BPF filter functionality" + default n + depends on m && NET + help + This builds the "test_bpf" module that runs various test vectors + against the BPF interpreter or BPF JIT compiler depending on the + current setting. This is in particular useful for BPF JIT compiler + development, but also to run regression tests against changes in + the interpreter code. + + If unsure, say N. + source "samples/Kconfig"
source "lib/Kconfig.kgdb" diff --combined net/8021q/vlan_dev.c index 019efb7,8f025af..4aef04e --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@@ -493,10 -493,48 +493,10 @@@ static void vlan_dev_change_rx_flags(st } }
-static int vlan_calculate_locking_subclass(struct net_device *real_dev) -{ - int subclass = 0; - - while (is_vlan_dev(real_dev)) { - subclass++; - real_dev = vlan_dev_priv(real_dev)->real_dev; - } - - return subclass; -} - -static void vlan_dev_mc_sync(struct net_device *to, struct net_device *from) -{ - int err = 0, subclass; - - subclass = vlan_calculate_locking_subclass(to); - - spin_lock_nested(&to->addr_list_lock, subclass); - err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len); - if (!err) - __dev_set_rx_mode(to); - spin_unlock(&to->addr_list_lock); -} - -static void vlan_dev_uc_sync(struct net_device *to, struct net_device *from) -{ - int err = 0, subclass; - - subclass = vlan_calculate_locking_subclass(to); - - spin_lock_nested(&to->addr_list_lock, subclass); - err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len); - if (!err) - __dev_set_rx_mode(to); - spin_unlock(&to->addr_list_lock); -} - static void vlan_dev_set_rx_mode(struct net_device *vlan_dev) { - vlan_dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); - vlan_dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); + dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); + dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); }
/* @@@ -524,11 -562,6 +524,11 @@@ static void vlan_dev_set_lockdep_class( netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass); }
+static int vlan_dev_get_lock_subclass(struct net_device *dev) +{ + return vlan_dev_priv(dev)->nest_level; +} + static const struct header_ops vlan_header_ops = { .create = vlan_dev_hard_header, .rebuild = vlan_dev_rebuild_header, @@@ -564,6 -597,7 +564,6 @@@ static const struct net_device_ops vlan static int vlan_dev_init(struct net_device *dev) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; - int subclass = 0;
netif_carrier_off(dev);
@@@ -612,7 -646,8 +612,7 @@@
SET_NETDEV_DEVTYPE(dev, &vlan_type);
- subclass = vlan_calculate_locking_subclass(dev); - vlan_dev_set_lockdep_class(dev, subclass); + vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats); if (!vlan_dev_priv(dev)->vlan_pcpu_stats) @@@ -671,38 -706,36 +671,36 @@@ static void vlan_ethtool_get_drvinfo(st
static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { + struct vlan_pcpu_stats *p; + u32 rx_errors = 0, tx_dropped = 0; + int i;
- if (vlan_dev_priv(dev)->vlan_pcpu_stats) { - struct vlan_pcpu_stats *p; - u32 rx_errors = 0, tx_dropped = 0; - int i; - - for_each_possible_cpu(i) { - u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes; - unsigned int start; - - p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i); - do { - start = u64_stats_fetch_begin_irq(&p->syncp); - rxpackets = p->rx_packets; - rxbytes = p->rx_bytes; - rxmulticast = p->rx_multicast; - txpackets = p->tx_packets; - txbytes = p->tx_bytes; - } while (u64_stats_fetch_retry_irq(&p->syncp, start)); - - stats->rx_packets += rxpackets; - stats->rx_bytes += rxbytes; - stats->multicast += rxmulticast; - stats->tx_packets += txpackets; - stats->tx_bytes += txbytes; - /* rx_errors & tx_dropped are u32 */ - rx_errors += p->rx_errors; - tx_dropped += p->tx_dropped; - } - stats->rx_errors = rx_errors; - stats->tx_dropped = tx_dropped; + for_each_possible_cpu(i) { + u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes; + unsigned int start; + + p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i); + do { + start = u64_stats_fetch_begin_irq(&p->syncp); + rxpackets = p->rx_packets; + rxbytes = p->rx_bytes; + rxmulticast = p->rx_multicast; + txpackets = p->tx_packets; + txbytes = p->tx_bytes; + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); + + stats->rx_packets += rxpackets; + stats->rx_bytes += rxbytes; + stats->multicast += rxmulticast; + stats->tx_packets += txpackets; + stats->tx_bytes += txbytes; + /* rx_errors & tx_dropped are u32 */ + rx_errors += p->rx_errors; + tx_dropped += p->tx_dropped; } + stats->rx_errors = rx_errors; + stats->tx_dropped = tx_dropped; + return stats; }
@@@ -784,7 -817,6 +782,7 @@@ static const struct net_device_ops vlan .ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup, #endif .ndo_fix_features = vlan_dev_fix_features, + .ndo_get_lock_subclass = vlan_dev_get_lock_subclass, };
void vlan_setup(struct net_device *dev) diff --combined net/batman-adv/distributed-arp-table.c index aa5d494,60889df..dcd99b2 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@@ -662,6 -662,7 +662,7 @@@ static void batadv_dat_tvlv_container_u void batadv_dat_status_update(struct net_device *net_dev) { struct batadv_priv *bat_priv = netdev_priv(net_dev); + batadv_dat_tvlv_container_update(bat_priv); }
@@@ -940,7 -941,8 +941,7 @@@ bool batadv_dat_snoop_outgoing_arp_requ * additional DAT answer may trigger kernel warnings about * a packet coming from the wrong port. */ - if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, - BATADV_NO_FLAGS)) { + if (batadv_is_my_client(bat_priv, dat_entry->mac_addr, vid)) { ret = true; goto out; } diff --combined net/core/dev.c index 9abc503,867adb2..0355ca5 --- a/net/core/dev.c +++ b/net/core/dev.c @@@ -1661,6 -1661,29 +1661,29 @@@ bool is_skb_forwardable(struct net_devi } EXPORT_SYMBOL_GPL(is_skb_forwardable);
+ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) + { + if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { + if (skb_copy_ubufs(skb, GFP_ATOMIC)) { + atomic_long_inc(&dev->rx_dropped); + kfree_skb(skb); + return NET_RX_DROP; + } + } + + if (unlikely(!is_skb_forwardable(dev, skb))) { + atomic_long_inc(&dev->rx_dropped); + kfree_skb(skb); + return NET_RX_DROP; + } + + skb_scrub_packet(skb, true); + skb->protocol = eth_type_trans(skb, dev); + + return 0; + } + EXPORT_SYMBOL_GPL(__dev_forward_skb); + /** * dev_forward_skb - loopback an skb to another netif * @@@ -1681,24 -1704,7 +1704,7 @@@ */ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) { - if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { - if (skb_copy_ubufs(skb, GFP_ATOMIC)) { - atomic_long_inc(&dev->rx_dropped); - kfree_skb(skb); - return NET_RX_DROP; - } - } - - if (unlikely(!is_skb_forwardable(dev, skb))) { - atomic_long_inc(&dev->rx_dropped); - kfree_skb(skb); - return NET_RX_DROP; - } - - skb_scrub_packet(skb, true); - skb->protocol = eth_type_trans(skb, dev); - - return netif_rx_internal(skb); + return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); } EXPORT_SYMBOL_GPL(dev_forward_skb);
@@@ -3951,7 -3957,6 +3957,7 @@@ static enum gro_result dev_gro_receive( } NAPI_GRO_CB(skb)->count = 1; NAPI_GRO_CB(skb)->age = jiffies; + NAPI_GRO_CB(skb)->last = skb; skb_shinfo(skb)->gso_size = skb_gro_len(skb); skb->next = napi->gro_list; napi->gro_list = skb; @@@ -4542,32 -4547,6 +4548,32 @@@ void *netdev_adjacent_get_private(struc EXPORT_SYMBOL(netdev_adjacent_get_private);
/** + * netdev_upper_get_next_dev_rcu - Get the next dev from upper list + * @dev: device + * @iter: list_head ** of the current position + * + * Gets the next device from the dev's upper list, starting from iter + * position. The caller must hold RCU read lock. + */ +struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, + struct list_head **iter) +{ + struct netdev_adjacent *upper; + + WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held()); + + upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list); + + if (&upper->list == &dev->adj_list.upper) + return NULL; + + *iter = &upper->list; + + return upper->dev; +} +EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu); + +/** * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list * @dev: device * @iter: list_head ** of the current position @@@ -4649,32 -4628,6 +4655,32 @@@ void *netdev_lower_get_next_private_rcu EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
/** + * netdev_lower_get_next - Get the next device from the lower neighbour + * list + * @dev: device + * @iter: list_head ** of the current position + * + * Gets the next netdev_adjacent from the dev's lower neighbour + * list, starting from iter position. The caller must hold RTNL lock or + * its own locking that guarantees that the neighbour lower + * list will remain unchanged. + */ +void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter) +{ + struct netdev_adjacent *lower; + + lower = list_entry((*iter)->next, struct netdev_adjacent, list); + + if (&lower->list == &dev->adj_list.lower) + return NULL; + + *iter = &lower->list; + + return lower->dev; +} +EXPORT_SYMBOL(netdev_lower_get_next); + +/** * netdev_lower_get_first_private_rcu - Get the first ->private from the * lower neighbour list, RCU * variant @@@ -5124,30 -5077,6 +5130,30 @@@ void *netdev_lower_dev_get_private(stru } EXPORT_SYMBOL(netdev_lower_dev_get_private);
+ +int dev_get_nest_level(struct net_device *dev, + bool (*type_check)(struct net_device *dev)) +{ + struct net_device *lower = NULL; + struct list_head *iter; + int max_nest = -1; + int nest; + + ASSERT_RTNL(); + + netdev_for_each_lower_dev(dev, lower, iter) { + nest = dev_get_nest_level(lower, type_check); + if (max_nest < nest) + max_nest = nest; + } + + if (type_check(dev)) + max_nest++; + + return max_nest; +} +EXPORT_SYMBOL(dev_get_nest_level); + static void dev_change_rx_flags(struct net_device *dev, int flags) { const struct net_device_ops *ops = dev->netdev_ops; @@@ -5313,6 -5242,7 +5319,6 @@@ void __dev_set_rx_mode(struct net_devic if (ops->ndo_set_rx_mode) ops->ndo_set_rx_mode(dev); } -EXPORT_SYMBOL(__dev_set_rx_mode);
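dev_get_nest_level() above recursively takes the deepest level found among a device's lower devices and bumps it when the device itself satisfies type_check; VLAN passes is_vlan_dev so stacked VLANs end up with distinct lockdep subclasses. A userspace mimic over a three-device stack, with the netdev iteration replaced by a single lower pointer for brevity:

#include <stdbool.h>
#include <stdio.h>

struct toy_dev {
        const char *name;
        bool is_vlan;
        struct toy_dev *lower;  /* at most one lower device here */
};

/* Mirrors dev_get_nest_level(): deepest count of matching devices at
 * or below dev, or -1 when nothing in the stack matches. */
static int nest_level(const struct toy_dev *dev,
                      bool (*match)(const struct toy_dev *))
{
        int max_nest = -1;

        if (dev->lower) {
                int nest = nest_level(dev->lower, match);

                if (nest > max_nest)
                        max_nest = nest;
        }
        if (match(dev))
                max_nest++;
        return max_nest;
}

static bool is_vlan(const struct toy_dev *d) { return d->is_vlan; }

int main(void)
{
        struct toy_dev eth0  = { "eth0",  false, NULL };
        struct toy_dev vlan1 = { "vlan1", true,  &eth0 };
        struct toy_dev vlan2 = { "vlan2", true,  &vlan1 };

        printf("%s: %d\n", eth0.name,  nest_level(&eth0, is_vlan));    /* -1 */
        printf("%s: %d\n", vlan1.name, nest_level(&vlan1, is_vlan));   /*  0 */
        printf("%s: %d\n", vlan2.name, nest_level(&vlan2, is_vlan));   /*  1 */
        return 0;
}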
void dev_set_rx_mode(struct net_device *dev) { @@@ -5617,7 -5547,7 +5623,7 @@@ static int dev_new_index(struct net *ne
/* Delayed registration/unregistration */ static LIST_HEAD(net_todo_list); -static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq); +DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
static void net_set_todo(struct net_device *dev) { @@@ -5674,10 -5604,6 +5680,6 @@@ static void rollback_registered_many(st */ call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
- if (!dev->rtnl_link_ops || - dev->rtnl_link_state == RTNL_LINK_INITIALIZED) - rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL); - /* * Flush the unicast and multicast chains */ @@@ -5687,6 -5613,10 +5689,10 @@@ if (dev->netdev_ops->ndo_uninit) dev->netdev_ops->ndo_uninit(dev);
+ if (!dev->rtnl_link_ops || + dev->rtnl_link_state == RTNL_LINK_INITIALIZED) + rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL); + /* Notifier chain MUST detach us all upper devices. */ WARN_ON(netdev_has_any_upper_dev(dev));
diff --combined net/core/net_namespace.c index 7c8ffd9,05e949d..85b6269 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@@ -24,7 -24,7 +24,7 @@@
static LIST_HEAD(pernet_list); static struct list_head *first_device = &pernet_list; -static DEFINE_MUTEX(net_mutex); +DEFINE_MUTEX(net_mutex);
LIST_HEAD(net_namespace_list); EXPORT_SYMBOL_GPL(net_namespace_list); @@@ -273,7 -273,7 +273,7 @@@ static void cleanup_net(struct work_str { const struct pernet_operations *ops; struct net *net, *tmp; - LIST_HEAD(net_kill_list); + struct list_head net_kill_list; LIST_HEAD(net_exit_list);
/* Atomically snapshot the list of namespaces to cleanup */ diff --combined net/core/skbuff.c index 8383b2b,3d74530..3f6c7e8 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@@ -694,7 -694,7 +694,7 @@@ static void __copy_skb_header(struct sk #endif memcpy(new->cb, old->cb, sizeof(old->cb)); new->csum = old->csum; - new->local_df = old->local_df; + new->ignore_df = old->ignore_df; new->pkt_type = old->pkt_type; new->ip_summed = old->ip_summed; skb_copy_queue_mapping(new, old); @@@ -3076,7 -3076,7 +3076,7 @@@ int skb_gro_receive(struct sk_buff **he if (unlikely(p->len + len >= 65536)) return -E2BIG;
- lp = NAPI_GRO_CB(p)->last ?: p; + lp = NAPI_GRO_CB(p)->last; pinfo = skb_shinfo(lp);
if (headlen <= offset) { @@@ -3192,7 -3192,7 +3192,7 @@@ merge
__skb_pull(skb, offset);
- if (!NAPI_GRO_CB(p)->last) + if (NAPI_GRO_CB(p)->last == p) skb_shinfo(p)->frag_list = skb; else NAPI_GRO_CB(p)->last->next = skb; @@@ -3913,7 -3913,7 +3913,7 @@@ void skb_scrub_packet(struct sk_buff *s skb->tstamp.tv64 = 0; skb->pkt_type = PACKET_HOST; skb->skb_iif = 0; - skb->local_df = 0; + skb->ignore_df = 0; skb_dst_drop(skb); skb->mark = 0; secpath_reset(skb); diff --combined net/ipv4/ip_tunnel.c index 49721d7,289c6ee..6b3df33 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@@ -395,11 -395,10 +395,10 @@@ static struct ip_tunnel *ip_tunnel_crea struct ip_tunnel_net *itn, struct ip_tunnel_parm *parms) { - struct ip_tunnel *nt, *fbt; + struct ip_tunnel *nt; struct net_device *dev;
BUG_ON(!itn->fb_tunnel_dev); - fbt = netdev_priv(itn->fb_tunnel_dev); dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms); if (IS_ERR(dev)) return ERR_CAST(dev); @@@ -540,10 -539,9 +539,10 @@@ void ip_tunnel_xmit(struct sk_buff *skb unsigned int max_headroom; /* The extra header space needed */ __be32 dst; int err; - bool connected = true; + bool connected;
inner_iph = (const struct iphdr *)skb_inner_network_header(skb); + connected = (tunnel->parms.iph.daddr != 0);
dst = tnl_params->daddr; if (dst == 0) { @@@ -756,10 -754,8 +755,8 @@@ int ip_tunnel_ioctl(struct net_device *
if (!t && (cmd == SIOCADDTUNNEL)) { t = ip_tunnel_create(net, itn, p); - if (IS_ERR(t)) { - err = PTR_ERR(t); - break; - } + err = PTR_ERR_OR_ZERO(t); + break; } if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) { if (t != NULL) { diff --combined net/ipv4/xfrm4_output.c index 186a8ec,8e8c018..d5f6bd9 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c @@@ -25,7 -25,7 +25,7 @@@ static int xfrm4_tunnel_check_size(stru if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE) goto out;
- if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df) + if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df) goto out;
mtu = dst_mtu(skb_dst(skb)); @@@ -62,7 -62,10 +62,7 @@@ int xfrm4_prepare_output(struct xfrm_st if (err) return err;
- memset(IPCB(skb), 0, sizeof(*IPCB(skb))); - IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED; - - skb->protocol = htons(ETH_P_IP); + IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
return x->outer_mode->output2(x, skb); } @@@ -70,34 -73,27 +70,34 @@@ EXPORT_SYMBOL(xfrm4_prepare_output)
int xfrm4_output_finish(struct sk_buff *skb) { + memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + skb->protocol = htons(ETH_P_IP); + +#ifdef CONFIG_NETFILTER + IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED; +#endif + + return xfrm_output(skb); +} + +static int __xfrm4_output(struct sk_buff *skb) +{ + struct xfrm_state *x = skb_dst(skb)->xfrm; + #ifdef CONFIG_NETFILTER - if (!skb_dst(skb)->xfrm) { + if (!x) { IPCB(skb)->flags |= IPSKB_REROUTED; return dst_output(skb); } - - IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED; #endif
- skb->protocol = htons(ETH_P_IP); - return xfrm_output(skb); + return x->outer_mode->afinfo->output_finish(skb); }
int xfrm4_output(struct sock *sk, struct sk_buff *skb) { - struct dst_entry *dst = skb_dst(skb); - struct xfrm_state *x = dst->xfrm; - return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, - NULL, dst->dev, - x->outer_mode->afinfo->output_finish, + NULL, skb_dst(skb)->dev, __xfrm4_output, !(IPCB(skb)->flags & IPSKB_REROUTED)); }
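The reworked xfrm4_output() above routes everything through NF_HOOK_COND: when the condition is false (the packet already carries IPSKB_REROUTED), the POST_ROUTING hooks are skipped and the continuation runs directly, so packets that __xfrm4_output() re-injects via dst_output() do not traverse the hooks twice. The control flow, as a toy:

#include <stdio.h>

/* Toy rendering of NF_HOOK_COND semantics: run the hook chain only
 * when cond is true, otherwise jump straight to the continuation. */
static int hook_chain(int pkt) { printf("hooks(%d)\n", pkt); return pkt; }
static int okfn(int pkt)       { printf("okfn(%d)\n", pkt);  return 0;  }

static int nf_hook_cond(int pkt, int (*cont)(int), int cond)
{
        if (cond)
                pkt = hook_chain(pkt);  /* POST_ROUTING traversal */
        return cont(pkt);
}

int main(void)
{
        nf_hook_cond(1, okfn, 1);       /* normal packet: hooks, then okfn */
        nf_hook_cond(2, okfn, 0);       /* rerouted packet: okfn only */
        return 0;
}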
diff --combined net/ipv6/ip6_output.c index fbf1156,ab0cc57..85aaeca --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@@ -219,7 -219,7 +219,7 @@@ int ip6_xmit(struct sock *sk, struct sk skb->mark = sk->sk_mark;
mtu = dst_mtu(dst); - if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) { + if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) { IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUT, skb->len); return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, @@@ -347,11 -347,11 +347,11 @@@ static bool ip6_pkt_too_big(const struc if (skb->len <= mtu) return false;
- /* ipv6 conntrack defrag sets max_frag_size + local_df */ + /* ipv6 conntrack defrag sets max_frag_size + ignore_df */ if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu) return true;
- if (skb->local_df) + if (skb->ignore_df) return false;
if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu) @@@ -559,7 -559,7 +559,7 @@@ int ip6_fragment(struct sk_buff *skb, i /* We must not fragment if the socket is set to force MTU discovery * or if the skb it not generated by a local socket. */ - if (unlikely(!skb->local_df && skb->len > mtu) || + if (unlikely(!skb->ignore_df && skb->len > mtu) || (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) { if (skb->sk && dst_allfrag(skb_dst(skb))) @@@ -1229,12 -1229,12 +1229,12 @@@ int ip6_append_data(struct sock *sk, in unsigned int maxnonfragsize, headersize;
headersize = sizeof(struct ipv6hdr) + - (opt ? opt->tot_len : 0) + + (opt ? opt->opt_flen + opt->opt_nflen : 0) + (dst_allfrag(&rt->dst) ? sizeof(struct frag_hdr) : 0) + rt->rt6i_nfheader_len;
- if (ip6_sk_local_df(sk)) + if (ip6_sk_ignore_df(sk)) maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN; else maxnonfragsize = mtu; @@@ -1544,7 -1544,7 +1544,7 @@@ int ip6_push_pending_frames(struct soc }
/* Allow local fragmentation. */ - skb->local_df = ip6_sk_local_df(sk); + skb->ignore_df = ip6_sk_ignore_df(sk);
*final_dst = fl6->daddr; __skb_pull(skb, skb_network_header_len(skb)); diff --combined net/ipv6/ip6_vti.c index 6cc9f93,2953c0c..9aaa6bb --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@@ -511,7 -511,6 +511,7 @@@ static int vti6_err(struct sk_buff *skb u8 type, u8 code, int offset, __be32 info) { __be32 spi; + __u32 mark; struct xfrm_state *x; struct ip6_tnl *t; struct ip_esp_hdr *esph; @@@ -525,8 -524,6 +525,8 @@@ if (!t) return -1;
+ mark = be32_to_cpu(t->parms.o_key); + switch (protocol) { case IPPROTO_ESP: esph = (struct ip_esp_hdr *)(skb->data + offset); @@@ -548,7 -545,7 +548,7 @@@ type != NDISC_REDIRECT) return 0;
- x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr, + x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr, spi, protocol, AF_INET6); if (!x) return 0; @@@ -795,15 -792,12 +795,12 @@@ static const struct net_device_ops vti6 **/ static void vti6_dev_setup(struct net_device *dev) { - struct ip6_tnl *t; - dev->netdev_ops = &vti6_netdev_ops; dev->destructor = vti6_dev_free;
dev->type = ARPHRD_TUNNEL6; dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr); dev->mtu = ETH_DATA_LEN; - t = netdev_priv(dev); dev->flags |= IFF_NOARP; dev->addr_len = sizeof(struct in6_addr); dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; @@@ -1100,6 -1094,7 +1097,6 @@@ static int __init vti6_tunnel_init(void
err = xfrm6_protocol_register(&vti_esp6_protocol, IPPROTO_ESP); if (err < 0) { - unregister_pernet_device(&vti6_net_ops); pr_err("%s: can't register vti6 protocol\n", __func__);
goto out; @@@ -1108,6 -1103,7 +1105,6 @@@ err = xfrm6_protocol_register(&vti_ah6_protocol, IPPROTO_AH); if (err < 0) { xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP); - unregister_pernet_device(&vti6_net_ops); pr_err("%s: can't register vti6 protocol\n", __func__);
goto out; @@@ -1117,6 -1113,7 +1114,6 @@@ if (err < 0) { xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH); xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP); - unregister_pernet_device(&vti6_net_ops); pr_err("%s: can't register vti6 protocol\n", __func__);
goto out; diff --combined net/ipv6/route.c index 6ebdb7b6,f0a8ff9..70e6502 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@@ -1176,7 -1176,7 +1176,7 @@@ void ip6_update_pmtu(struct sk_buff *sk
memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_oif = oif; - fl6.flowi6_mark = mark; + fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark); fl6.daddr = iph->daddr; fl6.saddr = iph->saddr; fl6.flowlabel = ip6_flowinfo(iph); @@@ -2234,27 -2234,6 +2234,27 @@@ void rt6_remove_prefsrc(struct inet6_if fib6_clean_all(net, fib6_remove_prefsrc, &adni); }
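The ip6_update_pmtu() hunk falls back to a reflected mark when the caller passes mark == 0, so the PMTU probe is routed under the same fwmark policy as the traffic that triggered the error. Assuming IP6_REPLY_MARK is the helper introduced by the fwmark-reflect series, it reduces to roughly the following (sketched from memory, not verbatim; check include/net/ip.h):

	/* Sketch: reflect the packet's mark only when the per-netns sysctl
	 * requests it; otherwise keep the historical mark of 0. */
	#define IP6_REPLY_MARK(net, mark) \
		((net)->ipv6.sysctl.fwmark_reflect ? (mark) : 0)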
+#define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY) +#define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE) + +/* Remove routers and update dst entries when a gateway turns into a host. */ +static int fib6_clean_tohost(struct rt6_info *rt, void *arg) +{ + struct in6_addr *gateway = (struct in6_addr *)arg; + + if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) || + ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) && + ipv6_addr_equal(gateway, &rt->rt6i_gateway)) { + return -1; + } + return 0; +} + +void rt6_clean_tohost(struct net *net, struct in6_addr *gateway) +{ + fib6_clean_all(net, fib6_clean_tohost, gateway); +} + struct arg_dev_net { struct net_device *dev; struct net *net; @@@ -2730,9 -2709,6 +2730,9 @@@ static int inet6_rtm_getroute(struct sk if (tb[RTA_OIF]) oif = nla_get_u32(tb[RTA_OIF]);
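For context on the new fib6_clean_tohost(): fib6_clean_all() walks every routing table and deletes entries for which the callback returns nonzero, so the helper above prunes RA-learned router routes and cached gateway entries once the given address stops being a router. A hedged usage sketch; the caller name is invented, though the expected call site is NDISC processing (e.g. a neighbour advertisement with the router flag cleared):

	/* Hypothetical caller: 'gw' has just turned from router into host,
	 * so purge routes and cached dsts that use it as a gateway.
	 * rt6_clean_tohost() performs the FIB walk shown above. */
	static void example_router_became_host(struct net *net,
					       struct in6_addr *gw)
	{
		rt6_clean_tohost(net, gw);
	}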
+ if (tb[RTA_MARK]) + fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]); + if (iif) { struct net_device *dev; int flags = 0; diff --combined net/ipv6/xfrm6_output.c index b930d08,f47c8b1..433672d --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@@ -78,7 -78,7 +78,7 @@@ static int xfrm6_tunnel_check_size(stru if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU;
- if (!skb->local_df && skb->len > mtu) { + if (!skb->ignore_df && skb->len > mtu) { skb->dev = dst->dev;
if (xfrm6_local_dontfrag(skb)) @@@ -114,7 -114,13 +114,7 @@@ int xfrm6_prepare_output(struct xfrm_st if (err) return err;
- skb->local_df = 1; - memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); -#ifdef CONFIG_NETFILTER - IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; -#endif - - skb->protocol = htons(ETH_P_IPV6); + skb->ignore_df = 1;
return x->outer_mode->output2(x, skb); } @@@ -122,13 -128,11 +122,13 @@@ EXPORT_SYMBOL(xfrm6_prepare_output)
int xfrm6_output_finish(struct sk_buff *skb) { + memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + skb->protocol = htons(ETH_P_IPV6); + #ifdef CONFIG_NETFILTER IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED; #endif
- skb->protocol = htons(ETH_P_IPV6); return xfrm_output(skb); }
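The xfrm6_output.c reshuffle moves the IP6CB reset and the protocol assignment out of xfrm6_prepare_output() and into xfrm6_output_finish(), and, in the hunks that follow, lets packets flagged IP6SKB_REROUTED bypass the POST_ROUTING hook by switching xfrm6_output() to NF_HOOK_COND(). For reference, NF_HOOK_COND's contract as recalled from include/linux/netfilter.h of this vintage (treat the body as illustrative, not verbatim):

	/* When 'cond' is false the hook chain is skipped and okfn() runs
	 * directly; when true, okfn() runs only if the hooks accept the
	 * packet. Hence !(IP6CB(skb)->flags & IP6SKB_REROUTED) below:
	 * rerouted skbs go straight to __xfrm6_output(). */
	static inline int
	NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sk_buff *skb,
		     struct net_device *in, struct net_device *out,
		     int (*okfn)(struct sk_buff *), bool cond)
	{
		int ret;

		if (!cond ||
		    ((ret = nf_hook_thresh(pf, hook, skb, in, out,
					   okfn, INT_MIN)) == 1))
			ret = okfn(skb);
		return ret;
	}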
@@@ -138,13 -142,6 +138,13 @@@ static int __xfrm6_output(struct sk_buf struct xfrm_state *x = dst->xfrm; int mtu;
+#ifdef CONFIG_NETFILTER + if (!x) { + IP6CB(skb)->flags |= IP6SKB_REROUTED; + return dst_output(skb); + } +#endif + if (skb->protocol == htons(ETH_P_IPV6)) mtu = ip6_skb_dst_mtu(skb); else @@@ -153,7 -150,7 +153,7 @@@ if (skb->len > mtu && xfrm6_local_dontfrag(skb)) { xfrm6_local_rxpmtu(skb, mtu); return -EMSGSIZE; - } else if (!skb->local_df && skb->len > mtu && skb->sk) { + } else if (!skb->ignore_df && skb->len > mtu && skb->sk) { xfrm_local_error(skb, mtu); return -EMSGSIZE; } @@@ -168,7 -165,6 +168,7 @@@
int xfrm6_output(struct sock *sk, struct sk_buff *skb) { - return NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, - skb_dst(skb)->dev, __xfrm6_output); + return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, + NULL, skb_dst(skb)->dev, __xfrm6_output, + !(IP6CB(skb)->flags & IP6SKB_REROUTED)); } diff --combined net/mac80211/ieee80211_i.h index f169b6e,b455f62..487c2ef --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@@ -260,7 -260,7 +260,7 @@@ struct ieee80211_if_ap
/* to be used after channel switch. */ struct cfg80211_beacon_data *next_beacon; - struct list_head vlans; + struct list_head vlans; /* write-protected with RTNL and local->mtx */
struct ps_data ps; atomic_t num_mcast_sta; /* number of stations receiving multicast */ @@@ -276,7 -276,7 +276,7 @@@ struct ieee80211_if_wds };
struct ieee80211_if_vlan { - struct list_head list; + struct list_head list; /* write-protected with RTNL and local->mtx */
/* used for all tx if the VLAN is configured to 4-addr mode */ struct sta_info __rcu *sta; @@@ -317,7 -317,6 +317,7 @@@ struct ieee80211_roc_work
bool started, abort, hw_begun, notified; bool to_be_freed; + bool on_channel;
unsigned long hw_start_time;
@@@ -692,8 -691,10 +692,10 @@@ struct ieee80211_chanctx struct list_head list; struct rcu_head rcu_head;
+ struct list_head assigned_vifs; + struct list_head reserved_vifs; + enum ieee80211_chanctx_mode mode; - int refcount; bool driver_present;
struct ieee80211_chanctx_conf conf; @@@ -757,6 -758,14 +759,14 @@@ struct ieee80211_sub_if_data bool csa_radar_required; struct cfg80211_chan_def csa_chandef;
+ struct list_head assigned_chanctx_list; /* protected by chanctx_mtx */ + struct list_head reserved_chanctx_list; /* protected by chanctx_mtx */ + + /* context reservation -- protected with chanctx_mtx */ + struct ieee80211_chanctx *reserved_chanctx; + struct cfg80211_chan_def reserved_chandef; + bool reserved_radar_required; + /* used to reconfigure hardware SM PS */ struct work_struct recalc_smps;
@@@ -1772,6 -1781,16 +1782,16 @@@ ieee80211_vif_use_channel(struct ieee80 const struct cfg80211_chan_def *chandef, enum ieee80211_chanctx_mode mode); int __must_check + ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata, + const struct cfg80211_chan_def *chandef, + enum ieee80211_chanctx_mode mode, + bool radar_required); + int __must_check + ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata, + u32 *changed); + int ieee80211_vif_unreserve_chanctx(struct ieee80211_sub_if_data *sdata); + + int __must_check ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata, const struct cfg80211_chan_def *chandef, u32 *changed); @@@ -1783,6 -1802,8 +1803,8 @@@ void ieee80211_vif_release_channel(stru void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata); void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata, bool clear); + int ieee80211_chanctx_refcount(struct ieee80211_local *local, + struct ieee80211_chanctx *ctx);
void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *chanctx); @@@ -1806,6 -1827,11 +1828,11 @@@ int ieee80211_cs_headroom(struct ieee80 enum nl80211_iftype iftype); void ieee80211_recalc_dtim(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); + int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, + const struct cfg80211_chan_def *chandef, + enum ieee80211_chanctx_mode chanmode, + u8 radar_detect); + int ieee80211_max_num_channels(struct ieee80211_local *local);
#ifdef CONFIG_MAC80211_NOINLINE #define debug_noinline noinline diff --combined net/mac80211/mlme.c index 27600a9,488826f..bfb5e20 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@@ -1089,7 -1089,7 +1089,7 @@@ ieee80211_sta_process_chanswitch(struc } chanctx = container_of(rcu_access_pointer(sdata->vif.chanctx_conf), struct ieee80211_chanctx, conf); - if (chanctx->refcount > 1) { + if (ieee80211_chanctx_refcount(local, chanctx) > 1) { sdata_info(sdata, "channel switch with multiple interfaces on the same channel, disconnecting\n"); ieee80211_queue_work(&local->hw, @@@ -3598,24 -3598,18 +3598,24 @@@ void ieee80211_mgd_quiesce(struct ieee8
sdata_lock(sdata);
- if (ifmgd->auth_data) { + if (ifmgd->auth_data || ifmgd->assoc_data) { + const u8 *bssid = ifmgd->auth_data ? + ifmgd->auth_data->bss->bssid : + ifmgd->assoc_data->bss->bssid; + /* - * If we are trying to authenticate while suspending, cfg80211 - * won't know and won't actually abort those attempts, thus we - * need to do that ourselves. + * If we are trying to authenticate / associate while suspending, + * cfg80211 won't know and won't actually abort those attempts, + * thus we need to do that ourselves. */ - ieee80211_send_deauth_disassoc(sdata, - ifmgd->auth_data->bss->bssid, + ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH, WLAN_REASON_DEAUTH_LEAVING, false, frame_buf); - ieee80211_destroy_auth_data(sdata, false); + if (ifmgd->assoc_data) + ieee80211_destroy_assoc_data(sdata, false); + if (ifmgd->auth_data) + ieee80211_destroy_auth_data(sdata, false); cfg80211_tx_mlme_mgmt(sdata->dev, frame_buf, IEEE80211_DEAUTH_FRAME_LEN); } @@@ -3707,7 -3701,7 +3707,7 @@@ int ieee80211_max_network_latency(struc ieee80211_recalc_ps(local, latency_usec); mutex_unlock(&local->iflist_mtx);
- return 0; + return NOTIFY_OK; }
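The final one-liner is subtler than it looks: ieee80211_max_network_latency() is a notifier callback (it is registered for PM QoS network-latency updates), and notifier callbacks report NOTIFY_* codes rather than 0 or -errno; returning 0 only worked because 0 happens to equal NOTIFY_DONE. A minimal illustration of the convention, with invented names:

	#include <linux/notifier.h>

	static int example_latency_notify(struct notifier_block *nb,
					  unsigned long latency_usec, void *data)
	{
		/* react to the new latency constraint here */
		return NOTIFY_OK;	/* handled; plain 0 would read as NOTIFY_DONE */
	}

	static struct notifier_block example_latency_nb = {
		.notifier_call = example_latency_notify,
	};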
static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata,