[linux-next] LinuxNextTracking branch, master, updated. next-20190708
by batman(a)open-mesh.org
The following commit has been merged into the master branch:
commit b60b0b3ece4e405498b49d668f67195de3c91760
Merge: afcfd7b1c8ce1b9c9f1e2fad554fbe6943afff2c ad7b134f65385627ebe9b3162a652267c8d991ee
Author: Stephen Rothwell <sfr(a)canb.auug.org.au>
Date: Mon Jul 8 13:06:47 2019 +1000
Merge remote-tracking branch 'net-next/master'
diff --combined MAINTAINERS
index 0d589af5a8aa,449e7cdb3303..43426fe42f18
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -321,7 -321,7 +321,7 @@@ F: drivers/pnp/pnpacpi
F: include/linux/acpi.h
F: include/linux/fwnode.h
F: include/acpi/
-F: Documentation/acpi/
+F: Documentation/firmware-guide/acpi/
F: Documentation/ABI/testing/sysfs-bus-acpi
F: Documentation/ABI/testing/configfs-acpi
F: drivers/pci/*acpi*
@@@ -668,13 -668,6 +668,13 @@@ S: Maintaine
F: Documentation/i2c/busses/i2c-ali1563
F: drivers/i2c/busses/i2c-ali1563.c
+ALLEGRO DVT VIDEO IP CORE DRIVER
+M: Michael Tretter <m.tretter(a)pengutronix.de>
+R: Pengutronix Kernel Team <kernel(a)pengutronix.de>
+L: linux-media(a)vger.kernel.org
+S: Maintained
+F: drivers/staging/media/allegro-dvt/
+
ALLWINNER SECURITY SYSTEM
M: Corentin Labbe <clabbe.montjoie(a)gmail.com>
L: linux-crypto(a)vger.kernel.org
@@@ -917,7 -910,7 +917,7 @@@ F: drivers/iio/adc/ad7768-1.
F: Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.txt
ANALOG DEVICES INC AD9389B DRIVER
-M: Hans Verkuil <hans.verkuil(a)cisco.com>
+M: Hans Verkuil <hverkuil-cisco(a)xs4all.nl>
L: linux-media(a)vger.kernel.org
S: Maintained
F: drivers/media/i2c/ad9389b*
@@@ -949,19 -942,19 +949,19 @@@ S: Maintaine
F: drivers/media/i2c/adv748x/*
ANALOG DEVICES INC ADV7511 DRIVER
-M: Hans Verkuil <hans.verkuil(a)cisco.com>
+M: Hans Verkuil <hverkuil-cisco(a)xs4all.nl>
L: linux-media(a)vger.kernel.org
S: Maintained
F: drivers/media/i2c/adv7511*
ANALOG DEVICES INC ADV7604 DRIVER
-M: Hans Verkuil <hans.verkuil(a)cisco.com>
+M: Hans Verkuil <hverkuil-cisco(a)xs4all.nl>
L: linux-media(a)vger.kernel.org
S: Maintained
F: drivers/media/i2c/adv7604*
ANALOG DEVICES INC ADV7842 DRIVER
-M: Hans Verkuil <hans.verkuil(a)cisco.com>
+M: Hans Verkuil <hverkuil-cisco(a)xs4all.nl>
L: linux-media(a)vger.kernel.org
S: Maintained
F: drivers/media/i2c/adv7842*
@@@ -1147,6 -1140,15 +1147,15 @@@ L: linux-media(a)vger.kernel.or
S: Maintained
F: drivers/media/i2c/aptina-pll.*
+ AQUANTIA ETHERNET DRIVER (atlantic)
+ M: Igor Russkikh <igor.russkikh(a)aquantia.com>
+ L: netdev(a)vger.kernel.org
+ S: Supported
+ W: http://www.aquantia.com
+ Q: http://patchwork.ozlabs.org/project/netdev/list/
+ F: drivers/net/ethernet/aquantia/atlantic/
+ F: Documentation/networking/device_drivers/aquantia/atlantic.txt
+
ARC FRAMEBUFFER DRIVER
M: Jaya Kumar <jayalk(a)intworks.biz>
S: Maintained
@@@ -1238,7 -1240,7 +1247,7 @@@ F: include/uapi/drm/panfrost_drm.
ARM MFM AND FLOPPY DRIVERS
M: Ian Molton <spyro(a)f2s.com>
S: Maintained
-F: arch/arm/lib/floppydma.S
+F: arch/arm/mach-rpc/floppydma.S
F: arch/arm/include/asm/floppy.h
ARM PMU PROFILING AND DEBUGGING
@@@ -1831,7 -1833,6 +1840,7 @@@ F: arch/arm/mach-orion5x
F: arch/arm/plat-orion/
F: arch/arm/boot/dts/dove*
F: arch/arm/boot/dts/orion5x*
+T: git git://git.infradead.org/linux-mvebu.git
ARM/Marvell Kirkwood and Armada 370, 375, 38x, 39x, XP, 3700, 7K/8K SOC support
M: Jason Cooper <jason(a)lakedaemon.net>
@@@ -1852,7 -1853,6 +1861,7 @@@ F: drivers/irqchip/irq-armada-370-xp.
F: drivers/irqchip/irq-mvebu-*
F: drivers/pinctrl/mvebu/
F: drivers/rtc/rtc-armada38x.c
+T: git git://git.infradead.org/linux-mvebu.git
ARM/Mediatek RTC DRIVER
M: Eddie Huang <eddie.huang(a)mediatek.com>
@@@ -2059,6 -2059,7 +2068,6 @@@ S: Maintaine
ARM/QUALCOMM SUPPORT
M: Andy Gross <agross(a)kernel.org>
-M: David Brown <david.brown(a)linaro.org>
L: linux-arm-msm(a)vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/soc/qcom/
@@@ -2080,7 -2081,7 +2089,7 @@@ F: drivers/i2c/busses/i2c-qup.
F: drivers/i2c/busses/i2c-qcom-geni.c
F: drivers/mfd/ssbi.c
F: drivers/mmc/host/mmci_qcom*
-F: drivers/mmc/host/sdhci_msm.c
+F: drivers/mmc/host/sdhci-msm.c
F: drivers/pci/controller/dwc/pcie-qcom.c
F: drivers/phy/qualcomm/
F: drivers/power/*/msm*
@@@ -2352,7 -2353,7 +2361,7 @@@ L: linux-arm-kernel(a)lists.infradead.or
S: Maintained
ARM/TEGRA HDMI CEC SUBSYSTEM SUPPORT
-M: Hans Verkuil <hans.verkuil(a)cisco.com>
+M: Hans Verkuil <hverkuil-cisco(a)xs4all.nl>
L: linux-tegra(a)vger.kernel.org
L: linux-media(a)vger.kernel.org
S: Maintained
@@@ -3687,7 -3688,7 +3696,7 @@@ F: drivers/crypto/ccree
W: https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocel…
CEC FRAMEWORK
-M: Hans Verkuil <hans.verkuil(a)cisco.com>
+M: Hans Verkuil <hverkuil-cisco(a)xs4all.nl>
L: linux-media(a)vger.kernel.org
T: git git://linuxtv.org/media_tree.git
W: http://linuxtv.org
@@@ -3704,7 -3705,7 +3713,7 @@@ F: Documentation/devicetree/bindings/me
F: Documentation/ABI/testing/debugfs-cec-error-inj
CEC GPIO DRIVER
-M: Hans Verkuil <hans.verkuil(a)cisco.com>
+M: Hans Verkuil <hverkuil-cisco(a)xs4all.nl>
L: linux-media(a)vger.kernel.org
T: git git://linuxtv.org/media_tree.git
W: http://linuxtv.org
@@@ -3896,7 -3897,7 +3905,7 @@@ F: Documentation/devicetree/bindings/hw
F: Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.txt
F: Documentation/devicetree/bindings/regulator/cirrus,lochnagar.txt
F: Documentation/devicetree/bindings/sound/cirrus,lochnagar.txt
-F: Documentation/hwmon/lochnagar
+F: Documentation/hwmon/lochnagar.rst
CISCO FCOE HBA DRIVER
M: Satish Kharat <satishkh(a)cisco.com>
@@@ -3950,14 -3951,6 +3959,14 @@@ M: Miguel Ojeda <miguel.ojeda.sandonis@
S: Maintained
F: .clang-format
+CLANG/LLVM BUILD SUPPORT
+L: clang-built-linux(a)googlegroups.com
+W: https://clangbuiltlinux.github.io/
+B: https://github.com/ClangBuiltLinux/linux/issues
+C: irc://chat.freenode.net/clangbuiltlinux
+S: Supported
+K: \b(?i:clang|llvm)\b
+
CLEANCACHE API
M: Konrad Rzeszutek Wilk <konrad.wilk(a)oracle.com>
L: linux-kernel(a)vger.kernel.org
@@@ -3988,7 -3981,7 +3997,7 @@@ S: Supporte
F: drivers/platform/x86/classmate-laptop.c
COBALT MEDIA DRIVER
-M: Hans Verkuil <hans.verkuil(a)cisco.com>
+M: Hans Verkuil <hverkuil-cisco(a)xs4all.nl>
L: linux-media(a)vger.kernel.org
T: git git://linuxtv.org/media_tree.git
W: https://linuxtv.org
@@@ -4807,7 -4800,7 +4816,7 @@@ S: Maintaine
W: http://plugable.com/category/projects/udlfb/
F: drivers/video/fbdev/udlfb.c
F: include/video/udlfb.h
-F: Documentation/fb/udlfb.txt
+F: Documentation/fb/udlfb.rst
DISTRIBUTED LOCK MANAGER (DLM)
M: Christine Caulfield <ccaulfie(a)redhat.com>
@@@ -4880,7 -4873,7 +4889,7 @@@ S: Maintaine
F: Documentation/
F: scripts/kernel-doc
X: Documentation/ABI/
-X: Documentation/acpi/
+X: Documentation/firmware-guide/acpi/
X: Documentation/devicetree/
X: Documentation/i2c/
X: Documentation/media/
@@@ -4940,13 -4933,6 +4949,6 @@@ L: linux-kernel(a)vger.kernel.or
S: Maintained
F: drivers/staging/fsl-dpaa2/ethsw
- DPAA2 PTP CLOCK DRIVER
- M: Yangbo Lu <yangbo.lu(a)nxp.com>
- L: netdev(a)vger.kernel.org
- S: Maintained
- F: drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp*
- F: drivers/net/ethernet/freescale/dpaa2/dprtc*
-
DPT_I2O SCSI RAID DRIVER
M: Adaptec OEM Raid Solutions <aacraid(a)microsemi.com>
L: linux-scsi(a)vger.kernel.org
@@@ -5618,7 -5604,8 +5620,8 @@@ F: include/linux/dynamic_debug.
DYNAMIC INTERRUPT MODERATION
M: Tal Gilboa <talgi(a)mellanox.com>
S: Maintained
- F: include/linux/net_dim.h
+ F: include/linux/dim.h
+ F: lib/dim/
DZ DECSTATION DZ11 SERIAL DRIVER
M: "Maciej W. Rozycki" <macro(a)linux-mips.org>
@@@ -6050,7 -6037,7 +6053,7 @@@ S: Maintaine
F: drivers/extcon/
F: include/linux/extcon/
F: include/linux/extcon.h
-F: Documentation/extcon/
+F: Documentation/firmware-guide/acpi/extcon-intel-int3496.rst
F: Documentation/devicetree/bindings/extcon/
EXYNOS DP DRIVER
@@@ -6268,7 -6255,7 +6271,7 @@@ FPGA DFL DRIVER
M: Wu Hao <hao.wu(a)intel.com>
L: linux-fpga(a)vger.kernel.org
S: Maintained
-F: Documentation/fpga/dfl.txt
+F: Documentation/fpga/dfl.rst
F: include/uapi/linux/fpga-dfl.h
F: drivers/fpga/dfl*
@@@ -6345,13 -6332,6 +6348,13 @@@ L: linux-i2c(a)vger.kernel.or
S: Maintained
F: drivers/i2c/busses/i2c-cpm.c
+FREESCALE IMX DDR PMU DRIVER
+M: Frank Li <Frank.li(a)nxp.com>
+L: linux-arm-kernel(a)lists.infradead.org
+S: Maintained
+F: drivers/perf/fsl_imx8_ddr_perf.c
+F: Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt
+
FREESCALE IMX LPI2C DRIVER
M: Dong Aisheng <aisheng.dong(a)nxp.com>
L: linux-i2c(a)vger.kernel.org
@@@ -6395,6 -6375,8 +6398,8 @@@ FREESCALE QORIQ PTP CLOCK DRIVE
M: Yangbo Lu <yangbo.lu(a)nxp.com>
L: netdev(a)vger.kernel.org
S: Maintained
+ F: drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp*
+ F: drivers/net/ethernet/freescale/dpaa2/dprtc*
F: drivers/net/ethernet/freescale/enetc/enetc_ptp.c
F: drivers/ptp/ptp_qoriq.c
F: drivers/ptp/ptp_qoriq_debugfs.c
@@@ -6440,7 -6422,6 +6445,7 @@@ M: Li Yang <leoyang.li(a)nxp.com
L: linuxppc-dev(a)lists.ozlabs.org
L: linux-arm-kernel(a)lists.infradead.org
S: Maintained
+F: Documentation/devicetree/bindings/misc/fsl,dpaa2-console.txt
F: Documentation/devicetree/bindings/soc/fsl/
F: drivers/soc/fsl/
F: include/linux/fsl/
@@@ -6483,7 -6464,7 +6488,7 @@@ M: "Rafael J. Wysocki" <rjw(a)rjwysocki.n
M: Pavel Machek <pavel(a)ucw.cz>
L: linux-pm(a)vger.kernel.org
S: Supported
-F: Documentation/power/freezing-of-tasks.txt
+F: Documentation/power/freezing-of-tasks.rst
F: include/linux/freezer.h
F: kernel/freezer.c
@@@ -6716,9 -6697,7 +6721,7 @@@ M: Paul Bolle <pebolle(a)tiscali.nl
L: gigaset307x-common(a)lists.sourceforge.net
W: http://gigaset307x.sourceforge.net/
S: Odd Fixes
- F: Documentation/isdn/README.gigaset
- F: drivers/isdn/gigaset/
- F: include/uapi/linux/gigaset_dev.h
+ F: drivers/staging/isdn/gigaset/
GNSS SUBSYSTEM
M: Johan Hovold <johan(a)kernel.org>
@@@ -6730,7 -6709,7 +6733,7 @@@ F: drivers/gnss
F: include/linux/gnss.h
GO7007 MPEG CODEC
-M: Hans Verkuil <hans.verkuil(a)cisco.com>
+M: Hans Verkuil <hverkuil-cisco(a)xs4all.nl>
L: linux-media(a)vger.kernel.org
S: Maintained
F: drivers/media/usb/go7007/
@@@ -6741,6 -6720,15 +6744,15 @@@ L: linux-input(a)vger.kernel.or
S: Maintained
F: drivers/input/touchscreen/goodix.c
+ GOOGLE ETHERNET DRIVERS
+ M: Catherine Sullivan <csully(a)google.com>
+ R: Sagi Shahar <sagis(a)google.com>
+ R: Jon Olson <jonolson(a)google.com>
+ L: netdev(a)vger.kernel.org
+ S: Supported
+ F: Documentation/networking/device_drivers/google/gve.txt
+ F: drivers/net/ethernet/google
+
GPD POCKET FAN DRIVER
M: Hans de Goede <hdegoede(a)redhat.com>
L: platform-driver-x86(a)vger.kernel.org
@@@ -7034,7 -7022,7 +7046,7 @@@ F: drivers/media/usb/hdpvr
HEWLETT PACKARD ENTERPRISE ILO NMI WATCHDOG DRIVER
M: Jerry Hoemann <jerry.hoemann(a)hpe.com>
S: Supported
-F: Documentation/watchdog/hpwdt.txt
+F: Documentation/watchdog/hpwdt.rst
F: drivers/watchdog/hpwdt.c
HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa)
@@@ -7217,7 -7205,7 +7229,7 @@@ F: drivers/net/ethernet/hp/hp100.
HPET: High Precision Event Timers driver
M: Clemens Ladisch <clemens(a)ladisch.de>
S: Maintained
-F: Documentation/timers/hpet.txt
+F: Documentation/timers/hpet.rst
F: drivers/char/hpet.c
F: include/linux/hpet.h
F: include/uapi/linux/hpet.h
@@@ -7635,7 -7623,7 +7647,7 @@@ IDE/ATAPI DRIVER
M: Borislav Petkov <bp(a)alien8.de>
L: linux-ide(a)vger.kernel.org
S: Maintained
-F: Documentation/cdrom/ide-cd
+F: Documentation/cdrom/ide-cd.rst
F: drivers/ide/ide-cd*
IDEAPAD LAPTOP EXTRAS DRIVER
@@@ -7826,34 -7814,7 +7838,34 @@@ INGENIC JZ4780 NAND DRIVE
M: Harvey Hunt <harveyhuntnexus(a)gmail.com>
L: linux-mtd(a)lists.infradead.org
S: Maintained
-F: drivers/mtd/nand/raw/jz4780_*
+F: drivers/mtd/nand/raw/ingenic/
+
+INGENIC JZ47xx SoCs
+M: Paul Cercueil <paul(a)crapouillou.net>
+S: Maintained
+F: arch/mips/boot/dts/ingenic/
+F: arch/mips/include/asm/mach-jz4740/
+F: arch/mips/jz4740/
+F: drivers/clk/ingenic/
+F: drivers/dma/dma-jz4780.c
+F: drivers/gpu/drm/ingenic/
+F: drivers/i2c/busses/i2c-jz4780.c
+F: drivers/iio/adc/ingenic-adc.c
+F: drivers/irqchip/irq-ingenic.c
+F: drivers/memory/jz4780-nemc.c
+F: drivers/mmc/host/jz4740_mmc.c
+F: drivers/mtd/nand/raw/ingenic/
+F: drivers/pinctrl/pinctrl-ingenic.c
+F: drivers/power/supply/ingenic-battery.c
+F: drivers/pwm/pwm-jz4740.c
+F: drivers/rtc/rtc-jz4740.c
+F: drivers/tty/serial/8250/8250_ingenic.c
+F: drivers/usb/musb/jz4740.c
+F: drivers/watchdog/jz4740_wdt.c
+F: include/dt-bindings/iio/adc/ingenic,adc.h
+F: include/linux/mfd/ingenic-tcu.h
+F: sound/soc/jz4740/
+F: sound/soc/codecs/jz47*
INOTIFY
M: Jan Kara <jack(a)suse.cz>
@@@ -7975,7 -7936,7 +7987,7 @@@ INTEL FRAMEBUFFER DRIVER (excluding 81
M: Maik Broemme <mbroemme(a)libmpq.org>
L: linux-fbdev(a)vger.kernel.org
S: Maintained
-F: Documentation/fb/intelfb.txt
+F: Documentation/fb/intelfb.rst
F: drivers/video/fbdev/intelfb/
INTEL GPIO DRIVERS
@@@ -8427,18 -8388,26 +8439,26 @@@ S: Supporte
W: http://www.linux-iscsi.org
F: drivers/infiniband/ulp/isert
- ISDN SUBSYSTEM
+ ISDN/mISDN SUBSYSTEM
M: Karsten Keil <isdn(a)linux-pingi.de>
L: isdn4linux(a)listserv.isdn4linux.de (subscribers-only)
L: netdev(a)vger.kernel.org
W: http://www.isdn4linux.de
- T: git git://git.kernel.org/pub/scm/linux/kernel/git/kkeil/isdn-2.6.git
S: Maintained
+ F: drivers/isdn/mISDN
+ F: drivers/isdn/hardware
+
+ ISDN/CAPI SUBSYSTEM
+ M: Karsten Keil <isdn(a)linux-pingi.de>
+ L: isdn4linux(a)listserv.isdn4linux.de (subscribers-only)
+ L: netdev(a)vger.kernel.org
+ W: http://www.isdn4linux.de
+ S: Odd Fixes
F: Documentation/isdn/
- F: drivers/isdn/
- F: include/linux/isdn.h
+ F: drivers/isdn/capi/
+ F: drivers/staging/isdn/
+ F: net/bluetooth/cmtp/
F: include/linux/isdn/
- F: include/uapi/linux/isdn.h
F: include/uapi/linux/isdn/
IT87 HARDWARE MONITORING DRIVER
@@@ -9688,17 -9657,6 +9708,17 @@@ L: linux-iio(a)vger.kernel.or
S: Maintained
F: drivers/iio/dac/cio-dac.c
+MEDIA CONTROLLER FRAMEWORK
+M: Sakari Ailus <sakari.ailus(a)linux.intel.com>
+M: Laurent Pinchart <laurent.pinchart(a)ideasonboard.com>
+L: linux-media(a)vger.kernel.org
+W: https://www.linuxtv.org
+T: git git://linuxtv.org/media_tree.git
+S: Supported
+F: drivers/media/mc/
+F: include/media/media-*.h
+F: include/uapi/linux/media.h
+
MEDIA DRIVERS FOR ASCOT2E
M: Sergey Kozlov <serjk(a)netup.ru>
M: Abylay Ospan <aospan(a)netup.ru>
@@@ -10020,13 -9978,6 +10040,13 @@@ L: linux-wireless(a)vger.kernel.or
S: Maintained
F: drivers/net/wireless/mediatek/mt7601u/
+MEDIATEK MT7621/28/88 I2C DRIVER
+M: Stefan Roese <sr(a)denx.de>
+L: linux-i2c(a)vger.kernel.org
+S: Maintained
+F: drivers/i2c/busses/i2c-mt7621.c
+F: Documentation/devicetree/bindings/i2c/i2c-mt7621.txt
+
MEDIATEK NAND CONTROLLER DRIVER
M: Xiaolei Li <xiaolei.li(a)mediatek.com>
L: linux-mtd(a)lists.infradead.org
@@@ -10172,6 -10123,7 +10192,7 @@@ Q: http://patchwork.ozlabs.org/project/
S: Supported
F: drivers/net/ethernet/mellanox/mlx5/core/
F: include/linux/mlx5/
+ F: Documentation/networking/device_drivers/mellanox/
MELLANOX MLX5 IB driver
M: Leon Romanovsky <leonro(a)mellanox.com>
@@@ -10282,7 -10234,7 +10303,7 @@@ F: drivers/watchdog/menz69_wdt.
MESON AO CEC DRIVER FOR AMLOGIC SOCS
M: Neil Armstrong <narmstrong(a)baylibre.com>
-L: linux-media(a)lists.freedesktop.org
+L: linux-media(a)vger.kernel.org
L: linux-amlogic(a)lists.infradead.org
W: http://linux-meson.com/
S: Supported
@@@ -10298,14 -10250,6 +10319,14 @@@ S: Maintaine
F: drivers/mtd/nand/raw/meson_*
F: Documentation/devicetree/bindings/mtd/amlogic,meson-nand.txt
+MESON VIDEO DECODER DRIVER FOR AMLOGIC SOCS
+M: Maxime Jourdan <mjourdan(a)baylibre.com>
+L: linux-media(a)vger.kernel.org
+L: linux-amlogic(a)lists.infradead.org
+S: Supported
+F: drivers/staging/media/meson/vdec/
+T: git git://linuxtv.org/media_tree.git
+
METHODE UDPU SUPPORT
M: Vladimir Vid <vladimir.vid(a)sartura.hr>
S: Maintained
@@@ -10359,9 -10303,7 +10380,9 @@@ MICROCHIP ISC DRIVE
M: Eugen Hristev <eugen.hristev(a)microchip.com>
L: linux-media(a)vger.kernel.org
S: Supported
-F: drivers/media/platform/atmel/atmel-isc.c
+F: drivers/media/platform/atmel/atmel-sama5d2-isc.c
+F: drivers/media/platform/atmel/atmel-isc.h
+F: drivers/media/platform/atmel/atmel-isc-base.c
F: drivers/media/platform/atmel/atmel-isc-regs.h
F: Documentation/devicetree/bindings/media/atmel-isc.txt
@@@ -10921,6 -10863,14 +10942,6 @@@ F: driver/net/net_failover.
F: include/net/net_failover.h
F: Documentation/networking/net_failover.rst
-NETEFFECT IWARP RNIC DRIVER (IW_NES)
-M: Faisal Latif <faisal.latif(a)intel.com>
-L: linux-rdma(a)vger.kernel.org
-W: http://www.intel.com/Products/Server/Adapters/Server-Cluster/Server-Cluster…
-S: Supported
-F: drivers/infiniband/hw/nes/
-F: include/uapi/rdma/nes-abi.h
-
NETEM NETWORK EMULATOR
M: Stephen Hemminger <stephen(a)networkplumber.org>
L: netem(a)lists.linux-foundation.org (moderated for non-subscribers)
@@@ -10937,7 -10887,7 +10958,7 @@@ F: drivers/net/ethernet/neterion
NETFILTER
M: Pablo Neira Ayuso <pablo(a)netfilter.org>
- M: Jozsef Kadlecsik <kadlec(a)blackhole.kfki.hu>
+ M: Jozsef Kadlecsik <kadlec(a)netfilter.org>
M: Florian Westphal <fw(a)strlen.de>
L: netfilter-devel(a)vger.kernel.org
L: coreteam(a)netfilter.org
@@@ -11150,6 -11100,15 +11171,15 @@@ L: netdev(a)vger.kernel.or
S: Supported
F: drivers/net/ethernet/qlogic/netxen/
+ NEXTHOP
+ M: David Ahern <dsahern(a)kernel.org>
+ L: netdev(a)vger.kernel.org
+ S: Maintained
+ F: include/net/nexthop.h
+ F: include/uapi/linux/nexthop.h
+ F: include/net/netns/nexthop.h
+ F: net/ipv4/nexthop.c
+
NFC SUBSYSTEM
L: netdev(a)vger.kernel.org
S: Orphan
@@@ -11353,7 -11312,7 +11383,7 @@@ NXP FXAS21002C DRIVE
M: Rui Miguel Silva <rmfrfs(a)gmail.com>
L: linux-iio(a)vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/iio/gyroscope/fxas21002c.txt
+F: Documentation/devicetree/bindings/iio/gyroscope/nxp,fxas21002c.txt
F: drivers/iio/gyro/fxas21002c_core.c
F: drivers/iio/gyro/fxas21002c.h
F: drivers/iio/gyro/fxas21002c_i2c.c
@@@ -11755,13 -11714,11 +11785,13 @@@ F: drivers/scsi/st.
OP-TEE DRIVER
M: Jens Wiklander <jens.wiklander(a)linaro.org>
+L: tee-dev(a)lists.linaro.org
S: Maintained
F: drivers/tee/optee/
OP-TEE RANDOM NUMBER GENERATOR (RNG) DRIVER
M: Sumit Garg <sumit.garg(a)linaro.org>
+L: tee-dev(a)lists.linaro.org
S: Maintained
F: drivers/char/hw_random/optee-rng.c
@@@ -11848,7 -11805,7 +11878,7 @@@ S: Maintaine
T: git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git
F: drivers/opp/
F: include/linux/pm_opp.h
-F: Documentation/power/opp.txt
+F: Documentation/power/opp.rst
F: Documentation/devicetree/bindings/opp/
OPL4 DRIVER
@@@ -12227,7 -12184,7 +12257,7 @@@ M: Sam Bobroff <sbobroff(a)linux.ibm.com
M: Oliver O'Halloran <oohall(a)gmail.com>
L: linuxppc-dev(a)lists.ozlabs.org
S: Supported
-F: Documentation/PCI/pci-error-recovery.txt
+F: Documentation/PCI/pci-error-recovery.rst
F: drivers/pci/pcie/aer.c
F: drivers/pci/pcie/dpc.c
F: drivers/pci/pcie/err.c
@@@ -12240,7 -12197,7 +12270,7 @@@ PCI ERROR RECOVER
M: Linas Vepstas <linasvepstas(a)gmail.com>
L: linux-pci(a)vger.kernel.org
S: Supported
-F: Documentation/PCI/pci-error-recovery.txt
+F: Documentation/PCI/pci-error-recovery.rst
PCI MSI DRIVER FOR ALTERA MSI IP
M: Ley Foon Tan <lftan(a)altera.com>
@@@ -12733,7 -12690,7 +12763,7 @@@ M: Rodolfo Giometti <giometti@enneenne.
W: http://wiki.enneenne.com/index.php/LinuxPPS_support
L: linuxpps(a)ml.enneenne.com (subscribers-only)
S: Maintained
-F: Documentation/pps/
+F: Documentation/driver-api/pps.rst
F: Documentation/devicetree/bindings/pps/pps-gpio.txt
F: Documentation/ABI/testing/sysfs-pps
F: drivers/pps/
@@@ -12839,7 -12796,7 +12869,7 @@@ L: netdev(a)vger.kernel.or
S: Maintained
W: http://linuxptp.sourceforge.net/
F: Documentation/ABI/testing/sysfs-ptp
-F: Documentation/ptp/*
+F: Documentation/driver-api/ptp.rst
F: drivers/net/phy/dp83640*
F: drivers/ptp/*
F: include/linux/ptp_cl*
@@@ -12853,6 -12810,7 +12883,6 @@@ F: include/linux/regset.
F: include/linux/tracehook.h
F: include/uapi/linux/ptrace.h
F: include/uapi/linux/ptrace.h
-F: include/asm-generic/ptrace.h
F: kernel/ptrace.c
F: arch/*/ptrace*.c
F: arch/*/*/ptrace*.c
@@@ -13126,7 -13084,7 +13156,7 @@@ M: Niklas Cassel <niklas.cassel@linaro.
L: netdev(a)vger.kernel.org
S: Maintained
F: drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
-F: Documentation/devicetree/bindings/net/qcom,dwmac.txt
+F: Documentation/devicetree/bindings/net/qcom,ethqos.txt
QUALCOMM GENERIC INTERFACE I2C DRIVER
M: Alok Chauhan <alokc(a)codeaurora.org>
@@@ -13549,7 -13507,7 +13579,7 @@@ RISC-V ARCHITECTUR
M: Palmer Dabbelt <palmer(a)sifive.com>
M: Albert Ou <aou(a)eecs.berkeley.edu>
L: linux-riscv(a)lists.infradead.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/palmer/riscv-linux.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux.git
S: Supported
F: arch/riscv/
K: riscv
@@@ -13570,11 -13528,11 +13600,11 @@@ S: Maintaine
F: drivers/media/platform/rockchip/rga/
F: Documentation/devicetree/bindings/media/rockchip-rga.txt
-ROCKCHIP VPU CODEC DRIVER
+HANTRO VPU CODEC DRIVER
M: Ezequiel Garcia <ezequiel(a)collabora.com>
L: linux-media(a)vger.kernel.org
S: Maintained
-F: drivers/staging/media/platform/rockchip/vpu/
+F: drivers/staging/media/platform/hantro/
F: Documentation/devicetree/bindings/media/rockchip-vpu.txt
ROCKER DRIVER
@@@ -13775,7 -13733,7 +13805,7 @@@ L: linux-s390(a)vger.kernel.or
L: kvm(a)vger.kernel.org
S: Supported
F: drivers/s390/cio/vfio_ccw*
-F: Documentation/s390/vfio-ccw.txt
+F: Documentation/s390/vfio-ccw.rst
F: include/uapi/linux/vfio_ccw.h
S390 ZCRYPT DRIVER
@@@ -13795,7 -13753,7 +13825,7 @@@ S: Supporte
F: drivers/s390/crypto/vfio_ap_drv.c
F: drivers/s390/crypto/vfio_ap_private.h
F: drivers/s390/crypto/vfio_ap_ops.c
-F: Documentation/s390/vfio-ap.txt
+F: Documentation/s390/vfio-ap.rst
S390 ZFCP DRIVER
M: Steffen Maier <maier(a)linux.ibm.com>
@@@ -14405,7 -14363,7 +14435,7 @@@ M: Paul Walmsley <paul.walmsley@sifive.
L: linux-riscv(a)lists.infradead.org
T: git git://github.com/sifive/riscv-linux.git
S: Supported
-K: sifive
+K: [^@]sifive
N: sifive
SIFIVE FU540 SYSTEM-ON-CHIP
@@@ -14432,7 -14390,7 +14462,7 @@@ M: Sudip Mukherjee <sudip.mukherjee@cod
L: linux-fbdev(a)vger.kernel.org
S: Maintained
F: drivers/video/fbdev/sm712*
-F: Documentation/fb/sm712fb.txt
+F: Documentation/fb/sm712fb.rst
SIMPLE FIRMWARE INTERFACE (SFI)
M: Len Brown <lenb(a)kernel.org>
@@@ -14502,7 -14460,7 +14532,7 @@@ SIS FRAMEBUFFER DRIVE
M: Thomas Winischhofer <thomas(a)winischhofer.net>
W: http://www.winischhofer.net/linuxsisvga.shtml
S: Maintained
-F: Documentation/fb/sisfb.txt
+F: Documentation/fb/sisfb.rst
F: drivers/video/fbdev/sis/
F: include/video/sisfb.h
@@@ -15538,7 -15496,6 +15568,7 @@@ F: include/media/i2c/tw9910.
TEE SUBSYSTEM
M: Jens Wiklander <jens.wiklander(a)linaro.org>
+L: tee-dev(a)lists.linaro.org
S: Maintained
F: include/linux/tee_drv.h
F: include/uapi/linux/tee.h
@@@ -15568,7 -15525,6 +15598,7 @@@ F: drivers/dma/tegra
TEGRA I2C DRIVER
M: Laxman Dewangan <ldewangan(a)nvidia.com>
+R: Dmitry Osipenko <digetx(a)gmail.com>
S: Supported
F: drivers/i2c/busses/i2c-tegra.c
@@@ -15694,7 -15650,7 +15724,7 @@@ M: Viresh Kumar <viresh.kumar(a)linaro.or
M: Javi Merino <javi.merino(a)kernel.org>
L: linux-pm(a)vger.kernel.org
S: Supported
-F: Documentation/thermal/cpu-cooling-api.txt
+F: Documentation/thermal/cpu-cooling-api.rst
F: drivers/thermal/cpu_cooling.c
F: include/linux/cpu_cooling.h
@@@ -16393,7 -16349,7 +16423,7 @@@ M: Benjamin Tissoires <benjamin.tissoir
L: linux-usb(a)vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git
S: Maintained
-F: Documentation/hid/hiddev.txt
+F: Documentation/hid/hiddev.rst
F: drivers/hid/usbhid/
USB INTEL XHCI ROLE MUX DRIVER
@@@ -16692,7 -16648,7 +16722,7 @@@ M: Michal Januszewski <spock(a)gentoo.org
L: linux-fbdev(a)vger.kernel.org
W: https://github.com/mjanusz/v86d
S: Maintained
-F: Documentation/fb/uvesafb.txt
+F: Documentation/fb/uvesafb.rst
F: drivers/video/fbdev/uvesafb.*
VF610 NAND DRIVER
@@@ -16767,7 -16723,7 +16797,7 @@@ S: Maintaine
F: drivers/net/ethernet/via/via-velocity.*
VICODEC VIRTUAL CODEC DRIVER
-M: Hans Verkuil <hans.verkuil(a)cisco.com>
+M: Hans Verkuil <hverkuil-cisco(a)xs4all.nl>
L: linux-media(a)vger.kernel.org
T: git git://linuxtv.org/media_tree.git
W: https://linuxtv.org
@@@ -16790,7 -16746,6 +16820,7 @@@ VIDEOBUF2 FRAMEWOR
M: Pawel Osciak <pawel(a)osciak.com>
M: Marek Szyprowski <m.szyprowski(a)samsung.com>
M: Kyungmin Park <kyungmin.park(a)samsung.com>
+R: Tomasz Figa <tfiga(a)chromium.org>
L: linux-media(a)vger.kernel.org
S: Maintained
F: drivers/media/common/videobuf2/*
@@@ -17349,7 -17304,6 +17379,7 @@@ N: xd
XDP SOCKETS (AF_XDP)
M: Björn Töpel <bjorn.topel(a)intel.com>
M: Magnus Karlsson <magnus.karlsson(a)intel.com>
+R: Jonathan Lemon <jonathan.lemon(a)gmail.com>
L: netdev(a)vger.kernel.org
L: bpf(a)vger.kernel.org
S: Maintained
@@@ -17443,13 -17397,7 +17473,13 @@@ W: http://xfs.org
T: git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
S: Supported
F: Documentation/filesystems/xfs.txt
+F: Documentation/ABI/testing/sysfs-fs-xfs
+F: Documentation/filesystems/xfs.txt
+F: Documentation/filesystems/xfs-delayed-logging-design.txt
+F: Documentation/filesystems/xfs-self-describing-metadata.txt
F: fs/xfs/
+F: include/uapi/linux/dqblk_xfs.h
+F: include/uapi/linux/fsmap.h
XILINX AXI ETHERNET DRIVER
M: Anirudha Sarangi <anirudh(a)xilinx.com>
diff --combined arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
index e83c080acc8b,22a1c74dddf3..7975519b4f56
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
@@@ -70,27 -70,6 +70,27 @@@
clock-output-names = "sysclk";
};
+ dpclk: clock-dp {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ clock-output-names= "dpclk";
+ };
+
+ aclk: clock-axi {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <650000000>;
+ clock-output-names= "aclk";
+ };
+
+ pclk: clock-apb {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <650000000>;
+ clock-output-names= "pclk";
+ };
+
reboot {
compatible ="syscon-reboot";
regmap = <&dcfg>;
@@@ -306,24 -285,13 +306,24 @@@
#interrupt-cells = <2>;
};
- wdog0: watchdog@23c0000 {
- compatible = "fsl,ls1028a-wdt", "fsl,imx21-wdt";
- reg = <0x0 0x23c0000 0x0 0x10000>;
- interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clockgen 4 1>;
- big-endian;
- status = "disabled";
+ usb0: usb@3100000 {
+ compatible = "fsl,ls1028a-dwc3", "snps,dwc3";
+ reg = <0x0 0x3100000 0x0 0x10000>;
+ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
+ dr_mode = "host";
+ snps,dis_rxdet_inp3_quirk;
+ snps,quirk-frame-length-adjustment = <0x20>;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
+ };
+
+ usb1: usb@3110000 {
+ compatible = "fsl,ls1028a-dwc3", "snps,dwc3";
+ reg = <0x0 0x3110000 0x0 0x10000>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ dr_mode = "host";
+ snps,dis_rxdet_inp3_quirk;
+ snps,quirk-frame-length-adjustment = <0x20>;
+ snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>;
};
sata: sata@3200000 {
@@@ -388,79 -356,6 +388,79 @@@
<GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>;
};
+ crypto: crypto@8000000 {
+ compatible = "fsl,sec-v5.0", "fsl,sec-v4.0";
+ fsl,sec-era = <10>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x00 0x8000000 0x100000>;
+ reg = <0x00 0x8000000 0x0 0x100000>;
+ interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
+ dma-coherent;
+
+ sec_jr0: jr@10000 {
+ compatible = "fsl,sec-v5.0-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x10000 0x10000>;
+ interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ sec_jr1: jr@20000 {
+ compatible = "fsl,sec-v5.0-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x20000 0x10000>;
+ interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ sec_jr2: jr@30000 {
+ compatible = "fsl,sec-v5.0-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x30000 0x10000>;
+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ sec_jr3: jr@40000 {
+ compatible = "fsl,sec-v5.0-job-ring",
+ "fsl,sec-v4.0-job-ring";
+ reg = <0x40000 0x10000>;
+ interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+ qdma: dma-controller@8380000 {
+ compatible = "fsl,ls1028a-qdma", "fsl,ls1021a-qdma";
+ reg = <0x0 0x8380000 0x0 0x1000>, /* Controller regs */
+ <0x0 0x8390000 0x0 0x10000>, /* Status regs */
+ <0x0 0x83a0000 0x0 0x40000>; /* Block regs */
+ interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 251 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "qdma-error", "qdma-queue0",
+ "qdma-queue1", "qdma-queue2", "qdma-queue3";
+ dma-channels = <8>;
+ block-number = <1>;
+ block-offset = <0x10000>;
+ fsl,dma-queues = <2>;
+ status-sizes = <64>;
+ queue-sizes = <64 64>;
+ };
+
+ cluster1_core0_watchdog: watchdog@c000000 {
+ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xc000000 0x0 0x1000>;
+ clocks = <&clockgen 4 15>, <&clockgen 4 15>;
+ clock-names = "apb_pclk", "wdog_clk";
+ };
+
+ cluster1_core1_watchdog: watchdog@c010000 {
+ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0 0xc010000 0x0 0x1000>;
+ clocks = <&clockgen 4 15>, <&clockgen 4 15>;
+ clock-names = "apb_pclk", "wdog_clk";
+ };
+
sai1: audio-controller@f100000 {
#sound-dai-cells = <0>;
compatible = "fsl,vf610-sai";
@@@ -536,23 -431,12 +536,29 @@@
compatible = "fsl,enetc";
reg = <0x000100 0 0 0 0>;
};
+ ethernet@0,4 {
+ compatible = "fsl,enetc-ptp";
+ reg = <0x000400 0 0 0 0>;
+ clocks = <&clockgen 4 0>;
+ little-endian;
+ };
};
};
+
+ malidp0: display@f080000 {
+ compatible = "arm,mali-dp500";
+ reg = <0x0 0xf080000 0x0 0x10000>;
+ interrupts = <0 222 IRQ_TYPE_LEVEL_HIGH>,
+ <0 223 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "DE", "SE";
+ clocks = <&dpclk>, <&aclk>, <&aclk>, <&pclk>;
+ clock-names = "pxlclk", "mclk", "aclk", "pclk";
+ arm,malidp-output-port-lines = /bits/ 8 <8 8 8>;
+
+ port {
+ dp0_out: endpoint {
+
+ };
+ };
+ };
};
diff --combined arch/mips/configs/malta_defconfig
index c9c4145c6fc0,0de92ac1ca64..59eedf55419d
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@@ -210,11 -210,11 +210,10 @@@ CONFIG_NET_ACT_NAT=
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
- CONFIG_NET_CLS_IND=y
CONFIG_CFG80211=m
CONFIG_MAC80211=m
CONFIG_MAC80211_MESH=y
CONFIG_RFKILL=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_CONNECTOR=m
CONFIG_MTD=y
diff --combined arch/mips/configs/malta_kvm_defconfig
index 841f19adaec7,efc3abace048..8ef612552a19
--- a/arch/mips/configs/malta_kvm_defconfig
+++ b/arch/mips/configs/malta_kvm_defconfig
@@@ -215,11 -215,11 +215,10 @@@ CONFIG_NET_ACT_NAT=
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
- CONFIG_NET_CLS_IND=y
CONFIG_CFG80211=m
CONFIG_MAC80211=m
CONFIG_MAC80211_MESH=y
CONFIG_RFKILL=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_CONNECTOR=m
CONFIG_MTD=y
diff --combined arch/mips/configs/malta_kvm_guest_defconfig
index 764ba62f7a5c,c6ceeca4394d..d2a008c9907c
--- a/arch/mips/configs/malta_kvm_guest_defconfig
+++ b/arch/mips/configs/malta_kvm_guest_defconfig
@@@ -212,11 -212,11 +212,10 @@@ CONFIG_NET_ACT_NAT=
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
- CONFIG_NET_CLS_IND=y
CONFIG_CFG80211=m
CONFIG_MAC80211=m
CONFIG_MAC80211_MESH=y
CONFIG_RFKILL=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_CONNECTOR=m
CONFIG_MTD=y
diff --combined arch/mips/configs/maltaup_xpa_defconfig
index de5bb1c9aeb8,56861aef2756..970df6d42728
--- a/arch/mips/configs/maltaup_xpa_defconfig
+++ b/arch/mips/configs/maltaup_xpa_defconfig
@@@ -212,11 -212,11 +212,10 @@@ CONFIG_NET_ACT_NAT=
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
- CONFIG_NET_CLS_IND=y
CONFIG_CFG80211=m
CONFIG_MAC80211=m
CONFIG_MAC80211_MESH=y
CONFIG_RFKILL=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_CONNECTOR=m
diff --combined arch/mips/configs/rb532_defconfig
index 97d96117e11a,864c70fbe668..5b947183852b
--- a/arch/mips/configs/rb532_defconfig
+++ b/arch/mips/configs/rb532_defconfig
@@@ -103,8 -103,8 +103,7 @@@ CONFIG_GACT_PROB=
CONFIG_NET_ACT_MIRRED=m
CONFIG_NET_ACT_IPT=m
CONFIG_NET_ACT_PEDIT=m
- CONFIG_NET_CLS_IND=y
CONFIG_HAMRADIO=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_BLOCK2MTD=y
diff --combined arch/powerpc/configs/ppc6xx_defconfig
index 0e09c39afd52,aa51b9b66fa2..9dca4cffa623
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@@ -301,7 -301,6 +301,6 @@@ CONFIG_NET_ACT_NAT=
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
- CONFIG_NET_CLS_IND=y
CONFIG_IRDA=m
CONFIG_IRLAN=m
CONFIG_IRNET=m
@@@ -346,6 -345,7 +345,6 @@@ CONFIG_MAC80211_LEDS=
CONFIG_MAC80211_DEBUGFS=y
CONFIG_NET_9P=m
CONFIG_NET_9P_VIRTIO=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEBUG_DEVRES=y
CONFIG_CONNECTOR=y
CONFIG_PARPORT=m
@@@ -1123,7 -1123,6 +1122,7 @@@ CONFIG_NLS_KOI8_R=
CONFIG_NLS_KOI8_U=m
CONFIG_DEBUG_INFO=y
CONFIG_UNUSED_SYMBOLS=y
+CONFIG_HEADERS_INSTALL=y
CONFIG_HEADERS_CHECK=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
@@@ -1148,6 -1147,7 +1147,6 @@@ CONFIG_FAIL_MAKE_REQUEST=
CONFIG_FAIL_IO_TIMEOUT=y
CONFIG_FAULT_INJECTION_DEBUG_FS=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
-CONFIG_LATENCYTOP=y
CONFIG_SCHED_TRACER=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
diff --combined arch/sh/configs/se7712_defconfig
index 6ac7d362e106,1e116529735f..9a527f978106
--- a/arch/sh/configs/se7712_defconfig
+++ b/arch/sh/configs/se7712_defconfig
@@@ -63,7 -63,7 +63,6 @@@ CONFIG_NET_SCH_NETEM=
CONFIG_NET_CLS_TCINDEX=y
CONFIG_NET_CLS_ROUTE4=y
CONFIG_NET_CLS_FW=y
- CONFIG_NET_CLS_IND=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
diff --combined arch/sh/configs/se7721_defconfig
index ffd15acc2a04,c66e512719ab..3b0e1eb6e874
--- a/arch/sh/configs/se7721_defconfig
+++ b/arch/sh/configs/se7721_defconfig
@@@ -62,7 -62,7 +62,6 @@@ CONFIG_NET_SCH_NETEM=
CONFIG_NET_CLS_TCINDEX=y
CONFIG_NET_CLS_ROUTE4=y
CONFIG_NET_CLS_FW=y
- CONFIG_NET_CLS_IND=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
diff --combined arch/sh/configs/titan_defconfig
index 1c1c78e74fbb,171ab05ce4fc..4ec961ace688
--- a/arch/sh/configs/titan_defconfig
+++ b/arch/sh/configs/titan_defconfig
@@@ -142,7 -142,7 +142,6 @@@ CONFIG_GACT_PROB=
CONFIG_NET_ACT_MIRRED=m
CONFIG_NET_ACT_IPT=m
CONFIG_NET_ACT_PEDIT=m
- CONFIG_NET_CLS_IND=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_FW_LOADER=m
CONFIG_CONNECTOR=m
CONFIG_MTD=m
diff --combined arch/x86/net/bpf_jit_comp32.c
index 1d12d2174085,133433d181ba..393d251798c0
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@@ -253,13 -253,14 +253,14 @@@ static inline void emit_ia32_mov_r(cons
/* dst = src */
static inline void emit_ia32_mov_r64(const bool is64, const u8 dst[],
const u8 src[], bool dstk,
- bool sstk, u8 **pprog)
+ bool sstk, u8 **pprog,
+ const struct bpf_prog_aux *aux)
{
emit_ia32_mov_r(dst_lo, src_lo, dstk, sstk, pprog);
if (is64)
/* complete 8 byte move */
emit_ia32_mov_r(dst_hi, src_hi, dstk, sstk, pprog);
- else
+ else if (!aux->verifier_zext)
/* zero out high 4 bytes */
emit_ia32_mov_i(dst_hi, 0, dstk, pprog);
}
@@@ -313,7 -314,8 +314,8 @@@ static inline void emit_ia32_mul_r(cons
}
static inline void emit_ia32_to_le_r64(const u8 dst[], s32 val,
- bool dstk, u8 **pprog)
+ bool dstk, u8 **pprog,
+ const struct bpf_prog_aux *aux)
{
u8 *prog = *pprog;
int cnt = 0;
@@@ -334,12 -336,14 +336,14 @@@
*/
EMIT2(0x0F, 0xB7);
EMIT1(add_2reg(0xC0, dreg_lo, dreg_lo));
- /* xor dreg_hi,dreg_hi */
- EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
+ if (!aux->verifier_zext)
+ /* xor dreg_hi,dreg_hi */
+ EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
break;
case 32:
- /* xor dreg_hi,dreg_hi */
- EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
+ if (!aux->verifier_zext)
+ /* xor dreg_hi,dreg_hi */
+ EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
break;
case 64:
/* nop */
@@@ -358,7 -362,8 +362,8 @@@
}
static inline void emit_ia32_to_be_r64(const u8 dst[], s32 val,
- bool dstk, u8 **pprog)
+ bool dstk, u8 **pprog,
+ const struct bpf_prog_aux *aux)
{
u8 *prog = *pprog;
int cnt = 0;
@@@ -380,16 -385,18 +385,18 @@@
EMIT2(0x0F, 0xB7);
EMIT1(add_2reg(0xC0, dreg_lo, dreg_lo));
- /* xor dreg_hi,dreg_hi */
- EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
+ if (!aux->verifier_zext)
+ /* xor dreg_hi,dreg_hi */
+ EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
break;
case 32:
/* Emit 'bswap eax' to swap lower 4 bytes */
EMIT1(0x0F);
EMIT1(add_1reg(0xC8, dreg_lo));
- /* xor dreg_hi,dreg_hi */
- EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
+ if (!aux->verifier_zext)
+ /* xor dreg_hi,dreg_hi */
+ EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
break;
case 64:
/* Emit 'bswap eax' to swap lower 4 bytes */
@@@ -569,7 -576,7 +576,7 @@@ static inline void emit_ia32_alu_r(cons
static inline void emit_ia32_alu_r64(const bool is64, const u8 op,
const u8 dst[], const u8 src[],
bool dstk, bool sstk,
- u8 **pprog)
+ u8 **pprog, const struct bpf_prog_aux *aux)
{
u8 *prog = *pprog;
@@@ -577,7 -584,7 +584,7 @@@
if (is64)
emit_ia32_alu_r(is64, true, op, dst_hi, src_hi, dstk, sstk,
&prog);
- else
+ else if (!aux->verifier_zext)
emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
*pprog = prog;
}
@@@ -668,7 -675,8 +675,8 @@@ static inline void emit_ia32_alu_i(cons
/* ALU operation (64 bit) */
static inline void emit_ia32_alu_i64(const bool is64, const u8 op,
const u8 dst[], const u32 val,
- bool dstk, u8 **pprog)
+ bool dstk, u8 **pprog,
+ const struct bpf_prog_aux *aux)
{
u8 *prog = *pprog;
u32 hi = 0;
@@@ -679,7 -687,7 +687,7 @@@
emit_ia32_alu_i(is64, false, op, dst_lo, val, dstk, &prog);
if (is64)
emit_ia32_alu_i(is64, true, op, dst_hi, hi, dstk, &prog);
- else
+ else if (!aux->verifier_zext)
emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
*pprog = prog;
@@@ -724,6 -732,9 +732,6 @@@ static inline void emit_ia32_lsh_r64(co
{
u8 *prog = *pprog;
int cnt = 0;
- static int jmp_label1 = -1;
- static int jmp_label2 = -1;
- static int jmp_label3 = -1;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
@@@ -742,22 -753,78 +750,22 @@@
/* mov ecx,src_lo */
EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
- /* cmp ecx,32 */
- EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
- /* Jumps when >= 32 */
- if (is_imm8(jmp_label(jmp_label1, 2)))
- EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
- else
- EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
-
- /* < 32 */
- /* shl dreg_hi,cl */
- EMIT2(0xD3, add_1reg(0xE0, dreg_hi));
- /* mov ebx,dreg_lo */
- EMIT2(0x8B, add_2reg(0xC0, dreg_lo, IA32_EBX));
+ /* shld dreg_hi,dreg_lo,cl */
+ EMIT3(0x0F, 0xA5, add_2reg(0xC0, dreg_hi, dreg_lo));
/* shl dreg_lo,cl */
EMIT2(0xD3, add_1reg(0xE0, dreg_lo));
- /* IA32_ECX = -IA32_ECX + 32 */
- /* neg ecx */
- EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
- /* add ecx,32 */
- EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
-
- /* shr ebx,cl */
- EMIT2(0xD3, add_1reg(0xE8, IA32_EBX));
- /* or dreg_hi,ebx */
- EMIT2(0x09, add_2reg(0xC0, dreg_hi, IA32_EBX));
-
- /* goto out; */
- if (is_imm8(jmp_label(jmp_label3, 2)))
- EMIT2(0xEB, jmp_label(jmp_label3, 2));
- else
- EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
-
- /* >= 32 */
- if (jmp_label1 == -1)
- jmp_label1 = cnt;
+ /* if ecx >= 32, mov dreg_lo into dreg_hi and clear dreg_lo */
- /* cmp ecx,64 */
- EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
- /* Jumps when >= 64 */
- if (is_imm8(jmp_label(jmp_label2, 2)))
- EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
- else
- EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
+ /* cmp ecx,32 */
+ EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
+ /* skip the next two instructions (4 bytes) when < 32 */
+ EMIT2(IA32_JB, 4);
- /* >= 32 && < 64 */
- /* sub ecx,32 */
- EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
- /* shl dreg_lo,cl */
- EMIT2(0xD3, add_1reg(0xE0, dreg_lo));
/* mov dreg_hi,dreg_lo */
EMIT2(0x89, add_2reg(0xC0, dreg_hi, dreg_lo));
-
- /* xor dreg_lo,dreg_lo */
- EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
-
- /* goto out; */
- if (is_imm8(jmp_label(jmp_label3, 2)))
- EMIT2(0xEB, jmp_label(jmp_label3, 2));
- else
- EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
-
- /* >= 64 */
- if (jmp_label2 == -1)
- jmp_label2 = cnt;
/* xor dreg_lo,dreg_lo */
EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
- /* xor dreg_hi,dreg_hi */
- EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
-
- if (jmp_label3 == -1)
- jmp_label3 = cnt;
if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */
@@@ -777,6 -844,9 +785,6 @@@ static inline void emit_ia32_arsh_r64(c
{
u8 *prog = *pprog;
int cnt = 0;
- static int jmp_label1 = -1;
- static int jmp_label2 = -1;
- static int jmp_label3 = -1;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
@@@ -795,22 -865,78 +803,22 @@@
/* mov ecx,src_lo */
EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
- /* cmp ecx,32 */
- EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
- /* Jumps when >= 32 */
- if (is_imm8(jmp_label(jmp_label1, 2)))
- EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
- else
- EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
-
- /* < 32 */
- /* lshr dreg_lo,cl */
- EMIT2(0xD3, add_1reg(0xE8, dreg_lo));
- /* mov ebx,dreg_hi */
- EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
- /* ashr dreg_hi,cl */
+ /* shrd dreg_lo,dreg_hi,cl */
+ EMIT3(0x0F, 0xAD, add_2reg(0xC0, dreg_lo, dreg_hi));
+ /* sar dreg_hi,cl */
EMIT2(0xD3, add_1reg(0xF8, dreg_hi));
- /* IA32_ECX = -IA32_ECX + 32 */
- /* neg ecx */
- EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
- /* add ecx,32 */
- EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
-
- /* shl ebx,cl */
- EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
- /* or dreg_lo,ebx */
- EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
-
- /* goto out; */
- if (is_imm8(jmp_label(jmp_label3, 2)))
- EMIT2(0xEB, jmp_label(jmp_label3, 2));
- else
- EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
-
- /* >= 32 */
- if (jmp_label1 == -1)
- jmp_label1 = cnt;
+ /* if ecx >= 32, mov dreg_hi to dreg_lo and set/clear dreg_hi depending on sign */
- /* cmp ecx,64 */
- EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
- /* Jumps when >= 64 */
- if (is_imm8(jmp_label(jmp_label2, 2)))
- EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
- else
- EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
+ /* cmp ecx,32 */
+ EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
+ /* skip the next two instructions (5 bytes) when < 32 */
+ EMIT2(IA32_JB, 5);
- /* >= 32 && < 64 */
- /* sub ecx,32 */
- EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
- /* ashr dreg_hi,cl */
- EMIT2(0xD3, add_1reg(0xF8, dreg_hi));
/* mov dreg_lo,dreg_hi */
EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
-
- /* ashr dreg_hi,imm8 */
- EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
-
- /* goto out; */
- if (is_imm8(jmp_label(jmp_label3, 2)))
- EMIT2(0xEB, jmp_label(jmp_label3, 2));
- else
- EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
-
- /* >= 64 */
- if (jmp_label2 == -1)
- jmp_label2 = cnt;
- /* ashr dreg_hi,imm8 */
+ /* sar dreg_hi,31 */
EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
- /* mov dreg_lo,dreg_hi */
- EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
-
- if (jmp_label3 == -1)
- jmp_label3 = cnt;
if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */
@@@ -830,6 -956,9 +838,6 @@@ static inline void emit_ia32_rsh_r64(co
{
u8 *prog = *pprog;
int cnt = 0;
- static int jmp_label1 = -1;
- static int jmp_label2 = -1;
- static int jmp_label3 = -1;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
@@@ -848,23 -977,77 +856,23 @@@
/* mov ecx,src_lo */
EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
- /* cmp ecx,32 */
- EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
- /* Jumps when >= 32 */
- if (is_imm8(jmp_label(jmp_label1, 2)))
- EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
- else
- EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
-
- /* < 32 */
- /* lshr dreg_lo,cl */
- EMIT2(0xD3, add_1reg(0xE8, dreg_lo));
- /* mov ebx,dreg_hi */
- EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
+ /* shrd dreg_lo,dreg_hi,cl */
+ EMIT3(0x0F, 0xAD, add_2reg(0xC0, dreg_lo, dreg_hi));
/* shr dreg_hi,cl */
EMIT2(0xD3, add_1reg(0xE8, dreg_hi));
- /* IA32_ECX = -IA32_ECX + 32 */
- /* neg ecx */
- EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
- /* add ecx,32 */
- EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
-
- /* shl ebx,cl */
- EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
- /* or dreg_lo,ebx */
- EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
+ /* if ecx >= 32, mov dreg_hi to dreg_lo and clear dreg_hi */
- /* goto out; */
- if (is_imm8(jmp_label(jmp_label3, 2)))
- EMIT2(0xEB, jmp_label(jmp_label3, 2));
- else
- EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
-
- /* >= 32 */
- if (jmp_label1 == -1)
- jmp_label1 = cnt;
- /* cmp ecx,64 */
- EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
- /* Jumps when >= 64 */
- if (is_imm8(jmp_label(jmp_label2, 2)))
- EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
- else
- EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
+ /* cmp ecx,32 */
+ EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
+ /* skip the next two instructions (4 bytes) when < 32 */
+ EMIT2(IA32_JB, 4);
- /* >= 32 && < 64 */
- /* sub ecx,32 */
- EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
- /* shr dreg_hi,cl */
- EMIT2(0xD3, add_1reg(0xE8, dreg_hi));
/* mov dreg_lo,dreg_hi */
EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
/* xor dreg_hi,dreg_hi */
EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
- /* goto out; */
- if (is_imm8(jmp_label(jmp_label3, 2)))
- EMIT2(0xEB, jmp_label(jmp_label3, 2));
- else
- EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
-
- /* >= 64 */
- if (jmp_label2 == -1)
- jmp_label2 = cnt;
- /* xor dreg_lo,dreg_lo */
- EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
- /* xor dreg_hi,dreg_hi */
- EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
-
- if (jmp_label3 == -1)
- jmp_label3 = cnt;
-
if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
@@@ -894,10 -1077,27 +902,10 @@@ static inline void emit_ia32_lsh_i64(co
}
/* Do LSH operation */
if (val < 32) {
- /* shl dreg_hi,imm8 */
- EMIT3(0xC1, add_1reg(0xE0, dreg_hi), val);
- /* mov ebx,dreg_lo */
- EMIT2(0x8B, add_2reg(0xC0, dreg_lo, IA32_EBX));
+ /* shld dreg_hi,dreg_lo,imm8 */
+ EMIT4(0x0F, 0xA4, add_2reg(0xC0, dreg_hi, dreg_lo), val);
/* shl dreg_lo,imm8 */
EMIT3(0xC1, add_1reg(0xE0, dreg_lo), val);
-
- /* IA32_ECX = 32 - val */
- /* mov ecx,val */
- EMIT2(0xB1, val);
- /* movzx ecx,ecx */
- EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
- /* neg ecx */
- EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
- /* add ecx,32 */
- EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
-
- /* shr ebx,cl */
- EMIT2(0xD3, add_1reg(0xE8, IA32_EBX));
- /* or dreg_hi,ebx */
- EMIT2(0x09, add_2reg(0xC0, dreg_hi, IA32_EBX));
} else if (val >= 32 && val < 64) {
u32 value = val - 32;
@@@ -943,10 -1143,27 +951,10 @@@ static inline void emit_ia32_rsh_i64(co
/* Do RSH operation */
if (val < 32) {
- /* shr dreg_lo,imm8 */
- EMIT3(0xC1, add_1reg(0xE8, dreg_lo), val);
- /* mov ebx,dreg_hi */
- EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
+ /* shrd dreg_lo,dreg_hi,imm8 */
+ EMIT4(0x0F, 0xAC, add_2reg(0xC0, dreg_lo, dreg_hi), val);
/* shr dreg_hi,imm8 */
EMIT3(0xC1, add_1reg(0xE8, dreg_hi), val);
-
- /* IA32_ECX = 32 - val */
- /* mov ecx,val */
- EMIT2(0xB1, val);
- /* movzx ecx,ecx */
- EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
- /* neg ecx */
- EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
- /* add ecx,32 */
- EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
-
- /* shl ebx,cl */
- EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
- /* or dreg_lo,ebx */
- EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
} else if (val >= 32 && val < 64) {
u32 value = val - 32;
@@@ -991,10 -1208,27 +999,10 @@@ static inline void emit_ia32_arsh_i64(c
}
/* Do RSH operation */
if (val < 32) {
- /* shr dreg_lo,imm8 */
- EMIT3(0xC1, add_1reg(0xE8, dreg_lo), val);
- /* mov ebx,dreg_hi */
- EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
+ /* shrd dreg_lo,dreg_hi,imm8 */
+ EMIT4(0x0F, 0xAC, add_2reg(0xC0, dreg_lo, dreg_hi), val);
/* ashr dreg_hi,imm8 */
EMIT3(0xC1, add_1reg(0xF8, dreg_hi), val);
-
- /* IA32_ECX = 32 - val */
- /* mov ecx,val */
- EMIT2(0xB1, val);
- /* movzx ecx,ecx */
- EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
- /* neg ecx */
- EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
- /* add ecx,32 */
- EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
-
- /* shl ebx,cl */
- EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
- /* or dreg_lo,ebx */
- EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
} else if (val >= 32 && val < 64) {
u32 value = val - 32;
@@@ -1487,8 -1721,13 +1495,13 @@@ static int do_jit(struct bpf_prog *bpf_
case BPF_ALU64 | BPF_MOV | BPF_X:
switch (BPF_SRC(code)) {
case BPF_X:
- emit_ia32_mov_r64(is64, dst, src, dstk,
- sstk, &prog);
+ if (imm32 == 1) {
+ /* Special mov32 for zext. */
+ emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
+ break;
+ }
+ emit_ia32_mov_r64(is64, dst, src, dstk, sstk,
+ &prog, bpf_prog->aux);
break;
case BPF_K:
/* Sign-extend immediate value to dst reg */
@@@ -1528,11 -1767,13 +1541,13 @@@
switch (BPF_SRC(code)) {
case BPF_X:
emit_ia32_alu_r64(is64, BPF_OP(code), dst,
- src, dstk, sstk, &prog);
+ src, dstk, sstk, &prog,
+ bpf_prog->aux);
break;
case BPF_K:
emit_ia32_alu_i64(is64, BPF_OP(code), dst,
- imm32, dstk, &prog);
+ imm32, dstk, &prog,
+ bpf_prog->aux);
break;
}
break;
@@@ -1551,7 -1792,8 +1566,8 @@@
false, &prog);
break;
}
- emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
+ if (!bpf_prog->aux->verifier_zext)
+ emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
break;
case BPF_ALU | BPF_LSH | BPF_X:
case BPF_ALU | BPF_RSH | BPF_X:
@@@ -1571,7 -1813,8 +1587,8 @@@
&prog);
break;
}
- emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
+ if (!bpf_prog->aux->verifier_zext)
+ emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
break;
/* dst = dst / src(imm) */
/* dst = dst % src(imm) */
@@@ -1593,7 -1836,8 +1610,8 @@@
&prog);
break;
}
- emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
+ if (!bpf_prog->aux->verifier_zext)
+ emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
break;
case BPF_ALU64 | BPF_DIV | BPF_K:
case BPF_ALU64 | BPF_DIV | BPF_X:
@@@ -1610,7 -1854,8 +1628,8 @@@
EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32);
emit_ia32_shift_r(BPF_OP(code), dst_lo, IA32_ECX, dstk,
false, &prog);
- emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
+ if (!bpf_prog->aux->verifier_zext)
+ emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
break;
/* dst = dst << imm */
case BPF_ALU64 | BPF_LSH | BPF_K:
@@@ -1646,7 -1891,8 +1665,8 @@@
case BPF_ALU | BPF_NEG:
emit_ia32_alu_i(is64, false, BPF_OP(code),
dst_lo, 0, dstk, &prog);
- emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
+ if (!bpf_prog->aux->verifier_zext)
+ emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
break;
/* dst = ~dst (64 bit) */
case BPF_ALU64 | BPF_NEG:
@@@ -1666,11 -1912,13 +1686,13 @@@
break;
/* dst = htole(dst) */
case BPF_ALU | BPF_END | BPF_FROM_LE:
- emit_ia32_to_le_r64(dst, imm32, dstk, &prog);
+ emit_ia32_to_le_r64(dst, imm32, dstk, &prog,
+ bpf_prog->aux);
break;
/* dst = htobe(dst) */
case BPF_ALU | BPF_END | BPF_FROM_BE:
- emit_ia32_to_be_r64(dst, imm32, dstk, &prog);
+ emit_ia32_to_be_r64(dst, imm32, dstk, &prog,
+ bpf_prog->aux);
break;
/* dst = imm64 */
case BPF_LD | BPF_IMM | BPF_DW: {
@@@ -1825,6 -2073,8 +1847,8 @@@
case BPF_B:
case BPF_H:
case BPF_W:
+ if (!bpf_prog->aux->verifier_zext)
+ break;
if (dstk) {
EMIT3(0xC7, add_1reg(0x40, IA32_EBP),
STACK_VAR(dst_hi));
@@@ -2249,6 -2499,11 +2273,11 @@@ notyet
return proglen;
}
+ bool bpf_jit_needs_zext(void)
+ {
+ return true;
+ }
+
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
struct bpf_binary_header *header = NULL;
diff --combined drivers/infiniband/hw/cxgb4/cm.c
index 0147c407ac6c,09fcfc9e052d..e87fc0408470
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@@ -953,7 -953,7 +953,7 @@@ static int send_mpa_req(struct c4iw_ep
mpalen = sizeof(*mpa) + ep->plen;
if (mpa_rev_to_use == 2)
mpalen += sizeof(struct mpa_v2_conn_params);
- wrlen = roundup(mpalen + sizeof *req, 16);
+ wrlen = roundup(mpalen + sizeof(*req), 16);
skb = get_skb(skb, wrlen, GFP_KERNEL);
if (!skb) {
connect_reply_upcall(ep, -ENOMEM);
@@@ -997,9 -997,8 +997,9 @@@
}
if (mpa_rev_to_use == 2) {
- mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
- sizeof (struct mpa_v2_conn_params));
+ mpa->private_data_size =
+ htons(ntohs(mpa->private_data_size) +
+ sizeof(struct mpa_v2_conn_params));
pr_debug("initiator ird %u ord %u\n", ep->ird,
ep->ord);
mpa_v2_params.ird = htons((u16)ep->ird);
@@@ -1058,7 -1057,7 +1058,7 @@@ static int send_mpa_reject(struct c4iw_
mpalen = sizeof(*mpa) + plen;
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
mpalen += sizeof(struct mpa_v2_conn_params);
- wrlen = roundup(mpalen + sizeof *req, 16);
+ wrlen = roundup(mpalen + sizeof(*req), 16);
skb = get_skb(NULL, wrlen, GFP_KERNEL);
if (!skb) {
@@@ -1089,9 -1088,8 +1089,9 @@@
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
mpa->flags |= MPA_ENHANCED_RDMA_CONN;
- mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
- sizeof (struct mpa_v2_conn_params));
+ mpa->private_data_size =
+ htons(ntohs(mpa->private_data_size) +
+ sizeof(struct mpa_v2_conn_params));
mpa_v2_params.ird = htons(((u16)ep->ird) |
(peer2peer ? MPA_V2_PEER2PEER_MODEL :
0));
@@@ -1138,7 -1136,7 +1138,7 @@@ static int send_mpa_reply(struct c4iw_e
mpalen = sizeof(*mpa) + plen;
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
mpalen += sizeof(struct mpa_v2_conn_params);
- wrlen = roundup(mpalen + sizeof *req, 16);
+ wrlen = roundup(mpalen + sizeof(*req), 16);
skb = get_skb(NULL, wrlen, GFP_KERNEL);
if (!skb) {
@@@ -1173,9 -1171,8 +1173,9 @@@
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
mpa->flags |= MPA_ENHANCED_RDMA_CONN;
- mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
- sizeof (struct mpa_v2_conn_params));
+ mpa->private_data_size =
+ htons(ntohs(mpa->private_data_size) +
+ sizeof(struct mpa_v2_conn_params));
mpa_v2_params.ird = htons((u16)ep->ird);
mpa_v2_params.ord = htons((u16)ep->ord);
if (peer2peer && (ep->mpa_attr.p2p_type !=
@@@ -3233,17 -3230,22 +3233,22 @@@ static int pick_local_ipaddrs(struct c4
int found = 0;
struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
+ const struct in_ifaddr *ifa;
ind = in_dev_get(dev->rdev.lldi.ports[0]);
if (!ind)
return -EADDRNOTAVAIL;
- for_primary_ifa(ind) {
+ rcu_read_lock();
+ in_dev_for_each_ifa_rcu(ifa, ind) {
+ if (ifa->ifa_flags & IFA_F_SECONDARY)
+ continue;
laddr->sin_addr.s_addr = ifa->ifa_address;
raddr->sin_addr.s_addr = ifa->ifa_address;
found = 1;
break;
}
- endfor_ifa(ind);
+ rcu_read_unlock();
+
in_dev_put(ind);
return found ? 0 : -EADDRNOTAVAIL;
}
diff --combined drivers/infiniband/hw/mlx5/cq.c
index 22230fd7d741,4efbbd2fce0c..e29987cdaeff
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@@ -37,7 -37,7 +37,7 @@@
#include "mlx5_ib.h"
#include "srq.h"
- static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
+ static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
{
struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
@@@ -522,9 -522,9 +522,9 @@@ repoll
case MLX5_CQE_SIG_ERR:
sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
- read_lock(&dev->mdev->priv.mkey_table.lock);
- mmkey = __mlx5_mr_lookup(dev->mdev,
- mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
+ xa_lock(&dev->mdev->priv.mkey_table);
+ mmkey = xa_load(&dev->mdev->priv.mkey_table,
+ mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
mr = to_mibmr(mmkey);
get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
mr->sig->sig_err_exists = true;
@@@ -537,7 -537,7 +537,7 @@@
mr->sig->err_item.expected,
mr->sig->err_item.actual);
- read_unlock(&dev->mdev->priv.mkey_table.lock);
+ xa_unlock(&dev->mdev->priv.mkey_table);
goto repoll;
}
@@@ -884,14 -884,15 +884,15 @@@ static void notify_soft_wc_handler(stru
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
-struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_udata *udata)
+int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata)
{
+ struct ib_device *ibdev = ibcq->device;
int entries = attr->cqe;
int vector = attr->comp_vector;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
- struct mlx5_ib_cq *cq;
+ struct mlx5_ib_cq *cq = to_mcq(ibcq);
int uninitialized_var(index);
int uninitialized_var(inlen);
u32 *cqb = NULL;
@@@ -903,14 -904,18 +904,14 @@@
if (entries < 0 ||
(entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (check_cq_create_flags(attr->flags))
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
entries = roundup_pow_of_two(entries + 1);
if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
- return ERR_PTR(-EINVAL);
-
- cq = kzalloc(sizeof(*cq), GFP_KERNEL);
- if (!cq)
- return ERR_PTR(-ENOMEM);
+ return -EINVAL;
cq->ibcq.cqe = entries - 1;
mutex_init(&cq->resize_mutex);
@@@ -925,13 -930,13 +926,13 @@@
err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size,
&index, &inlen);
if (err)
- goto err_create;
+ return err;
} else {
cqe_size = cache_line_size() == 128 ? 128 : 64;
err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
&index, &inlen);
if (err)
- goto err_create;
+ return err;
INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
}
@@@ -954,7 -959,7 +955,7 @@@
if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
MLX5_SET(cqc, cqc, oi, 1);
- err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
+ err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
if (err)
goto err_cqb;
@@@ -976,7 -981,7 +977,7 @@@
kvfree(cqb);
- return &cq->ibcq;
+ return 0;
err_cmd:
mlx5_core_destroy_cq(dev->mdev, &cq->mcq);
@@@ -987,10 -992,14 +988,10 @@@ err_cqb
destroy_cq_user(cq, udata);
else
destroy_cq_kernel(dev, cq);
-
-err_create:
- kfree(cq);
-
- return ERR_PTR(err);
+ return err;
}
-int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(cq->device);
struct mlx5_ib_cq *mcq = to_mcq(cq);
@@@ -1000,6 -1009,10 +1001,6 @@@
destroy_cq_user(mcq, udata);
else
destroy_cq_kernel(dev, mcq);
-
- kfree(mcq);
-
- return 0;
}
static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
@@@ -1125,6 -1138,11 +1126,6 @@@ static int resize_user(struct mlx5_ib_d
return 0;
}
-static void un_resize_user(struct mlx5_ib_cq *cq)
-{
- ib_umem_release(cq->resize_umem);
-}
-
static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
int entries, int cqe_size)
{
@@@ -1147,6 -1165,12 +1148,6 @@@ ex
return err;
}
-static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
-{
- free_cq_buf(dev, cq->resize_buf);
- cq->resize_buf = NULL;
-}
-
static int copy_resize_cqes(struct mlx5_ib_cq *cq)
{
struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
@@@ -1327,11 -1351,10 +1328,11 @@@ ex_alloc
kvfree(in);
ex_resize:
- if (udata)
- un_resize_user(cq);
- else
- un_resize_kernel(dev, cq);
+ ib_umem_release(cq->resize_umem);
+ if (!udata) {
+ free_cq_buf(dev, cq->resize_buf);
+ cq->resize_buf = NULL;
+ }
ex:
mutex_unlock(&cq->resize_mutex);
return err;
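The cq.c and mr.c hunks in this merge convert mdev->priv.mkey_table from a radix tree guarded by an rwlock to an XArray: lookups become xa_load() under xa_lock()/xa_unlock(), and insertions become __xa_store() under xa_lock_irqsave(). A minimal sketch of both patterns, assuming the mlx5 types from the diff (use_mkey() is a hypothetical consumer):

#include <linux/xarray.h>
#include <linux/mlx5/driver.h>          /* struct mlx5_core_mkey */

static void use_mkey(struct mlx5_core_mkey *mmkey);     /* hypothetical */

/* Read side: the XArray lock pins the entry while we dereference it,
 * replacing read_lock(&table->lock) + __mlx5_mr_lookup(). */
static void mkey_lookup(struct xarray *mkeys, unsigned long base_key)
{
        struct mlx5_core_mkey *mmkey;

        xa_lock(mkeys);
        mmkey = xa_load(mkeys, base_key);
        if (mmkey)
                use_mkey(mmkey);        /* stable until xa_unlock() */
        xa_unlock(mkeys);
}

/* Write side: __xa_store() requires the caller to hold the lock;
 * xa_err() turns an encoded error entry into a plain errno. */
static int mkey_insert(struct xarray *mkeys, unsigned long base_key,
                       struct mlx5_core_mkey *mmkey)
{
        unsigned long flags;
        int err;

        xa_lock_irqsave(mkeys, flags);
        err = xa_err(__xa_store(mkeys, base_key, mmkey, GFP_ATOMIC));
        xa_unlock_irqrestore(mkeys, flags);

        return err;
}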
diff --combined drivers/infiniband/hw/mlx5/main.c
index 05d2bfcb3d60,ba312bf59c7a..9db8c06aa01e
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@@ -52,7 -52,6 +52,7 @@@
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
+#include <linux/mlx5/eswitch.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
@@@ -889,7 -888,7 +889,7 @@@ static int mlx5_ib_query_device(struct
}
props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
if (MLX5_CAP_GEN(mdev, sho)) {
- props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
+ props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER;
/* At this stage no support for signature handover */
props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
IB_PROT_T10DIF_TYPE_2 |
@@@ -1009,8 -1008,6 +1009,8 @@@
props->max_srq_sge = max_rq_sg - 1;
props->max_fast_reg_page_list_len =
1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
+ props->max_pi_fast_reg_page_list_len =
+ props->max_fast_reg_page_list_len / 2;
get_atomic_caps_qp(dev, props);
props->masked_atomic_cap = IB_ATOMIC_NONE;
props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
@@@ -2669,11 -2666,15 +2669,15 @@@ int parse_flow_flow_action(struct mlx5_
}
}
- static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
- u32 *match_v, const union ib_flow_spec *ib_spec,
+ static int parse_flow_attr(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_spec *spec,
+ const union ib_flow_spec *ib_spec,
const struct ib_flow_attr *flow_attr,
struct mlx5_flow_act *action, u32 prev_type)
{
+ struct mlx5_flow_context *flow_context = &spec->flow_context;
+ u32 *match_c = spec->match_criteria;
+ u32 *match_v = spec->match_value;
void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
misc_parameters);
void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
@@@ -2992,8 -2993,8 +2996,8 @@@
if (ib_spec->flow_tag.tag_id >= BIT(24))
return -EINVAL;
- action->flow_tag = ib_spec->flow_tag.tag_id;
- action->flags |= FLOW_ACT_HAS_TAG;
+ flow_context->flow_tag = ib_spec->flow_tag.tag_id;
+ flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
break;
case IB_FLOW_SPEC_ACTION_DROP:
if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
@@@ -3087,7 -3088,8 +3091,8 @@@ is_valid_esp_aes_gcm(struct mlx5_core_d
return VALID_SPEC_NA;
return is_crypto && is_ipsec &&
- (!egress || (!is_drop && !(flow_act->flags & FLOW_ACT_HAS_TAG))) ?
+ (!egress || (!is_drop &&
+ !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ?
VALID_SPEC_VALID : VALID_SPEC_INVALID;
}
@@@ -3255,14 -3257,11 +3260,14 @@@ static struct mlx5_ib_flow_prio *get_fl
int max_table_size;
int num_entries;
int num_groups;
+ bool esw_encap;
u32 flags = 0;
int priority;
max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
log_max_ft_size));
+ esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
+ DEVLINK_ESWITCH_ENCAP_MODE_NONE;
if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
enum mlx5_flow_namespace_type fn_type;
@@@ -3275,10 -3274,10 +3280,10 @@@
if (ft_type == MLX5_IB_FT_RX) {
fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
prio = &dev->flow_db->prios[priority];
- if (!dev->is_rep &&
+ if (!dev->is_rep && !esw_encap &&
MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
- if (!dev->is_rep &&
+ if (!dev->is_rep && !esw_encap &&
MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
reformat_l3_tunnel_to_l2))
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
@@@ -3288,7 -3287,7 +3293,7 @@@
log_max_ft_size));
fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
prio = &dev->flow_db->egress_prios[priority];
- if (!dev->is_rep &&
+ if (!dev->is_rep && !esw_encap &&
MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
}
@@@ -3470,6 -3469,37 +3475,37 @@@ free
return ret;
}
+ static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_eswitch_rep *rep)
+ {
+ struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
+ void *misc;
+
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters_2);
+
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_for_match(esw,
+ rep->vport));
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters_2);
+
+ MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);
+ } else {
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+
+ MLX5_SET(fte_match_set_misc, misc, source_port, rep->vport);
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
+
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+ }
+ }
+
static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_prio *ft_prio,
const struct ib_flow_attr *flow_attr,
@@@ -3479,7 -3509,7 +3515,7 @@@
{
struct mlx5_flow_table *ft = ft_prio->flow_table;
struct mlx5_ib_flow_handler *handler;
- struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
+ struct mlx5_flow_act flow_act = {};
struct mlx5_flow_spec *spec;
struct mlx5_flow_destination dest_arr[2] = {};
struct mlx5_flow_destination *rule_dst = dest_arr;
@@@ -3510,8 -3540,7 +3546,7 @@@
}
for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
- err = parse_flow_attr(dev->mdev, spec->match_criteria,
- spec->match_value,
+ err = parse_flow_attr(dev->mdev, spec,
ib_flow, flow_attr, &flow_act,
prev_type);
if (err < 0)
@@@ -3525,19 -3554,15 +3560,15 @@@
set_underlay_qp(dev, spec, underlay_qpn);
if (dev->is_rep) {
- void *misc;
+ struct mlx5_eswitch_rep *rep;
- if (!dev->port[flow_attr->port - 1].rep) {
+ rep = dev->port[flow_attr->port - 1].rep;
+ if (!rep) {
err = -EINVAL;
goto free;
}
- misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
- misc_parameters);
- MLX5_SET(fte_match_set_misc, misc, source_port,
- dev->port[flow_attr->port - 1].rep->vport);
- misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- misc_parameters);
- MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+ mlx5_ib_set_rule_source_port(dev, spec, rep);
}
spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
@@@ -3578,11 -3603,11 +3609,11 @@@
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
}
- if ((flow_act.flags & FLOW_ACT_HAS_TAG) &&
+ if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG) &&
(flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
mlx5_ib_warn(dev, "Flow tag %u and attribute type %x aren't allowed in leftovers\n",
- flow_act.flow_tag, flow_attr->type);
+ spec->flow_context.flow_tag, flow_attr->type);
err = -EINVAL;
goto free;
}
@@@ -3898,7 -3923,6 +3929,7 @@@ _get_flow_table(struct mlx5_ib_dev *dev
struct mlx5_flow_namespace *ns = NULL;
struct mlx5_ib_flow_prio *prio = NULL;
int max_table_size = 0;
+ bool esw_encap;
u32 flags = 0;
int priority;
@@@ -3907,30 -3931,22 +3938,30 @@@
else
priority = ib_prio_to_core_prio(fs_matcher->priority, false);
+ esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
+ DEVLINK_ESWITCH_ENCAP_MODE_NONE;
if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
log_max_ft_size));
- if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap) && !esw_encap)
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
- reformat_l3_tunnel_to_l2))
+ reformat_l3_tunnel_to_l2) &&
+ !esw_encap)
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
} else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) {
max_table_size = BIT(
MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size));
- if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat) && !esw_encap)
flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
} else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) {
max_table_size = BIT(
MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size));
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, decap) && esw_encap)
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, reformat_l3_tunnel_to_l2) &&
+ esw_encap)
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
priority = FDB_BYPASS_PATH;
}
@@@ -3962,6 -3978,7 +3993,7 @@@ _create_raw_flow_rule(struct mlx5_ib_de
struct mlx5_ib_flow_prio *ft_prio,
struct mlx5_flow_destination *dst,
struct mlx5_ib_flow_matcher *fs_matcher,
+ struct mlx5_flow_context *flow_context,
struct mlx5_flow_act *flow_act,
void *cmd_in, int inlen,
int dst_num)
@@@ -3984,6 -4001,7 +4016,7 @@@
memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
fs_matcher->mask_len);
spec->match_criteria_enable = fs_matcher->match_criteria_enable;
+ spec->flow_context = *flow_context;
handler->rule = mlx5_add_flow_rules(ft, spec,
flow_act, dst, dst_num);
@@@ -4048,6 -4066,7 +4081,7 @@@ static bool raw_fs_is_multicast(struct
struct mlx5_ib_flow_handler *
mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_matcher *fs_matcher,
+ struct mlx5_flow_context *flow_context,
struct mlx5_flow_act *flow_act,
u32 counter_id,
void *cmd_in, int inlen, int dest_id,
@@@ -4100,7 -4119,8 +4134,8 @@@
dst_num++;
}
- handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, flow_act,
+ handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher,
+ flow_context, flow_act,
cmd_in, inlen, dst_num);
if (IS_ERR(handler)) {
@@@ -4472,7 -4492,7 +4507,7 @@@ static void mlx5_ib_handle_internal_err
* lock/unlock above locks. Now we need to arm all involved CQs.
*/
list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
- mcq->comp(mcq);
+ mcq->comp(mcq, NULL);
}
spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
}
@@@ -4906,19 -4926,18 +4941,19 @@@ static int create_dev_resources(struct
if (ret)
goto error0;
- devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL);
- if (IS_ERR(devr->c0)) {
- ret = PTR_ERR(devr->c0);
+ devr->c0 = rdma_zalloc_drv_obj(ibdev, ib_cq);
+ if (!devr->c0) {
+ ret = -ENOMEM;
goto error1;
}
- devr->c0->device = &dev->ib_dev;
- devr->c0->uobject = NULL;
- devr->c0->comp_handler = NULL;
- devr->c0->event_handler = NULL;
- devr->c0->cq_context = NULL;
+
+ devr->c0->device = &dev->ib_dev;
atomic_set(&devr->c0->usecnt, 0);
+ ret = mlx5_ib_create_cq(devr->c0, &cq_attr, NULL);
+ if (ret)
+ goto err_create_cq;
+
devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
if (IS_ERR(devr->x0)) {
ret = PTR_ERR(devr->x0);
@@@ -5010,8 -5029,6 +5045,8 @@@ error3
mlx5_ib_dealloc_xrcd(devr->x0, NULL);
error2:
mlx5_ib_destroy_cq(devr->c0, NULL);
+err_create_cq:
+ kfree(devr->c0);
error1:
mlx5_ib_dealloc_pd(devr->p0, NULL);
error0:
@@@ -5030,7 -5047,6 +5065,7 @@@ static void destroy_dev_resources(struc
mlx5_ib_dealloc_xrcd(devr->x0, NULL);
mlx5_ib_dealloc_xrcd(devr->x1, NULL);
mlx5_ib_destroy_cq(devr->c0, NULL);
+ kfree(devr->c0);
mlx5_ib_dealloc_pd(devr->p0, NULL);
kfree(devr->p0);
@@@ -6063,6 -6079,7 +6098,6 @@@ static int mlx5_ib_stage_init_init(stru
if (mlx5_use_mad_ifc(dev))
get_ext_port_caps(dev);
- dev->ib_dev.owner = THIS_MODULE;
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
dev->ib_dev.phys_port_cnt = dev->num_ports;
@@@ -6142,13 -6159,8 +6177,13 @@@ static void mlx5_ib_stage_flow_db_clean
}
static const struct ib_device_ops mlx5_ib_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_MLX5,
+ .uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION,
+
.add_gid = mlx5_ib_add_gid,
.alloc_mr = mlx5_ib_alloc_mr,
+ .alloc_mr_integrity = mlx5_ib_alloc_mr_integrity,
.alloc_pd = mlx5_ib_alloc_pd,
.alloc_ucontext = mlx5_ib_alloc_ucontext,
.attach_mcast = mlx5_ib_mcg_attach,
@@@ -6178,7 -6190,6 +6213,7 @@@
.get_dma_mr = mlx5_ib_get_dma_mr,
.get_link_layer = mlx5_ib_port_link_layer,
.map_mr_sg = mlx5_ib_map_mr_sg,
+ .map_mr_sg_pi = mlx5_ib_map_mr_sg_pi,
.mmap = mlx5_ib_mmap,
.modify_cq = mlx5_ib_modify_cq,
.modify_device = mlx5_ib_modify_device,
@@@ -6203,7 -6214,6 +6238,7 @@@
.resize_cq = mlx5_ib_resize_cq,
INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
@@@ -6246,6 -6256,7 +6281,6 @@@ static int mlx5_ib_stage_caps_init(stru
struct mlx5_core_dev *mdev = dev->mdev;
int err;
- dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
dev->ib_dev.uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
@@@ -6314,6 -6325,7 +6349,6 @@@
if (mlx5_accel_ipsec_device_caps(dev->mdev) &
MLX5_ACCEL_IPSEC_CAP_DEVICE)
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_flow_ipsec_ops);
- dev->ib_dev.driver_id = RDMA_DRIVER_MLX5;
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
@@@ -6802,7 -6814,7 +6837,7 @@@ static void *mlx5_ib_add(struct mlx5_co
printk_once(KERN_INFO "%s", mlx5_version);
if (MLX5_ESWITCH_MANAGER(mdev) &&
- mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
+ mlx5_ib_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
if (!mlx5_core_mp_enabled(mdev))
mlx5_ib_register_vport_reps(mdev);
return mdev;
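Two structural changes run through main.c above: the driver identity fields (.owner, .driver_id, .uverbs_abi_ver) move from struct ib_device into the const struct ib_device_ops, and the new INIT_RDMA_OBJ_SIZE(ib_cq, ...) entry lets the RDMA core allocate the CQ container, which is what allows mlx5_ib_create_cq() to take a pre-allocated ib_cq and return int. A condensed sketch of the resulting ops shape, reusing the names from the diff (assumes the mlx5_ib.h declarations from this merge):

static const struct ib_device_ops example_dev_ops = {
        .owner          = THIS_MODULE,
        .driver_id      = RDMA_DRIVER_MLX5,
        .uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION,

        /* The core allocates struct mlx5_ib_cq and passes in the
         * embedded ib_cq; create_cq only initializes it. */
        .create_cq      = mlx5_ib_create_cq,
        .destroy_cq     = mlx5_ib_destroy_cq,

        /* Size and offset information for the core-side allocation. */
        INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
};

Kernel-internal users follow the same path: create_dev_resources() above now obtains the CQ with rdma_zalloc_drv_obj(ibdev, ib_cq) and pairs mlx5_ib_destroy_cq() with an explicit kfree() of the container.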
diff --combined drivers/infiniband/hw/mlx5/mlx5_ib.h
index bdb83fc85f94,ee73dc122d28..305d26cdf7f3
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@@ -431,6 -431,9 +431,6 @@@ struct mlx5_ib_qp
int create_type;
- /* Store signature errors */
- bool signature_en;
-
struct list_head qps_list;
struct list_head cq_recv_list;
struct list_head cq_send_list;
@@@ -584,9 -587,6 +584,9 @@@ struct mlx5_ib_mr
void *descs;
dma_addr_t desc_map;
int ndescs;
+ int data_length;
+ int meta_ndescs;
+ int meta_length;
int max_descs;
int desc_size;
int access_mode;
@@@ -605,13 -605,6 +605,13 @@@
int access_flags; /* Needed for rereg MR */
struct mlx5_ib_mr *parent;
+ /* Needed for IB_MR_TYPE_INTEGRITY */
+ struct mlx5_ib_mr *pi_mr;
+ struct mlx5_ib_mr *klm_mr;
+ struct mlx5_ib_mr *mtt_mr;
+ u64 data_iova;
+ u64 pi_iova;
+
atomic_t num_leaf_free;
wait_queue_head_t q_leaf_free;
struct mlx5_async_work cb_work;
@@@ -985,7 -978,6 +985,6 @@@ struct mlx5_ib_dev
u16 devx_whitelist_uid;
struct mlx5_srq_table srq_table;
struct mlx5_async_ctx async_ctx;
- int free_port;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@@ -1123,9 -1115,10 +1122,9 @@@ int mlx5_ib_read_user_wqe_rq(struct mlx
int buflen, size_t *bc);
int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
void *buffer, int buflen, size_t *bc);
-struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_udata *udata);
-int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata);
+void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
@@@ -1155,15 -1148,8 +1154,15 @@@ int mlx5_ib_rereg_user_mr(struct ib_mr
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg, struct ib_udata *udata);
+struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
+ u32 max_num_sg,
+ u32 max_num_meta_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
+int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
+ int data_sg_nents, unsigned int *data_sg_offset,
+ struct scatterlist *meta_sg, int meta_sg_nents,
+ unsigned int *meta_sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad_hdr *in, size_t in_mad_size,
@@@ -1215,7 -1201,7 +1214,7 @@@ int mlx5_ib_check_mr_status(struct ib_m
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata);
-int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
+void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
@@@ -1330,6 -1316,7 +1329,7 @@@ extern const struct uapi_definition mlx
extern const struct uapi_definition mlx5_ib_flow_defs[];
struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
+ struct mlx5_flow_context *flow_context,
struct mlx5_flow_act *flow_act, u32 counter_id,
void *cmd_in, int inlen, int dest_id, int dest_type);
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
diff --combined drivers/infiniband/hw/mlx5/mr.c
index 6ac77e09a34a,83b452d977d4..20ece6e0b2fc
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@@ -130,7 -130,7 +130,7 @@@ static void reg_mr_callback(int status
struct mlx5_cache_ent *ent = &cache->ent[c];
u8 key;
unsigned long flags;
- struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
+ struct xarray *mkeys = &dev->mdev->priv.mkey_table;
int err;
spin_lock_irqsave(&ent->lock, flags);
@@@ -158,12 -158,12 +158,12 @@@
ent->size++;
spin_unlock_irqrestore(&ent->lock, flags);
- write_lock_irqsave(&table->lock, flags);
- err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
- &mr->mmkey);
+ xa_lock_irqsave(mkeys, flags);
+ err = xa_err(__xa_store(mkeys, mlx5_base_mkey(mr->mmkey.key),
+ &mr->mmkey, GFP_ATOMIC));
+ xa_unlock_irqrestore(mkeys, flags);
if (err)
pr_err("Error inserting to mkey tree. 0x%x\n", -err);
- write_unlock_irqrestore(&table->lock, flags);
if (!completion_done(&ent->compl))
complete(&ent->compl);
@@@ -1507,9 -1507,10 +1507,9 @@@ int mlx5_ib_rereg_user_mr(struct ib_mr
return 0;
err:
- if (mr->umem) {
- ib_umem_release(mr->umem);
- mr->umem = NULL;
- }
+ ib_umem_release(mr->umem);
+ mr->umem = NULL;
+
clean_mr(dev, mr);
return err;
}
@@@ -1605,9 -1606,8 +1605,9 @@@ static void dereg_mr(struct mlx5_ib_de
synchronize_srcu(&dev->mr_srcu);
/* Destroy all page mappings */
if (umem_odp->page_list)
- mlx5_ib_invalidate_range(umem_odp, ib_umem_start(umem),
- ib_umem_end(umem));
+ mlx5_ib_invalidate_range(umem_odp,
+ ib_umem_start(umem_odp),
+ ib_umem_end(umem_odp));
else
mlx5_ib_free_implicit_mr(mr);
/*
@@@ -1629,85 -1629,28 +1629,85 @@@
* remove the DMA mapping.
*/
mlx5_mr_cache_free(dev, mr);
- if (umem) {
- ib_umem_release(umem);
+ ib_umem_release(umem);
+ if (umem)
atomic_sub(npages, &dev->mdev->priv.reg_pages);
- }
+
if (!mr->allocated_from_cache)
kfree(mr);
}
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
- dereg_mr(to_mdev(ibmr->device), to_mmr(ibmr));
+ struct mlx5_ib_mr *mmr = to_mmr(ibmr);
+
+ if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
+ dereg_mr(to_mdev(mmr->mtt_mr->ibmr.device), mmr->mtt_mr);
+ dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr);
+ }
+
+ dereg_mr(to_mdev(ibmr->device), mmr);
+
return 0;
}
-struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
- u32 max_num_sg, struct ib_udata *udata)
+static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
+ int access_mode, int page_shift)
+{
+ void *mkc;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
+ MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
+ MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+ MLX5_SET(mkc, mkc, log_page_size, page_shift);
+}
+
+static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
+ int ndescs, int desc_size, int page_shift,
+ int access_mode, u32 *in, int inlen)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ int err;
+
+ mr->access_mode = access_mode;
+ mr->desc_size = desc_size;
+ mr->max_descs = ndescs;
+
+ err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size);
+ if (err)
+ return err;
+
+ mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift);
+
+ err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
+ if (err)
+ goto err_free_descs;
+
+ mr->mmkey.type = MLX5_MKEY_MR;
+ mr->ibmr.lkey = mr->mmkey.key;
+ mr->ibmr.rkey = mr->mmkey.key;
+
+ return 0;
+
+err_free_descs:
+ mlx5_free_priv_descs(mr);
+ return err;
+}
+
+static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
+ u32 max_num_sg, u32 max_num_meta_sg,
+ int desc_size, int access_mode)
+{
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
- int ndescs = ALIGN(max_num_sg, 4);
+ int ndescs = ALIGN(max_num_sg + max_num_meta_sg, 4);
+ int page_shift = 0;
struct mlx5_ib_mr *mr;
- void *mkc;
u32 *in;
int err;
@@@ -1715,168 -1658,99 +1715,168 @@@
if (!mr)
return ERR_PTR(-ENOMEM);
+ mr->ibmr.pd = pd;
+ mr->ibmr.device = pd->device;
+
in = kzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_free;
}
+ if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
+ page_shift = PAGE_SHIFT;
+
+ err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift,
+ access_mode, in, inlen);
+ if (err)
+ goto err_free_in;
+
+ mr->umem = NULL;
+ kfree(in);
+
+ return mr;
+
+err_free_in:
+ kfree(in);
+err_free:
+ kfree(mr);
+ return ERR_PTR(err);
+}
+
+static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
+ int ndescs, u32 *in, int inlen)
+{
+ return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt),
+ PAGE_SHIFT, MLX5_MKC_ACCESS_MODE_MTT, in,
+ inlen);
+}
+
+static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
+ int ndescs, u32 *in, int inlen)
+{
+ return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm),
+ 0, MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
+}
+
+static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
+ int max_num_sg, int max_num_meta_sg,
+ u32 *in, int inlen)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ u32 psv_index[2];
+ void *mkc;
+ int err;
+
+ mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
+ if (!mr->sig)
+ return -ENOMEM;
+
+ /* create mem & wire PSVs */
+ err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index);
+ if (err)
+ goto err_free_sig;
+
+ mr->sig->psv_memory.psv_idx = psv_index[0];
+ mr->sig->psv_wire.psv_idx = psv_index[1];
+
+ mr->sig->sig_status_checked = true;
+ mr->sig->sig_err_exists = false;
+ /* Next UMR, Arm SIGERR */
+ ++mr->sig->sigerr_count;
+ mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
+ sizeof(struct mlx5_klm),
+ MLX5_MKC_ACCESS_MODE_KLMS);
+ if (IS_ERR(mr->klm_mr)) {
+ err = PTR_ERR(mr->klm_mr);
+ goto err_destroy_psv;
+ }
+ mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
+ sizeof(struct mlx5_mtt),
+ MLX5_MKC_ACCESS_MODE_MTT);
+ if (IS_ERR(mr->mtt_mr)) {
+ err = PTR_ERR(mr->mtt_mr);
+ goto err_free_klm_mr;
+ }
+
+ /* Set bsf descriptors for mkey */
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
- MLX5_SET(mkc, mkc, free, 1);
- MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
- MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, mkc, bsf_en, 1);
+ MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
- if (mr_type == IB_MR_TYPE_MEM_REG) {
- mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
- MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
- err = mlx5_alloc_priv_descs(pd->device, mr,
- ndescs, sizeof(struct mlx5_mtt));
- if (err)
- goto err_free_in;
+ err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0,
+ MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
+ if (err)
+ goto err_free_mtt_mr;
- mr->desc_size = sizeof(struct mlx5_mtt);
- mr->max_descs = ndescs;
- } else if (mr_type == IB_MR_TYPE_SG_GAPS) {
- mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
+ return 0;
- err = mlx5_alloc_priv_descs(pd->device, mr,
- ndescs, sizeof(struct mlx5_klm));
- if (err)
- goto err_free_in;
- mr->desc_size = sizeof(struct mlx5_klm);
- mr->max_descs = ndescs;
- } else if (mr_type == IB_MR_TYPE_SIGNATURE) {
- u32 psv_index[2];
-
- MLX5_SET(mkc, mkc, bsf_en, 1);
- MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
- mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
- if (!mr->sig) {
- err = -ENOMEM;
- goto err_free_in;
- }
+err_free_mtt_mr:
+ dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr);
+ mr->mtt_mr = NULL;
+err_free_klm_mr:
+ dereg_mr(to_mdev(mr->klm_mr->ibmr.device), mr->klm_mr);
+ mr->klm_mr = NULL;
+err_destroy_psv:
+ if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
+ mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
+ mr->sig->psv_memory.psv_idx);
+ if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
+ mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
+ mr->sig->psv_wire.psv_idx);
+err_free_sig:
+ kfree(mr->sig);
- /* create mem & wire PSVs */
- err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
- 2, psv_index);
- if (err)
- goto err_free_sig;
+ return err;
+}
+
+static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type, u32 max_num_sg,
+ u32 max_num_meta_sg)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ int ndescs = ALIGN(max_num_sg, 4);
+ struct mlx5_ib_mr *mr;
+ u32 *in;
+ int err;
- mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
- mr->sig->psv_memory.psv_idx = psv_index[0];
- mr->sig->psv_wire.psv_idx = psv_index[1];
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
- mr->sig->sig_status_checked = true;
- mr->sig->sig_err_exists = false;
- /* Next UMR, Arm SIGERR */
- ++mr->sig->sigerr_count;
- } else {
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ mr->ibmr.device = pd->device;
+ mr->umem = NULL;
+
+ switch (mr_type) {
+ case IB_MR_TYPE_MEM_REG:
+ err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen);
+ break;
+ case IB_MR_TYPE_SG_GAPS:
+ err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen);
+ break;
+ case IB_MR_TYPE_INTEGRITY:
+ err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg,
+ max_num_meta_sg, in, inlen);
+ break;
+ default:
mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
err = -EINVAL;
- goto err_free_in;
}
- MLX5_SET(mkc, mkc, access_mode_1_0, mr->access_mode & 0x3);
- MLX5_SET(mkc, mkc, access_mode_4_2, (mr->access_mode >> 2) & 0x7);
- MLX5_SET(mkc, mkc, umr_en, 1);
-
- mr->ibmr.device = pd->device;
- err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
if (err)
- goto err_destroy_psv;
+ goto err_free_in;
- mr->mmkey.type = MLX5_MKEY_MR;
- mr->ibmr.lkey = mr->mmkey.key;
- mr->ibmr.rkey = mr->mmkey.key;
- mr->umem = NULL;
kfree(in);
return &mr->ibmr;
-err_destroy_psv:
- if (mr->sig) {
- if (mlx5_core_destroy_psv(dev->mdev,
- mr->sig->psv_memory.psv_idx))
- mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
- mr->sig->psv_memory.psv_idx);
- if (mlx5_core_destroy_psv(dev->mdev,
- mr->sig->psv_wire.psv_idx))
- mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
- mr->sig->psv_wire.psv_idx);
- }
- mlx5_free_priv_descs(mr);
-err_free_sig:
- kfree(mr->sig);
err_free_in:
kfree(in);
err_free:
@@@ -1884,19 -1758,6 +1884,19 @@@
return ERR_PTR(err);
}
+struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg, struct ib_udata *udata)
+{
+ return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0);
+}
+
+struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
+ u32 max_num_sg, u32 max_num_meta_sg)
+{
+ return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg,
+ max_num_meta_sg);
+}
+
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
struct ib_udata *udata)
{
@@@ -2026,53 -1887,16 +2026,53 @@@ done
}
static int
+mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
+ int data_sg_nents, unsigned int *data_sg_offset,
+ struct scatterlist *meta_sg, int meta_sg_nents,
+ unsigned int *meta_sg_offset)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ unsigned int sg_offset = 0;
+ int n = 0;
+
+ mr->meta_length = 0;
+ if (data_sg_nents == 1) {
+ n++;
+ mr->ndescs = 1;
+ if (data_sg_offset)
+ sg_offset = *data_sg_offset;
+ mr->data_length = sg_dma_len(data_sg) - sg_offset;
+ mr->data_iova = sg_dma_address(data_sg) + sg_offset;
+ if (meta_sg_nents == 1) {
+ n++;
+ mr->meta_ndescs = 1;
+ if (meta_sg_offset)
+ sg_offset = *meta_sg_offset;
+ else
+ sg_offset = 0;
+ mr->meta_length = sg_dma_len(meta_sg) - sg_offset;
+ mr->pi_iova = sg_dma_address(meta_sg) + sg_offset;
+ }
+ ibmr->length = mr->data_length + mr->meta_length;
+ }
+
+ return n;
+}
+
+static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
struct scatterlist *sgl,
unsigned short sg_nents,
- unsigned int *sg_offset_p)
+ unsigned int *sg_offset_p,
+ struct scatterlist *meta_sgl,
+ unsigned short meta_sg_nents,
+ unsigned int *meta_sg_offset_p)
{
struct scatterlist *sg = sgl;
struct mlx5_klm *klms = mr->descs;
unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
u32 lkey = mr->ibmr.pd->local_dma_lkey;
- int i;
+ int i, j = 0;
mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
mr->ibmr.length = 0;
@@@ -2087,36 -1911,12 +2087,36 @@@
sg_offset = 0;
}
- mr->ndescs = i;
if (sg_offset_p)
*sg_offset_p = sg_offset;
- return i;
+ mr->ndescs = i;
+ mr->data_length = mr->ibmr.length;
+
+ if (meta_sg_nents) {
+ sg = meta_sgl;
+ sg_offset = meta_sg_offset_p ? *meta_sg_offset_p : 0;
+ for_each_sg(meta_sgl, sg, meta_sg_nents, j) {
+ if (unlikely(i + j >= mr->max_descs))
+ break;
+ klms[i + j].va = cpu_to_be64(sg_dma_address(sg) +
+ sg_offset);
+ klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) -
+ sg_offset);
+ klms[i + j].key = cpu_to_be32(lkey);
+ mr->ibmr.length += sg_dma_len(sg) - sg_offset;
+
+ sg_offset = 0;
+ }
+ if (meta_sg_offset_p)
+ *meta_sg_offset_p = sg_offset;
+
+ mr->meta_ndescs = j;
+ mr->meta_length = mr->ibmr.length - mr->data_length;
+ }
+
+ return i + j;
}
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
@@@ -2133,181 -1933,6 +2133,181 @@@
return 0;
}
+static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ __be64 *descs;
+
+ if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs))
+ return -ENOMEM;
+
+ descs = mr->descs;
+ descs[mr->ndescs + mr->meta_ndescs++] =
+ cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
+
+ return 0;
+}
+
+static int
+mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
+ int data_sg_nents, unsigned int *data_sg_offset,
+ struct scatterlist *meta_sg, int meta_sg_nents,
+ unsigned int *meta_sg_offset)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
+ int n;
+
+ pi_mr->ndescs = 0;
+ pi_mr->meta_ndescs = 0;
+ pi_mr->meta_length = 0;
+
+ ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
+ pi_mr->desc_size * pi_mr->max_descs,
+ DMA_TO_DEVICE);
+
+ pi_mr->ibmr.page_size = ibmr->page_size;
+ n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset,
+ mlx5_set_page);
+ if (n != data_sg_nents)
+ return n;
+
+ pi_mr->data_iova = pi_mr->ibmr.iova;
+ pi_mr->data_length = pi_mr->ibmr.length;
+ pi_mr->ibmr.length = pi_mr->data_length;
+ ibmr->length = pi_mr->data_length;
+
+ if (meta_sg_nents) {
+ u64 page_mask = ~((u64)ibmr->page_size - 1);
+ u64 iova = pi_mr->data_iova;
+
+ n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents,
+ meta_sg_offset, mlx5_set_page_pi);
+
+ pi_mr->meta_length = pi_mr->ibmr.length;
+ /*
+ * The PI address for the HW is the offset of the metadata address
+ * relative to the first data page address.
+ * It equals the first data page address + the size of the data
+ * pages + the metadata offset within the first metadata page.
+ */
+ pi_mr->pi_iova = (iova & page_mask) +
+ pi_mr->ndescs * ibmr->page_size +
+ (pi_mr->ibmr.iova & ~page_mask);
+ /*
+ * In order to use one MTT MR for data and metadata, we also
+ * register the gaps between the end of the data and the start of
+ * the metadata (the sig MR will verify that the HW accesses the
+ * right addresses). This mapping is safe because we use an
+ * internal mkey for the registration.
+ */
+ pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova;
+ pi_mr->ibmr.iova = iova;
+ ibmr->length += pi_mr->meta_length;
+ }
+
+ ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
+ pi_mr->desc_size * pi_mr->max_descs,
+ DMA_TO_DEVICE);
+
+ return n;
+}
+
+static int
+mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
+ int data_sg_nents, unsigned int *data_sg_offset,
+ struct scatterlist *meta_sg, int meta_sg_nents,
+ unsigned int *meta_sg_offset)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct mlx5_ib_mr *pi_mr = mr->klm_mr;
+ int n;
+
+ pi_mr->ndescs = 0;
+ pi_mr->meta_ndescs = 0;
+ pi_mr->meta_length = 0;
+
+ ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
+ pi_mr->desc_size * pi_mr->max_descs,
+ DMA_TO_DEVICE);
+
+ n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset,
+ meta_sg, meta_sg_nents, meta_sg_offset);
+
+ ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
+ pi_mr->desc_size * pi_mr->max_descs,
+ DMA_TO_DEVICE);
+
+ /* This is a zero-based memory region */
+ pi_mr->data_iova = 0;
+ pi_mr->ibmr.iova = 0;
+ pi_mr->pi_iova = pi_mr->data_length;
+ ibmr->length = pi_mr->ibmr.length;
+
+ return n;
+}
+
+int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
+ int data_sg_nents, unsigned int *data_sg_offset,
+ struct scatterlist *meta_sg, int meta_sg_nents,
+ unsigned int *meta_sg_offset)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct mlx5_ib_mr *pi_mr = NULL;
+ int n;
+
+ WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);
+
+ mr->ndescs = 0;
+ mr->data_length = 0;
+ mr->data_iova = 0;
+ mr->meta_ndescs = 0;
+ mr->pi_iova = 0;
+ /*
+ * As a performance optimization, if possible, there is no need to
+ * perform a UMR operation to register the data/metadata buffers.
+ * First try to map the sg lists to PA descriptors with local_dma_lkey.
+ * Fall back to UMR only in case of failure.
+ */
+ n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents,
+ data_sg_offset, meta_sg, meta_sg_nents,
+ meta_sg_offset);
+ if (n == data_sg_nents + meta_sg_nents)
+ goto out;
+ /*
+ * As a performance optimization, if possible, there is no need to map
+ * the sg lists to KLM descriptors. First try to map the sg lists to MTT
+ * descriptors and fall back to KLM only in case of failure.
+ * It's more efficient for the HW to work with MTT descriptors
+ * (especially under high load).
+ * Use KLM (indirect access) only if it's mandatory.
+ */
+ pi_mr = mr->mtt_mr;
+ n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents,
+ data_sg_offset, meta_sg, meta_sg_nents,
+ meta_sg_offset);
+ if (n == data_sg_nents + meta_sg_nents)
+ goto out;
+
+ pi_mr = mr->klm_mr;
+ n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents,
+ data_sg_offset, meta_sg, meta_sg_nents,
+ meta_sg_offset);
+ if (unlikely(n != data_sg_nents + meta_sg_nents))
+ return -ENOMEM;
+
+out:
+ /* This is a zero-based memory region */
+ ibmr->iova = 0;
+ mr->pi_mr = pi_mr;
+ if (pi_mr)
+ ibmr->sig_attrs->meta_length = pi_mr->meta_length;
+ else
+ ibmr->sig_attrs->meta_length = mr->meta_length;
+
+ return 0;
+}
+
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset)
{
@@@ -2321,8 -1946,7 +2321,8 @@@
DMA_TO_DEVICE);
if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
- n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
+ n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
+ NULL);
else
n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
mlx5_set_page);
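mr.c above adds the IB_MR_TYPE_INTEGRITY machinery: the signature mkey fronts two internal pi MRs (KLM and MTT), and mlx5_ib_map_mr_sg_pi() tries PA, then MTT, then KLM descriptors, cheapest first. From a ULP the feature is reached through the core verbs; a hedged consumer-side sketch follows (error paths trimmed, and the trailing page_size argument of ib_map_mr_sg_pi() should be checked against rdma/ib_verbs.h):

#include <rdma/ib_verbs.h>

/* Sketch: one integrity MR covering a data SG list plus a metadata
 * (protection information) SG list. The QP that uses the MR must
 * have been created with IB_QP_CREATE_INTEGRITY_EN. */
static struct ib_mr *reg_pi_mr(struct ib_pd *pd,
                               struct scatterlist *data_sg, int data_nents,
                               struct scatterlist *meta_sg, int meta_nents)
{
        struct ib_mr *mr;
        int n;

        mr = ib_alloc_mr_integrity(pd, data_nents, meta_nents);
        if (IS_ERR(mr))
                return mr;

        /* Maps both lists in one shot; the mlx5 code above picks PA,
         * MTT or KLM descriptors depending on the buffer layout. */
        n = ib_map_mr_sg_pi(mr, data_sg, data_nents, NULL,
                            meta_sg, meta_nents, NULL, PAGE_SIZE);
        if (n != data_nents + meta_nents) {
                ib_dereg_mr(mr);
                return ERR_PTR(n < 0 ? n : -EINVAL);
        }

        return mr;  /* next: post IB_WR_REG_MR_INTEGRITY, see qp.c below */
}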
diff --combined drivers/infiniband/hw/mlx5/odp.c
index 3d18b6ea9efa,831c450b271a..5b642d81e617
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@@ -150,7 -150,7 +150,7 @@@ static struct ib_umem_odp *odp_lookup(u
if (!rb)
goto not_found;
odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
- if (ib_umem_start(&odp->umem) > start + length)
+ if (ib_umem_start(odp) > start + length)
goto not_found;
}
not_found:
@@@ -200,7 -200,7 +200,7 @@@ void mlx5_odp_populate_klm(struct mlx5_
static void mr_leaf_free_action(struct work_struct *work)
{
struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work);
- int idx = ib_umem_start(&odp->umem) >> MLX5_IMR_MTT_SHIFT;
+ int idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;
mr->parent = NULL;
@@@ -224,6 -224,7 +224,6 @@@ void mlx5_ib_invalidate_range(struct ib
const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
sizeof(struct mlx5_mtt)) - 1;
u64 idx = 0, blk_start_idx = 0;
- struct ib_umem *umem;
int in_block = 0;
u64 addr;
@@@ -231,14 -232,15 +231,14 @@@
pr_err("invalidation called on NULL umem or non-ODP umem\n");
return;
}
- umem = &umem_odp->umem;
mr = umem_odp->private;
if (!mr || !mr->ibmr.pd)
return;
- start = max_t(u64, ib_umem_start(umem), start);
- end = min_t(u64, ib_umem_end(umem), end);
+ start = max_t(u64, ib_umem_start(umem_odp), start);
+ end = min_t(u64, ib_umem_end(umem_odp), end);
/*
* Iteration one - zap the HW's MTTs. The notifiers_count ensures that
@@@ -247,8 -249,8 +247,8 @@@
* but they will write 0s as well, so no difference in the end result.
*/
- for (addr = start; addr < end; addr += BIT(umem->page_shift)) {
- idx = (addr - ib_umem_start(umem)) >> umem->page_shift;
+ for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
+ idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
/*
* Strive to write the MTTs in chunks, but avoid overwriting
* non-existing MTTs. The heuristic here can be improved to
@@@ -542,12 -544,13 +542,12 @@@ static int mr_leaf_free(struct ib_umem_
void *cookie)
{
struct mlx5_ib_mr *mr = umem_odp->private, *imr = cookie;
- struct ib_umem *umem = &umem_odp->umem;
if (mr->parent != imr)
return 0;
- ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem),
- ib_umem_end(umem));
+ ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
+ ib_umem_end(umem_odp));
if (umem_odp->dying)
return 0;
@@@ -599,9 -602,9 +599,9 @@@ static int pagefault_mr(struct mlx5_ib_
}
next_mr:
- size = min_t(size_t, bcnt, ib_umem_end(&odp->umem) - io_virt);
+ size = min_t(size_t, bcnt, ib_umem_end(odp) - io_virt);
- page_shift = mr->umem->page_shift;
+ page_shift = odp->page_shift;
page_mask = ~(BIT(page_shift) - 1);
start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
access_mask = ODP_READ_ALLOWED_BIT;
@@@ -765,7 -768,7 +765,7 @@@ static int pagefault_single_data_segmen
bcnt -= *bytes_committed;
next_mr:
- mmkey = __mlx5_mr_lookup(dev->mdev, mlx5_base_mkey(key));
+ mmkey = xa_load(&dev->mdev->priv.mkey_table, mlx5_base_mkey(key));
if (!mkey_is_eq(mmkey, key)) {
mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
ret = -EFAULT;
@@@ -1555,9 -1558,9 +1555,9 @@@ mlx5_ib_create_pf_eq(struct mlx5_ib_de
eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
param = (struct mlx5_eq_param) {
.irq_index = 0,
- .mask = 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
.nent = MLX5_IB_NUM_PF_EQE,
};
+ param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
eq->core = mlx5_eq_create_generic(dev->mdev, &param);
if (IS_ERR(eq->core)) {
err = PTR_ERR(eq->core);
@@@ -1683,8 -1686,8 +1683,8 @@@ static void num_pending_prefetch_dec(st
struct mlx5_core_mkey *mmkey;
struct mlx5_ib_mr *mr;
- mmkey = __mlx5_mr_lookup(dev->mdev,
- mlx5_base_mkey(sg_list[i].lkey));
+ mmkey = xa_load(&dev->mdev->priv.mkey_table,
+ mlx5_base_mkey(sg_list[i].lkey));
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
atomic_dec(&mr->num_pending_prefetch);
}
@@@ -1703,8 -1706,8 +1703,8 @@@ static bool num_pending_prefetch_inc(st
struct mlx5_core_mkey *mmkey;
struct mlx5_ib_mr *mr;
- mmkey = __mlx5_mr_lookup(dev->mdev,
- mlx5_base_mkey(sg_list[i].lkey));
+ mmkey = xa_load(&dev->mdev->priv.mkey_table,
+ mlx5_base_mkey(sg_list[i].lkey));
if (!mmkey || mmkey->key != sg_list[i].lkey) {
ret = false;
break;
diff --combined drivers/infiniband/hw/mlx5/qp.c
index 4fbf60fed374,768c7e81f688..8b7a60ada92c
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@@ -442,9 -442,9 +442,9 @@@ static int calc_send_wqe(struct ib_qp_i
}
size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
- if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
+ if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN &&
ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
- return MLX5_SIG_WQE_SIZE;
+ return MLX5_SIG_WQE_SIZE;
else
return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}
@@@ -496,6 -496,9 +496,6 @@@ static int calc_sq_size(struct mlx5_ib_
sizeof(struct mlx5_wqe_inline_seg);
attr->cap.max_inline_data = qp->max_inline_data;
- if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
- qp->signature_en = true;
-
wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
@@@ -787,7 -790,8 +787,7 @@@ static void destroy_user_rq(struct mlx5
atomic_dec(&dev->delay_drop.rqs_cnt);
mlx5_ib_db_unmap_user(context, &rwq->db);
- if (rwq->umem)
- ib_umem_release(rwq->umem);
+ ib_umem_release(rwq->umem);
}
static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
@@@ -973,7 -977,8 +973,7 @@@ err_free
kvfree(*in);
err_umem:
- if (ubuffer->umem)
- ib_umem_release(ubuffer->umem);
+ ib_umem_release(ubuffer->umem);
err_bfreg:
if (bfregn != MLX5_IB_INVALID_BFREG)
@@@ -992,7 -997,8 +992,7 @@@ static void destroy_qp_user(struct mlx5
ibucontext);
mlx5_ib_db_unmap_user(context, &qp->db);
- if (base->ubuffer.umem)
- ib_umem_release(base->ubuffer.umem);
+ ib_umem_release(base->ubuffer.umem);
/*
* Free only the BFREGs which are handled by the kernel.
@@@ -1036,7 -1042,7 +1036,7 @@@ static int create_kernel_qp(struct mlx5
void *qpc;
int err;
- if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN |
+ if (init_attr->create_flags & ~(IB_QP_CREATE_INTEGRITY_EN |
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
IB_QP_CREATE_IPOIB_UD_LSO |
IB_QP_CREATE_NETIF_QP |
@@@ -4164,13 -4170,15 +4164,13 @@@ static __be64 sig_mkey_mask(void
}
static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
- struct mlx5_ib_mr *mr, bool umr_inline)
+ struct mlx5_ib_mr *mr, u8 flags)
{
- int size = mr->ndescs * mr->desc_size;
+ int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
memset(umr, 0, sizeof(*umr));
- umr->flags = MLX5_UMR_CHECK_NOT_FREE;
- if (umr_inline)
- umr->flags |= MLX5_UMR_INLINE;
+ umr->flags = flags;
umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
umr->mkey_mask = frwr_mkey_mask();
}
@@@ -4297,7 -4305,7 +4297,7 @@@ static void set_reg_mkey_seg(struct mlx
struct mlx5_ib_mr *mr,
u32 key, int access)
{
- int ndescs = ALIGN(mr->ndescs, 8) >> 1;
+ int ndescs = ALIGN(mr->ndescs + mr->meta_ndescs, 8) >> 1;
memset(seg, 0, sizeof(*seg));
@@@ -4348,7 -4356,7 +4348,7 @@@ static void set_reg_data_seg(struct mlx
struct mlx5_ib_mr *mr,
struct mlx5_ib_pd *pd)
{
- int bcount = mr->desc_size * mr->ndescs;
+ int bcount = mr->desc_size * (mr->ndescs + mr->meta_ndescs);
dseg->addr = cpu_to_be64(mr->desc_map);
dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
@@@ -4541,37 -4549,23 +4541,37 @@@ static int mlx5_set_bsf(struct ib_mr *s
return 0;
}
-static int set_sig_data_segment(const struct ib_sig_handover_wr *wr,
- struct mlx5_ib_qp *qp, void **seg,
- int *size, void **cur_edge)
+static int set_sig_data_segment(const struct ib_send_wr *send_wr,
+ struct ib_mr *sig_mr,
+ struct ib_sig_attrs *sig_attrs,
+ struct mlx5_ib_qp *qp, void **seg, int *size,
+ void **cur_edge)
{
- struct ib_sig_attrs *sig_attrs = wr->sig_attrs;
- struct ib_mr *sig_mr = wr->sig_mr;
struct mlx5_bsf *bsf;
- u32 data_len = wr->wr.sg_list->length;
- u32 data_key = wr->wr.sg_list->lkey;
- u64 data_va = wr->wr.sg_list->addr;
+ u32 data_len;
+ u32 data_key;
+ u64 data_va;
+ u32 prot_len = 0;
+ u32 prot_key = 0;
+ u64 prot_va = 0;
+ bool prot = false;
int ret;
int wqe_size;
+ struct mlx5_ib_mr *mr = to_mmr(sig_mr);
+ struct mlx5_ib_mr *pi_mr = mr->pi_mr;
+
+ data_len = pi_mr->data_length;
+ data_key = pi_mr->ibmr.lkey;
+ data_va = pi_mr->data_iova;
+ if (pi_mr->meta_ndescs) {
+ prot_len = pi_mr->meta_length;
+ prot_key = pi_mr->ibmr.lkey;
+ prot_va = pi_mr->pi_iova;
+ prot = true;
+ }
- if (!wr->prot ||
- (data_key == wr->prot->lkey &&
- data_va == wr->prot->addr &&
- data_len == wr->prot->length)) {
+ if (!prot || (data_key == prot_key && data_va == prot_va &&
+ data_len == prot_len)) {
/**
* Source domain doesn't contain signature information
* or data and protection are interleaved in memory.
@@@ -4605,6 -4599,8 +4605,6 @@@
struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
struct mlx5_stride_block_entry *data_sentry;
struct mlx5_stride_block_entry *prot_sentry;
- u32 prot_key = wr->prot->lkey;
- u64 prot_va = wr->prot->addr;
u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
int prot_size;
@@@ -4654,15 -4650,17 +4654,15 @@@
}
static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
- const struct ib_sig_handover_wr *wr, u32 size,
- u32 length, u32 pdn)
+ struct ib_mr *sig_mr, int access_flags,
+ u32 size, u32 length, u32 pdn)
{
- struct ib_mr *sig_mr = wr->sig_mr;
u32 sig_key = sig_mr->rkey;
u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
memset(seg, 0, sizeof(*seg));
- seg->flags = get_umr_flags(wr->access_flags) |
- MLX5_MKC_ACCESS_MODE_KLMS;
+ seg->flags = get_umr_flags(access_flags) | MLX5_MKC_ACCESS_MODE_KLMS;
seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
MLX5_MKEY_BSF_EN | pdn);
@@@ -4682,50 -4680,49 +4682,50 @@@ static void set_sig_umr_segment(struct
umr->mkey_mask = sig_mkey_mask();
}
-
-static int set_sig_umr_wr(const struct ib_send_wr *send_wr,
- struct mlx5_ib_qp *qp, void **seg, int *size,
- void **cur_edge)
+static int set_pi_umr_wr(const struct ib_send_wr *send_wr,
+ struct mlx5_ib_qp *qp, void **seg, int *size,
+ void **cur_edge)
{
- const struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
- struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
+ const struct ib_reg_wr *wr = reg_wr(send_wr);
+ struct mlx5_ib_mr *sig_mr = to_mmr(wr->mr);
+ struct mlx5_ib_mr *pi_mr = sig_mr->pi_mr;
+ struct ib_sig_attrs *sig_attrs = sig_mr->ibmr.sig_attrs;
u32 pdn = get_pd(qp)->pdn;
u32 xlt_size;
int region_len, ret;
- if (unlikely(wr->wr.num_sge != 1) ||
- unlikely(wr->access_flags & IB_ACCESS_REMOTE_ATOMIC) ||
- unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
+ if (unlikely(send_wr->num_sge != 0) ||
+ unlikely(wr->access & IB_ACCESS_REMOTE_ATOMIC) ||
+ unlikely(!sig_mr->sig) || unlikely(!qp->ibqp.integrity_en) ||
unlikely(!sig_mr->sig->sig_status_checked))
return -EINVAL;
/* length of the protected region, data + protection */
- region_len = wr->wr.sg_list->length;
- if (wr->prot &&
- (wr->prot->lkey != wr->wr.sg_list->lkey ||
- wr->prot->addr != wr->wr.sg_list->addr ||
- wr->prot->length != wr->wr.sg_list->length))
- region_len += wr->prot->length;
+ region_len = pi_mr->ibmr.length;
/**
* KLM octoword size - if protection was provided
* then we use strided block format (3 octowords),
* else we use a single KLM (1 octoword)
**/
- xlt_size = wr->prot ? 0x30 : sizeof(struct mlx5_klm);
+ if (sig_attrs->mem.sig_type != IB_SIG_TYPE_NONE)
+ xlt_size = 0x30;
+ else
+ xlt_size = sizeof(struct mlx5_klm);
set_sig_umr_segment(*seg, xlt_size);
*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
- set_sig_mkey_segment(*seg, wr, xlt_size, region_len, pdn);
+ set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len,
+ pdn);
*seg += sizeof(struct mlx5_mkey_seg);
*size += sizeof(struct mlx5_mkey_seg) / 16;
handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
- ret = set_sig_data_segment(wr, qp, seg, size, cur_edge);
+ ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size,
+ cur_edge);
if (ret)
return ret;
@@@ -4762,14 -4759,12 +4762,14 @@@ static int set_psv_wr(struct ib_sig_dom
static int set_reg_wr(struct mlx5_ib_qp *qp,
const struct ib_reg_wr *wr,
- void **seg, int *size, void **cur_edge)
+ void **seg, int *size, void **cur_edge,
+ bool check_not_free)
{
struct mlx5_ib_mr *mr = to_mmr(wr->mr);
struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
- size_t mr_list_size = mr->ndescs * mr->desc_size;
+ int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
+ u8 flags = 0;
if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
mlx5_ib_warn(to_mdev(qp->ibqp.device),
@@@ -4777,12 -4772,7 +4777,12 @@@
return -EINVAL;
}
- set_reg_umr_seg(*seg, mr, umr_inline);
+ if (check_not_free)
+ flags |= MLX5_UMR_CHECK_NOT_FREE;
+ if (umr_inline)
+ flags |= MLX5_UMR_INLINE;
+
+ set_reg_umr_seg(*seg, mr, flags);
*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
@@@ -4908,12 -4898,8 +4908,12 @@@ static int _mlx5_ib_post_send(struct ib
struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
struct mlx5_core_dev *mdev = dev->mdev;
+ struct ib_reg_wr reg_pi_wr;
struct mlx5_ib_qp *qp;
struct mlx5_ib_mr *mr;
+ struct mlx5_ib_mr *pi_mr;
+ struct mlx5_ib_mr pa_pi_mr;
+ struct ib_sig_attrs *sig_attrs;
struct mlx5_wqe_xrc_seg *xrc;
struct mlx5_bf *bf;
void *cur_edge;
@@@ -4967,8 -4953,7 +4967,8 @@@
goto out;
}
- if (wr->opcode == IB_WR_REG_MR) {
+ if (wr->opcode == IB_WR_REG_MR ||
+ wr->opcode == IB_WR_REG_MR_INTEGRITY) {
fence = dev->umr_fence;
next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
} else {
@@@ -5018,7 -5003,7 +5018,7 @@@
qp->sq.wr_data[idx] = IB_WR_REG_MR;
ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
err = set_reg_wr(qp, reg_wr(wr), &seg, &size,
- &cur_edge);
+ &cur_edge, true);
if (err) {
*bad_wr = wr;
goto out;
@@@ -5026,82 -5011,26 +5026,82 @@@
num_sge = 0;
break;
- case IB_WR_REG_SIG_MR:
- qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
- mr = to_mmr(sig_handover_wr(wr)->sig_mr);
-
+ case IB_WR_REG_MR_INTEGRITY:
+ qp->sq.wr_data[idx] = IB_WR_REG_MR_INTEGRITY;
+
+ mr = to_mmr(reg_wr(wr)->mr);
+ pi_mr = mr->pi_mr;
+
+ if (pi_mr) {
+ memset(®_pi_wr, 0,
+ sizeof(struct ib_reg_wr));
+
+ reg_pi_wr.mr = &pi_mr->ibmr;
+ reg_pi_wr.access = reg_wr(wr)->access;
+ reg_pi_wr.key = pi_mr->ibmr.rkey;
+
+ ctrl->imm = cpu_to_be32(reg_pi_wr.key);
+ /* UMR for data + prot registration */
+ err = set_reg_wr(qp, ®_pi_wr, &seg,
+ &size, &cur_edge,
+ false);
+ if (err) {
+ *bad_wr = wr;
+ goto out;
+ }
+ finish_wqe(qp, ctrl, seg, size,
+ cur_edge, idx, wr->wr_id,
+ nreq, fence,
+ MLX5_OPCODE_UMR);
+
+ err = begin_wqe(qp, &seg, &ctrl, wr,
+ &idx, &size, &cur_edge,
+ nreq);
+ if (err) {
+ mlx5_ib_warn(dev, "\n");
+ err = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+ } else {
+ memset(&pa_pi_mr, 0,
+ sizeof(struct mlx5_ib_mr));
+ /* No UMR, use local_dma_lkey */
+ pa_pi_mr.ibmr.lkey =
+ mr->ibmr.pd->local_dma_lkey;
+
+ pa_pi_mr.ndescs = mr->ndescs;
+ pa_pi_mr.data_length = mr->data_length;
+ pa_pi_mr.data_iova = mr->data_iova;
+ if (mr->meta_ndescs) {
+ pa_pi_mr.meta_ndescs =
+ mr->meta_ndescs;
+ pa_pi_mr.meta_length =
+ mr->meta_length;
+ pa_pi_mr.pi_iova = mr->pi_iova;
+ }
+
+ pa_pi_mr.ibmr.length = mr->ibmr.length;
+ mr->pi_mr = &pa_pi_mr;
+ }
ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
- err = set_sig_umr_wr(wr, qp, &seg, &size,
- &cur_edge);
+ /* UMR for sig MR */
+ err = set_pi_umr_wr(wr, qp, &seg, &size,
+ &cur_edge);
if (err) {
mlx5_ib_warn(dev, "\n");
*bad_wr = wr;
goto out;
}
-
finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
wr->wr_id, nreq, fence,
MLX5_OPCODE_UMR);
+
/*
* SET_PSV WQEs are not signaled and solicited
* on error
*/
+ sig_attrs = mr->ibmr.sig_attrs;
err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
&size, &cur_edge, nreq, false,
true);
@@@ -5111,18 -5040,19 +5111,18 @@@
*bad_wr = wr;
goto out;
}
-
- err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->mem,
- mr->sig->psv_memory.psv_idx, &seg,
- &size);
+ err = set_psv_wr(&sig_attrs->mem,
+ mr->sig->psv_memory.psv_idx,
+ &seg, &size);
if (err) {
mlx5_ib_warn(dev, "\n");
*bad_wr = wr;
goto out;
}
-
finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
- wr->wr_id, nreq, fence,
+ wr->wr_id, nreq, next_fence,
MLX5_OPCODE_SET_PSV);
+
err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
&size, &cur_edge, nreq, false,
true);
@@@ -5132,20 -5062,20 +5132,20 @@@
*bad_wr = wr;
goto out;
}
-
- err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
- mr->sig->psv_wire.psv_idx, &seg,
- &size);
+ err = set_psv_wr(&sig_attrs->wire,
+ mr->sig->psv_wire.psv_idx,
+ &seg, &size);
if (err) {
mlx5_ib_warn(dev, "\n");
*bad_wr = wr;
goto out;
}
-
finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
- wr->wr_id, nreq, fence,
+ wr->wr_id, nreq, next_fence,
MLX5_OPCODE_SET_PSV);
- qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+
+ qp->next_fence =
+ MLX5_FENCE_MODE_INITIATOR_SMALL;
num_sge = 0;
goto skip_psv;
@@@ -6117,7 -6047,7 +6117,7 @@@ err
return ERR_PTR(err);
}
-int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
+void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(wq->device);
struct mlx5_ib_rwq *rwq = to_mrwq(wq);
@@@ -6125,6 -6055,8 +6125,6 @@@
mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
destroy_user_rq(dev, wq->pd, rwq, udata);
kfree(rwq);
-
- return 0;
}
struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
@@@ -6365,7 -6297,7 +6365,7 @@@ static void handle_drain_completion(str
/* Run the CQ handler - this makes sure that the drain WR will
* be processed if wasn't processed yet.
*/
- mcq->mcq.comp(&mcq->mcq);
+ mcq->mcq.comp(&mcq->mcq, NULL);
}
wait_for_completion(&sdrain->done);
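The qp.c rework above retires IB_WR_REG_SIG_MR: a single IB_WR_REG_MR_INTEGRITY ib_reg_wr now expands inside _mlx5_ib_post_send() into the data/protection UMR (when a pi MR exists), the signature UMR, and two SET_PSV WQEs. A hedged sketch of the ULP-visible side, continuing the MR from the mr.c sketch above (full ib_sig_attrs T10-DIF parameters elided):

/* Sketch: post the integrity registration. num_sge must be 0 for
 * this opcode, which the zero initializer guarantees. */
static int post_reg_pi(struct ib_qp *qp, struct ib_mr *mr)
{
        struct ib_reg_wr wr = {};
        const struct ib_send_wr *bad_wr;

        /* Example signature domains; a real T10-DIF setup also fills
         * sig.dif (bg_type, pi_interval, ref_tag, ...). */
        mr->sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF;
        mr->sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;

        wr.wr.opcode = IB_WR_REG_MR_INTEGRITY;
        wr.wr.send_flags = IB_SEND_SIGNALED;
        wr.mr = mr;
        wr.key = mr->rkey;
        wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
                    IB_ACCESS_REMOTE_WRITE;

        return ib_post_send(qp, &wr.wr, &bad_wr);
}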
diff --combined drivers/infiniband/hw/qedr/main.c
index a0a7ba0a5af4,5ebf3c53b3fb..533157a2a3be
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@@ -183,10 -183,6 +183,10 @@@ static void qedr_roce_register_device(s
}
static const struct ib_device_ops qedr_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_QEDR,
+ .uverbs_abi_ver = QEDR_ABI_VERSION,
+
.alloc_mr = qedr_alloc_mr,
.alloc_pd = qedr_alloc_pd,
.alloc_ucontext = qedr_alloc_ucontext,
@@@ -224,7 -220,6 +224,7 @@@
.resize_cq = qedr_resize_cq,
INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext),
@@@ -236,6 -231,8 +236,6 @@@ static int qedr_register_device(struct
dev->ibdev.node_guid = dev->attr.node_guid;
memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));
- dev->ibdev.owner = THIS_MODULE;
- dev->ibdev.uverbs_abi_ver = QEDR_ABI_VERSION;
dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) |
QEDR_UVERBS(QUERY_DEVICE) |
@@@ -277,6 -274,7 +277,6 @@@
rdma_set_device_sysfs_group(&dev->ibdev, &qedr_attr_group);
ib_set_device_ops(&dev->ibdev, &qedr_dev_ops);
- dev->ibdev.driver_id = RDMA_DRIVER_QEDR;
rc = ib_device_set_netdev(&dev->ibdev, dev->ndev, 1);
if (rc)
return rc;
@@@ -314,7 -312,8 +314,8 @@@ static void qedr_free_mem_sb(struct qed
struct qed_sb_info *sb_info, int sb_id)
{
if (sb_info->sb_virt) {
- dev->ops->common->sb_release(dev->cdev, sb_info, sb_id);
+ dev->ops->common->sb_release(dev->cdev, sb_info, sb_id,
+ QED_SB_TYPE_CNQ);
dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
(void *)sb_info->sb_virt, sb_info->sb_phys);
}
@@@ -506,11 -505,13 +507,13 @@@ static irqreturn_t qedr_irq_handler(in
static void qedr_sync_free_irqs(struct qedr_dev *dev)
{
u32 vector;
+ u16 idx;
int i;
for (i = 0; i < dev->int_info.used_cnt; i++) {
if (dev->int_info.msix_cnt) {
- vector = dev->int_info.msix[i * dev->num_hwfns].vector;
+ idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
+ vector = dev->int_info.msix[idx].vector;
synchronize_irq(vector);
free_irq(vector, &dev->cnq_array[i]);
}
@@@ -522,6 -523,7 +525,7 @@@
static int qedr_req_msix_irqs(struct qedr_dev *dev)
{
int i, rc = 0;
+ u16 idx;
if (dev->num_cnq > dev->int_info.msix_cnt) {
DP_ERR(dev,
@@@ -531,7 -533,8 +535,8 @@@
}
for (i = 0; i < dev->num_cnq; i++) {
- rc = request_irq(dev->int_info.msix[i * dev->num_hwfns].vector,
+ idx = i * dev->num_hwfns + dev->affin_hwfn_idx;
+ rc = request_irq(dev->int_info.msix[idx].vector,
qedr_irq_handler, 0, dev->cnq_array[i].name,
&dev->cnq_array[i]);
if (rc) {
@@@ -868,6 -871,16 +873,16 @@@ static struct qedr_dev *qedr_add(struc
dev->user_dpm_enabled = dev_info.user_dpm_enabled;
dev->rdma_type = dev_info.rdma_type;
dev->num_hwfns = dev_info.common.num_hwfns;
+
+ if (IS_IWARP(dev) && QEDR_IS_CMT(dev)) {
+ rc = dev->ops->iwarp_set_engine_affin(cdev, false);
+ if (rc) {
+ DP_ERR(dev, "iWARP is disabled over a 100g device. Enabling it may impact L2 performance. To enable it run devlink dev param set <dev> name iwarp_cmt value true cmode runtime\n");
+ goto init_err;
+ }
+ }
+ dev->affin_hwfn_idx = dev->ops->common->get_affin_hwfn_idx(cdev);
+
dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);
dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
@@@ -928,6 -941,10 +943,10 @@@ static void qedr_remove(struct qedr_de
qedr_stop_hw(dev);
qedr_sync_free_irqs(dev);
qedr_free_resources(dev);
+
+ if (IS_IWARP(dev) && QEDR_IS_CMT(dev))
+ dev->ops->iwarp_set_engine_affin(dev->cdev, true);
+
ib_dealloc_device(&dev->ibdev);
}
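
[Editor's sketch] The qedr hunks (and the usnic ones below) consolidate per-device identity, .owner, .driver_id and .uverbs_abi_ver, into the driver's const struct ib_device_ops instead of assigning them field by field on each ib_device instance. A hypothetical toy version of that pattern, showing the identity data living read-only next to the callbacks and being applied in a single copy:

    #include <stdio.h>

    struct toy_device_ops {
            const char *owner;           /* module owner, set once */
            int driver_id;               /* stable driver identity */
            int abi_ver;                 /* user/kernel ABI version */
            void (*query)(void);         /* example callback slot */
    };

    struct toy_device {
            struct toy_device_ops ops;   /* copied in, effectively immutable */
    };

    static void toy_query(void) { puts("query called"); }

    /* One const table per driver: callbacks and identity together. */
    static const struct toy_device_ops qedr_like_ops = {
            .owner     = "toy_module",
            .driver_id = 42,
            .abi_ver   = 8,
            .query     = toy_query,
    };

    static void toy_set_device_ops(struct toy_device *dev,
                                   const struct toy_device_ops *ops)
    {
            dev->ops = *ops;             /* the core copies the whole table */
    }

    int main(void)
    {
            struct toy_device dev;

            toy_set_device_ops(&dev, &qedr_like_ops);
            printf("driver %d, abi %d\n", dev.ops.driver_id, dev.ops.abi_ver);
            dev.ops.query();
            return 0;
    }

Keeping the identity in a const table means it can never drift between device instances and the compiler can place it in read-only data.
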
diff --combined drivers/infiniband/hw/usnic/usnic_ib_main.c
index 6ae5ce007fed,34c1f9d6c915..03f54eb9404b
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@@ -329,10 -329,6 +329,10 @@@ static void usnic_get_dev_fw_str(struc
}
static const struct ib_device_ops usnic_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_USNIC,
+ .uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION,
+
.alloc_pd = usnic_ib_alloc_pd,
.alloc_ucontext = usnic_ib_alloc_ucontext,
.create_cq = usnic_ib_create_cq,
@@@ -354,7 -350,6 +354,7 @@@
.query_qp = usnic_ib_query_qp,
.reg_user_mr = usnic_ib_reg_mr,
INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_cq, usnic_ib_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_ucontext, usnic_ib_ucontext, ibucontext),
};
@@@ -389,10 -384,12 +389,10 @@@ static void *usnic_ib_device_add(struc
us_ibdev->pdev = dev;
us_ibdev->netdev = pci_get_drvdata(dev);
- us_ibdev->ib_dev.owner = THIS_MODULE;
us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP;
us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT;
us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS;
us_ibdev->ib_dev.dev.parent = &dev->dev;
- us_ibdev->ib_dev.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION;
us_ibdev->ib_dev.uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@@@ -415,6 -412,7 +415,6 @@@
ib_set_device_ops(&us_ibdev->ib_dev, &usnic_dev_ops);
- us_ibdev->ib_dev.driver_id = RDMA_DRIVER_USNIC;
rdma_set_device_sysfs_group(&us_ibdev->ib_dev, &usnic_attr_group);
ret = ib_device_set_netdev(&us_ibdev->ib_dev, us_ibdev->netdev, 1);
@@@ -429,11 -427,16 +429,16 @@@
if (netif_carrier_ok(us_ibdev->netdev))
usnic_fwd_carrier_up(us_ibdev->ufdev);
- ind = in_dev_get(netdev);
- if (ind->ifa_list)
- usnic_fwd_add_ipaddr(us_ibdev->ufdev,
- ind->ifa_list->ifa_address);
- in_dev_put(ind);
+ rcu_read_lock();
+ ind = __in_dev_get_rcu(netdev);
+ if (ind) {
+ const struct in_ifaddr *ifa;
+
+ ifa = rcu_dereference(ind->ifa_list);
+ if (ifa)
+ usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
+ }
+ rcu_read_unlock();
usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
us_ibdev->ufdev->inaddr, &gid.raw[0]);
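
[Editor's sketch] The usnic hunk replaces a refcounted in_dev_get()/in_dev_put() pair with a pure RCU read side: enter rcu_read_lock(), fetch the pointer through an RCU-aware accessor, check it for NULL (the object may already be unpublished), use it, and unlock. A userspace analogue of that read-side pattern, sketched with liburcu (build with -lurcu; the ifaddr structure and names are hypothetical):

    #include <stdio.h>
    #include <stdlib.h>
    #include <urcu.h>    /* userspace RCU: rcu_read_lock(), rcu_dereference() */

    struct ifaddr { unsigned int addr; };

    static struct ifaddr *global_ifa;  /* published with rcu_assign_pointer() */

    static void reader(void)
    {
            struct ifaddr *ifa;

            rcu_read_lock();
            ifa = rcu_dereference(global_ifa);
            if (ifa)                     /* may have been unpublished */
                    printf("addr = %u\n", ifa->addr);
            rcu_read_unlock();
    }

    int main(void)
    {
            struct ifaddr *ifa = malloc(sizeof(*ifa));

            ifa->addr = 0x0a000001;
            rcu_register_thread();       /* required before read-side use */
            rcu_assign_pointer(global_ifa, ifa);
            reader();

            rcu_assign_pointer(global_ifa, NULL);
            synchronize_rcu();           /* wait out readers, then free */
            free(ifa);
            rcu_unregister_thread();
            return 0;
    }

The NULL check after rcu_dereference() mirrors the new "if (ind)" and "if (ifa)" tests in the hunk: with no reference held, absence is a normal outcome, not an error.
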
diff --combined drivers/net/bonding/bond_main.c
index b0aab3a0a1bf,302499ae05e6..9b7016abca2f
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@@ -613,8 -613,8 +613,8 @@@ static int bond_set_dev_addr(struct net
{
int err;
- netdev_dbg(bond_dev, "bond_dev=%p slave_dev=%p slave_dev->name=%s slave_dev->addr_len=%d\n",
- bond_dev, slave_dev, slave_dev->name, slave_dev->addr_len);
+ slave_dbg(bond_dev, slave_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
+ bond_dev, slave_dev, slave_dev->addr_len);
err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
if (err)
return err;
@@@ -661,8 -661,8 +661,8 @@@ static void bond_do_fail_over_mac(struc
if (new_active) {
rv = bond_set_dev_addr(bond->dev, new_active->dev);
if (rv)
- netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
- -rv, bond->dev->name);
+ slave_err(bond->dev, new_active->dev, "Error %d setting bond MAC from slave\n",
+ -rv);
}
break;
case BOND_FOM_FOLLOW:
@@@ -692,8 -692,8 +692,8 @@@
rv = dev_set_mac_address(new_active->dev,
(struct sockaddr *)&ss, NULL);
if (rv) {
- netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
- -rv, new_active->dev->name);
+ slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n",
+ -rv);
goto out;
}
@@@ -707,8 -707,8 +707,8 @@@
rv = dev_set_mac_address(old_active->dev,
(struct sockaddr *)&ss, NULL);
if (rv)
- netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
- -rv, new_active->dev->name);
+ slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n",
+ -rv);
out:
break;
default:
@@@ -796,6 -796,8 +796,8 @@@ static bool bond_should_notify_peers(st
slave ? slave->dev->name : "NULL");
if (!slave || !bond->send_peer_notif ||
+ bond->send_peer_notif %
+ max(1, bond->params.peer_notif_delay) != 0 ||
!netif_carrier_ok(bond->dev) ||
test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
return false;
@@@ -834,9 -836,8 +836,8 @@@ void bond_change_active_slave(struct bo
if (new_active->link == BOND_LINK_BACK) {
if (bond_uses_primary(bond)) {
- netdev_info(bond->dev, "making interface %s the new active one %d ms earlier\n",
- new_active->dev->name,
- (bond->params.updelay - new_active->delay) * bond->params.miimon);
+ slave_info(bond->dev, new_active->dev, "making interface the new active one %d ms earlier\n",
+ (bond->params.updelay - new_active->delay) * bond->params.miimon);
}
new_active->delay = 0;
@@@ -850,8 -851,7 +851,7 @@@
bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
} else {
if (bond_uses_primary(bond)) {
- netdev_info(bond->dev, "making interface %s the new active one\n",
- new_active->dev->name);
+ slave_info(bond->dev, new_active->dev, "making interface the new active one\n");
}
}
}
@@@ -888,15 -888,18 +888,18 @@@
if (netif_running(bond->dev)) {
bond->send_peer_notif =
- bond->params.num_peer_notif;
+ bond->params.num_peer_notif *
+ max(1, bond->params.peer_notif_delay);
should_notify_peers =
bond_should_notify_peers(bond);
}
call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
- if (should_notify_peers)
+ if (should_notify_peers) {
+ bond->send_peer_notif--;
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
bond->dev);
+ }
}
}
@@@ -939,7 -942,7 +942,7 @@@ void bond_select_active_slave(struct bo
return;
if (netif_carrier_ok(bond->dev))
- netdev_info(bond->dev, "first active interface up!\n");
+ netdev_info(bond->dev, "active interface up!\n");
else
netdev_info(bond->dev, "now running without any active interface!\n");
}
@@@ -1077,12 -1080,16 +1080,16 @@@ static netdev_features_t bond_fix_featu
#define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
+ #define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
+ NETIF_F_ALL_TSO)
+
static void bond_compute_features(struct bonding *bond)
{
unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
IFF_XMIT_DST_RELEASE_PERM;
netdev_features_t vlan_features = BOND_VLAN_FEATURES;
netdev_features_t enc_features = BOND_ENC_FEATURES;
+ netdev_features_t mpls_features = BOND_MPLS_FEATURES;
struct net_device *bond_dev = bond->dev;
struct list_head *iter;
struct slave *slave;
@@@ -1093,6 -1100,7 +1100,7 @@@
if (!bond_has_slaves(bond))
goto done;
vlan_features &= NETIF_F_ALL_FOR_ALL;
+ mpls_features &= NETIF_F_ALL_FOR_ALL;
bond_for_each_slave(bond, slave, iter) {
vlan_features = netdev_increment_features(vlan_features,
@@@ -1101,6 -1109,11 +1109,11 @@@
enc_features = netdev_increment_features(enc_features,
slave->dev->hw_enc_features,
BOND_ENC_FEATURES);
+
+ mpls_features = netdev_increment_features(mpls_features,
+ slave->dev->mpls_features,
+ BOND_MPLS_FEATURES);
+
dst_release_flag &= slave->dev->priv_flags;
if (slave->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = slave->dev->hard_header_len;
@@@ -1114,6 -1127,7 +1127,7 @@@ done
bond_dev->vlan_features = vlan_features;
bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
NETIF_F_GSO_UDP_L4;
+ bond_dev->mpls_features = mpls_features;
bond_dev->gso_max_segs = gso_max_segs;
netif_set_gso_max_size(bond_dev, gso_max_size);
@@@ -1369,15 -1383,14 +1383,14 @@@ int bond_enslave(struct net_device *bon
if (!bond->params.use_carrier &&
slave_dev->ethtool_ops->get_link == NULL &&
slave_ops->ndo_do_ioctl == NULL) {
- netdev_warn(bond_dev, "no link monitoring support for %s\n",
- slave_dev->name);
+ slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
}
/* already in-use? */
if (netdev_is_rx_handler_busy(slave_dev)) {
NL_SET_ERR_MSG(extack, "Device is in use and cannot be enslaved");
- netdev_err(bond_dev,
- "Error: Device is in use and cannot be enslaved\n");
+ slave_err(bond_dev, slave_dev,
+ "Error: Device is in use and cannot be enslaved\n");
return -EBUSY;
}
@@@ -1390,21 -1403,16 +1403,16 @@@
/* vlan challenged mutual exclusion */
/* no need to lock since we're protected by rtnl_lock */
if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
- netdev_dbg(bond_dev, "%s is NETIF_F_VLAN_CHALLENGED\n",
- slave_dev->name);
+ slave_dbg(bond_dev, slave_dev, "is NETIF_F_VLAN_CHALLENGED\n");
if (vlan_uses_dev(bond_dev)) {
NL_SET_ERR_MSG(extack, "Can not enslave VLAN challenged device to VLAN enabled bond");
- netdev_err(bond_dev, "Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
- slave_dev->name, bond_dev->name);
+ slave_err(bond_dev, slave_dev, "Error: cannot enslave VLAN challenged slave on VLAN enabled bond\n");
return -EPERM;
} else {
- netdev_warn(bond_dev, "enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
- slave_dev->name, slave_dev->name,
- bond_dev->name);
+ slave_warn(bond_dev, slave_dev, "enslaved VLAN challenged slave. Adding VLANs will be blocked as long as it is part of bond.\n");
}
} else {
- netdev_dbg(bond_dev, "%s is !NETIF_F_VLAN_CHALLENGED\n",
- slave_dev->name);
+ slave_dbg(bond_dev, slave_dev, "is !NETIF_F_VLAN_CHALLENGED\n");
}
/* Old ifenslave binaries are no longer supported. These can
@@@ -1414,8 -1422,7 +1422,7 @@@
*/
if (slave_dev->flags & IFF_UP) {
NL_SET_ERR_MSG(extack, "Device can not be enslaved while up");
- netdev_err(bond_dev, "%s is up - this may be due to an out of date ifenslave\n",
- slave_dev->name);
+ slave_err(bond_dev, slave_dev, "slave is up - this may be due to an out of date ifenslave\n");
return -EPERM;
}
@@@ -1428,14 -1435,14 +1435,14 @@@
*/
if (!bond_has_slaves(bond)) {
if (bond_dev->type != slave_dev->type) {
- netdev_dbg(bond_dev, "change device type from %d to %d\n",
- bond_dev->type, slave_dev->type);
+ slave_dbg(bond_dev, slave_dev, "change device type from %d to %d\n",
+ bond_dev->type, slave_dev->type);
res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
bond_dev);
res = notifier_to_errno(res);
if (res) {
- netdev_err(bond_dev, "refused to change device type\n");
+ slave_err(bond_dev, slave_dev, "refused to change device type\n");
return -EBUSY;
}
@@@ -1455,31 -1462,31 +1462,31 @@@
}
} else if (bond_dev->type != slave_dev->type) {
NL_SET_ERR_MSG(extack, "Device type is different from other slaves");
- netdev_err(bond_dev, "%s ether type (%d) is different from other slaves (%d), can not enslave it\n",
- slave_dev->name, slave_dev->type, bond_dev->type);
+ slave_err(bond_dev, slave_dev, "ether type (%d) is different from other slaves (%d), can not enslave it\n",
+ slave_dev->type, bond_dev->type);
return -EINVAL;
}
if (slave_dev->type == ARPHRD_INFINIBAND &&
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
NL_SET_ERR_MSG(extack, "Only active-backup mode is supported for infiniband slaves");
- netdev_warn(bond_dev, "Type (%d) supports only active-backup mode\n",
- slave_dev->type);
+ slave_warn(bond_dev, slave_dev, "Type (%d) supports only active-backup mode\n",
+ slave_dev->type);
res = -EOPNOTSUPP;
goto err_undo_flags;
}
if (!slave_ops->ndo_set_mac_address ||
slave_dev->type == ARPHRD_INFINIBAND) {
- netdev_warn(bond_dev, "The slave device specified does not support setting the MAC address\n");
+ slave_warn(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address\n");
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
if (!bond_has_slaves(bond)) {
bond->params.fail_over_mac = BOND_FOM_ACTIVE;
- netdev_warn(bond_dev, "Setting fail_over_mac to active for active-backup mode\n");
+ slave_warn(bond_dev, slave_dev, "Setting fail_over_mac to active for active-backup mode\n");
} else {
NL_SET_ERR_MSG(extack, "Slave device does not support setting the MAC address, but fail_over_mac is not set to active");
- netdev_err(bond_dev, "The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n");
+ slave_err(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n");
res = -EOPNOTSUPP;
goto err_undo_flags;
}
@@@ -1515,7 -1522,7 +1522,7 @@@
new_slave->original_mtu = slave_dev->mtu;
res = dev_set_mtu(slave_dev, bond->dev->mtu);
if (res) {
- netdev_dbg(bond_dev, "Error %d calling dev_set_mtu\n", res);
+ slave_err(bond_dev, slave_dev, "Error %d calling dev_set_mtu\n", res);
goto err_free;
}
@@@ -1536,7 -1543,7 +1543,7 @@@
res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss,
extack);
if (res) {
- netdev_dbg(bond_dev, "Error %d calling set_mac_address\n", res);
+ slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
goto err_restore_mtu;
}
}
@@@ -1547,7 -1554,7 +1554,7 @@@
/* open the slave since the application closed it */
res = dev_open(slave_dev, extack);
if (res) {
- netdev_dbg(bond_dev, "Opening slave %s failed\n", slave_dev->name);
+ slave_err(bond_dev, slave_dev, "Opening slave failed\n");
goto err_restore_mac;
}
@@@ -1566,8 -1573,7 +1573,7 @@@
res = vlan_vids_add_by_dev(slave_dev, bond_dev);
if (res) {
- netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
- slave_dev->name);
+ slave_err(bond_dev, slave_dev, "Couldn't add bond vlan ids\n");
goto err_close;
}
@@@ -1597,12 -1603,10 +1603,10 @@@
* supported); thus, we don't need to change
* the messages for netif_carrier.
*/
- netdev_warn(bond_dev, "MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n",
- slave_dev->name);
+ slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n");
} else if (link_reporting == -1) {
/* unable get link status using mii/ethtool */
- netdev_warn(bond_dev, "can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n",
- slave_dev->name);
+ slave_warn(bond_dev, slave_dev, "can't get link status from slave; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n");
}
}
@@@ -1636,9 -1640,9 +1640,9 @@@
if (new_slave->link != BOND_LINK_DOWN)
new_slave->last_link_up = jiffies;
- netdev_dbg(bond_dev, "Initial state of slave_dev is BOND_LINK_%s\n",
- new_slave->link == BOND_LINK_DOWN ? "DOWN" :
- (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
+ slave_dbg(bond_dev, slave_dev, "Initial state of slave is BOND_LINK_%s\n",
+ new_slave->link == BOND_LINK_DOWN ? "DOWN" :
+ (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
if (bond_uses_primary(bond) && bond->params.primary[0]) {
/* if there is a primary slave, remember it */
@@@ -1679,7 -1683,7 +1683,7 @@@
bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
break;
default:
- netdev_dbg(bond_dev, "This slave is always active in trunk mode\n");
+ slave_dbg(bond_dev, slave_dev, "This slave is always active in trunk mode\n");
/* always active in trunk mode */
bond_set_active_slave(new_slave);
@@@ -1698,7 -1702,7 +1702,7 @@@
#ifdef CONFIG_NET_POLL_CONTROLLER
if (bond->dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
- netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
+ slave_info(bond_dev, slave_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
res = -EBUSY;
goto err_detach;
}
@@@ -1711,19 -1715,19 +1715,19 @@@
res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
new_slave);
if (res) {
- netdev_dbg(bond_dev, "Error %d calling netdev_rx_handler_register\n", res);
+ slave_dbg(bond_dev, slave_dev, "Error %d calling netdev_rx_handler_register\n", res);
goto err_detach;
}
res = bond_master_upper_dev_link(bond, new_slave, extack);
if (res) {
- netdev_dbg(bond_dev, "Error %d calling bond_master_upper_dev_link\n", res);
+ slave_dbg(bond_dev, slave_dev, "Error %d calling bond_master_upper_dev_link\n", res);
goto err_unregister;
}
res = bond_sysfs_slave_add(new_slave);
if (res) {
- netdev_dbg(bond_dev, "Error %d calling bond_sysfs_slave_add\n", res);
+ slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res);
goto err_upper_unlink;
}
@@@ -1777,10 -1781,9 +1781,9 @@@
bond_update_slave_arr(bond, NULL);
- netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
- slave_dev->name,
- bond_is_active_slave(new_slave) ? "an active" : "a backup",
- new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
+ slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n",
+ bond_is_active_slave(new_slave) ? "an active" : "a backup",
+ new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
/* enslave is successful */
bond_queue_slave_event(new_slave);
@@@ -1875,8 -1878,7 +1878,7 @@@ static int __bond_release_one(struct ne
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
!netdev_has_upper_dev(slave_dev, bond_dev)) {
- netdev_dbg(bond_dev, "cannot release %s\n",
- slave_dev->name);
+ slave_dbg(bond_dev, slave_dev, "cannot release slave\n");
return -EINVAL;
}
@@@ -1885,8 -1887,7 +1887,7 @@@
slave = bond_get_slave_by_dev(bond, slave_dev);
if (!slave) {
/* not a slave of this bond */
- netdev_info(bond_dev, "%s not enslaved\n",
- slave_dev->name);
+ slave_info(bond_dev, slave_dev, "interface not enslaved\n");
unblock_netpoll_tx();
return -EINVAL;
}
@@@ -1910,9 -1911,8 +1911,8 @@@
if (bond_mode_can_use_xmit_hash(bond))
bond_update_slave_arr(bond, slave);
- netdev_info(bond_dev, "Releasing %s interface %s\n",
- bond_is_active_slave(slave) ? "active" : "backup",
- slave_dev->name);
+ slave_info(bond_dev, slave_dev, "Releasing %s interface\n",
+ bond_is_active_slave(slave) ? "active" : "backup");
oldcurrent = rcu_access_pointer(bond->curr_active_slave);
@@@ -1922,9 -1922,8 +1922,8 @@@
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond_has_slaves(bond))
- netdev_warn(bond_dev, "the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
- slave_dev->name, slave->perm_hwaddr,
- bond_dev->name, slave_dev->name);
+ slave_warn(bond_dev, slave_dev, "the permanent HWaddr of slave - %pM - is still in use by bond - set the HWaddr of slave to a different address to avoid conflicts\n",
+ slave->perm_hwaddr);
}
if (rtnl_dereference(bond->primary_slave) == slave)
@@@ -1972,8 -1971,7 +1971,7 @@@
bond_compute_features(bond);
if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
(old_features & NETIF_F_VLAN_CHALLENGED))
- netdev_info(bond_dev, "last VLAN challenged slave %s left bond %s - VLAN blocking is removed\n",
- slave_dev->name, bond_dev->name);
+ slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n");
vlan_vids_del_by_dev(slave_dev, bond_dev);
@@@ -2033,8 -2031,8 +2031,8 @@@ int bond_release(struct net_device *bon
/* First release a slave and then destroy the bond if no more slaves are left.
* Must be under rtnl_lock when this function is called.
*/
- static int bond_release_and_destroy(struct net_device *bond_dev,
- struct net_device *slave_dev)
+ static int bond_release_and_destroy(struct net_device *bond_dev,
+ struct net_device *slave_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
int ret;
@@@ -2042,8 -2040,7 +2040,7 @@@
ret = __bond_release_one(bond_dev, slave_dev, false, true);
if (ret == 0 && !bond_has_slaves(bond)) {
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
- netdev_info(bond_dev, "Destroying bond %s\n",
- bond_dev->name);
+ netdev_info(bond_dev, "Destroying bond\n");
bond_remove_proc_entry(bond);
unregister_netdevice(bond_dev);
}
@@@ -2101,13 -2098,12 +2098,12 @@@ static int bond_miimon_inspect(struct b
commit++;
slave->delay = bond->params.downdelay;
if (slave->delay) {
- netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
- (BOND_MODE(bond) ==
- BOND_MODE_ACTIVEBACKUP) ?
- (bond_is_active_slave(slave) ?
- "active " : "backup ") : "",
- slave->dev->name,
- bond->params.downdelay * bond->params.miimon);
+ slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n",
+ (BOND_MODE(bond) ==
+ BOND_MODE_ACTIVEBACKUP) ?
+ (bond_is_active_slave(slave) ?
+ "active " : "backup ") : "",
+ bond->params.downdelay * bond->params.miimon);
}
/*FALLTHRU*/
case BOND_LINK_FAIL:
@@@ -2115,10 -2111,9 +2111,9 @@@
/* recovered before downdelay expired */
bond_propose_link_state(slave, BOND_LINK_UP);
slave->last_link_up = jiffies;
- netdev_info(bond->dev, "link status up again after %d ms for interface %s\n",
- (bond->params.downdelay - slave->delay) *
- bond->params.miimon,
- slave->dev->name);
+ slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
+ (bond->params.downdelay - slave->delay) *
+ bond->params.miimon);
commit++;
continue;
}
@@@ -2141,20 -2136,18 +2136,18 @@@
slave->delay = bond->params.updelay;
if (slave->delay) {
- netdev_info(bond->dev, "link status up for interface %s, enabling it in %d ms\n",
- slave->dev->name,
- ignore_updelay ? 0 :
- bond->params.updelay *
- bond->params.miimon);
+ slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n",
+ ignore_updelay ? 0 :
+ bond->params.updelay *
+ bond->params.miimon);
}
/*FALLTHRU*/
case BOND_LINK_BACK:
if (!link_state) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
- netdev_info(bond->dev, "link status down again after %d ms for interface %s\n",
- (bond->params.updelay - slave->delay) *
- bond->params.miimon,
- slave->dev->name);
+ slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
+ (bond->params.updelay - slave->delay) *
+ bond->params.miimon);
commit++;
continue;
}
@@@ -2210,9 -2203,8 +2203,8 @@@ static void bond_miimon_commit(struct b
bond_needs_speed_duplex(bond)) {
slave->link = BOND_LINK_DOWN;
if (net_ratelimit())
- netdev_warn(bond->dev,
- "failed to get link speed/duplex for %s\n",
- slave->dev->name);
+ slave_warn(bond->dev, slave->dev,
+ "failed to get link speed/duplex\n");
continue;
}
bond_set_slave_link_state(slave, BOND_LINK_UP,
@@@ -2231,10 -2223,9 +2223,9 @@@
bond_set_backup_slave(slave);
}
- netdev_info(bond->dev, "link status definitely up for interface %s, %u Mbps %s duplex\n",
- slave->dev->name,
- slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
- slave->duplex ? "full" : "half");
+ slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
+ slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
+ slave->duplex ? "full" : "half");
bond_miimon_link_change(bond, slave, BOND_LINK_UP);
@@@ -2255,8 -2246,7 +2246,7 @@@
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_NOW);
- netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n",
- slave->dev->name);
+ slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
bond_miimon_link_change(bond, slave, BOND_LINK_DOWN);
@@@ -2266,8 -2256,8 +2256,8 @@@
continue;
default:
- netdev_err(bond->dev, "invalid new link %d on slave %s\n",
- slave->new_link, slave->dev->name);
+ slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n",
+ slave->new_link);
slave->new_link = BOND_LINK_NOCHANGE;
continue;
@@@ -2294,6 -2284,7 +2284,7 @@@ static void bond_mii_monitor(struct wor
struct bonding *bond = container_of(work, struct bonding,
mii_work.work);
bool should_notify_peers = false;
+ bool commit;
unsigned long delay;
struct slave *slave;
struct list_head *iter;
@@@ -2304,12 -2295,19 +2295,19 @@@
goto re_arm;
rcu_read_lock();
-
should_notify_peers = bond_should_notify_peers(bond);
-
- if (bond_miimon_inspect(bond)) {
+ commit = !!bond_miimon_inspect(bond);
+ if (bond->send_peer_notif) {
rcu_read_unlock();
+ if (rtnl_trylock()) {
+ bond->send_peer_notif--;
+ rtnl_unlock();
+ }
+ } else {
+ rcu_read_unlock();
+ }
+ if (commit) {
/* Race avoidance with bond_close cancel of workqueue */
if (!rtnl_trylock()) {
delay = 1;
@@@ -2323,8 -2321,7 +2321,7 @@@
bond_miimon_commit(bond);
rtnl_unlock(); /* might sleep, hold no other locks */
- } else
- rcu_read_unlock();
+ }
re_arm:
if (bond->params.miimon)
@@@ -2364,15 -2361,16 +2361,16 @@@ static bool bond_has_this_ip(struct bon
* switches in VLAN mode (especially if ports are configured as
* "native" to a VLAN) might not pass non-tagged frames.
*/
- static void bond_arp_send(struct net_device *slave_dev, int arp_op,
- __be32 dest_ip, __be32 src_ip,
- struct bond_vlan_tag *tags)
+ static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
+ __be32 src_ip, struct bond_vlan_tag *tags)
{
struct sk_buff *skb;
struct bond_vlan_tag *outer_tag = tags;
+ struct net_device *slave_dev = slave->dev;
+ struct net_device *bond_dev = slave->bond->dev;
- netdev_dbg(slave_dev, "arp %d on slave %s: dst %pI4 src %pI4\n",
- arp_op, slave_dev->name, &dest_ip, &src_ip);
+ slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
+ arp_op, &dest_ip, &src_ip);
skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
NULL, slave_dev->dev_addr, NULL);
@@@ -2394,8 -2392,8 +2392,8 @@@
continue;
}
- netdev_dbg(slave_dev, "inner tag: proto %X vid %X\n",
- ntohs(outer_tag->vlan_proto), tags->vlan_id);
+ slave_dbg(bond_dev, slave_dev, "inner tag: proto %X vid %X\n",
+ ntohs(outer_tag->vlan_proto), tags->vlan_id);
skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
tags->vlan_id);
if (!skb) {
@@@ -2407,8 -2405,8 +2405,8 @@@
}
/* Set the outer tag */
if (outer_tag->vlan_id) {
- netdev_dbg(slave_dev, "outer tag: proto %X vid %X\n",
- ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
+ slave_dbg(bond_dev, slave_dev, "outer tag: proto %X vid %X\n",
+ ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
__vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
outer_tag->vlan_id);
}
@@@ -2465,7 -2463,8 +2463,8 @@@ static void bond_arp_send_all(struct bo
int i;
for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
- netdev_dbg(bond->dev, "basa: target %pI4\n", &targets[i]);
+ slave_dbg(bond->dev, slave->dev, "%s: target %pI4\n",
+ __func__, &targets[i]);
tags = NULL;
/* Find out through which dev should the packet go */
@@@ -2479,7 -2478,7 +2478,7 @@@
net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
bond->dev->name,
&targets[i]);
- bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
+ bond_arp_send(slave, ARPOP_REQUEST, targets[i],
0, tags);
continue;
}
@@@ -2496,7 -2495,7 +2495,7 @@@
goto found;
/* Not our device - skip */
- netdev_dbg(bond->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
+ slave_dbg(bond->dev, slave->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
&targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL");
ip_rt_put(rt);
@@@ -2505,8 -2504,7 +2504,7 @@@
found:
addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
ip_rt_put(rt);
- bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
- addr, tags);
+ bond_arp_send(slave, ARPOP_REQUEST, targets[i], addr, tags);
kfree(tags);
}
}
@@@ -2516,15 -2514,15 +2514,15 @@@ static void bond_validate_arp(struct bo
int i;
if (!sip || !bond_has_this_ip(bond, tip)) {
- netdev_dbg(bond->dev, "bva: sip %pI4 tip %pI4 not found\n",
- &sip, &tip);
+ slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 tip %pI4 not found\n",
+ __func__, &sip, &tip);
return;
}
i = bond_get_targets_ip(bond->params.arp_targets, sip);
if (i == -1) {
- netdev_dbg(bond->dev, "bva: sip %pI4 not found in targets\n",
- &sip);
+ slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 not found in targets\n",
+ __func__, &sip);
return;
}
slave->last_rx = jiffies;
@@@ -2552,8 -2550,8 +2550,8 @@@ int bond_arp_rcv(const struct sk_buff *
alen = arp_hdr_len(bond->dev);
- netdev_dbg(bond->dev, "bond_arp_rcv: skb->dev %s\n",
- skb->dev->name);
+ slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
+ __func__, skb->dev->name);
if (alen > skb_headlen(skb)) {
arp = kmalloc(alen, GFP_ATOMIC);
@@@ -2577,10 -2575,10 +2575,10 @@@
arp_ptr += 4 + bond->dev->addr_len;
memcpy(&tip, arp_ptr, 4);
- netdev_dbg(bond->dev, "bond_arp_rcv: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
- slave->dev->name, bond_slave_state(slave),
- bond->params.arp_validate, slave_do_arp_validate(bond, slave),
- &sip, &tip);
+ slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
+ __func__, slave->dev->name, bond_slave_state(slave),
+ bond->params.arp_validate, slave_do_arp_validate(bond, slave),
+ &sip, &tip);
curr_active_slave = rcu_dereference(bond->curr_active_slave);
curr_arp_slave = rcu_dereference(bond->current_arp_slave);
@@@ -2683,12 -2681,10 +2681,10 @@@ static void bond_loadbalance_arp_mon(st
* is closed.
*/
if (!oldcurrent) {
- netdev_info(bond->dev, "link status definitely up for interface %s\n",
- slave->dev->name);
+ slave_info(bond->dev, slave->dev, "link status definitely up\n");
do_failover = 1;
} else {
- netdev_info(bond->dev, "interface %s is now up\n",
- slave->dev->name);
+ slave_info(bond->dev, slave->dev, "interface is now up\n");
}
}
} else {
@@@ -2707,8 -2703,7 +2703,7 @@@
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
- netdev_info(bond->dev, "interface %s is now down\n",
- slave->dev->name);
+ slave_info(bond->dev, slave->dev, "interface is now down\n");
if (slave == oldcurrent)
do_failover = 1;
@@@ -2858,8 -2853,7 +2853,7 @@@ static void bond_ab_arp_commit(struct b
RCU_INIT_POINTER(bond->current_arp_slave, NULL);
}
- netdev_info(bond->dev, "link status definitely up for interface %s\n",
- slave->dev->name);
+ slave_info(bond->dev, slave->dev, "link status definitely up\n");
if (!rtnl_dereference(bond->curr_active_slave) ||
slave == rtnl_dereference(bond->primary_slave))
@@@ -2878,8 -2872,7 +2872,7 @@@
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_NOW);
- netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n",
- slave->dev->name);
+ slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
if (slave == rtnl_dereference(bond->curr_active_slave)) {
RCU_INIT_POINTER(bond->current_arp_slave, NULL);
@@@ -2889,8 -2882,8 +2882,8 @@@
continue;
default:
- netdev_err(bond->dev, "impossible: new_link %d on slave %s\n",
- slave->new_link, slave->dev->name);
+ slave_err(bond->dev, slave->dev, "impossible: new_link %d on slave\n",
+ slave->new_link);
continue;
}
@@@ -2961,8 -2954,7 +2954,7 @@@ static bool bond_ab_arp_probe(struct bo
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_LATER);
- netdev_info(bond->dev, "backup interface %s is now down\n",
- slave->dev->name);
+ slave_info(bond->dev, slave->dev, "backup interface is now down\n");
}
if (slave == curr_arp_slave)
found = true;
@@@ -3074,6 -3066,8 +3066,8 @@@ static int bond_master_netdev_event(uns
{
struct bonding *event_bond = netdev_priv(bond_dev);
+ netdev_dbg(bond_dev, "%s called\n", __func__);
+
switch (event) {
case NETDEV_CHANGENAME:
return bond_event_changename(event_bond);
@@@ -3083,10 -3077,6 +3077,6 @@@
case NETDEV_REGISTER:
bond_create_proc_entry(event_bond);
break;
- case NETDEV_NOTIFY_PEERS:
- if (event_bond->send_peer_notif)
- event_bond->send_peer_notif--;
- break;
default:
break;
}
@@@ -3105,12 -3095,17 +3095,17 @@@ static int bond_slave_netdev_event(unsi
* before netdev_rx_handler_register is called in which case
* slave will be NULL
*/
- if (!slave)
+ if (!slave) {
+ netdev_dbg(slave_dev, "%s called on NULL slave\n", __func__);
return NOTIFY_DONE;
+ }
+
bond_dev = slave->bond->dev;
bond = slave->bond;
primary = rtnl_dereference(bond->primary_slave);
+ slave_dbg(bond_dev, slave_dev, "%s called\n", __func__);
+
switch (event) {
case NETDEV_UNREGISTER:
if (bond_dev->type != ARPHRD_ETHER)
@@@ -3212,7 -3207,8 +3207,8 @@@ static int bond_netdev_event(struct not
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
- netdev_dbg(event_dev, "event: %lx\n", event);
+ netdev_dbg(event_dev, "%s received %s\n",
+ __func__, netdev_cmd_to_name(event));
if (!(event_dev->priv_flags & IFF_BONDING))
return NOTIFY_DONE;
@@@ -3220,16 -3216,13 +3216,13 @@@
if (event_dev->flags & IFF_MASTER) {
int ret;
- netdev_dbg(event_dev, "IFF_MASTER\n");
ret = bond_master_netdev_event(event, event_dev);
if (ret != NOTIFY_DONE)
return ret;
}
- if (event_dev->flags & IFF_SLAVE) {
- netdev_dbg(event_dev, "IFF_SLAVE\n");
+ if (event_dev->flags & IFF_SLAVE)
return bond_slave_netdev_event(event, event_dev);
- }
return NOTIFY_DONE;
}
@@@ -3546,12 -3539,11 +3539,11 @@@ static int bond_do_ioctl(struct net_dev
slave_dev = __dev_get_by_name(net, ifr->ifr_slave);
- netdev_dbg(bond_dev, "slave_dev=%p:\n", slave_dev);
+ slave_dbg(bond_dev, slave_dev, "slave_dev=%p:\n", slave_dev);
if (!slave_dev)
return -ENODEV;
- netdev_dbg(bond_dev, "slave_dev->name=%s:\n", slave_dev->name);
switch (cmd) {
case BOND_ENSLAVE_OLD:
case SIOCBONDENSLAVE:
@@@ -3676,7 -3668,7 +3668,7 @@@ static int bond_change_mtu(struct net_d
netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu);
bond_for_each_slave(bond, slave, iter) {
- netdev_dbg(bond_dev, "s %p c_m %p\n",
+ slave_dbg(bond_dev, slave->dev, "s %p c_m %p\n",
slave, slave->dev->netdev_ops->ndo_change_mtu);
res = dev_set_mtu(slave->dev, new_mtu);
@@@ -3690,8 -3682,8 +3682,8 @@@
* means changing their mtu from timer context, which
* is probably not a good idea.
*/
- netdev_dbg(bond_dev, "err %d %s\n", res,
- slave->dev->name);
+ slave_dbg(bond_dev, slave->dev, "err %d setting mtu to %d\n",
+ res, new_mtu);
goto unwind;
}
}
@@@ -3709,10 -3701,9 +3701,9 @@@ unwind
break;
tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
- if (tmp_res) {
- netdev_dbg(bond_dev, "unwind err %d dev %s\n",
- tmp_res, rollback_slave->dev->name);
- }
+ if (tmp_res)
+ slave_dbg(bond_dev, rollback_slave->dev, "unwind err %d\n",
+ tmp_res);
}
return res;
@@@ -3736,7 -3727,7 +3727,7 @@@ static int bond_set_mac_address(struct
return bond_alb_set_mac_address(bond_dev, addr);
- netdev_dbg(bond_dev, "bond=%p\n", bond);
+ netdev_dbg(bond_dev, "%s: bond=%p\n", __func__, bond);
/* If fail_over_mac is enabled, do nothing and return success.
* Returning an error causes ifenslave to fail.
@@@ -3749,7 -3740,8 +3740,8 @@@
return -EADDRNOTAVAIL;
bond_for_each_slave(bond, slave, iter) {
- netdev_dbg(bond_dev, "slave %p %s\n", slave, slave->dev->name);
+ slave_dbg(bond_dev, slave->dev, "%s: slave=%p\n",
+ __func__, slave);
res = dev_set_mac_address(slave->dev, addr, NULL);
if (res) {
/* TODO: consider downing the slave
@@@ -3758,7 -3750,8 +3750,8 @@@
* breakage anyway until ARP finish
* updating, so...
*/
- netdev_dbg(bond_dev, "err %d %s\n", res, slave->dev->name);
+ slave_dbg(bond_dev, slave->dev, "%s: err %d\n",
+ __func__, res);
goto unwind;
}
}
@@@ -3781,8 -3774,8 +3774,8 @@@ unwind
tmp_res = dev_set_mac_address(rollback_slave->dev,
(struct sockaddr *)&tmp_ss, NULL);
if (tmp_res) {
- netdev_dbg(bond_dev, "unwind err %d dev %s\n",
- tmp_res, rollback_slave->dev->name);
+ slave_dbg(bond_dev, rollback_slave->dev, "%s: unwind err %d\n",
+ __func__, tmp_res);
}
}
@@@ -3866,8 -3859,8 +3859,8 @@@ static netdev_tx_t bond_xmit_roundrobin
struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct iphdr *iph = ip_hdr(skb);
struct slave *slave;
+ int slave_cnt;
u32 slave_id;
/* Start with the curr_active_slave that joined the bond as the
@@@ -3876,32 -3869,23 +3869,32 @@@
* send the join/membership reports. The curr_active_slave found
* will send all of this type of traffic.
*/
- if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
- slave = rcu_dereference(bond->curr_active_slave);
- if (slave)
- bond_dev_queue_xmit(bond, skb, slave->dev);
- else
- bond_xmit_slave_id(bond, skb, 0);
- } else {
- int slave_cnt = READ_ONCE(bond->slave_cnt);
+ if (skb->protocol == htons(ETH_P_IP)) {
+ int noff = skb_network_offset(skb);
+ struct iphdr *iph;
- if (likely(slave_cnt)) {
- slave_id = bond_rr_gen_slave_id(bond);
- bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
- } else {
- bond_tx_drop(bond_dev, skb);
+ if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
+ goto non_igmp;
+
+ iph = ip_hdr(skb);
+ if (iph->protocol == IPPROTO_IGMP) {
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (slave)
+ bond_dev_queue_xmit(bond, skb, slave->dev);
+ else
+ bond_xmit_slave_id(bond, skb, 0);
+ return NETDEV_TX_OK;
}
}
+non_igmp:
+ slave_cnt = READ_ONCE(bond->slave_cnt);
+ if (likely(slave_cnt)) {
+ slave_id = bond_rr_gen_slave_id(bond);
+ bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
+ } else {
+ bond_tx_drop(bond_dev, skb);
+ }
return NETDEV_TX_OK;
}
@@@ -4012,9 -3996,8 +4005,8 @@@ int bond_update_slave_arr(struct bondin
if (skipslave == slave)
continue;
- netdev_dbg(bond->dev,
- "Adding slave dev %s to tx hash array[%d]\n",
- slave->dev->name, new_arr->count);
+ slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n",
+ new_arr->count);
new_arr->arr[new_arr->count++] = slave;
}
@@@ -4716,6 -4699,7 +4708,7 @@@ static int bond_check_params(struct bon
params->arp_all_targets = arp_all_targets_value;
params->updelay = updelay;
params->downdelay = downdelay;
+ params->peer_notif_delay = 0;
params->use_carrier = use_carrier;
params->lacp_fast = lacp_fast;
params->primary[0] = 0;
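
[Editor's sketch] The peer_notif_delay hunks change send_peer_notif from a plain count of pending notifications into a countdown measured in miimon intervals: it is charged with num_peer_notif * max(1, peer_notif_delay), decremented once per monitor run, and bond_should_notify_peers() lets a NOTIFY_PEERS event fire only on ticks where the counter is a multiple of the delay. A standalone sketch of just that arithmetic, with hypothetical parameter values:

    #include <stdio.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
            int num_peer_notif = 2;      /* gratuitous announcements to send */
            int peer_notif_delay = 3;    /* miimon intervals between them */
            int send_peer_notif = num_peer_notif * MAX(1, peer_notif_delay);
            int tick;

            for (tick = 1; send_peer_notif > 0; tick++) {
                    /* fire only on multiples of the delay, as in
                     * bond_should_notify_peers() */
                    if (send_peer_notif % MAX(1, peer_notif_delay) == 0)
                            printf("tick %d: notify peers\n", tick);
                    send_peer_notif--;   /* one decrement per monitor run */
            }
            return 0;
    }

With num_peer_notif = 2 and peer_notif_delay = 3 this fires on ticks 1 and 4: two announcements spaced three monitor intervals apart, which is exactly the spacing the new module parameter is meant to provide.
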
diff --combined drivers/net/dsa/mv88e6xxx/chip.c
index 2e8b1ab2c6f7,40b1fb22d8df..6b17cd961d06
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@@ -118,9 -118,9 +118,9 @@@ static irqreturn_t mv88e6xxx_g1_irq_thr
u16 ctl1;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
goto out;
@@@ -135,13 -135,13 +135,13 @@@
}
}
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1);
if (err)
goto unlock;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg);
unlock:
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
goto out;
ctl1 &= GENMASK(chip->g1_irq.nirqs, 0);
@@@ -162,7 -162,7 +162,7 @@@ static void mv88e6xxx_g1_irq_bus_lock(s
{
struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
}
static void mv88e6xxx_g1_irq_bus_sync_unlock(struct irq_data *d)
@@@ -184,7 -184,7 +184,7 @@@
goto out;
out:
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
static const struct irq_chip mv88e6xxx_g1_irq_chip = {
@@@ -239,9 -239,9 +239,9 @@@ static void mv88e6xxx_g1_irq_free(struc
*/
free_irq(chip->irq, chip);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
mv88e6xxx_g1_irq_free_common(chip);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)
@@@ -310,12 -310,12 +310,12 @@@ static int mv88e6xxx_g1_irq_setup(struc
*/
irq_set_lockdep_class(chip->irq, &lock_key, &request_key);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
err = request_threaded_irq(chip->irq, NULL,
mv88e6xxx_g1_irq_thread_fn,
IRQF_ONESHOT | IRQF_SHARED,
dev_name(chip->dev), chip);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (err)
mv88e6xxx_g1_irq_free_common(chip);
@@@ -359,9 -359,9 +359,9 @@@ static void mv88e6xxx_irq_poll_free(str
kthread_cancel_delayed_work_sync(&chip->irq_poll_work);
kthread_destroy_worker(chip->kworker);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
mv88e6xxx_g1_irq_free_common(chip);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask)
@@@ -496,11 -496,11 +496,11 @@@ static void mv88e6xxx_adjust_link(struc
mv88e6xxx_phy_is_internal(ds, port))
return;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_setup_mac(chip, port, phydev->link, phydev->speed,
phydev->duplex, phydev->pause,
phydev->interface);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err && err != -EOPNOTSUPP)
dev_err(ds->dev, "p%d: failed to configure MAC\n", port);
@@@ -616,12 -616,12 +616,12 @@@ static int mv88e6xxx_link_state(struct
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (chip->info->ops->port_link_state)
err = chip->info->ops->port_link_state(chip, port, state);
else
err = -EOPNOTSUPP;
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@@ -651,10 -651,10 +651,10 @@@ static void mv88e6xxx_mac_config(struc
}
pause = !!phylink_test(state->advertising, Pause);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_setup_mac(chip, port, link, speed, duplex, pause,
state->interface);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err && err != -EOPNOTSUPP)
dev_err(ds->dev, "p%d: failed to configure MAC\n", port);
@@@ -665,9 -665,9 +665,9 @@@ static void mv88e6xxx_mac_link_force(st
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = chip->info->ops->port_set_link(chip, port, link);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
dev_err(chip->dev, "p%d: failed to force MAC link\n", port);
@@@ -825,6 -825,12 +825,12 @@@ static int mv88e6095_stats_get_strings(
STATS_TYPE_BANK0 | STATS_TYPE_PORT);
}
+ static int mv88e6250_stats_get_strings(struct mv88e6xxx_chip *chip,
+ uint8_t *data)
+ {
+ return mv88e6xxx_stats_get_strings(chip, data, STATS_TYPE_BANK0);
+ }
+
static int mv88e6320_stats_get_strings(struct mv88e6xxx_chip *chip,
uint8_t *data)
{
@@@ -859,7 -865,7 +865,7 @@@ static void mv88e6xxx_get_strings(struc
if (stringset != ETH_SS_STATS)
return;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (chip->info->ops->stats_get_strings)
count = chip->info->ops->stats_get_strings(chip, data);
@@@ -872,7 -878,7 +878,7 @@@
data += count * ETH_GSTRING_LEN;
mv88e6xxx_atu_vtu_get_strings(data);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
static int mv88e6xxx_stats_get_sset_count(struct mv88e6xxx_chip *chip,
@@@ -895,6 -901,11 +901,11 @@@ static int mv88e6095_stats_get_sset_cou
STATS_TYPE_PORT);
}
+ static int mv88e6250_stats_get_sset_count(struct mv88e6xxx_chip *chip)
+ {
+ return mv88e6xxx_stats_get_sset_count(chip, STATS_TYPE_BANK0);
+ }
+
static int mv88e6320_stats_get_sset_count(struct mv88e6xxx_chip *chip)
{
return mv88e6xxx_stats_get_sset_count(chip, STATS_TYPE_BANK0 |
@@@ -910,7 -921,7 +921,7 @@@ static int mv88e6xxx_get_sset_count(str
if (sset != ETH_SS_STATS)
return 0;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (chip->info->ops->stats_get_sset_count)
count = chip->info->ops->stats_get_sset_count(chip);
if (count < 0)
@@@ -927,7 -938,7 +938,7 @@@
count += ARRAY_SIZE(mv88e6xxx_atu_vtu_stats_strings);
out:
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return count;
}
@@@ -942,11 -953,11 +953,11 @@@ static int mv88e6xxx_stats_get_stats(st
for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
stat = &mv88e6xxx_hw_stats[i];
if (stat->type & types) {
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
data[j] = _mv88e6xxx_get_ethtool_stat(chip, stat, port,
bank1_select,
histogram);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
j++;
}
@@@ -962,6 -973,13 +973,13 @@@ static int mv88e6095_stats_get_stats(st
0, MV88E6XXX_G1_STATS_OP_HIST_RX_TX);
}
+ static int mv88e6250_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
+ {
+ return mv88e6xxx_stats_get_stats(chip, port, data, STATS_TYPE_BANK0,
+ 0, MV88E6XXX_G1_STATS_OP_HIST_RX_TX);
+ }
+
static int mv88e6320_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data)
{
@@@ -998,14 -1016,14 +1016,14 @@@ static void mv88e6xxx_get_stats(struct
if (chip->info->ops->stats_get_stats)
count = chip->info->ops->stats_get_stats(chip, port, data);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (chip->info->ops->serdes_get_stats) {
data += count;
count = chip->info->ops->serdes_get_stats(chip, port, data);
}
data += count;
mv88e6xxx_atu_vtu_get_stats(chip, port, data);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
@@@ -1014,10 -1032,10 +1032,10 @@@
struct mv88e6xxx_chip *chip = ds->priv;
int ret;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
ret = mv88e6xxx_stats_snapshot(chip, port);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (ret < 0)
return;
@@@ -1044,7 -1062,7 +1062,7 @@@ static void mv88e6xxx_get_regs(struct d
memset(p, 0xff, 32 * sizeof(u16));
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
for (i = 0; i < 32; i++) {
@@@ -1053,7 -1071,7 +1071,7 @@@
p[i] = reg;
}
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
static int mv88e6xxx_get_mac_eee(struct dsa_switch *ds, int port,
@@@ -1119,9 -1137,9 +1137,9 @@@ static void mv88e6xxx_port_stp_state_se
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_set_state(chip, port, state);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
dev_err(ds->dev, "p%d: failed to update state\n", port);
@@@ -1306,9 -1324,9 +1324,9 @@@ static void mv88e6xxx_port_fast_age(str
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g1_atu_remove(chip, 0, port, false);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
dev_err(ds->dev, "p%d: failed to flush ATU\n", port);
@@@ -1436,7 -1454,7 +1454,7 @@@ static int mv88e6xxx_port_check_hw_vlan
if (!vid_begin)
return -EOPNOTSUPP;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
do {
err = mv88e6xxx_vtu_getnext(chip, &vlan);
@@@ -1476,7 -1494,7 +1494,7 @@@
} while (vlan.vid < vid_end);
unlock:
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@@ -1492,9 -1510,9 +1510,9 @@@ static int mv88e6xxx_port_vlan_filterin
if (!chip->info->max_vid)
return -EOPNOTSUPP;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_set_8021q_mode(chip, port, mode);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@@ -1628,7 -1646,7 +1646,7 @@@ static void mv88e6xxx_port_vlan_add(str
else
member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_TAGGED;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
if (_mv88e6xxx_port_vlan_add(chip, port, vid, member))
@@@ -1639,7 -1657,7 +1657,7 @@@
dev_err(ds->dev, "p%d: failed to set PVID %d\n", port,
vlan->vid_end);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip,
@@@ -1685,7 -1703,7 +1703,7 @@@ static int mv88e6xxx_port_vlan_del(stru
if (!chip->info->max_vid)
return -EOPNOTSUPP;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_get_pvid(chip, port, &pvid);
if (err)
@@@ -1704,7 -1722,7 +1722,7 @@@
}
unlock:
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@@ -1715,10 -1733,10 +1733,10 @@@ static int mv88e6xxx_port_fdb_add(struc
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid,
MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@@ -1729,10 -1747,10 +1747,10 @@@ static int mv88e6xxx_port_fdb_del(struc
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid,
MV88E6XXX_G1_ATU_DATA_STATE_UNUSED);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@@ -1749,9 -1767,7 +1767,7 @@@ static int mv88e6xxx_port_db_dump_fid(s
eth_broadcast_addr(addr.mac);
do {
- mutex_lock(&chip->reg_lock);
err = mv88e6xxx_g1_atu_getnext(chip, fid, &addr);
- mutex_unlock(&chip->reg_lock);
if (err)
return err;
@@@ -1784,10 -1800,7 +1800,7 @@@ static int mv88e6xxx_port_db_dump(struc
int err;
/* Dump port's default Filtering Information Database (VLAN ID 0) */
- mutex_lock(&chip->reg_lock);
err = mv88e6xxx_port_get_fid(chip, port, &fid);
- mutex_unlock(&chip->reg_lock);
-
if (err)
return err;
@@@ -1797,9 -1810,7 +1810,7 @@@
/* Dump VLANs' Filtering Information Databases */
do {
- mutex_lock(&chip->reg_lock);
err = mv88e6xxx_vtu_getnext(chip, &vlan);
- mutex_unlock(&chip->reg_lock);
if (err)
return err;
@@@ -1819,8 -1830,13 +1830,13 @@@ static int mv88e6xxx_port_fdb_dump(stru
dsa_fdb_dump_cb_t *cb, void *data)
{
struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
- return mv88e6xxx_port_db_dump(chip, port, cb, data);
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_port_db_dump(chip, port, cb, data);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
}
static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
@@@ -1867,9 -1883,9 +1883,9 @@@ static int mv88e6xxx_port_bridge_join(s
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_bridge_map(chip, br);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@@ -1879,11 -1895,11 +1895,11 @@@ static void mv88e6xxx_port_bridge_leave
{
struct mv88e6xxx_chip *chip = ds->priv;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (mv88e6xxx_bridge_map(chip, br) ||
mv88e6xxx_port_vlan_map(chip, port))
dev_err(ds->dev, "failed to remap in-chip Port VLAN\n");
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds, int dev,
@@@ -1895,9 -1911,9 +1911,9 @@@
if (!mv88e6xxx_has_pvt(chip))
return 0;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_pvt_map(chip, dev, port);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@@ -1910,10 -1926,10 +1926,10 @@@ static void mv88e6xxx_crosschip_bridge_
if (!mv88e6xxx_has_pvt(chip))
return;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (mv88e6xxx_pvt_map(chip, dev, port))
dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n");
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip)
@@@ -2264,14 -2280,14 +2280,14 @@@ static int mv88e6xxx_port_enable(struc
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_serdes_power(chip, port, true);
if (!err && chip->info->ops->serdes_irq_setup)
err = chip->info->ops->serdes_irq_setup(chip, port);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@@ -2280,7 -2296,7 +2296,7 @@@ static void mv88e6xxx_port_disable(stru
{
struct mv88e6xxx_chip *chip = ds->priv;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED))
dev_err(chip->dev, "failed to disable port\n");
@@@ -2291,7 -2307,7 +2307,7 @@@
if (mv88e6xxx_serdes_power(chip, port, false))
dev_err(chip->dev, "failed to power off SERDES\n");
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds,
@@@ -2300,9 -2316,9 +2316,9 @@@
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g1_atu_set_age_time(chip, ageing_time);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@@ -2432,7 -2448,7 +2448,7 @@@ static int mv88e6xxx_setup(struct dsa_s
chip->ds = ds;
ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip);
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (chip->info->ops->setup_errata) {
err = chip->info->ops->setup_errata(chip);
@@@ -2539,7 -2555,7 +2555,7 @@@
goto unlock;
unlock:
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@@ -2554,9 -2570,9 +2570,9 @@@ static int mv88e6xxx_mdio_read(struct m
if (!chip->info->ops->phy_read)
return -EOPNOTSUPP;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (reg == MII_PHYSID2) {
/* Some internal PHYs don't have a model number. */
@@@ -2589,9 -2605,9 +2605,9 @@@ static int mv88e6xxx_mdio_write(struct
if (!chip->info->ops->phy_write)
return -EOPNOTSUPP;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = chip->info->ops->phy_write(chip, bus, phy, reg, val);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@@ -2606,9 -2622,9 +2622,9 @@@ static int mv88e6xxx_mdio_register(stru
int err;
if (external) {
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_g2_scratch_gpio_set_smi(chip, true);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
return err;
@@@ -2729,9 -2745,9 +2745,9 @@@ static int mv88e6xxx_get_eeprom(struct
if (!chip->info->ops->get_eeprom)
return -EOPNOTSUPP;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = chip->info->ops->get_eeprom(chip, eeprom, data);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
return err;
@@@ -2753,9 -2769,9 +2769,9 @@@ static int mv88e6xxx_set_eeprom(struct
if (eeprom->magic != 0xc3ec4951)
return -EINVAL;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = chip->info->ops->set_eeprom(chip, eeprom, data);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@@ -3444,6 -3460,44 +3460,44 @@@ static const struct mv88e6xxx_ops mv88e
.phylink_validate = mv88e6352_phylink_validate,
};
+ static const struct mv88e6xxx_ops mv88e6250_ops = {
+ /* MV88E6XXX_FAMILY_6250 */
+ .ieee_pri_map = mv88e6250_g1_ieee_pri_map,
+ .ip_pri_map = mv88e6085_g1_ip_pri_map,
+ .irl_init_all = mv88e6352_g2_irl_init_all,
+ .get_eeprom = mv88e6xxx_g2_get_eeprom16,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom16,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
+ .port_set_speed = mv88e6250_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+ .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
+ .port_link_state = mv88e6250_port_link_state,
+ .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_get_sset_count = mv88e6250_stats_get_sset_count,
+ .stats_get_strings = mv88e6250_stats_get_strings,
+ .stats_get_stats = mv88e6250_stats_get_stats,
+ .set_cpu_port = mv88e6095_g1_set_cpu_port,
+ .set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6250_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .pot_clear = mv88e6xxx_g2_pot_clear,
+ .reset = mv88e6250_g1_reset,
+ .vtu_getnext = mv88e6250_g1_vtu_getnext,
+ .vtu_loadpurge = mv88e6250_g1_vtu_loadpurge,
+ .phylink_validate = mv88e6065_phylink_validate,
+ };
+
static const struct mv88e6xxx_ops mv88e6290_ops = {
/* MV88E6XXX_FAMILY_6390 */
.setup_errata = mv88e6390_setup_errata,
@@@ -4229,6 -4283,27 +4283,27 @@@ static const struct mv88e6xxx_info mv88
.ops = &mv88e6240_ops,
},
+ [MV88E6250] = {
+ .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6250,
+ .family = MV88E6XXX_FAMILY_6250,
+ .name = "Marvell 88E6250",
+ .num_databases = 64,
+ .num_ports = 7,
+ .num_internal_phys = 5,
+ .max_vid = 4095,
+ .port_base_addr = 0x08,
+ .phy_base_addr = 0x00,
+ .global1_addr = 0x0f,
+ .global2_addr = 0x07,
+ .age_time_coeff = 15000,
+ .g1_irqs = 9,
+ .g2_irqs = 10,
+ .atu_move_port_mask = 0xf,
+ .dual_chip = true,
+ .tag_protocol = DSA_TAG_PROTO_DSA,
+ .ops = &mv88e6250_ops,
+ },
+
[MV88E6290] = {
.prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6290,
.family = MV88E6XXX_FAMILY_6390,
@@@ -4457,9 -4532,9 +4532,9 @@@ static int mv88e6xxx_detect(struct mv88
u16 id;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_read(chip, 0, MV88E6XXX_PORT_SWITCH_ID, &id);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
return err;
@@@ -4522,12 -4597,12 +4597,12 @@@ static void mv88e6xxx_port_mdb_add(stru
{
struct mv88e6xxx_chip *chip = ds->priv;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC))
dev_err(ds->dev, "p%d: failed to load multicast MAC address\n",
port);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
}
static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
@@@ -4536,10 -4611,10 +4611,10 @@@
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid,
MV88E6XXX_G1_ATU_DATA_STATE_UNUSED);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@@ -4550,12 -4625,12 +4625,12 @@@ static int mv88e6xxx_port_egress_floods
struct mv88e6xxx_chip *chip = ds->priv;
int err = -EOPNOTSUPP;
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (chip->info->ops->port_set_egress_floods)
err = chip->info->ops->port_set_egress_floods(chip, port,
unicast,
multicast);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
@@@ -4711,8 -4786,6 +4786,8 @@@ static int mv88e6xxx_probe(struct mdio_
err = PTR_ERR(chip->reset);
goto out;
}
+ if (chip->reset)
+ usleep_range(1000, 2000);
err = mv88e6xxx_detect(chip);
if (err)
@@@ -4728,9 -4801,9 +4803,9 @@@
chip->eeprom_len = pdata->eeprom_len;
}
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_switch_reset(chip);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
goto out;
@@@ -4749,12 -4822,12 +4824,12 @@@
* the PHYs will link their interrupts to these interrupt
* controllers
*/
- mutex_lock(&chip->reg_lock);
+ mv88e6xxx_reg_lock(chip);
if (chip->irq > 0)
err = mv88e6xxx_g1_irq_setup(chip);
else
err = mv88e6xxx_irq_poll_setup(chip);
- mutex_unlock(&chip->reg_lock);
+ mv88e6xxx_reg_unlock(chip);
if (err)
goto out;
@@@ -4839,6 -4912,10 +4914,10 @@@ static const struct of_device_id mv88e6
.compatible = "marvell,mv88e6190",
.data = &mv88e6xxx_table[MV88E6190],
},
+ {
+ .compatible = "marvell,mv88e6250",
+ .data = &mv88e6xxx_table[MV88E6250],
+ },
{ /* sentinel */ },
};
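The mv88e6xxx hunks above mechanically replace every open-coded mutex_lock(&chip->reg_lock)/mutex_unlock(&chip->reg_lock) pair with mv88e6xxx_reg_lock()/mv88e6xxx_reg_unlock() helpers. A minimal sketch of what such wrappers look like, assuming they simply delegate to the mutex (the real definitions live elsewhere in the driver and may add assertions):

    /* single choke point for serializing switch register access */
    static void mv88e6xxx_reg_lock(struct mv88e6xxx_chip *chip)
    {
            mutex_lock(&chip->reg_lock);
    }

    static void mv88e6xxx_reg_unlock(struct mv88e6xxx_chip *chip)
    {
            mutex_unlock(&chip->reg_lock);
    }

Centralizing the lock behind helpers lets later patches add lockdep assertions or change the primitive without touching every call site.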
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index c12c1bab0fe4,c4986b519191..656ed80647f0
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@@ -684,7 -684,7 +684,7 @@@ static void *bnx2x_frag_alloc(const str
if (unlikely(gfpflags_allow_blocking(gfp_mask)))
return (void *)__get_free_page(gfp_mask);
- return netdev_alloc_frag(fp->rx_frag_size);
+ return napi_alloc_frag(fp->rx_frag_size);
}
return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
@@@ -3857,12 -3857,9 +3857,12 @@@ netdev_tx_t bnx2x_start_xmit(struct sk_
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
if (!(bp->flags & TX_TIMESTAMPING_EN)) {
+ bp->eth_stats.ptp_skip_tx_ts++;
BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
} else if (bp->ptp_tx_skb) {
- BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
+ bp->eth_stats.ptp_skip_tx_ts++;
+ netdev_err_once(bp->dev,
+ "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
} else {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
/* schedule check for Tx timestamp */
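The first bnx2x hunk swaps netdev_alloc_frag() for napi_alloc_frag() on the non-blocking path; napi_alloc_frag() draws from the per-CPU NAPI page-fragment cache and is intended for the softirq context in which the RX ring is refilled. A hedged sketch of the resulting allocation pattern (the helper name here is illustrative, not the driver's):

    /* illustrative only: pick the frag allocator by calling context */
    static void *rx_frag_alloc(unsigned int frag_size, gfp_t gfp_mask)
    {
            if (gfpflags_allow_blocking(gfp_mask))
                    return (void *)__get_free_page(gfp_mask);

            /* softirq/NAPI context: use the per-CPU frag cache */
            return napi_alloc_frag(frag_size);
    }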
diff --combined drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 9090c79387c1,b7b62273c955..dd15d35ab745
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@@ -2130,12 -2130,12 +2130,12 @@@ static int bnxt_poll(struct napi_struc
}
}
if (bp->flags & BNXT_FLAG_DIM) {
- struct net_dim_sample dim_sample;
+ struct dim_sample dim_sample;
- net_dim_sample(cpr->event_ctr,
- cpr->rx_packets,
- cpr->rx_bytes,
- &dim_sample);
+ dim_update_sample(cpr->event_ctr,
+ cpr->rx_packets,
+ cpr->rx_bytes,
+ &dim_sample);
net_dim(&cpr->dim, dim_sample);
}
return work_done;
@@@ -5508,16 -5508,7 +5508,16 @@@ static int bnxt_cp_rings_in_use(struct
static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
{
- return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
+ int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
+ int cp = bp->cp_nr_rings;
+
+ if (!ulp_stat)
+ return cp;
+
+ if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
+ return bnxt_get_ulp_msix_base(bp) + ulp_stat;
+
+ return cp + ulp_stat;
}
static bool bnxt_need_reserve_rings(struct bnxt *bp)
@@@ -7486,7 -7477,11 +7486,7 @@@ unsigned int bnxt_get_avail_cp_rings_fo
unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
{
- unsigned int stat;
-
- stat = bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_ulp_stat_ctxs(bp);
- stat -= bp->cp_nr_rings;
- return stat;
+ return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
}
int bnxt_get_avail_msix(struct bnxt *bp, int num)
@@@ -7818,7 -7813,7 +7818,7 @@@ static void bnxt_enable_napi(struct bnx
if (bp->bnapi[i]->rx_ring) {
INIT_WORK(&cpr->dim.work, bnxt_dim_work);
- cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
napi_enable(&bp->bnapi[i]->napi);
}
@@@ -10267,10 -10262,10 +10267,10 @@@ static void bnxt_remove_one(struct pci_
bnxt_dcb_free(bp);
kfree(bp->edev);
bp->edev = NULL;
+ bnxt_cleanup_pci(bp);
bnxt_free_ctx_mem(bp);
kfree(bp->ctx);
bp->ctx = NULL;
- bnxt_cleanup_pci(bp);
bnxt_free_port_stats(bp);
free_netdev(dev);
}
@@@ -10864,7 -10859,6 +10864,7 @@@ static void bnxt_shutdown(struct pci_de
if (system_state == SYSTEM_POWER_OFF) {
bnxt_clear_int_mode(bp);
+ pci_disable_device(pdev);
pci_wake_from_d3(pdev, bp->wol);
pci_set_power_state(pdev, PCI_D3hot);
}
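The bnxt hunks track the rename of the interrupt-moderation library from net_dim to the generic dim API: struct net_dim_sample becomes struct dim_sample, net_dim_sample() becomes dim_update_sample(), and the NET_DIM_CQ_PERIOD_MODE_* constants lose their NET_ prefix. A minimal sketch of the new call sequence as used above, with the cpr fields taken from the hunk:

    struct dim_sample dim_sample = {};

    /* feed the per-ring counters into the generic DIM sample */
    dim_update_sample(cpr->event_ctr, cpr->rx_packets, cpr->rx_bytes,
                      &dim_sample);
    net_dim(&cpr->dim, dim_sample);  /* may schedule cpr->dim.work */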
diff --combined drivers/net/ethernet/cadence/macb.h
index 98735584570d,515bfd2c9e3f..03983bd46eef
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@@ -496,11 -496,7 +496,11 @@@
/* Bitfields in TISUBN */
#define GEM_SUBNSINCR_OFFSET 0
-#define GEM_SUBNSINCR_SIZE 16
+#define GEM_SUBNSINCRL_OFFSET 24
+#define GEM_SUBNSINCRL_SIZE 8
+#define GEM_SUBNSINCRH_OFFSET 0
+#define GEM_SUBNSINCRH_SIZE 16
+#define GEM_SUBNSINCR_SIZE 24
/* Bitfields in TI */
#define GEM_NSINCR_OFFSET 0
@@@ -838,9 -834,6 +838,9 @@@ struct gem_tx_ts
/* limit RX checksum offload to TCP and UDP packets */
#define GEM_RX_CSUM_CHECKED_MASK 2
+/* Scaled PPM fraction */
+#define PPM_FRACTION 16
+
/* struct macb_tx_skb - data about an skb which is being transmitted
* @skb: skb currently being transmitted, only set for the last buffer
* of the frame
@@@ -1067,7 -1060,8 +1067,8 @@@ struct macb_or_gem_ops
int (*mog_alloc_rx_buffers)(struct macb *bp);
void (*mog_free_rx_buffers)(struct macb *bp);
void (*mog_init_rings)(struct macb *bp);
- int (*mog_rx)(struct macb_queue *queue, int budget);
+ int (*mog_rx)(struct macb_queue *queue, struct napi_struct *napi,
+ int budget);
};
/* MACB-PTP interface: adapt to platform needs. */
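The macb.h hunk widens the TSU sub-nanosecond increment from 16 to 24 bits and, because the hardware keeps the low byte at register bits [31:24] and the high 16 bits at [15:0], splits it into GEM_SUBNSINCRL/GEM_SUBNSINCRH. A hedged sketch of how a 24-bit fraction would be packed into those swapped fields, derived only from the offsets defined above (the helper name is hypothetical; the real packing lives in the macb PTP code):

    /* pack a 24-bit sub-ns increment into the TISUBN register layout */
    static u32 gem_tisubn_pack(u32 sub_ns)
    {
            u32 lo = sub_ns & 0xff;           /* -> bits [31:24], SUBNSINCRL */
            u32 hi = (sub_ns >> 8) & 0xffff;  /* -> bits [15:0],  SUBNSINCRH */

            return (lo << 24) | hi;
    }

PPM_FRACTION 16 matches the scaled-ppm convention, where a frequency adjustment value carries a 16-bit binary fraction.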
diff --combined drivers/net/ethernet/ti/cpsw.c
index 4e3026f9abed,32b7b3b74a6b..1e70ae7bbd61
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@@ -457,16 -457,13 +457,13 @@@ static void cpsw_rx_handler(void *token
}
requeue:
- if (netif_dormant(ndev)) {
- dev_kfree_skb_any(new_skb);
- return;
- }
-
ch = cpsw->rxv[skb_get_queue_mapping(new_skb)].ch;
ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
skb_tailroom(new_skb), 0);
- if (WARN_ON(ret < 0))
+ if (ret < 0) {
+ WARN_ON(ret == -ENOMEM);
dev_kfree_skb_any(new_skb);
+ }
}
void cpsw_split_res(struct cpsw_common *cpsw)
@@@ -1051,9 -1048,9 +1048,9 @@@ int cpsw_fill_rx_channels(struct cpsw_p
}
skb_set_queue_mapping(skb, ch);
- ret = cpdma_chan_submit(cpsw->rxv[ch].ch, skb,
- skb->data, skb_tailroom(skb),
- 0);
+ ret = cpdma_chan_idle_submit(cpsw->rxv[ch].ch, skb,
+ skb->data,
+ skb_tailroom(skb), 0);
if (ret < 0) {
cpsw_err(priv, ifup,
"cannot submit skb to channel %d rx, error %d\n",
@@@ -1423,8 -1420,11 +1420,11 @@@ static int cpsw_ndo_open(struct net_dev
return 0;
err_cleanup:
- cpdma_ctlr_stop(cpsw->dma);
- for_each_slave(priv, cpsw_slave_stop, cpsw);
+ if (!cpsw->usage_count) {
+ cpdma_ctlr_stop(cpsw->dma);
+ for_each_slave(priv, cpsw_slave_stop, cpsw);
+ }
+
pm_runtime_put_sync(cpsw->dev);
netif_carrier_off(priv->ndev);
return ret;
@@@ -2179,7 -2179,6 +2179,7 @@@ static int cpsw_probe_dt(struct cpsw_pl
return ret;
}
+ slave_data->slave_node = slave_node;
slave_data->phy_node = of_parse_phandle(slave_node,
"phy-handle", 0);
parp = of_get_property(slave_node, "phy_id", &lenp);
@@@ -2263,8 -2262,7 +2263,7 @@@ no_phy_slave
static void cpsw_remove_dt(struct platform_device *pdev)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_common *cpsw = platform_get_drvdata(pdev);
struct cpsw_platform_data *data = &cpsw->data;
struct device_node *node = pdev->dev.of_node;
struct device_node *slave_node;
@@@ -2331,7 -2329,6 +2330,7 @@@ static int cpsw_probe_dual_emac(struct
/* register the network device */
SET_NETDEV_DEV(ndev, cpsw->dev);
+ ndev->dev.of_node = cpsw->slaves[1].data->slave_node;
ret = register_netdev(ndev);
if (ret)
dev_err(cpsw->dev, "cpsw: error registering net device\n");
@@@ -2476,7 -2473,7 +2475,7 @@@ static int cpsw_probe(struct platform_d
goto clean_cpts;
}
- platform_set_drvdata(pdev, ndev);
+ platform_set_drvdata(pdev, cpsw);
priv = netdev_priv(ndev);
priv->cpsw = cpsw;
priv->ndev = ndev;
@@@ -2509,7 -2506,6 +2508,7 @@@
/* register the network device */
SET_NETDEV_DEV(ndev, dev);
+ ndev->dev.of_node = cpsw->slaves[0].data->slave_node;
ret = register_netdev(ndev);
if (ret) {
dev_err(dev, "error registering net device\n");
@@@ -2570,9 -2566,8 +2569,8 @@@ clean_runtime_disable_ret
static int cpsw_remove(struct platform_device *pdev)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- int ret;
+ struct cpsw_common *cpsw = platform_get_drvdata(pdev);
+ int i, ret;
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
@@@ -2580,9 -2575,9 +2578,9 @@@
return ret;
}
- if (cpsw->data.dual_emac)
- unregister_netdev(cpsw->slaves[1].ndev);
- unregister_netdev(ndev);
+ for (i = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].ndev)
+ unregister_netdev(cpsw->slaves[i].ndev);
cpts_release(cpsw->cpts);
cpdma_ctlr_destroy(cpsw->dma);
@@@ -2595,20 -2590,13 +2593,13 @@@
#ifdef CONFIG_PM_SLEEP
static int cpsw_suspend(struct device *dev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
-
- if (cpsw->data.dual_emac) {
- int i;
+ struct cpsw_common *cpsw = dev_get_drvdata(dev);
+ int i;
- for (i = 0; i < cpsw->data.slaves; i++) {
+ for (i = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].ndev)
if (netif_running(cpsw->slaves[i].ndev))
cpsw_ndo_stop(cpsw->slaves[i].ndev);
- }
- } else {
- if (netif_running(ndev))
- cpsw_ndo_stop(ndev);
- }
/* Select sleep pin state */
pinctrl_pm_select_sleep_state(dev);
@@@ -2618,25 -2606,20 +2609,20 @@@
static int cpsw_resume(struct device *dev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_common *cpsw = dev_get_drvdata(dev);
+ int i;
/* Select default pin state */
pinctrl_pm_select_default_state(dev);
/* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
rtnl_lock();
- if (cpsw->data.dual_emac) {
- int i;
- for (i = 0; i < cpsw->data.slaves; i++) {
+ for (i = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].ndev)
if (netif_running(cpsw->slaves[i].ndev))
cpsw_ndo_open(cpsw->slaves[i].ndev);
- }
- } else {
- if (netif_running(ndev))
- cpsw_ndo_open(ndev);
- }
+
rtnl_unlock();
return 0;
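The cpsw hunks change the platform driver data from a single net_device to the shared cpsw_common, so remove/suspend/resume stop special-casing dual_emac and instead walk the slave array, skipping slots with no registered net_device. The recurring pattern, expressed as a hypothetical helper (not present in the driver):

    /* iterate registered slave net_devices; illustrative helper only */
    static void cpsw_for_each_ndev(struct cpsw_common *cpsw,
                                   void (*fn)(struct net_device *ndev))
    {
            int i;

            for (i = 0; i < cpsw->data.slaves; i++)
                    if (cpsw->slaves[i].ndev)
                            fn(cpsw->slaves[i].ndev);
    }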
diff --combined drivers/net/vxlan.c
index b4283f52a09d,5e2323592e08..3d9bcc957f7d
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@@ -468,14 -468,19 +468,19 @@@ static u32 eth_vni_hash(const unsigned
return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1);
}
+ static u32 fdb_head_index(struct vxlan_dev *vxlan, const u8 *mac, __be32 vni)
+ {
+ if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)
+ return eth_vni_hash(mac, vni);
+ else
+ return eth_hash(mac);
+ }
+
/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
const u8 *mac, __be32 vni)
{
- if (vxlan->cfg.flags & VXLAN_F_COLLECT_METADATA)
- return &vxlan->fdb_head[eth_vni_hash(mac, vni)];
- else
- return &vxlan->fdb_head[eth_hash(mac)];
+ return &vxlan->fdb_head[fdb_head_index(vxlan, mac, vni)];
}
/* Look up Ethernet address in forwarding table */
@@@ -590,8 -595,8 +595,8 @@@ int vxlan_fdb_replay(const struct net_d
return -EINVAL;
vxlan = netdev_priv(dev);
- spin_lock_bh(&vxlan->hash_lock);
for (h = 0; h < FDB_HASH_SIZE; ++h) {
+ spin_lock_bh(&vxlan->hash_lock[h]);
hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) {
if (f->vni == vni) {
list_for_each_entry(rdst, &f->remotes, list) {
@@@ -599,14 -604,16 +604,16 @@@
f, rdst,
extack);
if (rc)
- goto out;
+ goto unlock;
}
}
}
+ spin_unlock_bh(&vxlan->hash_lock[h]);
}
+ return 0;
- out:
- spin_unlock_bh(&vxlan->hash_lock);
+ unlock:
+ spin_unlock_bh(&vxlan->hash_lock[h]);
return rc;
}
EXPORT_SYMBOL_GPL(vxlan_fdb_replay);
@@@ -622,14 -629,15 +629,15 @@@ void vxlan_fdb_clear_offload(const stru
return;
vxlan = netdev_priv(dev);
- spin_lock_bh(&vxlan->hash_lock);
for (h = 0; h < FDB_HASH_SIZE; ++h) {
+ spin_lock_bh(&vxlan->hash_lock[h]);
hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist)
if (f->vni == vni)
list_for_each_entry(rdst, &f->remotes, list)
rdst->offloaded = false;
+ spin_unlock_bh(&vxlan->hash_lock[h]);
}
- spin_unlock_bh(&vxlan->hash_lock);
+
}
EXPORT_SYMBOL_GPL(vxlan_fdb_clear_offload);
@@@ -804,14 -812,6 +812,14 @@@ static struct vxlan_fdb *vxlan_fdb_allo
return f;
}
+static void vxlan_fdb_insert(struct vxlan_dev *vxlan, const u8 *mac,
+ __be32 src_vni, struct vxlan_fdb *f)
+{
+ ++vxlan->addrcnt;
+ hlist_add_head_rcu(&f->hlist,
+ vxlan_fdb_head(vxlan, mac, src_vni));
+}
+
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
__u16 state, __be16 port, __be32 src_vni,
@@@ -837,13 -837,18 +845,13 @@@
return rc;
}
- ++vxlan->addrcnt;
- hlist_add_head_rcu(&f->hlist,
- vxlan_fdb_head(vxlan, mac, src_vni));
-
*fdb = f;
return 0;
}
-static void vxlan_fdb_free(struct rcu_head *head)
+static void __vxlan_fdb_free(struct vxlan_fdb *f)
{
- struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
struct vxlan_rdst *rd, *nd;
list_for_each_entry_safe(rd, nd, &f->remotes, list) {
@@@ -853,13 -858,6 +861,13 @@@
kfree(f);
}
+static void vxlan_fdb_free(struct rcu_head *head)
+{
+ struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
+
+ __vxlan_fdb_free(f);
+}
+
static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
bool do_notify, bool swdev_notify)
{
@@@ -987,7 -985,6 +995,7 @@@ static int vxlan_fdb_update_create(stru
if (rc < 0)
return rc;
+ vxlan_fdb_insert(vxlan, mac, src_vni, f);
rc = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH,
swdev_notify, extack);
if (rc)
@@@ -1116,6 -1113,7 +1124,7 @@@ static int vxlan_fdb_add(struct ndmsg *
__be16 port;
__be32 src_vni, vni;
u32 ifindex;
+ u32 hash_index;
int err;
if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
@@@ -1134,12 -1132,13 +1143,13 @@@
if (vxlan->default_dst.remote_ip.sa.sa_family != ip.sa.sa_family)
return -EAFNOSUPPORT;
- spin_lock_bh(&vxlan->hash_lock);
+ hash_index = fdb_head_index(vxlan, addr, src_vni);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
port, src_vni, vni, ifindex,
ndm->ndm_flags | NTF_VXLAN_ADDED_BY_USER,
true, extack);
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
}
@@@ -1187,16 -1186,18 +1197,18 @@@ static int vxlan_fdb_delete(struct ndms
__be32 src_vni, vni;
__be16 port;
u32 ifindex;
+ u32 hash_index;
int err;
err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
if (err)
return err;
- spin_lock_bh(&vxlan->hash_lock);
+ hash_index = fdb_head_index(vxlan, addr, src_vni);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex,
true);
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
}
@@@ -1308,8 -1309,10 +1320,10 @@@ static bool vxlan_snoop(struct net_devi
f->updated = jiffies;
vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL);
} else {
+ u32 hash_index = fdb_head_index(vxlan, src_mac, vni);
+
/* learned new entry */
- spin_lock(&vxlan->hash_lock);
+ spin_lock(&vxlan->hash_lock[hash_index]);
/* close off race between vxlan_flush and incoming packets */
if (netif_running(dev))
@@@ -1320,7 -1323,7 +1334,7 @@@
vni,
vxlan->default_dst.remote_vni,
ifindex, NTF_SELF, true, NULL);
- spin_unlock(&vxlan->hash_lock);
+ spin_unlock(&vxlan->hash_lock[hash_index]);
}
return false;
@@@ -2230,7 -2233,7 +2244,7 @@@ static struct rtable *vxlan_get_route(s
fl4.fl4_sport = sport;
rt = ip_route_output_key(vxlan->net, &fl4);
- if (likely(!IS_ERR(rt))) {
+ if (!IS_ERR(rt)) {
if (rt->dst.dev == dev) {
netdev_dbg(dev, "circular route to %pI4\n", &daddr);
ip_rt_put(rt);
@@@ -2710,7 -2713,7 +2724,7 @@@ static void vxlan_cleanup(struct timer_
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct hlist_node *p, *n;
- spin_lock(&vxlan->hash_lock);
+ spin_lock(&vxlan->hash_lock[h]);
hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
struct vxlan_fdb *f
= container_of(p, struct vxlan_fdb, hlist);
@@@ -2732,7 -2735,7 +2746,7 @@@
} else if (time_before(timeout, next_timer))
next_timer = timeout;
}
- spin_unlock(&vxlan->hash_lock);
+ spin_unlock(&vxlan->hash_lock[h]);
}
mod_timer(&vxlan->age_timer, next_timer);
@@@ -2775,12 -2778,13 +2789,13 @@@ static int vxlan_init(struct net_devic
static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
{
struct vxlan_fdb *f;
+ u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, vni);
- spin_lock_bh(&vxlan->hash_lock);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
if (f)
vxlan_fdb_destroy(vxlan, f, true, true);
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
}
static void vxlan_uninit(struct net_device *dev)
@@@ -2825,9 -2829,10 +2840,10 @@@ static void vxlan_flush(struct vxlan_de
{
unsigned int h;
- spin_lock_bh(&vxlan->hash_lock);
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct hlist_node *p, *n;
+
+ spin_lock_bh(&vxlan->hash_lock[h]);
hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
struct vxlan_fdb *f
= container_of(p, struct vxlan_fdb, hlist);
@@@ -2837,8 -2842,8 +2853,8 @@@
if (!is_zero_ether_addr(f->eth_addr))
vxlan_fdb_destroy(vxlan, f, true, true);
}
+ spin_unlock_bh(&vxlan->hash_lock[h]);
}
- spin_unlock_bh(&vxlan->hash_lock);
}
/* Cleanup timer and forwarding table on shutdown */
@@@ -3022,7 -3027,6 +3038,6 @@@ static void vxlan_setup(struct net_devi
dev->max_mtu = ETH_MAX_MTU;
INIT_LIST_HEAD(&vxlan->next);
- spin_lock_init(&vxlan->hash_lock);
timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE);
@@@ -3030,8 -3034,10 +3045,10 @@@
gro_cells_init(&vxlan->gro_cells, dev);
- for (h = 0; h < FDB_HASH_SIZE; ++h)
+ for (h = 0; h < FDB_HASH_SIZE; ++h) {
+ spin_lock_init(&vxlan->hash_lock[h]);
INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
+ }
}
static void vxlan_ether_setup(struct net_device *dev)
@@@ -3582,17 -3588,12 +3599,17 @@@ static int __vxlan_dev_create(struct ne
if (err)
goto errout;
- /* notify default fdb entry */
if (f) {
+ vxlan_fdb_insert(vxlan, all_zeros_mac,
+ vxlan->default_dst.remote_vni, f);
+
+ /* notify default fdb entry */
err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
RTM_NEWNEIGH, true, extack);
- if (err)
- goto errout;
+ if (err) {
+ vxlan_fdb_destroy(vxlan, f, false, false);
+ goto unregister;
+ }
}
list_add(&vxlan->next, &vn->vxlan_list);
@@@ -3604,8 -3605,7 +3621,8 @@@ errout
* destroy the entry by hand here.
*/
if (f)
- vxlan_fdb_destroy(vxlan, f, false, false);
+ __vxlan_fdb_free(f);
+unregister:
if (unregister)
unregister_netdevice(dev);
return err;
@@@ -3931,7 -3931,9 +3948,9 @@@ static int vxlan_changelink(struct net_
/* handle default dst entry */
if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) {
- spin_lock_bh(&vxlan->hash_lock);
+ u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni);
+
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
if (!vxlan_addr_any(&conf.remote_ip)) {
err = vxlan_fdb_update(vxlan, all_zeros_mac,
&conf.remote_ip,
@@@ -3942,7 -3944,7 +3961,7 @@@
conf.remote_ifindex,
NTF_SELF, true, extack);
if (err) {
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
}
}
@@@ -3954,7 -3956,7 +3973,7 @@@
dst->remote_vni,
dst->remote_ifindex,
true);
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
}
if (conf.age_interval != vxlan->cfg.age_interval)
@@@ -4209,8 -4211,11 +4228,11 @@@ vxlan_fdb_offloaded_set(struct net_devi
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *rdst;
struct vxlan_fdb *f;
+ u32 hash_index;
+
+ hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
- spin_lock_bh(&vxlan->hash_lock);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
if (!f)
@@@ -4226,7 -4231,7 +4248,7 @@@
rdst->offloaded = fdb_info->offloaded;
out:
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
}
static int
@@@ -4235,11 -4240,13 +4257,13 @@@ vxlan_fdb_external_learn_add(struct net
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct netlink_ext_ack *extack;
+ u32 hash_index;
int err;
+ hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
extack = switchdev_notifier_info_to_extack(&fdb_info->info);
- spin_lock_bh(&vxlan->hash_lock);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
err = vxlan_fdb_update(vxlan, fdb_info->eth_addr, &fdb_info->remote_ip,
NUD_REACHABLE,
NLM_F_CREATE | NLM_F_REPLACE,
@@@ -4249,7 -4256,7 +4273,7 @@@
fdb_info->remote_ifindex,
NTF_USE | NTF_SELF | NTF_EXT_LEARNED,
false, extack);
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
}
@@@ -4260,9 -4267,11 +4284,11 @@@ vxlan_fdb_external_learn_del(struct net
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
+ u32 hash_index;
int err = 0;
- spin_lock_bh(&vxlan->hash_lock);
+ hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
if (!f)
@@@ -4276,7 -4285,7 +4302,7 @@@
fdb_info->remote_ifindex,
false);
- spin_unlock_bh(&vxlan->hash_lock);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
return err;
}
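The vxlan hunks split the single FDB spinlock into one lock per hash bucket, with fdb_head_index() choosing the same bucket for the lock as for the chain. The per-entry pattern every call site converges on (sketch; mac and vni stand for whatever values are in scope at each site):

    u32 hash_index = fdb_head_index(vxlan, mac, vni);

    spin_lock_bh(&vxlan->hash_lock[hash_index]);
    /* ... look up or mutate the fdb_head[hash_index] chain ... */
    spin_unlock_bh(&vxlan->hash_lock[hash_index]);

Full-table walks (flush, cleanup, replay) now take and drop each bucket lock in turn instead of holding one lock across all FDB_HASH_SIZE chains.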
diff --combined drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 38ab24d96244,602c31b3992a..f5df5b370d78
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@@ -90,8 -90,10 +90,10 @@@
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
{
- #define PCI_DUMP_SIZE 64
- #define PREFIX_LEN 32
+ #define PCI_DUMP_SIZE 352
+ #define PCI_MEM_DUMP_SIZE 64
+ #define PCI_PARENT_DUMP_SIZE 524
+ #define PREFIX_LEN 32
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct pci_dev *pdev = trans_pcie->pci_dev;
u32 i, pos, alloc_size, *ptr, *buf;
@@@ -102,11 -104,15 +104,15 @@@
/* Should be a multiple of 4 */
BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
+ BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3);
+ BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3);
+
/* Alloc a max size buffer */
- if (PCI_ERR_ROOT_ERR_SRC + 4 > PCI_DUMP_SIZE)
- alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
- else
- alloc_size = PCI_DUMP_SIZE + PREFIX_LEN;
+ alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
+ alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN);
+ alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN);
+ alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN);
+
buf = kmalloc(alloc_size, GFP_ATOMIC);
if (!buf)
return;
@@@ -123,7 -129,7 +129,7 @@@
print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
- for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
+ for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
*ptr = iwl_read32(trans, i);
print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
@@@ -146,7 -152,7 +152,7 @@@
IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
pci_name(pdev));
- for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
+ for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
if (pci_read_config_dword(pdev, i, ptr))
goto err_read;
print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
@@@ -188,14 -194,14 +194,14 @@@ static void iwl_pcie_free_fw_monitor(st
{
int i;
- for (i = 0; i < trans->num_blocks; i++) {
- dma_free_coherent(trans->dev, trans->fw_mon[i].size,
- trans->fw_mon[i].block,
- trans->fw_mon[i].physical);
- trans->fw_mon[i].block = NULL;
- trans->fw_mon[i].physical = 0;
- trans->fw_mon[i].size = 0;
- trans->num_blocks--;
+ for (i = 0; i < trans->dbg.num_blocks; i++) {
+ dma_free_coherent(trans->dev, trans->dbg.fw_mon[i].size,
+ trans->dbg.fw_mon[i].block,
+ trans->dbg.fw_mon[i].physical);
+ trans->dbg.fw_mon[i].block = NULL;
+ trans->dbg.fw_mon[i].physical = 0;
+ trans->dbg.fw_mon[i].size = 0;
+ trans->dbg.num_blocks--;
}
}
@@@ -230,10 -236,10 +236,10 @@@ static void iwl_pcie_alloc_fw_monitor_b
(unsigned long)BIT(power - 10),
(unsigned long)BIT(max_power - 10));
- trans->fw_mon[trans->num_blocks].block = cpu_addr;
- trans->fw_mon[trans->num_blocks].physical = phys;
- trans->fw_mon[trans->num_blocks].size = size;
- trans->num_blocks++;
+ trans->dbg.fw_mon[trans->dbg.num_blocks].block = cpu_addr;
+ trans->dbg.fw_mon[trans->dbg.num_blocks].physical = phys;
+ trans->dbg.fw_mon[trans->dbg.num_blocks].size = size;
+ trans->dbg.num_blocks++;
}
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
@@@ -254,7 -260,7 +260,7 @@@
* This function allocates the default fw monitor.
* The optional additional ones will be allocated at runtime
*/
- if (trans->num_blocks)
+ if (trans->dbg.num_blocks)
return;
iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
@@@ -889,21 -895,21 +895,21 @@@ static int iwl_pcie_load_cpu_sections(s
void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
- const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv;
+ const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
int i;
- if (trans->ini_valid) {
- if (!trans->num_blocks)
+ if (trans->dbg.ini_valid) {
+ if (!trans->dbg.num_blocks)
return;
IWL_DEBUG_FW(trans,
"WRT: applying DRAM buffer[0] destination\n");
iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
- trans->fw_mon[0].physical >>
+ trans->dbg.fw_mon[0].physical >>
MON_BUFF_SHIFT_VER2);
iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
- (trans->fw_mon[0].physical +
- trans->fw_mon[0].size - 256) >>
+ (trans->dbg.fw_mon[0].physical +
+ trans->dbg.fw_mon[0].size - 256) >>
MON_BUFF_SHIFT_VER2);
return;
}
@@@ -916,7 -922,7 +922,7 @@@
else
IWL_WARN(trans, "PCI should have external buffer debug\n");
- for (i = 0; i < trans->dbg_n_dest_reg; i++) {
+ for (i = 0; i < trans->dbg.n_dest_reg; i++) {
u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
u32 val = le32_to_cpu(dest->reg_ops[i].val);
@@@ -955,18 -961,19 +961,19 @@@
}
monitor:
- if (dest->monitor_mode == EXTERNAL_MODE && trans->fw_mon[0].size) {
+ if (dest->monitor_mode == EXTERNAL_MODE && trans->dbg.fw_mon[0].size) {
iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
- trans->fw_mon[0].physical >> dest->base_shift);
+ trans->dbg.fw_mon[0].physical >>
+ dest->base_shift);
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
- (trans->fw_mon[0].physical +
- trans->fw_mon[0].size - 256) >>
+ (trans->dbg.fw_mon[0].physical +
+ trans->dbg.fw_mon[0].size - 256) >>
dest->end_shift);
else
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
- (trans->fw_mon[0].physical +
- trans->fw_mon[0].size) >>
+ (trans->dbg.fw_mon[0].physical +
+ trans->dbg.fw_mon[0].size) >>
dest->end_shift);
}
}
@@@ -1003,12 -1010,12 +1010,12 @@@ static int iwl_pcie_load_given_ucode(st
trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
iwl_pcie_alloc_fw_monitor(trans, 0);
- if (trans->fw_mon[0].size) {
+ if (trans->dbg.fw_mon[0].size) {
iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
- trans->fw_mon[0].physical >> 4);
+ trans->dbg.fw_mon[0].physical >> 4);
iwl_write_prph(trans, MON_BUFF_END_ADDR,
- (trans->fw_mon[0].physical +
- trans->fw_mon[0].size) >> 4);
+ (trans->dbg.fw_mon[0].physical +
+ trans->dbg.fw_mon[0].size) >> 4);
}
} else if (iwl_pcie_dbg_on(trans)) {
iwl_pcie_apply_destination(trans);
@@@ -1236,7 -1243,7 +1243,7 @@@ static void _iwl_trans_pcie_stop_device
trans_pcie->is_down = true;
/* Stop dbgc before stopping device */
- _iwl_fw_dbg_stop_recording(trans, NULL);
+ iwl_fw_dbg_stop_recording(trans, NULL);
/* tell the device to stop sending interrupts */
iwl_disable_interrupts(trans);
@@@ -2729,8 -2736,8 +2736,8 @@@ static int iwl_dbgfs_monitor_data_open(
struct iwl_trans *trans = inode->i_private;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- if (!trans->dbg_dest_tlv ||
- trans->dbg_dest_tlv->monitor_mode != EXTERNAL_MODE) {
+ if (!trans->dbg.dest_tlv ||
+ trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) {
IWL_ERR(trans, "Debug destination is not set to DRAM\n");
return -ENOENT;
}
@@@ -2777,22 -2784,22 +2784,22 @@@ static ssize_t iwl_dbgfs_monitor_data_r
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- void *cpu_addr = (void *)trans->fw_mon[0].block, *curr_buf;
+ void *cpu_addr = (void *)trans->dbg.fw_mon[0].block, *curr_buf;
struct cont_rec *data = &trans_pcie->fw_mon_data;
u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
ssize_t size, bytes_copied = 0;
bool b_full;
- if (trans->dbg_dest_tlv) {
+ if (trans->dbg.dest_tlv) {
write_ptr_addr =
- le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
- wrap_cnt_addr = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
+ le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
+ wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
} else {
write_ptr_addr = MON_BUFF_WRPTR;
wrap_cnt_addr = MON_BUFF_CYCLE_CNT;
}
- if (unlikely(!trans->dbg_rec_on))
+ if (unlikely(!trans->dbg.rec_on))
return 0;
mutex_lock(&data->mutex);
@@@ -2816,7 -2823,7 +2823,7 @@@
} else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
write_ptr < data->prev_wr_ptr) {
- size = trans->fw_mon[0].size - data->prev_wr_ptr;
+ size = trans->dbg.fw_mon[0].size - data->prev_wr_ptr;
curr_buf = cpu_addr + data->prev_wr_ptr;
b_full = iwl_write_to_user_buf(user_buf, count,
curr_buf, &size,
@@@ -3035,14 -3042,10 +3042,10 @@@ iwl_trans_pcie_dump_pointers(struct iwl
base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
write_ptr = DBGC_CUR_DBGBUF_STATUS;
wrap_cnt = DBGC_DBGBUF_WRAP_AROUND;
- } else if (trans->ini_valid) {
- base = iwl_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2);
- write_ptr = iwl_umac_prph(trans, MON_BUFF_WRPTR_VER2);
- wrap_cnt = iwl_umac_prph(trans, MON_BUFF_CYCLE_CNT_VER2);
- } else if (trans->dbg_dest_tlv) {
- write_ptr = le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
- wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
- base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+ } else if (trans->dbg.dest_tlv) {
+ write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
+ wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
+ base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
} else {
base = MON_BUFF_BASE_ADDR;
write_ptr = MON_BUFF_WRPTR;
@@@ -3069,11 -3072,10 +3072,10 @@@ iwl_trans_pcie_dump_monitor(struct iwl_
{
u32 len = 0;
- if ((trans->num_blocks &&
+ if (trans->dbg.dest_tlv ||
+ (trans->dbg.num_blocks &&
(trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
- trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210 ||
- trans->ini_valid)) ||
- (trans->dbg_dest_tlv && !trans->ini_valid)) {
+ trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
struct iwl_fw_error_dump_fw_mon *fw_mon_data;
(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
@@@ -3082,32 -3084,32 +3084,32 @@@
iwl_trans_pcie_dump_pointers(trans, fw_mon_data);
len += sizeof(**data) + sizeof(*fw_mon_data);
- if (trans->num_blocks) {
+ if (trans->dbg.num_blocks) {
memcpy(fw_mon_data->data,
- trans->fw_mon[0].block,
- trans->fw_mon[0].size);
+ trans->dbg.fw_mon[0].block,
+ trans->dbg.fw_mon[0].size);
- monitor_len = trans->fw_mon[0].size;
- } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
+ monitor_len = trans->dbg.fw_mon[0].size;
+ } else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
/*
* Update pointers to reflect actual values after
* shifting
*/
- if (trans->dbg_dest_tlv->version) {
+ if (trans->dbg.dest_tlv->version) {
base = (iwl_read_prph(trans, base) &
IWL_LDBG_M2S_BUF_BA_MSK) <<
- trans->dbg_dest_tlv->base_shift;
+ trans->dbg.dest_tlv->base_shift;
base *= IWL_M2S_UNIT_SIZE;
base += trans->cfg->smem_offset;
} else {
base = iwl_read_prph(trans, base) <<
- trans->dbg_dest_tlv->base_shift;
+ trans->dbg.dest_tlv->base_shift;
}
iwl_trans_read_mem(trans, base, fw_mon_data->data,
monitor_len / sizeof(u32));
- } else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
+ } else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) {
monitor_len =
iwl_trans_pci_dump_marbh_monitor(trans,
fw_mon_data,
@@@ -3126,40 -3128,40 +3128,40 @@@
static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
{
- if (trans->num_blocks) {
+ if (trans->dbg.num_blocks) {
*len += sizeof(struct iwl_fw_error_dump_data) +
sizeof(struct iwl_fw_error_dump_fw_mon) +
- trans->fw_mon[0].size;
- return trans->fw_mon[0].size;
- } else if (trans->dbg_dest_tlv) {
+ trans->dbg.fw_mon[0].size;
+ return trans->dbg.fw_mon[0].size;
+ } else if (trans->dbg.dest_tlv) {
u32 base, end, cfg_reg, monitor_len;
- if (trans->dbg_dest_tlv->version == 1) {
- cfg_reg = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+ if (trans->dbg.dest_tlv->version == 1) {
+ cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
cfg_reg = iwl_read_prph(trans, cfg_reg);
base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
- trans->dbg_dest_tlv->base_shift;
+ trans->dbg.dest_tlv->base_shift;
base *= IWL_M2S_UNIT_SIZE;
base += trans->cfg->smem_offset;
monitor_len =
(cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
- trans->dbg_dest_tlv->end_shift;
+ trans->dbg.dest_tlv->end_shift;
monitor_len *= IWL_M2S_UNIT_SIZE;
} else {
- base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
- end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);
+ base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
+ end = le32_to_cpu(trans->dbg.dest_tlv->end_reg);
base = iwl_read_prph(trans, base) <<
- trans->dbg_dest_tlv->base_shift;
+ trans->dbg.dest_tlv->base_shift;
end = iwl_read_prph(trans, end) <<
- trans->dbg_dest_tlv->end_shift;
+ trans->dbg.dest_tlv->end_shift;
/* Make "end" point to the actual end */
if (trans->cfg->device_family >=
IWL_DEVICE_FAMILY_8000 ||
- trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
- end += (1 << trans->dbg_dest_tlv->end_shift);
+ trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
+ end += (1 << trans->dbg.dest_tlv->end_shift);
monitor_len = end - base;
}
*len += sizeof(struct iwl_fw_error_dump_data) +
@@@ -3192,7 -3194,7 +3194,7 @@@ static struct iwl_trans_dump_dat
len = sizeof(*dump_data);
/* host commands */
- if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD))
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq)
len += sizeof(*data) +
cmdq->n_window * (sizeof(*txcmd) +
TFD_MAX_PAYLOAD_SIZE);
@@@ -3244,7 -3246,7 +3246,7 @@@
len = 0;
data = (void *)dump_data->data;
- if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) {
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
u16 tfd_size = trans_pcie->tfd_size;
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
@@@ -3569,17 -3571,15 +3571,17 @@@ struct iwl_trans *iwl_trans_pcie_alloc(
trans->cfg = &iwlax210_2ax_cfg_so_jf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
- trans->cfg = &iwlax210_2ax_cfg_so_gf_a0;
+ trans->cfg = &iwlax211_2ax_cfg_so_gf_a0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
- trans->cfg = &iwlax210_2ax_cfg_so_gf4_a0;
+ trans->cfg = &iwlax411_2ax_cfg_so_gf4_a0;
}
} else if (cfg == &iwl_ax101_cfg_qu_hr) {
- if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
- trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
+ if ((CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+ trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) ||
+ (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR1))) {
trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
@@@ -3601,9 -3601,8 +3603,9 @@@
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
((trans->cfg != &iwl_ax200_cfg_cc &&
- trans->cfg != &killer1650x_2ax_cfg &&
- trans->cfg != &killer1650w_2ax_cfg) ||
+ trans->cfg != &killer1650x_2ax_cfg &&
+ trans->cfg != &killer1650w_2ax_cfg &&
+ trans->cfg != &iwl_ax201_cfg_quz_hr) ||
trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
u32 hw_status;
@@@ -3684,6 -3683,7 +3686,7 @@@ void iwl_trans_pcie_sync_nmi(struct iwl
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
+ bool interrupts_enabled = test_bit(STATUS_INT_ENABLED, &trans->status);
u32 inta_addr, sw_err_bit;
if (trans_pcie->msix_enabled) {
@@@ -3694,7 -3694,12 +3697,12 @@@
sw_err_bit = CSR_INT_BIT_SW_ERR;
}
- iwl_disable_interrupts(trans);
+ /* if the interrupts were already disabled, there is no point in
+ * calling iwl_disable_interrupts
+ */
+ if (interrupts_enabled)
+ iwl_disable_interrupts(trans);
+
iwl_force_nmi(trans);
while (time_after(timeout, jiffies)) {
u32 inta_hw = iwl_read32(trans, inta_addr);
@@@ -3708,6 -3713,13 +3716,13 @@@
mdelay(1);
}
- iwl_enable_interrupts(trans);
+
+ /* enable interrupts only if they were already enabled before this
+ * function ran, to avoid a case where the driver enables interrupts
+ * before the proper configuration was made
+ */
+ if (interrupts_enabled)
+ iwl_enable_interrupts(trans);
+
iwl_trans_fw_error(trans);
}
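Besides folding the scattered debug fields into trans->dbg, the iwlwifi hunks make iwl_trans_pcie_sync_nmi() preserve the interrupt-enable state rather than unconditionally disabling and re-enabling. The save/restore shape, reduced to a sketch built from the calls in the hunk:

    bool was_enabled = test_bit(STATUS_INT_ENABLED, &trans->status);

    if (was_enabled)
            iwl_disable_interrupts(trans);
    /* ... force the NMI and poll for the response ... */
    if (was_enabled)
            iwl_enable_interrupts(trans);

This avoids enabling interrupts on a device that had not yet been fully configured when the NMI was requested.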
diff --combined drivers/net/wireless/marvell/mwifiex/scan.c
index e2786ab612ca,d870d4b2e03d..0d6d41727037
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@@ -1361,25 -1361,21 +1361,25 @@@ int mwifiex_update_bss_desc_with_ie(str
break;
case WLAN_EID_VENDOR_SPECIFIC:
- if (element_len + 2 < sizeof(vendor_ie->vend_hdr))
- return -EINVAL;
-
vendor_ie = (struct ieee_types_vendor_specific *)
current_ptr;
- if (!memcmp
- (vendor_ie->vend_hdr.oui, wpa_oui,
- sizeof(wpa_oui))) {
+ /* 802.11 requires at least 3-byte OUI. */
+ if (element_len < sizeof(vendor_ie->vend_hdr.oui.oui))
+ return -EINVAL;
+
+ /* Not long enough for a match? Skip it. */
+ if (element_len < sizeof(wpa_oui))
+ break;
+
+ if (!memcmp(&vendor_ie->vend_hdr.oui, wpa_oui,
+ sizeof(wpa_oui))) {
bss_entry->bcn_wpa_ie =
(struct ieee_types_vendor_specific *)
current_ptr;
bss_entry->wpa_offset = (u16)
(current_ptr - bss_entry->beacon_buf);
- } else if (!memcmp(vendor_ie->vend_hdr.oui, wmm_oui,
+ } else if (!memcmp(&vendor_ie->vend_hdr.oui, wmm_oui,
sizeof(wmm_oui))) {
if (total_ie_len ==
sizeof(struct ieee_types_wmm_parameter) ||
@@@ -1504,7 -1500,6 +1504,6 @@@ int mwifiex_scan_networks(struct mwifie
u8 filtered_scan;
u8 scan_current_chan_only;
u8 max_chan_per_scan;
- unsigned long flags;
if (adapter->scan_processing) {
mwifiex_dbg(adapter, WARN,
@@@ -1525,9 -1520,9 +1524,9 @@@
return -EFAULT;
}
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->scan_processing = true;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
scan_cfg_out = kzalloc(sizeof(union mwifiex_scan_cmd_config_tlv),
GFP_KERNEL);
@@@ -1555,13 -1550,12 +1554,12 @@@
/* Get scan command from scan_pending_q and put to cmd_pending_q */
if (!ret) {
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ spin_lock_bh(&adapter->scan_pending_q_lock);
if (!list_empty(&adapter->scan_pending_q)) {
cmd_node = list_first_entry(&adapter->scan_pending_q,
struct cmd_ctrl_node, list);
list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node);
queue_work(adapter->workqueue, &adapter->main_work);
@@@ -1572,8 -1566,7 +1570,7 @@@
mwifiex_wait_queue_complete(adapter, cmd_node);
}
} else {
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
}
}
@@@ -1581,9 -1574,9 +1578,9 @@@
kfree(scan_chan_list);
done:
if (ret) {
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->scan_processing = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
}
return ret;
}
@@@ -1719,7 -1712,6 +1716,6 @@@ static int mwifiex_update_curr_bss_para
{
struct mwifiex_bssdescriptor *bss_desc;
int ret;
- unsigned long flags;
/* Allocate and fill new bss descriptor */
bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor), GFP_KERNEL);
@@@ -1734,7 -1726,7 +1730,7 @@@
if (ret)
goto done;
- spin_lock_irqsave(&priv->curr_bcn_buf_lock, flags);
+ spin_lock_bh(&priv->curr_bcn_buf_lock);
/* Make a copy of current BSSID descriptor */
memcpy(&priv->curr_bss_params.bss_descriptor, bss_desc,
sizeof(priv->curr_bss_params.bss_descriptor));
@@@ -1743,7 -1735,7 +1739,7 @@@
* in mwifiex_save_curr_bcn()
*/
mwifiex_save_curr_bcn(priv);
- spin_unlock_irqrestore(&priv->curr_bcn_buf_lock, flags);
+ spin_unlock_bh(&priv->curr_bcn_buf_lock);
done:
/* beacon_ie buffer was allocated in function
@@@ -1997,15 -1989,14 +1993,14 @@@ static void mwifiex_check_next_scan_com
{
struct mwifiex_adapter *adapter = priv->adapter;
struct cmd_ctrl_node *cmd_node;
- unsigned long flags;
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ spin_lock_bh(&adapter->scan_pending_q_lock);
if (list_empty(&adapter->scan_pending_q)) {
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->scan_processing = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
mwifiex_active_scan_req_for_passive_chan(priv);
@@@ -2029,13 -2020,13 +2024,13 @@@
}
} else if ((priv->scan_aborting && !priv->scan_request) ||
priv->scan_block) {
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
mwifiex_cancel_pending_scan_cmd(adapter);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->scan_processing = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
if (!adapter->active_scan_triggered) {
if (priv->scan_request) {
@@@ -2061,7 -2052,7 +2056,7 @@@
cmd_node = list_first_entry(&adapter->scan_pending_q,
struct cmd_ctrl_node, list);
list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node);
}
@@@ -2071,15 -2062,14 +2066,14 @@@
void mwifiex_cancel_scan(struct mwifiex_adapter *adapter)
{
struct mwifiex_private *priv;
- unsigned long cmd_flags;
int i;
mwifiex_cancel_pending_scan_cmd(adapter);
if (adapter->scan_processing) {
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
+ spin_lock_bh(&adapter->mwifiex_cmd_lock);
adapter->scan_processing = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+ spin_unlock_bh(&adapter->mwifiex_cmd_lock);
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
if (!priv)
@@@ -2561,7 -2551,6 +2555,6 @@@ int mwifiex_ret_802_11_scan_ext(struct
struct host_cmd_ds_command *cmd_ptr;
struct cmd_ctrl_node *cmd_node;
- unsigned long cmd_flags, scan_flags;
bool complete_scan = false;
mwifiex_dbg(adapter, INFO, "info: EXT scan returns successfully\n");
@@@ -2596,8 -2585,8 +2589,8 @@@
sizeof(struct mwifiex_ie_types_header));
}
- spin_lock_irqsave(&adapter->cmd_pending_q_lock, cmd_flags);
- spin_lock_irqsave(&adapter->scan_pending_q_lock, scan_flags);
+ spin_lock_bh(&adapter->cmd_pending_q_lock);
+ spin_lock_bh(&adapter->scan_pending_q_lock);
if (list_empty(&adapter->scan_pending_q)) {
complete_scan = true;
list_for_each_entry(cmd_node, &adapter->cmd_pending_q, list) {
@@@ -2611,8 -2600,8 +2604,8 @@@
}
}
}
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, scan_flags);
- spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, cmd_flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
+ spin_unlock_bh(&adapter->cmd_pending_q_lock);
if (complete_scan)
mwifiex_complete_scan(priv);
@@@ -2784,13 -2773,12 +2777,12 @@@ mwifiex_queue_scan_cmd(struct mwifiex_p
struct cmd_ctrl_node *cmd_node)
{
struct mwifiex_adapter *adapter = priv->adapter;
- unsigned long flags;
cmd_node->wait_q_enabled = true;
cmd_node->condition = &adapter->scan_wait_q_woken;
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ spin_lock_bh(&adapter->scan_pending_q_lock);
list_add_tail(&cmd_node->list, &adapter->scan_pending_q);
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ spin_unlock_bh(&adapter->scan_pending_q_lock);
}
/*
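The mwifiex hunks in this file and the next convert spin_lock_irqsave()/spin_unlock_irqrestore() pairs to the _bh variants and drop the now-unused flags locals. Before/after, as a sketch; the _bh form disables bottom halves only, which is sufficient provided these locks are never taken from hard-IRQ context, the assumption behind this conversion:

    unsigned long flags;

    /* before: also disables hard IRQs and needs a flags local */
    spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
    spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);

    /* after: disables bottom halves only, no flags needed */
    spin_lock_bh(&adapter->scan_pending_q_lock);
    spin_unlock_bh(&adapter->scan_pending_q_lock);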
diff --combined drivers/net/wireless/marvell/mwifiex/wmm.c
index 64916ba15df5,0301bc33f554..41f0231376c0
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@@ -138,7 -138,6 +138,6 @@@ void mwifiex_ralist_add(struct mwifiex_
struct mwifiex_ra_list_tbl *ra_list;
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_sta_node *node;
- unsigned long flags;
for (i = 0; i < MAX_NUM_TID; ++i) {
@@@ -163,7 -162,7 +162,7 @@@
ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
}
} else {
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ spin_lock_bh(&priv->sta_list_spinlock);
node = mwifiex_get_sta_entry(priv, ra);
if (node)
ra_list->tx_paused = node->tx_pause;
@@@ -171,7 -170,7 +170,7 @@@
mwifiex_is_sta_11n_enabled(priv, node);
if (ra_list->is_11n_enabled)
ra_list->max_amsdu = node->max_amsdu;
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ spin_unlock_bh(&priv->sta_list_spinlock);
}
mwifiex_dbg(adapter, DATA, "data: ralist %p: is_11n_enabled=%d\n",
@@@ -240,7 -239,7 +239,7 @@@ mwifiex_wmm_setup_queue_priorities(stru
mwifiex_dbg(priv->adapter, INFO,
"info: WMM Parameter IE: version=%d,\t"
"qos_info Parameter Set Count=%d, Reserved=%#x\n",
- wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
+ wmm_ie->version, wmm_ie->qos_info_bitmap &
IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
wmm_ie->reserved);
@@@ -583,11 -582,10 +582,10 @@@ static int mwifiex_free_ack_frame(int i
void
mwifiex_clean_txrx(struct mwifiex_private *priv)
{
- unsigned long flags;
struct sk_buff *skb, *tmp;
mwifiex_11n_cleanup_reorder_tbl(priv);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_wmm_cleanup_queues(priv);
mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
@@@ -601,7 -599,7 +599,7 @@@
if (priv->adapter->if_ops.clean_pcie_ring &&
!test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags))
priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
skb_unlink(skb, &priv->tdls_txq);
@@@ -642,10 -640,9 +640,9 @@@ void mwifiex_update_ralist_tx_pause(str
{
struct mwifiex_ra_list_tbl *ra_list;
u32 pkt_cnt = 0, tx_pkts_queued;
- unsigned long flags;
int i;
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
for (i = 0; i < MAX_NUM_TID; ++i) {
ra_list = mwifiex_wmm_get_ralist_node(priv, i, mac);
@@@ -671,7 -668,7 +668,7 @@@
atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
}
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
/* This function updates non-tdls peer ralist tx_pause while
@@@ -682,10 -679,9 +679,9 @@@ void mwifiex_update_ralist_tx_pause_in_
{
struct mwifiex_ra_list_tbl *ra_list;
u32 pkt_cnt = 0, tx_pkts_queued;
- unsigned long flags;
int i;
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
for (i = 0; i < MAX_NUM_TID; ++i) {
list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[i].ra_list,
@@@ -716,7 -712,7 +712,7 @@@
atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
}
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
/*
@@@ -748,10 -744,9 +744,9 @@@ voi
mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv, const u8 *ra_addr)
{
struct mwifiex_ra_list_tbl *ra_list;
- unsigned long flags;
int i;
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
for (i = 0; i < MAX_NUM_TID; ++i) {
ra_list = mwifiex_wmm_get_ralist_node(priv, i, ra_addr);
@@@ -767,7 -762,7 +762,7 @@@
list_del(&ra_list->list);
kfree(ra_list);
}
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
/*
@@@ -818,7 -813,6 +813,6 @@@ mwifiex_wmm_add_buf_txqueue(struct mwif
u32 tid;
struct mwifiex_ra_list_tbl *ra_list;
u8 ra[ETH_ALEN], tid_down;
- unsigned long flags;
struct list_head list_head;
int tdls_status = TDLS_NOT_SETUP;
struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
@@@ -844,7 -838,7 +838,7 @@@
tid = skb->priority;
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
@@@ -864,8 -858,7 +858,7 @@@
break;
case TDLS_SETUP_INPROGRESS:
skb_queue_tail(&priv->tdls_txq, skb);
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
return;
default:
list_head = priv->wmm.tid_tbl_ptr[tid_down].ra_list;
@@@ -881,7 -874,7 +874,7 @@@
}
if (!ra_list) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_write_data_complete(adapter, skb, 0, -1);
return;
}
@@@ -901,7 -894,7 +894,7 @@@
else
atomic_inc(&priv->wmm.tx_pkts_queued);
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
/*
@@@ -1092,7 -1085,6 +1085,6 @@@ mwifiex_wmm_get_highest_priolist_ptr(st
struct mwifiex_ra_list_tbl *ptr;
struct mwifiex_tid_tbl *tid_ptr;
atomic_t *hqp;
- unsigned long flags_ra;
int i, j;
/* check the BSS with highest priority first */
@@@ -1118,8 -1110,7 +1110,7 @@@ try_again
hqp = &priv_tmp->wmm.highest_queued_prio;
for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {
- spin_lock_irqsave(&priv_tmp->wmm.
- ra_list_spinlock, flags_ra);
+ spin_lock_bh(&priv_tmp->wmm.ra_list_spinlock);
tid_ptr = &(priv_tmp)->wmm.
tid_tbl_ptr[tos_to_tid[i]];
@@@ -1134,9 -1125,7 +1125,7 @@@
goto found;
}
- spin_unlock_irqrestore(&priv_tmp->wmm.
- ra_list_spinlock,
- flags_ra);
+ spin_unlock_bh(&priv_tmp->wmm.ra_list_spinlock);
}
if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) != 0) {
@@@ -1158,7 -1147,7 +1147,7 @@@ found
/* holds ra_list_spinlock */
if (atomic_read(hqp) > i)
atomic_set(hqp, i);
- spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags_ra);
+ spin_unlock_bh(&priv_tmp->wmm.ra_list_spinlock);
*priv = priv_tmp;
*tid = tos_to_tid[i];
@@@ -1182,24 -1171,23 +1171,23 @@@ void mwifiex_rotate_priolists(struct mw
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
struct mwifiex_tid_tbl *tid_ptr = &priv->wmm.tid_tbl_ptr[tid];
- unsigned long flags;
- spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags);
+ spin_lock_bh(&tbl[priv->bss_priority].bss_prio_lock);
/*
* dirty trick: we remove 'head' temporarily and reinsert it after the
* current bss node, so the list effectively stays fixed while head moves
*/
list_move(&tbl[priv->bss_priority].bss_prio_head,
&tbl[priv->bss_priority].bss_prio_cur->list);
- spin_unlock_irqrestore(&tbl[priv->bss_priority].bss_prio_lock, flags);
+ spin_unlock_bh(&tbl[priv->bss_priority].bss_prio_lock);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
if (mwifiex_is_ralist_valid(priv, ra, tid)) {
priv->wmm.packets_out[tid]++;
/* same as above */
list_move(&tid_ptr->ra_list, &ra->list);
}
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
/*
@@@ -1236,8 -1224,7 +1224,7 @@@ mwifiex_is_11n_aggragation_possible(str
*/
static void
mwifiex_send_single_packet(struct mwifiex_private *priv,
- struct mwifiex_ra_list_tbl *ptr, int ptr_index,
- unsigned long ra_list_flags)
+ struct mwifiex_ra_list_tbl *ptr, int ptr_index)
__releases(&priv->wmm.ra_list_spinlock)
{
struct sk_buff *skb, *skb_next;
@@@ -1246,8 -1233,7 +1233,7 @@@
struct mwifiex_txinfo *tx_info;
if (skb_queue_empty(&ptr->skb_head)) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_dbg(adapter, DATA, "data: nothing to send\n");
return;
}
@@@ -1265,18 -1251,17 +1251,17 @@@
else
skb_next = NULL;
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
tx_param.next_pkt_len = ((skb_next) ? skb_next->len +
sizeof(struct txpd) : 0);
if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
/* Queue the packet back at the head */
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_write_data_complete(adapter, skb, 0, -1);
return;
}
@@@ -1286,8 -1271,7 +1271,7 @@@
ptr->total_pkt_count++;
ptr->ba_pkt_count++;
tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
} else {
mwifiex_rotate_priolists(priv, ptr, ptr_index);
atomic_dec(&priv->wmm.tx_pkts_queued);
@@@ -1323,8 -1307,7 +1307,7 @@@ mwifiex_is_ptr_processed(struct mwifiex
*/
static void
mwifiex_send_processed_packet(struct mwifiex_private *priv,
- struct mwifiex_ra_list_tbl *ptr, int ptr_index,
- unsigned long ra_list_flags)
+ struct mwifiex_ra_list_tbl *ptr, int ptr_index)
__releases(&priv->wmm.ra_list_spinlock)
{
struct mwifiex_tx_param tx_param;
@@@ -1334,8 -1317,7 +1317,7 @@@
struct mwifiex_txinfo *tx_info;
if (skb_queue_empty(&ptr->skb_head)) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
return;
}
@@@ -1343,8 -1325,7 +1325,7 @@@
if (adapter->data_sent || adapter->tx_lock_flag) {
ptr->total_pkt_count--;
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
skb_queue_tail(&adapter->tx_data_q, skb);
atomic_dec(&priv->wmm.tx_pkts_queued);
atomic_inc(&adapter->tx_queued);
@@@ -1358,7 -1339,7 +1339,7 @@@
tx_info = MWIFIEX_SKB_TXCB(skb);
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
tx_param.next_pkt_len =
((skb_next) ? skb_next->len +
@@@ -1374,11 -1355,10 +1355,10 @@@
switch (ret) {
case -EBUSY:
mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
mwifiex_write_data_complete(adapter, skb, 0, -1);
return;
}
@@@ -1386,8 -1366,7 +1366,7 @@@
skb_queue_tail(&ptr->skb_head, skb);
tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
break;
case -1:
mwifiex_dbg(adapter, ERROR, "host_to_card failed: %#x\n", ret);
@@@ -1404,10 -1383,9 +1383,9 @@@
if (ret != -EBUSY) {
mwifiex_rotate_priolists(priv, ptr, ptr_index);
atomic_dec(&priv->wmm.tx_pkts_queued);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
ptr->total_pkt_count--;
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
- ra_list_flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
}
@@@ -1423,7 -1401,6 +1401,6 @@@ mwifiex_dequeue_tx_packet(struct mwifie
int ptr_index = 0;
u8 ra[ETH_ALEN];
int tid_del = 0, tid = 0;
- unsigned long flags;
ptr = mwifiex_wmm_get_highest_priolist_ptr(adapter, &priv, &ptr_index);
if (!ptr)
@@@ -1433,14 -1410,14 +1410,14 @@@
mwifiex_dbg(adapter, DATA, "data: tid=%d\n", tid);
- spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+ spin_lock_bh(&priv->wmm.ra_list_spinlock);
if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
- spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ spin_unlock_bh(&priv->wmm.ra_list_spinlock);
return -1;
}
if (mwifiex_is_ptr_processed(priv, ptr)) {
- mwifiex_send_processed_packet(priv, ptr, ptr_index, flags);
+ mwifiex_send_processed_packet(priv, ptr, ptr_index);
/* ra_list_spinlock has been freed in
mwifiex_send_processed_packet() */
return 0;
@@@ -1455,12 -1432,12 +1432,12 @@@
mwifiex_is_amsdu_allowed(priv, tid) &&
mwifiex_is_11n_aggragation_possible(priv, ptr,
adapter->tx_buf_size))
- mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
+ mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index);
/* ra_list_spinlock has been freed in
* mwifiex_11n_aggregate_pkt()
*/
else
- mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
+ mwifiex_send_single_packet(priv, ptr, ptr_index);
/* ra_list_spinlock has been freed in
* mwifiex_send_single_packet()
*/
@@@ -1481,11 -1458,11 +1458,11 @@@
if (mwifiex_is_amsdu_allowed(priv, tid) &&
mwifiex_is_11n_aggragation_possible(priv, ptr,
adapter->tx_buf_size))
- mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
+ mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index);
/* ra_list_spinlock has been freed in
mwifiex_11n_aggregate_pkt() */
else
- mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
+ mwifiex_send_single_packet(priv, ptr, ptr_index);
/* ra_list_spinlock has been freed in
mwifiex_send_single_packet() */
}
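
The mwifiex hunks above are a mechanical conversion: it relies on ra_list_spinlock never being taken from hard-IRQ context, so the IRQ-saving lock variants (and the on-stack flags they require) can become the cheaper _bh variants. A minimal sketch of the before/after pattern, for illustration only:

    /* before: every caller carries an on-stack flags variable */
    unsigned long flags;

    spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
    /* ... manipulate the RA lists ... */
    spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);

    /* after: disables bottom halves only; no flags value to thread
     * through helpers such as mwifiex_send_single_packet() */
    spin_lock_bh(&priv->wmm.ra_list_spinlock);
    /* ... manipulate the RA lists ... */
    spin_unlock_bh(&priv->wmm.ra_list_spinlock);

As a side effect, the helpers that release the lock on behalf of their caller no longer need a ra_list_flags parameter, which the __releases() annotation alone could not express.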
diff --combined drivers/net/wireless/mediatek/mt76/mt76.h
index 889b76deb703,56bf93a8988e..989386ecb5e4
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@@ -30,7 -30,6 +30,7 @@@
#define MT_TX_RING_SIZE 256
#define MT_MCU_RING_SIZE 32
#define MT_RX_BUF_SIZE 2048
+#define MT_SKB_HEAD_LEN 128
struct mt76_dev;
struct mt76_wcid;
@@@ -259,10 -258,11 +259,11 @@@ struct mt76_rx_tid
#define MT_TX_CB_TXS_DONE BIT(1)
#define MT_TX_CB_TXS_FAILED BIT(2)
- #define MT_PACKET_ID_MASK GENMASK(7, 0)
+ #define MT_PACKET_ID_MASK GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK 0
#define MT_PACKET_ID_NO_SKB 1
#define MT_PACKET_ID_FIRST 2
+ #define MT_PACKET_ID_HAS_RATE BIT(7)
#define MT_TX_STATUS_SKB_TIMEOUT HZ
@@@ -382,7 -382,8 +383,8 @@@ enum mt76u_out_ep
__MT_EP_OUT_MAX,
};
- #define MT_SG_MAX_SIZE 8
+ #define MT_TX_SG_MAX_SIZE 8
+ #define MT_RX_SG_MAX_SIZE 1
#define MT_NUM_TX_ENTRIES 256
#define MT_NUM_RX_ENTRIES 128
#define MCU_RESP_URB_SIZE 1024
@@@ -394,9 -395,7 +396,7 @@@ struct mt76_usb
struct delayed_work stat_work;
u8 out_ep[__MT_EP_OUT_MAX];
- u16 out_max_packet;
u8 in_ep[__MT_EP_IN_MAX];
- u16 in_max_packet;
bool sg_en;
struct mt76u_mcu {
@@@ -453,6 -452,7 +453,7 @@@ struct mt76_dev
int tx_dma_idx[4];
struct tasklet_struct tx_tasklet;
+ struct napi_struct tx_napi;
struct delayed_work mac_work;
wait_queue_head_t tx_wait;
@@@ -484,6 -484,8 +485,8 @@@
int txpower_conf;
int txpower_cur;
+ enum nl80211_dfs_regions region;
+
u32 debugfs_reg;
struct led_classdev led_cdev;
@@@ -689,6 -691,14 +692,14 @@@ static inline void mt76_insert_hdr_pad(
skb->data[len + 1] = 0;
}
+ static inline bool mt76_is_skb_pktid(u8 pktid)
+ {
+ if (pktid & MT_PACKET_ID_HAS_RATE)
+ return false;
+
+ return pktid >= MT_PACKET_ID_FIRST;
+ }
+
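
MT_PACKET_ID_MASK shrinks from 8 bits to 7 so that the top bit can mark packet IDs that carry an embedded rate rather than referring to a tracked skb. A hedged sketch of how such an ID might be composed and classified; rate_idx is an illustrative variable, not part of the patch:

    u8 pktid = MT_PACKET_ID_HAS_RATE | (rate_idx & MT_PACKET_ID_MASK);

    if (mt76_is_skb_pktid(pktid)) {
        /* bit 7 clear and ID >= MT_PACKET_ID_FIRST: resolve the
         * skb for TX-status reporting */
    } else {
        /* ID encodes a fixed rate (or is NO_ACK/NO_SKB); the
         * status applies to a rate, not a tracked skb */
    }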
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb);
@@@ -750,6 -760,10 +761,10 @@@ void mt76_csa_check(struct mt76_dev *de
void mt76_csa_finish(struct mt76_dev *dev);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
+ void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
+ int mt76_get_rate(struct mt76_dev *dev,
+ struct ieee80211_supported_band *sband,
+ int idx, bool cck);
/* internal */
void mt76_tx_free(struct mt76_dev *dev);
diff --combined drivers/net/wireless/mediatek/mt76/usb.c
index dd90427b2d67,61b27f3ec6e4..fb87ce7fbdf6
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@@ -267,12 -267,10 +267,10 @@@ mt76u_set_endpoints(struct usb_interfac
if (usb_endpoint_is_bulk_in(ep_desc) &&
in_ep < __MT_EP_IN_MAX) {
usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
- usb->in_max_packet = usb_endpoint_maxp(ep_desc);
in_ep++;
} else if (usb_endpoint_is_bulk_out(ep_desc) &&
out_ep < __MT_EP_OUT_MAX) {
usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
- usb->out_max_packet = usb_endpoint_maxp(ep_desc);
out_ep++;
}
}
@@@ -333,12 -331,13 +331,13 @@@ mt76u_refill_rx(struct mt76_dev *dev, s
}
static int
- mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
+ mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
+ int sg_max_size)
{
unsigned int size = sizeof(struct urb);
if (dev->usb.sg_en)
- size += MT_SG_MAX_SIZE * sizeof(struct scatterlist);
+ size += sg_max_size * sizeof(struct scatterlist);
e->urb = kzalloc(size, GFP_KERNEL);
if (!e->urb)
@@@ -357,11 -356,12 +356,12 @@@ mt76u_rx_urb_alloc(struct mt76_dev *dev
{
int err;
- err = mt76u_urb_alloc(dev, e);
+ err = mt76u_urb_alloc(dev, e, MT_RX_SG_MAX_SIZE);
if (err)
return err;
- return mt76u_refill_rx(dev, e->urb, MT_SG_MAX_SIZE, GFP_KERNEL);
+ return mt76u_refill_rx(dev, e->urb, MT_RX_SG_MAX_SIZE,
+ GFP_KERNEL);
}
static void mt76u_urb_free(struct urb *urb)
@@@ -429,42 -429,6 +429,42 @@@ static int mt76u_get_rx_entry_len(u8 *d
return dma_len;
}
+static struct sk_buff *
+mt76u_build_rx_skb(void *data, int len, int buf_size)
+{
+ struct sk_buff *skb;
+
+ if (SKB_WITH_OVERHEAD(buf_size) < MT_DMA_HDR_LEN + len) {
+ struct page *page;
+
+ /* slow path, not enough space for data and
+ * skb_shared_info
+ */
+ skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ skb_put_data(skb, data + MT_DMA_HDR_LEN, MT_SKB_HEAD_LEN);
+ data += (MT_DMA_HDR_LEN + MT_SKB_HEAD_LEN);
+ page = virt_to_head_page(data);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ page, data - page_address(page),
+ len - MT_SKB_HEAD_LEN, buf_size);
+
+ return skb;
+ }
+
+ /* fast path */
+ skb = build_skb(data, buf_size);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, MT_DMA_HDR_LEN);
+ __skb_put(skb, len);
+
+ return skb;
+}
+
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
@@@ -482,11 -446,19 +482,11 @@@
return 0;
data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
- if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size)) {
- dev_err_ratelimited(dev->dev, "rx data too big %d\n", data_len);
- return 0;
- }
-
- skb = build_skb(data, q->buf_size);
+ skb = mt76u_build_rx_skb(data, data_len, q->buf_size);
if (!skb)
return 0;
- skb_reserve(skb, MT_DMA_HDR_LEN);
- __skb_put(skb, data_len);
len -= data_len;
-
while (len > 0 && nsgs < urb->num_sgs) {
data_len = min_t(int, len, urb->sg[nsgs].length);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
@@@ -605,8 -577,9 +605,9 @@@ static int mt76u_alloc_rx(struct mt76_d
if (!q->entry)
return -ENOMEM;
- q->buf_size = dev->usb.sg_en ? MT_RX_BUF_SIZE : PAGE_SIZE;
q->ndesc = MT_NUM_RX_ENTRIES;
+ q->buf_size = PAGE_SIZE;
+
for (i = 0; i < q->ndesc; i++) {
err = mt76u_rx_urb_alloc(dev, &q->entry[i]);
if (err < 0)
@@@ -763,7 -736,7 +764,7 @@@ mt76u_tx_setup_buffers(struct mt76_dev
urb->transfer_buffer = skb->data;
return 0;
} else {
- sg_init_table(urb->sg, MT_SG_MAX_SIZE);
+ sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
if (urb->num_sgs == 0)
return -ENOMEM;
@@@ -857,7 -830,8 +858,8 @@@ static int mt76u_alloc_tx(struct mt76_d
q->ndesc = MT_NUM_TX_ENTRIES;
for (j = 0; j < q->ndesc; j++) {
- err = mt76u_urb_alloc(dev, &q->entry[j]);
+ err = mt76u_urb_alloc(dev, &q->entry[j],
+ MT_TX_SG_MAX_SIZE);
if (err < 0)
return err;
}
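
With q->buf_size now fixed at PAGE_SIZE, mt76u_build_rx_skb() picks its path by whether the buffer still has room for struct skb_shared_info behind the payload. As a rough worked example on a system with 4 KiB pages (exact sizes vary by architecture and config): SKB_WITH_OVERHEAD(4096) is 4096 minus the aligned size of struct skb_shared_info, on the order of 3.7 KiB, so frames up to roughly that size minus MT_DMA_HDR_LEN take the build_skb() fast path; a larger frame (e.g. an A-MSDU) instead gets its first MT_SKB_HEAD_LEN (128) bytes copied into a freshly allocated head, with the remainder attached as a page fragment via skb_add_rx_frag().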
diff --combined fs/afs/cmservice.c
index 2168b36c7a2a,1ea39971eb91..b86195e4dc6c
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@@ -256,11 -256,8 +256,11 @@@ static void SRXAFSCB_CallBack(struct wo
* server holds up change visibility till it receives our reply so as
* to maintain cache coherency.
*/
- if (call->server)
+ if (call->server) {
+ trace_afs_server(call->server, atomic_read(&call->server->usage),
+ afs_server_trace_callback);
afs_break_callbacks(call->server, call->count, call->request);
+ }
afs_send_empty_reply(call);
afs_put_call(call);
@@@ -505,14 -502,18 +505,14 @@@ static void SRXAFSCB_ProbeUuid(struct w
struct afs_call *call = container_of(work, struct afs_call, work);
struct afs_uuid *r = call->request;
- struct {
- __be32 match;
- } reply;
-
_enter("");
if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
- reply.match = htonl(0);
+ afs_send_empty_reply(call);
else
- reply.match = htonl(1);
+ rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+ 1, 1, "K-1");
- afs_send_simple_reply(call, &reply, sizeof(reply));
afs_put_call(call);
_leave("");
}
@@@ -579,9 -580,8 +579,8 @@@ static int afs_deliver_cb_probe_uuid(st
*/
static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work)
{
- struct afs_interface *ifs;
struct afs_call *call = container_of(work, struct afs_call, work);
- int loop, nifs;
+ int loop;
struct {
struct /* InterfaceAddr */ {
@@@ -599,19 -599,7 +598,7 @@@
_enter("");
- nifs = 0;
- ifs = kcalloc(32, sizeof(*ifs), GFP_KERNEL);
- if (ifs) {
- nifs = afs_get_ipv4_interfaces(call->net, ifs, 32, false);
- if (nifs < 0) {
- kfree(ifs);
- ifs = NULL;
- nifs = 0;
- }
- }
-
memset(&reply, 0, sizeof(reply));
- reply.ia.nifs = htonl(nifs);
reply.ia.uuid[0] = call->net->uuid.time_low;
reply.ia.uuid[1] = htonl(ntohs(call->net->uuid.time_mid));
@@@ -621,15 -609,6 +608,6 @@@
for (loop = 0; loop < 6; loop++)
reply.ia.uuid[loop + 5] = htonl((s8) call->net->uuid.node[loop]);
- if (ifs) {
- for (loop = 0; loop < nifs; loop++) {
- reply.ia.ifaddr[loop] = ifs[loop].address.s_addr;
- reply.ia.netmask[loop] = ifs[loop].netmask.s_addr;
- reply.ia.mtu[loop] = htonl(ifs[loop].mtu);
- }
- kfree(ifs);
- }
-
reply.cap.capcount = htonl(1);
reply.cap.caps[0] = htonl(AFS_CAP_ERROR_TRANSLATION);
afs_send_simple_reply(call, &reply, sizeof(reply));
diff --combined fs/afs/internal.h
index be37fafbaeb5,0f84d0da5417..f66a3be12fd6
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@@ -514,7 -514,6 +514,7 @@@ struct afs_server
atomic_t usage;
u32 addr_version; /* Address list version */
u32 cm_epoch; /* Server RxRPC epoch */
+ unsigned int debug_id; /* Debugging ID for traces */
/* file service access */
rwlock_t fs_lock; /* access lock */
@@@ -720,15 -719,6 +720,6 @@@ struct afs_permits
};
/*
- * record of one of a system's set of network interfaces
- */
- struct afs_interface {
- struct in_addr address; /* IPv4 address bound to interface */
- struct in_addr netmask; /* netmask applied to address */
- unsigned mtu; /* MTU of interface */
- };
-
- /*
* Error prioritisation and accumulation.
*/
struct afs_error {
@@@ -845,9 -835,9 +836,9 @@@ extern struct fscache_cookie_def afs_vn
* callback.c
*/
extern void afs_init_callback_state(struct afs_server *);
-extern void __afs_break_callback(struct afs_vnode *);
-extern void afs_break_callback(struct afs_vnode *);
-extern void afs_break_callbacks(struct afs_server *, size_t, struct afs_callback_break*);
+extern void __afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
+extern void afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
+extern void afs_break_callbacks(struct afs_server *, size_t, struct afs_callback_break *);
extern int afs_register_server_cb_interest(struct afs_vnode *,
struct afs_server_list *, unsigned int);
@@@ -1091,12 -1081,6 +1082,6 @@@ extern struct vfsmount *afs_d_automount
extern void afs_mntpt_kill_timer(void);
/*
- * netdevices.c
- */
- extern int afs_get_ipv4_interfaces(struct afs_net *, struct afs_interface *,
- size_t, bool);
-
- /*
* proc.c
*/
#ifdef CONFIG_PROC_FS
@@@ -1241,12 -1225,17 +1226,12 @@@ extern void __exit afs_clean_up_permit_
*/
extern spinlock_t afs_server_peer_lock;
-static inline struct afs_server *afs_get_server(struct afs_server *server)
-{
- atomic_inc(&server->usage);
- return server;
-}
-
extern struct afs_server *afs_find_server(struct afs_net *,
const struct sockaddr_rxrpc *);
extern struct afs_server *afs_find_server_by_uuid(struct afs_net *, const uuid_t *);
extern struct afs_server *afs_lookup_server(struct afs_cell *, struct key *, const uuid_t *);
-extern void afs_put_server(struct afs_net *, struct afs_server *);
+extern struct afs_server *afs_get_server(struct afs_server *, enum afs_server_trace);
+extern void afs_put_server(struct afs_net *, struct afs_server *, enum afs_server_trace);
extern void afs_manage_servers(struct work_struct *);
extern void afs_servers_timer(struct timer_list *);
extern void __net_exit afs_purge_servers(struct afs_net *);
@@@ -1430,7 -1419,7 +1415,7 @@@ static inline void afs_check_for_remote
{
if (fc->ac.error == -ENOENT) {
set_bit(AFS_VNODE_DELETED, &vnode->flags);
- afs_break_callback(vnode);
+ afs_break_callback(vnode, afs_cb_break_for_deleted);
}
}
diff --combined include/linux/mlx5/qp.h
index 08e43cd9e742,127d224443e3..937041101504
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@@ -37,8 -37,7 +37,8 @@@
#include <linux/mlx5/driver.h>
#define MLX5_INVALID_LKEY 0x100
-#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5)
+/* UMR (3 WQE_BB's) + SIG (3 WQE_BB's) + PSV (mem) + PSV (wire) */
+#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 8)
#define MLX5_DIF_SIZE 8
#define MLX5_STRIDE_BLOCK_OP 0x400
#define MLX5_CPY_GRD_MASK 0xc0
@@@ -203,7 -202,12 +203,12 @@@ struct mlx5_wqe_ctrl_seg
u8 signature;
u8 rsvd[2];
u8 fm_ce_se;
- __be32 imm;
+ union {
+ __be32 general_id;
+ __be32 imm;
+ __be32 umr_mkey;
+ __be32 tisn;
+ };
};
#define MLX5_WQE_CTRL_DS_MASK 0x3f
@@@ -552,11 -556,6 +557,6 @@@ static inline struct mlx5_core_qp *__ml
return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
}
- static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
- {
- return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
- }
-
int mlx5_core_create_dct(struct mlx5_core_dev *dev,
struct mlx5_core_dct *qp,
u32 *in, int inlen,
diff --combined include/linux/phylink.h
index 5b130140fb8f,0fe57a261c9c..300ecdb6790a
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@@ -54,6 -54,21 +54,21 @@@ struct phylink_link_state
unsigned int an_complete:1;
};
+ enum phylink_op_type {
+ PHYLINK_NETDEV = 0,
+ PHYLINK_DEV,
+ };
+
+ /**
+ * struct phylink_config - PHYLINK configuration structure
+ * @dev: a pointer to a struct device associated with the MAC
+ * @type: operation type of PHYLINK instance
+ */
+ struct phylink_config {
+ struct device *dev;
+ enum phylink_op_type type;
+ };
+
/**
* struct phylink_mac_ops - MAC operations structure.
* @validate: Validate and update the link configuration.
@@@ -66,16 -81,17 +81,17 @@@
* The individual methods are described more fully below.
*/
struct phylink_mac_ops {
- void (*validate)(struct net_device *ndev, unsigned long *supported,
+ void (*validate)(struct phylink_config *config,
+ unsigned long *supported,
struct phylink_link_state *state);
- int (*mac_link_state)(struct net_device *ndev,
+ int (*mac_link_state)(struct phylink_config *config,
struct phylink_link_state *state);
- void (*mac_config)(struct net_device *ndev, unsigned int mode,
+ void (*mac_config)(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state);
- void (*mac_an_restart)(struct net_device *ndev);
- void (*mac_link_down)(struct net_device *ndev, unsigned int mode,
+ void (*mac_an_restart)(struct phylink_config *config);
+ void (*mac_link_down)(struct phylink_config *config, unsigned int mode,
phy_interface_t interface);
- void (*mac_link_up)(struct net_device *ndev, unsigned int mode,
+ void (*mac_link_up)(struct phylink_config *config, unsigned int mode,
phy_interface_t interface,
struct phy_device *phy);
};
@@@ -83,7 -99,7 +99,7 @@@
#if 0 /* For kernel-doc purposes only. */
/**
* validate - Validate and update the link configuration
- * @ndev: a pointer to a &struct net_device for the MAC.
+ * @config: a pointer to a &struct phylink_config.
* @supported: ethtool bitmask for supported link modes.
* @state: a pointer to a &struct phylink_link_state.
*
@@@ -93,26 -109,19 +109,26 @@@
* Note that the PHY may be able to transform from one connection
* technology to another, so, eg, don't clear 1000BaseX just
* because the MAC is unable to operate in BaseX mode. This is more about
- * clearing unsupported speeds and duplex settings.
+ * clearing unsupported speeds and duplex settings. The port modes
+ * should not be cleared; phylink_set_port_modes() will help with this.
*
* If the @state->interface mode is %PHY_INTERFACE_MODE_1000BASEX
* or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode
* based on @state->advertising and/or @state->speed and update
- * @state->interface accordingly.
+ * @state->interface accordingly. See phylink_helper_basex_speed().
+ *
+ * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink expects the
+ * MAC driver to return all supported link modes.
+ *
+ * If the @state->interface mode is not supported, then the @supported
+ * mask must be cleared.
*/
- void validate(struct net_device *ndev, unsigned long *supported,
+ void validate(struct phylink_config *config, unsigned long *supported,
struct phylink_link_state *state);
/**
* mac_link_state() - Read the current link state from the hardware
- * @ndev: a pointer to a &struct net_device for the MAC.
+ * @config: a pointer to a &struct phylink_config.
* @state: a pointer to a &struct phylink_link_state.
*
* Read the current link state from the MAC, reporting the current
@@@ -121,12 -130,12 +137,12 @@@
* negotiation completion state in @state->an_complete, and link
* up state in @state->link.
*/
- int mac_link_state(struct net_device *ndev,
+ int mac_link_state(struct phylink_config *config,
struct phylink_link_state *state);
/**
* mac_config() - configure the MAC for the selected mode and state
- * @ndev: a pointer to a &struct net_device for the MAC.
+ * @config: a pointer to a &struct phylink_config.
* @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
* @state: a pointer to a &struct phylink_link_state.
*
@@@ -175,18 -184,18 +191,18 @@@
* down. This "update" behaviour is critical to avoid bouncing the
* link up status.
*/
- void mac_config(struct net_device *ndev, unsigned int mode,
+ void mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state);
/**
* mac_an_restart() - restart 802.3z BaseX autonegotiation
- * @ndev: a pointer to a &struct net_device for the MAC.
+ * @config: a pointer to a &struct phylink_config.
*/
- void mac_an_restart(struct net_device *ndev);
+ void mac_an_restart(struct phylink_config *config);
/**
* mac_link_down() - take the link down
- * @ndev: a pointer to a &struct net_device for the MAC.
+ * @config: a pointer to a &struct phylink_config.
* @mode: link autonegotiation mode
* @interface: link &typedef phy_interface_t mode
*
@@@ -195,12 -204,12 +211,12 @@@
* Energy Efficient Ethernet MAC configuration. Interface type
* selection must be done in mac_config().
*/
- void mac_link_down(struct net_device *ndev, unsigned int mode,
+ void mac_link_down(struct phylink_config *config, unsigned int mode,
phy_interface_t interface);
/**
* mac_link_up() - allow the link to come up
- * @ndev: a pointer to a &struct net_device for the MAC.
+ * @config: a pointer to a &struct phylink_config.
* @mode: link autonegotiation mode
* @interface: link &typedef phy_interface_t mode
* @phy: any attached phy
@@@ -211,13 -220,14 +227,14 @@@
* phy_init_eee() and perform appropriate MAC configuration for EEE.
* Interface type selection must be done in mac_config().
*/
- void mac_link_up(struct net_device *ndev, unsigned int mode,
+ void mac_link_up(struct phylink_config *config, unsigned int mode,
phy_interface_t interface,
struct phy_device *phy);
#endif
- struct phylink *phylink_create(struct net_device *, struct fwnode_handle *,
- phy_interface_t iface, const struct phylink_mac_ops *ops);
+ struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *,
+ phy_interface_t iface,
+ const struct phylink_mac_ops *ops);
void phylink_destroy(struct phylink *);
int phylink_connect_phy(struct phylink *, struct phy_device *);
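
The conversion above threads a struct phylink_config through every MAC operation in place of the net_device, which is what makes non-netdev (PHYLINK_DEV) users possible. A hedged sketch of how a MAC driver adapts; foo_priv, foo_mac_config and foo_ops are illustrative names, not part of the patch:

    struct foo_priv {
        struct phylink_config phylink_config;
        /* ... driver state ... */
    };

    static void foo_mac_config(struct phylink_config *config, unsigned int mode,
                               const struct phylink_link_state *state)
    {
        struct foo_priv *priv =
            container_of(config, struct foo_priv, phylink_config);

        /* program the MAC for state->interface/speed/duplex */
    }

    /* at probe time:
     *   priv->phylink_config.dev = &ndev->dev;
     *   priv->phylink_config.type = PHYLINK_NETDEV;
     *   pl = phylink_create(&priv->phylink_config, fwnode, phy_mode, &foo_ops);
     */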
diff --combined include/net/ip_vs.h
index b36a1df93e7c,cb1ad0cc5c7b..3759167f91f5
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@@ -603,6 -603,7 +603,7 @@@ struct ip_vs_dest_user_kern
u16 tun_type; /* tunnel type */
__be16 tun_port; /* tunnel port */
+ u16 tun_flags; /* tunnel flags */
};
@@@ -665,6 -666,7 +666,7 @@@ struct ip_vs_dest
atomic_t last_weight; /* server latest weight */
__u16 tun_type; /* tunnel type */
__be16 tun_port; /* tunnel port */
+ __u16 tun_flags; /* tunnel flags */
refcount_t refcnt; /* reference counter */
struct ip_vs_stats stats; /* statistics */
@@@ -808,12 -810,11 +810,12 @@@ struct ipvs_master_sync_state
struct ip_vs_sync_buff *sync_buff;
unsigned long sync_queue_len;
unsigned int sync_queue_delay;
- struct task_struct *master_thread;
struct delayed_work master_wakeup_work;
struct netns_ipvs *ipvs;
};
+struct ip_vs_sync_thread_data;
+
/* How much time to keep dests in trash */
#define IP_VS_DEST_TRASH_PERIOD (120 * HZ)
@@@ -944,8 -945,7 +946,8 @@@ struct netns_ipvs
spinlock_t sync_lock;
struct ipvs_master_sync_state *ms;
spinlock_t sync_buff_lock;
- struct task_struct **backup_threads;
+ struct ip_vs_sync_thread_data *master_tinfo;
+ struct ip_vs_sync_thread_data *backup_tinfo;
int threads_mask;
volatile int sync_state;
struct mutex sync_mutex;
@@@ -1406,6 -1406,9 +1408,9 @@@ bool ip_vs_has_real_service(struct netn
struct ip_vs_dest *
ip_vs_find_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol,
const union nf_inet_addr *daddr, __be16 dport);
+ struct ip_vs_dest *ip_vs_find_tunnel(struct netns_ipvs *ipvs, int af,
+ const union nf_inet_addr *daddr,
+ __be16 tun_port);
int ip_vs_use_count_inc(void);
void ip_vs_use_count_dec(void);
@@@ -1499,6 -1502,9 +1504,9 @@@ static inline int ip_vs_todrop(struct n
static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; }
#endif
+ #define IP_VS_DFWD_METHOD(dest) (atomic_read(&(dest)->conn_flags) & \
+ IP_VS_CONN_F_FWD_MASK)
+
/* ip_vs_fwd_tag returns the forwarding tag of the connection */
#define IP_VS_FWD_METHOD(cp) (cp->flags & IP_VS_CONN_F_FWD_MASK)
diff --combined include/net/tls.h
index 889df0312cd1,0279938386ab..176d0b039f32
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@@ -40,6 -40,7 +40,7 @@@
#include <linux/socket.h>
#include <linux/tcp.h>
#include <linux/skmsg.h>
+ #include <linux/netdevice.h>
#include <net/tcp.h>
#include <net/strparser.h>
@@@ -61,6 -62,7 +62,7 @@@
#define TLS_DEVICE_NAME_MAX 32
#define MAX_IV_SIZE 16
+ #define TLS_MAX_REC_SEQ_SIZE 8
/* For AES-CCM, the full 16-bytes of IV is made of '4' fields of given sizes.
*
@@@ -197,20 -199,24 +199,24 @@@ struct tls_offload_context_tx
struct scatterlist sg_tx_data[MAX_SKB_FRAGS];
void (*sk_destruct)(struct sock *sk);
- u8 driver_state[];
+ u8 driver_state[] __aligned(8);
/* The TLS layer reserves room for driver specific state
* Currently the belief is that there is not enough
* driver specific state to justify another layer of indirection
*/
- #define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *)))
+ #define TLS_DRIVER_STATE_SIZE_TX 16
};
#define TLS_OFFLOAD_CONTEXT_SIZE_TX \
- (ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \
- TLS_DRIVER_STATE_SIZE)
+ (sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX)
enum tls_context_flags {
TLS_RX_SYNC_RUNNING = 0,
+ /* Unlike RX, where resync is driven entirely by the core, in TX only
+ * the driver knows when things went out of sync, so we need the flag
+ * to be atomic.
+ */
+ TLS_TX_SYNC_SCHED = 1,
};
struct cipher_context {
@@@ -240,34 -246,32 +246,32 @@@ struct tls_prot_info
};
struct tls_context {
+ /* read-only cache line */
struct tls_prot_info prot_info;
- union tls_crypto_context crypto_send;
- union tls_crypto_context crypto_recv;
+ u8 tx_conf:3;
+ u8 rx_conf:3;
- struct list_head list;
- struct net_device *netdev;
- refcount_t refcount;
+ int (*push_pending_record)(struct sock *sk, int flags);
+ void (*sk_write_space)(struct sock *sk);
void *priv_ctx_tx;
void *priv_ctx_rx;
- u8 tx_conf:3;
- u8 rx_conf:3;
+ struct net_device *netdev;
+ /* rw cache line */
struct cipher_context tx;
struct cipher_context rx;
struct scatterlist *partially_sent_record;
u16 partially_sent_offset;
- unsigned long flags;
bool in_tcp_sendpages;
bool pending_open_record_frags;
+ unsigned long flags;
- int (*push_pending_record)(struct sock *sk, int flags);
-
- void (*sk_write_space)(struct sock *sk);
+ /* cache cold stuff */
void (*sk_destruct)(struct sock *sk);
void (*sk_proto_close)(struct sock *sk, long timeout);
@@@ -279,6 -283,12 +283,12 @@@
int __user *optlen);
int (*hash)(struct sock *sk);
void (*unhash)(struct sock *sk);
+
+ union tls_crypto_context crypto_send;
+ union tls_crypto_context crypto_recv;
+
+ struct list_head list;
+ refcount_t refcount;
};
enum tls_offload_ctx_dir {
@@@ -294,26 -304,49 +304,50 @@@ struct tlsdev_ops
void (*tls_dev_del)(struct net_device *netdev,
struct tls_context *ctx,
enum tls_offload_ctx_dir direction);
- void (*tls_dev_resync_rx)(struct net_device *netdev,
- struct sock *sk, u32 seq, u64 rcd_sn);
+ void (*tls_dev_resync)(struct net_device *netdev,
+ struct sock *sk, u32 seq, u8 *rcd_sn,
+ enum tls_offload_ctx_dir direction);
};
+ enum tls_offload_sync_type {
+ TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
+ TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
+ };
+
+ #define TLS_DEVICE_RESYNC_NH_START_IVAL 2
+ #define TLS_DEVICE_RESYNC_NH_MAX_IVAL 128
+
struct tls_offload_context_rx {
/* sw must be the first member of tls_offload_context_rx */
struct tls_sw_context_rx sw;
- atomic64_t resync_req;
- u8 driver_state[];
+ enum tls_offload_sync_type resync_type;
+ /* this member is set regardless of resync_type, to avoid branches */
+ u8 resync_nh_reset:1;
+ /* CORE_NEXT_HINT-only member, but use the hole here */
+ u8 resync_nh_do_now:1;
+ union {
+ /* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ */
+ struct {
+ atomic64_t resync_req;
+ };
+ /* TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT */
+ struct {
+ u32 decrypted_failed;
+ u32 decrypted_tgt;
+ } resync_nh;
+ };
+ u8 driver_state[] __aligned(8);
/* The TLS layer reserves room for driver specific state
* Currently the belief is that there is not enough
* driver specific state to justify another layer of indirection
*/
+ #define TLS_DRIVER_STATE_SIZE_RX 8
};
#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
- (ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \
- TLS_DRIVER_STATE_SIZE)
+ (sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)
+void tls_ctx_free(struct tls_context *ctx);
int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
int __user *optlen);
@@@ -432,19 -465,15 +466,15 @@@ static inline struct tls_context *tls_g
}
static inline void tls_advance_record_sn(struct sock *sk,
- struct cipher_context *ctx,
- int version)
+ struct tls_prot_info *prot,
+ struct cipher_context *ctx)
{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct tls_prot_info *prot = &tls_ctx->prot_info;
-
if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
tls_err_abort(sk, EBADMSG);
- if (version != TLS_1_3_VERSION) {
+ if (prot->version != TLS_1_3_VERSION)
tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
prot->iv_size);
- }
}
static inline void tls_fill_prepend(struct tls_context *ctx,
@@@ -546,6 -575,23 +576,23 @@@ tls_offload_ctx_rx(const struct tls_con
return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}
+ #if IS_ENABLED(CONFIG_TLS_DEVICE)
+ static inline void *__tls_driver_ctx(struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
+ {
+ if (direction == TLS_OFFLOAD_CTX_DIR_TX)
+ return tls_offload_ctx_tx(tls_ctx)->driver_state;
+ else
+ return tls_offload_ctx_rx(tls_ctx)->driver_state;
+ }
+
+ static inline void *
+ tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
+ {
+ return __tls_driver_ctx(tls_get_ctx(sk), direction);
+ }
+ #endif
+
/* The TLS context is valid until sk_destruct is called */
static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
{
@@@ -555,6 -601,31 +602,31 @@@
atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1);
}
+ static inline void
+ tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
+ {
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+ tls_offload_ctx_rx(tls_ctx)->resync_type = type;
+ }
+
+ static inline void tls_offload_tx_resync_request(struct sock *sk)
+ {
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+ WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
+ }
+
+ /* Driver's seq tracking has to be disabled until resync has succeeded */
+ static inline bool tls_offload_tx_resync_pending(struct sock *sk)
+ {
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ bool ret;
+
+ ret = test_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
+ smp_mb__after_atomic();
+ return ret;
+ }
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
unsigned char *record_type);
@@@ -563,6 -634,7 +635,7 @@@ void tls_unregister_device(struct tls_d
int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
struct scatterlist *sgout);
+ struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);
struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
struct net_device *dev,
@@@ -575,6 -647,6 +648,6 @@@ int tls_sw_fallback_init(struct sock *s
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
void tls_device_offload_cleanup_rx(struct sock *sk);
- void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn);
+ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
#endif /* _TLS_OFFLOAD_H */
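
The TX resync additions pair a request with a pending check. Roughly, and with hypothetical driver names: when a device notices its record-sequence tracking has diverged, the driver calls tls_offload_tx_resync_request(); until the core performs the resync (->tls_dev_resync() with TLS_OFFLOAD_CTX_DIR_TX) and clears TLS_TX_SYNC_SCHED, the driver must not trust its own sequence tracking:

    /* hedged sketch; expected_seq and foo_sw_fallback() are illustrative */
    if (unlikely(tcp_seq != priv->expected_seq))
        tls_offload_tx_resync_request(sk);

    if (tls_offload_tx_resync_pending(sk))
        return foo_sw_fallback(skb);    /* encrypt in software for now */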
diff --combined include/net/xdp_sock.h
index 7da155164947,057b159ff8b9..dc0aa9b2c12d
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@@ -58,16 -58,11 +58,16 @@@ struct xdp_sock
struct xdp_umem *umem;
struct list_head flush_node;
u16 queue_id;
- struct xsk_queue *tx ____cacheline_aligned_in_smp;
- struct list_head list;
bool zc;
+ enum {
+ XSK_READY = 0,
+ XSK_BOUND,
+ XSK_UNBOUND,
+ } state;
/* Protects multiple processes in the control path */
struct mutex mutex;
+ struct xsk_queue *tx ____cacheline_aligned_in_smp;
+ struct list_head list;
/* Mutual exclusion of NAPI TX thread and sendmsg error paths
* in the SKB destructor callback.
*/
@@@ -82,10 -77,11 +82,11 @@@ int xsk_rcv(struct xdp_sock *xs, struc
void xsk_flush(struct xdp_sock *xs);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
+ bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_discard_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
- bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
+ bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
@@@ -104,6 -100,16 +105,16 @@@ static inline dma_addr_t xdp_umem_get_d
}
/* Reuse-queue aware version of FILL queue helpers */
+ static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
+ {
+ struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
+
+ if (rq->length >= cnt)
+ return true;
+
+ return xsk_umem_has_addrs(umem, cnt - rq->length);
+ }
+
static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
@@@ -151,6 -157,11 +162,11 @@@ static inline bool xsk_is_setup_for_bpf
return false;
}
+ static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
+ {
+ return false;
+ }
+
static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
return NULL;
@@@ -164,8 -175,8 +180,8 @@@ static inline void xsk_umem_complete_tx
{
}
- static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma,
- u32 *len)
+ static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
+ struct xdp_desc *desc)
{
return false;
}
@@@ -205,6 -216,11 +221,11 @@@ static inline dma_addr_t xdp_umem_get_d
return 0;
}
+ static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
+ {
+ return false;
+ }
+
static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
return NULL;
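
xsk_umem_has_addrs() and its reuse-queue-aware wrapper let a zero-copy driver confirm up front that a whole batch of FILL-queue addresses is available (counting the reuse queue first) instead of discovering a shortfall mid-refill. A hedged sketch of the intended batched-refill shape; budget and the descriptor posting are stand-ins:

    u64 handle;
    u32 i;

    if (!xsk_umem_has_addrs_rq(umem, budget))
        return false;   /* not enough addresses for a full batch */

    for (i = 0; i < budget; i++) {
        if (!xsk_umem_peek_addr_rq(umem, &handle))
            break;      /* should not happen after the batch check */
        /* ... post an RX descriptor for xdp_umem_get_dma(umem, handle) ... */
        xsk_umem_discard_addr_rq(umem);
    }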
diff --combined include/uapi/linux/bpf.h
index 29a5bc3d5c66,ead27aebf491..a82657a64dd4
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@@ -170,6 -170,7 +170,7 @@@ enum bpf_prog_type
BPF_PROG_TYPE_FLOW_DISSECTOR,
BPF_PROG_TYPE_CGROUP_SYSCTL,
BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
+ BPF_PROG_TYPE_CGROUP_SOCKOPT,
};
enum bpf_attach_type {
@@@ -194,6 -195,8 +195,8 @@@
BPF_CGROUP_SYSCTL,
BPF_CGROUP_UDP4_RECVMSG,
BPF_CGROUP_UDP6_RECVMSG,
+ BPF_CGROUP_GETSOCKOPT,
+ BPF_CGROUP_SETSOCKOPT,
__MAX_BPF_ATTACH_TYPE
};
@@@ -262,6 -265,24 +265,24 @@@
*/
#define BPF_F_ANY_ALIGNMENT (1U << 1)
+ /* BPF_F_TEST_RND_HI32 is used in the BPF_PROG_LOAD command for testing
+ * purposes. The verifier does sub-register def/use analysis and identifies
+ * instructions whose def only matters for the low 32-bit and whose high
+ * 32-bit is never referenced later through implicit zero extension. The
+ * verifier therefore notifies JIT back-ends that it is safe to skip clearing
+ * the high 32-bit for these instructions; this saves some back-ends a lot of
+ * code-gen. However, such an optimization is unnecessary on some arches, for
+ * example x86_64 and arm64, whose JIT back-ends therefore do not use the
+ * verifier's analysis result. But we really want a way to verify the
+ * correctness of the described optimization on x86_64, on which testsuites
+ * are frequently exercised.
+ *
+ * So, this flag is introduced. Once it is set, the verifier will randomize
+ * the high 32-bit for those instructions that have been identified as safe
+ * to skip zero extension for. Then, if the verifier's analysis is wrong,
+ * the randomization will regress tests and expose the bug.
+ */
+ #define BPF_F_TEST_RND_HI32 (1U << 2)
+
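
A hedged example of exercising the new flag from a test harness; loading via the raw syscall with prog_flags is the existing mechanism, while insns/insn_cnt stand for a prepared instruction array (needs <linux/bpf.h>, <sys/syscall.h> and <unistd.h>):

    union bpf_attr attr = {};
    int prog_fd;

    attr.prog_type  = BPF_PROG_TYPE_SOCKET_FILTER;
    attr.insns      = (__u64)(unsigned long)insns;
    attr.insn_cnt   = insn_cnt;
    attr.license    = (__u64)(unsigned long)"GPL";
    attr.prog_flags = BPF_F_TEST_RND_HI32;  /* randomize upper halves */

    prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));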
/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
* two extensions:
*
@@@ -1550,8 -1571,11 +1571,11 @@@ union bpf_attr
* but this is only implemented for native XDP (with driver
* support) as of this writing).
*
- * All values for *flags* are reserved for future usage, and must
- * be left at zero.
+ * The lower two bits of *flags* are used as the return code if
+ * the map lookup fails. This is so that the return value can be
+ * one of the XDP program return codes up to XDP_TX, as chosen by
+ * the caller. Any higher bits in the *flags* argument must be
+ * unset.
*
* When used to redirect packets to net devices, this helper
* provides a high performance increase over **bpf_redirect**\ ().
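
With the relaxed *flags* meaning documented above, an XDP program can choose what happens when the map lookup fails, e.g. fall through to the regular stack instead of returning an error. A hedged sketch using the bpf_helpers.h conventions of this era; the map name and section name are illustrative:

    struct bpf_map_def SEC("maps") xsks_map = {
        .type        = BPF_MAP_TYPE_XSKMAP,
        .key_size    = sizeof(int),
        .value_size  = sizeof(int),
        .max_entries = 64,
    };

    SEC("xdp_sock")
    int xdp_sock_prog(struct xdp_md *ctx)
    {
        /* on a lookup miss, XDP_PASS (the low bits of flags) is
         * returned, so the frame continues up the regular stack */
        return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
    }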
@@@ -1746,6 -1770,7 +1770,7 @@@
* * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out)
* * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
* * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
+ * * **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT)
*
* Therefore, this function can be used to clear a callback flag by
* setting the appropriate bit to zero. e.g. to disable the RTO
@@@ -2674,6 -2699,20 +2699,20 @@@
* 0 on success.
*
* **-ENOENT** if the bpf-local-storage cannot be found.
+ *
+ * int bpf_send_signal(u32 sig)
+ * Description
+ * Send signal *sig* to the current task.
+ * Return
+ * 0 on success or if the signal was successfully queued.
+ *
+ * **-EBUSY** if the work queue under NMI is full.
+ *
+ * **-EINVAL** if *sig* is invalid.
+ *
+ * **-EPERM** if no permission to send the *sig*.
+ *
+ * **-EAGAIN** if bpf program can try again.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@@ -2784,7 -2823,8 +2823,8 @@@
FN(strtol), \
FN(strtoul), \
FN(sk_storage_get), \
- FN(sk_storage_delete),
+ FN(sk_storage_delete), \
+ FN(send_signal),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@@ -3033,6 -3073,12 +3073,12 @@@ struct bpf_tcp_sock
* sum(delta(snd_una)), or how many bytes
* were acked.
*/
+ __u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups
+ * total number of DSACK blocks received
+ */
+ __u32 delivered; /* Total data packets delivered incl. rexmits */
+ __u32 delivered_ce; /* Like the above but only ECE marked packets */
+ __u32 icsk_retransmits; /* Number of unrecovered [RTO] timeouts */
};
struct bpf_sock_tuple {
@@@ -3052,6 -3098,10 +3098,10 @@@
};
};
+ struct bpf_xdp_sock {
+ __u32 queue_id;
+ };
+
#define XDP_PACKET_HEADROOM 256
/* User return codes for XDP prog type.
@@@ -3143,7 -3193,6 +3193,7 @@@ struct bpf_prog_info
char name[BPF_OBJ_NAME_LEN];
__u32 ifindex;
__u32 gpl_compatible:1;
+ __u32 :31; /* alignment pad */
__u64 netns_dev;
__u64 netns_ino;
__u32 nr_jited_ksyms;
@@@ -3213,6 -3262,7 +3263,7 @@@ struct bpf_sock_addr
__u32 msg_src_ip6[4]; /* Allows 1,2,4-byte read an 4-byte write.
* Stored in network byte order.
*/
+ __bpf_md_ptr(struct bpf_sock *, sk);
};
/* User bpf_sock_ops struct to access socket values and specify request ops
@@@ -3264,13 -3314,15 +3315,15 @@@ struct bpf_sock_ops
__u32 sk_txhash;
__u64 bytes_received;
__u64 bytes_acked;
+ __bpf_md_ptr(struct bpf_sock *, sk);
};
/* Definitions for bpf_sock_ops_cb_flags */
#define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0)
#define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1)
#define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2)
- #define BPF_SOCK_OPS_ALL_CB_FLAGS 0x7 /* Mask of all currently
+ #define BPF_SOCK_OPS_RTT_CB_FLAG (1<<3)
+ #define BPF_SOCK_OPS_ALL_CB_FLAGS 0xF /* Mask of all currently
* supported cb flags
*/
@@@ -3325,6 -3377,8 +3378,8 @@@ enum
BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after
* socket transition to LISTEN state.
*/
+ BPF_SOCK_OPS_RTT_CB, /* Called on every RTT.
+ */
};
/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
@@@ -3503,4 -3557,15 +3558,15 @@@ struct bpf_sysctl
*/
};
+ struct bpf_sockopt {
+ __bpf_md_ptr(struct bpf_sock *, sk);
+ __bpf_md_ptr(void *, optval);
+ __bpf_md_ptr(void *, optval_end);
+
+ __s32 level;
+ __s32 optname;
+ __s32 optlen;
+ __s32 retval;
+ };
+
#endif /* _UAPI__LINUX_BPF_H__ */
diff --combined include/uapi/linux/nl80211.h
index fa7ebbc6ff27,8fc3a43cac75..75758ec26c8b
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@@ -235,6 -235,15 +235,15 @@@
*/
/**
+ * DOC: SAE authentication offload
+ *
+ * By setting @NL80211_EXT_FEATURE_SAE_OFFLOAD flag drivers can indicate they
+ * support offloading SAE authentication for WPA3-Personal networks. In
+ * %NL80211_CMD_CONNECT the password for SAE should be specified using
+ * %NL80211_ATTR_SAE_PASSWORD.
+ */
+
+ /**
* enum nl80211_commands - supported nl80211 commands
*
* @NL80211_CMD_UNSPEC: unspecified command to catch errors
@@@ -2341,6 -2350,12 +2350,12 @@@ enum nl80211_commands
* should be picking up the lowest tx power, either tx power per-interface
* or per-station.
*
+ * @NL80211_ATTR_SAE_PASSWORD: attribute for passing SAE password material. It
+ * is used with %NL80211_CMD_CONNECT to provide password for offloading
+ * SAE authentication for WPA3-Personal networks.
+ *
+ * @NL80211_ATTR_TWT_RESPONDER: Enable target wait time responder support.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@@ -2794,6 -2809,10 +2809,10 @@@ enum nl80211_attrs
NL80211_ATTR_STA_TX_POWER_SETTING,
NL80211_ATTR_STA_TX_POWER,
+ NL80211_ATTR_SAE_PASSWORD,
+
+ NL80211_ATTR_TWT_RESPONDER,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@@ -4406,6 -4425,7 +4425,7 @@@ enum nl80211_mfp
enum nl80211_wpa_versions {
NL80211_WPA_VERSION_1 = 1 << 0,
NL80211_WPA_VERSION_2 = 1 << 1,
+ NL80211_WPA_VERSION_3 = 1 << 2,
};
/**
@@@ -5314,7 -5334,7 +5334,7 @@@ enum nl80211_feature_flags
NL80211_FEATURE_TDLS_CHANNEL_SWITCH = 1 << 28,
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR = 1 << 29,
NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR = 1 << 30,
- NL80211_FEATURE_ND_RANDOM_MAC_ADDR = 1 << 31,
+ NL80211_FEATURE_ND_RANDOM_MAC_ADDR = 1U << 31,
};
/**
@@@ -5422,6 -5442,9 +5442,9 @@@
* @NL80211_EXT_FEATURE_STA_TX_PWR: This driver supports controlling tx power
* to a station.
*
+ * @NL80211_EXT_FEATURE_SAE_OFFLOAD: Device wants to do SAE authentication in
+ * station mode (SAE password is passed as part of the connect command).
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@@ -5466,6 -5489,7 +5489,7 @@@ enum nl80211_ext_feature_index
NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD,
NL80211_EXT_FEATURE_EXT_KEY_ID,
NL80211_EXT_FEATURE_STA_TX_PWR,
+ NL80211_EXT_FEATURE_SAE_OFFLOAD,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
diff --combined kernel/bpf/core.c
index f2148db91439,e2c1b43728da..16079550db6d
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@@ -1364,10 -1364,10 +1364,10 @@@ select_insn
insn++;
CONT;
ALU_ARSH_X:
- DST = (u64) (u32) ((*(s32 *) &DST) >> SRC);
+ DST = (u64) (u32) (((s32) DST) >> SRC);
CONT;
ALU_ARSH_K:
- DST = (u64) (u32) ((*(s32 *) &DST) >> IMM);
+ DST = (u64) (u32) (((s32) DST) >> IMM);
CONT;
ALU64_ARSH_X:
(*(s64 *) &DST) >>= SRC;
@@@ -1791,38 -1791,42 +1791,42 @@@ struct bpf_prog_array *bpf_prog_array_a
return &empty_prog_array.hdr;
}
- void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
+ void bpf_prog_array_free(struct bpf_prog_array *progs)
{
- if (!progs ||
- progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
+ if (!progs || progs == &empty_prog_array.hdr)
return;
kfree_rcu(progs, rcu);
}
- int bpf_prog_array_length(struct bpf_prog_array __rcu *array)
+ int bpf_prog_array_length(struct bpf_prog_array *array)
{
struct bpf_prog_array_item *item;
u32 cnt = 0;
- rcu_read_lock();
- item = rcu_dereference(array)->items;
- for (; item->prog; item++)
+ for (item = array->items; item->prog; item++)
if (item->prog != &dummy_bpf_prog.prog)
cnt++;
- rcu_read_unlock();
return cnt;
}
+ bool bpf_prog_array_is_empty(struct bpf_prog_array *array)
+ {
+ struct bpf_prog_array_item *item;
+
+ for (item = array->items; item->prog; item++)
+ if (item->prog != &dummy_bpf_prog.prog)
+ return false;
+ return true;
+ }
- static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,
+ static bool bpf_prog_array_copy_core(struct bpf_prog_array *array,
u32 *prog_ids,
u32 request_cnt)
{
struct bpf_prog_array_item *item;
int i = 0;
- item = rcu_dereference_check(array, 1)->items;
- for (; item->prog; item++) {
+ for (item = array->items; item->prog; item++) {
if (item->prog == &dummy_bpf_prog.prog)
continue;
prog_ids[i] = item->prog->aux->id;
@@@ -1835,7 -1839,7 +1839,7 @@@
return !!(item->prog);
}
- int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,
+ int bpf_prog_array_copy_to_user(struct bpf_prog_array *array,
__u32 __user *prog_ids, u32 cnt)
{
unsigned long err = 0;
@@@ -1846,18 -1850,12 +1850,12 @@@
* cnt = bpf_prog_array_length();
* if (cnt > 0)
* bpf_prog_array_copy_to_user(..., cnt);
- * so below kcalloc doesn't need extra cnt > 0 check, but
- * bpf_prog_array_length() releases rcu lock and
- * prog array could have been swapped with empty or larger array,
- * so always copy 'cnt' prog_ids to the user.
- * In a rare race the user will see zero prog_ids
+ * so below kcalloc doesn't need extra cnt > 0 check.
*/
ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
if (!ids)
return -ENOMEM;
- rcu_read_lock();
nospc = bpf_prog_array_copy_core(array, ids, cnt);
- rcu_read_unlock();
err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
kfree(ids);
if (err)
@@@ -1867,19 -1865,19 +1865,19 @@@
return 0;
}
- void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array,
+ void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
struct bpf_prog *old_prog)
{
- struct bpf_prog_array_item *item = array->items;
+ struct bpf_prog_array_item *item;
- for (; item->prog; item++)
+ for (item = array->items; item->prog; item++)
if (item->prog == old_prog) {
WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
break;
}
}
- int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
+ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
struct bpf_prog *exclude_prog,
struct bpf_prog *include_prog,
struct bpf_prog_array **new_array)
@@@ -1943,7 -1941,7 +1941,7 @@@
return 0;
}
- int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
+ int bpf_prog_array_copy_info(struct bpf_prog_array *array,
u32 *prog_ids, u32 request_cnt,
u32 *prog_cnt)
{
@@@ -2086,6 -2084,15 +2084,15 @@@ bool __weak bpf_helper_changes_pkt_data
return false;
}
+ /* Return TRUE if the JIT backend wants verifier to enable sub-register usage
+ * analysis code and wants explicit zero extension inserted by verifier.
+ * Otherwise, return FALSE.
+ */
+ bool __weak bpf_jit_needs_zext(void)
+ {
+ return false;
+ }
+
/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
* skb_copy_bits(), so provide a weak definition of it for NET-less config.
*/
@@@ -2103,3 -2110,4 +2110,4 @@@ EXPORT_SYMBOL(bpf_stats_enabled_key)
#include <linux/bpf_trace.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
+ EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx);
diff --combined kernel/cgroup/cgroup.c
index 2a42a52ea75f,daa567728251..b8042569a119
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@@ -2201,7 -2201,8 +2201,7 @@@ static int cgroup_init_fs_context(struc
fc->ops = &cgroup_fs_context_ops;
else
fc->ops = &cgroup1_fs_context_ops;
- if (fc->user_ns)
- put_user_ns(fc->user_ns);
+ put_user_ns(fc->user_ns);
fc->user_ns = get_user_ns(ctx->ns->user_ns);
fc->global = true;
return 0;
@@@ -2242,50 -2243,6 +2242,50 @@@ static struct file_system_type cgroup2_
.fs_flags = FS_USERNS_MOUNT,
};
+#ifdef CONFIG_CPUSETS
+static const struct fs_context_operations cpuset_fs_context_ops = {
+ .get_tree = cgroup1_get_tree,
+ .free = cgroup_fs_context_free,
+};
+
+/*
+ * This is ugly, but preserves the userspace API for existing cpuset
+ * users. If someone tries to mount the "cpuset" filesystem, we
+ * silently switch it to mount "cgroup" instead
+ */
+static int cpuset_init_fs_context(struct fs_context *fc)
+{
+ char *agent = kstrdup("/sbin/cpuset_release_agent", GFP_USER);
+ struct cgroup_fs_context *ctx;
+ int err;
+
+ err = cgroup_init_fs_context(fc);
+ if (err) {
+ kfree(agent);
+ return err;
+ }
+
+ fc->ops = &cpuset_fs_context_ops;
+
+ ctx = cgroup_fc2context(fc);
+ ctx->subsys_mask = 1 << cpuset_cgrp_id;
+ ctx->flags |= CGRP_ROOT_NOPREFIX;
+ ctx->release_agent = agent;
+
+ get_filesystem(&cgroup_fs_type);
+ put_filesystem(fc->fs_type);
+ fc->fs_type = &cgroup_fs_type;
+
+ return 0;
+}
+
+static struct file_system_type cpuset_fs_type = {
+ .name = "cpuset",
+ .init_fs_context = cpuset_init_fs_context,
+ .fs_flags = FS_USERNS_MOUNT,
+};
+#endif
+
int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
struct cgroup_namespace *ns)
{
@@@ -5048,8 -5005,6 +5048,6 @@@ static void css_release_work_fn(struct
if (cgrp->kn)
RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv,
NULL);
-
- cgroup_bpf_put(cgrp);
}
mutex_unlock(&cgroup_mutex);
@@@ -5575,6 -5530,8 +5573,8 @@@ static int cgroup_destroy_locked(struc
cgroup1_check_for_release(parent);
+ cgroup_bpf_offline(cgrp);
+
/* put the base reference */
percpu_ref_kill(&cgrp->self.refcnt);
@@@ -5804,9 -5761,6 +5804,9 @@@ int __init cgroup_init(void
WARN_ON(register_filesystem(&cgroup_fs_type));
WARN_ON(register_filesystem(&cgroup2_fs_type));
WARN_ON(!proc_create_single("cgroups", 0, NULL, proc_cgroupstats_show));
+#ifdef CONFIG_CPUSETS
+ WARN_ON(register_filesystem(&cpuset_fs_type));
+#endif
return 0;
}
@@@ -6325,6 -6279,7 +6325,7 @@@ void cgroup_sk_alloc(struct sock_cgroup
* Don't use cgroup_get_live().
*/
cgroup_get(sock_cgroup_ptr(skcd));
+ cgroup_bpf_get(sock_cgroup_ptr(skcd));
return;
}
@@@ -6336,6 -6291,7 +6337,7 @@@
cset = task_css_set(current);
if (likely(cgroup_tryget(cset->dfl_cgrp))) {
skcd->val = (unsigned long)cset->dfl_cgrp;
+ cgroup_bpf_get(cset->dfl_cgrp);
break;
}
cpu_relax();
@@@ -6346,7 -6302,10 +6348,10 @@@
void cgroup_sk_free(struct sock_cgroup_data *skcd)
{
- cgroup_put(sock_cgroup_ptr(skcd));
+ struct cgroup *cgrp = sock_cgroup_ptr(skcd);
+
+ cgroup_bpf_put(cgrp);
+ cgroup_put(cgrp);
}
#endif /* CONFIG_SOCK_CGROUP_DATA */
diff --combined lib/Kconfig
index e09b3e081a53,78ddb9526b62..f8d4088551d7
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@@ -531,6 -531,14 +531,6 @@@ config LRU_CACH
config CLZ_TAB
bool
-config DDR
- bool "JEDEC DDR data"
- help
- Data from JEDEC specs for DDR SDRAM memories,
- particularly the AC timing parameters and addressing
- information. This data is useful for drivers handling
- DDR SDRAM controllers.
-
config IRQ_POLL
bool "IRQ polling library"
help
@@@ -554,6 -562,14 +554,14 @@@ config SIGNATUR
Digital signature verification. Currently only RSA is supported.
Implementation is done using GnuPG MPI library
+ config DIMLIB
+ bool "DIM library"
+ default y
+ help
+ Dynamic Interrupt Moderation library.
+ Implements an algorithm for dynamically changing CQ moderation values
+ according to run-time performance.
+
#
# libfdt files, only selected if needed.
#
diff --combined lib/Kconfig.debug
index be9787484fb4,99272b5dd980..4542bac32a88
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@@ -305,26 -305,19 +305,26 @@@ config DEBUG_F
If unsure, say N.
-config HEADERS_CHECK
- bool "Run 'make headers_check' when building vmlinux"
+config HEADERS_INSTALL
+ bool "Install uapi headers to usr/include"
depends on !UML
help
- This option will extract the user-visible kernel headers whenever
- building the kernel, and will run basic sanity checks on them to
- ensure that exported files do not attempt to include files which
- were not exported, etc.
+ This option will install uapi headers (headers exported to user-space)
+ into the usr/include directory for use during the kernel build.
+ This is unneeded for building the kernel itself, but needed for some
+ user-space program samples. It is also needed by some features such
+ as uapi header sanity checks.
+
+config HEADERS_CHECK
+ bool "Run sanity checks on uapi headers when building 'all'"
+ depends on HEADERS_INSTALL
+ help
+ This option will run basic sanity checks on uapi headers when
+ building the 'all' target, for example, ensuring that they do not
+ attempt to include files which were not exported, etc.
If you're making modifications to header files which are
- relevant for userspace, say 'Y', and check the headers
- exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in
- your build tree), to make sure they're suitable.
+ relevant for userspace, say 'Y'.
config OPTIMIZE_INLINING
bool "Allow compiler to uninline functions marked 'inline'"
@@@ -1708,7 -1701,7 +1708,7 @@@ config LKDT
called lkdtm.
Documentation on how to use the module can be found in
- Documentation/fault-injection/provoke-crashes.txt
+ Documentation/fault-injection/provoke-crashes.rst
config TEST_LIST_SORT
tristate "Linked list sorting test"
@@@ -1916,6 -1909,15 +1916,15 @@@ config TEST_BP
If unsure, say N.
+ config TEST_BLACKHOLE_DEV
+ tristate "Test blackhole netdev functionality"
+ depends on m && NET
+ help
+ This builds the "test_blackhole_dev" module that validates the
+ data path through this blackhole netdev.
+
+ If unsure, say N.
+
config FIND_BIT_BENCHMARK
tristate "Test find_bit functions"
help
diff --combined lib/Makefile
index cb66bc9c5b2f,6ac44fe2a37f..86c39ec0d874
--- a/lib/Makefile
+++ b/lib/Makefile
@@@ -91,6 -91,7 +91,7 @@@ obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += tes
obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
obj-$(CONFIG_TEST_STACKINIT) += test_stackinit.o
+ obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o
obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/
@@@ -202,10 -203,13 +203,11 @@@ obj-$(CONFIG_GLOB) += glob.
obj-$(CONFIG_GLOB_SELFTEST) += globtest.o
obj-$(CONFIG_MPILIB) += mpi/
+ obj-$(CONFIG_DIMLIB) += dim/
obj-$(CONFIG_SIGNATURE) += digsig.o
lib-$(CONFIG_CLZ_TAB) += clz_tab.o
-obj-$(CONFIG_DDR) += jedec_ddr_data.o
-
obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o
obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
diff --combined net/batman-adv/hard-interface.c
index 3719cfd026f0,b5465e6e380d..c90e47342bb0
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@@ -16,6 -16,7 +16,7 @@@
#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/kref.h>
+ #include <linux/limits.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
@@@ -795,9 -796,6 +796,9 @@@ int batadv_hardif_enable_interface(stru
batadv_hardif_recalc_extra_skbroom(soft_iface);
+ if (bat_priv->algo_ops->iface.enabled)
+ bat_priv->algo_ops->iface.enabled(hard_iface);
+
out:
return 0;
@@@ -923,9 -921,7 +924,7 @@@ batadv_hardif_add_interface(struct net_
hard_iface->soft_iface = NULL;
hard_iface->if_status = BATADV_IF_NOT_IN_USE;
- ret = batadv_debugfs_add_hardif(hard_iface);
- if (ret)
- goto free_sysfs;
+ batadv_debugfs_add_hardif(hard_iface);
INIT_LIST_HEAD(&hard_iface->list);
INIT_HLIST_HEAD(&hard_iface->neigh_list);
@@@ -947,8 -943,6 +946,6 @@@
return hard_iface;
- free_sysfs:
- batadv_sysfs_del_hardif(&hard_iface->hardif_obj);
free_if:
kfree(hard_iface);
release_dev:
diff --combined net/batman-adv/types.h
index e0b25104cbfa,c2996296b953..6ae139d74e0f
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@@ -14,20 -14,22 +14,22 @@@
#include <linux/average.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
+ #include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/kref.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/sched.h> /* for linux/wait.h */
+ #include <linux/seq_file.h>
+ #include <linux/skbuff.h>
#include <linux/spinlock.h>
+ #include <linux/timer.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>
- struct seq_file;
-
#ifdef CONFIG_BATMAN_ADV_DAT
/**
@@@ -402,6 -404,17 +404,17 @@@ struct batadv_orig_node
* list
*/
struct hlist_node mcast_want_all_ipv6_node;
+
+ /**
+ * @mcast_want_all_rtr4_node: a list node for the mcast.want_all_rtr4
+ * list
+ */
+ struct hlist_node mcast_want_all_rtr4_node;
+ /**
+ * @mcast_want_all_rtr6_node: a list node for the mcast.want_all_rtr6
+ * list
+ */
+ struct hlist_node mcast_want_all_rtr6_node;
#endif
/** @capabilities: announced capabilities of this originator */
@@@ -1169,6 -1182,26 +1182,26 @@@ struct batadv_mcast_querier_state
};
/**
+ * struct batadv_mcast_mla_flags - flags for the querier, bridge and tvlv state
+ */
+ struct batadv_mcast_mla_flags {
+ /** @querier_ipv4: the current state of an IGMP querier in the mesh */
+ struct batadv_mcast_querier_state querier_ipv4;
+
+ /** @querier_ipv6: the current state of an MLD querier in the mesh */
+ struct batadv_mcast_querier_state querier_ipv6;
+
+ /** @enabled: whether the multicast tvlv is currently enabled */
+ unsigned char enabled:1;
+
+ /** @bridged: whether the soft interface has a bridge on top */
+ unsigned char bridged:1;
+
+ /** @tvlv_flags: the flags we have last sent in our mcast tvlv */
+ u8 tvlv_flags;
+ };
+
+ /**
* struct batadv_priv_mcast - per mesh interface mcast data
*/
struct batadv_priv_mcast {
@@@ -1196,20 -1229,22 +1229,22 @@@
*/
struct hlist_head want_all_ipv6_list;
- /** @querier_ipv4: the current state of an IGMP querier in the mesh */
- struct batadv_mcast_querier_state querier_ipv4;
-
- /** @querier_ipv6: the current state of an MLD querier in the mesh */
- struct batadv_mcast_querier_state querier_ipv6;
-
- /** @flags: the flags we have last sent in our mcast tvlv */
- u8 flags;
+ /**
+ * @want_all_rtr4_list: a list of orig_nodes wanting all routable IPv4
+ * multicast traffic
+ */
+ struct hlist_head want_all_rtr4_list;
- /** @enabled: whether the multicast tvlv is currently enabled */
- unsigned char enabled:1;
+ /**
+ * @want_all_rtr6_list: a list of orig_nodes wanting all routable IPv6
+ * multicast traffic
+ */
+ struct hlist_head want_all_rtr6_list;
- /** @bridged: whether the soft interface has a bridge on top */
- unsigned char bridged:1;
+ /**
+ * @mla_flags: flags for the querier, bridge and tvlv state
+ */
+ struct batadv_mcast_mla_flags mla_flags;
/**
* @mla_lock: a lock protecting mla_list and mla_flags
@@@ -1228,6 -1263,12 +1263,12 @@@
/** @num_want_all_ipv6: counter for items in want_all_ipv6_list */
atomic_t num_want_all_ipv6;
+ /** @num_want_all_rtr4: counter for items in want_all_rtr4_list */
+ atomic_t num_want_all_rtr4;
+
+ /** @num_want_all_rtr6: counter for items in want_all_rtr6_list */
+ atomic_t num_want_all_rtr6;
+
/**
* @want_lists_lock: lock for protecting modifications to mcasts
* want_all_{unsnoopables,ipv4,ipv6}_list (traversals are rcu-locked)
@@@ -2129,9 -2170,6 +2170,9 @@@ struct batadv_algo_iface_ops
/** @enable: init routing info when hard-interface is enabled */
int (*enable)(struct batadv_hard_iface *hard_iface);
+ /** @enabled: notification when hard-interface was enabled (optional) */
+ void (*enabled)(struct batadv_hard_iface *hard_iface);
+
/** @disable: de-init routing info when hard-interface is disabled */
void (*disable)(struct batadv_hard_iface *hard_iface);
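
A sketch of how a routing algorithm would hook the new optional
notification; the .iface layout matches the batadv_algo_iface_ops hunk
above, while the callbacks themselves are hypothetical:

static void my_algo_iface_enabled(struct batadv_hard_iface *hard_iface)
{
	/* Runs after .enable, once the hard-interface is fully set up. */
}

static struct batadv_algo_ops my_algo_ops = {
	.name = "my_algo",
	.iface = {
		.enable = my_algo_iface_enable,		/* assumed elsewhere */
		.enabled = my_algo_iface_enabled,	/* new, optional */
		.disable = my_algo_iface_disable,	/* assumed elsewhere */
	},
};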
diff --combined net/bluetooth/l2cap_core.c
index 5406d7cd46ad,007317b072b4..cc506fe99b4d
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@@ -168,11 -168,18 +168,18 @@@ static struct l2cap_chan *l2cap_get_cha
return c;
}
- static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
+ static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
+ u8 src_type)
{
struct l2cap_chan *c;
list_for_each_entry(c, &chan_list, global_l) {
+ if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
+ continue;
+
+ if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
+ continue;
+
if (c->sport == psm && !bacmp(&c->src, src))
return c;
}
@@@ -185,7 -192,7 +192,7 @@@ int l2cap_add_psm(struct l2cap_chan *ch
write_lock(&chan_list_lock);
- if (psm && __l2cap_global_chan_by_addr(psm, src)) {
+ if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
err = -EADDRINUSE;
goto done;
}
@@@ -209,7 -216,8 +216,8 @@@
err = -EINVAL;
for (p = start; p <= end; p += incr)
- if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
+ if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
+ chan->src_type)) {
chan->psm = cpu_to_le16(p);
chan->sport = cpu_to_le16(p);
err = 0;
@@@ -1353,7 -1361,7 +1361,7 @@@ static bool l2cap_check_enc_key_size(st
* actually encrypted before enforcing a key size.
*/
return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
- hcon->enc_key_size > HCI_MIN_ENC_KEY_SIZE);
+ hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE);
}
static void l2cap_do_start(struct l2cap_chan *chan)
@@@ -4394,6 -4402,12 +4402,12 @@@ static inline int l2cap_disconnect_rsp(
l2cap_chan_lock(chan);
+ if (chan->state != BT_DISCONN) {
+ l2cap_chan_unlock(chan);
+ mutex_unlock(&conn->chan_lock);
+ return 0;
+ }
+
l2cap_chan_hold(chan);
l2cap_chan_del(chan, 0);
@@@ -5291,7 -5305,14 +5305,14 @@@ static inline int l2cap_conn_param_upda
memset(&rsp, 0, sizeof(rsp));
- err = hci_check_conn_params(min, max, latency, to_multiplier);
+ if (min < hcon->le_conn_min_interval ||
+ max > hcon->le_conn_max_interval) {
+ BT_DBG("requested connection interval exceeds current bounds.");
+ err = -EINVAL;
+ } else {
+ err = hci_check_conn_params(min, max, latency, to_multiplier);
+ }
+
if (err)
rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
else
diff --combined net/bridge/netfilter/Kconfig
index 36a98d36d339,f4fb0b9b927d..d978f6d820f3
--- a/net/bridge/netfilter/Kconfig
+++ b/net/bridge/netfilter/Kconfig
@@@ -19,6 -19,20 +19,20 @@@ config NF_LOG_BRIDG
tristate "Bridge packet logging"
select NF_LOG_COMMON
+ config NF_CONNTRACK_BRIDGE
+ tristate "IPv4/IPV6 bridge connection tracking support"
+ depends on NF_CONNTRACK
+ default n
+ help
+ Connection tracking keeps a record of what packets have passed
+ through your machine, in order to figure out how they are grouped
+ into connections. This is used to enhance packet filtering via
+ stateful policies. Enable this if you want native tracking from
+ the bridge. This provides a replacement for the `br_netfilter'
+ infrastructure.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
endif # NF_TABLES_BRIDGE
menuconfig BRIDGE_NF_EBTABLES
@@@ -114,7 -128,7 +128,7 @@@ config BRIDGE_EBT_LIMI
equivalent of the iptables limit match.
If you want to compile it as a module, say M here and read
- <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
+ <file:Documentation/kbuild/modules.rst>. If unsure, say `N'.
config BRIDGE_EBT_MARK
tristate "ebt: mark filter support"
diff --combined net/core/filter.c
index 3fdf1b21be36,089aaea0ccc6..b106f677fd55
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@@ -62,6 -62,7 +62,7 @@@
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/ip_fib.h>
+ #include <net/nexthop.h>
#include <net/flow.h>
#include <net/arp.h>
#include <net/ipv6.h>
@@@ -2157,8 -2158,8 +2158,8 @@@ BPF_CALL_2(bpf_redirect, u32, ifindex,
if (unlikely(flags & ~(BPF_F_INGRESS)))
return TC_ACT_SHOT;
- ri->ifindex = ifindex;
ri->flags = flags;
+ ri->tgt_index = ifindex;
return TC_ACT_REDIRECT;
}
@@@ -2168,8 -2169,8 +2169,8 @@@ int skb_do_redirect(struct sk_buff *skb
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
struct net_device *dev;
- dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex);
- ri->ifindex = 0;
+ dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->tgt_index);
+ ri->tgt_index = 0;
if (unlikely(!dev)) {
kfree_skb(skb);
return -EINVAL;
@@@ -3487,11 -3488,11 +3488,11 @@@ xdp_do_redirect_slow(struct net_device
struct bpf_prog *xdp_prog, struct bpf_redirect_info *ri)
{
struct net_device *fwd;
- u32 index = ri->ifindex;
+ u32 index = ri->tgt_index;
int err;
fwd = dev_get_by_index_rcu(dev_net(dev), index);
- ri->ifindex = 0;
+ ri->tgt_index = 0;
if (unlikely(!fwd)) {
err = -EINVAL;
goto err;
@@@ -3522,7 -3523,6 +3523,6 @@@ static int __bpf_tx_xdp_map(struct net_
err = dev_map_enqueue(dst, xdp, dev_rx);
if (unlikely(err))
return err;
- __dev_map_insert_ctx(map, index);
break;
}
case BPF_MAP_TYPE_CPUMAP: {
@@@ -3531,7 -3531,6 +3531,6 @@@
err = cpu_map_enqueue(rcpu, xdp, dev_rx);
if (unlikely(err))
return err;
- __cpu_map_insert_ctx(map, index);
break;
}
case BPF_MAP_TYPE_XSKMAP: {
@@@ -3605,18 -3604,14 +3604,14 @@@ static int xdp_do_redirect_map(struct n
struct bpf_prog *xdp_prog, struct bpf_map *map,
struct bpf_redirect_info *ri)
{
- u32 index = ri->ifindex;
- void *fwd = NULL;
+ u32 index = ri->tgt_index;
+ void *fwd = ri->tgt_value;
int err;
- ri->ifindex = 0;
+ ri->tgt_index = 0;
+ ri->tgt_value = NULL;
WRITE_ONCE(ri->map, NULL);
- fwd = __xdp_map_lookup_elem(map, index);
- if (unlikely(!fwd)) {
- err = -EINVAL;
- goto err;
- }
if (ri->map_to_flush && unlikely(ri->map_to_flush != map))
xdp_do_flush_map();
@@@ -3652,19 -3647,14 +3647,14 @@@ static int xdp_do_generic_redirect_map(
struct bpf_map *map)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
- u32 index = ri->ifindex;
- void *fwd = NULL;
+ u32 index = ri->tgt_index;
+ void *fwd = ri->tgt_value;
int err = 0;
- ri->ifindex = 0;
+ ri->tgt_index = 0;
+ ri->tgt_value = NULL;
WRITE_ONCE(ri->map, NULL);
- fwd = __xdp_map_lookup_elem(map, index);
- if (unlikely(!fwd)) {
- err = -EINVAL;
- goto err;
- }
-
if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
struct bpf_dtab_netdev *dst = fwd;
@@@ -3696,14 -3686,14 +3686,14 @@@ int xdp_do_generic_redirect(struct net_
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
struct bpf_map *map = READ_ONCE(ri->map);
- u32 index = ri->ifindex;
+ u32 index = ri->tgt_index;
struct net_device *fwd;
int err = 0;
if (map)
return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog,
map);
- ri->ifindex = 0;
+ ri->tgt_index = 0;
fwd = dev_get_by_index_rcu(dev_net(dev), index);
if (unlikely(!fwd)) {
err = -EINVAL;
@@@ -3731,8 -3721,9 +3721,9 @@@ BPF_CALL_2(bpf_xdp_redirect, u32, ifind
if (unlikely(flags))
return XDP_ABORTED;
- ri->ifindex = ifindex;
ri->flags = flags;
+ ri->tgt_index = ifindex;
+ ri->tgt_value = NULL;
WRITE_ONCE(ri->map, NULL);
return XDP_REDIRECT;
@@@ -3751,11 -3742,23 +3742,23 @@@ BPF_CALL_3(bpf_xdp_redirect_map, struc
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
- if (unlikely(flags))
+ /* Lower bits of the flags are used as return code on lookup failure */
+ if (unlikely(flags > XDP_TX))
return XDP_ABORTED;
- ri->ifindex = ifindex;
+ ri->tgt_value = __xdp_map_lookup_elem(map, ifindex);
+ if (unlikely(!ri->tgt_value)) {
+ /* If the lookup fails we want to clear out the state in the
+ * redirect_info struct completely, so that if an eBPF program
+ * performs multiple lookups, the last one always takes
+ * precedence.
+ */
+ WRITE_ONCE(ri->map, NULL);
+ return flags;
+ }
+
ri->flags = flags;
+ ri->tgt_index = ifindex;
WRITE_ONCE(ri->map, map);
return XDP_REDIRECT;
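
With the lookup moved into the helper, an XDP program can now encode its
fallback action in the lower bits of the flags argument; a minimal
sketch (map layout and index choice are illustrative):

#include <linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") xsks_map = {
	.type = BPF_MAP_TYPE_XSKMAP,
	.key_size = sizeof(int),
	.value_size = sizeof(int),
	.max_entries = 64,
};

SEC("xdp")
int xdp_redir(struct xdp_md *ctx)
{
	/* On a failed map lookup this now returns XDP_PASS (the flags
	 * value) instead of unconditionally aborting the packet.
	 */
	return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
}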
@@@ -4670,7 -4673,7 +4673,7 @@@ static int bpf_ipv4_fib_lookup(struct n
if (res.type != RTN_UNICAST)
return BPF_FIB_LKUP_RET_NOT_FWDED;
- if (res.fi->fib_nhs > 1)
+ if (fib_info_num_path(res.fi) > 1)
fib_select_path(net, &res, &fl4, NULL);
if (check_mtu) {
@@@ -4737,7 -4740,7 +4740,7 @@@ static int bpf_ipv6_fib_lookup(struct n
return -ENODEV;
idev = __in6_dev_get_safely(dev);
- if (unlikely(!idev || !net->ipv6.devconf_all->forwarding))
+ if (unlikely(!idev || !idev->cnf.forwarding))
return BPF_FIB_LKUP_RET_FWD_DISABLED;
if (flags & BPF_FIB_LOOKUP_OUTPUT) {
@@@ -5191,54 -5194,6 +5194,6 @@@ static const struct bpf_func_proto bpf_
};
#endif /* CONFIG_IPV6_SEG6_BPF */
- #define CONVERT_COMMON_TCP_SOCK_FIELDS(md_type, CONVERT) \
- do { \
- switch (si->off) { \
- case offsetof(md_type, snd_cwnd): \
- CONVERT(snd_cwnd); break; \
- case offsetof(md_type, srtt_us): \
- CONVERT(srtt_us); break; \
- case offsetof(md_type, snd_ssthresh): \
- CONVERT(snd_ssthresh); break; \
- case offsetof(md_type, rcv_nxt): \
- CONVERT(rcv_nxt); break; \
- case offsetof(md_type, snd_nxt): \
- CONVERT(snd_nxt); break; \
- case offsetof(md_type, snd_una): \
- CONVERT(snd_una); break; \
- case offsetof(md_type, mss_cache): \
- CONVERT(mss_cache); break; \
- case offsetof(md_type, ecn_flags): \
- CONVERT(ecn_flags); break; \
- case offsetof(md_type, rate_delivered): \
- CONVERT(rate_delivered); break; \
- case offsetof(md_type, rate_interval_us): \
- CONVERT(rate_interval_us); break; \
- case offsetof(md_type, packets_out): \
- CONVERT(packets_out); break; \
- case offsetof(md_type, retrans_out): \
- CONVERT(retrans_out); break; \
- case offsetof(md_type, total_retrans): \
- CONVERT(total_retrans); break; \
- case offsetof(md_type, segs_in): \
- CONVERT(segs_in); break; \
- case offsetof(md_type, data_segs_in): \
- CONVERT(data_segs_in); break; \
- case offsetof(md_type, segs_out): \
- CONVERT(segs_out); break; \
- case offsetof(md_type, data_segs_out): \
- CONVERT(data_segs_out); break; \
- case offsetof(md_type, lost_out): \
- CONVERT(lost_out); break; \
- case offsetof(md_type, sacked_out): \
- CONVERT(sacked_out); break; \
- case offsetof(md_type, bytes_received): \
- CONVERT(bytes_received); break; \
- case offsetof(md_type, bytes_acked): \
- CONVERT(bytes_acked); break; \
- } \
- } while (0)
-
#ifdef CONFIG_INET
static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
int dif, int sdif, u8 family, u8 proto)
@@@ -5589,7 -5544,8 +5544,8 @@@ static const struct bpf_func_proto bpf_
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
struct bpf_insn_access_aux *info)
{
- if (off < 0 || off >= offsetofend(struct bpf_tcp_sock, bytes_acked))
+ if (off < 0 || off >= offsetofend(struct bpf_tcp_sock,
+ icsk_retransmits))
return false;
if (off % size != 0)
@@@ -5620,8 -5576,19 +5576,19 @@@ u32 bpf_tcp_sock_convert_ctx_access(enu
offsetof(struct tcp_sock, FIELD)); \
} while (0)
- CONVERT_COMMON_TCP_SOCK_FIELDS(struct bpf_tcp_sock,
- BPF_TCP_SOCK_GET_COMMON);
+ #define BPF_INET_SOCK_GET_COMMON(FIELD) \
+ do { \
+ BUILD_BUG_ON(FIELD_SIZEOF(struct inet_connection_sock, \
+ FIELD) > \
+ FIELD_SIZEOF(struct bpf_tcp_sock, FIELD)); \
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
+ struct inet_connection_sock, \
+ FIELD), \
+ si->dst_reg, si->src_reg, \
+ offsetof( \
+ struct inet_connection_sock, \
+ FIELD)); \
+ } while (0)
if (insn > insn_buf)
return insn - insn_buf;
@@@ -5637,6 -5604,81 +5604,81 @@@
offsetof(struct tcp_sock, rtt_min) +
offsetof(struct minmax_sample, v));
break;
+ case offsetof(struct bpf_tcp_sock, snd_cwnd):
+ BPF_TCP_SOCK_GET_COMMON(snd_cwnd);
+ break;
+ case offsetof(struct bpf_tcp_sock, srtt_us):
+ BPF_TCP_SOCK_GET_COMMON(srtt_us);
+ break;
+ case offsetof(struct bpf_tcp_sock, snd_ssthresh):
+ BPF_TCP_SOCK_GET_COMMON(snd_ssthresh);
+ break;
+ case offsetof(struct bpf_tcp_sock, rcv_nxt):
+ BPF_TCP_SOCK_GET_COMMON(rcv_nxt);
+ break;
+ case offsetof(struct bpf_tcp_sock, snd_nxt):
+ BPF_TCP_SOCK_GET_COMMON(snd_nxt);
+ break;
+ case offsetof(struct bpf_tcp_sock, snd_una):
+ BPF_TCP_SOCK_GET_COMMON(snd_una);
+ break;
+ case offsetof(struct bpf_tcp_sock, mss_cache):
+ BPF_TCP_SOCK_GET_COMMON(mss_cache);
+ break;
+ case offsetof(struct bpf_tcp_sock, ecn_flags):
+ BPF_TCP_SOCK_GET_COMMON(ecn_flags);
+ break;
+ case offsetof(struct bpf_tcp_sock, rate_delivered):
+ BPF_TCP_SOCK_GET_COMMON(rate_delivered);
+ break;
+ case offsetof(struct bpf_tcp_sock, rate_interval_us):
+ BPF_TCP_SOCK_GET_COMMON(rate_interval_us);
+ break;
+ case offsetof(struct bpf_tcp_sock, packets_out):
+ BPF_TCP_SOCK_GET_COMMON(packets_out);
+ break;
+ case offsetof(struct bpf_tcp_sock, retrans_out):
+ BPF_TCP_SOCK_GET_COMMON(retrans_out);
+ break;
+ case offsetof(struct bpf_tcp_sock, total_retrans):
+ BPF_TCP_SOCK_GET_COMMON(total_retrans);
+ break;
+ case offsetof(struct bpf_tcp_sock, segs_in):
+ BPF_TCP_SOCK_GET_COMMON(segs_in);
+ break;
+ case offsetof(struct bpf_tcp_sock, data_segs_in):
+ BPF_TCP_SOCK_GET_COMMON(data_segs_in);
+ break;
+ case offsetof(struct bpf_tcp_sock, segs_out):
+ BPF_TCP_SOCK_GET_COMMON(segs_out);
+ break;
+ case offsetof(struct bpf_tcp_sock, data_segs_out):
+ BPF_TCP_SOCK_GET_COMMON(data_segs_out);
+ break;
+ case offsetof(struct bpf_tcp_sock, lost_out):
+ BPF_TCP_SOCK_GET_COMMON(lost_out);
+ break;
+ case offsetof(struct bpf_tcp_sock, sacked_out):
+ BPF_TCP_SOCK_GET_COMMON(sacked_out);
+ break;
+ case offsetof(struct bpf_tcp_sock, bytes_received):
+ BPF_TCP_SOCK_GET_COMMON(bytes_received);
+ break;
+ case offsetof(struct bpf_tcp_sock, bytes_acked):
+ BPF_TCP_SOCK_GET_COMMON(bytes_acked);
+ break;
+ case offsetof(struct bpf_tcp_sock, dsack_dups):
+ BPF_TCP_SOCK_GET_COMMON(dsack_dups);
+ break;
+ case offsetof(struct bpf_tcp_sock, delivered):
+ BPF_TCP_SOCK_GET_COMMON(delivered);
+ break;
+ case offsetof(struct bpf_tcp_sock, delivered_ce):
+ BPF_TCP_SOCK_GET_COMMON(delivered_ce);
+ break;
+ case offsetof(struct bpf_tcp_sock, icsk_retransmits):
+ BPF_INET_SOCK_GET_COMMON(icsk_retransmits);
+ break;
}
return insn - insn_buf;
@@@ -5650,7 -5692,7 +5692,7 @@@ BPF_CALL_1(bpf_tcp_sock, struct sock *
return (unsigned long)NULL;
}
- static const struct bpf_func_proto bpf_tcp_sock_proto = {
+ const struct bpf_func_proto bpf_tcp_sock_proto = {
.func = bpf_tcp_sock,
.gpl_only = false,
.ret_type = RET_PTR_TO_TCP_SOCK_OR_NULL,
@@@ -5694,6 -5736,46 +5736,46 @@@ BPF_CALL_1(bpf_skb_ecn_set_ce, struct s
return INET_ECN_set_ce(skb);
}
+ bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
+ struct bpf_insn_access_aux *info)
+ {
+ if (off < 0 || off >= offsetofend(struct bpf_xdp_sock, queue_id))
+ return false;
+
+ if (off % size != 0)
+ return false;
+
+ switch (off) {
+ default:
+ return size == sizeof(__u32);
+ }
+ }
+
+ u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog, u32 *target_size)
+ {
+ struct bpf_insn *insn = insn_buf;
+
+ #define BPF_XDP_SOCK_GET(FIELD) \
+ do { \
+ BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_sock, FIELD) > \
+ FIELD_SIZEOF(struct bpf_xdp_sock, FIELD)); \
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_sock, FIELD),\
+ si->dst_reg, si->src_reg, \
+ offsetof(struct xdp_sock, FIELD)); \
+ } while (0)
+
+ switch (si->off) {
+ case offsetof(struct bpf_xdp_sock, queue_id):
+ BPF_XDP_SOCK_GET(queue_id);
+ break;
+ }
+
+ return insn - insn_buf;
+ }
+
static const struct bpf_func_proto bpf_skb_ecn_set_ce_proto = {
.func = bpf_skb_ecn_set_ce,
.gpl_only = false,
@@@ -5896,6 -5978,10 +5978,10 @@@ sock_addr_func_proto(enum bpf_func_id f
case BPF_FUNC_skc_lookup_tcp:
return &bpf_sock_addr_skc_lookup_tcp_proto;
#endif /* CONFIG_INET */
+ case BPF_FUNC_sk_storage_get:
+ return &bpf_sk_storage_get_proto;
+ case BPF_FUNC_sk_storage_delete:
+ return &bpf_sk_storage_delete_proto;
default:
return bpf_base_func_proto(func_id);
}
@@@ -5933,6 -6019,10 +6019,10 @@@ cg_skb_func_proto(enum bpf_func_id func
return &bpf_sk_storage_get_proto;
case BPF_FUNC_sk_storage_delete:
return &bpf_sk_storage_delete_proto;
+ #ifdef CONFIG_SOCK_CGROUP_DATA
+ case BPF_FUNC_skb_cgroup_id:
+ return &bpf_skb_cgroup_id_proto;
+ #endif
#ifdef CONFIG_INET
case BPF_FUNC_tcp_sock:
return &bpf_tcp_sock_proto;
@@@ -6113,6 -6203,14 +6203,14 @@@ sock_ops_func_proto(enum bpf_func_id fu
return &bpf_get_local_storage_proto;
case BPF_FUNC_perf_event_output:
return &bpf_sockopt_event_output_proto;
+ case BPF_FUNC_sk_storage_get:
+ return &bpf_sk_storage_get_proto;
+ case BPF_FUNC_sk_storage_delete:
+ return &bpf_sk_storage_delete_proto;
+ #ifdef CONFIG_INET
+ case BPF_FUNC_tcp_sock:
+ return &bpf_tcp_sock_proto;
+ #endif /* CONFIG_INET */
default:
return bpf_base_func_proto(func_id);
}
@@@ -6800,6 -6898,13 +6898,13 @@@ static bool sock_addr_is_valid_access(i
if (size != size_default)
return false;
break;
+ case offsetof(struct bpf_sock_addr, sk):
+ if (type != BPF_READ)
+ return false;
+ if (size != sizeof(__u64))
+ return false;
+ info->reg_type = PTR_TO_SOCKET;
+ break;
default:
if (type == BPF_READ) {
if (size != size_default)
@@@ -6843,6 -6948,11 +6948,11 @@@ static bool sock_ops_is_valid_access(in
if (size != sizeof(__u64))
return false;
break;
+ case offsetof(struct bpf_sock_ops, sk):
+ if (size != sizeof(__u64))
+ return false;
+ info->reg_type = PTR_TO_SOCKET_OR_NULL;
+ break;
default:
if (size != size_default)
return false;
@@@ -7750,6 -7860,11 +7860,11 @@@ static u32 sock_addr_convert_ctx_access
struct bpf_sock_addr_kern, struct in6_addr, t_ctx,
s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg);
break;
+ case offsetof(struct bpf_sock_addr, sk):
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_addr_kern, sk),
+ si->dst_reg, si->src_reg,
+ offsetof(struct bpf_sock_addr_kern, sk));
+ break;
}
return insn - insn_buf;
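
With PTR_TO_SOCKET wired up, a cgroup sock_addr program can read the
underlying socket through the new ctx->sk field; a minimal sketch:

#include <linux/bpf.h>
#include <linux/in.h>
#include "bpf_helpers.h"

SEC("cgroup/connect4")
int connect4(struct bpf_sock_addr *ctx)
{
	struct bpf_sock *sk = ctx->sk;	/* new field, read-only */

	if (sk && sk->protocol == IPPROTO_TCP)
		;	/* e.g. account or police TCP connects here */
	return 1;	/* allow the connect */
}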
@@@ -7837,9 -7952,6 +7952,6 @@@ static u32 sock_ops_convert_ctx_access(
SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
} while (0)
- CONVERT_COMMON_TCP_SOCK_FIELDS(struct bpf_sock_ops,
- SOCK_OPS_GET_TCP_SOCK_FIELD);
-
if (insn > insn_buf)
return insn - insn_buf;
@@@ -8009,6 -8121,82 +8121,82 @@@
SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash,
struct sock, type);
break;
+ case offsetof(struct bpf_sock_ops, snd_cwnd):
+ SOCK_OPS_GET_TCP_SOCK_FIELD(snd_cwnd);
+ break;
+ case offsetof(struct bpf_sock_ops, srtt_us):
+ SOCK_OPS_GET_TCP_SOCK_FIELD(srtt_us);
+ break;
+ case offsetof(struct bpf_sock_ops, snd_ssthresh):
+ SOCK_OPS_GET_TCP_SOCK_FIELD(snd_ssthresh);
+ break;
+ case offsetof(struct bpf_sock_ops, rcv_nxt):
+ SOCK_OPS_GET_TCP_SOCK_FIELD(rcv_nxt);
+ break;
+ case offsetof(struct bpf_sock_ops, snd_nxt):
+ SOCK_OPS_GET_TCP_SOCK_FIELD(snd_nxt);
+ break;
+ case offsetof(struct bpf_sock_ops, snd_una):
+ SOCK_OPS_GET_TCP_SOCK_FIELD(snd_una);
+ break;
+ case offsetof(struct bpf_sock_ops, mss_cache):
+ SOCK_OPS_GET_TCP_SOCK_FIELD(mss_cache);
+ break;
+ case offsetof(struct bpf_sock_ops, ecn_flags):
+ SOCK_OPS_GET_TCP_SOCK_FIELD(ecn_flags);
+ break;
+ case offsetof(struct bpf_sock_ops, rate_delivered):
+ SOCK_OPS_GET_TCP_SOCK_FIELD(rate_delivered);
+ break;
+ case offsetof(struct bpf_sock_ops, rate_interval_us):
+ SOCK_OPS_GET_TCP_SOCK_FIELD(rate_interval_us);
+ break;
+ case offsetof(struct bpf_sock_ops, packets_out):
+ SOCK_OPS_GET_TCP_SOCK_FIELD(packets_out);
+ break;
+ case offsetof(struct bpf_sock_ops, retrans_out):
+ SOCK_OPS_GET_TCP_SOCK_FIELD(retrans_out);
+ break;
+ case offsetof(struct bpf_sock_ops, total_retrans):
+ SOCK_OPS_GET_TCP_SOCK_FIELD(total_retrans);
+ break;
+ case offsetof(struct bpf_sock_ops, segs_in):
+ SOCK_OPS_GET_TCP_SOCK_FIELD(segs_in);
+ break;
+ case offsetof(struct bpf_sock_ops, data_segs_in):
+ SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_in);
+ break;
+ case offsetof(struct bpf_sock_ops, segs_out):
+ SOCK_OPS_GET_TCP_SOCK_FIELD(segs_out);
+ break;
+ case offsetof(struct bpf_sock_ops, data_segs_out):
+ SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_out);
+ break;
+ case offsetof(struct bpf_sock_ops, lost_out):
+ SOCK_OPS_GET_TCP_SOCK_FIELD(lost_out);
+ break;
+ case offsetof(struct bpf_sock_ops, sacked_out):
+ SOCK_OPS_GET_TCP_SOCK_FIELD(sacked_out);
+ break;
+ case offsetof(struct bpf_sock_ops, bytes_received):
+ SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_received);
+ break;
+ case offsetof(struct bpf_sock_ops, bytes_acked):
+ SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked);
+ break;
+ case offsetof(struct bpf_sock_ops, sk):
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
+ struct bpf_sock_ops_kern,
+ is_fullsock),
+ si->dst_reg, si->src_reg,
+ offsetof(struct bpf_sock_ops_kern,
+ is_fullsock));
+ *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
+ struct bpf_sock_ops_kern, sk),
+ si->dst_reg, si->src_reg,
+ offsetof(struct bpf_sock_ops_kern, sk));
+ break;
}
return insn - insn_buf;
}
diff --combined net/ipv4/devinet.c
index c5ebfa199794,137d1892395d..a4b5bd4d2c89
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@@ -62,11 -62,6 +62,11 @@@
#include <net/net_namespace.h>
#include <net/addrconf.h>
+#define IPV6ONLY_FLAGS \
+ (IFA_F_NODAD | IFA_F_OPTIMISTIC | IFA_F_DADFAILED | \
+ IFA_F_HOMEADDRESS | IFA_F_TENTATIVE | \
+ IFA_F_MANAGETEMPADDR | IFA_F_STABLE_PRIVACY)
+
static struct ipv4_devconf ipv4_devconf = {
.data = {
[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
@@@ -195,7 -190,8 +195,8 @@@ static void rtmsg_ifa(int event, struc
static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
static BLOCKING_NOTIFIER_HEAD(inetaddr_validator_chain);
- static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
+ static void inet_del_ifa(struct in_device *in_dev,
+ struct in_ifaddr __rcu **ifap,
int destroy);
#ifdef CONFIG_SYSCTL
static int devinet_sysctl_register(struct in_device *idev);
@@@ -301,8 -297,8 +302,8 @@@ static void in_dev_rcu_put(struct rcu_h
static void inetdev_destroy(struct in_device *in_dev)
{
- struct in_ifaddr *ifa;
struct net_device *dev;
+ struct in_ifaddr *ifa;
ASSERT_RTNL();
@@@ -312,7 -308,7 +313,7 @@@
ip_mc_destroy_dev(in_dev);
- while ((ifa = in_dev->ifa_list) != NULL) {
+ while ((ifa = rtnl_dereference(in_dev->ifa_list)) != NULL) {
inet_del_ifa(in_dev, &in_dev->ifa_list, 0);
inet_free_ifa(ifa);
}
@@@ -328,30 -324,35 +329,35 @@@
int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
{
+ const struct in_ifaddr *ifa;
+
rcu_read_lock();
- for_primary_ifa(in_dev) {
+ in_dev_for_each_ifa_rcu(ifa, in_dev) {
if (inet_ifa_match(a, ifa)) {
if (!b || inet_ifa_match(b, ifa)) {
rcu_read_unlock();
return 1;
}
}
- } endfor_ifa(in_dev);
+ }
rcu_read_unlock();
return 0;
}
- static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
- int destroy, struct nlmsghdr *nlh, u32 portid)
+ static void __inet_del_ifa(struct in_device *in_dev,
+ struct in_ifaddr __rcu **ifap,
+ int destroy, struct nlmsghdr *nlh, u32 portid)
{
struct in_ifaddr *promote = NULL;
- struct in_ifaddr *ifa, *ifa1 = *ifap;
- struct in_ifaddr *last_prim = in_dev->ifa_list;
+ struct in_ifaddr *ifa, *ifa1;
+ struct in_ifaddr *last_prim;
struct in_ifaddr *prev_prom = NULL;
int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
ASSERT_RTNL();
+ ifa1 = rtnl_dereference(*ifap);
+ last_prim = rtnl_dereference(in_dev->ifa_list);
if (in_dev->dead)
goto no_promotions;
@@@ -360,9 -361,9 +366,9 @@@
**/
if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
- struct in_ifaddr **ifap1 = &ifa1->ifa_next;
+ struct in_ifaddr __rcu **ifap1 = &ifa1->ifa_next;
- while ((ifa = *ifap1) != NULL) {
+ while ((ifa = rtnl_dereference(*ifap1)) != NULL) {
if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
ifa1->ifa_scope <= ifa->ifa_scope)
last_prim = ifa;
@@@ -395,7 -396,7 +401,7 @@@
* and later to add them back with new prefsrc. Do this
* while all addresses are on the device list.
*/
- for (ifa = promote; ifa; ifa = ifa->ifa_next) {
+ for (ifa = promote; ifa; ifa = rtnl_dereference(ifa->ifa_next)) {
if (ifa1->ifa_mask == ifa->ifa_mask &&
inet_ifa_match(ifa1->ifa_address, ifa))
fib_del_ifaddr(ifa, ifa1);
@@@ -421,19 -422,25 +427,25 @@@ no_promotions
blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
if (promote) {
- struct in_ifaddr *next_sec = promote->ifa_next;
+ struct in_ifaddr *next_sec;
+ next_sec = rtnl_dereference(promote->ifa_next);
if (prev_prom) {
- prev_prom->ifa_next = promote->ifa_next;
- promote->ifa_next = last_prim->ifa_next;
- last_prim->ifa_next = promote;
+ struct in_ifaddr *last_sec;
+
+ rcu_assign_pointer(prev_prom->ifa_next, next_sec);
+
+ last_sec = rtnl_dereference(last_prim->ifa_next);
+ rcu_assign_pointer(promote->ifa_next, last_sec);
+ rcu_assign_pointer(last_prim->ifa_next, promote);
}
promote->ifa_flags &= ~IFA_F_SECONDARY;
rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
blocking_notifier_call_chain(&inetaddr_chain,
NETDEV_UP, promote);
- for (ifa = next_sec; ifa; ifa = ifa->ifa_next) {
+ for (ifa = next_sec; ifa;
+ ifa = rtnl_dereference(ifa->ifa_next)) {
if (ifa1->ifa_mask != ifa->ifa_mask ||
!inet_ifa_match(ifa1->ifa_address, ifa))
continue;
@@@ -445,7 -452,8 +457,8 @@@
inet_free_ifa(ifa1);
}
- static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
+ static void inet_del_ifa(struct in_device *in_dev,
+ struct in_ifaddr __rcu **ifap,
int destroy)
{
__inet_del_ifa(in_dev, ifap, destroy, NULL, 0);
@@@ -458,9 -466,10 +471,10 @@@ static DECLARE_DELAYED_WORK(check_lifet
static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
u32 portid, struct netlink_ext_ack *extack)
{
+ struct in_ifaddr __rcu **last_primary, **ifap;
struct in_device *in_dev = ifa->ifa_dev;
- struct in_ifaddr *ifa1, **ifap, **last_primary;
struct in_validator_info ivi;
+ struct in_ifaddr *ifa1;
int ret;
ASSERT_RTNL();
@@@ -473,11 -482,10 +487,13 @@@
ifa->ifa_flags &= ~IFA_F_SECONDARY;
last_primary = &in_dev->ifa_list;
+ /* Don't set IPv6-only flags on IPv4 addresses */
+ ifa->ifa_flags &= ~IPV6ONLY_FLAGS;
+
- for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
- ifap = &ifa1->ifa_next) {
+ ifap = &in_dev->ifa_list;
+ ifa1 = rtnl_dereference(*ifap);
+
+ while (ifa1) {
if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
ifa->ifa_scope <= ifa1->ifa_scope)
last_primary = &ifa1->ifa_next;
@@@ -493,6 -501,9 +509,9 @@@
}
ifa->ifa_flags |= IFA_F_SECONDARY;
}
+
+ ifap = &ifa1->ifa_next;
+ ifa1 = rtnl_dereference(*ifap);
}
/* Allow any devices that wish to register ifaddr validators to weigh
@@@ -518,8 -529,8 +537,8 @@@
ifap = last_primary;
}
- ifa->ifa_next = *ifap;
- *ifap = ifa;
+ rcu_assign_pointer(ifa->ifa_next, *ifap);
+ rcu_assign_pointer(*ifap, ifa);
inet_hash_insert(dev_net(in_dev->dev), ifa);
@@@ -584,12 -595,14 +603,14 @@@ EXPORT_SYMBOL(inetdev_by_index)
struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
__be32 mask)
{
+ struct in_ifaddr *ifa;
+
ASSERT_RTNL();
- for_primary_ifa(in_dev) {
+ in_dev_for_each_ifa_rtnl(ifa, in_dev) {
if (ifa->ifa_mask == mask && inet_ifa_match(prefix, ifa))
return ifa;
- } endfor_ifa(in_dev);
+ }
return NULL;
}
@@@ -617,10 -630,12 +638,12 @@@ static int inet_rtm_deladdr(struct sk_b
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
+ struct in_ifaddr __rcu **ifap;
struct nlattr *tb[IFA_MAX+1];
struct in_device *in_dev;
struct ifaddrmsg *ifm;
- struct in_ifaddr *ifa, **ifap;
+ struct in_ifaddr *ifa;
+
int err = -EINVAL;
ASSERT_RTNL();
@@@ -637,7 -652,7 +660,7 @@@
goto errout;
}
- for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
+ for (ifap = &in_dev->ifa_list; (ifa = rtnl_dereference(*ifap)) != NULL;
ifap = &ifa->ifa_next) {
if (tb[IFA_LOCAL] &&
ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
@@@ -725,15 -740,19 +748,19 @@@ static void check_lifetime(struct work_
if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
age >= ifa->ifa_valid_lft) {
- struct in_ifaddr **ifap;
+ struct in_ifaddr __rcu **ifap;
+ struct in_ifaddr *tmp;
- for (ifap = &ifa->ifa_dev->ifa_list;
- *ifap != NULL; ifap = &(*ifap)->ifa_next) {
- if (*ifap == ifa) {
+ ifap = &ifa->ifa_dev->ifa_list;
+ tmp = rtnl_dereference(*ifap);
+ while (tmp) {
+ if (tmp == ifa) {
inet_del_ifa(ifa->ifa_dev,
ifap, 1);
break;
}
+ ifap = &tmp->ifa_next;
+ tmp = rtnl_dereference(*ifap);
}
} else if (ifa->ifa_preferred_lft !=
INFINITY_LIFE_TIME &&
@@@ -877,13 -896,12 +904,12 @@@ errout
static struct in_ifaddr *find_matching_ifa(struct in_ifaddr *ifa)
{
struct in_device *in_dev = ifa->ifa_dev;
- struct in_ifaddr *ifa1, **ifap;
+ struct in_ifaddr *ifa1;
if (!ifa->ifa_local)
return NULL;
- for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
- ifap = &ifa1->ifa_next) {
+ in_dev_for_each_ifa_rtnl(ifa1, in_dev) {
if (ifa1->ifa_mask == ifa->ifa_mask &&
inet_ifa_match(ifa1->ifa_address, ifa) &&
ifa1->ifa_local == ifa->ifa_local)
@@@ -978,8 -996,8 +1004,8 @@@ int devinet_ioctl(struct net *net, unsi
{
struct sockaddr_in sin_orig;
struct sockaddr_in *sin = (struct sockaddr_in *)&ifr->ifr_addr;
+ struct in_ifaddr __rcu **ifap = NULL;
struct in_device *in_dev;
- struct in_ifaddr **ifap = NULL;
struct in_ifaddr *ifa = NULL;
struct net_device *dev;
char *colon;
@@@ -1050,7 -1068,9 +1076,9 @@@
/* note: we only do this for a limited set of ioctls
and only if the original address family was AF_INET.
This is checked above. */
- for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
+
+ for (ifap = &in_dev->ifa_list;
+ (ifa = rtnl_dereference(*ifap)) != NULL;
ifap = &ifa->ifa_next) {
if (!strcmp(ifr->ifr_name, ifa->ifa_label) &&
sin_orig.sin_addr.s_addr ==
@@@ -1063,7 -1083,8 +1091,8 @@@
4.3BSD-style and passed in junk so we fall back to
comparing just the label */
if (!ifa) {
- for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
+ for (ifap = &in_dev->ifa_list;
+ (ifa = rtnl_dereference(*ifap)) != NULL;
ifap = &ifa->ifa_next)
if (!strcmp(ifr->ifr_name, ifa->ifa_label))
break;
@@@ -1212,7 -1233,7 +1241,7 @@@ out
static int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size)
{
struct in_device *in_dev = __in_dev_get_rtnl(dev);
- struct in_ifaddr *ifa;
+ const struct in_ifaddr *ifa;
struct ifreq ifr;
int done = 0;
@@@ -1222,7 -1243,7 +1251,7 @@@
if (!in_dev)
goto out;
- for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
+ in_dev_for_each_ifa_rtnl(ifa, in_dev) {
if (!buf) {
done += size;
continue;
@@@ -1250,18 -1271,24 +1279,24 @@@ out
static __be32 in_dev_select_addr(const struct in_device *in_dev,
int scope)
{
- for_primary_ifa(in_dev) {
+ const struct in_ifaddr *ifa;
+
+ in_dev_for_each_ifa_rcu(ifa, in_dev) {
+ if (ifa->ifa_flags & IFA_F_SECONDARY)
+ continue;
if (ifa->ifa_scope != RT_SCOPE_LINK &&
ifa->ifa_scope <= scope)
return ifa->ifa_local;
- } endfor_ifa(in_dev);
+ }
return 0;
}
__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
{
+ const struct in_ifaddr *ifa;
__be32 addr = 0;
+ unsigned char localnet_scope = RT_SCOPE_HOST;
struct in_device *in_dev;
struct net *net = dev_net(dev);
int master_idx;
@@@ -1271,8 -1298,13 +1306,13 @@@
if (!in_dev)
goto no_in_dev;
- for_primary_ifa(in_dev) {
- if (ifa->ifa_scope > scope)
+ if (unlikely(IN_DEV_ROUTE_LOCALNET(in_dev)))
+ localnet_scope = RT_SCOPE_LINK;
+
+ in_dev_for_each_ifa_rcu(ifa, in_dev) {
+ if (ifa->ifa_flags & IFA_F_SECONDARY)
+ continue;
+ if (min(ifa->ifa_scope, localnet_scope) > scope)
continue;
if (!dst || inet_ifa_match(dst, ifa)) {
addr = ifa->ifa_local;
@@@ -1280,7 -1312,7 +1320,7 @@@
}
if (!addr)
addr = ifa->ifa_local;
- } endfor_ifa(in_dev);
+ }
if (addr)
goto out_unlock;
@@@ -1325,13 -1357,20 +1365,20 @@@ EXPORT_SYMBOL(inet_select_addr)
static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
__be32 local, int scope)
{
- int same = 0;
+ unsigned char localnet_scope = RT_SCOPE_HOST;
+ const struct in_ifaddr *ifa;
__be32 addr = 0;
+ int same = 0;
+
+ if (unlikely(IN_DEV_ROUTE_LOCALNET(in_dev)))
+ localnet_scope = RT_SCOPE_LINK;
+
+ in_dev_for_each_ifa_rcu(ifa, in_dev) {
+ unsigned char min_scope = min(ifa->ifa_scope, localnet_scope);
- for_ifa(in_dev) {
if (!addr &&
(local == ifa->ifa_local || !local) &&
- ifa->ifa_scope <= scope) {
+ min_scope <= scope) {
addr = ifa->ifa_local;
if (same)
break;
@@@ -1346,7 -1385,7 +1393,7 @@@
if (inet_ifa_match(addr, ifa))
break;
/* No, then can we use new local src? */
- if (ifa->ifa_scope <= scope) {
+ if (min_scope <= scope) {
addr = ifa->ifa_local;
break;
}
@@@ -1354,7 -1393,7 +1401,7 @@@
same = 0;
}
}
- } endfor_ifa(in_dev);
+ }
return same ? addr : 0;
}
@@@ -1428,7 -1467,7 +1475,7 @@@ static void inetdev_changename(struct n
struct in_ifaddr *ifa;
int named = 0;
- for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
+ in_dev_for_each_ifa_rtnl(ifa, in_dev) {
char old[IFNAMSIZ], *dot;
memcpy(old, ifa->ifa_label, IFNAMSIZ);
@@@ -1458,10 -1497,9 +1505,9 @@@ static void inetdev_send_gratuitous_arp
struct in_device *in_dev)
{
- struct in_ifaddr *ifa;
+ const struct in_ifaddr *ifa;
- for (ifa = in_dev->ifa_list; ifa;
- ifa = ifa->ifa_next) {
+ in_dev_for_each_ifa_rtnl(ifa, in_dev) {
arp_send(ARPOP_REQUEST, ETH_P_ARP,
ifa->ifa_local, dev,
ifa->ifa_local, NULL,
@@@ -1731,15 -1769,17 +1777,17 @@@ static int in_dev_dump_addr(struct in_d
int ip_idx = 0;
int err;
- for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next, ip_idx++) {
- if (ip_idx < s_ip_idx)
+ in_dev_for_each_ifa_rtnl(ifa, in_dev) {
+ if (ip_idx < s_ip_idx) {
+ ip_idx++;
continue;
-
+ }
err = inet_fill_ifaddr(skb, ifa, fillargs);
if (err < 0)
goto done;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+ ip_idx++;
}
err = 0;
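
All of the devinet.c conversions above share one idiom: the old
for_primary_ifa()/for_ifa() walks become in_dev_for_each_ifa_rcu() or
in_dev_for_each_ifa_rtnl() over an __rcu-annotated ifa_list. A condensed
sketch of the RCU flavor (the helper itself is illustrative):

static __be32 first_local_addr(const struct net_device *dev)
{
	const struct in_ifaddr *ifa;
	struct in_device *in_dev;
	__be32 addr = 0;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (in_dev) {
		in_dev_for_each_ifa_rcu(ifa, in_dev) {
			addr = ifa->ifa_local;
			break;
		}
	}
	rcu_read_unlock();
	return addr;
}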
diff --combined net/ipv4/igmp.c
index 85107bf812f2,9a206931a342..180f6896b98b
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@@ -332,14 -332,15 +332,15 @@@ static __be32 igmpv3_get_srcaddr(struc
const struct flowi4 *fl4)
{
struct in_device *in_dev = __in_dev_get_rcu(dev);
+ const struct in_ifaddr *ifa;
if (!in_dev)
return htonl(INADDR_ANY);
- for_ifa(in_dev) {
+ in_dev_for_each_ifa_rcu(ifa, in_dev) {
if (fl4->saddr == ifa->ifa_local)
return fl4->saddr;
- } endfor_ifa(in_dev);
+ }
return htonl(INADDR_ANY);
}
@@@ -1228,8 -1229,12 +1229,8 @@@ static void igmpv3_del_delrec(struct in
if (pmc) {
im->interface = pmc->interface;
if (im->sfmode == MCAST_INCLUDE) {
- im->tomb = pmc->tomb;
- pmc->tomb = NULL;
-
- im->sources = pmc->sources;
- pmc->sources = NULL;
-
+ swap(im->tomb, pmc->tomb);
+ swap(im->sources, pmc->sources);
for (psf = im->sources; psf; psf = psf->sf_next)
psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
} else {
diff --combined net/ipv4/route.c
index b2b35b38724d,dc1f510a7c81..69a289f8de4f
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@@ -95,6 -95,7 +95,7 @@@
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
+ #include <net/nexthop.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
@@@ -447,7 -448,7 +448,7 @@@ static struct neighbour *ipv4_neigh_loo
n = ip_neigh_gw4(dev, pkey);
}
- if (n && !refcount_inc_not_zero(&n->refcnt))
+ if (!IS_ERR(n) && !refcount_inc_not_zero(&n->refcnt))
n = NULL;
rcu_read_unlock_bh();
@@@ -1531,7 -1532,6 +1532,6 @@@ static void ipv4_dst_destroy(struct dst
void rt_flush_dev(struct net_device *dev)
{
- struct net *net = dev_net(dev);
struct rtable *rt;
int cpu;
@@@ -1542,7 -1542,7 +1542,7 @@@
list_for_each_entry(rt, &ul->head, rt_uncached) {
if (rt->dst.dev != dev)
continue;
- rt->dst.dev = net->loopback_dev;
+ rt->dst.dev = blackhole_netdev;
dev_hold(rt->dst.dev);
dev_put(dev);
}
@@@ -1580,7 -1580,7 +1580,7 @@@ static void rt_set_nexthop(struct rtabl
ip_dst_init_metrics(&rt->dst, fi->fib_metrics);
#ifdef CONFIG_IP_ROUTE_CLASSID
- {
+ if (nhc->nhc_family == AF_INET) {
struct fib_nh *nh;
nh = container_of(nhc, struct fib_nh, nh_common);
@@@ -1962,6 -1962,23 +1962,23 @@@ int fib_multipath_hash(const struct ne
hash_keys.basic.ip_proto = fl4->flowi4_proto;
}
break;
+ case 2:
+ memset(&hash_keys, 0, sizeof(hash_keys));
+ hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ /* skb is currently provided only when forwarding */
+ if (skb) {
+ struct flow_keys keys;
+
+ skb_flow_dissect_flow_keys(skb, &keys, 0);
+
+ hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
+ hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
+ } else {
+ /* Same as case 0 */
+ hash_keys.addrs.v4addrs.src = fl4->saddr;
+ hash_keys.addrs.v4addrs.dst = fl4->daddr;
+ }
+ break;
}
mhash = flow_hash_from_keys(&hash_keys);
@@@ -1979,7 -1996,7 +1996,7 @@@ static int ip_mkroute_input(struct sk_b
struct flow_keys *hkeys)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- if (res->fi && res->fi->fib_nhs > 1) {
+ if (res->fi && fib_info_num_path(res->fi) > 1) {
int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);
fib_select_multipath(res, h);
@@@ -2714,7 -2731,7 +2731,7 @@@ static int rt_fill_info(struct net *net
r->rtm_family = AF_INET;
r->rtm_dst_len = 32;
r->rtm_src_len = 0;
- r->rtm_tos = fl4->flowi4_tos;
+ r->rtm_tos = fl4 ? fl4->flowi4_tos : 0;
r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
if (nla_put_u32(skb, RTA_TABLE, table_id))
goto nla_put_failure;
@@@ -2742,7 -2759,7 +2759,7 @@@
nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
goto nla_put_failure;
#endif
- if (!rt_is_input_route(rt) &&
+ if (fl4 && !rt_is_input_route(rt) &&
fl4->saddr != src) {
if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
goto nla_put_failure;
@@@ -2782,36 -2799,40 +2799,40 @@@
if (rtnetlink_put_metrics(skb, metrics) < 0)
goto nla_put_failure;
- if (fl4->flowi4_mark &&
- nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
- goto nla_put_failure;
-
- if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
- nla_put_u32(skb, RTA_UID,
- from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
- goto nla_put_failure;
+ if (fl4) {
+ if (fl4->flowi4_mark &&
+ nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
+ goto nla_put_failure;
- error = rt->dst.error;
+ if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
+ nla_put_u32(skb, RTA_UID,
+ from_kuid_munged(current_user_ns(),
+ fl4->flowi4_uid)))
+ goto nla_put_failure;
- if (rt_is_input_route(rt)) {
+ if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
- if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
- IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
- int err = ipmr_get_route(net, skb,
- fl4->saddr, fl4->daddr,
- r, portid);
-
- if (err <= 0) {
- if (err == 0)
- return 0;
- goto nla_put_failure;
- }
- } else
+ if (ipv4_is_multicast(dst) &&
+ !ipv4_is_local_multicast(dst) &&
+ IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
+ int err = ipmr_get_route(net, skb,
+ fl4->saddr, fl4->daddr,
+ r, portid);
+
+ if (err <= 0) {
+ if (err == 0)
+ return 0;
+ goto nla_put_failure;
+ }
+ } else
#endif
- if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
- goto nla_put_failure;
+ if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
+ goto nla_put_failure;
+ }
}
+ error = rt->dst.error;
+
if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
goto nla_put_failure;
@@@ -2823,6 -2844,80 +2844,80 @@@ nla_put_failure
return -EMSGSIZE;
}
+ static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
+ struct netlink_callback *cb, u32 table_id,
+ struct fnhe_hash_bucket *bucket, int genid,
+ int *fa_index, int fa_start)
+ {
+ int i;
+
+ for (i = 0; i < FNHE_HASH_SIZE; i++) {
+ struct fib_nh_exception *fnhe;
+
+ for (fnhe = rcu_dereference(bucket[i].chain); fnhe;
+ fnhe = rcu_dereference(fnhe->fnhe_next)) {
+ struct rtable *rt;
+ int err;
+
+ if (*fa_index < fa_start)
+ goto next;
+
+ if (fnhe->fnhe_genid != genid)
+ goto next;
+
+ if (fnhe->fnhe_expires &&
+ time_after(jiffies, fnhe->fnhe_expires))
+ goto next;
+
+ rt = rcu_dereference(fnhe->fnhe_rth_input);
+ if (!rt)
+ rt = rcu_dereference(fnhe->fnhe_rth_output);
+ if (!rt)
+ goto next;
+
+ err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
+ table_id, NULL, skb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq);
+ if (err)
+ return err;
+ next:
+ (*fa_index)++;
+ }
+ }
+
+ return 0;
+ }
+
+ int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
+ u32 table_id, struct fib_info *fi,
+ int *fa_index, int fa_start)
+ {
+ struct net *net = sock_net(cb->skb->sk);
+ int nhsel, genid = fnhe_genid(net);
+
+ for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
+ struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
+ struct fnhe_hash_bucket *bucket;
+ int err;
+
+ if (nhc->nhc_flags & RTNH_F_DEAD)
+ continue;
+
+ rcu_read_lock();
+ bucket = rcu_dereference(nhc->nhc_exceptions);
+ err = 0;
+ if (bucket)
+ err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
+ genid, fa_index, fa_start);
+ rcu_read_unlock();
+ if (err)
+ return err;
+ }
+
+ return 0;
+ }
+
static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
u8 ip_proto, __be16 sport,
__be16 dport)
@@@ -3230,9 -3325,11 +3325,11 @@@ static struct ctl_table ipv4_route_tabl
{ }
};
+ static const char ipv4_route_flush_procname[] = "flush";
+
static struct ctl_table ipv4_route_flush_table[] = {
{
- .procname = "flush",
+ .procname = ipv4_route_flush_procname,
.maxlen = sizeof(int),
.mode = 0200,
.proc_handler = ipv4_sysctl_rtcache_flush,
@@@ -3250,9 -3347,11 +3347,11 @@@ static __net_init int sysctl_route_net_
if (!tbl)
goto err_dup;
- /* Don't export sysctls to unprivileged users */
- if (net->user_ns != &init_user_ns)
- tbl[0].procname = NULL;
+ /* Don't export non-whitelisted sysctls to unprivileged users */
+ if (net->user_ns != &init_user_ns) {
+ if (tbl[0].procname != ipv4_route_flush_procname)
+ tbl[0].procname = NULL;
+ }
}
tbl[0].extra1 = net;
diff --combined net/key/af_key.c
index fe5fc4bab7ee,39b3d95094eb..b67ed3a8486c
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@@ -928,8 -928,7 +928,7 @@@ static struct sk_buff *__pfkey_xfrm_sta
pfkey_sockaddr_fill(&x->props.saddr, 0,
(struct sockaddr *) (addr + 1),
x->props.family);
- if (!addr->sadb_address_prefixlen)
- BUG();
+ BUG_ON(!addr->sadb_address_prefixlen);
/* dst address */
addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size);
@@@ -944,8 -943,7 +943,7 @@@
pfkey_sockaddr_fill(&x->id.daddr, 0,
(struct sockaddr *) (addr + 1),
x->props.family);
- if (!addr->sadb_address_prefixlen)
- BUG();
+ BUG_ON(!addr->sadb_address_prefixlen);
if (!xfrm_addr_equal(&x->sel.saddr, &x->props.saddr,
x->props.family)) {
@@@ -2438,10 -2436,8 +2436,10 @@@ static int key_pol_get_resp(struct soc
goto out;
}
err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
- if (err < 0)
+ if (err < 0) {
+ kfree_skb(out_skb);
goto out;
+ }
out_hdr = (struct sadb_msg *) out_skb->data;
out_hdr->sadb_msg_version = hdr->sadb_msg_version;
@@@ -2692,10 -2688,8 +2690,10 @@@ static int dump_sp(struct xfrm_policy *
return PTR_ERR(out_skb);
err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
- if (err < 0)
+ if (err < 0) {
+ kfree_skb(out_skb);
return err;
+ }
out_hdr = (struct sadb_msg *) out_skb->data;
out_hdr->sadb_msg_version = pfk->dump.msg_version;
diff --combined net/netfilter/ipvs/ip_vs_core.c
index d5103a9eb302,e8651fd621ef..f662f198b458
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@@ -34,6 -34,7 +34,7 @@@
#include <net/tcp.h>
#include <net/udp.h>
#include <net/icmp.h> /* for icmp_send */
+ #include <net/gue.h>
#include <net/route.h>
#include <net/ip6_checksum.h>
#include <net/netns/generic.h> /* net_generic() */
@@@ -892,7 -893,7 +893,7 @@@ static int handle_response_icmp(int af
if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol ||
IPPROTO_SCTP == protocol)
offset += 2 * sizeof(__u16);
- if (!skb_make_writable(skb, offset))
+ if (skb_ensure_writable(skb, offset))
goto out;
#ifdef CONFIG_IP_VS_IPV6
@@@ -1282,7 -1283,7 +1283,7 @@@ handle_response(int af, struct sk_buff
IP_VS_DBG_PKT(11, af, pp, skb, iph->off, "Outgoing packet");
- if (!skb_make_writable(skb, iph->len))
+ if (skb_ensure_writable(skb, iph->len))
goto drop;
/* mangle the packet */
@@@ -1574,6 -1575,41 +1575,41 @@@ ip_vs_try_to_schedule(struct netns_ipv
return 1;
}
+ /* Check the UDP tunnel and return its header length */
+ static int ipvs_udp_decap(struct netns_ipvs *ipvs, struct sk_buff *skb,
+ unsigned int offset, __u16 af,
+ const union nf_inet_addr *daddr, __u8 *proto)
+ {
+ struct udphdr _udph, *udph;
+ struct ip_vs_dest *dest;
+
+ udph = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
+ if (!udph)
+ goto unk;
+ offset += sizeof(struct udphdr);
+ dest = ip_vs_find_tunnel(ipvs, af, daddr, udph->dest);
+ if (!dest)
+ goto unk;
+ if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) {
+ struct guehdr _gueh, *gueh;
+
+ gueh = skb_header_pointer(skb, offset, sizeof(_gueh), &_gueh);
+ if (!gueh)
+ goto unk;
+ if (gueh->control != 0 || gueh->version != 0)
+ goto unk;
+ /* Later we can also support IPPROTO_IPV6 */
+ if (gueh->proto_ctype != IPPROTO_IPIP)
+ goto unk;
+ *proto = gueh->proto_ctype;
+ return sizeof(struct udphdr) + sizeof(struct guehdr) +
+ (gueh->hlen << 2);
+ }
+
+ unk:
+ return 0;
+ }
+
/*
* Handle ICMP messages in the outside-to-inside direction (incoming).
* Find any that might be relevant, check against existing connections,
@@@ -1593,6 -1629,7 +1629,7 @@@ ip_vs_in_icmp(struct netns_ipvs *ipvs,
struct ip_vs_proto_data *pd;
unsigned int offset, offset2, ihl, verdict;
bool ipip, new_cp = false;
+ union nf_inet_addr *raddr;
*related = 1;
@@@ -1631,20 -1668,51 +1668,51 @@@
cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
if (cih == NULL)
return NF_ACCEPT; /* The packet looks wrong, ignore */
+ raddr = (union nf_inet_addr *)&cih->daddr;
/* Special case for errors for IPIP packets */
ipip = false;
if (cih->protocol == IPPROTO_IPIP) {
+ struct ip_vs_dest *dest;
+
if (unlikely(cih->frag_off & htons(IP_OFFSET)))
return NF_ACCEPT;
/* Error for our IPIP must arrive at LOCAL_IN */
if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL))
return NF_ACCEPT;
+ dest = ip_vs_find_tunnel(ipvs, AF_INET, raddr, 0);
+ /* Only for known tunnel */
+ if (!dest || dest->tun_type != IP_VS_CONN_F_TUNNEL_TYPE_IPIP)
+ return NF_ACCEPT;
offset += cih->ihl * 4;
cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph);
if (cih == NULL)
return NF_ACCEPT; /* The packet looks wrong, ignore */
ipip = true;
+ } else if (cih->protocol == IPPROTO_UDP && /* Can be UDP encap */
+ /* Error for our tunnel must arrive at LOCAL_IN */
+ (skb_rtable(skb)->rt_flags & RTCF_LOCAL)) {
+ __u8 iproto;
+ int ulen;
+
+ /* Non-first fragment has no UDP header */
+ if (unlikely(cih->frag_off & htons(IP_OFFSET)))
+ return NF_ACCEPT;
+ offset2 = offset + cih->ihl * 4;
+ ulen = ipvs_udp_decap(ipvs, skb, offset2, AF_INET, raddr,
+ &iproto);
+ if (ulen > 0) {
+ /* Skip IP and UDP tunnel headers */
+ offset = offset2 + ulen;
+ /* Now we should be at the original IP header */
+ cih = skb_header_pointer(skb, offset, sizeof(_ciph),
+ &_ciph);
+ if (cih && cih->version == 4 && cih->ihl >= 5 &&
+ iproto == IPPROTO_IPIP)
+ ipip = true;
+ else
+ return NF_ACCEPT;
+ }
}
pd = ip_vs_proto_data_get(ipvs, cih->protocol);
@@@ -2245,6 -2313,7 +2313,6 @@@ static const struct nf_hook_ops ip_vs_o
static int __net_init __ip_vs_init(struct net *net)
{
struct netns_ipvs *ipvs;
- int ret;
ipvs = net_generic(net, ip_vs_net_id);
if (ipvs == NULL)
@@@ -2276,11 -2345,17 +2344,11 @@@
if (ip_vs_sync_net_init(ipvs) < 0)
goto sync_fail;
- ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
- if (ret < 0)
- goto hook_fail;
-
return 0;
/*
* Error handling
*/
-hook_fail:
- ip_vs_sync_net_cleanup(ipvs);
sync_fail:
ip_vs_conn_net_cleanup(ipvs);
conn_fail:
@@@ -2310,19 -2385,6 +2378,19 @@@ static void __net_exit __ip_vs_cleanup(
net->ipvs = NULL;
}
+static int __net_init __ip_vs_dev_init(struct net *net)
+{
+ int ret;
+
+ ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
+ if (ret < 0)
+ goto hook_fail;
+ return 0;
+
+hook_fail:
+ return ret;
+}
+
static void __net_exit __ip_vs_dev_cleanup(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);
@@@ -2342,7 -2404,6 +2410,7 @@@ static struct pernet_operations ipvs_co
};
static struct pernet_operations ipvs_core_dev_ops = {
+ .init = __ip_vs_dev_init,
.exit = __ip_vs_dev_cleanup,
};
diff --combined net/netfilter/ipvs/ip_vs_ctl.c
index 741d91aa4a8d,84384d896e29..a8abba7e3d2d
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@@ -510,15 -510,36 +510,36 @@@ static inline unsigned int ip_vs_rs_has
static void ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest)
{
unsigned int hash;
+ __be16 port;
if (dest->in_rs_table)
return;
+ switch (IP_VS_DFWD_METHOD(dest)) {
+ case IP_VS_CONN_F_MASQ:
+ port = dest->port;
+ break;
+ case IP_VS_CONN_F_TUNNEL:
+ switch (dest->tun_type) {
+ case IP_VS_CONN_F_TUNNEL_TYPE_GUE:
+ port = dest->tun_port;
+ break;
+ case IP_VS_CONN_F_TUNNEL_TYPE_IPIP:
+ port = 0;
+ break;
+ default:
+ return;
+ }
+ break;
+ default:
+ return;
+ }
+
/*
* Hash by proto,addr,port,
* which are the parameters of the real service.
*/
- hash = ip_vs_rs_hashkey(dest->af, &dest->addr, dest->port);
+ hash = ip_vs_rs_hashkey(dest->af, &dest->addr, port);
hlist_add_head_rcu(&dest->d_list, &ipvs->rs_table[hash]);
dest->in_rs_table = 1;
@@@ -550,7 -571,8 +571,8 @@@ bool ip_vs_has_real_service(struct netn
if (dest->port == dport &&
dest->af == af &&
ip_vs_addr_equal(af, &dest->addr, daddr) &&
- (dest->protocol == protocol || dest->vfwmark)) {
+ (dest->protocol == protocol || dest->vfwmark) &&
+ IP_VS_DFWD_METHOD(dest) == IP_VS_CONN_F_MASQ) {
/* HIT */
return true;
}
@@@ -580,7 -602,37 +602,37 @@@ struct ip_vs_dest *ip_vs_find_real_serv
if (dest->port == dport &&
dest->af == af &&
ip_vs_addr_equal(af, &dest->addr, daddr) &&
- (dest->protocol == protocol || dest->vfwmark)) {
+ (dest->protocol == protocol || dest->vfwmark) &&
+ IP_VS_DFWD_METHOD(dest) == IP_VS_CONN_F_MASQ) {
+ /* HIT */
+ return dest;
+ }
+ }
+
+ return NULL;
+ }
+
+ /* Find real service record by <af,addr,tun_port>.
+ * In case of multiple records with the same <af,addr,tun_port>, only
+ * the first found record is returned.
+ *
+ * To be called under RCU lock.
+ */
+ struct ip_vs_dest *ip_vs_find_tunnel(struct netns_ipvs *ipvs, int af,
+ const union nf_inet_addr *daddr,
+ __be16 tun_port)
+ {
+ struct ip_vs_dest *dest;
+ unsigned int hash;
+
+ /* Check for "full" addressed entries */
+ hash = ip_vs_rs_hashkey(af, daddr, tun_port);
+
+ hlist_for_each_entry_rcu(dest, &ipvs->rs_table[hash], d_list) {
+ if (dest->tun_port == tun_port &&
+ dest->af == af &&
+ ip_vs_addr_equal(af, &dest->addr, daddr) &&
+ IP_VS_DFWD_METHOD(dest) == IP_VS_CONN_F_TUNNEL) {
/* HIT */
return dest;
}
@@@ -826,24 -878,29 +878,29 @@@ __ip_vs_update_dest(struct ip_vs_servic
conn_flags = udest->conn_flags & IP_VS_CONN_F_DEST_MASK;
conn_flags |= IP_VS_CONN_F_INACTIVE;
+ /* Need to rehash? */
+ if ((udest->conn_flags & IP_VS_CONN_F_FWD_MASK) !=
+ IP_VS_DFWD_METHOD(dest) ||
+ udest->tun_type != dest->tun_type ||
+ udest->tun_port != dest->tun_port)
+ ip_vs_rs_unhash(dest);
+
/* set the tunnel info */
dest->tun_type = udest->tun_type;
dest->tun_port = udest->tun_port;
+ dest->tun_flags = udest->tun_flags;
/* set the IP_VS_CONN_F_NOOUTPUT flag if not masquerading/NAT */
if ((conn_flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ) {
conn_flags |= IP_VS_CONN_F_NOOUTPUT;
} else {
- /*
- * Put the real service in rs_table if not present.
- * For now only for NAT!
- */
- ip_vs_rs_hash(ipvs, dest);
/* FTP-NAT requires conntrack for mangling */
if (svc->port == FTPPORT)
ip_vs_register_conntrack(svc);
}
atomic_set(&dest->conn_flags, conn_flags);
+ /* Put the real service in rs_table if not present. */
+ ip_vs_rs_hash(ipvs, dest);
/* bind the service */
old_svc = rcu_dereference_protected(dest->svc, 1);
@@@ -2396,7 -2453,9 +2453,7 @@@ do_ip_vs_set_ctl(struct sock *sk, int c
cfg.syncid = dm->syncid;
ret = start_sync_thread(ipvs, &cfg, dm->state);
} else {
- mutex_lock(&ipvs->sync_mutex);
ret = stop_sync_thread(ipvs, dm->state);
- mutex_unlock(&ipvs->sync_mutex);
}
goto out_dec;
}
@@@ -2904,6 -2963,7 +2961,7 @@@ static const struct nla_policy ip_vs_de
[IPVS_DEST_ATTR_ADDR_FAMILY] = { .type = NLA_U16 },
[IPVS_DEST_ATTR_TUN_TYPE] = { .type = NLA_U8 },
[IPVS_DEST_ATTR_TUN_PORT] = { .type = NLA_U16 },
+ [IPVS_DEST_ATTR_TUN_FLAGS] = { .type = NLA_U16 },
};
static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
@@@ -3210,6 -3270,8 +3268,8 @@@ static int ip_vs_genl_fill_dest(struct
dest->tun_type) ||
nla_put_be16(skb, IPVS_DEST_ATTR_TUN_PORT,
dest->tun_port) ||
+ nla_put_u16(skb, IPVS_DEST_ATTR_TUN_FLAGS,
+ dest->tun_flags) ||
nla_put_u32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold) ||
nla_put_u32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold) ||
nla_put_u32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS,
@@@ -3330,7 -3392,8 +3390,8 @@@ static int ip_vs_genl_parse_dest(struc
/* If a full entry was requested, check for the additional fields */
if (full_entry) {
struct nlattr *nla_fwd, *nla_weight, *nla_u_thresh,
- *nla_l_thresh, *nla_tun_type, *nla_tun_port;
+ *nla_l_thresh, *nla_tun_type, *nla_tun_port,
+ *nla_tun_flags;
nla_fwd = attrs[IPVS_DEST_ATTR_FWD_METHOD];
nla_weight = attrs[IPVS_DEST_ATTR_WEIGHT];
@@@ -3338,6 -3401,7 +3399,7 @@@
nla_l_thresh = attrs[IPVS_DEST_ATTR_L_THRESH];
nla_tun_type = attrs[IPVS_DEST_ATTR_TUN_TYPE];
nla_tun_port = attrs[IPVS_DEST_ATTR_TUN_PORT];
+ nla_tun_flags = attrs[IPVS_DEST_ATTR_TUN_FLAGS];
if (!(nla_fwd && nla_weight && nla_u_thresh && nla_l_thresh))
return -EINVAL;
@@@ -3353,6 -3417,9 +3415,9 @@@
if (nla_tun_port)
udest->tun_port = nla_get_be16(nla_tun_port);
+
+ if (nla_tun_flags)
+ udest->tun_flags = nla_get_u16(nla_tun_flags);
}
return 0;
@@@ -3513,8 -3580,10 +3578,8 @@@ static int ip_vs_genl_del_daemon(struc
if (!attrs[IPVS_DAEMON_ATTR_STATE])
return -EINVAL;
- mutex_lock(&ipvs->sync_mutex);
ret = stop_sync_thread(ipvs,
nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
- mutex_unlock(&ipvs->sync_mutex);
return ret;
}
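
[editor's note] The ip_vs_ctl.c hunks above let NAT and tunnel destinations coexist in rs_table: every dest is now hashed unconditionally (the moved ip_vs_rs_hash() call), lookups additionally filter on IP_VS_DFWD_METHOD(), and the new ip_vs_find_tunnel() keys on <af, addr, tun_port>. A minimal userspace model of that hash-on-tuple-then-filter-on-method pattern; all names here are illustrative, not the kernel's:

/* Toy model: NAT and TUNNEL dests share one hash table, and a
 * tunnel lookup only hits entries with the TUNNEL method. */
#include <stdio.h>
#include <stdint.h>

enum fwd { FWD_MASQ, FWD_TUNNEL };

struct dest {
	uint32_t addr;      /* IPv4 for brevity */
	uint16_t port;      /* dport for MASQ, tun_port for TUNNEL */
	enum fwd method;
	struct dest *next;  /* hash chain */
};

#define TAB_SZ 16
static struct dest *tab[TAB_SZ];

static unsigned hashkey(uint32_t addr, uint16_t port)
{
	return (addr ^ port) % TAB_SZ;
}

static void rs_hash(struct dest *d)
{
	unsigned h = hashkey(d->addr, d->port);

	d->next = tab[h];
	tab[h] = d;
}

/* Mirrors ip_vs_find_tunnel(): same buckets as NAT entries, but only
 * a TUNNEL dest with a matching tun_port is a hit. */
static struct dest *find_tunnel(uint32_t addr, uint16_t tun_port)
{
	struct dest *d;

	for (d = tab[hashkey(addr, tun_port)]; d; d = d->next)
		if (d->addr == addr && d->port == tun_port &&
		    d->method == FWD_TUNNEL)
			return d;
	return NULL;
}

int main(void)
{
	struct dest nat = { 0x0a000001, 4500, FWD_MASQ };
	struct dest tun = { 0x0a000001, 4500, FWD_TUNNEL };

	rs_hash(&nat);
	rs_hash(&tun);
	printf("tunnel hit: %p (nat entry at %p)\n",
	       (void *)find_tunnel(0x0a000001, 4500), (void *)&nat);
	return 0;
}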
diff --combined net/netfilter/nf_nat_proto.c
index 83a24cc5753b,888292e8fbb2..7ac733ebd060
--- a/net/netfilter/nf_nat_proto.c
+++ b/net/netfilter/nf_nat_proto.c
@@@ -70,7 -70,7 +70,7 @@@ static bool udp_manip_pkt(struct sk_buf
struct udphdr *hdr;
bool do_csum;
- if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
+ if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
return false;
hdr = (struct udphdr *)(skb->data + hdroff);
@@@ -88,7 -88,7 +88,7 @@@ static bool udplite_manip_pkt(struct sk
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
struct udphdr *hdr;
- if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
+ if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
return false;
hdr = (struct udphdr *)(skb->data + hdroff);
@@@ -114,7 -114,7 +114,7 @@@ sctp_manip_pkt(struct sk_buff *skb
if (skb->len >= hdroff + sizeof(*hdr))
hdrsize = sizeof(*hdr);
- if (!skb_make_writable(skb, hdroff + hdrsize))
+ if (skb_ensure_writable(skb, hdroff + hdrsize))
return false;
hdr = (struct sctphdr *)(skb->data + hdroff);
@@@ -155,7 -155,7 +155,7 @@@ tcp_manip_pkt(struct sk_buff *skb
if (skb->len >= hdroff + sizeof(struct tcphdr))
hdrsize = sizeof(struct tcphdr);
- if (!skb_make_writable(skb, hdroff + hdrsize))
+ if (skb_ensure_writable(skb, hdroff + hdrsize))
return false;
hdr = (struct tcphdr *)(skb->data + hdroff);
@@@ -195,7 -195,7 +195,7 @@@ dccp_manip_pkt(struct sk_buff *skb
if (skb->len >= hdroff + sizeof(struct dccp_hdr))
hdrsize = sizeof(struct dccp_hdr);
- if (!skb_make_writable(skb, hdroff + hdrsize))
+ if (skb_ensure_writable(skb, hdroff + hdrsize))
return false;
hdr = (struct dccp_hdr *)(skb->data + hdroff);
@@@ -229,7 -229,7 +229,7 @@@ icmp_manip_pkt(struct sk_buff *skb
{
struct icmphdr *hdr;
- if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
+ if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
return false;
hdr = (struct icmphdr *)(skb->data + hdroff);
@@@ -247,7 -247,7 +247,7 @@@ icmpv6_manip_pkt(struct sk_buff *skb
{
struct icmp6hdr *hdr;
- if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
+ if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
return false;
hdr = (struct icmp6hdr *)(skb->data + hdroff);
@@@ -275,7 -275,7 +275,7 @@@ gre_manip_pkt(struct sk_buff *skb
/* pgreh includes two optional 32bit fields which are not required
* to be there. That's where the magic '8' comes from */
- if (!skb_make_writable(skb, hdroff + sizeof(*pgreh) - 8))
+ if (skb_ensure_writable(skb, hdroff + sizeof(*pgreh) - 8))
return false;
greh = (void *)skb->data + hdroff;
@@@ -347,7 -347,7 +347,7 @@@ static bool nf_nat_ipv4_manip_pkt(struc
struct iphdr *iph;
unsigned int hdroff;
- if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
+ if (skb_ensure_writable(skb, iphdroff + sizeof(*iph)))
return false;
iph = (void *)skb->data + iphdroff;
@@@ -378,7 -378,7 +378,7 @@@ static bool nf_nat_ipv6_manip_pkt(struc
int hdroff;
u8 nexthdr;
- if (!skb_make_writable(skb, iphdroff + sizeof(*ipv6h)))
+ if (skb_ensure_writable(skb, iphdroff + sizeof(*ipv6h)))
return false;
ipv6h = (void *)skb->data + iphdroff;
@@@ -562,9 -562,9 +562,9 @@@ int nf_nat_icmp_reply_translation(struc
WARN_ON(ctinfo != IP_CT_RELATED && ctinfo != IP_CT_RELATED_REPLY);
- if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
+ if (skb_ensure_writable(skb, hdrlen + sizeof(*inside)))
return 0;
- if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
+ if (nf_ip_checksum(skb, hooknum, hdrlen, IPPROTO_ICMP))
return 0;
inside = (void *)skb->data + hdrlen;
@@@ -784,7 -784,7 +784,7 @@@ int nf_nat_icmpv6_reply_translation(str
WARN_ON(ctinfo != IP_CT_RELATED && ctinfo != IP_CT_RELATED_REPLY);
- if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
+ if (skb_ensure_writable(skb, hdrlen + sizeof(*inside)))
return 0;
if (nf_ip6_checksum(skb, hooknum, hdrlen, IPPROTO_ICMPV6))
return 0;
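
[editor's note] The nf_nat_proto.c hunks are a mechanical conversion from skb_make_writable(), which returned true on success, to skb_ensure_writable(), which returns 0 on success and a negative errno on failure — hence every `if (!skb_make_writable(...))` becomes `if (skb_ensure_writable(...))`. (The ICMP hunk also passes IPPROTO_ICMP instead of 0 to nf_ip_checksum().) A compilable toy sketch of the two calling conventions; these stand-ins are not the kernel helpers:

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

static bool toy_make_writable(int len)   /* old style: true on success */
{
	return len <= 1500;
}

static int toy_ensure_writable(int len)  /* new style: 0 on success */
{
	return len <= 1500 ? 0 : -ENOMEM;
}

static bool manip_ok_old(int len)
{
	if (!toy_make_writable(len))   /* failure is "false" */
		return false;
	return true;
}

static bool manip_ok_new(int len)
{
	if (toy_ensure_writable(len))  /* failure is nonzero */
		return false;
	return true;
}

int main(void)
{
	printf("%d %d\n", manip_ok_old(9000), manip_ok_new(9000)); /* 0 0 */
	return 0;
}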
diff --combined net/sched/cls_flower.c
index fdeede3af72e,ce2e9b1c9850..5d4935b51e6f
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@@ -27,7 -27,7 +27,7 @@@
#include <net/dst_metadata.h>
struct fl_flow_key {
- int indev_ifindex;
+ struct flow_dissector_key_meta meta;
struct flow_dissector_key_control control;
struct flow_dissector_key_control enc_control;
struct flow_dissector_key_basic basic;
@@@ -284,7 -284,7 +284,7 @@@ static int fl_classify(struct sk_buff *
list_for_each_entry_rcu(mask, &head->masks, list) {
fl_clear_masked_range(&skb_key, mask);
- skb_key.indev_ifindex = skb->skb_iif;
+ skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
/* skb_flow_dissect() does not set n_proto in case an unknown
* protocol, so do it rather here.
*/
@@@ -524,6 -524,24 +524,6 @@@ static struct cls_fl_filter *__fl_get(s
return f;
}
-static struct cls_fl_filter *fl_get_next_filter(struct tcf_proto *tp,
- unsigned long *handle)
-{
- struct cls_fl_head *head = fl_head_dereference(tp);
- struct cls_fl_filter *f;
-
- rcu_read_lock();
- while ((f = idr_get_next_ul(&head->handle_idr, handle))) {
- /* don't return filters that are being deleted */
- if (refcount_inc_not_zero(&f->refcnt))
- break;
- ++(*handle);
- }
- rcu_read_unlock();
-
- return f;
-}
-
static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
bool *last, bool rtnl_held,
struct netlink_ext_ack *extack)
@@@ -1003,15 -1021,14 +1003,14 @@@ static int fl_set_key(struct net *net,
{
__be16 ethertype;
int ret = 0;
- #ifdef CONFIG_NET_CLS_IND
+
if (tb[TCA_FLOWER_INDEV]) {
int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
if (err < 0)
return err;
- key->indev_ifindex = err;
- mask->indev_ifindex = 0xffffffff;
+ key->meta.ingress_ifindex = err;
+ mask->meta.ingress_ifindex = 0xffffffff;
}
- #endif
fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
@@@ -1264,6 -1281,8 +1263,8 @@@ static void fl_init_dissector(struct fl
struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
size_t cnt = 0;
+ FL_KEY_SET_IF_MASKED(mask, keys, cnt,
+ FLOW_DISSECTOR_KEY_META, meta);
FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
FL_KEY_SET_IF_MASKED(mask, keys, cnt,
@@@ -1673,25 -1692,20 +1674,25 @@@ static int fl_delete(struct tcf_proto *
static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
bool rtnl_held)
{
+ struct cls_fl_head *head = fl_head_dereference(tp);
+ unsigned long id = arg->cookie, tmp;
struct cls_fl_filter *f;
arg->count = arg->skip;
- while ((f = fl_get_next_filter(tp, &arg->cookie)) != NULL) {
+ idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
+ /* don't return filters that are being deleted */
+ if (!refcount_inc_not_zero(&f->refcnt))
+ continue;
if (arg->fn(tp, f, arg) < 0) {
__fl_put(f);
arg->stop = 1;
break;
}
__fl_put(f);
- arg->cookie++;
arg->count++;
}
+ arg->cookie = id;
}
static struct cls_fl_filter *
@@@ -2110,10 -2124,10 +2111,10 @@@ static int fl_dump_key_enc_opt(struct s
static int fl_dump_key(struct sk_buff *skb, struct net *net,
struct fl_flow_key *key, struct fl_flow_key *mask)
{
- if (mask->indev_ifindex) {
+ if (mask->meta.ingress_ifindex) {
struct net_device *dev;
- dev = __dev_get_by_index(net, key->indev_ifindex);
+ dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
goto nla_put_failure;
}
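
[editor's note] cls_flower drops its private indev_ifindex field in favor of the shared FLOW_DISSECTOR_KEY_META key, so the ingress ifindex is dissected like any other masked key and the CONFIG_NET_CLS_IND ifdef disappears. The FL_KEY_SET_IF_MASKED pattern — register a dissector key only when some bit of its mask is set — can be modeled roughly like this (illustrative names only):

#include <stdio.h>
#include <string.h>

struct meta_key { unsigned int ingress_ifindex; };
struct toy_key  { struct meta_key meta; int basic; };

static int mask_is_set(const void *mask, size_t len)
{
	static const char zeroes[64];

	return memcmp(mask, zeroes, len) != 0;
}

int main(void)
{
	struct toy_key mask = { .meta = { .ingress_ifindex = 0 } };
	int keys_used = 0;

	/* FL_KEY_SET_IF_MASKED(mask, keys, cnt, META, meta) analogue */
	if (mask_is_set(&mask.meta, sizeof(mask.meta)))
		keys_used++;
	printf("meta key registered: %d\n", keys_used);  /* 0: unmasked */

	mask.meta.ingress_ifindex = 0xffffffff;
	if (mask_is_set(&mask.meta, sizeof(mask.meta)))
		keys_used++;
	printf("meta key registered: %d\n", keys_used);  /* 1 now */
	return 0;
}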
diff --combined net/socket.c
index 92739450956b,d97b74f762e8..77b8f3d3448f
--- a/net/socket.c
+++ b/net/socket.c
@@@ -73,7 -73,6 +73,7 @@@
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/mount.h>
+#include <linux/pseudo_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
@@@ -104,13 -103,6 +104,6 @@@
#include <net/busy_poll.h>
#include <linux/errqueue.h>
- /* proto_ops for ipv4 and ipv6 use the same {recv,send}msg function */
- #if IS_ENABLED(CONFIG_INET)
- #define INDIRECT_CALL_INET4(f, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
- #else
- #define INDIRECT_CALL_INET4(f, f1, ...) f(__VA_ARGS__)
- #endif
-
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sysctl_net_busy_read __read_mostly;
unsigned int sysctl_net_busy_poll __read_mostly;
@@@ -354,22 -346,19 +347,22 @@@ static const struct xattr_handler *sock
NULL
};
-static struct dentry *sockfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int sockfs_init_fs_context(struct fs_context *fc)
{
- return mount_pseudo_xattr(fs_type, "socket:", &sockfs_ops,
- sockfs_xattr_handlers,
- &sockfs_dentry_operations, SOCKFS_MAGIC);
+ struct pseudo_fs_context *ctx = init_pseudo(fc, SOCKFS_MAGIC);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->ops = &sockfs_ops;
+ ctx->dops = &sockfs_dentry_operations;
+ ctx->xattr = sockfs_xattr_handlers;
+ return 0;
}
static struct vfsmount *sock_mnt __read_mostly;
static struct file_system_type sock_fs_type = {
.name = "sockfs",
- .mount = sockfs_mount,
+ .init_fs_context = sockfs_init_fs_context,
.kill_sb = kill_anon_super,
};
@@@ -433,7 -422,7 +426,7 @@@ static int sock_map_fd(struct socket *s
}
newfile = sock_alloc_file(sock, flags, NULL);
- if (likely(!IS_ERR(newfile))) {
+ if (!IS_ERR(newfile)) {
fd_install(fd, newfile);
return fd;
}
@@@ -645,10 -634,13 +638,13 @@@ EXPORT_SYMBOL(__sock_tx_timestamp)
INDIRECT_CALLABLE_DECLARE(int inet_sendmsg(struct socket *, struct msghdr *,
size_t));
+ INDIRECT_CALLABLE_DECLARE(int inet6_sendmsg(struct socket *, struct msghdr *,
+ size_t));
static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
{
- int ret = INDIRECT_CALL_INET4(sock->ops->sendmsg, inet_sendmsg, sock,
- msg, msg_data_left(msg));
+ int ret = INDIRECT_CALL_INET(sock->ops->sendmsg, inet6_sendmsg,
+ inet_sendmsg, sock, msg,
+ msg_data_left(msg));
BUG_ON(ret == -EIOCBQUEUED);
return ret;
}
@@@ -874,12 -866,15 +870,15 @@@ void __sock_recv_ts_and_drops(struct ms
EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);
INDIRECT_CALLABLE_DECLARE(int inet_recvmsg(struct socket *, struct msghdr *,
- size_t , int ));
+ size_t, int));
+ INDIRECT_CALLABLE_DECLARE(int inet6_recvmsg(struct socket *, struct msghdr *,
+ size_t, int));
static inline int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
int flags)
{
- return INDIRECT_CALL_INET4(sock->ops->recvmsg, inet_recvmsg, sock, msg,
- msg_data_left(msg), flags);
+ return INDIRECT_CALL_INET(sock->ops->recvmsg, inet6_recvmsg,
+ inet_recvmsg, sock, msg, msg_data_left(msg),
+ flags);
}
/**
@@@ -2055,6 -2050,8 +2054,8 @@@ SYSCALL_DEFINE4(recv, int, fd, void __u
static int __sys_setsockopt(int fd, int level, int optname,
char __user *optval, int optlen)
{
+ mm_segment_t oldfs = get_fs();
+ char *kernel_optval = NULL;
int err, fput_needed;
struct socket *sock;
@@@ -2067,6 -2064,22 +2068,22 @@@
if (err)
goto out_put;
+ err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock->sk, &level,
+ &optname, optval, &optlen,
+ &kernel_optval);
+
+ if (err < 0) {
+ goto out_put;
+ } else if (err > 0) {
+ err = 0;
+ goto out_put;
+ }
+
+ if (kernel_optval) {
+ set_fs(KERNEL_DS);
+ optval = (char __user __force *)kernel_optval;
+ }
+
if (level == SOL_SOCKET)
err =
sock_setsockopt(sock, level, optname, optval,
@@@ -2075,6 -2088,11 +2092,11 @@@
err =
sock->ops->setsockopt(sock, level, optname, optval,
optlen);
+
+ if (kernel_optval) {
+ set_fs(oldfs);
+ kfree(kernel_optval);
+ }
out_put:
fput_light(sock->file, fput_needed);
}
@@@ -2097,6 -2115,7 +2119,7 @@@ static int __sys_getsockopt(int fd, in
{
int err, fput_needed;
struct socket *sock;
+ int max_optlen;
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (sock != NULL) {
@@@ -2104,6 -2123,8 +2127,8 @@@
if (err)
goto out_put;
+ max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen);
+
if (level == SOL_SOCKET)
err =
sock_getsockopt(sock, level, optname, optval,
@@@ -2112,6 -2133,10 +2137,10 @@@
err =
sock->ops->getsockopt(sock, level, optname, optval,
optlen);
+
+ err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock->sk, level, optname,
+ optval, optlen,
+ max_optlen, err);
out_put:
fput_light(sock->file, fput_needed);
}
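
[editor's note] Besides the sockfs fs_context conversion and the new cgroup BPF sockopt hooks, the socket.c hunks replace the file-local INDIRECT_CALL_INET4 macro with the shared INDIRECT_CALL_INET(), which sidesteps a retpoline-expensive indirect call by comparing the ops pointer against the expected IPv6/IPv4 handlers. A userspace model of the two-candidate form (the real macro, with likely() hints, lives in linux/indirect_call_wrapper.h; the handlers here are toys):

#include <stdio.h>

static int v4_handler(int x) { return x + 4; }
static int v6_handler(int x) { return x + 6; }

/* Simplified INDIRECT_CALL_2(): direct call when the pointer matches
 * a known candidate, indirect call otherwise. */
#define INDIRECT_CALL_2(f, f2, f1, ...)			\
	((f) == (f2) ? f2(__VA_ARGS__) :		\
	 (f) == (f1) ? f1(__VA_ARGS__) : (f)(__VA_ARGS__))

int main(void)
{
	int (*op)(int) = v6_handler;

	/* First candidate matches, so the call is direct. */
	printf("%d\n", INDIRECT_CALL_2(op, v6_handler, v4_handler, 1));
	return 0;
}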
diff --combined net/tls/tls_device.c
index eb8f24f420f0,40076f423dcb..92fd1352c037
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@@ -61,7 -61,7 +61,7 @@@ static void tls_device_free_ctx(struct
if (ctx->rx_conf == TLS_HW)
kfree(tls_offload_ctx_rx(ctx));
- kfree(ctx);
+ tls_ctx_free(ctx);
}
static void tls_device_gc_task(struct work_struct *work)
@@@ -209,6 -209,29 +209,29 @@@ void tls_device_free_resources_tx(struc
tls_free_partial_record(sk, tls_ctx);
}
+ static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
+ u32 seq)
+ {
+ struct net_device *netdev;
+ struct sk_buff *skb;
+ u8 *rcd_sn;
+
+ skb = tcp_write_queue_tail(sk);
+ if (skb)
+ TCP_SKB_CB(skb)->eor = 1;
+
+ rcd_sn = tls_ctx->tx.rec_seq;
+
+ down_read(&device_offload_lock);
+ netdev = tls_ctx->netdev;
+ if (netdev)
+ netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
+ TLS_OFFLOAD_CTX_DIR_TX);
+ up_read(&device_offload_lock);
+
+ clear_bit_unlock(TLS_TX_SYNC_SCHED, &tls_ctx->flags);
+ }
+
static void tls_append_frag(struct tls_record_info *record,
struct page_frag *pfrag,
int size)
@@@ -252,7 -275,7 +275,7 @@@ static int tls_push_record(struct sock
skb_frag_address(frag),
record->len - prot->prepend_size,
record_type,
- ctx->crypto_send.info.version);
+ prot->version);
/* HW doesn't care about the data in the tag, because it fills it. */
dummy_tag_frag.page = skb_frag_page(frag);
@@@ -264,7 -287,11 +287,11 @@@
list_add_tail(&record->list, &offload_ctx->records_list);
spin_unlock_irq(&offload_ctx->lock);
offload_ctx->open_record = NULL;
- tls_advance_record_sn(sk, &ctx->tx, ctx->crypto_send.info.version);
+
+ if (test_bit(TLS_TX_SYNC_SCHED, &ctx->flags))
+ tls_device_resync_tx(sk, ctx, tp->write_seq);
+
+ tls_advance_record_sn(sk, prot, &ctx->tx);
for (i = 0; i < record->num_frags; i++) {
frag = &record->frags[i];
@@@ -551,7 -578,7 +578,7 @@@ void tls_device_write_space(struct soc
}
static void tls_device_resync_rx(struct tls_context *tls_ctx,
- struct sock *sk, u32 seq, u64 rcd_sn)
+ struct sock *sk, u32 seq, u8 *rcd_sn)
{
struct net_device *netdev;
@@@ -559,14 -586,17 +586,17 @@@
return;
netdev = READ_ONCE(tls_ctx->netdev);
if (netdev)
- netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq, rcd_sn);
+ netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
+ TLS_OFFLOAD_CTX_DIR_RX);
clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
}
- void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
+ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_rx *rx_ctx;
+ u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
+ struct tls_prot_info *prot;
u32 is_req_pending;
s64 resync_req;
u32 req_seq;
@@@ -574,15 -604,83 +604,83 @@@
if (tls_ctx->rx_conf != TLS_HW)
return;
+ prot = &tls_ctx->prot_info;
rx_ctx = tls_offload_ctx_rx(tls_ctx);
- resync_req = atomic64_read(&rx_ctx->resync_req);
- req_seq = (resync_req >> 32) - ((u32)TLS_HEADER_SIZE - 1);
- is_req_pending = resync_req;
+ memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
- if (unlikely(is_req_pending) && req_seq == seq &&
- atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) {
+ switch (rx_ctx->resync_type) {
+ case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ:
+ resync_req = atomic64_read(&rx_ctx->resync_req);
+ req_seq = resync_req >> 32;
seq += TLS_HEADER_SIZE - 1;
- tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
+ is_req_pending = resync_req;
+
+ if (likely(!is_req_pending) || req_seq != seq ||
+ !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
+ return;
+ break;
+ case TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT:
+ if (likely(!rx_ctx->resync_nh_do_now))
+ return;
+
+ /* head of next rec is already in, note that the sock_inq will
+ * include the currently parsed message when called from parser
+ */
+ if (tcp_inq(sk) > rcd_len)
+ return;
+
+ rx_ctx->resync_nh_do_now = 0;
+ seq += rcd_len;
+ tls_bigint_increment(rcd_sn, prot->rec_seq_size);
+ break;
+ }
+
+ tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
+ }
+
+ static void tls_device_core_ctrl_rx_resync(struct tls_context *tls_ctx,
+ struct tls_offload_context_rx *ctx,
+ struct sock *sk, struct sk_buff *skb)
+ {
+ struct strp_msg *rxm;
+
+ /* device will request resyncs by itself based on stream scan */
+ if (ctx->resync_type != TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT)
+ return;
+ /* already scheduled */
+ if (ctx->resync_nh_do_now)
+ return;
+ /* seen decrypted fragments since last fully-failed record */
+ if (ctx->resync_nh_reset) {
+ ctx->resync_nh_reset = 0;
+ ctx->resync_nh.decrypted_failed = 1;
+ ctx->resync_nh.decrypted_tgt = TLS_DEVICE_RESYNC_NH_START_IVAL;
+ return;
+ }
+
+ if (++ctx->resync_nh.decrypted_failed <= ctx->resync_nh.decrypted_tgt)
+ return;
+
+ /* doing resync, bump the next target in case it fails */
+ if (ctx->resync_nh.decrypted_tgt < TLS_DEVICE_RESYNC_NH_MAX_IVAL)
+ ctx->resync_nh.decrypted_tgt *= 2;
+ else
+ ctx->resync_nh.decrypted_tgt += TLS_DEVICE_RESYNC_NH_MAX_IVAL;
+
+ rxm = strp_msg(skb);
+
+ /* head of next rec is already in, parser will sync for us */
+ if (tcp_inq(sk) > rxm->full_len) {
+ ctx->resync_nh_do_now = 1;
+ } else {
+ struct tls_prot_info *prot = &tls_ctx->prot_info;
+ u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
+
+ memcpy(rcd_sn, tls_ctx->rx.rec_seq, prot->rec_seq_size);
+ tls_bigint_increment(rcd_sn, prot->rec_seq_size);
+
+ tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
+ rcd_sn);
}
}
@@@ -610,8 -708,10 +708,10 @@@ static int tls_device_reencrypt(struct
sg_set_buf(&sg[0], buf,
rxm->full_len + TLS_HEADER_SIZE +
TLS_CIPHER_AES_GCM_128_IV_SIZE);
- skb_copy_bits(skb, offset, buf,
- TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
+ err = skb_copy_bits(skb, offset, buf,
+ TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);
+ if (err)
+ goto free_buf;
/* We are interested only in the decrypted data not the auth */
err = decrypt_skb(sk, skb, sg);
@@@ -625,8 -725,11 +725,11 @@@
if (skb_pagelen(skb) > offset) {
copy = min_t(int, skb_pagelen(skb) - offset, data_len);
- if (skb->decrypted)
- skb_store_bits(skb, offset, buf, copy);
+ if (skb->decrypted) {
+ err = skb_store_bits(skb, offset, buf, copy);
+ if (err)
+ goto free_buf;
+ }
offset += copy;
buf += copy;
@@@ -649,8 -752,11 +752,11 @@@
copy = min_t(int, skb_iter->len - frag_pos,
data_len + rxm->offset - offset);
- if (skb_iter->decrypted)
- skb_store_bits(skb_iter, frag_pos, buf, copy);
+ if (skb_iter->decrypted) {
+ err = skb_store_bits(skb_iter, frag_pos, buf, copy);
+ if (err)
+ goto free_buf;
+ }
offset += copy;
buf += copy;
@@@ -671,10 -777,6 +777,6 @@@ int tls_device_decrypted(struct sock *s
int is_encrypted = !is_decrypted;
struct sk_buff *skb_iter;
- /* Skip if it is already decrypted */
- if (ctx->sw.decrypted)
- return 0;
-
/* Check if all the data is decrypted already */
skb_walk_frags(skb, skb_iter) {
is_decrypted &= skb_iter->decrypted;
@@@ -683,12 -785,21 +785,21 @@@
ctx->sw.decrypted |= is_decrypted;
- /* Return immedeatly if the record is either entirely plaintext or
+ /* Return immediately if the record is either entirely plaintext or
* entirely ciphertext. Otherwise handle reencrypt partially decrypted
* record.
*/
- return (is_encrypted || is_decrypted) ? 0 :
- tls_device_reencrypt(sk, skb);
+ if (is_decrypted) {
+ ctx->resync_nh_reset = 1;
+ return 0;
+ }
+ if (is_encrypted) {
+ tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
+ return 0;
+ }
+
+ ctx->resync_nh_reset = 1;
+ return tls_device_reencrypt(sk, skb);
}
static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
@@@ -742,11 -853,6 +853,11 @@@ int tls_set_device_offload(struct sock
}
crypto_info = &ctx->crypto_send.info;
+ if (crypto_info->version != TLS_1_2_VERSION) {
+ rc = -EOPNOTSUPP;
+ goto free_offload_ctx;
+ }
+
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128:
nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
@@@ -762,6 -868,12 +873,12 @@@
goto free_offload_ctx;
}
+ /* Sanity-check the rec_seq_size for stack allocations */
+ if (rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
+ rc = -EINVAL;
+ goto free_offload_ctx;
+ }
+
prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
prot->tag_size = tag_size;
prot->overhead_size = prot->prepend_size + prot->tag_size;
@@@ -881,9 -993,6 +998,9 @@@ int tls_set_device_offload_rx(struct so
struct net_device *netdev;
int rc = 0;
+ if (ctx->crypto_recv.info.version != TLS_1_2_VERSION)
+ return -EOPNOTSUPP;
+
/* We support starting offload on multiple sockets
* concurrently, so we only need a read lock here.
* This lock must precede get_netdev_for_sock to prevent races between
@@@ -916,6 -1025,7 +1033,7 @@@
rc = -ENOMEM;
goto release_netdev;
}
+ context->resync_nh_reset = 1;
ctx->priv_ctx_rx = context;
rc = tls_set_sw_offload(sk, ctx, 0);
@@@ -1023,7 -1133,7 +1141,7 @@@ static int tls_dev_event(struct notifie
case NETDEV_REGISTER:
case NETDEV_FEAT_CHANGE:
if ((dev->features & NETIF_F_HW_TLS_RX) &&
- !dev->tlsdev_ops->tls_dev_resync_rx)
+ !dev->tlsdev_ops->tls_dev_resync)
return NOTIFY_BAD;
if (dev->tlsdev_ops &&
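
[editor's note] The tls_device.c hunks add a second RX resync mode, TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT, where the core throttles resync attempts: the target count of failed records doubles after each attempt up to a ceiling. Loosely modeled below (the real code resets the counter only once decrypted records are seen again; constants are stand-ins for the TLS_DEVICE_RESYNC_NH_* values):

#include <stdio.h>

#define START_IVAL 2    /* stand-in for TLS_DEVICE_RESYNC_NH_START_IVAL */
#define MAX_IVAL 128    /* stand-in for TLS_DEVICE_RESYNC_NH_MAX_IVAL */

int main(void)
{
	unsigned int failed = 0, tgt = START_IVAL;
	int rec;

	for (rec = 0; rec < 1000; rec++) {
		if (++failed <= tgt)
			continue;	/* not time to resync yet */
		failed = 0;		/* simplification, see note above */
		printf("resync at record %d (target was %u)\n", rec, tgt);
		if (tgt < MAX_IVAL)
			tgt *= 2;	/* back off exponentially ... */
		else
			tgt += MAX_IVAL; /* ... then linearly, as above */
	}
	return 0;
}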
diff --combined net/tls/tls_sw.c
index e2385183526e,db585964b52b..53b4ad94e74a
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@@ -534,7 -534,7 +534,7 @@@ static int tls_do_encryption(struct soc
/* Unhook the record from context if encryption is not failure */
ctx->open_rec = NULL;
- tls_advance_record_sn(sk, &tls_ctx->tx, prot->version);
+ tls_advance_record_sn(sk, prot, &tls_ctx->tx);
return rc;
}
@@@ -1485,15 -1485,16 +1485,16 @@@ static int decrypt_skb_update(struct so
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
struct tls_prot_info *prot = &tls_ctx->prot_info;
- int version = prot->version;
struct strp_msg *rxm = strp_msg(skb);
int pad, err = 0;
if (!ctx->decrypted) {
#ifdef CONFIG_TLS_DEVICE
- err = tls_device_decrypted(sk, skb);
- if (err < 0)
- return err;
+ if (tls_ctx->rx_conf == TLS_HW) {
+ err = tls_device_decrypted(sk, skb);
+ if (err < 0)
+ return err;
+ }
#endif
/* Still not decrypted after tls_device */
if (!ctx->decrypted) {
@@@ -1501,8 -1502,8 +1502,8 @@@
async);
if (err < 0) {
if (err == -EINPROGRESS)
- tls_advance_record_sn(sk, &tls_ctx->rx,
- version);
+ tls_advance_record_sn(sk, prot,
+ &tls_ctx->rx);
return err;
}
@@@ -1517,7 -1518,7 +1518,7 @@@
rxm->full_len -= pad;
rxm->offset += prot->prepend_size;
rxm->full_len -= prot->overhead_size;
- tls_advance_record_sn(sk, &tls_ctx->rx, version);
+ tls_advance_record_sn(sk, prot, &tls_ctx->rx);
ctx->decrypted = true;
ctx->saved_data_ready(sk);
} else {
@@@ -1958,8 -1959,7 +1959,8 @@@ bool tls_sw_stream_read(const struct so
ingress_empty = list_empty(&psock->ingress_msg);
rcu_read_unlock();
- return !ingress_empty || ctx->recv_pkt;
+ return !ingress_empty || ctx->recv_pkt ||
+ !skb_queue_empty(&ctx->rx_list);
}
static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
@@@ -2014,8 -2014,8 +2015,8 @@@
goto read_failure;
}
#ifdef CONFIG_TLS_DEVICE
- handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
- *(u64*)tls_ctx->rx.rec_seq);
+ tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
+ TCP_SKB_CB(skb)->seq + rxm->offset);
#endif
return data_len + TLS_HEADER_SIZE;
@@@ -2282,8 -2282,9 +2283,9 @@@ int tls_set_sw_offload(struct sock *sk
goto free_priv;
}
- /* Sanity-check the IV size for stack allocations. */
- if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
+ /* Sanity-check the sizes for stack allocations. */
+ if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE ||
+ rec_seq_size > TLS_MAX_REC_SEQ_SIZE) {
rc = -EINVAL;
goto free_priv;
}
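
[editor's note] The tls_sw.c hunks switch tls_advance_record_sn() to take the tls_prot_info, and both files now sanity-check rec_seq_size against TLS_MAX_REC_SEQ_SIZE before copying the record sequence onto the stack. That sequence number is a big-endian byte array bumped per record; a standalone version of the increment (tls_bigint_increment() in the kernel) is short:

#include <stdio.h>

static void bigint_increment(unsigned char *seq, int len)
{
	int i;

	/* Big-endian increment: carry from the last byte upward. */
	for (i = len - 1; i >= 0; i--)
		if (++seq[i] != 0)	/* stop once a byte didn't wrap */
			break;
}

int main(void)
{
	unsigned char seq[8] = { 0, 0, 0, 0, 0, 0, 0, 0xff };

	bigint_increment(seq, sizeof(seq));
	printf("%02x %02x\n", seq[6], seq[7]);	/* prints: 01 00 */
	return 0;
}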
diff --combined net/xdp/xsk.c
index f53a6ef7c155,74417a851ed5..ada385a89cc3
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@@ -37,6 -37,12 +37,12 @@@ bool xsk_is_setup_for_bpf_map(struct xd
READ_ONCE(xs->umem->fq);
}
+ bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
+ {
+ return xskq_has_addrs(umem->fq, cnt);
+ }
+ EXPORT_SYMBOL(xsk_umem_has_addrs);
+
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
return xskq_peek_addr(umem->fq, addr);
@@@ -166,22 -172,18 +172,18 @@@ void xsk_umem_consume_tx_done(struct xd
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);
- bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
+ bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
{
- struct xdp_desc desc;
struct xdp_sock *xs;
rcu_read_lock();
list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
- if (!xskq_peek_desc(xs->tx, &desc))
+ if (!xskq_peek_desc(xs->tx, desc))
continue;
- if (xskq_produce_addr_lazy(umem->cq, desc.addr))
+ if (xskq_produce_addr_lazy(umem->cq, desc->addr))
goto out;
- *dma = xdp_umem_get_dma(umem, desc.addr);
- *len = desc.len;
-
xskq_discard_desc(xs->tx);
rcu_read_unlock();
return true;
@@@ -335,22 -337,6 +337,22 @@@ static int xsk_init_queue(u32 entries,
return 0;
}
+static void xsk_unbind_dev(struct xdp_sock *xs)
+{
+ struct net_device *dev = xs->dev;
+
+ if (!dev || xs->state != XSK_BOUND)
+ return;
+
+ xs->state = XSK_UNBOUND;
+
+ /* Wait for driver to stop using the xdp socket. */
+ xdp_del_sk_umem(xs->umem, xs);
+ xs->dev = NULL;
+ synchronize_net();
+ dev_put(dev);
+}
+
static int xsk_release(struct socket *sock)
{
struct sock *sk = sock->sk;
@@@ -370,7 -356,15 +372,7 @@@
sock_prot_inuse_add(net, sk->sk_prot, -1);
local_bh_enable();
- if (xs->dev) {
- struct net_device *dev = xs->dev;
-
- /* Wait for driver to stop using the xdp socket. */
- xdp_del_sk_umem(xs->umem, xs);
- xs->dev = NULL;
- synchronize_net();
- dev_put(dev);
- }
+ xsk_unbind_dev(xs);
xskq_destroy(xs->rx);
xskq_destroy(xs->tx);
@@@ -420,7 -414,7 +422,7 @@@ static int xsk_bind(struct socket *sock
return -EINVAL;
mutex_lock(&xs->mutex);
- if (xs->dev) {
+ if (xs->state != XSK_READY) {
err = -EBUSY;
goto out_release;
}
@@@ -500,8 -494,6 +502,8 @@@
out_unlock:
if (err)
dev_put(dev);
+ else
+ xs->state = XSK_BOUND;
out_release:
mutex_unlock(&xs->mutex);
return err;
@@@ -530,10 -522,6 +532,10 @@@ static int xsk_setsockopt(struct socke
return -EFAULT;
mutex_lock(&xs->mutex);
+ if (xs->state != XSK_READY) {
+ mutex_unlock(&xs->mutex);
+ return -EBUSY;
+ }
q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
err = xsk_init_queue(entries, q, false);
mutex_unlock(&xs->mutex);
@@@ -548,7 -536,7 +550,7 @@@
return -EFAULT;
mutex_lock(&xs->mutex);
- if (xs->umem) {
+ if (xs->state != XSK_READY || xs->umem) {
mutex_unlock(&xs->mutex);
return -EBUSY;
}
@@@ -575,10 -563,6 +577,10 @@@
return -EFAULT;
mutex_lock(&xs->mutex);
+ if (xs->state != XSK_READY) {
+ mutex_unlock(&xs->mutex);
+ return -EBUSY;
+ }
if (!xs->umem) {
mutex_unlock(&xs->mutex);
return -EINVAL;
@@@ -662,6 -646,26 +664,26 @@@ static int xsk_getsockopt(struct socke
return 0;
}
+ case XDP_OPTIONS:
+ {
+ struct xdp_options opts = {};
+
+ if (len < sizeof(opts))
+ return -EINVAL;
+
+ mutex_lock(&xs->mutex);
+ if (xs->zc)
+ opts.flags |= XDP_OPTIONS_ZEROCOPY;
+ mutex_unlock(&xs->mutex);
+
+ len = sizeof(opts);
+ if (copy_to_user(optval, &opts, len))
+ return -EFAULT;
+ if (put_user(len, optlen))
+ return -EFAULT;
+
+ return 0;
+ }
default:
break;
}
@@@ -680,9 -684,6 +702,9 @@@ static int xsk_mmap(struct file *file,
unsigned long pfn;
struct page *qpg;
+ if (xs->state != XSK_READY)
+ return -EBUSY;
+
if (offset == XDP_PGOFF_RX_RING) {
q = READ_ONCE(xs->rx);
} else if (offset == XDP_PGOFF_TX_RING) {
@@@ -714,38 -715,6 +736,38 @@@
size, vma->vm_page_prot);
}
+static int xsk_notifier(struct notifier_block *this,
+ unsigned long msg, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct net *net = dev_net(dev);
+ struct sock *sk;
+
+ switch (msg) {
+ case NETDEV_UNREGISTER:
+ mutex_lock(&net->xdp.lock);
+ sk_for_each(sk, &net->xdp.list) {
+ struct xdp_sock *xs = xdp_sk(sk);
+
+ mutex_lock(&xs->mutex);
+ if (xs->dev == dev) {
+ sk->sk_err = ENETDOWN;
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_error_report(sk);
+
+ xsk_unbind_dev(xs);
+
+ /* Clear device references in umem. */
+ xdp_umem_clear_dev(xs->umem);
+ }
+ mutex_unlock(&xs->mutex);
+ }
+ mutex_unlock(&net->xdp.lock);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
static struct proto xsk_proto = {
.name = "XDP",
.owner = THIS_MODULE,
@@@ -817,7 -786,6 +839,7 @@@ static int xsk_create(struct net *net,
sock_set_flag(sk, SOCK_RCU_FREE);
xs = xdp_sk(sk);
+ xs->state = XSK_READY;
mutex_init(&xs->mutex);
spin_lock_init(&xs->tx_completion_lock);
@@@ -838,10 -806,6 +860,10 @@@ static const struct net_proto_family xs
.owner = THIS_MODULE,
};
+static struct notifier_block xsk_netdev_notifier = {
+ .notifier_call = xsk_notifier,
+};
+
static int __net_init xsk_net_init(struct net *net)
{
mutex_init(&net->xdp.lock);
@@@ -874,15 -838,8 +896,15 @@@ static int __init xsk_init(void
err = register_pernet_subsys(&xsk_net_ops);
if (err)
goto out_sk;
+
+ err = register_netdevice_notifier(&xsk_netdev_notifier);
+ if (err)
+ goto out_pernet;
+
return 0;
+out_pernet:
+ unregister_pernet_subsys(&xsk_net_ops);
out_sk:
sock_unregister(PF_XDP);
out_proto:
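
[editor's note] The xsk.c hunks introduce an explicit socket state (XSK_READY, XSK_BOUND, XSK_UNBOUND): setsockopt/mmap are only allowed in READY, bind() moves READY to BOUND, and the new netdev notifier force-unbinds sockets when their device unregisters. A compact illustrative model of those transitions:

#include <stdio.h>
#include <errno.h>

enum xsk_state { XSK_READY, XSK_BOUND, XSK_UNBOUND };

struct toy_xsk { enum xsk_state state; int ifindex; };

static int toy_bind(struct toy_xsk *xs, int ifindex)
{
	if (xs->state != XSK_READY)
		return -EBUSY;		/* mirrors xsk_bind() above */
	xs->ifindex = ifindex;
	xs->state = XSK_BOUND;
	return 0;
}

static void toy_netdev_unregister(struct toy_xsk *xs, int ifindex)
{
	if (xs->state == XSK_BOUND && xs->ifindex == ifindex)
		xs->state = XSK_UNBOUND; /* mirrors xsk_unbind_dev() */
}

int main(void)
{
	struct toy_xsk xs = { XSK_READY, 0 };

	printf("bind: %d\n", toy_bind(&xs, 3));    /* 0 */
	printf("rebind: %d\n", toy_bind(&xs, 3));  /* -16 (EBUSY) */
	toy_netdev_unregister(&xs, 3);
	printf("state: %d\n", (int)xs.state);      /* 2 (XSK_UNBOUND) */
	return 0;
}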
diff --combined net/xdp/xsk_queue.h
index cba4a640d5e8,12b49784a6d5..909c5168ed0f
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@@ -117,6 -117,20 +117,20 @@@ static inline u32 xskq_nb_free(struct x
return q->nentries - (producer - q->cons_tail);
}
+ static inline bool xskq_has_addrs(struct xsk_queue *q, u32 cnt)
+ {
+ u32 entries = q->prod_tail - q->cons_tail;
+
+ if (entries >= cnt)
+ return true;
+
+ /* Refresh the local pointer. */
+ q->prod_tail = READ_ONCE(q->ring->producer);
+ entries = q->prod_tail - q->cons_tail;
+
+ return entries >= cnt;
+ }
+
/* UMEM queue */
static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr)
@@@ -288,7 -302,7 +302,7 @@@ static inline void xskq_produce_flush_d
/* Order producer and data */
smp_wmb(); /* B, matches C */
- q->prod_tail = q->prod_head,
+ q->prod_tail = q->prod_head;
WRITE_ONCE(q->ring->producer, q->prod_tail);
}
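
[editor's note] xskq_has_addrs() above uses the standard single-producer/single-consumer ring idiom: entry counts are producer minus consumer in u32 arithmetic (safe across wraparound), computed first against a cached producer index and re-read from the shared ring only when the cached view falls short. A userspace model with illustrative names:

#include <stdio.h>
#include <stdint.h>

struct toy_q {
	uint32_t shared_producer;  /* written by the other side */
	uint32_t prod_tail;        /* local cached copy */
	uint32_t cons_tail;
};

static int toy_has_addrs(struct toy_q *q, uint32_t cnt)
{
	uint32_t entries = q->prod_tail - q->cons_tail;

	if (entries >= cnt)
		return 1;
	/* Refresh the cached pointer (READ_ONCE() in the kernel). */
	q->prod_tail = q->shared_producer;
	return q->prod_tail - q->cons_tail >= cnt;
}

int main(void)
{
	/* Deliberately straddles the u32 wraparound point. */
	struct toy_q q = { .shared_producer = 5,
			   .prod_tail = 0xfffffffe,
			   .cons_tail = 0xfffffffc };

	printf("%d\n", toy_has_addrs(&q, 2)); /* 1: cached view suffices */
	printf("%d\n", toy_has_addrs(&q, 8)); /* 1: refresh sees 9 entries */
	return 0;
}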
diff --combined net/xfrm/xfrm_interface.c
index 7dbe0c608df5,f8eb9e342173..74868f9d81fb
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@@ -133,7 -133,7 +133,7 @@@ static void xfrmi_dev_free(struct net_d
free_percpu(dev->tstats);
}
-static int xfrmi_create2(struct net_device *dev)
+static int xfrmi_create(struct net_device *dev)
{
struct xfrm_if *xi = netdev_priv(dev);
struct net *net = dev_net(dev);
@@@ -156,7 -156,54 +156,7 @@@ out
return err;
}
-static struct xfrm_if *xfrmi_create(struct net *net, struct xfrm_if_parms *p)
-{
- struct net_device *dev;
- struct xfrm_if *xi;
- char name[IFNAMSIZ];
- int err;
-
- if (p->name[0]) {
- strlcpy(name, p->name, IFNAMSIZ);
- } else {
- err = -EINVAL;
- goto failed;
- }
-
- dev = alloc_netdev(sizeof(*xi), name, NET_NAME_UNKNOWN, xfrmi_dev_setup);
- if (!dev) {
- err = -EAGAIN;
- goto failed;
- }
-
- dev_net_set(dev, net);
-
- xi = netdev_priv(dev);
- xi->p = *p;
- xi->net = net;
- xi->dev = dev;
- xi->phydev = dev_get_by_index(net, p->link);
- if (!xi->phydev) {
- err = -ENODEV;
- goto failed_free;
- }
-
- err = xfrmi_create2(dev);
- if (err < 0)
- goto failed_dev_put;
-
- return xi;
-
-failed_dev_put:
- dev_put(xi->phydev);
-failed_free:
- free_netdev(dev);
-failed:
- return ERR_PTR(err);
-}
-
-static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p,
- int create)
+static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p)
{
struct xfrm_if __rcu **xip;
struct xfrm_if *xi;
@@@ -164,11 -211,17 +164,11 @@@
for (xip = &xfrmn->xfrmi[0];
(xi = rtnl_dereference(*xip)) != NULL;
- xip = &xi->next) {
- if (xi->p.if_id == p->if_id) {
- if (create)
- return ERR_PTR(-EEXIST);
-
+ xip = &xi->next)
+ if (xi->p.if_id == p->if_id)
return xi;
- }
- }
- if (!create)
- return ERR_PTR(-ENODEV);
- return xfrmi_create(net, p);
+
+ return NULL;
}
static void xfrmi_dev_uninit(struct net_device *dev)
@@@ -633,33 -686,21 +633,33 @@@ static int xfrmi_newlink(struct net *sr
struct netlink_ext_ack *extack)
{
struct net *net = dev_net(dev);
- struct xfrm_if_parms *p;
+ struct xfrm_if_parms p;
struct xfrm_if *xi;
+ int err;
- xi = netdev_priv(dev);
- p = &xi->p;
-
- xfrmi_netlink_parms(data, p);
+ xfrmi_netlink_parms(data, &p);
if (!tb[IFLA_IFNAME])
return -EINVAL;
- nla_strlcpy(p->name, tb[IFLA_IFNAME], IFNAMSIZ);
+ nla_strlcpy(p.name, tb[IFLA_IFNAME], IFNAMSIZ);
- xi = xfrmi_locate(net, p, 1);
- return PTR_ERR_OR_ZERO(xi);
+ xi = xfrmi_locate(net, &p);
+ if (xi)
+ return -EEXIST;
+
+ xi = netdev_priv(dev);
+ xi->p = p;
+ xi->net = net;
+ xi->dev = dev;
+ xi->phydev = dev_get_by_index(net, p.link);
+ if (!xi->phydev)
+ return -ENODEV;
+
+ err = xfrmi_create(dev);
+ if (err < 0)
+ dev_put(xi->phydev);
+ return err;
}
static void xfrmi_dellink(struct net_device *dev, struct list_head *head)
@@@ -676,8 -717,9 +676,8 @@@ static int xfrmi_changelink(struct net_
xfrmi_netlink_parms(data, &xi->p);
- xi = xfrmi_locate(net, &xi->p, 0);
-
- if (IS_ERR_OR_NULL(xi)) {
+ xi = xfrmi_locate(net, &xi->p);
+ if (!xi) {
xi = netdev_priv(dev);
} else {
if (xi->dev != dev)
@@@ -751,11 -793,6 +751,6 @@@ static void __net_exit xfrmi_destroy_in
unregister_netdevice_many(&list);
}
- static int __net_init xfrmi_init_net(struct net *net)
- {
- return 0;
- }
-
static void __net_exit xfrmi_exit_net(struct net *net)
{
struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
@@@ -766,7 -803,6 +761,6 @@@
}
static struct pernet_operations xfrmi_net_ops = {
- .init = xfrmi_init_net,
.exit = xfrmi_exit_net,
.id = &xfrmi_net_id,
.size = sizeof(struct xfrmi_net),
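
[editor's note] The xfrm_interface.c rework moves creation out of xfrmi_locate(): the lookup now returns a plain match or NULL (no ERR_PTR juggling), and xfrmi_newlink() itself reports -EEXIST or -ENODEV. A condensed, purely illustrative model of the new flow:

#include <stdio.h>
#include <stddef.h>
#include <errno.h>

struct toy_xi { unsigned int if_id; struct toy_xi *next; };

static struct toy_xi *toy_locate(struct toy_xi *head, unsigned int if_id)
{
	for (; head; head = head->next)
		if (head->if_id == if_id)
			return head;	/* plain pointer, no ERR_PTR */
	return NULL;
}

static int toy_newlink(struct toy_xi **head, struct toy_xi *xi)
{
	if (toy_locate(*head, xi->if_id))
		return -EEXIST;		/* caller-visible, as in the diff */
	xi->next = *head;
	*head = xi;
	return 0;
}

int main(void)
{
	struct toy_xi *head = NULL;
	struct toy_xi a = { 1, NULL }, b = { 1, NULL };

	printf("%d %d\n", toy_newlink(&head, &a), toy_newlink(&head, &b));
	return 0;	/* prints: 0 -17 */
}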
diff --combined net/xfrm/xfrm_policy.c
index 4fb58dfecc7a,1070dfece76b..8ca637a72697
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@@ -585,6 -585,9 +585,6 @@@ static void xfrm_bydst_resize(struct ne
odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
lockdep_is_held(&net->xfrm.xfrm_policy_lock));
- odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
- lockdep_is_held(&net->xfrm.xfrm_policy_lock));
-
for (i = hmask; i >= 0; i--)
xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
@@@ -1277,17 -1280,13 +1277,17 @@@ static void xfrm_hash_rebuild(struct wo
hlist_for_each_entry_safe(policy, n,
&net->xfrm.policy_inexact[dir],
- bydst_inexact_list)
+ bydst_inexact_list) {
+ hlist_del_rcu(&policy->bydst);
hlist_del_init(&policy->bydst_inexact_list);
+ }
hmask = net->xfrm.policy_bydst[dir].hmask;
odst = net->xfrm.policy_bydst[dir].table;
- for (i = hmask; i >= 0; i--)
- INIT_HLIST_HEAD(odst + i);
+ for (i = hmask; i >= 0; i--) {
+ hlist_for_each_entry_safe(policy, n, odst + i, bydst)
+ hlist_del_rcu(&policy->bydst);
+ }
if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
/* dir out => dst = remote, src = local */
net->xfrm.policy_bydst[dir].dbits4 = rbits4;
@@@ -1316,6 -1315,8 +1316,6 @@@
chain = policy_hash_bysel(net, &policy->selector,
policy->family, dir);
- hlist_del_rcu(&policy->bydst);
-
if (!chain) {
void *p = xfrm_policy_inexact_insert(policy, dir, 0);
@@@ -3627,7 -3628,7 +3627,7 @@@ int __xfrm_policy_check(struct sock *sk
}
xfrm_nr = ti;
if (npols > 1) {
- xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
+ xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
tpp = stp;
}
diff --combined samples/bpf/xdp_redirect_user.c
index 003c0c6e38c5,39de06f3ec25..5440cd620607
--- a/samples/bpf/xdp_redirect_user.c
+++ b/samples/bpf/xdp_redirect_user.c
@@@ -10,13 -10,14 +10,14 @@@
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
+ #include <net/if.h>
#include <unistd.h>
#include <libgen.h>
#include <sys/resource.h>
#include "bpf_util.h"
#include <bpf/bpf.h>
- #include "bpf/libbpf.h"
+ #include "libbpf.h"
static int ifindex_in;
static int ifindex_out;
@@@ -85,7 -86,7 +86,7 @@@ static void poll_stats(int interval, in
static void usage(const char *prog)
{
fprintf(stderr,
- "usage: %s [OPTS] IFINDEX_IN IFINDEX_OUT\n\n"
+ "usage: %s [OPTS] <IFNAME|IFINDEX>_IN <IFNAME|IFINDEX>_OUT\n\n"
"OPTS:\n"
" -S use skb-mode\n"
" -N enforce native mode\n"
@@@ -128,7 -129,7 +129,7 @@@ int main(int argc, char **argv
}
if (optind == argc) {
- printf("usage: %s IFINDEX_IN IFINDEX_OUT\n", argv[0]);
+ printf("usage: %s <IFNAME|IFINDEX>_IN <IFNAME|IFINDEX>_OUT\n", argv[0]);
return 1;
}
@@@ -137,8 -138,14 +138,14 @@@
return 1;
}
- ifindex_in = strtoul(argv[optind], NULL, 0);
- ifindex_out = strtoul(argv[optind + 1], NULL, 0);
+ ifindex_in = if_nametoindex(argv[optind]);
+ if (!ifindex_in)
+ ifindex_in = strtoul(argv[optind], NULL, 0);
+
+ ifindex_out = if_nametoindex(argv[optind + 1]);
+ if (!ifindex_out)
+ ifindex_out = strtoul(argv[optind + 1], NULL, 0);
+
printf("input: %d output: %d\n", ifindex_in, ifindex_out);
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
@@@ -189,7 -196,7 +196,7 @@@
}
memset(&info, 0, sizeof(info));
- ret = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ ret = bpf_obj_get_info_by_fd(dummy_prog_fd, &info, &info_len);
if (ret) {
printf("can't get prog info - %s\n", strerror(errno));
return ret;
diff --combined tools/bpf/bpftool/cgroup.c
index a13fb7265d1a,390b89a224f1..f3c05b08c68c
--- a/tools/bpf/bpftool/cgroup.c
+++ b/tools/bpf/bpftool/cgroup.c
@@@ -26,7 -26,8 +26,8 @@@
" sock_ops | device | bind4 | bind6 |\n" \
" post_bind4 | post_bind6 | connect4 |\n" \
" connect6 | sendmsg4 | sendmsg6 |\n" \
- " recvmsg4 | recvmsg6 | sysctl }"
+ " recvmsg4 | recvmsg6 | sysctl |\n" \
+ " getsockopt | setsockopt }"
static const char * const attach_type_strings[] = {
[BPF_CGROUP_INET_INGRESS] = "ingress",
@@@ -45,6 -46,8 +46,8 @@@
[BPF_CGROUP_SYSCTL] = "sysctl",
[BPF_CGROUP_UDP4_RECVMSG] = "recvmsg4",
[BPF_CGROUP_UDP6_RECVMSG] = "recvmsg6",
+ [BPF_CGROUP_GETSOCKOPT] = "getsockopt",
+ [BPF_CGROUP_SETSOCKOPT] = "setsockopt",
[__MAX_BPF_ATTACH_TYPE] = NULL,
};
@@@ -168,7 -171,7 +171,7 @@@ static int do_show(int argc, char **arg
cgroup_fd = open(argv[0], O_RDONLY);
if (cgroup_fd < 0) {
- p_err("can't open cgroup %s", argv[1]);
+ p_err("can't open cgroup %s", argv[0]);
goto exit;
}
@@@ -356,7 -359,7 +359,7 @@@ static int do_attach(int argc, char **a
cgroup_fd = open(argv[0], O_RDONLY);
if (cgroup_fd < 0) {
- p_err("can't open cgroup %s", argv[1]);
+ p_err("can't open cgroup %s", argv[0]);
goto exit;
}
@@@ -414,7 -417,7 +417,7 @@@ static int do_detach(int argc, char **a
cgroup_fd = open(argv[0], O_RDONLY);
if (cgroup_fd < 0) {
- p_err("can't open cgroup %s", argv[1]);
+ p_err("can't open cgroup %s", argv[0]);
goto exit;
}
diff --combined tools/include/uapi/linux/bpf.h
index 29a5bc3d5c66,cecf42c871d4..387d28252af6
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@@ -170,6 -170,7 +170,7 @@@ enum bpf_prog_type
BPF_PROG_TYPE_FLOW_DISSECTOR,
BPF_PROG_TYPE_CGROUP_SYSCTL,
BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
+ BPF_PROG_TYPE_CGROUP_SOCKOPT,
};
enum bpf_attach_type {
@@@ -194,6 -195,8 +195,8 @@@
BPF_CGROUP_SYSCTL,
BPF_CGROUP_UDP4_RECVMSG,
BPF_CGROUP_UDP6_RECVMSG,
+ BPF_CGROUP_GETSOCKOPT,
+ BPF_CGROUP_SETSOCKOPT,
__MAX_BPF_ATTACH_TYPE
};
@@@ -262,6 -265,24 +265,24 @@@
*/
#define BPF_F_ANY_ALIGNMENT (1U << 1)
+ /* BPF_F_TEST_RND_HI32 is used in BPF_PROG_LOAD command for testing purpose.
+ * Verifier does sub-register def/use analysis and identifies instructions whose
+ * def only matters for low 32-bit, high 32-bit is never referenced later
+ * through implicit zero extension. Therefore verifier notifies JIT back-ends
+ * that it is safe to ignore clearing high 32-bit for these instructions. This
+ * saves some back-ends a lot of code-gen. However such optimization is not
+ * necessary on some arches, for example x86_64, arm64 etc, whose JIT back-ends
+ * hence hasn't used verifier's analysis result. But, we really want to have a
+ * way to be able to verify the correctness of the described optimization on
+ * x86_64 on which testsuites are frequently exercised.
+ *
+ * So, this flag is introduced. Once it is set, verifier will randomize high
+ * 32-bit for those instructions who has been identified as safe to ignore them.
+ * Then, if verifier is not doing correct analysis, such randomization will
+ * regress tests to expose bugs.
+ */
+ #define BPF_F_TEST_RND_HI32 (1U << 2)
+
/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
* two extensions:
*
@@@ -1746,6 -1767,7 +1767,7 @@@ union bpf_attr
* * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out)
* * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
* * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
+ * * **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT)
*
* Therefore, this function can be used to clear a callback flag by
* setting the appropriate bit to zero. e.g. to disable the RTO
@@@ -2674,6 -2696,20 +2696,20 @@@
* 0 on success.
*
* **-ENOENT** if the bpf-local-storage cannot be found.
+ *
+ * int bpf_send_signal(u32 sig)
+ * Description
+ * Send signal *sig* to the current task.
+ * Return
+ * 0 on success or successfully queued.
+ *
+ * **-EBUSY** if work queue under nmi is full.
+ *
+ * **-EINVAL** if *sig* is invalid.
+ *
+ * **-EPERM** if no permission to send the *sig*.
+ *
+ * **-EAGAIN** if bpf program can try again.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@@ -2784,7 -2820,8 +2820,8 @@@
FN(strtol), \
FN(strtoul), \
FN(sk_storage_get), \
- FN(sk_storage_delete),
+ FN(sk_storage_delete), \
+ FN(send_signal),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@@ -3033,6 -3070,12 +3070,12 @@@ struct bpf_tcp_sock
* sum(delta(snd_una)), or how many bytes
* were acked.
*/
+ __u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups
+ * total number of DSACK blocks received
+ */
+ __u32 delivered; /* Total data packets delivered incl. rexmits */
+ __u32 delivered_ce; /* Like the above but only ECE marked packets */
+ __u32 icsk_retransmits; /* Number of unrecovered [RTO] timeouts */
};
struct bpf_sock_tuple {
@@@ -3052,6 -3095,10 +3095,10 @@@
};
};
+ struct bpf_xdp_sock {
+ __u32 queue_id;
+ };
+
#define XDP_PACKET_HEADROOM 256
/* User return codes for XDP prog type.
@@@ -3143,7 -3190,6 +3190,7 @@@ struct bpf_prog_info
char name[BPF_OBJ_NAME_LEN];
__u32 ifindex;
__u32 gpl_compatible:1;
+ __u32 :31; /* alignment pad */
__u64 netns_dev;
__u64 netns_ino;
__u32 nr_jited_ksyms;
@@@ -3213,6 -3259,7 +3260,7 @@@ struct bpf_sock_addr
__u32 msg_src_ip6[4]; /* Allows 1,2,4-byte read an 4-byte write.
* Stored in network byte order.
*/
+ __bpf_md_ptr(struct bpf_sock *, sk);
};
/* User bpf_sock_ops struct to access socket values and specify request ops
@@@ -3264,13 -3311,15 +3312,15 @@@ struct bpf_sock_ops
__u32 sk_txhash;
__u64 bytes_received;
__u64 bytes_acked;
+ __bpf_md_ptr(struct bpf_sock *, sk);
};
/* Definitions for bpf_sock_ops_cb_flags */
#define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0)
#define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1)
#define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2)
- #define BPF_SOCK_OPS_ALL_CB_FLAGS 0x7 /* Mask of all currently
+ #define BPF_SOCK_OPS_RTT_CB_FLAG (1<<3)
+ #define BPF_SOCK_OPS_ALL_CB_FLAGS 0xF /* Mask of all currently
* supported cb flags
*/
@@@ -3325,6 -3374,8 +3375,8 @@@ enum
BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after
* socket transition to LISTEN state.
*/
+ BPF_SOCK_OPS_RTT_CB, /* Called on every RTT.
+ */
};
/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
@@@ -3503,4 -3554,15 +3555,15 @@@ struct bpf_sysctl
*/
};
+ struct bpf_sockopt {
+ __bpf_md_ptr(struct bpf_sock *, sk);
+ __bpf_md_ptr(void *, optval);
+ __bpf_md_ptr(void *, optval_end);
+
+ __s32 level;
+ __s32 optname;
+ __s32 optlen;
+ __s32 retval;
+ };
+
#endif /* _UAPI__LINUX_BPF_H__ */
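
[editor's note] The uapi additions above introduce BPF_PROG_TYPE_CGROUP_SOCKOPT with the struct bpf_sockopt context. As a hedged sketch only — the section name and return convention (1 allows, 0 fails the syscall with EPERM) follow the kernel selftests of this era, and bpf_helpers.h is assumed from the selftests tree — a minimal setsockopt filter might look like:

#include <linux/bpf.h>
#include <netinet/in.h>
#include "bpf_helpers.h"

SEC("cgroup/setsockopt")
int block_ip_options(struct bpf_sockopt *ctx)
{
	/* Reject attempts to set IP options; allow everything else. */
	if (ctx->level == IPPROTO_IP && ctx->optname == IP_OPTIONS)
		return 0;	/* caller sees -EPERM */
	return 1;		/* run the kernel's setsockopt as usual */
}

char _license[] SEC("license") = "GPL";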
diff --combined tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
index e2f6ed0a583d,7c7cb3177463..a334a0e882e4
--- a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
+++ b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
@@@ -6,13 -6,6 +6,6 @@@
#include "bpf_helpers.h"
#include "bpf_endian.h"
- #define bpf_printk(fmt, ...) \
- ({ \
- char ____fmt[] = fmt; \
- bpf_trace_printk(____fmt, sizeof(____fmt), \
- ##__VA_ARGS__); \
- })
-
/* Packet parsing state machine helpers. */
#define cursor_advance(_cursor, _len) \
({ void *_tmp = _cursor; _cursor += _len; _tmp; })
@@@ -61,7 -54,7 +54,7 @@@ struct sr6_tlv_t
unsigned char value[0];
} BPF_PACKET_HEADER;
-__attribute__((always_inline)) struct ip6_srh_t *get_srh(struct __sk_buff *skb)
+static __always_inline struct ip6_srh_t *get_srh(struct __sk_buff *skb)
{
void *cursor, *data_end;
struct ip6_srh_t *srh;
@@@ -95,7 -88,7 +88,7 @@@
return srh;
}
-__attribute__((always_inline))
+static __always_inline
int update_tlv_pad(struct __sk_buff *skb, uint32_t new_pad,
uint32_t old_pad, uint32_t pad_off)
{
@@@ -125,7 -118,7 +118,7 @@@
return 0;
}
-__attribute__((always_inline))
+static __always_inline
int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh,
uint32_t *tlv_off, uint32_t *pad_size,
uint32_t *pad_off)
@@@ -184,7 -177,7 +177,7 @@@
return 0;
}
-__attribute__((always_inline))
+static __always_inline
int add_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh, uint32_t tlv_off,
struct sr6_tlv_t *itlv, uint8_t tlv_size)
{
@@@ -228,7 -221,7 +221,7 @@@
return update_tlv_pad(skb, new_pad, pad_size, pad_off);
}
-__attribute__((always_inline))
+static __always_inline
int delete_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh,
uint32_t tlv_off)
{
@@@ -266,7 -259,7 +259,7 @@@
return update_tlv_pad(skb, new_pad, pad_size, pad_off);
}
-__attribute__((always_inline))
+static __always_inline
int has_egr_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh)
{
int tlv_offset = sizeof(struct ip6_t) + sizeof(struct ip6_srh_t) +
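
[editor's note] The seg6local test converts bare __attribute__((always_inline)) helper definitions to static __always_inline. Without static, the compiler may still emit an out-of-line global copy of the helper, which a BPF object cannot carry; making the helpers static keeps every call inlined. A sketch of the preferred spelling (the guard is illustrative; bpf_helpers.h normally provides the macro):

#ifndef __always_inline
#define __always_inline inline __attribute__((always_inline))
#endif

static __always_inline unsigned int toy_double(unsigned int x)
{
	return x * 2;	/* inlined into each caller, no external copy */
}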
diff --combined tools/testing/selftests/net/config
index 3dea2cba2325,e4b878d95ba0..d98a17fc0837
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@@ -25,4 -25,6 +25,7 @@@ CONFIG_NF_TABLES_IPV6=
CONFIG_NF_TABLES_IPV4=y
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_KALLSYMS=y
+ CONFIG_NET_SCH_FQ=m
+ CONFIG_NET_SCH_ETF=m
+ CONFIG_TEST_BLACKHOLE_DEV=m
diff --combined tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
index 16559c436f21,8135778040c2..0f89cd50a94b
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
@@@ -6,6 -6,9 +6,9 @@@
"filter",
"u32"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 ingress"
],
@@@ -25,6 -28,9 +28,9 @@@
"filter",
"matchall"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV1 clsact",
"$TC filter add dev $DEV1 protocol all pref 1 ingress handle 0x1234 matchall action ok"
@@@ -39,31 -45,15 +45,34 @@@
]
},
{
+ "id": "2ff3",
+ "name": "Add flower with max handle and then dump it",
+ "category": [
+ "filter",
+ "flower"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV2 ingress"
+ ],
+ "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip pref 1 parent ffff: handle 0xffffffff flower action ok",
+ "expExitCode": "0",
+ "verifyCmd": "$TC filter show dev $DEV2 ingress",
+ "matchPattern": "filter protocol ip pref 1 flower.*handle 0xffffffff",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress"
+ ]
+ },
+ {
"id": "d052",
"name": "Add 1M filters with the same action",
"category": [
"filter",
"flower"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV2 ingress",
"./tdc_batch.py $DEV2 $BATCH_FILE --share_action -n 1000000"
@@@ -85,6 -75,9 +94,9 @@@
"filter",
"flower"
],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
"setup": [
"$TC qdisc add dev $DEV2 ingress",
"$TC filter add dev $DEV2 protocol ip prio 1 parent ffff: flower dst_mac e4:11:22:11:4a:51 src_mac e4:11:22:11:4a:50 ip_proto tcp src_ip 1.1.1.1 dst_ip 2.2.2.2 action drop"
--
LinuxNextTracking