The following commit has been merged in the master branch:

commit 469d5bf1e42008f13e5b68a6b473b468edc3b35a
Merge: fe088f24267d9b41a7345aae965d1850b1b7c99e 954a5a029472568845a25cd1c59e02e09db3316c
Author: Stephen Rothwell <sfr@canb.auug.org.au>
Date:   Mon Jul 1 14:14:34 2019 +1000

    Merge remote-tracking branch 'net-next/master'
diff --combined MAINTAINERS index 172e00148847,b4304d10f14e..a2a392b100e4 --- a/MAINTAINERS +++ b/MAINTAINERS @@@ -668,13 -668,6 +668,13 @@@ S: Maintaine F: Documentation/i2c/busses/i2c-ali1563 F: drivers/i2c/busses/i2c-ali1563.c
+ALLEGRO DVT VIDEO IP CORE DRIVER +M: Michael Tretter m.tretter@pengutronix.de +R: Pengutronix Kernel Team kernel@pengutronix.de +L: linux-media@vger.kernel.org +S: Maintained +F: drivers/staging/media/allegro-dvt/ + ALLWINNER SECURITY SYSTEM M: Corentin Labbe clabbe.montjoie@gmail.com L: linux-crypto@vger.kernel.org @@@ -917,7 -910,7 +917,7 @@@ F: drivers/iio/adc/ad7768-1. F: Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.txt
ANALOG DEVICES INC AD9389B DRIVER -M: Hans Verkuil hans.verkuil@cisco.com +M: Hans Verkuil hverkuil-cisco@xs4all.nl L: linux-media@vger.kernel.org S: Maintained F: drivers/media/i2c/ad9389b* @@@ -949,19 -942,19 +949,19 @@@ S: Maintaine F: drivers/media/i2c/adv748x/*
ANALOG DEVICES INC ADV7511 DRIVER -M: Hans Verkuil hans.verkuil@cisco.com +M: Hans Verkuil hverkuil-cisco@xs4all.nl L: linux-media@vger.kernel.org S: Maintained F: drivers/media/i2c/adv7511*
ANALOG DEVICES INC ADV7604 DRIVER -M: Hans Verkuil hans.verkuil@cisco.com +M: Hans Verkuil hverkuil-cisco@xs4all.nl L: linux-media@vger.kernel.org S: Maintained F: drivers/media/i2c/adv7604*
ANALOG DEVICES INC ADV7842 DRIVER -M: Hans Verkuil hans.verkuil@cisco.com +M: Hans Verkuil hverkuil-cisco@xs4all.nl L: linux-media@vger.kernel.org S: Maintained F: drivers/media/i2c/adv7842* @@@ -1147,6 -1140,15 +1147,15 @@@ L: linux-media@vger.kernel.or S: Maintained F: drivers/media/i2c/aptina-pll.*
+ AQUANTIA ETHERNET DRIVER (atlantic) + M: Igor Russkikh igor.russkikh@aquantia.com + L: netdev@vger.kernel.org + S: Supported + W: http://www.aquantia.com + Q: http://patchwork.ozlabs.org/project/netdev/list/ + F: drivers/net/ethernet/aquantia/atlantic/ + F: Documentation/networking/device_drivers/aquantia/atlantic.txt + ARC FRAMEBUFFER DRIVER M: Jaya Kumar jayalk@intworks.biz S: Maintained @@@ -1238,7 -1240,7 +1247,7 @@@ F: include/uapi/drm/panfrost_drm. ARM MFM AND FLOPPY DRIVERS M: Ian Molton spyro@f2s.com S: Maintained -F: arch/arm/lib/floppydma.S +F: arch/arm/mach-rpc/floppydma.S F: arch/arm/include/asm/floppy.h
ARM PMU PROFILING AND DEBUGGING @@@ -1831,7 -1833,6 +1840,7 @@@ F: arch/arm/mach-orion5x F: arch/arm/plat-orion/ F: arch/arm/boot/dts/dove* F: arch/arm/boot/dts/orion5x* +T: git git://git.infradead.org/linux-mvebu.git
ARM/Marvell Kirkwood and Armada 370, 375, 38x, 39x, XP, 3700, 7K/8K SOC support M: Jason Cooper jason@lakedaemon.net @@@ -1852,7 -1853,6 +1861,7 @@@ F: drivers/irqchip/irq-armada-370-xp. F: drivers/irqchip/irq-mvebu-* F: drivers/pinctrl/mvebu/ F: drivers/rtc/rtc-armada38x.c +T: git git://git.infradead.org/linux-mvebu.git
ARM/Mediatek RTC DRIVER M: Eddie Huang eddie.huang@mediatek.com @@@ -2059,6 -2059,7 +2068,6 @@@ S: Maintaine
ARM/QUALCOMM SUPPORT M: Andy Gross agross@kernel.org -M: David Brown david.brown@linaro.org L: linux-arm-msm@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/soc/qcom/ @@@ -2080,7 -2081,7 +2089,7 @@@ F: drivers/i2c/busses/i2c-qup. F: drivers/i2c/busses/i2c-qcom-geni.c F: drivers/mfd/ssbi.c F: drivers/mmc/host/mmci_qcom* -F: drivers/mmc/host/sdhci_msm.c +F: drivers/mmc/host/sdhci-msm.c F: drivers/pci/controller/dwc/pcie-qcom.c F: drivers/phy/qualcomm/ F: drivers/power/*/msm* @@@ -2352,7 -2353,7 +2361,7 @@@ L: linux-arm-kernel@lists.infradead.or S: Maintained
ARM/TEGRA HDMI CEC SUBSYSTEM SUPPORT -M: Hans Verkuil hans.verkuil@cisco.com +M: Hans Verkuil hverkuil-cisco@xs4all.nl L: linux-tegra@vger.kernel.org L: linux-media@vger.kernel.org S: Maintained @@@ -3687,7 -3688,7 +3696,7 @@@ F: drivers/crypto/ccree W: https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell...
CEC FRAMEWORK -M: Hans Verkuil hans.verkuil@cisco.com +M: Hans Verkuil hverkuil-cisco@xs4all.nl L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: http://linuxtv.org @@@ -3704,7 -3705,7 +3713,7 @@@ F: Documentation/devicetree/bindings/me F: Documentation/ABI/testing/debugfs-cec-error-inj
CEC GPIO DRIVER -M: Hans Verkuil hans.verkuil@cisco.com +M: Hans Verkuil hverkuil-cisco@xs4all.nl L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: http://linuxtv.org @@@ -3896,7 -3897,7 +3905,7 @@@ F: Documentation/devicetree/bindings/hw F: Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.txt F: Documentation/devicetree/bindings/regulator/cirrus,lochnagar.txt F: Documentation/devicetree/bindings/sound/cirrus,lochnagar.txt -F: Documentation/hwmon/lochnagar +F: Documentation/hwmon/lochnagar.rst
CISCO FCOE HBA DRIVER M: Satish Kharat satishkh@cisco.com @@@ -3950,14 -3951,6 +3959,14 @@@ M: Miguel Ojeda <miguel.ojeda.sandonis@ S: Maintained F: .clang-format
+CLANG/LLVM BUILD SUPPORT +L: clang-built-linux@googlegroups.com +W: https://clangbuiltlinux.github.io/ +B: https://github.com/ClangBuiltLinux/linux/issues +C: irc://chat.freenode.net/clangbuiltlinux +S: Supported +K: \b(?i:clang|llvm)\b + CLEANCACHE API M: Konrad Rzeszutek Wilk konrad.wilk@oracle.com L: linux-kernel@vger.kernel.org @@@ -3988,7 -3981,7 +3997,7 @@@ S: Supporte F: drivers/platform/x86/classmate-laptop.c
COBALT MEDIA DRIVER -M: Hans Verkuil hans.verkuil@cisco.com +M: Hans Verkuil hverkuil-cisco@xs4all.nl L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org @@@ -4807,7 -4800,7 +4816,7 @@@ S: Maintaine W: http://plugable.com/category/projects/udlfb/ F: drivers/video/fbdev/udlfb.c F: include/video/udlfb.h -F: Documentation/fb/udlfb.txt +F: Documentation/fb/udlfb.rst
DISTRIBUTED LOCK MANAGER (DLM) M: Christine Caulfield ccaulfie@redhat.com @@@ -4940,13 -4933,6 +4949,6 @@@ L: linux-kernel@vger.kernel.or S: Maintained F: drivers/staging/fsl-dpaa2/ethsw
- DPAA2 PTP CLOCK DRIVER - M: Yangbo Lu yangbo.lu@nxp.com - L: netdev@vger.kernel.org - S: Maintained - F: drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp* - F: drivers/net/ethernet/freescale/dpaa2/dprtc* - DPT_I2O SCSI RAID DRIVER M: Adaptec OEM Raid Solutions aacraid@microsemi.com L: linux-scsi@vger.kernel.org @@@ -5618,7 -5604,8 +5620,8 @@@ F: include/linux/dynamic_debug. DYNAMIC INTERRUPT MODERATION M: Tal Gilboa talgi@mellanox.com S: Maintained - F: include/linux/net_dim.h + F: include/linux/dim.h + F: lib/dim/
DZ DECSTATION DZ11 SERIAL DRIVER M: "Maciej W. Rozycki" macro@linux-mips.org @@@ -6268,7 -6255,7 +6271,7 @@@ FPGA DFL DRIVER M: Wu Hao hao.wu@intel.com L: linux-fpga@vger.kernel.org S: Maintained -F: Documentation/fpga/dfl.txt +F: Documentation/fpga/dfl.rst F: include/uapi/linux/fpga-dfl.h F: drivers/fpga/dfl*
@@@ -6345,13 -6332,6 +6348,13 @@@ L: linux-i2c@vger.kernel.or S: Maintained F: drivers/i2c/busses/i2c-cpm.c
+FREESCALE IMX DDR PMU DRIVER +M: Frank Li Frank.li@nxp.com +L: linux-arm-kernel@lists.infradead.org +S: Maintained +F: drivers/perf/fsl_imx8_ddr_perf.c +F: Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt + FREESCALE IMX LPI2C DRIVER M: Dong Aisheng aisheng.dong@nxp.com L: linux-i2c@vger.kernel.org @@@ -6395,6 -6375,8 +6398,8 @@@ FREESCALE QORIQ PTP CLOCK DRIVE M: Yangbo Lu yangbo.lu@nxp.com L: netdev@vger.kernel.org S: Maintained + F: drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp* + F: drivers/net/ethernet/freescale/dpaa2/dprtc* F: drivers/net/ethernet/freescale/enetc/enetc_ptp.c F: drivers/ptp/ptp_qoriq.c F: drivers/ptp/ptp_qoriq_debugfs.c @@@ -6440,7 -6422,6 +6445,7 @@@ M: Li Yang <leoyang.li@nxp.com L: linuxppc-dev@lists.ozlabs.org L: linux-arm-kernel@lists.infradead.org S: Maintained +F: Documentation/devicetree/bindings/misc/fsl,dpaa2-console.txt F: Documentation/devicetree/bindings/soc/fsl/ F: drivers/soc/fsl/ F: include/linux/fsl/ @@@ -6483,7 -6464,7 +6488,7 @@@ M: "Rafael J. Wysocki" <rjw@rjwysocki.n M: Pavel Machek pavel@ucw.cz L: linux-pm@vger.kernel.org S: Supported -F: Documentation/power/freezing-of-tasks.txt +F: Documentation/power/freezing-of-tasks.rst F: include/linux/freezer.h F: kernel/freezer.c
@@@ -6716,9 -6697,7 +6721,7 @@@ M: Paul Bolle <pebolle@tiscali.nl L: gigaset307x-common@lists.sourceforge.net W: http://gigaset307x.sourceforge.net/ S: Odd Fixes - F: Documentation/isdn/README.gigaset - F: drivers/isdn/gigaset/ - F: include/uapi/linux/gigaset_dev.h + F: drivers/staging/isdn/gigaset/
GNSS SUBSYSTEM M: Johan Hovold johan@kernel.org @@@ -6730,7 -6709,7 +6733,7 @@@ F: drivers/gnss F: include/linux/gnss.h
GO7007 MPEG CODEC -M: Hans Verkuil hans.verkuil@cisco.com +M: Hans Verkuil hverkuil-cisco@xs4all.nl L: linux-media@vger.kernel.org S: Maintained F: drivers/media/usb/go7007/ @@@ -7034,7 -7013,7 +7037,7 @@@ F: drivers/media/usb/hdpvr HEWLETT PACKARD ENTERPRISE ILO NMI WATCHDOG DRIVER M: Jerry Hoemann jerry.hoemann@hpe.com S: Supported -F: Documentation/watchdog/hpwdt.txt +F: Documentation/watchdog/hpwdt.rst F: drivers/watchdog/hpwdt.c
HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa) @@@ -7217,7 -7196,7 +7220,7 @@@ F: drivers/net/ethernet/hp/hp100. HPET: High Precision Event Timers driver M: Clemens Ladisch clemens@ladisch.de S: Maintained -F: Documentation/timers/hpet.txt +F: Documentation/timers/hpet.rst F: drivers/char/hpet.c F: include/linux/hpet.h F: include/uapi/linux/hpet.h @@@ -7635,7 -7614,7 +7638,7 @@@ IDE/ATAPI DRIVER M: Borislav Petkov bp@alien8.de L: linux-ide@vger.kernel.org S: Maintained -F: Documentation/cdrom/ide-cd +F: Documentation/cdrom/ide-cd.rst F: drivers/ide/ide-cd*
IDEAPAD LAPTOP EXTRAS DRIVER @@@ -7826,34 -7805,7 +7829,34 @@@ INGENIC JZ4780 NAND DRIVE M: Harvey Hunt harveyhuntnexus@gmail.com L: linux-mtd@lists.infradead.org S: Maintained -F: drivers/mtd/nand/raw/jz4780_* +F: drivers/mtd/nand/raw/ingenic/ + +INGENIC JZ47xx SoCs +M: Paul Cercueil paul@crapouillou.net +S: Maintained +F: arch/mips/boot/dts/ingenic/ +F: arch/mips/include/asm/mach-jz4740/ +F: arch/mips/jz4740/ +F: drivers/clk/ingenic/ +F: drivers/dma/dma-jz4780.c +F: drivers/gpu/drm/ingenic/ +F: drivers/i2c/busses/i2c-jz4780.c +F: drivers/iio/adc/ingenic-adc.c +F: drivers/irqchip/irq-ingenic.c +F: drivers/memory/jz4780-nemc.c +F: drivers/mmc/host/jz4740_mmc.c +F: drivers/mtd/nand/raw/ingenic/ +F: drivers/pinctrl/pinctrl-ingenic.c +F: drivers/power/supply/ingenic-battery.c +F: drivers/pwm/pwm-jz4740.c +F: drivers/rtc/rtc-jz4740.c +F: drivers/tty/serial/8250/8250_ingenic.c +F: drivers/usb/musb/jz4740.c +F: drivers/watchdog/jz4740_wdt.c +F: include/dt-bindings/iio/adc/ingenic,adc.h +F: include/linux/mfd/ingenic-tcu.h +F: sound/soc/jz4740/ +F: sound/soc/codecs/jz47*
INOTIFY M: Jan Kara jack@suse.cz @@@ -7975,7 -7927,7 +7978,7 @@@ INTEL FRAMEBUFFER DRIVER (excluding 81 M: Maik Broemme mbroemme@libmpq.org L: linux-fbdev@vger.kernel.org S: Maintained -F: Documentation/fb/intelfb.txt +F: Documentation/fb/intelfb.rst F: drivers/video/fbdev/intelfb/
INTEL GPIO DRIVERS @@@ -8427,18 -8379,26 +8430,26 @@@ S: Supporte W: http://www.linux-iscsi.org F: drivers/infiniband/ulp/isert
- ISDN SUBSYSTEM + ISDN/mISDN SUBSYSTEM M: Karsten Keil isdn@linux-pingi.de L: isdn4linux@listserv.isdn4linux.de (subscribers-only) L: netdev@vger.kernel.org W: http://www.isdn4linux.de - T: git git://git.kernel.org/pub/scm/linux/kernel/git/kkeil/isdn-2.6.git S: Maintained + F: drivers/isdn/mISDN + F: drivers/isdn/hardware + + ISDN/CAPI SUBSYSTEM + M: Karsten Keil isdn@linux-pingi.de + L: isdn4linux@listserv.isdn4linux.de (subscribers-only) + L: netdev@vger.kernel.org + W: http://www.isdn4linux.de + S: Odd Fixes F: Documentation/isdn/ - F: drivers/isdn/ - F: include/linux/isdn.h + F: drivers/isdn/capi/ + F: drivers/staging/isdn/ + F: net/bluetooth/cmtp/ F: include/linux/isdn/ - F: include/uapi/linux/isdn.h F: include/uapi/linux/isdn/
IT87 HARDWARE MONITORING DRIVER @@@ -9688,17 -9648,6 +9699,17 @@@ L: linux-iio@vger.kernel.or S: Maintained F: drivers/iio/dac/cio-dac.c
+MEDIA CONTROLLER FRAMEWORK +M: Sakari Ailus sakari.ailus@linux.intel.com +M: Laurent Pinchart laurent.pinchart@ideasonboard.com +L: linux-media@vger.kernel.org +W: https://www.linuxtv.org +T: git git://linuxtv.org/media_tree.git +S: Supported +F: drivers/media/mc/ +F: include/media/media-*.h +F: include/uapi/linux/media.h + MEDIA DRIVERS FOR ASCOT2E M: Sergey Kozlov serjk@netup.ru M: Abylay Ospan aospan@netup.ru @@@ -10020,13 -9969,6 +10031,13 @@@ L: linux-wireless@vger.kernel.or S: Maintained F: drivers/net/wireless/mediatek/mt7601u/
+MEDIATEK MT7621/28/88 I2C DRIVER +M: Stefan Roese sr@denx.de +L: linux-i2c@vger.kernel.org +S: Maintained +F: drivers/i2c/busses/i2c-mt7621.c +F: Documentation/devicetree/bindings/i2c/i2c-mt7621.txt + MEDIATEK NAND CONTROLLER DRIVER M: Xiaolei Li xiaolei.li@mediatek.com L: linux-mtd@lists.infradead.org @@@ -10172,6 -10114,7 +10183,7 @@@ Q: http://patchwork.ozlabs.org/project/ S: Supported F: drivers/net/ethernet/mellanox/mlx5/core/ F: include/linux/mlx5/ + F: Documentation/networking/device_drivers/mellanox/
MELLANOX MLX5 IB driver M: Leon Romanovsky leonro@mellanox.com @@@ -10282,7 -10225,7 +10294,7 @@@ F: drivers/watchdog/menz69_wdt.
MESON AO CEC DRIVER FOR AMLOGIC SOCS M: Neil Armstrong narmstrong@baylibre.com -L: linux-media@lists.freedesktop.org +L: linux-media@vger.kernel.org L: linux-amlogic@lists.infradead.org W: http://linux-meson.com/ S: Supported @@@ -10298,14 -10241,6 +10310,14 @@@ S: Maintaine F: drivers/mtd/nand/raw/meson_* F: Documentation/devicetree/bindings/mtd/amlogic,meson-nand.txt
+MESON VIDEO DECODER DRIVER FOR AMLOGIC SOCS +M: Maxime Jourdan mjourdan@baylibre.com +L: linux-media@vger.kernel.org +L: linux-amlogic@lists.infradead.org +S: Supported +F: drivers/staging/media/meson/vdec/ +T: git git://linuxtv.org/media_tree.git + METHODE UDPU SUPPORT M: Vladimir Vid vladimir.vid@sartura.hr S: Maintained @@@ -10359,9 -10294,7 +10371,9 @@@ MICROCHIP ISC DRIVE M: Eugen Hristev eugen.hristev@microchip.com L: linux-media@vger.kernel.org S: Supported -F: drivers/media/platform/atmel/atmel-isc.c +F: drivers/media/platform/atmel/atmel-sama5d2-isc.c +F: drivers/media/platform/atmel/atmel-isc.h +F: drivers/media/platform/atmel/atmel-isc-base.c F: drivers/media/platform/atmel/atmel-isc-regs.h F: Documentation/devicetree/bindings/media/atmel-isc.txt
@@@ -10921,6 -10854,14 +10933,6 @@@ F: driver/net/net_failover. F: include/net/net_failover.h F: Documentation/networking/net_failover.rst
-NETEFFECT IWARP RNIC DRIVER (IW_NES) -M: Faisal Latif faisal.latif@intel.com -L: linux-rdma@vger.kernel.org -W: http://www.intel.com/Products/Server/Adapters/Server-Cluster/Server-Cluster-... -S: Supported -F: drivers/infiniband/hw/nes/ -F: include/uapi/rdma/nes-abi.h - NETEM NETWORK EMULATOR M: Stephen Hemminger stephen@networkplumber.org L: netem@lists.linux-foundation.org (moderated for non-subscribers) @@@ -10937,7 -10878,7 +10949,7 @@@ F: drivers/net/ethernet/neterion
NETFILTER M: Pablo Neira Ayuso pablo@netfilter.org - M: Jozsef Kadlecsik kadlec@blackhole.kfki.hu + M: Jozsef Kadlecsik kadlec@netfilter.org M: Florian Westphal fw@strlen.de L: netfilter-devel@vger.kernel.org L: coreteam@netfilter.org @@@ -11150,6 -11091,15 +11162,15 @@@ L: netdev@vger.kernel.or S: Supported F: drivers/net/ethernet/qlogic/netxen/
+ NEXTHOP + M: David Ahern dsahern@kernel.org + L: netdev@vger.kernel.org + S: Maintained + F: include/net/nexthop.h + F: include/uapi/linux/nexthop.h + F: include/net/netns/nexthop.h + F: net/ipv4/nexthop.c + NFC SUBSYSTEM L: netdev@vger.kernel.org S: Orphan @@@ -11353,7 -11303,7 +11374,7 @@@ NXP FXAS21002C DRIVE M: Rui Miguel Silva rmfrfs@gmail.com L: linux-iio@vger.kernel.org S: Maintained -F: Documentation/devicetree/bindings/iio/gyroscope/fxas21002c.txt +F: Documentation/devicetree/bindings/iio/gyroscope/nxp,fxas21002c.txt F: drivers/iio/gyro/fxas21002c_core.c F: drivers/iio/gyro/fxas21002c.h F: drivers/iio/gyro/fxas21002c_i2c.c @@@ -11755,13 -11705,11 +11776,13 @@@ F: drivers/scsi/st.
OP-TEE DRIVER M: Jens Wiklander jens.wiklander@linaro.org +L: tee-dev@lists.linaro.org S: Maintained F: drivers/tee/optee/
OP-TEE RANDOM NUMBER GENERATOR (RNG) DRIVER M: Sumit Garg sumit.garg@linaro.org +L: tee-dev@lists.linaro.org S: Maintained F: drivers/char/hw_random/optee-rng.c
@@@ -11848,7 -11796,7 +11869,7 @@@ S: Maintaine T: git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git F: drivers/opp/ F: include/linux/pm_opp.h -F: Documentation/power/opp.txt +F: Documentation/power/opp.rst F: Documentation/devicetree/bindings/opp/
OPL4 DRIVER @@@ -12227,7 -12175,7 +12248,7 @@@ M: Sam Bobroff <sbobroff@linux.ibm.com M: Oliver O'Halloran oohall@gmail.com L: linuxppc-dev@lists.ozlabs.org S: Supported -F: Documentation/PCI/pci-error-recovery.txt +F: Documentation/PCI/pci-error-recovery.rst F: drivers/pci/pcie/aer.c F: drivers/pci/pcie/dpc.c F: drivers/pci/pcie/err.c @@@ -12240,7 -12188,7 +12261,7 @@@ PCI ERROR RECOVER M: Linas Vepstas linasvepstas@gmail.com L: linux-pci@vger.kernel.org S: Supported -F: Documentation/PCI/pci-error-recovery.txt +F: Documentation/PCI/pci-error-recovery.rst
PCI MSI DRIVER FOR ALTERA MSI IP M: Ley Foon Tan lftan@altera.com @@@ -12733,7 -12681,7 +12754,7 @@@ M: Rodolfo Giometti <giometti@enneenne. W: http://wiki.enneenne.com/index.php/LinuxPPS_support L: linuxpps@ml.enneenne.com (subscribers-only) S: Maintained -F: Documentation/pps/ +F: Documentation/driver-api/pps.rst F: Documentation/devicetree/bindings/pps/pps-gpio.txt F: Documentation/ABI/testing/sysfs-pps F: drivers/pps/ @@@ -12839,7 -12787,7 +12860,7 @@@ L: netdev@vger.kernel.or S: Maintained W: http://linuxptp.sourceforge.net/ F: Documentation/ABI/testing/sysfs-ptp -F: Documentation/ptp/* +F: Documentation/driver-api/ptp.rst F: drivers/net/phy/dp83640* F: drivers/ptp/* F: include/linux/ptp_cl* @@@ -13127,7 -13075,7 +13148,7 @@@ M: Niklas Cassel <niklas.cassel@linaro. L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c -F: Documentation/devicetree/bindings/net/qcom,dwmac.txt +F: Documentation/devicetree/bindings/net/qcom,ethqos.txt
QUALCOMM GENERIC INTERFACE I2C DRIVER M: Alok Chauhan alokc@codeaurora.org @@@ -13571,11 -13519,11 +13592,11 @@@ S: Maintaine F: drivers/media/platform/rockchip/rga/ F: Documentation/devicetree/bindings/media/rockchip-rga.txt
-ROCKCHIP VPU CODEC DRIVER +HANTRO VPU CODEC DRIVER M: Ezequiel Garcia ezequiel@collabora.com L: linux-media@vger.kernel.org S: Maintained -F: drivers/staging/media/platform/rockchip/vpu/ +F: drivers/staging/media/platform/hantro/ F: Documentation/devicetree/bindings/media/rockchip-vpu.txt
ROCKER DRIVER @@@ -13776,7 -13724,7 +13797,7 @@@ L: linux-s390@vger.kernel.or L: kvm@vger.kernel.org S: Supported F: drivers/s390/cio/vfio_ccw* -F: Documentation/s390/vfio-ccw.txt +F: Documentation/s390/vfio-ccw.rst F: include/uapi/linux/vfio_ccw.h
S390 ZCRYPT DRIVER @@@ -13796,7 -13744,7 +13817,7 @@@ S: Supporte F: drivers/s390/crypto/vfio_ap_drv.c F: drivers/s390/crypto/vfio_ap_private.h F: drivers/s390/crypto/vfio_ap_ops.c -F: Documentation/s390/vfio-ap.txt +F: Documentation/s390/vfio-ap.rst
S390 ZFCP DRIVER M: Steffen Maier maier@linux.ibm.com @@@ -14433,7 -14381,7 +14454,7 @@@ M: Sudip Mukherjee <sudip.mukherjee@cod L: linux-fbdev@vger.kernel.org S: Maintained F: drivers/video/fbdev/sm712* -F: Documentation/fb/sm712fb.txt +F: Documentation/fb/sm712fb.rst
SIMPLE FIRMWARE INTERFACE (SFI) M: Len Brown lenb@kernel.org @@@ -14503,7 -14451,7 +14524,7 @@@ SIS FRAMEBUFFER DRIVE M: Thomas Winischhofer thomas@winischhofer.net W: http://www.winischhofer.net/linuxsisvga.shtml S: Maintained -F: Documentation/fb/sisfb.txt +F: Documentation/fb/sisfb.rst F: drivers/video/fbdev/sis/ F: include/video/sisfb.h
@@@ -15539,7 -15487,6 +15560,7 @@@ F: include/media/i2c/tw9910.
TEE SUBSYSTEM M: Jens Wiklander jens.wiklander@linaro.org +L: tee-dev@lists.linaro.org S: Maintained F: include/linux/tee_drv.h F: include/uapi/linux/tee.h @@@ -15694,7 -15641,7 +15715,7 @@@ M: Viresh Kumar <viresh.kumar@linaro.or M: Javi Merino javi.merino@kernel.org L: linux-pm@vger.kernel.org S: Supported -F: Documentation/thermal/cpu-cooling-api.txt +F: Documentation/thermal/cpu-cooling-api.rst F: drivers/thermal/cpu_cooling.c F: include/linux/cpu_cooling.h
@@@ -16692,7 -16639,7 +16713,7 @@@ M: Michal Januszewski <spock@gentoo.org L: linux-fbdev@vger.kernel.org W: https://github.com/mjanusz/v86d S: Maintained -F: Documentation/fb/uvesafb.txt +F: Documentation/fb/uvesafb.rst F: drivers/video/fbdev/uvesafb.*
VF610 NAND DRIVER @@@ -16767,7 -16714,7 +16788,7 @@@ S: Maintaine F: drivers/net/ethernet/via/via-velocity.*
VICODEC VIRTUAL CODEC DRIVER -M: Hans Verkuil hans.verkuil@cisco.com +M: Hans Verkuil hverkuil-cisco@xs4all.nl L: linux-media@vger.kernel.org T: git git://linuxtv.org/media_tree.git W: https://linuxtv.org @@@ -16790,7 -16737,6 +16811,7 @@@ VIDEOBUF2 FRAMEWOR M: Pawel Osciak pawel@osciak.com M: Marek Szyprowski m.szyprowski@samsung.com M: Kyungmin Park kyungmin.park@samsung.com +R: Tomasz Figa tfiga@chromium.org L: linux-media@vger.kernel.org S: Maintained F: drivers/media/common/videobuf2/* @@@ -17349,7 -17295,6 +17370,7 @@@ N: xd XDP SOCKETS (AF_XDP) M: Björn Töpel bjorn.topel@intel.com M: Magnus Karlsson magnus.karlsson@intel.com +R: Jonathan Lemon jonathan.lemon@gmail.com L: netdev@vger.kernel.org L: bpf@vger.kernel.org S: Maintained @@@ -17443,13 -17388,7 +17464,13 @@@ W: http://xfs.org T: git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git S: Supported F: Documentation/filesystems/xfs.txt +F: Documentation/ABI/testing/sysfs-fs-xfs +F: Documentation/filesystems/xfs.txt +F: Documentation/filesystems/xfs-delayed-logging-design.txt +F: Documentation/filesystems/xfs-self-describing-metadata.txt F: fs/xfs/ +F: include/uapi/linux/dqblk_xfs.h +F: include/uapi/linux/fsmap.h
XILINX AXI ETHERNET DRIVER M: Anirudha Sarangi anirudh@xilinx.com diff --combined arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi index e83c080acc8b,22a1c74dddf3..7975519b4f56 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi @@@ -70,27 -70,6 +70,27 @@@ clock-output-names = "sysclk"; };
+ dpclk: clock-dp { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <27000000>; + clock-output-names= "dpclk"; + }; + + aclk: clock-axi { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <650000000>; + clock-output-names= "aclk"; + }; + + pclk: clock-apb { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <650000000>; + clock-output-names= "pclk"; + }; + reboot { compatible ="syscon-reboot"; regmap = <&dcfg>; @@@ -306,24 -285,13 +306,24 @@@ #interrupt-cells = <2>; };
- wdog0: watchdog@23c0000 { - compatible = "fsl,ls1028a-wdt", "fsl,imx21-wdt"; - reg = <0x0 0x23c0000 0x0 0x10000>; - interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&clockgen 4 1>; - big-endian; - status = "disabled"; + usb0: usb@3100000 { + compatible = "fsl,ls1028a-dwc3", "snps,dwc3"; + reg = <0x0 0x3100000 0x0 0x10000>; + interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>; + dr_mode = "host"; + snps,dis_rxdet_inp3_quirk; + snps,quirk-frame-length-adjustment = <0x20>; + snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>; + }; + + usb1: usb@3110000 { + compatible = "fsl,ls1028a-dwc3", "snps,dwc3"; + reg = <0x0 0x3110000 0x0 0x10000>; + interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>; + dr_mode = "host"; + snps,dis_rxdet_inp3_quirk; + snps,quirk-frame-length-adjustment = <0x20>; + snps,incr-burst-type-adjustment = <1>, <4>, <8>, <16>; };
sata: sata@3200000 { @@@ -388,79 -356,6 +388,79 @@@ <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>; };
+ crypto: crypto@8000000 { + compatible = "fsl,sec-v5.0", "fsl,sec-v4.0"; + fsl,sec-era = <10>; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0x0 0x00 0x8000000 0x100000>; + reg = <0x00 0x8000000 0x0 0x100000>; + interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>; + dma-coherent; + + sec_jr0: jr@10000 { + compatible = "fsl,sec-v5.0-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x10000 0x10000>; + interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>; + }; + + sec_jr1: jr@20000 { + compatible = "fsl,sec-v5.0-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x20000 0x10000>; + interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>; + }; + + sec_jr2: jr@30000 { + compatible = "fsl,sec-v5.0-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x30000 0x10000>; + interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>; + }; + + sec_jr3: jr@40000 { + compatible = "fsl,sec-v5.0-job-ring", + "fsl,sec-v4.0-job-ring"; + reg = <0x40000 0x10000>; + interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>; + }; + }; + + qdma: dma-controller@8380000 { + compatible = "fsl,ls1028a-qdma", "fsl,ls1021a-qdma"; + reg = <0x0 0x8380000 0x0 0x1000>, /* Controller regs */ + <0x0 0x8390000 0x0 0x10000>, /* Status regs */ + <0x0 0x83a0000 0x0 0x40000>; /* Block regs */ + interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 251 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 253 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "qdma-error", "qdma-queue0", + "qdma-queue1", "qdma-queue2", "qdma-queue3"; + dma-channels = <8>; + block-number = <1>; + block-offset = <0x10000>; + fsl,dma-queues = <2>; + status-sizes = <64>; + queue-sizes = <64 64>; + }; + + cluster1_core0_watchdog: watchdog@c000000 { + compatible = "arm,sp805", "arm,primecell"; + reg = <0x0 0xc000000 0x0 0x1000>; + clocks = <&clockgen 4 15>, <&clockgen 4 15>; + clock-names = "apb_pclk", "wdog_clk"; + }; + + cluster1_core1_watchdog: watchdog@c010000 { + compatible = "arm,sp805", "arm,primecell"; + reg = <0x0 0xc010000 0x0 0x1000>; + clocks = <&clockgen 4 15>, <&clockgen 4 15>; + clock-names = "apb_pclk", "wdog_clk"; + }; + sai1: audio-controller@f100000 { #sound-dai-cells = <0>; compatible = "fsl,vf610-sai"; @@@ -536,23 -431,12 +536,29 @@@ compatible = "fsl,enetc"; reg = <0x000100 0 0 0 0>; }; + ethernet@0,4 { + compatible = "fsl,enetc-ptp"; + reg = <0x000400 0 0 0 0>; + clocks = <&clockgen 4 0>; + little-endian; + }; }; }; + + malidp0: display@f080000 { + compatible = "arm,mali-dp500"; + reg = <0x0 0xf080000 0x0 0x10000>; + interrupts = <0 222 IRQ_TYPE_LEVEL_HIGH>, + <0 223 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "DE", "SE"; + clocks = <&dpclk>, <&aclk>, <&aclk>, <&pclk>; + clock-names = "pxlclk", "mclk", "aclk", "pclk"; + arm,malidp-output-port-lines = /bits/ 8 <8 8 8>; + + port { + dp0_out: endpoint { + + }; + }; + }; }; diff --combined arch/mips/configs/malta_defconfig index c9c4145c6fc0,0de92ac1ca64..59eedf55419d --- a/arch/mips/configs/malta_defconfig +++ b/arch/mips/configs/malta_defconfig @@@ -210,11 -210,11 +210,10 @@@ CONFIG_NET_ACT_NAT= CONFIG_NET_ACT_PEDIT=m CONFIG_NET_ACT_SIMP=m CONFIG_NET_ACT_SKBEDIT=m - CONFIG_NET_CLS_IND=y CONFIG_CFG80211=m CONFIG_MAC80211=m CONFIG_MAC80211_MESH=y CONFIG_RFKILL=m -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y CONFIG_CONNECTOR=m CONFIG_MTD=y diff --combined arch/mips/configs/malta_kvm_defconfig index 841f19adaec7,efc3abace048..8ef612552a19 --- a/arch/mips/configs/malta_kvm_defconfig +++ b/arch/mips/configs/malta_kvm_defconfig @@@ -215,11 -215,11 +215,10 @@@ 
CONFIG_NET_ACT_NAT= CONFIG_NET_ACT_PEDIT=m CONFIG_NET_ACT_SIMP=m CONFIG_NET_ACT_SKBEDIT=m - CONFIG_NET_CLS_IND=y CONFIG_CFG80211=m CONFIG_MAC80211=m CONFIG_MAC80211_MESH=y CONFIG_RFKILL=m -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y CONFIG_CONNECTOR=m CONFIG_MTD=y diff --combined arch/mips/configs/malta_kvm_guest_defconfig index 764ba62f7a5c,c6ceeca4394d..d2a008c9907c --- a/arch/mips/configs/malta_kvm_guest_defconfig +++ b/arch/mips/configs/malta_kvm_guest_defconfig @@@ -212,11 -212,11 +212,10 @@@ CONFIG_NET_ACT_NAT= CONFIG_NET_ACT_PEDIT=m CONFIG_NET_ACT_SIMP=m CONFIG_NET_ACT_SKBEDIT=m - CONFIG_NET_CLS_IND=y CONFIG_CFG80211=m CONFIG_MAC80211=m CONFIG_MAC80211_MESH=y CONFIG_RFKILL=m -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y CONFIG_CONNECTOR=m CONFIG_MTD=y diff --combined arch/mips/configs/maltaup_xpa_defconfig index de5bb1c9aeb8,56861aef2756..970df6d42728 --- a/arch/mips/configs/maltaup_xpa_defconfig +++ b/arch/mips/configs/maltaup_xpa_defconfig @@@ -212,11 -212,11 +212,10 @@@ CONFIG_NET_ACT_NAT= CONFIG_NET_ACT_PEDIT=m CONFIG_NET_ACT_SIMP=m CONFIG_NET_ACT_SKBEDIT=m - CONFIG_NET_CLS_IND=y CONFIG_CFG80211=m CONFIG_MAC80211=m CONFIG_MAC80211_MESH=y CONFIG_RFKILL=m -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y CONFIG_CONNECTOR=m diff --combined arch/mips/configs/rb532_defconfig index 97d96117e11a,864c70fbe668..5b947183852b --- a/arch/mips/configs/rb532_defconfig +++ b/arch/mips/configs/rb532_defconfig @@@ -103,8 -103,8 +103,7 @@@ CONFIG_GACT_PROB= CONFIG_NET_ACT_MIRRED=m CONFIG_NET_ACT_IPT=m CONFIG_NET_ACT_PEDIT=m - CONFIG_NET_CLS_IND=y CONFIG_HAMRADIO=y -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_MTD=y CONFIG_MTD_BLOCK=y CONFIG_MTD_BLOCK2MTD=y diff --combined arch/powerpc/configs/ppc6xx_defconfig index 463aa3e53084,aa51b9b66fa2..1c074fb95df2 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@@ -301,7 -301,6 +301,6 @@@ CONFIG_NET_ACT_NAT= CONFIG_NET_ACT_PEDIT=m CONFIG_NET_ACT_SIMP=m CONFIG_NET_ACT_SKBEDIT=m - CONFIG_NET_CLS_IND=y CONFIG_IRDA=m CONFIG_IRLAN=m CONFIG_IRNET=m @@@ -1124,7 -1123,6 +1123,7 @@@ CONFIG_NLS_KOI8_R= CONFIG_NLS_KOI8_U=m CONFIG_DEBUG_INFO=y CONFIG_UNUSED_SYMBOLS=y +CONFIG_HEADERS_INSTALL=y CONFIG_HEADERS_CHECK=y CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y diff --combined arch/sh/configs/se7712_defconfig index 6ac7d362e106,1e116529735f..9a527f978106 --- a/arch/sh/configs/se7712_defconfig +++ b/arch/sh/configs/se7712_defconfig @@@ -63,7 -63,7 +63,6 @@@ CONFIG_NET_SCH_NETEM= CONFIG_NET_CLS_TCINDEX=y CONFIG_NET_CLS_ROUTE4=y CONFIG_NET_CLS_FW=y - CONFIG_NET_CLS_IND=y -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_MTD=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y diff --combined arch/sh/configs/se7721_defconfig index ffd15acc2a04,c66e512719ab..3b0e1eb6e874 --- a/arch/sh/configs/se7721_defconfig +++ b/arch/sh/configs/se7721_defconfig @@@ -62,7 -62,7 +62,6 @@@ CONFIG_NET_SCH_NETEM= CONFIG_NET_CLS_TCINDEX=y CONFIG_NET_CLS_ROUTE4=y CONFIG_NET_CLS_FW=y - CONFIG_NET_CLS_IND=y -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_MTD=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y diff --combined arch/sh/configs/titan_defconfig index 1c1c78e74fbb,171ab05ce4fc..4ec961ace688 --- a/arch/sh/configs/titan_defconfig +++ b/arch/sh/configs/titan_defconfig @@@ -142,7 -142,7 +142,6 @@@ CONFIG_GACT_PROB= CONFIG_NET_ACT_MIRRED=m CONFIG_NET_ACT_IPT=m CONFIG_NET_ACT_PEDIT=m - CONFIG_NET_CLS_IND=y -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_FW_LOADER=m CONFIG_CONNECTOR=m CONFIG_MTD=m diff 
--combined drivers/infiniband/hw/cxgb4/cm.c index 0147c407ac6c,09fcfc9e052d..e87fc0408470 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@@ -953,7 -953,7 +953,7 @@@ static int send_mpa_req(struct c4iw_ep mpalen = sizeof(*mpa) + ep->plen; if (mpa_rev_to_use == 2) mpalen += sizeof(struct mpa_v2_conn_params); - wrlen = roundup(mpalen + sizeof *req, 16); + wrlen = roundup(mpalen + sizeof(*req), 16); skb = get_skb(skb, wrlen, GFP_KERNEL); if (!skb) { connect_reply_upcall(ep, -ENOMEM); @@@ -997,9 -997,8 +997,9 @@@ }
if (mpa_rev_to_use == 2) { - mpa->private_data_size = htons(ntohs(mpa->private_data_size) + - sizeof (struct mpa_v2_conn_params)); + mpa->private_data_size = + htons(ntohs(mpa->private_data_size) + + sizeof(struct mpa_v2_conn_params)); pr_debug("initiator ird %u ord %u\n", ep->ird, ep->ord); mpa_v2_params.ird = htons((u16)ep->ird); @@@ -1058,7 -1057,7 +1058,7 @@@ static int send_mpa_reject(struct c4iw_ mpalen = sizeof(*mpa) + plen; if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) mpalen += sizeof(struct mpa_v2_conn_params); - wrlen = roundup(mpalen + sizeof *req, 16); + wrlen = roundup(mpalen + sizeof(*req), 16);
skb = get_skb(NULL, wrlen, GFP_KERNEL); if (!skb) { @@@ -1089,9 -1088,8 +1089,9 @@@
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { mpa->flags |= MPA_ENHANCED_RDMA_CONN; - mpa->private_data_size = htons(ntohs(mpa->private_data_size) + - sizeof (struct mpa_v2_conn_params)); + mpa->private_data_size = + htons(ntohs(mpa->private_data_size) + + sizeof(struct mpa_v2_conn_params)); mpa_v2_params.ird = htons(((u16)ep->ird) | (peer2peer ? MPA_V2_PEER2PEER_MODEL : 0)); @@@ -1138,7 -1136,7 +1138,7 @@@ static int send_mpa_reply(struct c4iw_e mpalen = sizeof(*mpa) + plen; if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) mpalen += sizeof(struct mpa_v2_conn_params); - wrlen = roundup(mpalen + sizeof *req, 16); + wrlen = roundup(mpalen + sizeof(*req), 16);
skb = get_skb(NULL, wrlen, GFP_KERNEL); if (!skb) { @@@ -1173,9 -1171,8 +1173,9 @@@
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { mpa->flags |= MPA_ENHANCED_RDMA_CONN; - mpa->private_data_size = htons(ntohs(mpa->private_data_size) + - sizeof (struct mpa_v2_conn_params)); + mpa->private_data_size = + htons(ntohs(mpa->private_data_size) + + sizeof(struct mpa_v2_conn_params)); mpa_v2_params.ird = htons((u16)ep->ird); mpa_v2_params.ord = htons((u16)ep->ord); if (peer2peer && (ep->mpa_attr.p2p_type != @@@ -3233,17 -3230,22 +3233,22 @@@ static int pick_local_ipaddrs(struct c4 int found = 0; struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr; struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; + const struct in_ifaddr *ifa;
ind = in_dev_get(dev->rdev.lldi.ports[0]); if (!ind) return -EADDRNOTAVAIL; - for_primary_ifa(ind) { + rcu_read_lock(); + in_dev_for_each_ifa_rcu(ifa, ind) { + if (ifa->ifa_flags & IFA_F_SECONDARY) + continue; laddr->sin_addr.s_addr = ifa->ifa_address; raddr->sin_addr.s_addr = ifa->ifa_address; found = 1; break; } - endfor_ifa(ind); + rcu_read_unlock(); + in_dev_put(ind); return found ? 0 : -EADDRNOTAVAIL; } diff --combined drivers/infiniband/hw/mlx5/cq.c index 22230fd7d741,0220736b073e..bfe3efdd77d7 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@@ -522,9 -522,9 +522,9 @@@ repoll case MLX5_CQE_SIG_ERR: sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
- read_lock(&dev->mdev->priv.mkey_table.lock); - mmkey = __mlx5_mr_lookup(dev->mdev, - mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey))); + xa_lock(&dev->mdev->priv.mkey_table); + mmkey = xa_load(&dev->mdev->priv.mkey_table, + mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey))); mr = to_mibmr(mmkey); get_sig_err_item(sig_err_cqe, &mr->sig->err_item); mr->sig->sig_err_exists = true; @@@ -537,7 -537,7 +537,7 @@@ mr->sig->err_item.expected, mr->sig->err_item.actual);
- read_unlock(&dev->mdev->priv.mkey_table.lock); + xa_unlock(&dev->mdev->priv.mkey_table); goto repoll; }
@@@ -884,14 -884,14 +884,14 @@@ static void notify_soft_wc_handler(stru cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); }
-struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, - const struct ib_cq_init_attr *attr, - struct ib_udata *udata) +int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, + struct ib_udata *udata) { + struct ib_device *ibdev = ibcq->device; int entries = attr->cqe; int vector = attr->comp_vector; struct mlx5_ib_dev *dev = to_mdev(ibdev); - struct mlx5_ib_cq *cq; + struct mlx5_ib_cq *cq = to_mcq(ibcq); int uninitialized_var(index); int uninitialized_var(inlen); u32 *cqb = NULL; @@@ -903,14 -903,18 +903,14 @@@
if (entries < 0 || (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))) - return ERR_PTR(-EINVAL); + return -EINVAL;
if (check_cq_create_flags(attr->flags)) - return ERR_PTR(-EOPNOTSUPP); + return -EOPNOTSUPP;
entries = roundup_pow_of_two(entries + 1); if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) - return ERR_PTR(-EINVAL); - - cq = kzalloc(sizeof(*cq), GFP_KERNEL); - if (!cq) - return ERR_PTR(-ENOMEM); + return -EINVAL;
cq->ibcq.cqe = entries - 1; mutex_init(&cq->resize_mutex); @@@ -925,13 -929,13 +925,13 @@@ err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size, &index, &inlen); if (err) - goto err_create; + return err; } else { cqe_size = cache_line_size() == 128 ? 128 : 64; err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, &index, &inlen); if (err) - goto err_create; + return err;
INIT_WORK(&cq->notify_work, notify_soft_wc_handler); } @@@ -976,7 -980,7 +976,7 @@@
kvfree(cqb); - return &cq->ibcq; + return 0;
err_cmd: mlx5_core_destroy_cq(dev->mdev, &cq->mcq); @@@ -987,10 -991,14 +987,10 @@@ err_cqb destroy_cq_user(cq, udata); else destroy_cq_kernel(dev, cq); - -err_create: - kfree(cq); - - return ERR_PTR(err); + return err; }
-int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) +void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) { struct mlx5_ib_dev *dev = to_mdev(cq->device); struct mlx5_ib_cq *mcq = to_mcq(cq); @@@ -1000,6 -1008,10 +1000,6 @@@ destroy_cq_user(mcq, udata); else destroy_cq_kernel(dev, mcq); - - kfree(mcq); - - return 0; }
static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn) @@@ -1125,6 -1137,11 +1125,6 @@@ static int resize_user(struct mlx5_ib_d return 0; }
-static void un_resize_user(struct mlx5_ib_cq *cq) -{ - ib_umem_release(cq->resize_umem); -} - static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, int entries, int cqe_size) { @@@ -1147,6 -1164,12 +1147,6 @@@ ex return err; }
-static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq) -{ - free_cq_buf(dev, cq->resize_buf); - cq->resize_buf = NULL; -} - static int copy_resize_cqes(struct mlx5_ib_cq *cq) { struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); @@@ -1327,11 -1350,10 +1327,11 @@@ ex_alloc kvfree(in);
ex_resize: - if (udata) - un_resize_user(cq); - else - un_resize_kernel(dev, cq); + ib_umem_release(cq->resize_umem); + if (!udata) { + free_cq_buf(dev, cq->resize_buf); + cq->resize_buf = NULL; + } ex: mutex_unlock(&cq->resize_mutex); return err; diff --combined drivers/infiniband/hw/mlx5/main.c index 05d2bfcb3d60,b1d5f4382d85..7037b53fccc7 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@@ -52,7 -52,6 +52,7 @@@ #include <linux/mlx5/port.h> #include <linux/mlx5/vport.h> #include <linux/mlx5/fs.h> +#include <linux/mlx5/eswitch.h> #include <linux/list.h> #include <rdma/ib_smi.h> #include <rdma/ib_umem.h> @@@ -889,7 -888,7 +889,7 @@@ static int mlx5_ib_query_device(struct } props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; if (MLX5_CAP_GEN(mdev, sho)) { - props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER; + props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER; /* At this stage no support for signature handover */ props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 | IB_PROT_T10DIF_TYPE_2 | @@@ -1009,8 -1008,6 +1009,8 @@@ props->max_srq_sge = max_rq_sg - 1; props->max_fast_reg_page_list_len = 1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size); + props->max_pi_fast_reg_page_list_len = + props->max_fast_reg_page_list_len / 2; get_atomic_caps_qp(dev, props); props->masked_atomic_cap = IB_ATOMIC_NONE; props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg); @@@ -2669,11 -2666,15 +2669,15 @@@ int parse_flow_flow_action(struct mlx5_ } }
- static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c, - u32 *match_v, const union ib_flow_spec *ib_spec, + static int parse_flow_attr(struct mlx5_core_dev *mdev, + struct mlx5_flow_spec *spec, + const union ib_flow_spec *ib_spec, const struct ib_flow_attr *flow_attr, struct mlx5_flow_act *action, u32 prev_type) { + struct mlx5_flow_context *flow_context = &spec->flow_context; + u32 *match_c = spec->match_criteria; + u32 *match_v = spec->match_value; void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters); void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v, @@@ -2992,8 -2993,8 +2996,8 @@@ if (ib_spec->flow_tag.tag_id >= BIT(24)) return -EINVAL;
- action->flow_tag = ib_spec->flow_tag.tag_id; - action->flags |= FLOW_ACT_HAS_TAG; + flow_context->flow_tag = ib_spec->flow_tag.tag_id; + flow_context->flags |= FLOW_CONTEXT_HAS_TAG; break; case IB_FLOW_SPEC_ACTION_DROP: if (FIELDS_NOT_SUPPORTED(ib_spec->drop, @@@ -3087,7 -3088,8 +3091,8 @@@ is_valid_esp_aes_gcm(struct mlx5_core_d return VALID_SPEC_NA;
return is_crypto && is_ipsec && - (!egress || (!is_drop && !(flow_act->flags & FLOW_ACT_HAS_TAG))) ? + (!egress || (!is_drop && + !(spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG))) ? VALID_SPEC_VALID : VALID_SPEC_INVALID; }
@@@ -3255,14 -3257,11 +3260,14 @@@ static struct mlx5_ib_flow_prio *get_fl int max_table_size; int num_entries; int num_groups; + bool esw_encap; u32 flags = 0; int priority;
max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, log_max_ft_size)); + esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) != + DEVLINK_ESWITCH_ENCAP_MODE_NONE; if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { enum mlx5_flow_namespace_type fn_type;
@@@ -3275,10 -3274,10 +3280,10 @@@ if (ft_type == MLX5_IB_FT_RX) { fn_type = MLX5_FLOW_NAMESPACE_BYPASS; prio = &dev->flow_db->prios[priority]; - if (!dev->is_rep && + if (!dev->is_rep && !esw_encap && MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap)) flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP; - if (!dev->is_rep && + if (!dev->is_rep && !esw_encap && MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, reformat_l3_tunnel_to_l2)) flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; @@@ -3288,7 -3287,7 +3293,7 @@@ log_max_ft_size)); fn_type = MLX5_FLOW_NAMESPACE_EGRESS; prio = &dev->flow_db->egress_prios[priority]; - if (!dev->is_rep && + if (!dev->is_rep && !esw_encap && MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat)) flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; } @@@ -3470,6 -3469,37 +3475,37 @@@ free return ret; }
+ static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev, + struct mlx5_flow_spec *spec, + struct mlx5_eswitch_rep *rep) + { + struct mlx5_eswitch *esw = dev->mdev->priv.eswitch; + void *misc; + + if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters_2); + + MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, + mlx5_eswitch_get_vport_metadata_for_match(esw, + rep->vport)); + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters_2); + + MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0); + } else { + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters); + + MLX5_SET(fte_match_set_misc, misc, source_port, rep->vport); + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters); + + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + } + } + static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, struct mlx5_ib_flow_prio *ft_prio, const struct ib_flow_attr *flow_attr, @@@ -3479,7 -3509,7 +3515,7 @@@ { struct mlx5_flow_table *ft = ft_prio->flow_table; struct mlx5_ib_flow_handler *handler; - struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG}; + struct mlx5_flow_act flow_act = {}; struct mlx5_flow_spec *spec; struct mlx5_flow_destination dest_arr[2] = {}; struct mlx5_flow_destination *rule_dst = dest_arr; @@@ -3510,8 -3540,7 +3546,7 @@@ }
for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { - err = parse_flow_attr(dev->mdev, spec->match_criteria, - spec->match_value, + err = parse_flow_attr(dev->mdev, spec, ib_flow, flow_attr, &flow_act, prev_type); if (err < 0) @@@ -3525,19 -3554,15 +3560,15 @@@ set_underlay_qp(dev, spec, underlay_qpn);
if (dev->is_rep) { - void *misc; + struct mlx5_eswitch_rep *rep;
- if (!dev->port[flow_attr->port - 1].rep) { + rep = dev->port[flow_attr->port - 1].rep; + if (!rep) { err = -EINVAL; goto free; } - misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, - misc_parameters); - MLX5_SET(fte_match_set_misc, misc, source_port, - dev->port[flow_attr->port - 1].rep->vport); - misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, - misc_parameters); - MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + + mlx5_ib_set_rule_source_port(dev, spec, rep); }
spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria); @@@ -3578,11 -3603,11 +3609,11 @@@ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; }
- if ((flow_act.flags & FLOW_ACT_HAS_TAG) && + if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG) && (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) { mlx5_ib_warn(dev, "Flow tag %u and attribute type %x isn't allowed in leftovers\n", - flow_act.flow_tag, flow_attr->type); + spec->flow_context.flow_tag, flow_attr->type); err = -EINVAL; goto free; } @@@ -3898,7 -3923,6 +3929,7 @@@ _get_flow_table(struct mlx5_ib_dev *dev struct mlx5_flow_namespace *ns = NULL; struct mlx5_ib_flow_prio *prio = NULL; int max_table_size = 0; + bool esw_encap; u32 flags = 0; int priority;
@@@ -3907,30 -3931,22 +3938,30 @@@ else priority = ib_prio_to_core_prio(fs_matcher->priority, false);
+ esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) != + DEVLINK_ESWITCH_ENCAP_MODE_NONE; if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) { max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, log_max_ft_size)); - if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap)) + if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap) && !esw_encap) flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP; if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, - reformat_l3_tunnel_to_l2)) + reformat_l3_tunnel_to_l2) && + !esw_encap) flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS) { max_table_size = BIT( MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size)); - if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat)) + if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat) && !esw_encap) flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) { max_table_size = BIT( MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size)); + if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, decap) && esw_encap) + flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP; + if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, reformat_l3_tunnel_to_l2) && + esw_encap) + flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; priority = FDB_BYPASS_PATH; }
@@@ -3962,6 -3978,7 +3993,7 @@@ _create_raw_flow_rule(struct mlx5_ib_de struct mlx5_ib_flow_prio *ft_prio, struct mlx5_flow_destination *dst, struct mlx5_ib_flow_matcher *fs_matcher, + struct mlx5_flow_context *flow_context, struct mlx5_flow_act *flow_act, void *cmd_in, int inlen, int dst_num) @@@ -3984,6 -4001,7 +4016,7 @@@ memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params, fs_matcher->mask_len); spec->match_criteria_enable = fs_matcher->match_criteria_enable; + spec->flow_context = *flow_context;
handler->rule = mlx5_add_flow_rules(ft, spec, flow_act, dst, dst_num); @@@ -4048,6 -4066,7 +4081,7 @@@ static bool raw_fs_is_multicast(struct struct mlx5_ib_flow_handler * mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher, + struct mlx5_flow_context *flow_context, struct mlx5_flow_act *flow_act, u32 counter_id, void *cmd_in, int inlen, int dest_id, @@@ -4100,7 -4119,8 +4134,8 @@@ dst_num++; }
- handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, flow_act, + handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, + flow_context, flow_act, cmd_in, inlen, dst_num);
if (IS_ERR(handler)) { @@@ -4906,19 -4926,18 +4941,19 @@@ static int create_dev_resources(struct if (ret) goto error0;
- devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL); - if (IS_ERR(devr->c0)) { - ret = PTR_ERR(devr->c0); + devr->c0 = rdma_zalloc_drv_obj(ibdev, ib_cq); + if (!devr->c0) { + ret = -ENOMEM; goto error1; } - devr->c0->device = &dev->ib_dev; - devr->c0->uobject = NULL; - devr->c0->comp_handler = NULL; - devr->c0->event_handler = NULL; - devr->c0->cq_context = NULL; + + devr->c0->device = &dev->ib_dev; atomic_set(&devr->c0->usecnt, 0);
+ ret = mlx5_ib_create_cq(devr->c0, &cq_attr, NULL); + if (ret) + goto err_create_cq; + devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL); if (IS_ERR(devr->x0)) { ret = PTR_ERR(devr->x0); @@@ -5010,8 -5029,6 +5045,8 @@@ error3 mlx5_ib_dealloc_xrcd(devr->x0, NULL); error2: mlx5_ib_destroy_cq(devr->c0, NULL); +err_create_cq: + kfree(devr->c0); error1: mlx5_ib_dealloc_pd(devr->p0, NULL); error0: @@@ -5030,7 -5047,6 +5065,7 @@@ static void destroy_dev_resources(struc mlx5_ib_dealloc_xrcd(devr->x0, NULL); mlx5_ib_dealloc_xrcd(devr->x1, NULL); mlx5_ib_destroy_cq(devr->c0, NULL); + kfree(devr->c0); mlx5_ib_dealloc_pd(devr->p0, NULL); kfree(devr->p0);
@@@ -6063,6 -6079,7 +6098,6 @@@ static int mlx5_ib_stage_init_init(stru if (mlx5_use_mad_ifc(dev)) get_ext_port_caps(dev);
- dev->ib_dev.owner = THIS_MODULE; dev->ib_dev.node_type = RDMA_NODE_IB_CA; dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; dev->ib_dev.phys_port_cnt = dev->num_ports; @@@ -6142,13 -6159,8 +6177,13 @@@ static void mlx5_ib_stage_flow_db_clean }
static const struct ib_device_ops mlx5_ib_dev_ops = { + .owner = THIS_MODULE, + .driver_id = RDMA_DRIVER_MLX5, + .uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION, + .add_gid = mlx5_ib_add_gid, .alloc_mr = mlx5_ib_alloc_mr, + .alloc_mr_integrity = mlx5_ib_alloc_mr_integrity, .alloc_pd = mlx5_ib_alloc_pd, .alloc_ucontext = mlx5_ib_alloc_ucontext, .attach_mcast = mlx5_ib_mcg_attach, @@@ -6178,7 -6190,6 +6213,7 @@@ .get_dma_mr = mlx5_ib_get_dma_mr, .get_link_layer = mlx5_ib_port_link_layer, .map_mr_sg = mlx5_ib_map_mr_sg, + .map_mr_sg_pi = mlx5_ib_map_mr_sg_pi, .mmap = mlx5_ib_mmap, .modify_cq = mlx5_ib_modify_cq, .modify_device = mlx5_ib_modify_device, @@@ -6203,7 -6214,6 +6238,7 @@@ .resize_cq = mlx5_ib_resize_cq,
INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah), + INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq), INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext), @@@ -6246,6 -6256,7 +6281,6 @@@ static int mlx5_ib_stage_caps_init(stru struct mlx5_core_dev *mdev = dev->mdev; int err;
- dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION; dev->ib_dev.uverbs_cmd_mask = (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | @@@ -6314,6 -6325,7 +6349,6 @@@ if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_flow_ipsec_ops); - dev->ib_dev.driver_id = RDMA_DRIVER_MLX5; ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)) diff --combined drivers/infiniband/hw/mlx5/mlx5_ib.h index bdb83fc85f94,1c205c2bd486..32b36394d7c1 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@@ -431,6 -431,9 +431,6 @@@ struct mlx5_ib_qp
int create_type;
- /* Store signature errors */ - bool signature_en; - struct list_head qps_list; struct list_head cq_recv_list; struct list_head cq_send_list; @@@ -584,9 -587,6 +584,9 @@@ struct mlx5_ib_mr void *descs; dma_addr_t desc_map; int ndescs; + int data_length; + int meta_ndescs; + int meta_length; int max_descs; int desc_size; int access_mode; @@@ -605,13 -605,6 +605,13 @@@ int access_flags; /* Needed for rereg MR */
struct mlx5_ib_mr *parent; + /* Needed for IB_MR_TYPE_INTEGRITY */ + struct mlx5_ib_mr *pi_mr; + struct mlx5_ib_mr *klm_mr; + struct mlx5_ib_mr *mtt_mr; + u64 data_iova; + u64 pi_iova; + atomic_t num_leaf_free; wait_queue_head_t q_leaf_free; struct mlx5_async_work cb_work; @@@ -1123,9 -1116,10 +1123,9 @@@ int mlx5_ib_read_user_wqe_rq(struct mlx int buflen, size_t *bc); int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer, int buflen, size_t *bc); -struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, - const struct ib_cq_init_attr *attr, - struct ib_udata *udata); -int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); +int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, + struct ib_udata *udata); +void mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata); int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period); @@@ -1155,15 -1149,8 +1155,15 @@@ int mlx5_ib_rereg_user_mr(struct ib_mr int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg, struct ib_udata *udata); +struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd, + u32 max_num_sg, + u32 max_num_meta_sg); int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset); +int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, + int data_sg_nents, unsigned int *data_sg_offset, + struct scatterlist *meta_sg, int meta_sg_nents, + unsigned int *meta_sg_offset); int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, const struct ib_wc *in_wc, const struct ib_grh *in_grh, const struct ib_mad_hdr *in, size_t in_mad_size, @@@ -1215,7 -1202,7 +1215,7 @@@ int mlx5_ib_check_mr_status(struct ib_m struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd, struct ib_wq_init_attr *init_attr, struct ib_udata *udata); -int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata); +void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata); int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, u32 wq_attr_mask, struct ib_udata *udata); struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device, @@@ -1330,6 -1317,7 +1330,7 @@@ extern const struct uapi_definition mlx extern const struct uapi_definition mlx5_ib_flow_defs[]; struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add( struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher, + struct mlx5_flow_context *flow_context, struct mlx5_flow_act *flow_act, u32 counter_id, void *cmd_in, int inlen, int dest_id, int dest_type); bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type); diff --combined drivers/infiniband/hw/mlx5/mr.c index 6ac77e09a34a,83b452d977d4..20ece6e0b2fc --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@@ -130,7 -130,7 +130,7 @@@ static void reg_mr_callback(int status struct mlx5_cache_ent *ent = &cache->ent[c]; u8 key; unsigned long flags; - struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table; + struct xarray *mkeys = &dev->mdev->priv.mkey_table; int err;
spin_lock_irqsave(&ent->lock, flags); @@@ -158,12 -158,12 +158,12 @@@ ent->size++; spin_unlock_irqrestore(&ent->lock, flags);
- write_lock_irqsave(&table->lock, flags); - err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key), - &mr->mmkey); + xa_lock_irqsave(mkeys, flags); + err = xa_err(__xa_store(mkeys, mlx5_base_mkey(mr->mmkey.key), + &mr->mmkey, GFP_ATOMIC)); + xa_unlock_irqrestore(mkeys, flags); if (err) pr_err("Error inserting to mkey tree. 0x%x\n", -err); - write_unlock_irqrestore(&table->lock, flags);
if (!completion_done(&ent->compl)) complete(&ent->compl); @@@ -1507,9 -1507,10 +1507,9 @@@ int mlx5_ib_rereg_user_mr(struct ib_mr return 0;
err: - if (mr->umem) { - ib_umem_release(mr->umem); - mr->umem = NULL; - } + ib_umem_release(mr->umem); + mr->umem = NULL; + clean_mr(dev, mr); return err; } @@@ -1605,9 -1606,8 +1605,9 @@@ static void dereg_mr(struct mlx5_ib_de synchronize_srcu(&dev->mr_srcu); /* Destroy all page mappings */ if (umem_odp->page_list) - mlx5_ib_invalidate_range(umem_odp, ib_umem_start(umem), - ib_umem_end(umem)); + mlx5_ib_invalidate_range(umem_odp, + ib_umem_start(umem_odp), + ib_umem_end(umem_odp)); else mlx5_ib_free_implicit_mr(mr); /* @@@ -1629,85 -1629,28 +1629,85 @@@ * remove the DMA mapping. */ mlx5_mr_cache_free(dev, mr); - if (umem) { - ib_umem_release(umem); + ib_umem_release(umem); + if (umem) atomic_sub(npages, &dev->mdev->priv.reg_pages); - } + if (!mr->allocated_from_cache) kfree(mr); }
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) { - dereg_mr(to_mdev(ibmr->device), to_mmr(ibmr)); + struct mlx5_ib_mr *mmr = to_mmr(ibmr); + + if (ibmr->type == IB_MR_TYPE_INTEGRITY) { + dereg_mr(to_mdev(mmr->mtt_mr->ibmr.device), mmr->mtt_mr); + dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr); + } + + dereg_mr(to_mdev(ibmr->device), mmr); + return 0; }
-struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, - u32 max_num_sg, struct ib_udata *udata) +static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs, + int access_mode, int page_shift) +{ + void *mkc; + + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + + MLX5_SET(mkc, mkc, free, 1); + MLX5_SET(mkc, mkc, qpn, 0xffffff); + MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); + MLX5_SET(mkc, mkc, translations_octword_size, ndescs); + MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3); + MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7); + MLX5_SET(mkc, mkc, umr_en, 1); + MLX5_SET(mkc, mkc, log_page_size, page_shift); +} + +static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, + int ndescs, int desc_size, int page_shift, + int access_mode, u32 *in, int inlen) { struct mlx5_ib_dev *dev = to_mdev(pd->device); + int err; + + mr->access_mode = access_mode; + mr->desc_size = desc_size; + mr->max_descs = ndescs; + + err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size); + if (err) + return err; + + mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift); + + err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen); + if (err) + goto err_free_descs; + + mr->mmkey.type = MLX5_MKEY_MR; + mr->ibmr.lkey = mr->mmkey.key; + mr->ibmr.rkey = mr->mmkey.key; + + return 0; + +err_free_descs: + mlx5_free_priv_descs(mr); + return err; +} + +static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd, + u32 max_num_sg, u32 max_num_meta_sg, + int desc_size, int access_mode) +{ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); - int ndescs = ALIGN(max_num_sg, 4); + int ndescs = ALIGN(max_num_sg + max_num_meta_sg, 4); + int page_shift = 0; struct mlx5_ib_mr *mr; - void *mkc; u32 *in; int err;
@@@ -1715,168 -1658,99 +1715,168 @@@ if (!mr) return ERR_PTR(-ENOMEM);
+ mr->ibmr.pd = pd; + mr->ibmr.device = pd->device; + in = kzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; goto err_free; }
+ if (access_mode == MLX5_MKC_ACCESS_MODE_MTT) + page_shift = PAGE_SHIFT; + + err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift, + access_mode, in, inlen); + if (err) + goto err_free_in; + + mr->umem = NULL; + kfree(in); + + return mr; + +err_free_in: + kfree(in); +err_free: + kfree(mr); + return ERR_PTR(err); +} + +static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, + int ndescs, u32 *in, int inlen) +{ + return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt), + PAGE_SHIFT, MLX5_MKC_ACCESS_MODE_MTT, in, + inlen); +} + +static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, + int ndescs, u32 *in, int inlen) +{ + return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm), + 0, MLX5_MKC_ACCESS_MODE_KLMS, in, inlen); +} + +static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr, + int max_num_sg, int max_num_meta_sg, + u32 *in, int inlen) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + u32 psv_index[2]; + void *mkc; + int err; + + mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL); + if (!mr->sig) + return -ENOMEM; + + /* create mem & wire PSVs */ + err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index); + if (err) + goto err_free_sig; + + mr->sig->psv_memory.psv_idx = psv_index[0]; + mr->sig->psv_wire.psv_idx = psv_index[1]; + + mr->sig->sig_status_checked = true; + mr->sig->sig_err_exists = false; + /* Next UMR, Arm SIGERR */ + ++mr->sig->sigerr_count; + mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, + sizeof(struct mlx5_klm), + MLX5_MKC_ACCESS_MODE_KLMS); + if (IS_ERR(mr->klm_mr)) { + err = PTR_ERR(mr->klm_mr); + goto err_destroy_psv; + } + mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg, + sizeof(struct mlx5_mtt), + MLX5_MKC_ACCESS_MODE_MTT); + if (IS_ERR(mr->mtt_mr)) { + err = PTR_ERR(mr->mtt_mr); + goto err_free_klm_mr; + } + + /* Set bsf descriptors for mkey */ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); - MLX5_SET(mkc, mkc, free, 1); - MLX5_SET(mkc, mkc, translations_octword_size, ndescs); - MLX5_SET(mkc, mkc, qpn, 0xffffff); - MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); + MLX5_SET(mkc, mkc, bsf_en, 1); + MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
- if (mr_type == IB_MR_TYPE_MEM_REG) { - mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT; - MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT); - err = mlx5_alloc_priv_descs(pd->device, mr, - ndescs, sizeof(struct mlx5_mtt)); - if (err) - goto err_free_in; + err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0, + MLX5_MKC_ACCESS_MODE_KLMS, in, inlen); + if (err) + goto err_free_mtt_mr;
- mr->desc_size = sizeof(struct mlx5_mtt); - mr->max_descs = ndescs; - } else if (mr_type == IB_MR_TYPE_SG_GAPS) { - mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS; + return 0;
- err = mlx5_alloc_priv_descs(pd->device, mr, - ndescs, sizeof(struct mlx5_klm)); - if (err) - goto err_free_in; - mr->desc_size = sizeof(struct mlx5_klm); - mr->max_descs = ndescs; - } else if (mr_type == IB_MR_TYPE_SIGNATURE) { - u32 psv_index[2]; - - MLX5_SET(mkc, mkc, bsf_en, 1); - MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE); - mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL); - if (!mr->sig) { - err = -ENOMEM; - goto err_free_in; - } +err_free_mtt_mr: + dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr); + mr->mtt_mr = NULL; +err_free_klm_mr: + dereg_mr(to_mdev(mr->klm_mr->ibmr.device), mr->klm_mr); + mr->klm_mr = NULL; +err_destroy_psv: + if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx)) + mlx5_ib_warn(dev, "failed to destroy mem psv %d\n", + mr->sig->psv_memory.psv_idx); + if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx)) + mlx5_ib_warn(dev, "failed to destroy wire psv %d\n", + mr->sig->psv_wire.psv_idx); +err_free_sig: + kfree(mr->sig);
- /* create mem & wire PSVs */ - err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, - 2, psv_index); - if (err) - goto err_free_sig; + return err; +} + +static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd, + enum ib_mr_type mr_type, u32 max_num_sg, + u32 max_num_meta_sg) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); + int ndescs = ALIGN(max_num_sg, 4); + struct mlx5_ib_mr *mr; + u32 *in; + int err;
- mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS; - mr->sig->psv_memory.psv_idx = psv_index[0]; - mr->sig->psv_wire.psv_idx = psv_index[1]; + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM);
- mr->sig->sig_status_checked = true; - mr->sig->sig_err_exists = false; - /* Next UMR, Arm SIGERR */ - ++mr->sig->sigerr_count; - } else { + in = kzalloc(inlen, GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto err_free; + } + + mr->ibmr.device = pd->device; + mr->umem = NULL; + + switch (mr_type) { + case IB_MR_TYPE_MEM_REG: + err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen); + break; + case IB_MR_TYPE_SG_GAPS: + err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen); + break; + case IB_MR_TYPE_INTEGRITY: + err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg, + max_num_meta_sg, in, inlen); + break; + default: mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type); err = -EINVAL; - goto err_free_in; }
- MLX5_SET(mkc, mkc, access_mode_1_0, mr->access_mode & 0x3); - MLX5_SET(mkc, mkc, access_mode_4_2, (mr->access_mode >> 2) & 0x7); - MLX5_SET(mkc, mkc, umr_en, 1); - - mr->ibmr.device = pd->device; - err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen); if (err) - goto err_destroy_psv; + goto err_free_in;
- mr->mmkey.type = MLX5_MKEY_MR; - mr->ibmr.lkey = mr->mmkey.key; - mr->ibmr.rkey = mr->mmkey.key; - mr->umem = NULL; kfree(in);
return &mr->ibmr;
-err_destroy_psv: - if (mr->sig) { - if (mlx5_core_destroy_psv(dev->mdev, - mr->sig->psv_memory.psv_idx)) - mlx5_ib_warn(dev, "failed to destroy mem psv %d\n", - mr->sig->psv_memory.psv_idx); - if (mlx5_core_destroy_psv(dev->mdev, - mr->sig->psv_wire.psv_idx)) - mlx5_ib_warn(dev, "failed to destroy wire psv %d\n", - mr->sig->psv_wire.psv_idx); - } - mlx5_free_priv_descs(mr); -err_free_sig: - kfree(mr->sig); err_free_in: kfree(in); err_free: @@@ -1884,19 -1758,6 +1884,19 @@@ return ERR_PTR(err); }
+struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, + u32 max_num_sg, struct ib_udata *udata) +{ + return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0); +} + +struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd, + u32 max_num_sg, u32 max_num_meta_sg) +{ + return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg, + max_num_meta_sg); +} + struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, struct ib_udata *udata) { @@@ -2026,53 -1887,16 +2026,53 @@@ done }
static int +mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, + int data_sg_nents, unsigned int *data_sg_offset, + struct scatterlist *meta_sg, int meta_sg_nents, + unsigned int *meta_sg_offset) +{ + struct mlx5_ib_mr *mr = to_mmr(ibmr); + unsigned int sg_offset = 0; + int n = 0; + + mr->meta_length = 0; + if (data_sg_nents == 1) { + n++; + mr->ndescs = 1; + if (data_sg_offset) + sg_offset = *data_sg_offset; + mr->data_length = sg_dma_len(data_sg) - sg_offset; + mr->data_iova = sg_dma_address(data_sg) + sg_offset; + if (meta_sg_nents == 1) { + n++; + mr->meta_ndescs = 1; + if (meta_sg_offset) + sg_offset = *meta_sg_offset; + else + sg_offset = 0; + mr->meta_length = sg_dma_len(meta_sg) - sg_offset; + mr->pi_iova = sg_dma_address(meta_sg) + sg_offset; + } + ibmr->length = mr->data_length + mr->meta_length; + } + + return n; +} + +static int mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, struct scatterlist *sgl, unsigned short sg_nents, - unsigned int *sg_offset_p) + unsigned int *sg_offset_p, + struct scatterlist *meta_sgl, + unsigned short meta_sg_nents, + unsigned int *meta_sg_offset_p) { struct scatterlist *sg = sgl; struct mlx5_klm *klms = mr->descs; unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0; u32 lkey = mr->ibmr.pd->local_dma_lkey; - int i; + int i, j = 0;
mr->ibmr.iova = sg_dma_address(sg) + sg_offset; mr->ibmr.length = 0; @@@ -2087,36 -1911,12 +2087,36 @@@
sg_offset = 0; } - mr->ndescs = i;
if (sg_offset_p) *sg_offset_p = sg_offset;
- return i; + mr->ndescs = i; + mr->data_length = mr->ibmr.length; + + if (meta_sg_nents) { + sg = meta_sgl; + sg_offset = meta_sg_offset_p ? *meta_sg_offset_p : 0; + for_each_sg(meta_sgl, sg, meta_sg_nents, j) { + if (unlikely(i + j >= mr->max_descs)) + break; + klms[i + j].va = cpu_to_be64(sg_dma_address(sg) + + sg_offset); + klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) - + sg_offset); + klms[i + j].key = cpu_to_be32(lkey); + mr->ibmr.length += sg_dma_len(sg) - sg_offset; + + sg_offset = 0; + } + if (meta_sg_offset_p) + *meta_sg_offset_p = sg_offset; + + mr->meta_ndescs = j; + mr->meta_length = mr->ibmr.length - mr->data_length; + } + + return i + j; }
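With the extension above, one KLM array carries the data descriptors first and the metadata descriptors immediately after them, and the function returns the total count. A purely illustrative layout (hypothetical addresses and lengths, not taken from the patch):

    /*
     * Hypothetical result for two data SGEs (8 KiB and 4 KiB) plus one
     * 64-byte metadata SGE, all referenced via the PD's local_dma_lkey:
     *
     *   klms[0] = { va = data0, bcount = 0x2000, key = local_dma_lkey }
     *   klms[1] = { va = data1, bcount = 0x1000, key = local_dma_lkey }
     *   klms[2] = { va = meta0, bcount = 0x0040, key = local_dma_lkey }
     *
     * mr->ndescs = 2, mr->meta_ndescs = 1,
     * mr->data_length = 0x3000, mr->meta_length = 0x40,
     * mr->ibmr.length = 0x3040, return value = 2 + 1 = 3.
     */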
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr) @@@ -2133,181 -1933,6 +2133,181 @@@ return 0; }
+static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr) +{ + struct mlx5_ib_mr *mr = to_mmr(ibmr); + __be64 *descs; + + if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs)) + return -ENOMEM; + + descs = mr->descs; + descs[mr->ndescs + mr->meta_ndescs++] = + cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); + + return 0; +} + +static int +mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, + int data_sg_nents, unsigned int *data_sg_offset, + struct scatterlist *meta_sg, int meta_sg_nents, + unsigned int *meta_sg_offset) +{ + struct mlx5_ib_mr *mr = to_mmr(ibmr); + struct mlx5_ib_mr *pi_mr = mr->mtt_mr; + int n; + + pi_mr->ndescs = 0; + pi_mr->meta_ndescs = 0; + pi_mr->meta_length = 0; + + ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map, + pi_mr->desc_size * pi_mr->max_descs, + DMA_TO_DEVICE); + + pi_mr->ibmr.page_size = ibmr->page_size; + n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset, + mlx5_set_page); + if (n != data_sg_nents) + return n; + + pi_mr->data_iova = pi_mr->ibmr.iova; + pi_mr->data_length = pi_mr->ibmr.length; + pi_mr->ibmr.length = pi_mr->data_length; + ibmr->length = pi_mr->data_length; + + if (meta_sg_nents) { + u64 page_mask = ~((u64)ibmr->page_size - 1); + u64 iova = pi_mr->data_iova; + + n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents, + meta_sg_offset, mlx5_set_page_pi); + + pi_mr->meta_length = pi_mr->ibmr.length; + /* + * PI address for the HW is the offset of the metadata address + * relative to the first data page address. + * It equals to first data page address + size of data pages + + * metadata offset at the first metadata page + */ + pi_mr->pi_iova = (iova & page_mask) + + pi_mr->ndescs * ibmr->page_size + + (pi_mr->ibmr.iova & ~page_mask); + /* + * In order to use one MTT MR for data and metadata, we register + * also the gaps between the end of the data and the start of + * the metadata (the sig MR will verify that the HW will access + * to right addresses). This mapping is safe because we use + * internal mkey for the registration. 
+ */ + pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova; + pi_mr->ibmr.iova = iova; + ibmr->length += pi_mr->meta_length; + } + + ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map, + pi_mr->desc_size * pi_mr->max_descs, + DMA_TO_DEVICE); + + return n; +} + +static int +mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, + int data_sg_nents, unsigned int *data_sg_offset, + struct scatterlist *meta_sg, int meta_sg_nents, + unsigned int *meta_sg_offset) +{ + struct mlx5_ib_mr *mr = to_mmr(ibmr); + struct mlx5_ib_mr *pi_mr = mr->klm_mr; + int n; + + pi_mr->ndescs = 0; + pi_mr->meta_ndescs = 0; + pi_mr->meta_length = 0; + + ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map, + pi_mr->desc_size * pi_mr->max_descs, + DMA_TO_DEVICE); + + n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset, + meta_sg, meta_sg_nents, meta_sg_offset); + + ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map, + pi_mr->desc_size * pi_mr->max_descs, + DMA_TO_DEVICE); + + /* This is zero-based memory region */ + pi_mr->data_iova = 0; + pi_mr->ibmr.iova = 0; + pi_mr->pi_iova = pi_mr->data_length; + ibmr->length = pi_mr->ibmr.length; + + return n; +} + +int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, + int data_sg_nents, unsigned int *data_sg_offset, + struct scatterlist *meta_sg, int meta_sg_nents, + unsigned int *meta_sg_offset) +{ + struct mlx5_ib_mr *mr = to_mmr(ibmr); + struct mlx5_ib_mr *pi_mr = NULL; + int n; + + WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY); + + mr->ndescs = 0; + mr->data_length = 0; + mr->data_iova = 0; + mr->meta_ndescs = 0; + mr->pi_iova = 0; + /* + * As a performance optimization, if possible, there is no need to + * perform UMR operation to register the data/metadata buffers. + * First try to map the sg lists to PA descriptors with local_dma_lkey. + * Fallback to UMR only in case of a failure. + */ + n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents, + data_sg_offset, meta_sg, meta_sg_nents, + meta_sg_offset); + if (n == data_sg_nents + meta_sg_nents) + goto out; + /* + * As a performance optimization, if possible, there is no need to map + * the sg lists to KLM descriptors. First try to map the sg lists to MTT + * descriptors and fallback to KLM only in case of a failure. + * It's more efficient for the HW to work with MTT descriptors + * (especially in high load). + * Use KLM (indirect access) only if it's mandatory. + */ + pi_mr = mr->mtt_mr; + n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents, + data_sg_offset, meta_sg, meta_sg_nents, + meta_sg_offset); + if (n == data_sg_nents + meta_sg_nents) + goto out; + + pi_mr = mr->klm_mr; + n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents, + data_sg_offset, meta_sg, meta_sg_nents, + meta_sg_offset); + if (unlikely(n != data_sg_nents + meta_sg_nents)) + return -ENOMEM; + +out: + /* This is zero-based memory region */ + ibmr->iova = 0; + mr->pi_mr = pi_mr; + if (pi_mr) + ibmr->sig_attrs->meta_length = pi_mr->meta_length; + else + ibmr->sig_attrs->meta_length = mr->meta_length; + + return 0; +} + int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset) { @@@ -2321,8 -1946,7 +2321,8 @@@ DMA_TO_DEVICE);
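The MTT variant above places the metadata pages directly after the data pages of the same internal MR, so pi_iova is the metadata offset expressed relative to the first data page. A worked example with hypothetical numbers (4 KiB pages):

    /*
     * Hypothetical mapping, ibmr->page_size = 4096 (page_mask = ~0xfff):
     *
     *   data: iova = 0x10000000, length = 0x2000  -> ndescs = 2 pages
     *   meta: iova = 0x20000040, length = 0x40    -> offset 0x40 in its page
     *
     *   pi_iova = (0x10000000 & page_mask)      first data page address
     *           + 2 * 4096                      size of the data pages
     *           + (0x20000040 & ~page_mask)     metadata offset in its page
     *           = 0x10002040
     *
     *   pi_mr->ibmr.length = pi_iova + meta_length - data iova = 0x2080
     *   ibmr->length       = data_length + meta_length        = 0x2040
     */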
if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS) - n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset); + n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0, + NULL); else n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx5_set_page); diff --combined drivers/infiniband/hw/mlx5/odp.c index 3d18b6ea9efa,c594489eb2d7..5991b28bdf60 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@@ -150,7 -150,7 +150,7 @@@ static struct ib_umem_odp *odp_lookup(u if (!rb) goto not_found; odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb); - if (ib_umem_start(&odp->umem) > start + length) + if (ib_umem_start(odp) > start + length) goto not_found; } not_found: @@@ -200,7 -200,7 +200,7 @@@ void mlx5_odp_populate_klm(struct mlx5_ static void mr_leaf_free_action(struct work_struct *work) { struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work); - int idx = ib_umem_start(&odp->umem) >> MLX5_IMR_MTT_SHIFT; + int idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT; struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;
mr->parent = NULL; @@@ -224,6 -224,7 +224,6 @@@ void mlx5_ib_invalidate_range(struct ib const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT / sizeof(struct mlx5_mtt)) - 1; u64 idx = 0, blk_start_idx = 0; - struct ib_umem *umem; int in_block = 0; u64 addr;
@@@ -231,14 -232,15 +231,14 @@@ pr_err("invalidation called on NULL umem or non-ODP umem\n"); return; } - umem = &umem_odp->umem;
mr = umem_odp->private;
if (!mr || !mr->ibmr.pd) return;
- start = max_t(u64, ib_umem_start(umem), start); - end = min_t(u64, ib_umem_end(umem), end); + start = max_t(u64, ib_umem_start(umem_odp), start); + end = min_t(u64, ib_umem_end(umem_odp), end);
/* * Iteration one - zap the HW's MTTs. The notifiers_count ensures that @@@ -247,8 -249,8 +247,8 @@@ * but they will write 0s as well, so no difference in the end result. */
- for (addr = start; addr < end; addr += BIT(umem->page_shift)) { - idx = (addr - ib_umem_start(umem)) >> umem->page_shift; + for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) { + idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift; /* * Strive to write the MTTs in chunks, but avoid overwriting * non-existing MTTs. The huristic here can be improved to @@@ -542,12 -544,13 +542,12 @@@ static int mr_leaf_free(struct ib_umem_ void *cookie) { struct mlx5_ib_mr *mr = umem_odp->private, *imr = cookie; - struct ib_umem *umem = &umem_odp->umem;
if (mr->parent != imr) return 0;
- ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem), - ib_umem_end(umem)); + ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp), + ib_umem_end(umem_odp));
if (umem_odp->dying) return 0; @@@ -599,9 -602,9 +599,9 @@@ static int pagefault_mr(struct mlx5_ib_ }
next_mr: - size = min_t(size_t, bcnt, ib_umem_end(&odp->umem) - io_virt); + size = min_t(size_t, bcnt, ib_umem_end(odp) - io_virt);
- page_shift = mr->umem->page_shift; + page_shift = odp->page_shift; page_mask = ~(BIT(page_shift) - 1); start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift; access_mask = ODP_READ_ALLOWED_BIT; @@@ -765,7 -768,7 +765,7 @@@ static int pagefault_single_data_segmen bcnt -= *bytes_committed;
next_mr: - mmkey = __mlx5_mr_lookup(dev->mdev, mlx5_base_mkey(key)); + mmkey = xa_load(&dev->mdev->priv.mkey_table, mlx5_base_mkey(key)); if (!mkey_is_eq(mmkey, key)) { mlx5_ib_dbg(dev, "failed to find mkey %x\n", key); ret = -EFAULT; @@@ -1683,8 -1686,8 +1683,8 @@@ static void num_pending_prefetch_dec(st struct mlx5_core_mkey *mmkey; struct mlx5_ib_mr *mr;
- mmkey = __mlx5_mr_lookup(dev->mdev, - mlx5_base_mkey(sg_list[i].lkey)); + mmkey = xa_load(&dev->mdev->priv.mkey_table, + mlx5_base_mkey(sg_list[i].lkey)); mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); atomic_dec(&mr->num_pending_prefetch); } @@@ -1703,8 -1706,8 +1703,8 @@@ static bool num_pending_prefetch_inc(st struct mlx5_core_mkey *mmkey; struct mlx5_ib_mr *mr;
- mmkey = __mlx5_mr_lookup(dev->mdev, - mlx5_base_mkey(sg_list[i].lkey)); + mmkey = xa_load(&dev->mdev->priv.mkey_table, + mlx5_base_mkey(sg_list[i].lkey)); if (!mmkey || mmkey->key != sg_list[i].lkey) { ret = false; break; diff --combined drivers/infiniband/hw/qedr/main.c index a0a7ba0a5af4,5ebf3c53b3fb..533157a2a3be --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@@ -183,10 -183,6 +183,10 @@@ static void qedr_roce_register_device(s }
static const struct ib_device_ops qedr_dev_ops = { + .owner = THIS_MODULE, + .driver_id = RDMA_DRIVER_QEDR, + .uverbs_abi_ver = QEDR_ABI_VERSION, + .alloc_mr = qedr_alloc_mr, .alloc_pd = qedr_alloc_pd, .alloc_ucontext = qedr_alloc_ucontext, @@@ -224,7 -220,6 +224,7 @@@ .resize_cq = qedr_resize_cq,
INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah), + INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_pd, qedr_pd, ibpd), INIT_RDMA_OBJ_SIZE(ib_srq, qedr_srq, ibsrq), INIT_RDMA_OBJ_SIZE(ib_ucontext, qedr_ucontext, ibucontext), @@@ -236,6 -231,8 +236,6 @@@ static int qedr_register_device(struct
dev->ibdev.node_guid = dev->attr.node_guid; memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC)); - dev->ibdev.owner = THIS_MODULE; - dev->ibdev.uverbs_abi_ver = QEDR_ABI_VERSION;
dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) | QEDR_UVERBS(QUERY_DEVICE) | @@@ -277,6 -274,7 +277,6 @@@ rdma_set_device_sysfs_group(&dev->ibdev, &qedr_attr_group); ib_set_device_ops(&dev->ibdev, &qedr_dev_ops);
- dev->ibdev.driver_id = RDMA_DRIVER_QEDR; rc = ib_device_set_netdev(&dev->ibdev, dev->ndev, 1); if (rc) return rc; @@@ -314,7 -312,8 +314,8 @@@ static void qedr_free_mem_sb(struct qed struct qed_sb_info *sb_info, int sb_id) { if (sb_info->sb_virt) { - dev->ops->common->sb_release(dev->cdev, sb_info, sb_id); + dev->ops->common->sb_release(dev->cdev, sb_info, sb_id, + QED_SB_TYPE_CNQ); dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt), (void *)sb_info->sb_virt, sb_info->sb_phys); } @@@ -506,11 -505,13 +507,13 @@@ static irqreturn_t qedr_irq_handler(in static void qedr_sync_free_irqs(struct qedr_dev *dev) { u32 vector; + u16 idx; int i;
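This qedr hunk (and the usnic one further below) reflects the same RDMA core change: module owner, driver_id and the uverbs ABI version move off struct ib_device and into the const ib_device_ops table consumed by ib_set_device_ops(). A hypothetical driver-side sketch of the resulting pattern, with made-up values:

    #include <rdma/ib_verbs.h>

    /* Hypothetical driver: the three identity fields now live in the ops
     * table instead of being assigned on struct ib_device at registration. */
    static const struct ib_device_ops foo_dev_ops = {
        .owner          = THIS_MODULE,
        .driver_id      = RDMA_DRIVER_QEDR, /* the driver's own enum value */
        .uverbs_abi_ver = 8,                /* hypothetical ABI version */

        /* ...the usual verb callbacks follow... */
    };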
for (i = 0; i < dev->int_info.used_cnt; i++) { if (dev->int_info.msix_cnt) { - vector = dev->int_info.msix[i * dev->num_hwfns].vector; + idx = i * dev->num_hwfns + dev->affin_hwfn_idx; + vector = dev->int_info.msix[idx].vector; synchronize_irq(vector); free_irq(vector, &dev->cnq_array[i]); } @@@ -522,6 -523,7 +525,7 @@@ static int qedr_req_msix_irqs(struct qedr_dev *dev) { int i, rc = 0; + u16 idx;
if (dev->num_cnq > dev->int_info.msix_cnt) { DP_ERR(dev, @@@ -531,7 -533,8 +535,8 @@@ }
for (i = 0; i < dev->num_cnq; i++) { - rc = request_irq(dev->int_info.msix[i * dev->num_hwfns].vector, + idx = i * dev->num_hwfns + dev->affin_hwfn_idx; + rc = request_irq(dev->int_info.msix[idx].vector, qedr_irq_handler, 0, dev->cnq_array[i].name, &dev->cnq_array[i]); if (rc) { @@@ -868,6 -871,16 +873,16 @@@ static struct qedr_dev *qedr_add(struc dev->user_dpm_enabled = dev_info.user_dpm_enabled; dev->rdma_type = dev_info.rdma_type; dev->num_hwfns = dev_info.common.num_hwfns; + + if (IS_IWARP(dev) && QEDR_IS_CMT(dev)) { + rc = dev->ops->iwarp_set_engine_affin(cdev, false); + if (rc) { + DP_ERR(dev, "iWARP is disabled over a 100g device Enabling it may impact L2 performance. To enable it run devlink dev param set <dev> name iwarp_cmt value true cmode runtime\n"); + goto init_err; + } + } + dev->affin_hwfn_idx = dev->ops->common->get_affin_hwfn_idx(cdev); + dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);
dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev); @@@ -928,6 -941,10 +943,10 @@@ static void qedr_remove(struct qedr_de qedr_stop_hw(dev); qedr_sync_free_irqs(dev); qedr_free_resources(dev); + + if (IS_IWARP(dev) && QEDR_IS_CMT(dev)) + dev->ops->iwarp_set_engine_affin(dev->cdev, true); + ib_dealloc_device(&dev->ibdev); }
diff --combined drivers/infiniband/hw/usnic/usnic_ib_main.c index 6ae5ce007fed,34c1f9d6c915..03f54eb9404b --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@@ -329,10 -329,6 +329,10 @@@ static void usnic_get_dev_fw_str(struc }
static const struct ib_device_ops usnic_dev_ops = { + .owner = THIS_MODULE, + .driver_id = RDMA_DRIVER_USNIC, + .uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION, + .alloc_pd = usnic_ib_alloc_pd, .alloc_ucontext = usnic_ib_alloc_ucontext, .create_cq = usnic_ib_create_cq, @@@ -354,7 -350,6 +354,7 @@@ .query_qp = usnic_ib_query_qp, .reg_user_mr = usnic_ib_reg_mr, INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd), + INIT_RDMA_OBJ_SIZE(ib_cq, usnic_ib_cq, ibcq), INIT_RDMA_OBJ_SIZE(ib_ucontext, usnic_ib_ucontext, ibucontext), };
@@@ -389,10 -384,12 +389,10 @@@ static void *usnic_ib_device_add(struc
us_ibdev->pdev = dev; us_ibdev->netdev = pci_get_drvdata(dev); - us_ibdev->ib_dev.owner = THIS_MODULE; us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP; us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT; us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS; us_ibdev->ib_dev.dev.parent = &dev->dev; - us_ibdev->ib_dev.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION;
us_ibdev->ib_dev.uverbs_cmd_mask = (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | @@@ -415,6 -412,7 +415,6 @@@
ib_set_device_ops(&us_ibdev->ib_dev, &usnic_dev_ops);
- us_ibdev->ib_dev.driver_id = RDMA_DRIVER_USNIC; rdma_set_device_sysfs_group(&us_ibdev->ib_dev, &usnic_attr_group);
ret = ib_device_set_netdev(&us_ibdev->ib_dev, us_ibdev->netdev, 1); @@@ -429,11 -427,16 +429,16 @@@ if (netif_carrier_ok(us_ibdev->netdev)) usnic_fwd_carrier_up(us_ibdev->ufdev);
- ind = in_dev_get(netdev); - if (ind->ifa_list) - usnic_fwd_add_ipaddr(us_ibdev->ufdev, - ind->ifa_list->ifa_address); - in_dev_put(ind); + rcu_read_lock(); + ind = __in_dev_get_rcu(netdev); + if (ind) { + const struct in_ifaddr *ifa; + + ifa = rcu_dereference(ind->ifa_list); + if (ifa) + usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address); + } + rcu_read_unlock();
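The usnic change above follows the conversion of ifa_list to RCU protection: instead of taking a reference with in_dev_get() and dereferencing ifa_list directly, the address is read under rcu_read_lock(). A minimal, self-contained sketch of that pattern (hypothetical helper, not part of the patch):

    #include <linux/inetdevice.h>
    #include <linux/rcupdate.h>

    /* Hypothetical helper: return the first IPv4 address of a netdev, or 0.
     * ifa_list is RCU-protected, so walk it inside rcu_read_lock(). */
    static __be32 first_ipv4_addr(struct net_device *netdev)
    {
        struct in_device *in_dev;
        const struct in_ifaddr *ifa;
        __be32 addr = 0;

        rcu_read_lock();
        in_dev = __in_dev_get_rcu(netdev);
        if (in_dev) {
            ifa = rcu_dereference(in_dev->ifa_list);
            if (ifa)
                addr = ifa->ifa_address;
        }
        rcu_read_unlock();

        return addr;
    }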
usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr, us_ibdev->ufdev->inaddr, &gid.raw[0]); diff --combined drivers/net/dsa/mv88e6xxx/chip.c index 2e8b1ab2c6f7,40b1fb22d8df..6b17cd961d06 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@@ -118,9 -118,9 +118,9 @@@ static irqreturn_t mv88e6xxx_g1_irq_thr u16 ctl1; int err;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, ®); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
if (err) goto out; @@@ -135,13 -135,13 +135,13 @@@ } }
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1); if (err) goto unlock; err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, ®); unlock: - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip); if (err) goto out; ctl1 &= GENMASK(chip->g1_irq.nirqs, 0); @@@ -162,7 -162,7 +162,7 @@@ static void mv88e6xxx_g1_irq_bus_lock(s { struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); }
static void mv88e6xxx_g1_irq_bus_sync_unlock(struct irq_data *d) @@@ -184,7 -184,7 +184,7 @@@ goto out;
out: - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip); }
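The rest of the mv88e6xxx diff below is a mechanical conversion of mutex_lock(&chip->reg_lock)/mutex_unlock() pairs to mv88e6xxx_reg_lock()/mv88e6xxx_reg_unlock(). The wrapper bodies are not shown in this merge; presumably they are thin helpers around the same mutex, roughly along these lines (a sketch under that assumption, not the actual implementation):

    /* Assumed shape of the wrappers: one choke point for the register lock,
     * so assertions or tracing can later be added in a single place. */
    void mv88e6xxx_reg_lock(struct mv88e6xxx_chip *chip)
    {
        mutex_lock(&chip->reg_lock);
    }

    void mv88e6xxx_reg_unlock(struct mv88e6xxx_chip *chip)
    {
        mutex_unlock(&chip->reg_lock);
    }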
static const struct irq_chip mv88e6xxx_g1_irq_chip = { @@@ -239,9 -239,9 +239,9 @@@ static void mv88e6xxx_g1_irq_free(struc */ free_irq(chip->irq, chip);
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); mv88e6xxx_g1_irq_free_common(chip); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip); }
static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip) @@@ -310,12 -310,12 +310,12 @@@ static int mv88e6xxx_g1_irq_setup(struc */ irq_set_lockdep_class(chip->irq, &lock_key, &request_key);
- mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip); err = request_threaded_irq(chip->irq, NULL, mv88e6xxx_g1_irq_thread_fn, IRQF_ONESHOT | IRQF_SHARED, dev_name(chip->dev), chip); - mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); if (err) mv88e6xxx_g1_irq_free_common(chip);
@@@ -359,9 -359,9 +359,9 @@@ static void mv88e6xxx_irq_poll_free(str kthread_cancel_delayed_work_sync(&chip->irq_poll_work); kthread_destroy_worker(chip->kworker);
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); mv88e6xxx_g1_irq_free_common(chip); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip); }
int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask) @@@ -496,11 -496,11 +496,11 @@@ static void mv88e6xxx_adjust_link(struc mv88e6xxx_phy_is_internal(ds, port)) return;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); err = mv88e6xxx_port_setup_mac(chip, port, phydev->link, phydev->speed, phydev->duplex, phydev->pause, phydev->interface); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
if (err && err != -EOPNOTSUPP) dev_err(ds->dev, "p%d: failed to configure MAC\n", port); @@@ -616,12 -616,12 +616,12 @@@ static int mv88e6xxx_link_state(struct struct mv88e6xxx_chip *chip = ds->priv; int err;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); if (chip->info->ops->port_link_state) err = chip->info->ops->port_link_state(chip, port, state); else err = -EOPNOTSUPP; - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
return err; } @@@ -651,10 -651,10 +651,10 @@@ static void mv88e6xxx_mac_config(struc } pause = !!phylink_test(state->advertising, Pause);
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); err = mv88e6xxx_port_setup_mac(chip, port, link, speed, duplex, pause, state->interface); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
if (err && err != -EOPNOTSUPP) dev_err(ds->dev, "p%d: failed to configure MAC\n", port); @@@ -665,9 -665,9 +665,9 @@@ static void mv88e6xxx_mac_link_force(st struct mv88e6xxx_chip *chip = ds->priv; int err;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); err = chip->info->ops->port_set_link(chip, port, link); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
if (err) dev_err(chip->dev, "p%d: failed to force MAC link\n", port); @@@ -825,6 -825,12 +825,12 @@@ static int mv88e6095_stats_get_strings( STATS_TYPE_BANK0 | STATS_TYPE_PORT); }
+ static int mv88e6250_stats_get_strings(struct mv88e6xxx_chip *chip, + uint8_t *data) + { + return mv88e6xxx_stats_get_strings(chip, data, STATS_TYPE_BANK0); + } + static int mv88e6320_stats_get_strings(struct mv88e6xxx_chip *chip, uint8_t *data) { @@@ -859,7 -865,7 +865,7 @@@ static void mv88e6xxx_get_strings(struc if (stringset != ETH_SS_STATS) return;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip);
if (chip->info->ops->stats_get_strings) count = chip->info->ops->stats_get_strings(chip, data); @@@ -872,7 -878,7 +878,7 @@@ data += count * ETH_GSTRING_LEN; mv88e6xxx_atu_vtu_get_strings(data);
- mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip); }
static int mv88e6xxx_stats_get_sset_count(struct mv88e6xxx_chip *chip, @@@ -895,6 -901,11 +901,11 @@@ static int mv88e6095_stats_get_sset_cou STATS_TYPE_PORT); }
+ static int mv88e6250_stats_get_sset_count(struct mv88e6xxx_chip *chip) + { + return mv88e6xxx_stats_get_sset_count(chip, STATS_TYPE_BANK0); + } + static int mv88e6320_stats_get_sset_count(struct mv88e6xxx_chip *chip) { return mv88e6xxx_stats_get_sset_count(chip, STATS_TYPE_BANK0 | @@@ -910,7 -921,7 +921,7 @@@ static int mv88e6xxx_get_sset_count(str if (sset != ETH_SS_STATS) return 0;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); if (chip->info->ops->stats_get_sset_count) count = chip->info->ops->stats_get_sset_count(chip); if (count < 0) @@@ -927,7 -938,7 +938,7 @@@ count += ARRAY_SIZE(mv88e6xxx_atu_vtu_stats_strings);
out: - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
return count; } @@@ -942,11 -953,11 +953,11 @@@ static int mv88e6xxx_stats_get_stats(st for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) { stat = &mv88e6xxx_hw_stats[i]; if (stat->type & types) { - mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); data[j] = _mv88e6xxx_get_ethtool_stat(chip, stat, port, bank1_select, histogram); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
j++; } @@@ -962,6 -973,13 +973,13 @@@ static int mv88e6095_stats_get_stats(st 0, MV88E6XXX_G1_STATS_OP_HIST_RX_TX); }
+ static int mv88e6250_stats_get_stats(struct mv88e6xxx_chip *chip, int port, + uint64_t *data) + { + return mv88e6xxx_stats_get_stats(chip, port, data, STATS_TYPE_BANK0, + 0, MV88E6XXX_G1_STATS_OP_HIST_RX_TX); + } + static int mv88e6320_stats_get_stats(struct mv88e6xxx_chip *chip, int port, uint64_t *data) { @@@ -998,14 -1016,14 +1016,14 @@@ static void mv88e6xxx_get_stats(struct if (chip->info->ops->stats_get_stats) count = chip->info->ops->stats_get_stats(chip, port, data);
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); if (chip->info->ops->serdes_get_stats) { data += count; count = chip->info->ops->serdes_get_stats(chip, port, data); } data += count; mv88e6xxx_atu_vtu_get_stats(chip, port, data); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip); }
static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port, @@@ -1014,10 -1032,10 +1032,10 @@@ struct mv88e6xxx_chip *chip = ds->priv; int ret;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip);
ret = mv88e6xxx_stats_snapshot(chip, port); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
if (ret < 0) return; @@@ -1044,7 -1062,7 +1062,7 @@@ static void mv88e6xxx_get_regs(struct d
memset(p, 0xff, 32 * sizeof(u16));
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip);
for (i = 0; i < 32; i++) {
@@@ -1053,7 -1071,7 +1071,7 @@@ p[i] = reg; }
- mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip); }
static int mv88e6xxx_get_mac_eee(struct dsa_switch *ds, int port, @@@ -1119,9 -1137,9 +1137,9 @@@ static void mv88e6xxx_port_stp_state_se struct mv88e6xxx_chip *chip = ds->priv; int err;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); err = mv88e6xxx_port_set_state(chip, port, state); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
if (err) dev_err(ds->dev, "p%d: failed to update state\n", port); @@@ -1306,9 -1324,9 +1324,9 @@@ static void mv88e6xxx_port_fast_age(str struct mv88e6xxx_chip *chip = ds->priv; int err;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); err = mv88e6xxx_g1_atu_remove(chip, 0, port, false); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
if (err) dev_err(ds->dev, "p%d: failed to flush ATU\n", port); @@@ -1436,7 -1454,7 +1454,7 @@@ static int mv88e6xxx_port_check_hw_vlan if (!vid_begin) return -EOPNOTSUPP;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip);
do { err = mv88e6xxx_vtu_getnext(chip, &vlan); @@@ -1476,7 -1494,7 +1494,7 @@@ } while (vlan.vid < vid_end);
unlock: - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
return err; } @@@ -1492,9 -1510,9 +1510,9 @@@ static int mv88e6xxx_port_vlan_filterin if (!chip->info->max_vid) return -EOPNOTSUPP;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); err = mv88e6xxx_port_set_8021q_mode(chip, port, mode); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
return err; } @@@ -1628,7 -1646,7 +1646,7 @@@ static void mv88e6xxx_port_vlan_add(str else member = MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_TAGGED;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip);
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) if (_mv88e6xxx_port_vlan_add(chip, port, vid, member)) @@@ -1639,7 -1657,7 +1657,7 @@@ dev_err(ds->dev, "p%d: failed to set PVID %d\n", port, vlan->vid_end);
- mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip); }
static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip, @@@ -1685,7 -1703,7 +1703,7 @@@ static int mv88e6xxx_port_vlan_del(stru if (!chip->info->max_vid) return -EOPNOTSUPP;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_get_pvid(chip, port, &pvid); if (err) @@@ -1704,7 -1722,7 +1722,7 @@@ }
unlock: - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
return err; } @@@ -1715,10 -1733,10 +1733,10 @@@ static int mv88e6xxx_port_fdb_add(struc struct mv88e6xxx_chip *chip = ds->priv; int err;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid, MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
return err; } @@@ -1729,10 -1747,10 +1747,10 @@@ static int mv88e6xxx_port_fdb_del(struc struct mv88e6xxx_chip *chip = ds->priv; int err;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid, MV88E6XXX_G1_ATU_DATA_STATE_UNUSED); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
return err; } @@@ -1749,9 -1767,7 +1767,7 @@@ static int mv88e6xxx_port_db_dump_fid(s eth_broadcast_addr(addr.mac);
do { - mutex_lock(&chip->reg_lock); err = mv88e6xxx_g1_atu_getnext(chip, fid, &addr); - mutex_unlock(&chip->reg_lock); if (err) return err;
@@@ -1784,10 -1800,7 +1800,7 @@@ static int mv88e6xxx_port_db_dump(struc int err;
/* Dump port's default Filtering Information Database (VLAN ID 0) */ - mutex_lock(&chip->reg_lock); err = mv88e6xxx_port_get_fid(chip, port, &fid); - mutex_unlock(&chip->reg_lock); - if (err) return err;
@@@ -1797,9 -1810,7 +1810,7 @@@
/* Dump VLANs' Filtering Information Databases */ do { - mutex_lock(&chip->reg_lock); err = mv88e6xxx_vtu_getnext(chip, &vlan); - mutex_unlock(&chip->reg_lock); if (err) return err;
@@@ -1819,8 -1830,13 +1830,13 @@@ static int mv88e6xxx_port_fdb_dump(stru dsa_fdb_dump_cb_t *cb, void *data) { struct mv88e6xxx_chip *chip = ds->priv; + int err;
- return mv88e6xxx_port_db_dump(chip, port, cb, data); + mv88e6xxx_reg_lock(chip); + err = mv88e6xxx_port_db_dump(chip, port, cb, data); + mv88e6xxx_reg_unlock(chip); + + return err; }
static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip, @@@ -1867,9 -1883,9 +1883,9 @@@ static int mv88e6xxx_port_bridge_join(s struct mv88e6xxx_chip *chip = ds->priv; int err;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); err = mv88e6xxx_bridge_map(chip, br); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
return err; } @@@ -1879,11 -1895,11 +1895,11 @@@ static void mv88e6xxx_port_bridge_leave { struct mv88e6xxx_chip *chip = ds->priv;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); if (mv88e6xxx_bridge_map(chip, br) || mv88e6xxx_port_vlan_map(chip, port)) dev_err(ds->dev, "failed to remap in-chip Port VLAN\n"); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip); }
static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds, int dev, @@@ -1895,9 -1911,9 +1911,9 @@@ if (!mv88e6xxx_has_pvt(chip)) return 0;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); err = mv88e6xxx_pvt_map(chip, dev, port); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
return err; } @@@ -1910,10 -1926,10 +1926,10 @@@ static void mv88e6xxx_crosschip_bridge_ if (!mv88e6xxx_has_pvt(chip)) return;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); if (mv88e6xxx_pvt_map(chip, dev, port)) dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n"); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip); }
static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip) @@@ -2264,14 -2280,14 +2280,14 @@@ static int mv88e6xxx_port_enable(struc struct mv88e6xxx_chip *chip = ds->priv; int err;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_serdes_power(chip, port, true);
if (!err && chip->info->ops->serdes_irq_setup) err = chip->info->ops->serdes_irq_setup(chip, port);
- mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
return err; } @@@ -2280,7 -2296,7 +2296,7 @@@ static void mv88e6xxx_port_disable(stru { struct mv88e6xxx_chip *chip = ds->priv;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip);
if (mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED)) dev_err(chip->dev, "failed to disable port\n"); @@@ -2291,7 -2307,7 +2307,7 @@@ if (mv88e6xxx_serdes_power(chip, port, false)) dev_err(chip->dev, "failed to power off SERDES\n");
- mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip); }
static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds, @@@ -2300,9 -2316,9 +2316,9 @@@ struct mv88e6xxx_chip *chip = ds->priv; int err;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); err = mv88e6xxx_g1_atu_set_age_time(chip, ageing_time); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
return err; } @@@ -2432,7 -2448,7 +2448,7 @@@ static int mv88e6xxx_setup(struct dsa_s chip->ds = ds; ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip);
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip);
if (chip->info->ops->setup_errata) { err = chip->info->ops->setup_errata(chip); @@@ -2539,7 -2555,7 +2555,7 @@@ goto unlock;
unlock: - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
return err; } @@@ -2554,9 -2570,9 +2570,9 @@@ static int mv88e6xxx_mdio_read(struct m if (!chip->info->ops->phy_read) return -EOPNOTSUPP;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); err = chip->info->ops->phy_read(chip, bus, phy, reg, &val); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
if (reg == MII_PHYSID2) { /* Some internal PHYs don't have a model number. */ @@@ -2589,9 -2605,9 +2605,9 @@@ static int mv88e6xxx_mdio_write(struct if (!chip->info->ops->phy_write) return -EOPNOTSUPP;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); err = chip->info->ops->phy_write(chip, bus, phy, reg, val); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
return err; } @@@ -2606,9 -2622,9 +2622,9 @@@ static int mv88e6xxx_mdio_register(stru int err;
if (external) { - mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); err = mv88e6xxx_g2_scratch_gpio_set_smi(chip, true); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
if (err) return err; @@@ -2729,9 -2745,9 +2745,9 @@@ static int mv88e6xxx_get_eeprom(struct if (!chip->info->ops->get_eeprom) return -EOPNOTSUPP;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); err = chip->info->ops->get_eeprom(chip, eeprom, data); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
if (err) return err; @@@ -2753,9 -2769,9 +2769,9 @@@ static int mv88e6xxx_set_eeprom(struct if (eeprom->magic != 0xc3ec4951) return -EINVAL;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); err = chip->info->ops->set_eeprom(chip, eeprom, data); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
return err; } @@@ -3444,6 -3460,44 +3460,44 @@@ static const struct mv88e6xxx_ops mv88e .phylink_validate = mv88e6352_phylink_validate, };
+ static const struct mv88e6xxx_ops mv88e6250_ops = { + /* MV88E6XXX_FAMILY_6250 */ + .ieee_pri_map = mv88e6250_g1_ieee_pri_map, + .ip_pri_map = mv88e6085_g1_ip_pri_map, + .irl_init_all = mv88e6352_g2_irl_init_all, + .get_eeprom = mv88e6xxx_g2_get_eeprom16, + .set_eeprom = mv88e6xxx_g2_set_eeprom16, + .set_switch_mac = mv88e6xxx_g2_set_switch_mac, + .phy_read = mv88e6xxx_g2_smi_phy_read, + .phy_write = mv88e6xxx_g2_smi_phy_write, + .port_set_link = mv88e6xxx_port_set_link, + .port_set_duplex = mv88e6xxx_port_set_duplex, + .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, + .port_set_speed = mv88e6250_port_set_speed, + .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_frame_mode = mv88e6351_port_set_frame_mode, + .port_set_egress_floods = mv88e6352_port_set_egress_floods, + .port_set_ether_type = mv88e6351_port_set_ether_type, + .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, + .port_pause_limit = mv88e6097_port_pause_limit, + .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, + .port_link_state = mv88e6250_port_link_state, + .stats_snapshot = mv88e6320_g1_stats_snapshot, + .stats_set_histogram = mv88e6095_g1_stats_set_histogram, + .stats_get_sset_count = mv88e6250_stats_get_sset_count, + .stats_get_strings = mv88e6250_stats_get_strings, + .stats_get_stats = mv88e6250_stats_get_stats, + .set_cpu_port = mv88e6095_g1_set_cpu_port, + .set_egress_port = mv88e6095_g1_set_egress_port, + .watchdog_ops = &mv88e6250_watchdog_ops, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, + .reset = mv88e6250_g1_reset, + .vtu_getnext = mv88e6250_g1_vtu_getnext, + .vtu_loadpurge = mv88e6250_g1_vtu_loadpurge, + .phylink_validate = mv88e6065_phylink_validate, + }; + static const struct mv88e6xxx_ops mv88e6290_ops = { /* MV88E6XXX_FAMILY_6390 */ .setup_errata = mv88e6390_setup_errata, @@@ -4229,6 -4283,27 +4283,27 @@@ static const struct mv88e6xxx_info mv88 .ops = &mv88e6240_ops, },
+ [MV88E6250] = { + .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6250, + .family = MV88E6XXX_FAMILY_6250, + .name = "Marvell 88E6250", + .num_databases = 64, + .num_ports = 7, + .num_internal_phys = 5, + .max_vid = 4095, + .port_base_addr = 0x08, + .phy_base_addr = 0x00, + .global1_addr = 0x0f, + .global2_addr = 0x07, + .age_time_coeff = 15000, + .g1_irqs = 9, + .g2_irqs = 10, + .atu_move_port_mask = 0xf, + .dual_chip = true, + .tag_protocol = DSA_TAG_PROTO_DSA, + .ops = &mv88e6250_ops, + }, + [MV88E6290] = { .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6290, .family = MV88E6XXX_FAMILY_6390, @@@ -4457,9 -4532,9 +4532,9 @@@ static int mv88e6xxx_detect(struct mv88 u16 id; int err;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); err = mv88e6xxx_port_read(chip, 0, MV88E6XXX_PORT_SWITCH_ID, &id); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip); if (err) return err;
@@@ -4522,12 -4597,12 +4597,12 @@@ static void mv88e6xxx_port_mdb_add(stru { struct mv88e6xxx_chip *chip = ds->priv;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); if (mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid, MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC)) dev_err(ds->dev, "p%d: failed to load multicast MAC address\n", port); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip); }
static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port, @@@ -4536,10 -4611,10 +4611,10 @@@ struct mv88e6xxx_chip *chip = ds->priv; int err;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid, MV88E6XXX_G1_ATU_DATA_STATE_UNUSED); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
return err; } @@@ -4550,12 -4625,12 +4625,12 @@@ static int mv88e6xxx_port_egress_floods struct mv88e6xxx_chip *chip = ds->priv; int err = -EOPNOTSUPP;
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); if (chip->info->ops->port_set_egress_floods) err = chip->info->ops->port_set_egress_floods(chip, port, unicast, multicast); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
return err; } @@@ -4711,8 -4786,6 +4786,8 @@@ static int mv88e6xxx_probe(struct mdio_ err = PTR_ERR(chip->reset); goto out; } + if (chip->reset) + usleep_range(1000, 2000);
err = mv88e6xxx_detect(chip); if (err) @@@ -4728,9 -4801,9 +4803,9 @@@ chip->eeprom_len = pdata->eeprom_len; }
- mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); err = mv88e6xxx_switch_reset(chip); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip); if (err) goto out;
@@@ -4749,12 -4822,12 +4824,12 @@@ * the PHYs will link their interrupts to these interrupt * controllers */ - mutex_lock(&chip->reg_lock); + mv88e6xxx_reg_lock(chip); if (chip->irq > 0) err = mv88e6xxx_g1_irq_setup(chip); else err = mv88e6xxx_irq_poll_setup(chip); - mutex_unlock(&chip->reg_lock); + mv88e6xxx_reg_unlock(chip);
if (err) goto out; @@@ -4839,6 -4912,10 +4914,10 @@@ static const struct of_device_id mv88e6 .compatible = "marvell,mv88e6190", .data = &mv88e6xxx_table[MV88E6190], }, + { + .compatible = "marvell,mv88e6250", + .data = &mv88e6xxx_table[MV88E6250], + }, { /* sentinel */ }, };
diff --combined drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index c12c1bab0fe4,c4986b519191..656ed80647f0 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@@ -684,7 -684,7 +684,7 @@@ static void *bnx2x_frag_alloc(const str if (unlikely(gfpflags_allow_blocking(gfp_mask))) return (void *)__get_free_page(gfp_mask);
- return netdev_alloc_frag(fp->rx_frag_size); + return napi_alloc_frag(fp->rx_frag_size); }
return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask); @@@ -3857,12 -3857,9 +3857,12 @@@ netdev_tx_t bnx2x_start_xmit(struct sk_
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { if (!(bp->flags & TX_TIMESTAMPING_EN)) { + bp->eth_stats.ptp_skip_tx_ts++; BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n"); } else if (bp->ptp_tx_skb) { - BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n"); + bp->eth_stats.ptp_skip_tx_ts++; + netdev_err_once(bp->dev, + "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n"); } else { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; /* schedule check for Tx timestamp */ diff --combined drivers/net/ethernet/cadence/macb.h index 98735584570d,515bfd2c9e3f..03983bd46eef --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@@ -496,11 -496,7 +496,11 @@@
/* Bitfields in TISUBN */ #define GEM_SUBNSINCR_OFFSET 0 -#define GEM_SUBNSINCR_SIZE 16 +#define GEM_SUBNSINCRL_OFFSET 24 +#define GEM_SUBNSINCRL_SIZE 8 +#define GEM_SUBNSINCRH_OFFSET 0 +#define GEM_SUBNSINCRH_SIZE 16 +#define GEM_SUBNSINCR_SIZE 24
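The TISUBN sub-nanosecond increment grows from a single 16-bit field to 24 bits split across two fields: 8 low bits at register offset 24 and 16 high bits at offset 0. Assuming the caller packs the value with the GEM_BF() helper that macb.h already provides, the register value is presumably built along these lines (hypothetical function name, not part of the patch):

    /* Hypothetical packing of a 24-bit sub-ns increment into TISUBN:
     * the low 8 bits land in SUBNSINCRL (bits 31:24), the remaining
     * 16 bits in SUBNSINCRH (bits 15:0). GEM_BF() masks and shifts
     * according to the *_OFFSET/*_SIZE definitions above. */
    static u32 gem_tisubn_value(u32 sub_ns) /* 24-bit fraction */
    {
        return GEM_BF(SUBNSINCRL, sub_ns) |
               GEM_BF(SUBNSINCRH, sub_ns >> GEM_SUBNSINCRL_SIZE);
    }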
/* Bitfields in TI */ #define GEM_NSINCR_OFFSET 0 @@@ -838,9 -834,6 +838,9 @@@ struct gem_tx_ts /* limit RX checksum offload to TCP and UDP packets */ #define GEM_RX_CSUM_CHECKED_MASK 2
+/* Scaled PPM fraction */ +#define PPM_FRACTION 16 + /* struct macb_tx_skb - data about an skb which is being transmitted * @skb: skb currently being transmitted, only set for the last buffer * of the frame @@@ -1067,7 -1060,8 +1067,8 @@@ struct macb_or_gem_ops int (*mog_alloc_rx_buffers)(struct macb *bp); void (*mog_free_rx_buffers)(struct macb *bp); void (*mog_init_rings)(struct macb *bp); - int (*mog_rx)(struct macb_queue *queue, int budget); + int (*mog_rx)(struct macb_queue *queue, struct napi_struct *napi, + int budget); };
/* MACB-PTP interface: adapt to platform needs. */ diff --combined drivers/net/ethernet/ti/cpsw.c index 4e3026f9abed,32b7b3b74a6b..1e70ae7bbd61 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@@ -457,16 -457,13 +457,13 @@@ static void cpsw_rx_handler(void *token }
requeue: - if (netif_dormant(ndev)) { - dev_kfree_skb_any(new_skb); - return; - } - ch = cpsw->rxv[skb_get_queue_mapping(new_skb)].ch; ret = cpdma_chan_submit(ch, new_skb, new_skb->data, skb_tailroom(new_skb), 0); - if (WARN_ON(ret < 0)) + if (ret < 0) { + WARN_ON(ret == -ENOMEM); dev_kfree_skb_any(new_skb); + } }
void cpsw_split_res(struct cpsw_common *cpsw) @@@ -1051,9 -1048,9 +1048,9 @@@ int cpsw_fill_rx_channels(struct cpsw_p }
skb_set_queue_mapping(skb, ch); - ret = cpdma_chan_submit(cpsw->rxv[ch].ch, skb, - skb->data, skb_tailroom(skb), - 0); + ret = cpdma_chan_idle_submit(cpsw->rxv[ch].ch, skb, + skb->data, + skb_tailroom(skb), 0); if (ret < 0) { cpsw_err(priv, ifup, "cannot submit skb to channel %d rx, error %d\n", @@@ -1423,8 -1420,11 +1420,11 @@@ static int cpsw_ndo_open(struct net_dev return 0;
err_cleanup: - cpdma_ctlr_stop(cpsw->dma); - for_each_slave(priv, cpsw_slave_stop, cpsw); + if (!cpsw->usage_count) { + cpdma_ctlr_stop(cpsw->dma); + for_each_slave(priv, cpsw_slave_stop, cpsw); + } + pm_runtime_put_sync(cpsw->dev); netif_carrier_off(priv->ndev); return ret; @@@ -2179,7 -2179,6 +2179,7 @@@ static int cpsw_probe_dt(struct cpsw_pl return ret; }
+ slave_data->slave_node = slave_node; slave_data->phy_node = of_parse_phandle(slave_node, "phy-handle", 0); parp = of_get_property(slave_node, "phy_id", &lenp); @@@ -2263,8 -2262,7 +2263,7 @@@ no_phy_slave
static void cpsw_remove_dt(struct platform_device *pdev) { - struct net_device *ndev = platform_get_drvdata(pdev); - struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + struct cpsw_common *cpsw = platform_get_drvdata(pdev); struct cpsw_platform_data *data = &cpsw->data; struct device_node *node = pdev->dev.of_node; struct device_node *slave_node; @@@ -2331,7 -2329,6 +2330,7 @@@ static int cpsw_probe_dual_emac(struct
/* register the network device */ SET_NETDEV_DEV(ndev, cpsw->dev); + ndev->dev.of_node = cpsw->slaves[1].data->slave_node; ret = register_netdev(ndev); if (ret) dev_err(cpsw->dev, "cpsw: error registering net device\n"); @@@ -2476,7 -2473,7 +2475,7 @@@ static int cpsw_probe(struct platform_d goto clean_cpts; }
- platform_set_drvdata(pdev, ndev); + platform_set_drvdata(pdev, cpsw); priv = netdev_priv(ndev); priv->cpsw = cpsw; priv->ndev = ndev; @@@ -2509,7 -2506,6 +2508,7 @@@
/* register the network device */ SET_NETDEV_DEV(ndev, dev); + ndev->dev.of_node = cpsw->slaves[0].data->slave_node; ret = register_netdev(ndev); if (ret) { dev_err(dev, "error registering net device\n"); @@@ -2570,9 -2566,8 +2569,8 @@@ clean_runtime_disable_ret
static int cpsw_remove(struct platform_device *pdev) { - struct net_device *ndev = platform_get_drvdata(pdev); - struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - int ret; + struct cpsw_common *cpsw = platform_get_drvdata(pdev); + int i, ret;
ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) { @@@ -2580,9 -2575,9 +2578,9 @@@ return ret; }
- if (cpsw->data.dual_emac) - unregister_netdev(cpsw->slaves[1].ndev); - unregister_netdev(ndev); + for (i = 0; i < cpsw->data.slaves; i++) + if (cpsw->slaves[i].ndev) + unregister_netdev(cpsw->slaves[i].ndev);
cpts_release(cpsw->cpts); cpdma_ctlr_destroy(cpsw->dma); @@@ -2595,20 -2590,13 +2593,13 @@@ #ifdef CONFIG_PM_SLEEP static int cpsw_suspend(struct device *dev) { - struct net_device *ndev = dev_get_drvdata(dev); - struct cpsw_common *cpsw = ndev_to_cpsw(ndev); - - if (cpsw->data.dual_emac) { - int i; + struct cpsw_common *cpsw = dev_get_drvdata(dev); + int i;
- for (i = 0; i < cpsw->data.slaves; i++) { + for (i = 0; i < cpsw->data.slaves; i++) + if (cpsw->slaves[i].ndev) if (netif_running(cpsw->slaves[i].ndev)) cpsw_ndo_stop(cpsw->slaves[i].ndev); - } - } else { - if (netif_running(ndev)) - cpsw_ndo_stop(ndev); - }
/* Select sleep pin state */ pinctrl_pm_select_sleep_state(dev); @@@ -2618,25 -2606,20 +2609,20 @@@
static int cpsw_resume(struct device *dev) { - struct net_device *ndev = dev_get_drvdata(dev); - struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + struct cpsw_common *cpsw = dev_get_drvdata(dev); + int i;
/* Select default pin state */ pinctrl_pm_select_default_state(dev);
/* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */ rtnl_lock(); - if (cpsw->data.dual_emac) { - int i;
- for (i = 0; i < cpsw->data.slaves; i++) { + for (i = 0; i < cpsw->data.slaves; i++) + if (cpsw->slaves[i].ndev) if (netif_running(cpsw->slaves[i].ndev)) cpsw_ndo_open(cpsw->slaves[i].ndev); - } - } else { - if (netif_running(ndev)) - cpsw_ndo_open(ndev); - } + rtnl_unlock();
return 0; diff --combined fs/afs/cmservice.c index 602d75bf9bb2,1ea39971eb91..4f1b6f466ff5 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@@ -256,11 -256,8 +256,11 @@@ static void SRXAFSCB_CallBack(struct wo * server holds up change visibility till it receives our reply so as * to maintain cache coherency. */ - if (call->server) + if (call->server) { + trace_afs_server(call->server, atomic_read(&call->server->usage), + afs_server_trace_callback); afs_break_callbacks(call->server, call->count, call->request); + }
afs_send_empty_reply(call); afs_put_call(call); @@@ -583,9 -580,8 +583,8 @@@ static int afs_deliver_cb_probe_uuid(st */ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work) { - struct afs_interface *ifs; struct afs_call *call = container_of(work, struct afs_call, work); - int loop, nifs; + int loop;
struct { struct /* InterfaceAddr */ { @@@ -603,19 -599,7 +602,7 @@@
_enter("");
- nifs = 0; - ifs = kcalloc(32, sizeof(*ifs), GFP_KERNEL); - if (ifs) { - nifs = afs_get_ipv4_interfaces(call->net, ifs, 32, false); - if (nifs < 0) { - kfree(ifs); - ifs = NULL; - nifs = 0; - } - } - memset(&reply, 0, sizeof(reply)); - reply.ia.nifs = htonl(nifs);
reply.ia.uuid[0] = call->net->uuid.time_low; reply.ia.uuid[1] = htonl(ntohs(call->net->uuid.time_mid)); @@@ -625,15 -609,6 +612,6 @@@ for (loop = 0; loop < 6; loop++) reply.ia.uuid[loop + 5] = htonl((s8) call->net->uuid.node[loop]);
- if (ifs) { - for (loop = 0; loop < nifs; loop++) { - reply.ia.ifaddr[loop] = ifs[loop].address.s_addr; - reply.ia.netmask[loop] = ifs[loop].netmask.s_addr; - reply.ia.mtu[loop] = htonl(ifs[loop].mtu); - } - kfree(ifs); - } - reply.cap.capcount = htonl(1); reply.cap.caps[0] = htonl(AFS_CAP_ERROR_TRANSLATION); afs_send_simple_reply(call, &reply, sizeof(reply)); diff --combined fs/afs/internal.h index be37fafbaeb5,0f84d0da5417..f66a3be12fd6 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@@ -514,7 -514,6 +514,7 @@@ struct afs_server atomic_t usage; u32 addr_version; /* Address list version */ u32 cm_epoch; /* Server RxRPC epoch */ + unsigned int debug_id; /* Debugging ID for traces */
/* file service access */ rwlock_t fs_lock; /* access lock */ @@@ -720,15 -719,6 +720,6 @@@ struct afs_permits };
/* - * record of one of a system's set of network interfaces - */ - struct afs_interface { - struct in_addr address; /* IPv4 address bound to interface */ - struct in_addr netmask; /* netmask applied to address */ - unsigned mtu; /* MTU of interface */ - }; - - /* * Error prioritisation and accumulation. */ struct afs_error { @@@ -845,9 -835,9 +836,9 @@@ extern struct fscache_cookie_def afs_vn * callback.c */ extern void afs_init_callback_state(struct afs_server *); -extern void __afs_break_callback(struct afs_vnode *); -extern void afs_break_callback(struct afs_vnode *); -extern void afs_break_callbacks(struct afs_server *, size_t, struct afs_callback_break*); +extern void __afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason); +extern void afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason); +extern void afs_break_callbacks(struct afs_server *, size_t, struct afs_callback_break *);
extern int afs_register_server_cb_interest(struct afs_vnode *, struct afs_server_list *, unsigned int); @@@ -1091,12 -1081,6 +1082,6 @@@ extern struct vfsmount *afs_d_automount extern void afs_mntpt_kill_timer(void);
/* - * netdevices.c - */ - extern int afs_get_ipv4_interfaces(struct afs_net *, struct afs_interface *, - size_t, bool); - - /* * proc.c */ #ifdef CONFIG_PROC_FS @@@ -1241,12 -1225,17 +1226,12 @@@ extern void __exit afs_clean_up_permit_ */ extern spinlock_t afs_server_peer_lock;
-static inline struct afs_server *afs_get_server(struct afs_server *server) -{ - atomic_inc(&server->usage); - return server; -} - extern struct afs_server *afs_find_server(struct afs_net *, const struct sockaddr_rxrpc *); extern struct afs_server *afs_find_server_by_uuid(struct afs_net *, const uuid_t *); extern struct afs_server *afs_lookup_server(struct afs_cell *, struct key *, const uuid_t *); -extern void afs_put_server(struct afs_net *, struct afs_server *); +extern struct afs_server *afs_get_server(struct afs_server *, enum afs_server_trace); +extern void afs_put_server(struct afs_net *, struct afs_server *, enum afs_server_trace); extern void afs_manage_servers(struct work_struct *); extern void afs_servers_timer(struct timer_list *); extern void __net_exit afs_purge_servers(struct afs_net *); @@@ -1430,7 -1419,7 +1415,7 @@@ static inline void afs_check_for_remote { if (fc->ac.error == -ENOENT) { set_bit(AFS_VNODE_DELETED, &vnode->flags); - afs_break_callback(vnode); + afs_break_callback(vnode, afs_cb_break_for_deleted); } }
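Editorial note: the fs/afs/internal.h hunk above replaces the open-coded inline refcount bump with out-of-line afs_get_server()/afs_put_server() helpers that take an enum afs_server_trace reason, matching the trace_afs_server() call added to SRXAFSCB_CallBack() earlier in this diff. A minimal sketch of what such a getter could look like (an assumption for illustration, not the body added by this commit):

        /* Illustrative sketch only - not the implementation from this commit. */
        struct afs_server *afs_get_server(struct afs_server *server,
                                          enum afs_server_trace reason)
        {
                unsigned int u = atomic_inc_return(&server->usage);

                /* Record who took the reference and the resulting count. */
                trace_afs_server(server, u, reason);
                return server;
        }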
diff --combined include/linux/mlx5/qp.h index 08e43cd9e742,d1f353c64797..90f5f889742d --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@@ -37,8 -37,7 +37,8 @@@ #include <linux/mlx5/driver.h>
#define MLX5_INVALID_LKEY 0x100 -#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5) +/* UMR (3 WQE_BB's) + SIG (3 WQE_BB's) + PSV (mem) + PSV (wire) */ +#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 8) #define MLX5_DIF_SIZE 8 #define MLX5_STRIDE_BLOCK_OP 0x400 #define MLX5_CPY_GRD_MASK 0xc0 @@@ -552,11 -551,6 +552,6 @@@ static inline struct mlx5_core_qp *__ml return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); }
- static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key) - { - return radix_tree_lookup(&dev->priv.mkey_table.tree, key); - } - int mlx5_core_create_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *qp, u32 *in, int inlen, diff --combined include/linux/phylink.h index 5b130140fb8f,0fe57a261c9c..300ecdb6790a --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@@ -54,6 -54,21 +54,21 @@@ struct phylink_link_state unsigned int an_complete:1; };
+ enum phylink_op_type { + PHYLINK_NETDEV = 0, + PHYLINK_DEV, + }; + + /** + * struct phylink_config - PHYLINK configuration structure + * @dev: a pointer to a struct device associated with the MAC + * @type: operation type of PHYLINK instance + */ + struct phylink_config { + struct device *dev; + enum phylink_op_type type; + }; + /** * struct phylink_mac_ops - MAC operations structure. * @validate: Validate and update the link configuration. @@@ -66,16 -81,17 +81,17 @@@ * The individual methods are described more fully below. */ struct phylink_mac_ops { - void (*validate)(struct net_device *ndev, unsigned long *supported, + void (*validate)(struct phylink_config *config, + unsigned long *supported, struct phylink_link_state *state); - int (*mac_link_state)(struct net_device *ndev, + int (*mac_link_state)(struct phylink_config *config, struct phylink_link_state *state); - void (*mac_config)(struct net_device *ndev, unsigned int mode, + void (*mac_config)(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state); - void (*mac_an_restart)(struct net_device *ndev); - void (*mac_link_down)(struct net_device *ndev, unsigned int mode, + void (*mac_an_restart)(struct phylink_config *config); + void (*mac_link_down)(struct phylink_config *config, unsigned int mode, phy_interface_t interface); - void (*mac_link_up)(struct net_device *ndev, unsigned int mode, + void (*mac_link_up)(struct phylink_config *config, unsigned int mode, phy_interface_t interface, struct phy_device *phy); }; @@@ -83,7 -99,7 +99,7 @@@ #if 0 /* For kernel-doc purposes only. */ /** * validate - Validate and update the link configuration - * @ndev: a pointer to a &struct net_device for the MAC. + * @config: a pointer to a &struct phylink_config. * @supported: ethtool bitmask for supported link modes. * @state: a pointer to a &struct phylink_link_state. * @@@ -93,26 -109,19 +109,26 @@@ * Note that the PHY may be able to transform from one connection * technology to another, so, eg, don't clear 1000BaseX just * because the MAC is unable to BaseX mode. This is more about - * clearing unsupported speeds and duplex settings. + * clearing unsupported speeds and duplex settings. The port modes + * should not be cleared; phylink_set_port_modes() will help with this. * * If the @state->interface mode is %PHY_INTERFACE_MODE_1000BASEX * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode * based on @state->advertising and/or @state->speed and update - * @state->interface accordingly. + * @state->interface accordingly. See phylink_helper_basex_speed(). + * + * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink expects the + * MAC driver to return all supported link modes. + * + * If the @state->interface mode is not supported, then the @supported + * mask must be cleared. */ - void validate(struct net_device *ndev, unsigned long *supported, + void validate(struct phylink_config *config, unsigned long *supported, struct phylink_link_state *state);
/** * mac_link_state() - Read the current link state from the hardware - * @ndev: a pointer to a &struct net_device for the MAC. + * @config: a pointer to a &struct phylink_config. * @state: a pointer to a &struct phylink_link_state. * * Read the current link state from the MAC, reporting the current @@@ -121,12 -130,12 +137,12 @@@ * negotiation completion state in @state->an_complete, and link * up state in @state->link. */ - int mac_link_state(struct net_device *ndev, + int mac_link_state(struct phylink_config *config, struct phylink_link_state *state);
/** * mac_config() - configure the MAC for the selected mode and state - * @ndev: a pointer to a &struct net_device for the MAC. + * @config: a pointer to a &struct phylink_config. * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND. * @state: a pointer to a &struct phylink_link_state. * @@@ -175,18 -184,18 +191,18 @@@ * down. This "update" behaviour is critical to avoid bouncing the * link up status. */ - void mac_config(struct net_device *ndev, unsigned int mode, + void mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state);
/** * mac_an_restart() - restart 802.3z BaseX autonegotiation - * @ndev: a pointer to a &struct net_device for the MAC. + * @config: a pointer to a &struct phylink_config. */ - void mac_an_restart(struct net_device *ndev); + void mac_an_restart(struct phylink_config *config);
/** * mac_link_down() - take the link down - * @ndev: a pointer to a &struct net_device for the MAC. + * @config: a pointer to a &struct phylink_config. * @mode: link autonegotiation mode * @interface: link &typedef phy_interface_t mode * @@@ -195,12 -204,12 +211,12 @@@ * Energy Efficient Ethernet MAC configuration. Interface type * selection must be done in mac_config(). */ - void mac_link_down(struct net_device *ndev, unsigned int mode, + void mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface);
/** * mac_link_up() - allow the link to come up - * @ndev: a pointer to a &struct net_device for the MAC. + * @config: a pointer to a &struct phylink_config. * @mode: link autonegotiation mode * @interface: link &typedef phy_interface_t mode * @phy: any attached phy @@@ -211,13 -220,14 +227,14 @@@ * phy_init_eee() and perform appropriate MAC configuration for EEE. * Interface type selection must be done in mac_config(). */ - void mac_link_up(struct net_device *ndev, unsigned int mode, + void mac_link_up(struct phylink_config *config, unsigned int mode, phy_interface_t interface, struct phy_device *phy); #endif
- struct phylink *phylink_create(struct net_device *, struct fwnode_handle *, - phy_interface_t iface, const struct phylink_mac_ops *ops); + struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *, + phy_interface_t iface, + const struct phylink_mac_ops *ops); void phylink_destroy(struct phylink *);
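A note on the phylink hunks above: every phylink_mac_ops callback and phylink_create() now take a struct phylink_config, which the MAC driver is expected to embed in its private data and recover with container_of(). A minimal sketch of the resulting driver-side pattern (the foo_* names are hypothetical, not from this commit):

        /* Illustrative sketch only; foo_* names are made up for this note. */
        #include <linux/linkmode.h>
        #include <linux/netdevice.h>
        #include <linux/phylink.h>

        struct foo_priv {
                struct net_device *ndev;
                struct phylink *phylink;
                struct phylink_config phylink_config;
                __ETHTOOL_DECLARE_LINK_MODE_MASK(hw_supported);
        };

        static void foo_validate(struct phylink_config *config,
                                 unsigned long *supported,
                                 struct phylink_link_state *state)
        {
                struct foo_priv *priv = container_of(config, struct foo_priv,
                                                     phylink_config);

                /* Clear what the hardware cannot do, but keep the port modes. */
                linkmode_and(supported, supported, priv->hw_supported);
                linkmode_and(state->advertising, state->advertising,
                             priv->hw_supported);
        }

        static const struct phylink_mac_ops foo_phylink_ops = {
                .validate = foo_validate,
                /* .mac_link_state, .mac_config, .mac_an_restart, .mac_link_down
                 * and .mac_link_up take a struct phylink_config the same way.
                 */
        };

        static int foo_phylink_setup(struct foo_priv *priv,
                                     struct fwnode_handle *fwnode)
        {
                priv->phylink_config.dev = &priv->ndev->dev;
                priv->phylink_config.type = PHYLINK_NETDEV;

                priv->phylink = phylink_create(&priv->phylink_config, fwnode,
                                               PHY_INTERFACE_MODE_SGMII,
                                               &foo_phylink_ops);
                return PTR_ERR_OR_ZERO(priv->phylink);
        }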
int phylink_connect_phy(struct phylink *, struct phy_device *); diff --combined include/net/ip_vs.h index b36a1df93e7c,cb1ad0cc5c7b..3759167f91f5 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@@ -603,6 -603,7 +603,7 @@@ struct ip_vs_dest_user_kern
u16 tun_type; /* tunnel type */ __be16 tun_port; /* tunnel port */ + u16 tun_flags; /* tunnel flags */ };
@@@ -665,6 -666,7 +666,7 @@@ struct ip_vs_dest atomic_t last_weight; /* server latest weight */ __u16 tun_type; /* tunnel type */ __be16 tun_port; /* tunnel port */ + __u16 tun_flags; /* tunnel flags */
refcount_t refcnt; /* reference counter */ struct ip_vs_stats stats; /* statistics */ @@@ -808,12 -810,11 +810,12 @@@ struct ipvs_master_sync_state struct ip_vs_sync_buff *sync_buff; unsigned long sync_queue_len; unsigned int sync_queue_delay; - struct task_struct *master_thread; struct delayed_work master_wakeup_work; struct netns_ipvs *ipvs; };
+struct ip_vs_sync_thread_data; + /* How much time to keep dests in trash */ #define IP_VS_DEST_TRASH_PERIOD (120 * HZ)
@@@ -944,8 -945,7 +946,8 @@@ struct netns_ipvs spinlock_t sync_lock; struct ipvs_master_sync_state *ms; spinlock_t sync_buff_lock; - struct task_struct **backup_threads; + struct ip_vs_sync_thread_data *master_tinfo; + struct ip_vs_sync_thread_data *backup_tinfo; int threads_mask; volatile int sync_state; struct mutex sync_mutex; @@@ -1406,6 -1406,9 +1408,9 @@@ bool ip_vs_has_real_service(struct netn struct ip_vs_dest * ip_vs_find_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol, const union nf_inet_addr *daddr, __be16 dport); + struct ip_vs_dest *ip_vs_find_tunnel(struct netns_ipvs *ipvs, int af, + const union nf_inet_addr *daddr, + __be16 tun_port);
int ip_vs_use_count_inc(void); void ip_vs_use_count_dec(void); @@@ -1499,6 -1502,9 +1504,9 @@@ static inline int ip_vs_todrop(struct n static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; } #endif
+ #define IP_VS_DFWD_METHOD(dest) (atomic_read(&(dest)->conn_flags) & \ + IP_VS_CONN_F_FWD_MASK) + /* ip_vs_fwd_tag returns the forwarding tag of the connection */ #define IP_VS_FWD_METHOD(cp) (cp->flags & IP_VS_CONN_F_FWD_MASK)
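Editorial note: with IPVS_DEST_ATTR_TUN_FLAGS added to the destination netlink policy further down (net/netfilter/ipvs/ip_vs_ctl.c), userspace configures a GUE destination by supplying tunnel type, port and flags as attributes; in a real IPVS_CMD_NEW_DEST request these sit inside the nested destination attribute. A rough libnl-3 sketch of filling just those attributes (message and genl header setup omitted; this is an assumption for illustration, not part of this commit):

        /* Illustrative sketch only - nesting and message setup not shown. */
        #include <netlink/attr.h>
        #include <linux/ip_vs.h>
        #include <arpa/inet.h>
        #include <stdint.h>

        static int fill_gue_dest_attrs(struct nl_msg *msg, uint16_t gue_port,
                                       uint16_t tun_flags)
        {
                if (nla_put_u8(msg, IPVS_DEST_ATTR_TUN_TYPE,
                               IP_VS_CONN_F_TUNNEL_TYPE_GUE) ||
                    nla_put_u16(msg, IPVS_DEST_ATTR_TUN_PORT, htons(gue_port)) ||
                    nla_put_u16(msg, IPVS_DEST_ATTR_TUN_FLAGS, tun_flags))
                        return -1;
                return 0;
        }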
diff --combined include/uapi/linux/bpf.h index 29a5bc3d5c66,b077507efa3f..db59598d6409 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@@ -262,6 -262,24 +262,24 @@@ enum bpf_attach_type */ #define BPF_F_ANY_ALIGNMENT (1U << 1)
+ /* BPF_F_TEST_RND_HI32 is used in the BPF_PROG_LOAD command for testing purposes. + * The verifier does sub-register def/use analysis and identifies instructions whose + * def only matters for the low 32 bits, while the high 32 bits (normally set by + * implicit zero extension) are never referenced later. Therefore the verifier notifies JIT back-ends + * that it is safe to ignore clearing the high 32 bits for these instructions. This + * saves some back-ends a lot of code-gen. However, such optimization is not + * necessary on some arches, for example x86_64 and arm64, whose JIT back-ends + * hence haven't used the verifier's analysis result. But we really want a + * way to verify the correctness of the described optimization on + * x86_64, on which testsuites are frequently exercised. + * + * So, this flag is introduced. Once it is set, the verifier will randomize the high + * 32 bits of those instructions that have been identified as safe to leave without + * zero extension. Then, if the verifier's analysis is incorrect, such randomization will + * make tests regress and expose the bug. + */ + #define BPF_F_TEST_RND_HI32 (1U << 2) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * two extensions: * @@@ -2674,6 -2692,20 +2692,20 @@@ union bpf_attr * 0 on success. * * **-ENOENT** if the bpf-local-storage cannot be found. + * + * int bpf_send_signal(u32 sig) + * Description + * Send signal *sig* to the current task. + * Return + * 0 on success or successfully queued. + * + * **-EBUSY** if work queue under nmi is full. + * + * **-EINVAL** if *sig* is invalid. + * + * **-EPERM** if no permission to send the *sig*. + * + * **-EAGAIN** if bpf program can try again. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@@ -2784,7 -2816,8 +2816,8 @@@ FN(strtol), \ FN(strtoul), \ FN(sk_storage_get), \ - FN(sk_storage_delete), + FN(sk_storage_delete), \ + FN(send_signal),
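Editorial note: the BPF_F_TEST_RND_HI32 flag described above is only consumed at program load time. A minimal sketch of passing it through the raw bpf(2) syscall (the socket-filter program type and the wrapper function are arbitrary choices for illustration, not from this commit):

        /* Illustrative sketch only - not part of this commit. */
        #include <linux/bpf.h>
        #include <string.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        static int load_with_hi32_randomization(const struct bpf_insn *insns,
                                                unsigned int insn_cnt)
        {
                union bpf_attr attr;

                memset(&attr, 0, sizeof(attr));
                attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
                attr.insns = (unsigned long)insns;
                attr.insn_cnt = insn_cnt;
                attr.license = (unsigned long)"GPL";
                /* Ask the verifier to poison high 32 bits it believes are dead. */
                attr.prog_flags = BPF_F_TEST_RND_HI32;

                return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
        }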
/* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@@ -3052,6 -3085,10 +3085,10 @@@ struct bpf_sock_tuple }; };
+ struct bpf_xdp_sock { + __u32 queue_id; + }; + #define XDP_PACKET_HEADROOM 256
/* User return codes for XDP prog type. @@@ -3143,7 -3180,6 +3180,7 @@@ struct bpf_prog_info char name[BPF_OBJ_NAME_LEN]; __u32 ifindex; __u32 gpl_compatible:1; + __u32 :31; /* alignment pad */ __u64 netns_dev; __u64 netns_ino; __u32 nr_jited_ksyms; @@@ -3213,6 -3249,7 +3250,7 @@@ struct bpf_sock_addr __u32 msg_src_ip6[4]; /* Allows 1,2,4-byte read and 4-byte write. * Stored in network byte order. */ + __bpf_md_ptr(struct bpf_sock *, sk); };
/* User bpf_sock_ops struct to access socket values and specify request ops @@@ -3264,6 -3301,7 +3302,7 @@@ struct bpf_sock_ops __u32 sk_txhash; __u64 bytes_received; __u64 bytes_acked; + __bpf_md_ptr(struct bpf_sock *, sk); };
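Editorial note: together with the net/core/filter.c hunks further down, the new sk member of struct bpf_sock_ops is verified as PTR_TO_SOCKET_OR_NULL and can be handed to bpf_tcp_sock(), which this merge also exposes to sock_ops programs. A small illustrative program, assuming the usual selftests/samples bpf_helpers.h wrappers (not part of this commit):

        /* Illustrative sketch only - not from this commit. */
        #include <linux/bpf.h>
        #include "bpf_helpers.h"

        SEC("sockops")
        int sample_rtt(struct bpf_sock_ops *skops)
        {
                struct bpf_sock *sk = skops->sk;
                struct bpf_tcp_sock *tp;

                if (!sk)                /* NULL check required by the verifier */
                        return 1;
                tp = bpf_tcp_sock(sk);
                if (!tp)
                        return 1;
                /* e.g. feed tp->srtt_us and tp->snd_cwnd into a map here */
                return 1;
        }

        char _license[] SEC("license") = "GPL";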
/* Definitions for bpf_sock_ops_cb_flags */ diff --combined include/uapi/linux/nl80211.h index fa7ebbc6ff27,8fc3a43cac75..75758ec26c8b --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@@ -235,6 -235,15 +235,15 @@@ */
/** + * DOC: SAE authentication offload + * + * By setting @NL80211_EXT_FEATURE_SAE_OFFLOAD flag drivers can indicate they + * support offloading SAE authentication for WPA3-Personal networks. In + * %NL80211_CMD_CONNECT the password for SAE should be specified using + * %NL80211_ATTR_SAE_PASSWORD. + */ + + /** * enum nl80211_commands - supported nl80211 commands * * @NL80211_CMD_UNSPEC: unspecified command to catch errors @@@ -2341,6 -2350,12 +2350,12 @@@ enum nl80211_commands * should be picking up the lowest tx power, either tx power per-interface * or per-station. * + * @NL80211_ATTR_SAE_PASSWORD: attribute for passing SAE password material. It + * is used with %NL80211_CMD_CONNECT to provide password for offloading + * SAE authentication for WPA3-Personal networks. + * + * @NL80211_ATTR_TWT_RESPONDER: Enable target wait time responder support. + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@@ -2794,6 -2809,10 +2809,10 @@@ enum nl80211_attrs NL80211_ATTR_STA_TX_POWER_SETTING, NL80211_ATTR_STA_TX_POWER,
+ NL80211_ATTR_SAE_PASSWORD, + + NL80211_ATTR_TWT_RESPONDER, + /* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST, @@@ -4406,6 -4425,7 +4425,7 @@@ enum nl80211_mfp enum nl80211_wpa_versions { NL80211_WPA_VERSION_1 = 1 << 0, NL80211_WPA_VERSION_2 = 1 << 1, + NL80211_WPA_VERSION_3 = 1 << 2, };
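Editorial note on the SAE offload additions in this file (the DOC section earlier, NL80211_ATTR_SAE_PASSWORD, NL80211_WPA_VERSION_3 and the NL80211_EXT_FEATURE_SAE_OFFLOAD flag below): a fullmac driver advertises the capability with the extended-feature flag so userspace knows it may pass the raw password in the connect request. A minimal driver-side sketch (the foo_* name is hypothetical; how the driver consumes the password in its connect handler is only hinted at):

        /* Illustrative sketch only - not part of this commit. */
        static void foo_set_wiphy_features(struct wiphy *wiphy)
        {
                /* Tell nl80211/userspace that SAE can be offloaded to the device. */
                wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SAE_OFFLOAD);

                /* The SAE password then arrives with NL80211_CMD_CONNECT and is
                 * handed to the driver through its cfg80211 connect() op.
                 */
        }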
/** @@@ -5314,7 -5334,7 +5334,7 @@@ enum nl80211_feature_flags NL80211_FEATURE_TDLS_CHANNEL_SWITCH = 1 << 28, NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR = 1 << 29, NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR = 1 << 30, - NL80211_FEATURE_ND_RANDOM_MAC_ADDR = 1 << 31, + NL80211_FEATURE_ND_RANDOM_MAC_ADDR = 1U << 31, };
/** @@@ -5422,6 -5442,9 +5442,9 @@@ * @NL80211_EXT_FEATURE_STA_TX_PWR: This driver supports controlling tx power * to a station. * + * @NL80211_EXT_FEATURE_SAE_OFFLOAD: Device wants to do SAE authentication in + * station mode (SAE password is passed as part of the connect command). + * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. */ @@@ -5466,6 -5489,7 +5489,7 @@@ enum nl80211_ext_feature_index NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD, NL80211_EXT_FEATURE_EXT_KEY_ID, NL80211_EXT_FEATURE_STA_TX_PWR, + NL80211_EXT_FEATURE_SAE_OFFLOAD,
/* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, diff --combined kernel/bpf/core.c index f2148db91439,ad3be85f1411..7bb5a31f13ed --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@@ -1364,10 -1364,10 +1364,10 @@@ select_insn insn++; CONT; ALU_ARSH_X: - DST = (u64) (u32) ((*(s32 *) &DST) >> SRC); + DST = (u64) (u32) (((s32) DST) >> SRC); CONT; ALU_ARSH_K: - DST = (u64) (u32) ((*(s32 *) &DST) >> IMM); + DST = (u64) (u32) (((s32) DST) >> IMM); CONT; ALU64_ARSH_X: (*(s64 *) &DST) >>= SRC; @@@ -1791,38 -1791,33 +1791,33 @@@ struct bpf_prog_array *bpf_prog_array_a return &empty_prog_array.hdr; }
- void bpf_prog_array_free(struct bpf_prog_array __rcu *progs) + void bpf_prog_array_free(struct bpf_prog_array *progs) { - if (!progs || - progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr) + if (!progs || progs == &empty_prog_array.hdr) return; kfree_rcu(progs, rcu); }
- int bpf_prog_array_length(struct bpf_prog_array __rcu *array) + int bpf_prog_array_length(struct bpf_prog_array *array) { struct bpf_prog_array_item *item; u32 cnt = 0;
- rcu_read_lock(); - item = rcu_dereference(array)->items; - for (; item->prog; item++) + for (item = array->items; item->prog; item++) if (item->prog != &dummy_bpf_prog.prog) cnt++; - rcu_read_unlock(); return cnt; }
- static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array, + static bool bpf_prog_array_copy_core(struct bpf_prog_array *array, u32 *prog_ids, u32 request_cnt) { struct bpf_prog_array_item *item; int i = 0;
- item = rcu_dereference_check(array, 1)->items; - for (; item->prog; item++) { + for (item = array->items; item->prog; item++) { if (item->prog == &dummy_bpf_prog.prog) continue; prog_ids[i] = item->prog->aux->id; @@@ -1835,7 -1830,7 +1830,7 @@@ return !!(item->prog); }
- int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array, + int bpf_prog_array_copy_to_user(struct bpf_prog_array *array, __u32 __user *prog_ids, u32 cnt) { unsigned long err = 0; @@@ -1846,18 -1841,12 +1841,12 @@@ * cnt = bpf_prog_array_length(); * if (cnt > 0) * bpf_prog_array_copy_to_user(..., cnt); - * so below kcalloc doesn't need extra cnt > 0 check, but - * bpf_prog_array_length() releases rcu lock and - * prog array could have been swapped with empty or larger array, - * so always copy 'cnt' prog_ids to the user. - * In a rare race the user will see zero prog_ids + * so below kcalloc doesn't need extra cnt > 0 check. */ ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN); if (!ids) return -ENOMEM; - rcu_read_lock(); nospc = bpf_prog_array_copy_core(array, ids, cnt); - rcu_read_unlock(); err = copy_to_user(prog_ids, ids, cnt * sizeof(u32)); kfree(ids); if (err) @@@ -1867,19 -1856,19 +1856,19 @@@ return 0; }
- void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array, + void bpf_prog_array_delete_safe(struct bpf_prog_array *array, struct bpf_prog *old_prog) { - struct bpf_prog_array_item *item = array->items; + struct bpf_prog_array_item *item;
- for (; item->prog; item++) + for (item = array->items; item->prog; item++) if (item->prog == old_prog) { WRITE_ONCE(item->prog, &dummy_bpf_prog.prog); break; } }
- int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, + int bpf_prog_array_copy(struct bpf_prog_array *old_array, struct bpf_prog *exclude_prog, struct bpf_prog *include_prog, struct bpf_prog_array **new_array) @@@ -1943,7 -1932,7 +1932,7 @@@ return 0; }
- int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array, + int bpf_prog_array_copy_info(struct bpf_prog_array *array, u32 *prog_ids, u32 request_cnt, u32 *prog_cnt) { @@@ -2086,6 -2075,15 +2075,15 @@@ bool __weak bpf_helper_changes_pkt_data return false; }
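Editorial note: the hunks above drop the __rcu annotation from the bpf_prog_array helpers and remove their internal rcu_read_lock(), so callers are now expected to dereference the array under RCU themselves before calling them. A sketch of the assumed caller pattern (the wrapper and its parameter are illustrative, not from this commit):

        /* Illustrative sketch only - not part of this commit. */
        static u32 count_effective_progs(struct bpf_prog_array __rcu **progs)
        {
                struct bpf_prog_array *array;
                u32 cnt = 0;

                rcu_read_lock();
                array = rcu_dereference(*progs);
                if (array)
                        cnt = bpf_prog_array_length(array);
                rcu_read_unlock();

                return cnt;
        }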
+ /* Return TRUE if the JIT backend wants verifier to enable sub-register usage + * analysis code and wants explicit zero extension inserted by verifier. + * Otherwise, return FALSE. + */ + bool __weak bpf_jit_needs_zext(void) + { + return false; + } + /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call * skb_copy_bits(), so provide a weak definition of it for NET-less config. */ diff --combined lib/Kconfig index e09b3e081a53,78ddb9526b62..f8d4088551d7 --- a/lib/Kconfig +++ b/lib/Kconfig @@@ -531,6 -531,14 +531,6 @@@ config LRU_CACH config CLZ_TAB bool
-config DDR - bool "JEDEC DDR data" - help - Data from JEDEC specs for DDR SDRAM memories, - particularly the AC timing parameters and addressing - information. This data is useful for drivers handling - DDR SDRAM controllers. - config IRQ_POLL bool "IRQ polling library" help @@@ -554,6 -562,14 +554,14 @@@ config SIGNATUR Digital signature verification. Currently only RSA is supported. Implementation is done using GnuPG MPI library
+ config DIMLIB + bool "DIM library" + default y + help + Dynamic Interrupt Moderation library. + Implements an algorithm for dynamically changing CQ moderation values + according to run time performance. + # # libfdt files, only selected if needed. # diff --combined lib/Makefile index cb66bc9c5b2f,dcb558c7554d..4f88c9b4fa02 --- a/lib/Makefile +++ b/lib/Makefile @@@ -202,10 -202,13 +202,11 @@@ obj-$(CONFIG_GLOB) += glob. obj-$(CONFIG_GLOB_SELFTEST) += globtest.o
obj-$(CONFIG_MPILIB) += mpi/ + obj-$(CONFIG_DIMLIB) += dim/ obj-$(CONFIG_SIGNATURE) += digsig.o
lib-$(CONFIG_CLZ_TAB) += clz_tab.o
-obj-$(CONFIG_DDR) += jedec_ddr_data.o - obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
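Editorial note: the new DIMLIB Kconfig and Makefile entries above build lib/dim/, the shared Dynamic Interrupt Moderation helpers. A rough sketch of the intended driver usage, assuming the include/linux/dim.h API (the ring structure and field names are hypothetical; INIT_WORK(&ring->dim.work, foo_rx_dim_work) is expected at ring setup time and is not shown):

        /* Illustrative sketch only - not part of this commit. */
        #include <linux/dim.h>
        #include <linux/workqueue.h>

        struct foo_rx_ring {
                struct dim dim;         /* per-completion-queue moderation state */
                u16 event_ctr;
                u64 packets;
                u64 bytes;
        };

        /* Work item scheduled by net_dim() whenever a new profile is selected. */
        static void foo_rx_dim_work(struct work_struct *work)
        {
                struct dim *dim = container_of(work, struct dim, work);
                struct dim_cq_moder moder =
                        net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

                /* A real driver would program the NIC's CQ here instead. */
                pr_info("foo: new CQ moderation %u usec / %u pkts\n",
                        moder.usec, moder.pkts);
                dim->state = DIM_START_MEASURE;
        }

        /* Called from the NAPI poll loop after accounting completed work. */
        static void foo_rx_dim_update(struct foo_rx_ring *ring)
        {
                struct dim_sample sample;

                dim_update_sample(ring->event_ctr, ring->packets, ring->bytes,
                                  &sample);
                net_dim(&ring->dim, sample);
        }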
diff --combined net/batman-adv/hard-interface.c index 3719cfd026f0,b5465e6e380d..c90e47342bb0 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@@ -16,6 -16,7 +16,7 @@@ #include <linux/if_ether.h> #include <linux/kernel.h> #include <linux/kref.h> + #include <linux/limits.h> #include <linux/list.h> #include <linux/netdevice.h> #include <linux/printk.h> @@@ -795,9 -796,6 +796,9 @@@ int batadv_hardif_enable_interface(stru
batadv_hardif_recalc_extra_skbroom(soft_iface);
+ if (bat_priv->algo_ops->iface.enabled) + bat_priv->algo_ops->iface.enabled(hard_iface); + out: return 0;
@@@ -923,9 -921,7 +924,7 @@@ batadv_hardif_add_interface(struct net_ hard_iface->soft_iface = NULL; hard_iface->if_status = BATADV_IF_NOT_IN_USE;
- ret = batadv_debugfs_add_hardif(hard_iface); - if (ret) - goto free_sysfs; + batadv_debugfs_add_hardif(hard_iface);
INIT_LIST_HEAD(&hard_iface->list); INIT_HLIST_HEAD(&hard_iface->neigh_list); @@@ -947,8 -943,6 +946,6 @@@
return hard_iface;
- free_sysfs: - batadv_sysfs_del_hardif(&hard_iface->hardif_obj); free_if: kfree(hard_iface); release_dev: diff --combined net/batman-adv/types.h index e0b25104cbfa,c2996296b953..6ae139d74e0f --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@@ -14,20 -14,22 +14,22 @@@ #include <linux/average.h> #include <linux/bitops.h> #include <linux/compiler.h> + #include <linux/if.h> #include <linux/if_ether.h> #include <linux/kref.h> #include <linux/netdevice.h> #include <linux/netlink.h> #include <linux/sched.h> /* for linux/wait.h */ + #include <linux/seq_file.h> + #include <linux/skbuff.h> #include <linux/spinlock.h> + #include <linux/timer.h> #include <linux/types.h> #include <linux/wait.h> #include <linux/workqueue.h> #include <uapi/linux/batadv_packet.h> #include <uapi/linux/batman_adv.h>
- struct seq_file; - #ifdef CONFIG_BATMAN_ADV_DAT
/** @@@ -402,6 -404,17 +404,17 @@@ struct batadv_orig_node * list */ struct hlist_node mcast_want_all_ipv6_node; + + /** + * @mcast_want_all_rtr4_node: a list node for the mcast.want_all_rtr4 + * list + */ + struct hlist_node mcast_want_all_rtr4_node; + /** + * @mcast_want_all_rtr6_node: a list node for the mcast.want_all_rtr6 + * list + */ + struct hlist_node mcast_want_all_rtr6_node; #endif
/** @capabilities: announced capabilities of this originator */ @@@ -1169,6 -1182,26 +1182,26 @@@ struct batadv_mcast_querier_state };
/** + * struct batadv_mcast_mla_flags - flags for the querier, bridge and tvlv state + */ + struct batadv_mcast_mla_flags { + /** @querier_ipv4: the current state of an IGMP querier in the mesh */ + struct batadv_mcast_querier_state querier_ipv4; + + /** @querier_ipv6: the current state of an MLD querier in the mesh */ + struct batadv_mcast_querier_state querier_ipv6; + + /** @enabled: whether the multicast tvlv is currently enabled */ + unsigned char enabled:1; + + /** @bridged: whether the soft interface has a bridge on top */ + unsigned char bridged:1; + + /** @tvlv_flags: the flags we have last sent in our mcast tvlv */ + u8 tvlv_flags; + }; + + /** * struct batadv_priv_mcast - per mesh interface mcast data */ struct batadv_priv_mcast { @@@ -1196,20 -1229,22 +1229,22 @@@ */ struct hlist_head want_all_ipv6_list;
- /** @querier_ipv4: the current state of an IGMP querier in the mesh */ - struct batadv_mcast_querier_state querier_ipv4; - - /** @querier_ipv6: the current state of an MLD querier in the mesh */ - struct batadv_mcast_querier_state querier_ipv6; - - /** @flags: the flags we have last sent in our mcast tvlv */ - u8 flags; + /** + * @want_all_rtr4_list: a list of orig_nodes wanting all routable IPv4 + * multicast traffic + */ + struct hlist_head want_all_rtr4_list;
- /** @enabled: whether the multicast tvlv is currently enabled */ - unsigned char enabled:1; + /** + * @want_all_rtr6_list: a list of orig_nodes wanting all routable IPv6 + * multicast traffic + */ + struct hlist_head want_all_rtr6_list;
- /** @bridged: whether the soft interface has a bridge on top */ - unsigned char bridged:1; + /** + * @mla_flags: flags for the querier, bridge and tvlv state + */ + struct batadv_mcast_mla_flags mla_flags;
/** * @mla_lock: a lock protecting mla_list and mla_flags @@@ -1228,6 -1263,12 +1263,12 @@@ /** @num_want_all_ipv6: counter for items in want_all_ipv6_list */ atomic_t num_want_all_ipv6;
+ /** @num_want_all_rtr4: counter for items in want_all_rtr4_list */ + atomic_t num_want_all_rtr4; + + /** @num_want_all_rtr6: counter for items in want_all_rtr6_list */ + atomic_t num_want_all_rtr6; + /** * @want_lists_lock: lock for protecting modifications to mcasts * want_all_{unsnoopables,ipv4,ipv6}_list (traversals are rcu-locked) @@@ -2129,9 -2170,6 +2170,9 @@@ struct batadv_algo_iface_ops /** @enable: init routing info when hard-interface is enabled */ int (*enable)(struct batadv_hard_iface *hard_iface);
+ /** @enabled: notification when hard-interface was enabled (optional) */ + void (*enabled)(struct batadv_hard_iface *hard_iface); + /** @disable: de-init routing info when hard-interface is disabled */ void (*disable)(struct batadv_hard_iface *hard_iface);
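Editorial note: @enabled above is a new, optional member of batadv_algo_iface_ops, invoked from batadv_hardif_enable_interface() once the interface is fully set up (see the hard-interface.c hunk earlier in this diff). A sketch of how an algorithm might wire it up (the FOO names are hypothetical, not from this commit):

        /* Illustrative sketch only - not part of this commit. */
        static int foo_iface_enable(struct batadv_hard_iface *hard_iface)
        {
                return 0;       /* set up per-interface routing state */
        }

        static void foo_iface_enabled(struct batadv_hard_iface *hard_iface)
        {
                /* Interface is now fully enabled; e.g. start probing neighbours. */
        }

        static void foo_iface_disable(struct batadv_hard_iface *hard_iface)
        {
        }

        /* Registered from module init with batadv_algo_register(&foo_algo_ops). */
        static struct batadv_algo_ops foo_algo_ops = {
                .name = "FOO",
                .iface = {
                        .enable = foo_iface_enable,
                        .enabled = foo_iface_enabled,   /* new optional hook */
                        .disable = foo_iface_disable,
                },
        };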
diff --combined net/bridge/netfilter/Kconfig index 36a98d36d339,f4fb0b9b927d..d978f6d820f3 --- a/net/bridge/netfilter/Kconfig +++ b/net/bridge/netfilter/Kconfig @@@ -19,6 -19,20 +19,20 @@@ config NF_LOG_BRIDG tristate "Bridge packet logging" select NF_LOG_COMMON
+ config NF_CONNTRACK_BRIDGE + tristate "IPv4/IPV6 bridge connection tracking support" + depends on NF_CONNTRACK + default n + help + Connection tracking keeps a record of what packets have passed + through your machine, in order to figure out how they are related + into connections. This is used to enhance packet filtering via + stateful policies. Enable this if you want native tracking from + the bridge. This provides a replacement for the `br_netfilter' + infrastructure. + + To compile it as a module, choose M here. If unsure, say N. + endif # NF_TABLES_BRIDGE
menuconfig BRIDGE_NF_EBTABLES @@@ -114,7 -128,7 +128,7 @@@ config BRIDGE_EBT_LIMI equivalent of the iptables limit match.
If you want to compile it as a module, say M here and read - file:Documentation/kbuild/modules.txt. If unsure, say `N'. + file:Documentation/kbuild/modules.rst. If unsure, say `N'.
config BRIDGE_EBT_MARK tristate "ebt: mark filter support" diff --combined net/core/filter.c index 3fdf1b21be36,2014d76e0d2a..4d87e79bc75c --- a/net/core/filter.c +++ b/net/core/filter.c @@@ -62,6 -62,7 +62,7 @@@ #include <net/inet_hashtables.h> #include <net/inet6_hashtables.h> #include <net/ip_fib.h> + #include <net/nexthop.h> #include <net/flow.h> #include <net/arp.h> #include <net/ipv6.h> @@@ -4670,7 -4671,7 +4671,7 @@@ static int bpf_ipv4_fib_lookup(struct n if (res.type != RTN_UNICAST) return BPF_FIB_LKUP_RET_NOT_FWDED;
- if (res.fi->fib_nhs > 1) + if (fib_info_num_path(res.fi) > 1) fib_select_path(net, &res, &fl4, NULL);
if (check_mtu) { @@@ -4737,7 -4738,7 +4738,7 @@@ static int bpf_ipv6_fib_lookup(struct n return -ENODEV;
idev = __in6_dev_get_safely(dev); - if (unlikely(!idev || !net->ipv6.devconf_all->forwarding)) + if (unlikely(!idev || !idev->cnf.forwarding)) return BPF_FIB_LKUP_RET_FWD_DISABLED;
if (flags & BPF_FIB_LOOKUP_OUTPUT) { @@@ -5694,6 -5695,46 +5695,46 @@@ BPF_CALL_1(bpf_skb_ecn_set_ce, struct s return INET_ECN_set_ce(skb); }
+ bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type, + struct bpf_insn_access_aux *info) + { + if (off < 0 || off >= offsetofend(struct bpf_xdp_sock, queue_id)) + return false; + + if (off % size != 0) + return false; + + switch (off) { + default: + return size == sizeof(__u32); + } + } + + u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, u32 *target_size) + { + struct bpf_insn *insn = insn_buf; + + #define BPF_XDP_SOCK_GET(FIELD) \ + do { \ + BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_sock, FIELD) > \ + FIELD_SIZEOF(struct bpf_xdp_sock, FIELD)); \ + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_sock, FIELD),\ + si->dst_reg, si->src_reg, \ + offsetof(struct xdp_sock, FIELD)); \ + } while (0) + + switch (si->off) { + case offsetof(struct bpf_xdp_sock, queue_id): + BPF_XDP_SOCK_GET(queue_id); + break; + } + + return insn - insn_buf; + } + static const struct bpf_func_proto bpf_skb_ecn_set_ce_proto = { .func = bpf_skb_ecn_set_ce, .gpl_only = false, @@@ -5896,6 -5937,10 +5937,10 @@@ sock_addr_func_proto(enum bpf_func_id f case BPF_FUNC_skc_lookup_tcp: return &bpf_sock_addr_skc_lookup_tcp_proto; #endif /* CONFIG_INET */ + case BPF_FUNC_sk_storage_get: + return &bpf_sk_storage_get_proto; + case BPF_FUNC_sk_storage_delete: + return &bpf_sk_storage_delete_proto; default: return bpf_base_func_proto(func_id); } @@@ -5933,6 -5978,10 +5978,10 @@@ cg_skb_func_proto(enum bpf_func_id func return &bpf_sk_storage_get_proto; case BPF_FUNC_sk_storage_delete: return &bpf_sk_storage_delete_proto; + #ifdef CONFIG_SOCK_CGROUP_DATA + case BPF_FUNC_skb_cgroup_id: + return &bpf_skb_cgroup_id_proto; + #endif #ifdef CONFIG_INET case BPF_FUNC_tcp_sock: return &bpf_tcp_sock_proto; @@@ -6113,6 -6162,14 +6162,14 @@@ sock_ops_func_proto(enum bpf_func_id fu return &bpf_get_local_storage_proto; case BPF_FUNC_perf_event_output: return &bpf_sockopt_event_output_proto; + case BPF_FUNC_sk_storage_get: + return &bpf_sk_storage_get_proto; + case BPF_FUNC_sk_storage_delete: + return &bpf_sk_storage_delete_proto; + #ifdef CONFIG_INET + case BPF_FUNC_tcp_sock: + return &bpf_tcp_sock_proto; + #endif /* CONFIG_INET */ default: return bpf_base_func_proto(func_id); } @@@ -6800,6 -6857,13 +6857,13 @@@ static bool sock_addr_is_valid_access(i if (size != size_default) return false; break; + case offsetof(struct bpf_sock_addr, sk): + if (type != BPF_READ) + return false; + if (size != sizeof(__u64)) + return false; + info->reg_type = PTR_TO_SOCKET; + break; default: if (type == BPF_READ) { if (size != size_default) @@@ -6843,6 -6907,11 +6907,11 @@@ static bool sock_ops_is_valid_access(in if (size != sizeof(__u64)) return false; break; + case offsetof(struct bpf_sock_ops, sk): + if (size != sizeof(__u64)) + return false; + info->reg_type = PTR_TO_SOCKET_OR_NULL; + break; default: if (size != size_default) return false; @@@ -7750,6 -7819,11 +7819,11 @@@ static u32 sock_addr_convert_ctx_access struct bpf_sock_addr_kern, struct in6_addr, t_ctx, s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg); break; + case offsetof(struct bpf_sock_addr, sk): + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_addr_kern, sk), + si->dst_reg, si->src_reg, + offsetof(struct bpf_sock_addr_kern, sk)); + break; }
return insn - insn_buf; @@@ -8009,6 -8083,19 +8083,19 @@@ static u32 sock_ops_convert_ctx_access( SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash, struct sock, type); break; + case offsetof(struct bpf_sock_ops, sk): + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( + struct bpf_sock_ops_kern, + is_fullsock), + si->dst_reg, si->src_reg, + offsetof(struct bpf_sock_ops_kern, + is_fullsock)); + *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( + struct bpf_sock_ops_kern, sk), + si->dst_reg, si->src_reg, + offsetof(struct bpf_sock_ops_kern, sk)); + break; } return insn - insn_buf; } diff --combined net/ipv4/igmp.c index 85107bf812f2,9a206931a342..180f6896b98b --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@@ -332,14 -332,15 +332,15 @@@ static __be32 igmpv3_get_srcaddr(struc const struct flowi4 *fl4) { struct in_device *in_dev = __in_dev_get_rcu(dev); + const struct in_ifaddr *ifa;
if (!in_dev) return htonl(INADDR_ANY);
- for_ifa(in_dev) { + in_dev_for_each_ifa_rcu(ifa, in_dev) { if (fl4->saddr == ifa->ifa_local) return fl4->saddr; - } endfor_ifa(in_dev); + }
return htonl(INADDR_ANY); } @@@ -1228,8 -1229,12 +1229,8 @@@ static void igmpv3_del_delrec(struct in if (pmc) { im->interface = pmc->interface; if (im->sfmode == MCAST_INCLUDE) { - im->tomb = pmc->tomb; - pmc->tomb = NULL; - - im->sources = pmc->sources; - pmc->sources = NULL; - + swap(im->tomb, pmc->tomb); + swap(im->sources, pmc->sources); for (psf = im->sources; psf; psf = psf->sf_next) psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; } else { diff --combined net/key/af_key.c index fe5fc4bab7ee,39b3d95094eb..b67ed3a8486c --- a/net/key/af_key.c +++ b/net/key/af_key.c @@@ -928,8 -928,7 +928,7 @@@ static struct sk_buff *__pfkey_xfrm_sta pfkey_sockaddr_fill(&x->props.saddr, 0, (struct sockaddr *) (addr + 1), x->props.family); - if (!addr->sadb_address_prefixlen) - BUG(); + BUG_ON(!addr->sadb_address_prefixlen);
/* dst address */ addr = skb_put(skb, sizeof(struct sadb_address) + sockaddr_size); @@@ -944,8 -943,7 +943,7 @@@ pfkey_sockaddr_fill(&x->id.daddr, 0, (struct sockaddr *) (addr + 1), x->props.family); - if (!addr->sadb_address_prefixlen) - BUG(); + BUG_ON(!addr->sadb_address_prefixlen);
if (!xfrm_addr_equal(&x->sel.saddr, &x->props.saddr, x->props.family)) { @@@ -2438,10 -2436,8 +2436,10 @@@ static int key_pol_get_resp(struct soc goto out; } err = pfkey_xfrm_policy2msg(out_skb, xp, dir); - if (err < 0) + if (err < 0) { + kfree_skb(out_skb); goto out; + }
out_hdr = (struct sadb_msg *) out_skb->data; out_hdr->sadb_msg_version = hdr->sadb_msg_version; @@@ -2692,10 -2688,8 +2690,10 @@@ static int dump_sp(struct xfrm_policy * return PTR_ERR(out_skb);
err = pfkey_xfrm_policy2msg(out_skb, xp, dir); - if (err < 0) + if (err < 0) { + kfree_skb(out_skb); return err; + }
out_hdr = (struct sadb_msg *) out_skb->data; out_hdr->sadb_msg_version = pfk->dump.msg_version; diff --combined net/netfilter/ipvs/ip_vs_core.c index d5103a9eb302,e8651fd621ef..f662f198b458 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@@ -34,6 -34,7 +34,7 @@@ #include <net/tcp.h> #include <net/udp.h> #include <net/icmp.h> /* for icmp_send */ + #include <net/gue.h> #include <net/route.h> #include <net/ip6_checksum.h> #include <net/netns/generic.h> /* net_generic() */ @@@ -892,7 -893,7 +893,7 @@@ static int handle_response_icmp(int af if (IPPROTO_TCP == protocol || IPPROTO_UDP == protocol || IPPROTO_SCTP == protocol) offset += 2 * sizeof(__u16); - if (!skb_make_writable(skb, offset)) + if (skb_ensure_writable(skb, offset)) goto out;
#ifdef CONFIG_IP_VS_IPV6 @@@ -1282,7 -1283,7 +1283,7 @@@ handle_response(int af, struct sk_buff
IP_VS_DBG_PKT(11, af, pp, skb, iph->off, "Outgoing packet");
- if (!skb_make_writable(skb, iph->len)) + if (skb_ensure_writable(skb, iph->len)) goto drop;
/* mangle the packet */ @@@ -1574,6 -1575,41 +1575,41 @@@ ip_vs_try_to_schedule(struct netns_ipv return 1; }
+ /* Check the UDP tunnel and return its header length */ + static int ipvs_udp_decap(struct netns_ipvs *ipvs, struct sk_buff *skb, + unsigned int offset, __u16 af, + const union nf_inet_addr *daddr, __u8 *proto) + { + struct udphdr _udph, *udph; + struct ip_vs_dest *dest; + + udph = skb_header_pointer(skb, offset, sizeof(_udph), &_udph); + if (!udph) + goto unk; + offset += sizeof(struct udphdr); + dest = ip_vs_find_tunnel(ipvs, af, daddr, udph->dest); + if (!dest) + goto unk; + if (dest->tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) { + struct guehdr _gueh, *gueh; + + gueh = skb_header_pointer(skb, offset, sizeof(_gueh), &_gueh); + if (!gueh) + goto unk; + if (gueh->control != 0 || gueh->version != 0) + goto unk; + /* Later we can support also IPPROTO_IPV6 */ + if (gueh->proto_ctype != IPPROTO_IPIP) + goto unk; + *proto = gueh->proto_ctype; + return sizeof(struct udphdr) + sizeof(struct guehdr) + + (gueh->hlen << 2); + } + + unk: + return 0; + } + /* * Handle ICMP messages in the outside-to-inside direction (incoming). * Find any that might be relevant, check against existing connections, @@@ -1593,6 -1629,7 +1629,7 @@@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd; unsigned int offset, offset2, ihl, verdict; bool ipip, new_cp = false; + union nf_inet_addr *raddr;
*related = 1;
@@@ -1631,20 -1668,51 +1668,51 @@@ cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph); if (cih == NULL) return NF_ACCEPT; /* The packet looks wrong, ignore */ + raddr = (union nf_inet_addr *)&cih->daddr;
/* Special case for errors for IPIP packets */ ipip = false; if (cih->protocol == IPPROTO_IPIP) { + struct ip_vs_dest *dest; + if (unlikely(cih->frag_off & htons(IP_OFFSET))) return NF_ACCEPT; /* Error for our IPIP must arrive at LOCAL_IN */ if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL)) return NF_ACCEPT; + dest = ip_vs_find_tunnel(ipvs, AF_INET, raddr, 0); + /* Only for known tunnel */ + if (!dest || dest->tun_type != IP_VS_CONN_F_TUNNEL_TYPE_IPIP) + return NF_ACCEPT; offset += cih->ihl * 4; cih = skb_header_pointer(skb, offset, sizeof(_ciph), &_ciph); if (cih == NULL) return NF_ACCEPT; /* The packet looks wrong, ignore */ ipip = true; + } else if (cih->protocol == IPPROTO_UDP && /* Can be UDP encap */ + /* Error for our tunnel must arrive at LOCAL_IN */ + (skb_rtable(skb)->rt_flags & RTCF_LOCAL)) { + __u8 iproto; + int ulen; + + /* Non-first fragment has no UDP header */ + if (unlikely(cih->frag_off & htons(IP_OFFSET))) + return NF_ACCEPT; + offset2 = offset + cih->ihl * 4; + ulen = ipvs_udp_decap(ipvs, skb, offset2, AF_INET, raddr, + &iproto); + if (ulen > 0) { + /* Skip IP and UDP tunnel headers */ + offset = offset2 + ulen; + /* Now we should be at the original IP header */ + cih = skb_header_pointer(skb, offset, sizeof(_ciph), + &_ciph); + if (cih && cih->version == 4 && cih->ihl >= 5 && + iproto == IPPROTO_IPIP) + ipip = true; + else + return NF_ACCEPT; + } }
pd = ip_vs_proto_data_get(ipvs, cih->protocol); @@@ -2245,6 -2313,7 +2313,6 @@@ static const struct nf_hook_ops ip_vs_o static int __net_init __ip_vs_init(struct net *net) { struct netns_ipvs *ipvs; - int ret;
ipvs = net_generic(net, ip_vs_net_id); if (ipvs == NULL) @@@ -2276,11 -2345,17 +2344,11 @@@ if (ip_vs_sync_net_init(ipvs) < 0) goto sync_fail;
- ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); - if (ret < 0) - goto hook_fail; - return 0; /* * Error handling */
-hook_fail: - ip_vs_sync_net_cleanup(ipvs); sync_fail: ip_vs_conn_net_cleanup(ipvs); conn_fail: @@@ -2310,19 -2385,6 +2378,19 @@@ static void __net_exit __ip_vs_cleanup( net->ipvs = NULL; }
+static int __net_init __ip_vs_dev_init(struct net *net) +{ + int ret; + + ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); + if (ret < 0) + goto hook_fail; + return 0; + +hook_fail: + return ret; +} + static void __net_exit __ip_vs_dev_cleanup(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); @@@ -2342,7 -2404,6 +2410,7 @@@ static struct pernet_operations ipvs_co };
static struct pernet_operations ipvs_core_dev_ops = { + .init = __ip_vs_dev_init, .exit = __ip_vs_dev_cleanup, };
diff --combined net/netfilter/ipvs/ip_vs_ctl.c index 741d91aa4a8d,84384d896e29..a8abba7e3d2d --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@@ -510,15 -510,36 +510,36 @@@ static inline unsigned int ip_vs_rs_has static void ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest) { unsigned int hash; + __be16 port;
if (dest->in_rs_table) return;
+ switch (IP_VS_DFWD_METHOD(dest)) { + case IP_VS_CONN_F_MASQ: + port = dest->port; + break; + case IP_VS_CONN_F_TUNNEL: + switch (dest->tun_type) { + case IP_VS_CONN_F_TUNNEL_TYPE_GUE: + port = dest->tun_port; + break; + case IP_VS_CONN_F_TUNNEL_TYPE_IPIP: + port = 0; + break; + default: + return; + } + break; + default: + return; + } + /* * Hash by proto,addr,port, * which are the parameters of the real service. */ - hash = ip_vs_rs_hashkey(dest->af, &dest->addr, dest->port); + hash = ip_vs_rs_hashkey(dest->af, &dest->addr, port);
hlist_add_head_rcu(&dest->d_list, &ipvs->rs_table[hash]); dest->in_rs_table = 1; @@@ -550,7 -571,8 +571,8 @@@ bool ip_vs_has_real_service(struct netn if (dest->port == dport && dest->af == af && ip_vs_addr_equal(af, &dest->addr, daddr) && - (dest->protocol == protocol || dest->vfwmark)) { + (dest->protocol == protocol || dest->vfwmark) && + IP_VS_DFWD_METHOD(dest) == IP_VS_CONN_F_MASQ) { /* HIT */ return true; } @@@ -580,7 -602,37 +602,37 @@@ struct ip_vs_dest *ip_vs_find_real_serv if (dest->port == dport && dest->af == af && ip_vs_addr_equal(af, &dest->addr, daddr) && - (dest->protocol == protocol || dest->vfwmark)) { + (dest->protocol == protocol || dest->vfwmark) && + IP_VS_DFWD_METHOD(dest) == IP_VS_CONN_F_MASQ) { + /* HIT */ + return dest; + } + } + + return NULL; + } + + /* Find real service record by <af,addr,tun_port>. + * In case of multiple records with the same <af,addr,tun_port>, only + * the first found record is returned. + * + * To be called under RCU lock. + */ + struct ip_vs_dest *ip_vs_find_tunnel(struct netns_ipvs *ipvs, int af, + const union nf_inet_addr *daddr, + __be16 tun_port) + { + struct ip_vs_dest *dest; + unsigned int hash; + + /* Check for "full" addressed entries */ + hash = ip_vs_rs_hashkey(af, daddr, tun_port); + + hlist_for_each_entry_rcu(dest, &ipvs->rs_table[hash], d_list) { + if (dest->tun_port == tun_port && + dest->af == af && + ip_vs_addr_equal(af, &dest->addr, daddr) && + IP_VS_DFWD_METHOD(dest) == IP_VS_CONN_F_TUNNEL) { /* HIT */ return dest; } @@@ -826,24 -878,29 +878,29 @@@ __ip_vs_update_dest(struct ip_vs_servic conn_flags = udest->conn_flags & IP_VS_CONN_F_DEST_MASK; conn_flags |= IP_VS_CONN_F_INACTIVE;
+ /* Need to rehash? */ + if ((udest->conn_flags & IP_VS_CONN_F_FWD_MASK) != + IP_VS_DFWD_METHOD(dest) || + udest->tun_type != dest->tun_type || + udest->tun_port != dest->tun_port) + ip_vs_rs_unhash(dest); + /* set the tunnel info */ dest->tun_type = udest->tun_type; dest->tun_port = udest->tun_port; + dest->tun_flags = udest->tun_flags;
/* set the IP_VS_CONN_F_NOOUTPUT flag if not masquerading/NAT */ if ((conn_flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ) { conn_flags |= IP_VS_CONN_F_NOOUTPUT; } else { - /* - * Put the real service in rs_table if not present. - * For now only for NAT! - */ - ip_vs_rs_hash(ipvs, dest); /* FTP-NAT requires conntrack for mangling */ if (svc->port == FTPPORT) ip_vs_register_conntrack(svc); } atomic_set(&dest->conn_flags, conn_flags); + /* Put the real service in rs_table if not present. */ + ip_vs_rs_hash(ipvs, dest);
/* bind the service */ old_svc = rcu_dereference_protected(dest->svc, 1); @@@ -2396,7 -2453,9 +2453,7 @@@ do_ip_vs_set_ctl(struct sock *sk, int c cfg.syncid = dm->syncid; ret = start_sync_thread(ipvs, &cfg, dm->state); } else { - mutex_lock(&ipvs->sync_mutex); ret = stop_sync_thread(ipvs, dm->state); - mutex_unlock(&ipvs->sync_mutex); } goto out_dec; } @@@ -2904,6 -2963,7 +2961,7 @@@ static const struct nla_policy ip_vs_de [IPVS_DEST_ATTR_ADDR_FAMILY] = { .type = NLA_U16 }, [IPVS_DEST_ATTR_TUN_TYPE] = { .type = NLA_U8 }, [IPVS_DEST_ATTR_TUN_PORT] = { .type = NLA_U16 }, + [IPVS_DEST_ATTR_TUN_FLAGS] = { .type = NLA_U16 }, };
static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type, @@@ -3210,6 -3270,8 +3268,8 @@@ static int ip_vs_genl_fill_dest(struct dest->tun_type) || nla_put_be16(skb, IPVS_DEST_ATTR_TUN_PORT, dest->tun_port) || + nla_put_u16(skb, IPVS_DEST_ATTR_TUN_FLAGS, + dest->tun_flags) || nla_put_u32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold) || nla_put_u32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold) || nla_put_u32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS, @@@ -3330,7 -3392,8 +3390,8 @@@ static int ip_vs_genl_parse_dest(struc /* If a full entry was requested, check for the additional fields */ if (full_entry) { struct nlattr *nla_fwd, *nla_weight, *nla_u_thresh, - *nla_l_thresh, *nla_tun_type, *nla_tun_port; + *nla_l_thresh, *nla_tun_type, *nla_tun_port, + *nla_tun_flags;
nla_fwd = attrs[IPVS_DEST_ATTR_FWD_METHOD]; nla_weight = attrs[IPVS_DEST_ATTR_WEIGHT]; @@@ -3338,6 -3401,7 +3399,7 @@@ nla_l_thresh = attrs[IPVS_DEST_ATTR_L_THRESH]; nla_tun_type = attrs[IPVS_DEST_ATTR_TUN_TYPE]; nla_tun_port = attrs[IPVS_DEST_ATTR_TUN_PORT]; + nla_tun_flags = attrs[IPVS_DEST_ATTR_TUN_FLAGS];
if (!(nla_fwd && nla_weight && nla_u_thresh && nla_l_thresh)) return -EINVAL; @@@ -3353,6 -3417,9 +3415,9 @@@
if (nla_tun_port) udest->tun_port = nla_get_be16(nla_tun_port); + + if (nla_tun_flags) + udest->tun_flags = nla_get_u16(nla_tun_flags); }
return 0; @@@ -3513,8 -3580,10 +3578,8 @@@ static int ip_vs_genl_del_daemon(struc if (!attrs[IPVS_DAEMON_ATTR_STATE]) return -EINVAL;
- mutex_lock(&ipvs->sync_mutex); ret = stop_sync_thread(ipvs, nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE])); - mutex_unlock(&ipvs->sync_mutex); return ret; }
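Editorial note: the IPVS hunks above and the nf_nat_proto.c hunks that follow are mechanical conversions from skb_make_writable() to skb_ensure_writable(); the new helper returns 0 on success and a negative errno on failure, which is why every test is inverted. Expressed as a compatibility wrapper, purely for illustration:

        /* Illustrative sketch only - not part of this commit. */
        static inline bool legacy_skb_make_writable(struct sk_buff *skb,
                                                    unsigned int write_len)
        {
                /* Old convention: true on success; new one: 0 on success. */
                return skb_ensure_writable(skb, write_len) == 0;
        }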
diff --combined net/netfilter/nf_nat_proto.c index 83a24cc5753b,888292e8fbb2..7ac733ebd060 --- a/net/netfilter/nf_nat_proto.c +++ b/net/netfilter/nf_nat_proto.c @@@ -70,7 -70,7 +70,7 @@@ static bool udp_manip_pkt(struct sk_buf struct udphdr *hdr; bool do_csum;
- if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) + if (skb_ensure_writable(skb, hdroff + sizeof(*hdr))) return false;
hdr = (struct udphdr *)(skb->data + hdroff); @@@ -88,7 -88,7 +88,7 @@@ static bool udplite_manip_pkt(struct sk #ifdef CONFIG_NF_CT_PROTO_UDPLITE struct udphdr *hdr;
- if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) + if (skb_ensure_writable(skb, hdroff + sizeof(*hdr))) return false;
hdr = (struct udphdr *)(skb->data + hdroff); @@@ -114,7 -114,7 +114,7 @@@ sctp_manip_pkt(struct sk_buff *skb if (skb->len >= hdroff + sizeof(*hdr)) hdrsize = sizeof(*hdr);
- if (!skb_make_writable(skb, hdroff + hdrsize)) + if (skb_ensure_writable(skb, hdroff + hdrsize)) return false;
hdr = (struct sctphdr *)(skb->data + hdroff); @@@ -155,7 -155,7 +155,7 @@@ tcp_manip_pkt(struct sk_buff *skb if (skb->len >= hdroff + sizeof(struct tcphdr)) hdrsize = sizeof(struct tcphdr);
- if (!skb_make_writable(skb, hdroff + hdrsize)) + if (skb_ensure_writable(skb, hdroff + hdrsize)) return false;
hdr = (struct tcphdr *)(skb->data + hdroff); @@@ -195,7 -195,7 +195,7 @@@ dccp_manip_pkt(struct sk_buff *skb if (skb->len >= hdroff + sizeof(struct dccp_hdr)) hdrsize = sizeof(struct dccp_hdr);
- if (!skb_make_writable(skb, hdroff + hdrsize)) + if (skb_ensure_writable(skb, hdroff + hdrsize)) return false;
hdr = (struct dccp_hdr *)(skb->data + hdroff); @@@ -229,7 -229,7 +229,7 @@@ icmp_manip_pkt(struct sk_buff *skb { struct icmphdr *hdr;
- if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) + if (skb_ensure_writable(skb, hdroff + sizeof(*hdr))) return false;
hdr = (struct icmphdr *)(skb->data + hdroff); @@@ -247,7 -247,7 +247,7 @@@ icmpv6_manip_pkt(struct sk_buff *skb { struct icmp6hdr *hdr;
- if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) + if (skb_ensure_writable(skb, hdroff + sizeof(*hdr))) return false;
hdr = (struct icmp6hdr *)(skb->data + hdroff); @@@ -275,7 -275,7 +275,7 @@@ gre_manip_pkt(struct sk_buff *skb
/* pgreh includes two optional 32bit fields which are not required * to be there. That's where the magic '8' comes from */ - if (!skb_make_writable(skb, hdroff + sizeof(*pgreh) - 8)) + if (skb_ensure_writable(skb, hdroff + sizeof(*pgreh) - 8)) return false;
greh = (void *)skb->data + hdroff; @@@ -347,7 -347,7 +347,7 @@@ static bool nf_nat_ipv4_manip_pkt(struc struct iphdr *iph; unsigned int hdroff;
- if (!skb_make_writable(skb, iphdroff + sizeof(*iph))) + if (skb_ensure_writable(skb, iphdroff + sizeof(*iph))) return false;
iph = (void *)skb->data + iphdroff; @@@ -378,7 -378,7 +378,7 @@@ static bool nf_nat_ipv6_manip_pkt(struc int hdroff; u8 nexthdr;
- if (!skb_make_writable(skb, iphdroff + sizeof(*ipv6h))) + if (skb_ensure_writable(skb, iphdroff + sizeof(*ipv6h))) return false;
ipv6h = (void *)skb->data + iphdroff; @@@ -562,9 -562,9 +562,9 @@@ int nf_nat_icmp_reply_translation(struc
WARN_ON(ctinfo != IP_CT_RELATED && ctinfo != IP_CT_RELATED_REPLY);
- if (!skb_make_writable(skb, hdrlen + sizeof(*inside))) + if (skb_ensure_writable(skb, hdrlen + sizeof(*inside))) return 0; - if (nf_ip_checksum(skb, hooknum, hdrlen, 0)) + if (nf_ip_checksum(skb, hooknum, hdrlen, IPPROTO_ICMP)) return 0;
inside = (void *)skb->data + hdrlen; @@@ -784,7 -784,7 +784,7 @@@ int nf_nat_icmpv6_reply_translation(str
WARN_ON(ctinfo != IP_CT_RELATED && ctinfo != IP_CT_RELATED_REPLY);
- if (!skb_make_writable(skb, hdrlen + sizeof(*inside))) + if (skb_ensure_writable(skb, hdrlen + sizeof(*inside))) return 0; if (nf_ip6_checksum(skb, hooknum, hdrlen, IPPROTO_ICMPV6)) return 0; diff --combined samples/bpf/xdp_redirect_user.c index 003c0c6e38c5,ce71be187205..4817f43a2848 --- a/samples/bpf/xdp_redirect_user.c +++ b/samples/bpf/xdp_redirect_user.c @@@ -16,7 -16,7 +16,7 @@@
#include "bpf_util.h" #include <bpf/bpf.h> - #include "bpf/libbpf.h" + #include "libbpf.h"
static int ifindex_in; static int ifindex_out; @@@ -189,7 -189,7 +189,7 @@@ int main(int argc, char **argv }
memset(&info, 0, sizeof(info)); - ret = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); + ret = bpf_obj_get_info_by_fd(dummy_prog_fd, &info, &info_len); if (ret) { printf("can't get prog info - %s\n", strerror(errno)); return ret; diff --combined tools/include/uapi/linux/bpf.h index 29a5bc3d5c66,b077507efa3f..db59598d6409 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@@ -262,6 -262,24 +262,24 @@@ enum bpf_attach_type */ #define BPF_F_ANY_ALIGNMENT (1U << 1)
+ /* BPF_F_TEST_RND_HI32 is used in the BPF_PROG_LOAD command for testing purposes. + * The verifier does sub-register def/use analysis and identifies instructions whose + * def only matters for the low 32 bits, while the high 32 bits (normally set by + * implicit zero extension) are never referenced later. Therefore the verifier notifies JIT back-ends + * that it is safe to ignore clearing the high 32 bits for these instructions. This + * saves some back-ends a lot of code-gen. However, such optimization is not + * necessary on some arches, for example x86_64 and arm64, whose JIT back-ends + * hence haven't used the verifier's analysis result. But we really want a + * way to verify the correctness of the described optimization on + * x86_64, on which testsuites are frequently exercised. + * + * So, this flag is introduced. Once it is set, the verifier will randomize the high + * 32 bits of those instructions that have been identified as safe to leave without + * zero extension. Then, if the verifier's analysis is incorrect, such randomization will + * make tests regress and expose the bug. + */ + #define BPF_F_TEST_RND_HI32 (1U << 2) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * two extensions: * @@@ -2674,6 -2692,20 +2692,20 @@@ union bpf_attr * 0 on success. * * **-ENOENT** if the bpf-local-storage cannot be found. + * + * int bpf_send_signal(u32 sig) + * Description + * Send signal *sig* to the current task. + * Return + * 0 on success or successfully queued. + * + * **-EBUSY** if work queue under nmi is full. + * + * **-EINVAL** if *sig* is invalid. + * + * **-EPERM** if no permission to send the *sig*. + * + * **-EAGAIN** if bpf program can try again. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@@ -2784,7 -2816,8 +2816,8 @@@ FN(strtol), \ FN(strtoul), \ FN(sk_storage_get), \ - FN(sk_storage_delete), + FN(sk_storage_delete), \ + FN(send_signal),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@@ -3052,6 -3085,10 +3085,10 @@@ struct bpf_sock_tuple }; };
+ struct bpf_xdp_sock { + __u32 queue_id; + }; + #define XDP_PACKET_HEADROOM 256
/* User return codes for XDP prog type. @@@ -3143,7 -3180,6 +3180,7 @@@ struct bpf_prog_info char name[BPF_OBJ_NAME_LEN]; __u32 ifindex; __u32 gpl_compatible:1; + __u32 :31; /* alignment pad */ __u64 netns_dev; __u64 netns_ino; __u32 nr_jited_ksyms; @@@ -3213,6 -3249,7 +3250,7 @@@ struct bpf_sock_addr __u32 msg_src_ip6[4]; /* Allows 1,2,4-byte read and 4-byte write. * Stored in network byte order. */ + __bpf_md_ptr(struct bpf_sock *, sk); };
/* User bpf_sock_ops struct to access socket values and specify request ops @@@ -3264,6 -3301,7 +3302,7 @@@ struct bpf_sock_ops __u32 sk_txhash; __u64 bytes_received; __u64 bytes_acked; + __bpf_md_ptr(struct bpf_sock *, sk); };
/* Definitions for bpf_sock_ops_cb_flags */